code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
---|---|---|
from barrel import Store, Field, FloatField, EmbeddedStoreField
from barrel.rpc import RpcMixin
from barrel_reaktor.document.models import Document
from . import get_search_sources
class Stat(Store):
"""The reaktor always passes in `name` as the value to use for the search
facet. Since it's a value, let's rename it. Some fields also provide a
label, which we keep untouched.
"""
count = Field(target="count")
value = Field(target="name")
label = Field(target="label")
class CategoryStat(Store):
"""Category searching facet is inconsistent with other facets.
This model is there as an attempt to normalize that.
"""
count = Field(target="count")
value = Field(target="id")
label = Field(target="name")
class DocumentResult(Store):
"""Search result object wrapping search itemsalongside search info
like pagination information.
"""
class DocumentItem(Store):
"""Search result item wrapping a document alongside search info like
item relevance.
"""
document = EmbeddedStoreField(target="searchResult", store_class=Document)
relevance = FloatField(target="relevance")
class Stats(Store):
"""Represents stats about a search result, e.g. how many books for
this language, how many books available as pdf, ...
"""
category = EmbeddedStoreField(target="category", store_class=CategoryStat, is_array=True)
collection_title = EmbeddedStoreField(target="collectionTitle", store_class=Stat, is_array=True)
drm = EmbeddedStoreField(target="drmType", store_class=Stat, is_array=True)
format = EmbeddedStoreField(target="format", store_class=Stat, is_array=True)
language = EmbeddedStoreField(target="language", store_class=Stat, is_array=True)
price = EmbeddedStoreField(target="price", store_class=Stat, is_array=True)
pub_date = EmbeddedStoreField(target="publication_date", store_class=Stat, is_array=True)
rating = EmbeddedStoreField(target="rating", store_class=Stat, is_array=True)
source = EmbeddedStoreField(target="source", store_class=Stat, is_array=True)
tag = EmbeddedStoreField(target="tag", store_class=Stat, is_array=True)
# Without blocking search, other fields don't make sense anymore so there
# they are just ignored.
count = Field(target="numberOfResults")
has_less = Field(target="hasLess")
has_more = Field(target="hasMore")
items = EmbeddedStoreField(target='results', store_class=DocumentItem, is_array=True)
offset = Field(target="offset")
stats = EmbeddedStoreField(target='relatedObjects', store_class=Stats)
total_count = Field(target="totalNumberOfResults")
class Search(RpcMixin):
"""Interface to various API search endpoints. Beware that this one is not
a `Store`, which means that when calling its class methods,
expect different types.
"""
interface = 'WSSearchDocument'
@classmethod
def documents(cls, token, search_string, offset, number_of_results, sort=None, direction=None, include_search_fields=None, source=None, related=None, options=None):
"""Returns documents for a given string."""
invert = direction == 'desc'
if not options:
options = {'resultType': 'Object'}
if source:
sources = get_search_sources(source)
return cls.signature(method='searchDocuments', data_converter=DocumentResult,
args=[token, search_string, sources, offset, number_of_results, sort, invert, related, include_search_fields, options])
@classmethod
def suggestions(cls, token, search_string, number_of_results, sources=None, highlight=None):
"""Returns document suggestions for a given string."""
args = [token, search_string, sources, number_of_results]
method = 'getSuggestionObjects'
if highlight:
method = 'getSuggestionObjectsWithHighlights'
args.append(highlight)
return cls.signature(method=method, data_converter=Document, args=args)
|
[
"barrel.FloatField",
"barrel.EmbeddedStoreField",
"barrel.Field"
] |
[((411, 432), 'barrel.Field', 'Field', ([], {'target': '"""count"""'}), "(target='count')\n", (416, 432), False, 'from barrel import Store, Field, FloatField, EmbeddedStoreField\n'), ((445, 465), 'barrel.Field', 'Field', ([], {'target': '"""name"""'}), "(target='name')\n", (450, 465), False, 'from barrel import Store, Field, FloatField, EmbeddedStoreField\n'), ((478, 499), 'barrel.Field', 'Field', ([], {'target': '"""label"""'}), "(target='label')\n", (483, 499), False, 'from barrel import Store, Field, FloatField, EmbeddedStoreField\n'), ((673, 694), 'barrel.Field', 'Field', ([], {'target': '"""count"""'}), "(target='count')\n", (678, 694), False, 'from barrel import Store, Field, FloatField, EmbeddedStoreField\n'), ((707, 725), 'barrel.Field', 'Field', ([], {'target': '"""id"""'}), "(target='id')\n", (712, 725), False, 'from barrel import Store, Field, FloatField, EmbeddedStoreField\n'), ((738, 758), 'barrel.Field', 'Field', ([], {'target': '"""name"""'}), "(target='name')\n", (743, 758), False, 'from barrel import Store, Field, FloatField, EmbeddedStoreField\n'), ((2369, 2400), 'barrel.Field', 'Field', ([], {'target': '"""numberOfResults"""'}), "(target='numberOfResults')\n", (2374, 2400), False, 'from barrel import Store, Field, FloatField, EmbeddedStoreField\n'), ((2416, 2439), 'barrel.Field', 'Field', ([], {'target': '"""hasLess"""'}), "(target='hasLess')\n", (2421, 2439), False, 'from barrel import Store, Field, FloatField, EmbeddedStoreField\n'), ((2455, 2478), 'barrel.Field', 'Field', ([], {'target': '"""hasMore"""'}), "(target='hasMore')\n", (2460, 2478), False, 'from barrel import Store, Field, FloatField, EmbeddedStoreField\n'), ((2491, 2568), 'barrel.EmbeddedStoreField', 'EmbeddedStoreField', ([], {'target': '"""results"""', 'store_class': 'DocumentItem', 'is_array': '(True)'}), "(target='results', store_class=DocumentItem, is_array=True)\n", (2509, 2568), False, 'from barrel import Store, Field, FloatField, EmbeddedStoreField\n'), ((2582, 2604), 'barrel.Field', 'Field', ([], {'target': '"""offset"""'}), "(target='offset')\n", (2587, 2604), False, 'from barrel import Store, Field, FloatField, EmbeddedStoreField\n'), ((2617, 2679), 'barrel.EmbeddedStoreField', 'EmbeddedStoreField', ([], {'target': '"""relatedObjects"""', 'store_class': 'Stats'}), "(target='relatedObjects', store_class=Stats)\n", (2635, 2679), False, 'from barrel import Store, Field, FloatField, EmbeddedStoreField\n'), ((2698, 2734), 'barrel.Field', 'Field', ([], {'target': '"""totalNumberOfResults"""'}), "(target='totalNumberOfResults')\n", (2703, 2734), False, 'from barrel import Store, Field, FloatField, EmbeddedStoreField\n'), ((1065, 1128), 'barrel.EmbeddedStoreField', 'EmbeddedStoreField', ([], {'target': '"""searchResult"""', 'store_class': 'Document'}), "(target='searchResult', store_class=Document)\n", (1083, 1128), False, 'from barrel import Store, Field, FloatField, EmbeddedStoreField\n'), ((1149, 1179), 'barrel.FloatField', 'FloatField', ([], {'target': '"""relevance"""'}), "(target='relevance')\n", (1159, 1179), False, 'from barrel import Store, Field, FloatField, EmbeddedStoreField\n'), ((1371, 1449), 'barrel.EmbeddedStoreField', 'EmbeddedStoreField', ([], {'target': '"""category"""', 'store_class': 'CategoryStat', 'is_array': '(True)'}), "(target='category', store_class=CategoryStat, is_array=True)\n", (1389, 1449), False, 'from barrel import Store, Field, FloatField, EmbeddedStoreField\n'), ((1477, 1554), 'barrel.EmbeddedStoreField', 'EmbeddedStoreField', ([], {'target': '"""collectionTitle"""', 'store_class': 'Stat', 'is_array': '(True)'}), "(target='collectionTitle', store_class=Stat, is_array=True)\n", (1495, 1554), False, 'from barrel import Store, Field, FloatField, EmbeddedStoreField\n'), ((1569, 1638), 'barrel.EmbeddedStoreField', 'EmbeddedStoreField', ([], {'target': '"""drmType"""', 'store_class': 'Stat', 'is_array': '(True)'}), "(target='drmType', store_class=Stat, is_array=True)\n", (1587, 1638), False, 'from barrel import Store, Field, FloatField, EmbeddedStoreField\n'), ((1656, 1724), 'barrel.EmbeddedStoreField', 'EmbeddedStoreField', ([], {'target': '"""format"""', 'store_class': 'Stat', 'is_array': '(True)'}), "(target='format', store_class=Stat, is_array=True)\n", (1674, 1724), False, 'from barrel import Store, Field, FloatField, EmbeddedStoreField\n'), ((1744, 1814), 'barrel.EmbeddedStoreField', 'EmbeddedStoreField', ([], {'target': '"""language"""', 'store_class': 'Stat', 'is_array': '(True)'}), "(target='language', store_class=Stat, is_array=True)\n", (1762, 1814), False, 'from barrel import Store, Field, FloatField, EmbeddedStoreField\n'), ((1831, 1898), 'barrel.EmbeddedStoreField', 'EmbeddedStoreField', ([], {'target': '"""price"""', 'store_class': 'Stat', 'is_array': '(True)'}), "(target='price', store_class=Stat, is_array=True)\n", (1849, 1898), False, 'from barrel import Store, Field, FloatField, EmbeddedStoreField\n'), ((1918, 1996), 'barrel.EmbeddedStoreField', 'EmbeddedStoreField', ([], {'target': '"""publication_date"""', 'store_class': 'Stat', 'is_array': '(True)'}), "(target='publication_date', store_class=Stat, is_array=True)\n", (1936, 1996), False, 'from barrel import Store, Field, FloatField, EmbeddedStoreField\n'), ((2014, 2082), 'barrel.EmbeddedStoreField', 'EmbeddedStoreField', ([], {'target': '"""rating"""', 'store_class': 'Stat', 'is_array': '(True)'}), "(target='rating', store_class=Stat, is_array=True)\n", (2032, 2082), False, 'from barrel import Store, Field, FloatField, EmbeddedStoreField\n'), ((2100, 2168), 'barrel.EmbeddedStoreField', 'EmbeddedStoreField', ([], {'target': '"""source"""', 'store_class': 'Stat', 'is_array': '(True)'}), "(target='source', store_class=Stat, is_array=True)\n", (2118, 2168), False, 'from barrel import Store, Field, FloatField, EmbeddedStoreField\n'), ((2183, 2248), 'barrel.EmbeddedStoreField', 'EmbeddedStoreField', ([], {'target': '"""tag"""', 'store_class': 'Stat', 'is_array': '(True)'}), "(target='tag', store_class=Stat, is_array=True)\n", (2201, 2248), False, 'from barrel import Store, Field, FloatField, EmbeddedStoreField\n')]
|
import math
from random import randint
from numpy import sqrt
def GCD(a, b):
if b == 0:
return a
return GCD(b, a % b)
#######################################
def ExtendedEuclid(a, b):
if b == 0:
return (1, 0)
(x, y) = ExtendedEuclid(b, a % b)
k = a // b
return (y, x - k * y)
def InvertModulo(a, n):
(b, x) = ExtendedEuclid(a, n)
if b < 0:
b = (b % n + n) % n # we don’t want −ve integers
return b
##################################
def PowMod(a, n, mod):
if n == 0:
return 1 % mod
elif n == 1:
return a % mod
else:
b = PowMod(a, n // 2, mod)
b = b * b % mod
if n % 2 == 0:
return b
else:
return b * a % mod
def ConvertToInt( message_str):
res = 0
for i in range(len(message_str)):
res = res * 256 + ord(message_str[i])
return res
#####################################
def ConvertToStr(n):
res = ""
while n > 0:
res += chr(n % 256)
n //= 256
return res[::-1]
#question1
def Encrypt(m, n, e):
m=ConvertToInt(m)
c=PowMod(m,e,n)
return c
#############################
def Decrypt(c, p, q, e):
euler=(p-1)*(q-1)
d=InvertModulo(e,euler)
n=p*q
m=PowMod(c,d,n)
m=ConvertToStr(m)
return m
chiper_message=Encrypt("attack", 1000000007*1000000009,23917)
print(Decrypt(chiper_message, 1000000007,1000000009,23917))
#question2
def DecipherSimple(c, n, e, potential_messages):
decipheredtext=''
for i in potential_messages:
if Encrypt(i,n,e)==c:
decipheredtext=i
return decipheredtext
modulo = 101
exponent = 12
ciphertext = Encrypt("attack", modulo, exponent)
print(DecipherSimple(ciphertext, modulo, exponent, ["attack", "don't attack", "wait"]))
# get a missing prime number
def get_prime_number(i,j,n):
for i in range(i,j):
if(n%i==0):
return i
return 0
##question3
def DecipherSmallPrime(c, n, e):
p=get_prime_number(2,1000000,n)
decipheredtext=Decrypt(c,p,n//p,e)
return decipheredtext
modulo = 101 *18298970732541109011012304219376080251334480295537316123696052970419466495220522723330315111017831737980079504337868198011077274303193766040393009648852841770668239779097280026631944319501437547002412556176186750790476901358334138818777298389724049250700606462316428106882097210008142941838672676714188593227684360287806974345181893018133710957167334490627178666071809992955566020058374505477745993383434501768887090900283569055646901291270870833498474402084748161755197005050874785474707550376333429671113753137201128897550014524209754619355308207537703754006699795711188492048286436285518105948050401762394690148387
exponent = 239
ciphertext = Encrypt("attack", modulo, exponent)
print(DecipherSmallPrime(ciphertext, modulo, exponent))
#question4
def DecipherSmallDiff(c, n, e):
p=get_prime_number(int(sqrt(n)-5000),int(sqrt(n)),n)
decipheredtext=Decrypt(c,p,n//p,e)
return decipheredtext
p = 1000000007
q = 1000000009
n = p * q
e = 239
ciphertext = Encrypt("attack", n, e)
message = DecipherSmallDiff(ciphertext, n, e)
print(message)
#question5
def DecipherCommonDivisor(c1, n1, e1, c2, n2, e2):
p=GCD(n1,n2)
first_decipheredtext= Decrypt(c1,p,n1//p,e1)
second_decipheredtext=Decrypt(c2,p,n2//p,e2)
return first_decipheredtext, second_decipheredtext
p = 101
q1 = 18298970732541109011012304219376080251334480295537316123696052970419466495220522723330315111017831737980079504337868198011077274303193766040393009648852841770668239779097280026631944319501437547002412556176186750790476901358334138818777298389724049250700606462316428106882097210008142941838672676714188593227684360287806974345181893018133710957167334490627178666071809992955566020058374505477745993383434501768887090900283569055646901291270870833498474402084748161755197005050874785474707550376333429671113753137201128897550014524209754619355308207537703754006699795711188492048286436285518105948050401762394690148387
q2 = 1000000007
first_modulo = p * q1
second_modulo = p * q2
first_exponent = 239
second_exponent = 17
first_ciphertext = Encrypt("attack", first_modulo, first_exponent)
second_ciphertext = Encrypt("wait", second_modulo, second_exponent)
print(DecipherCommonDivisor(first_ciphertext, first_modulo, first_exponent, second_ciphertext, second_modulo, second_exponent))
#question6
def DecipherHastad(c1, n1, c2, n2, e):
N1=(n1*n2)//n1
N2=(n1*n2)//n2
x1=InvertModulo(N1,n1)
x2=InvertModulo(N2,n2)
c_square=(c1*N1*x1+c2*N2*x2)%(n1*n2)
c=int(round(sqrt(float(c_square))))
broadcast_message=ConvertToStr(c)
# m1= int(round(sqrt(float(c1))))
#m2= int(round(sqrt(float(c2))))
#if(m1==m2):
# broadcast_message=ConvertToStr(m1)
return broadcast_message
p1 = 790383132652258876190399065097
q1 = 662503581792812531719955475509
p2 = 656917682542437675078478868539
q2 = 1263581691331332127259083713503
n1 = p1 * q1
n2 = p2 * q2
e = 2
ciphertext1 = Encrypt("attack", n1, e)
ciphertext2 = Encrypt("attack", n2, e)
message = DecipherHastad(ciphertext1, n1, ciphertext2, n2, e)
print(message)
|
[
"numpy.sqrt"
] |
[((2985, 2992), 'numpy.sqrt', 'sqrt', (['n'], {}), '(n)\n', (2989, 2992), False, 'from numpy import sqrt\n'), ((2967, 2974), 'numpy.sqrt', 'sqrt', (['n'], {}), '(n)\n', (2971, 2974), False, 'from numpy import sqrt\n')]
|
#!/Users/isobar/.virtualenvs/py3/bin/python
# -*- coding: utf-8 -*-
# <bitbar.title>JPY to NTD</bitbar.title>
# <bitbar.version>1.0</bitbar.version>
# <bitbar.author>wwwins</bitbar.author>
# <bitbar.author.github>wwwins</bitbar.author.github>
# <bitbar.desc>Japanese Yen to Taiwan New Dollar Rate</bitbar.desc>
# <bitbar.image></bitbar.image>
import time
import requests
from lxml import html
# Setting your currency buying/selling rate
BUY_RATE = 0.270
color = "cadetblue"
if (int(time.strftime("%H")) > 17):
print ('🈚️')
exit()
if (int(time.strftime("%H")) < 9):
print ('🈚️')
exit()
r = requests.get("https://rate.bot.com.tw/xrt?Lang=zh-TW")
doc = html.fromstring(r.text)
content = doc.cssselect("td.rate-content-cash")
jpy = content[15].text
if (float(jpy) < BUY_RATE):
color = "red"
print ('JPY:'+jpy+'| color='+color)
|
[
"time.strftime",
"requests.get",
"lxml.html.fromstring"
] |
[((598, 652), 'requests.get', 'requests.get', (['"""https://rate.bot.com.tw/xrt?Lang=zh-TW"""'], {}), "('https://rate.bot.com.tw/xrt?Lang=zh-TW')\n", (610, 652), False, 'import requests\n'), ((659, 682), 'lxml.html.fromstring', 'html.fromstring', (['r.text'], {}), '(r.text)\n', (674, 682), False, 'from lxml import html\n'), ((485, 504), 'time.strftime', 'time.strftime', (['"""%H"""'], {}), "('%H')\n", (498, 504), False, 'import time\n'), ((544, 563), 'time.strftime', 'time.strftime', (['"""%H"""'], {}), "('%H')\n", (557, 563), False, 'import time\n')]
|
import json
def supe(digit):
digit = str(digit)
if len(digit)==1:
return digit
else:
cont = 0
for i in range(len(digit)):
cont+= int(digit[i])
return supe(cont)
# TODO Complete!
def super_digit(n, k):
digit = str(str(n)*k)
return int(supe(digit))
if __name__ == '__main__':
with open('./data.json') as f:
tests = json.load(f)
for i, test in enumerate(tests):
n = test["n"]
k = test["k"]
actual = super_digit(n, k)
expected = test['result']
assert actual == expected, f'Test {i} | n: {n} | k: {k} | expected: {expected}, actual: {actual}'
print('OK!')
|
[
"json.load"
] |
[((410, 422), 'json.load', 'json.load', (['f'], {}), '(f)\n', (419, 422), False, 'import json\n')]
|
from spec2scl import settings
from spec2scl import transformer
from spec2scl.decorators import matches
@transformer.Transformer.register_transformer
class PerlTransformer(transformer.Transformer):
def __init__(self, options={}):
super(PerlTransformer, self).__init__(options)
@matches(r'^[^\n]*%{__perl}\s+', one_line=False, sections=settings.RUNTIME_SECTIONS)
@matches(r'^\s*perl\s+', one_line=False, sections=settings.RUNTIME_SECTIONS) # carefully here, "perl" will occur often in the specfile
@matches(r'./Build', one_line=False)
def handle_perl_specific_commands(self, original_spec, pattern, text):
return self.sclize_all_commands(pattern, text)
|
[
"spec2scl.decorators.matches"
] |
[((296, 385), 'spec2scl.decorators.matches', 'matches', (['"""^[^\\\\n]*%{__perl}\\\\s+"""'], {'one_line': '(False)', 'sections': 'settings.RUNTIME_SECTIONS'}), "('^[^\\\\n]*%{__perl}\\\\s+', one_line=False, sections=settings.\n RUNTIME_SECTIONS)\n", (303, 385), False, 'from spec2scl.decorators import matches\n'), ((385, 461), 'spec2scl.decorators.matches', 'matches', (['"""^\\\\s*perl\\\\s+"""'], {'one_line': '(False)', 'sections': 'settings.RUNTIME_SECTIONS'}), "('^\\\\s*perl\\\\s+', one_line=False, sections=settings.RUNTIME_SECTIONS)\n", (392, 461), False, 'from spec2scl.decorators import matches\n'), ((525, 559), 'spec2scl.decorators.matches', 'matches', (['"""./Build"""'], {'one_line': '(False)'}), "('./Build', one_line=False)\n", (532, 559), False, 'from spec2scl.decorators import matches\n')]
|
import smtplib
from email.message import EmailMessage
# function to send email to listed email address
def send_email(info,news):
email = EmailMessage()
email['From'] = '< Sender Name >'
email['To'] = info[1]
email['Subject'] = 'Hello '+info[0]
email.set_content(news,'html')
with smtplib.SMTP(host='smtp.gmail.com', port=587) as smtp:
smtp.ehlo()
smtp.starttls()
smtp.login('<Sender Email>','<Sender Password>')
smtp.send_message(email)
smtp.quit()
|
[
"email.message.EmailMessage",
"smtplib.SMTP"
] |
[((144, 158), 'email.message.EmailMessage', 'EmailMessage', ([], {}), '()\n', (156, 158), False, 'from email.message import EmailMessage\n'), ((307, 352), 'smtplib.SMTP', 'smtplib.SMTP', ([], {'host': '"""smtp.gmail.com"""', 'port': '(587)'}), "(host='smtp.gmail.com', port=587)\n", (319, 352), False, 'import smtplib\n')]
|
#!/usr/bin/env python3
from pathlib import Path
def get_project_root() -> Path:
"""
Get project root directory with assumed structure as:
${PACKAGE_ROOT}/core/common/path.py
"""
return Path(__file__).resolve().parent.parent.parent
def get_config_file() -> Path:
"""
Get default config file.
"""
return get_project_root()/'data/config/config.yaml'
def main():
print(get_project_root())
if __name__ == '__main__':
main()
|
[
"pathlib.Path"
] |
[((208, 222), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (212, 222), False, 'from pathlib import Path\n')]
|
# See LICENSE.incore for details
import pathlib
import logging
import argparse
import os
import sys
import subprocess
import operator
import shlex
import ruamel
from ruamel.yaml import YAML
#from riscof.log import logger
yaml = YAML(typ="rt")
yaml.default_flow_style = False
yaml.allow_unicode = True
logger = logging.getLogger(__name__)
def dump_yaml(foo, outfile):
yaml.dump(foo, outfile)
def load_yaml(foo):
try:
with open(foo, "r") as file:
return dict(yaml.load(file))
except ruamel.yaml.constructor.DuplicateKeyError as msg:
logger = logging.getLogger(__name__)
error = "\n".join(str(msg).split("\n")[2:-7])
logger.error(error)
raise SystemExit
def absolute_path(config_dir, entry_path):
"""
Create an absolute path based on the config's file directory location and a
path value from a configuration entry.
"""
# Allow entries relative to user home.
entry_path = os.path.expanduser(entry_path)
if os.path.exists(entry_path):
# If the entry is already a valid path, return the absolute value of it.
logger.debug("Path entry found: " + str(entry_path))
abs_entry_path = os.path.abspath(entry_path)
else:
# Assume that the entry is relative to the location of the config file.
logger.debug("Path entry '{}' not found. Combine it with config file "\
"location '{}'.".format(entry_path, config_dir))
abs_entry_path = os.path.abspath(os.path.join(config_dir, entry_path))
logger.debug("Using the path: " +str(abs_entry_path))
return abs_entry_path
class makeUtil():
"""
Utility for ease of use of make commands like `make` and `pmake`.
Supports automatic addition and execution of targets. Uses the class
:py:class:`shellCommand` to execute commands.
"""
def __init__(self,makeCommand='make',makefilePath="./Makefile"):
""" Constructor.
:param makeCommand: The variant of make to be used with optional arguments.
Ex - `pmake -j 8`
:type makeCommand: str
:param makefilePath: The path to the makefile to be used.
:type makefilePath: str
"""
self.makeCommand=makeCommand
self.makefilePath = makefilePath
self.targets = []
def add_target(self,command,tname=""):
"""
Function to add a target to the makefile.
:param command: The command to be executed when the target is run.
:type command: str
:param tname: The name of the target to be used. If not specified, TARGET<num> is used as the name.
:type tname: str
"""
if tname == "":
tname = "TARGET"+str(len(self.targets))
with open(self.makefilePath,"a") as makefile:
makefile.write("\n\n.PHONY : " + tname + "\n" + tname + " :\n\t"+command.replace("\n","\n\t"))
self.targets.append(tname)
def execute_target(self,tname,cwd="./"):
"""
Function to execute a particular target only.
:param tname: Name of the target to execute.
:type tname: str
:param cwd: The working directory to be set while executing the make command.
:type cwd: str
:raise AssertionError: If target name is not present in the list of defined targets.
"""
assert tname in self.targets, "Target does not exist."
return shellCommand(self.makeCommand+" -f "+self.makefilePath+" "+tname).run(cwd=cwd)
def execute_all(self,cwd):
"""
Function to execute all the defined targets.
:param cwd: The working directory to be set while executing the make command.
:type cwd: str
"""
return shellCommand(self.makeCommand+" -f "+self.makefilePath+" "+" ".join(self.targets)).run(cwd=cwd)
class Command():
"""
Class for command build which is supported
by :py:mod:`suprocess` module. Supports automatic
conversion of :py:class:`pathlib.Path` instances to
valid format for :py:mod:`subprocess` functions.
"""
def __init__(self, *args, pathstyle='auto', ensure_absolute_paths=False):
"""Constructor.
:param pathstyle: Determine the path style when adding instance of
:py:class:`pathlib.Path`. Path style determines the slash type
which separates the path components. If pathstyle is `auto`, then
on Windows backslashes are used and on Linux forward slashes are used.
When backslashes should be prevented on all systems, the pathstyle
should be `posix`. No other values are allowed.
:param ensure_absolute_paths: If true, then any passed path will be
converted to absolute path.
:param args: Initial command.
:type pathstyle: str
:type ensure_absolute_paths: bool
"""
self.ensure_absolute_paths = ensure_absolute_paths
self.pathstyle = pathstyle
self.args = []
for arg in args:
self.append(arg)
def append(self, arg):
"""Add new argument to command.
:param arg: Argument to be added. It may be list, tuple,
:py:class:`Command` instance or any instance which
supports :py:func:`str`.
"""
to_add = []
if type(arg) is list:
to_add = arg
elif type(arg) is tuple:
to_add = list(arg)
elif isinstance(arg, type(self)):
to_add = arg.args
elif isinstance(arg, str) and not self._is_shell_command():
to_add = shlex.split(arg)
else:
# any object which will be converted into str.
to_add.append(arg)
# Convert all arguments to its string representation.
# pathlib.Path instances
to_add = [
self._path2str(el) if isinstance(el, pathlib.Path) else str(el)
for el in to_add
]
self.args.extend(to_add)
def clear(self):
"""Clear arguments."""
self.args = []
def run(self, **kwargs):
"""Execute the current command.
Uses :py:class:`subprocess.Popen` to execute the command.
:return: The return code of the process .
:raise subprocess.CalledProcessError: If `check` is set
to true in `kwargs` and the process returns
non-zero value.
"""
kwargs.setdefault('shell', self._is_shell_command())
cwd = self._path2str(kwargs.get(
'cwd')) if not kwargs.get('cwd') is None else self._path2str(
os.getcwd())
kwargs.update({'cwd': cwd})
logger.debug(cwd)
# When running as shell command, subprocess expects
# The arguments to be string.
logger.debug(str(self))
cmd = str(self) if kwargs['shell'] else self
x = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
**kwargs)
out, err = x.communicate()
out = out.rstrip()
err = err.rstrip()
if x.returncode != 0:
if out:
logger.error(out.decode("ascii"))
if err:
logger.error(err.decode("ascii"))
else:
if out:
logger.warning(out.decode("ascii"))
if err:
logger.warning(err.decode("ascii"))
return x.returncode
def _is_shell_command(self):
"""
Return true if current command is supposed to be executed
as shell script otherwise false.
"""
return any('|' in arg for arg in self.args)
def _path2str(self, path):
"""Convert :py:class:`pathlib.Path` to string.
The final form of the string is determined by the
configuration of `Command` instance.
:param path: Path-like object which will be converted
into string.
:return: String representation of `path`
"""
path = pathlib.Path(path)
if self.ensure_absolute_paths and not path.is_absolute():
path = path.resolve()
if self.pathstyle == 'posix':
return path.as_posix()
elif self.pathstyle == 'auto':
return str(path)
else:
raise ValueError(f"Invalid pathstyle {self.pathstyle}")
def __add__(self, other):
cmd = Command(self,
pathstyle=self.pathstyle,
ensure_absolute_paths=self.ensure_absolute_paths)
cmd += other
return cmd
def __iadd__(self, other):
self.append(other)
return self
def __iter__(self):
"""
Support iteration so functions from :py:mod:`subprocess` module
support `Command` instance.
"""
return iter(self.args)
def __repr__(self):
return f'<{self.__class__.__name__} args={self.args}>'
def __str__(self):
return ' '.join(self.args)
class shellCommand(Command):
"""
Sub Class of the command class which always executes commands as shell commands.
"""
def __init__(self, *args, pathstyle='auto', ensure_absolute_paths=False):
"""
:param pathstyle: Determine the path style when adding instance of
:py:class:`pathlib.Path`. Path style determines the slash type
which separates the path components. If pathstyle is `auto`, then
on Windows backslashes are used and on Linux forward slashes are used.
When backslashes should be prevented on all systems, the pathstyle
should be `posix`. No other values are allowed.
:param ensure_absolute_paths: If true, then any passed path will be
converted to absolute path.
:param args: Initial command.
:type pathstyle: str
:type ensure_absolute_paths: bool
"""
return super().__init__(*args,
pathstyle=pathstyle,
ensure_absolute_paths=ensure_absolute_paths)
def _is_shell_command(self):
return True
class ColoredFormatter(logging.Formatter):
"""
Class to create a log output which is colored based on level.
"""
def __init__(self, *args, **kwargs):
super(ColoredFormatter, self).__init__(*args, **kwargs)
self.colors = {
'DEBUG': '\033[94m',
'INFO': '\033[92m',
'WARNING': '\033[93m',
'ERROR': '\033[91m',
}
self.reset = '\033[0m'
def format(self, record):
msg = str(record.msg)
level_name = str(record.levelname)
name = str(record.name)
color_prefix = self.colors[level_name]
return '{0}{1:>9s} | [--{2}--]: {3}{4}'.format(color_prefix,
level_name, name, msg,
self.reset)
def setup_logging(log_level):
"""Setup logging
Verbosity decided on user input
:param log_level: User defined log level
:type log_level: str
"""
numeric_level = getattr(logging, log_level.upper(), None)
if not isinstance(numeric_level, int):
print(
"\033[91mInvalid log level passed. Please select from debug | info | warning | error\033[0m"
)
raise ValueError("{}-Invalid log level.".format(log_level))
logging.basicConfig(level=numeric_level)
class SortingHelpFormatter(argparse.HelpFormatter):
def add_arguments(self, actions):
actions = sorted(actions, key=operator.attrgetter('option_strings'))
super(SortingHelpFormatter, self).add_arguments(actions)
class MyParser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write('error: %s\n' % message)
self.print_help()
raise SystemExit
def format_help(self):
formatter = self._get_formatter()
# usage
formatter.add_usage(self.usage, self._actions,
self._mutually_exclusive_groups)
# description
formatter.add_text(self.description)
# positionals, optionals and user-defined groups
for action_group in self._action_groups:
formatter.start_section(action_group.title)
formatter.add_text(action_group.description)
formatter.add_arguments(action_group._group_actions)
formatter.end_section()
# epilog
formatter.add_text(self.epilog)
# determine help from format above
return formatter.format_help()
def print_help(self,file=None):
if file is None:
file = sys.stdout
self._print_message(self.format_help(), file)
subparsers_actions = [
action for action in self._actions
if isinstance(action, argparse._SubParsersAction)]
for subparsers_action in subparsers_actions:
for choice, subparser in subparsers_action.choices.items():
self._print_message("Action '{}'\n\n".format(choice),file)
self._print_message("\t"+(subparser.format_help()).replace("\n","\n\t")+"\n",file)
class CustomAction(argparse.Action):
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
argparse.Action.__init__(self,
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=default,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar,
)
return
def __call__(self, parser, namespace, values, option_string=None):
if isinstance(values, list):
values = [ str(pathlib.Path(v).absolute()) for v in values ]
else:
values = [str(pathlib.Path(values).absolute())]
existing_val = getattr(namespace, self.dest, None)
if existing_val:
setattr(namespace, self.dest, existing_val + values)
else:
setattr(namespace, self.dest, values)
def riscof_cmdline_args():
parser = MyParser(
formatter_class=SortingHelpFormatter,
prog="riscof",
description="RISCOF is a framework used to run the Architectural Tests on a DUT and check compatibility with the RISC-V ISA")
parser.add_argument('--version','-v',
help='Print version of RISCOF being used',
action='store_true')
parser.add_argument('--verbose',
action='store',
default='info',
choices = ['debug','info','warning','error'],
help='[Default=info]',
metavar="")
subparsers = parser.add_subparsers(dest='command',title="Action",description="The action to be performed by riscof.",help="List of actions supported by riscof.")
coverage = subparsers.add_parser('coverage',help='Generate Coverage Report for the given YAML spec.',formatter_class=SortingHelpFormatter)
coverage.add_argument('--config',
type= lambda p: str(pathlib.Path(p).absolute()),
action='store',
help='The Path to the config file. [Default=./config.ini]',
metavar= 'PATH',
default=str(pathlib.Path('./config.ini').absolute())
)
coverage.add_argument('--cgf',
action=CustomAction,
# required=True,
help='The Path to the cgf file(s). Multiple allowed',
metavar= 'PATH')
coverage.add_argument('--suite',
type= lambda p: str(pathlib.Path(p).absolute()),
action='store',
help='The Path to the custom suite directory.',
metavar= 'PATH')
coverage.add_argument('--env',
type= lambda p: str(pathlib.Path(p).absolute()),
action='store',
help='The Path to the custom env directory.',
metavar= 'PATH')
coverage.add_argument('--work-dir',
type= lambda p: str(pathlib.Path(p).absolute()),
action='store',
help='The Path to the work-dir.',
metavar= 'PATH',
default=str(pathlib.Path('./riscof_work').absolute())
)
coverage.add_argument('--no-browser',action='store_true',
help="Do not open the browser for showing the test report.")
generatedb = subparsers.add_parser('gendb',help='Generate Database for the standard suite.',formatter_class=SortingHelpFormatter)
generatedb.add_argument('--suite',
type= lambda p: str(pathlib.Path(p).absolute()),
action='store',
help='The Path to the custom suite directory.',
metavar= 'PATH')
generatedb.add_argument('--env',
type= lambda p: str(pathlib.Path(p).absolute()),
action='store',
help='The Path to the custom env directory.',
metavar= 'PATH')
generatedb.add_argument('--work-dir',
type= lambda p: str(pathlib.Path(p).absolute()),
action='store',
help='The Path to the work-dir.',
metavar= 'PATH',
default=str(pathlib.Path('./riscof_work').absolute())
)
setup = subparsers.add_parser('setup',help='Initiate setup for riscof.',formatter_class=SortingHelpFormatter)
setup.add_argument('--dutname',
action='store',
help='Name of DUT plugin. [Default=spike]',
default='spike',
metavar= 'NAME')
setup.add_argument('--refname',
action='store',
help='Name of Reference plugin. [Default=sail_cSim]',
default='sail_cSim',
metavar= 'NAME')
setup.add_argument('--work-dir',
type= lambda p: str(pathlib.Path(p).absolute()),
action='store',
help='The Path to the work-dir.',
metavar= 'PATH',
default=str(pathlib.Path('./riscof_work').absolute())
)
validate = subparsers.add_parser('validateyaml',
help='Validate the Input YAMLs using riscv-config.',formatter_class=SortingHelpFormatter)
validate.add_argument('--config',
type= lambda p: str(pathlib.Path(p).absolute()),
action='store',
help='The Path to the config file. [Default=./config.ini]',
metavar= 'PATH',
default=str(pathlib.Path('./config.ini').absolute())
)
validate.add_argument('--work-dir',
type= lambda p: str(pathlib.Path(p).absolute()),
action='store',
help='The Path to the work-dir.',
metavar= 'PATH',
default=str(pathlib.Path('./riscof_work').absolute())
)
run = subparsers.add_parser('run',
help='Run the tests on DUT and reference and compare signatures.',formatter_class=SortingHelpFormatter)
run.add_argument('--config',
type= lambda p: str(pathlib.Path(p).absolute()),
action='store',
help='The Path to the config file. [Default=./config.ini]',
metavar= 'PATH',
default=str(pathlib.Path('./config.ini').absolute())
)
run.add_argument('--suite',
type= lambda p: str(pathlib.Path(p).absolute()),
action='store',
help='The Path to the custom suite directory.',
metavar= 'PATH')
run.add_argument('--env',
type= lambda p: str(pathlib.Path(p).absolute()),
action='store',
help='The Path to the custom env directory.',
metavar= 'PATH')
run.add_argument('--no-browser',action='store_true',
help="Do not open the browser for showing the test report.")
run.add_argument('--work-dir',
type= lambda p: str(pathlib.Path(p).absolute()),
action='store',
help='The Path to the work-dir.',
metavar= 'PATH',
default=str(pathlib.Path('./riscof_work').absolute())
)
testlist = subparsers.add_parser('testlist',
help='Generate the test list for the given DUT and suite.',formatter_class=SortingHelpFormatter)
testlist.add_argument('--work-dir',
type= lambda p: str(pathlib.Path(p).absolute()),
action='store',
help='The Path to the work-dir.',
metavar= 'PATH',
default=str(pathlib.Path('./riscof_work').absolute())
)
testlist.add_argument('--config',
type= lambda p: str(pathlib.Path(p).absolute()),
action='store',
help='The Path to the config file. [Default=./config.ini]',
metavar= 'PATH',
default=str(pathlib.Path('./config.ini').absolute())
)
testlist.add_argument('--suite',
type= lambda p: str(pathlib.Path(p).absolute()),
action='store',
help='The Path to the custom suite directory.',
metavar= 'PATH')
testlist.add_argument('--env',
type= lambda p: str(pathlib.Path(p).absolute()),
action='store',
help='The Path to the custom env directory.',
metavar= 'PATH')
return parser
|
[
"os.path.abspath",
"subprocess.Popen",
"os.path.join",
"logging.basicConfig",
"os.getcwd",
"os.path.exists",
"shlex.split",
"ruamel.yaml.YAML",
"argparse.Action.__init__",
"pathlib.Path",
"operator.attrgetter",
"sys.stderr.write",
"os.path.expanduser",
"logging.getLogger"
] |
[((231, 245), 'ruamel.yaml.YAML', 'YAML', ([], {'typ': '"""rt"""'}), "(typ='rt')\n", (235, 245), False, 'from ruamel.yaml import YAML\n'), ((314, 341), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (331, 341), False, 'import logging\n'), ((964, 994), 'os.path.expanduser', 'os.path.expanduser', (['entry_path'], {}), '(entry_path)\n', (982, 994), False, 'import os\n'), ((1002, 1028), 'os.path.exists', 'os.path.exists', (['entry_path'], {}), '(entry_path)\n', (1016, 1028), False, 'import os\n'), ((11492, 11532), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'numeric_level'}), '(level=numeric_level)\n', (11511, 11532), False, 'import logging\n'), ((1197, 1224), 'os.path.abspath', 'os.path.abspath', (['entry_path'], {}), '(entry_path)\n', (1212, 1224), False, 'import os\n'), ((6879, 6958), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)\n', (6895, 6958), False, 'import subprocess\n'), ((8072, 8090), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (8084, 8090), False, 'import pathlib\n'), ((11848, 11889), 'sys.stderr.write', 'sys.stderr.write', (["('error: %s\\n' % message)"], {}), "('error: %s\\n' % message)\n", (11864, 11889), False, 'import sys\n'), ((13613, 13811), 'argparse.Action.__init__', 'argparse.Action.__init__', (['self'], {'option_strings': 'option_strings', 'dest': 'dest', 'nargs': 'nargs', 'const': 'const', 'default': 'default', 'type': 'type', 'choices': 'choices', 'required': 'required', 'help': 'help', 'metavar': 'metavar'}), '(self, option_strings=option_strings, dest=dest,\n nargs=nargs, const=const, default=default, type=type, choices=choices,\n required=required, help=help, metavar=metavar)\n', (13637, 13811), False, 'import argparse\n'), ((586, 613), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (603, 613), False, 'import logging\n'), ((1501, 1537), 'os.path.join', 'os.path.join', (['config_dir', 'entry_path'], {}), '(config_dir, entry_path)\n', (1513, 1537), False, 'import os\n'), ((6609, 6620), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6618, 6620), False, 'import os\n'), ((11663, 11700), 'operator.attrgetter', 'operator.attrgetter', (['"""option_strings"""'], {}), "('option_strings')\n", (11682, 11700), False, 'import operator\n'), ((5597, 5613), 'shlex.split', 'shlex.split', (['arg'], {}), '(arg)\n', (5608, 5613), False, 'import shlex\n'), ((15954, 15982), 'pathlib.Path', 'pathlib.Path', (['"""./config.ini"""'], {}), "('./config.ini')\n", (15966, 15982), False, 'import pathlib\n'), ((17072, 17101), 'pathlib.Path', 'pathlib.Path', (['"""./riscof_work"""'], {}), "('./riscof_work')\n", (17084, 17101), False, 'import pathlib\n'), ((18267, 18296), 'pathlib.Path', 'pathlib.Path', (['"""./riscof_work"""'], {}), "('./riscof_work')\n", (18279, 18296), False, 'import pathlib\n'), ((19200, 19229), 'pathlib.Path', 'pathlib.Path', (['"""./riscof_work"""'], {}), "('./riscof_work')\n", (19212, 19229), False, 'import pathlib\n'), ((19747, 19775), 'pathlib.Path', 'pathlib.Path', (['"""./config.ini"""'], {}), "('./config.ini')\n", (19759, 19775), False, 'import pathlib\n'), ((20104, 20133), 'pathlib.Path', 'pathlib.Path', (['"""./riscof_work"""'], {}), "('./riscof_work')\n", (20116, 20133), False, 'import pathlib\n'), ((20646, 20674), 'pathlib.Path', 'pathlib.Path', (['"""./config.ini"""'], {}), "('./config.ini')\n", (20658, 20674), False, 'import pathlib\n'), ((21649, 21678), 'pathlib.Path', 'pathlib.Path', (['"""./riscof_work"""'], {}), "('./riscof_work')\n", (21661, 21678), False, 'import pathlib\n'), ((22175, 22204), 'pathlib.Path', 'pathlib.Path', (['"""./riscof_work"""'], {}), "('./riscof_work')\n", (22187, 22204), False, 'import pathlib\n'), ((22555, 22583), 'pathlib.Path', 'pathlib.Path', (['"""./config.ini"""'], {}), "('./config.ini')\n", (22567, 22583), False, 'import pathlib\n'), ((14321, 14336), 'pathlib.Path', 'pathlib.Path', (['v'], {}), '(v)\n', (14333, 14336), False, 'import pathlib\n'), ((14407, 14427), 'pathlib.Path', 'pathlib.Path', (['values'], {}), '(values)\n', (14419, 14427), False, 'import pathlib\n'), ((15724, 15739), 'pathlib.Path', 'pathlib.Path', (['p'], {}), '(p)\n', (15736, 15739), False, 'import pathlib\n'), ((16343, 16358), 'pathlib.Path', 'pathlib.Path', (['p'], {}), '(p)\n', (16355, 16358), False, 'import pathlib\n'), ((16604, 16619), 'pathlib.Path', 'pathlib.Path', (['p'], {}), '(p)\n', (16616, 16619), False, 'import pathlib\n'), ((16868, 16883), 'pathlib.Path', 'pathlib.Path', (['p'], {}), '(p)\n', (16880, 16883), False, 'import pathlib\n'), ((17506, 17521), 'pathlib.Path', 'pathlib.Path', (['p'], {}), '(p)\n', (17518, 17521), False, 'import pathlib\n'), ((17785, 17800), 'pathlib.Path', 'pathlib.Path', (['p'], {}), '(p)\n', (17797, 17800), False, 'import pathlib\n'), ((18063, 18078), 'pathlib.Path', 'pathlib.Path', (['p'], {}), '(p)\n', (18075, 18078), False, 'import pathlib\n'), ((18996, 19011), 'pathlib.Path', 'pathlib.Path', (['p'], {}), '(p)\n', (19008, 19011), False, 'import pathlib\n'), ((19517, 19532), 'pathlib.Path', 'pathlib.Path', (['p'], {}), '(p)\n', (19529, 19532), False, 'import pathlib\n'), ((19900, 19915), 'pathlib.Path', 'pathlib.Path', (['p'], {}), '(p)\n', (19912, 19915), False, 'import pathlib\n'), ((20416, 20431), 'pathlib.Path', 'pathlib.Path', (['p'], {}), '(p)\n', (20428, 20431), False, 'import pathlib\n'), ((20791, 20806), 'pathlib.Path', 'pathlib.Path', (['p'], {}), '(p)\n', (20803, 20806), False, 'import pathlib\n'), ((21047, 21062), 'pathlib.Path', 'pathlib.Path', (['p'], {}), '(p)\n', (21059, 21062), False, 'import pathlib\n'), ((21445, 21460), 'pathlib.Path', 'pathlib.Path', (['p'], {}), '(p)\n', (21457, 21460), False, 'import pathlib\n'), ((21971, 21986), 'pathlib.Path', 'pathlib.Path', (['p'], {}), '(p)\n', (21983, 21986), False, 'import pathlib\n'), ((22325, 22340), 'pathlib.Path', 'pathlib.Path', (['p'], {}), '(p)\n', (22337, 22340), False, 'import pathlib\n'), ((22704, 22719), 'pathlib.Path', 'pathlib.Path', (['p'], {}), '(p)\n', (22716, 22719), False, 'import pathlib\n'), ((22965, 22980), 'pathlib.Path', 'pathlib.Path', (['p'], {}), '(p)\n', (22977, 22980), False, 'import pathlib\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import with_statement
import abc
import json
import logging
import numpy as np
import os
import keras
from keras.optimizers import Adadelta, SGD, RMSprop, Adam
from nlplingo.nn.constants import supported_pytorch_models
from nlplingo.nn.keras_models.common import keras_custom_objects
import time
from datetime import datetime
from shutil import copyfile
import random
import math
from nlplingo.nn.framework.sentence_re import SentenceRETrain
logger = logging.getLogger(__name__)
class ExtractionModel(abc.ABC):
verbosity = 0
def __init__(self, params, extractor_params, event_domain, embeddings, hyper_params, features):
"""
:type event_domain: nlplingo.tasks.event_domain.EventDomain
:type embeddings: dict[str : nlplingo.embeddings.word_embeddings.WordEmbedding]
:type model_name: str
:type features: object containing a 'feature_strings' attribute
"""
self.hyper_params = hyper_params
self.params = params
self.extractor_params = extractor_params
self.event_domain = event_domain
self.num_event_types = len(event_domain.event_types)
self.num_role_types = len(event_domain.event_roles)
self.num_ne_types = len(event_domain.entity_types)
self.num_ne_bio_types = None
self.num_entity_relation_types = len(event_domain.entity_relation_types)
self.num_eer_types = len(event_domain.eer_types)
self.word_vec_length = 1 # because we use word vector index
self.embeddings_vector_size = None
if 'embeddings' in extractor_params:
self.embeddings_vector_size = extractor_params['embeddings']['vector_size']
self.word_embeddings = None
if embeddings is not None and 'word_embeddings' in embeddings:
self.word_embeddings = embeddings['word_embeddings'].word_vec
""":type: numpy.ndarray"""
self.model_type = extractor_params['model_type']
self.optimizer = self._configure_optimizer(extractor_params)
self.model_file = extractor_params['model_file']
self.data_keys = []
self.num_output = None
self.model_dir = None
self.model = None
self.id2label = dict([(v, k) for k, v in self.event_domain.event_roles.items()])
self.trained_model = None
self.features = features
if 'engine' in extractor_params and (extractor_params['engine'] == 'pytorch'):
import torch
import random
torch.manual_seed(extractor_params['seed'])
np.random.seed(extractor_params['seed'])
random.seed(1234)
self.extractor_params['cuda'] = torch.cuda.is_available()
if extractor_params.get('cpu', False):
self.extractor_params['cuda'] = False
elif extractor_params.get('cuda', False):
torch.cuda.manual_seed(extractor_params['seed'])
self.layers = None
def _get_framework_class(self):
if self.model_type in supported_pytorch_models:
return SentenceRETrain
else:
raise Exception('model type ' + self.model_type + ' is not supported')
def fit_txt(self, train_path, dev_path, test_path):
# uses framework (with distinct initialization args)
framework_class = self._get_framework_class()
framework = framework_class(self.model, train_path, dev_path, test_path, self.extractor_params, self.hyper_params, self.features, self.event_domain)
framework.train_model()
def fit_model(self, train_data_list, train_label, test_data_list, test_label):
# uses framework
if self.extractor_params.get('engine') == 'pytorch':
framework_class = self._get_framework_class()
framework = framework_class(self.model, train_data_list, train_label, test_data_list, test_label, self.extractor_params, self.hyper_params, self.features, self.event_domain)
framework.train_model()
elif 'engine' not in self.extractor_params or (('engine' in self.extractor_params) and (self.extractor_params['engine'] == 'keras')):
raise IOError(
"Extractor engine in {'keras', None} but KerasExtractionModel "
"should have implemented its own fit method overriding "
"ExtractionModel.fit_model. This error should no longer exist "
"once KerasExtractionModel is part of framework_class system.")
else:
raise Exception('Only Keras or PyTorch engines are supported.')
def _configure_optimizer(self, params):
optimizer_params = params.get('optimizer', dict())
tunable_params = {}
if 'engine' not in self.extractor_params or (('engine' in self.extractor_params) and (self.extractor_params['engine'] == 'keras')):
if optimizer_params.get('name') == 'SGD':
tunable_params = {
'name': 'SGD',
'lr': optimizer_params.get('lr', 0.01),
'momentum': optimizer_params.get('momentum', 0.0),
'decay': optimizer_params.get('decay', 0.0),
'nesterov': optimizer_params.get('nesterov', False)
}
optimizer = SGD(
lr=tunable_params['lr'],
momentum=tunable_params['momentum'],
decay=tunable_params['decay'],
nesterov=tunable_params['nesterov']
)
elif optimizer_params.get('name') == 'RMSprop':
tunable_params = {
'name': 'RMSprop',
'lr': optimizer_params.get('lr', 0.001),
'rho': optimizer_params.get('rho', 0.9),
'epsilon': optimizer_params.get('epsilon', None),
'decay': optimizer_params.get('decay', 0.0)
}
optimizer = RMSprop(
lr=tunable_params['lr'],
rho=tunable_params['rho'],
epsilon=tunable_params['epsilon'],
decay=tunable_params['decay']
)
elif optimizer_params.get('name') == 'Adam':
tunable_params = {
'name': 'Adam',
'lr': optimizer_params.get('lr', 0.001)
}
optimizer = Adam(
lr=tunable_params['lr']
)
else:
tunable_params = {
'name': 'Adadelta',
'lr': optimizer_params.get('lr', 0.1),
'rho': optimizer_params.get('rho', 0.95),
'epsilon': optimizer_params.get('epsilon', 1e-6),
'decay': optimizer_params.get('decay', 0.0)
}
# Default Adadelta
optimizer = Adadelta(
lr=tunable_params['lr'],
rho=tunable_params['rho'],
epsilon=tunable_params['epsilon']
)
print('=== Optimization parameters ===')
print(json.dumps(tunable_params, sort_keys=True, indent=4))
print('=== Optimization parameters ===')
return optimizer
elif self.extractor_params['engine'] == 'pytorch':
# TODO: make optimizer more configurable
optimizer_params['name'] = optimizer_params.get('name', 'sgd')
optimizer_params['lr'] = optimizer_params.get('lr', 0.3)
optimizer_params['lr_decay'] = optimizer_params.get('lr_decay', 0.9)
optimizer_params['decay_epoch'] = optimizer_params.get('decay_epoch', 5)
return optimizer_params
elif self.extractor_params['engine'] == 'transformers':
pass
else:
raise Exception('Only Keras or PyTorch engines are supported.')
def create_model(self):
pass
def __getstate__(self):
u"""Defines what is to be pickled.
Keras models cannot be pickled. Should call save_keras_model() and load_keras_model() separately.
The sequence is :
obj.save_keras_model('kerasFilename')
pickle.dump(obj, fileHandle)
...
obj = pickle.load(fileHandle)
obj.load_keras_model()"""
# Create state without self.keras_model
state = dict(self.__dict__)
#state.pop(u'keras_model') # probably not needed anymore, now that we've made keras_model global
return state
def __setstate__(self, state):
# Reload state for unpickling
self.__dict__ = state
def load_keras_model(self, filename=None):
self.model = keras.models.load_model(filename, keras_custom_objects)
def save_keras_model(self, filename):
self.model.save(filename)
print(self.model.summary())
def predict(self, test_data_list):
if 'engine' not in self.extractor_params or (('engine' in self.extractor_params) and (self.extractor_params['engine'] == 'keras')):
return self.model.predict(test_data_list)
elif self.extractor_params['engine'] == 'pytorch':
from data.loader import DataLoader as BatchDataLoader
print("Evaluating on test set...")
predictions = []
test_batch = BatchDataLoader(test_data_list, self.features.feature_strings, None, self.hyper_params.dict['batch_size'], self.hyper_params.dict, self.event_domain.event_roles, evaluation=True, test_mode=True)
for i, batch in enumerate(test_batch):
preds, _ = self.trained_model.predict(batch, compute_loss=False, compute_logits=True)
predictions.append(preds)
return np.vstack(predictions)
else:
raise Exception('Only Keras or PyTorch engines are supported.')
|
[
"keras.models.load_model",
"keras.optimizers.Adadelta",
"numpy.random.seed",
"keras.optimizers.SGD",
"torch.manual_seed",
"torch.cuda.manual_seed",
"data.loader.DataLoader",
"json.dumps",
"keras.optimizers.Adam",
"random.seed",
"torch.cuda.is_available",
"numpy.vstack",
"keras.optimizers.RMSprop",
"logging.getLogger"
] |
[((544, 571), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (561, 571), False, 'import logging\n'), ((8820, 8875), 'keras.models.load_model', 'keras.models.load_model', (['filename', 'keras_custom_objects'], {}), '(filename, keras_custom_objects)\n', (8843, 8875), False, 'import keras\n'), ((2619, 2662), 'torch.manual_seed', 'torch.manual_seed', (["extractor_params['seed']"], {}), "(extractor_params['seed'])\n", (2636, 2662), False, 'import torch\n'), ((2675, 2715), 'numpy.random.seed', 'np.random.seed', (["extractor_params['seed']"], {}), "(extractor_params['seed'])\n", (2689, 2715), True, 'import numpy as np\n'), ((2728, 2745), 'random.seed', 'random.seed', (['(1234)'], {}), '(1234)\n', (2739, 2745), False, 'import random\n'), ((2790, 2815), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2813, 2815), False, 'import torch\n'), ((5388, 5526), 'keras.optimizers.SGD', 'SGD', ([], {'lr': "tunable_params['lr']", 'momentum': "tunable_params['momentum']", 'decay': "tunable_params['decay']", 'nesterov': "tunable_params['nesterov']"}), "(lr=tunable_params['lr'], momentum=tunable_params['momentum'], decay=\n tunable_params['decay'], nesterov=tunable_params['nesterov'])\n", (5391, 5526), False, 'from keras.optimizers import Adadelta, SGD, RMSprop, Adam\n'), ((7256, 7308), 'json.dumps', 'json.dumps', (['tunable_params'], {'sort_keys': '(True)', 'indent': '(4)'}), '(tunable_params, sort_keys=True, indent=4)\n', (7266, 7308), False, 'import json\n'), ((9449, 9653), 'data.loader.DataLoader', 'BatchDataLoader', (['test_data_list', 'self.features.feature_strings', 'None', "self.hyper_params.dict['batch_size']", 'self.hyper_params.dict', 'self.event_domain.event_roles'], {'evaluation': '(True)', 'test_mode': '(True)'}), "(test_data_list, self.features.feature_strings, None, self.\n hyper_params.dict['batch_size'], self.hyper_params.dict, self.\n event_domain.event_roles, evaluation=True, test_mode=True)\n", (9464, 9653), True, 'from data.loader import DataLoader as BatchDataLoader\n'), ((9858, 9880), 'numpy.vstack', 'np.vstack', (['predictions'], {}), '(predictions)\n', (9867, 9880), True, 'import numpy as np\n'), ((2992, 3040), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (["extractor_params['seed']"], {}), "(extractor_params['seed'])\n", (3014, 3040), False, 'import torch\n'), ((6056, 6186), 'keras.optimizers.RMSprop', 'RMSprop', ([], {'lr': "tunable_params['lr']", 'rho': "tunable_params['rho']", 'epsilon': "tunable_params['epsilon']", 'decay': "tunable_params['decay']"}), "(lr=tunable_params['lr'], rho=tunable_params['rho'], epsilon=\n tunable_params['epsilon'], decay=tunable_params['decay'])\n", (6063, 6186), False, 'from keras.optimizers import Adadelta, SGD, RMSprop, Adam\n'), ((6514, 6543), 'keras.optimizers.Adam', 'Adam', ([], {'lr': "tunable_params['lr']"}), "(lr=tunable_params['lr'])\n", (6518, 6543), False, 'from keras.optimizers import Adadelta, SGD, RMSprop, Adam\n'), ((7011, 7111), 'keras.optimizers.Adadelta', 'Adadelta', ([], {'lr': "tunable_params['lr']", 'rho': "tunable_params['rho']", 'epsilon': "tunable_params['epsilon']"}), "(lr=tunable_params['lr'], rho=tunable_params['rho'], epsilon=\n tunable_params['epsilon'])\n", (7019, 7111), False, 'from keras.optimizers import Adadelta, SGD, RMSprop, Adam\n')]
|
from jumpscale.core.base import Base, fields
from enum import Enum
import hashlib
class ThreebotState(Enum):
RUNNING = "RUNNING" # the workloads are deployed and running
DELETED = "DELETED" # workloads and backups deleted
STOPPED = "STOPPED" # expired or manually stoped (delete workloads only)
class UserThreebot(Base):
# instance name is the f"threebot_{solution uuid}"
solution_uuid = fields.String()
identity_tid = fields.Integer()
name = fields.String()
owner_tname = fields.String() # owner's tname in Threefold Connect after cleaning
farm_name = fields.String()
state = fields.Enum(ThreebotState)
continent = fields.String()
explorer_url = fields.String()
threebot_container_wid = fields.Integer()
trc_container_wid = fields.Integer()
reverse_proxy_wid = fields.Integer()
subdomain_wid = fields.Integer()
secret_hash = fields.String()
def verify_secret(self, secret):
if not self.secret_hash:
return True
return self.secret_hash == hashlib.md5(secret.encode()).hexdigest()
def hash_secret(self, secret):
self.secret_hash = hashlib.md5(secret.encode()).hexdigest()
|
[
"jumpscale.core.base.fields.Integer",
"jumpscale.core.base.fields.String",
"jumpscale.core.base.fields.Enum"
] |
[((415, 430), 'jumpscale.core.base.fields.String', 'fields.String', ([], {}), '()\n', (428, 430), False, 'from jumpscale.core.base import Base, fields\n'), ((450, 466), 'jumpscale.core.base.fields.Integer', 'fields.Integer', ([], {}), '()\n', (464, 466), False, 'from jumpscale.core.base import Base, fields\n'), ((478, 493), 'jumpscale.core.base.fields.String', 'fields.String', ([], {}), '()\n', (491, 493), False, 'from jumpscale.core.base import Base, fields\n'), ((512, 527), 'jumpscale.core.base.fields.String', 'fields.String', ([], {}), '()\n', (525, 527), False, 'from jumpscale.core.base import Base, fields\n'), ((597, 612), 'jumpscale.core.base.fields.String', 'fields.String', ([], {}), '()\n', (610, 612), False, 'from jumpscale.core.base import Base, fields\n'), ((625, 651), 'jumpscale.core.base.fields.Enum', 'fields.Enum', (['ThreebotState'], {}), '(ThreebotState)\n', (636, 651), False, 'from jumpscale.core.base import Base, fields\n'), ((668, 683), 'jumpscale.core.base.fields.String', 'fields.String', ([], {}), '()\n', (681, 683), False, 'from jumpscale.core.base import Base, fields\n'), ((703, 718), 'jumpscale.core.base.fields.String', 'fields.String', ([], {}), '()\n', (716, 718), False, 'from jumpscale.core.base import Base, fields\n'), ((748, 764), 'jumpscale.core.base.fields.Integer', 'fields.Integer', ([], {}), '()\n', (762, 764), False, 'from jumpscale.core.base import Base, fields\n'), ((789, 805), 'jumpscale.core.base.fields.Integer', 'fields.Integer', ([], {}), '()\n', (803, 805), False, 'from jumpscale.core.base import Base, fields\n'), ((830, 846), 'jumpscale.core.base.fields.Integer', 'fields.Integer', ([], {}), '()\n', (844, 846), False, 'from jumpscale.core.base import Base, fields\n'), ((867, 883), 'jumpscale.core.base.fields.Integer', 'fields.Integer', ([], {}), '()\n', (881, 883), False, 'from jumpscale.core.base import Base, fields\n'), ((902, 917), 'jumpscale.core.base.fields.String', 'fields.String', ([], {}), '()\n', (915, 917), False, 'from jumpscale.core.base import Base, fields\n')]
|
import graphene
from graphene import Int
class TotalItemsConnection(graphene.relay.Connection):
class Meta:
abstract = True
total = Int()
def resolve_total(self, info, **kwargs):
return len(self.iterable)
class BaseConnectionField(graphene.relay.ConnectionField):
def __init__(self, type, *args, **kwargs):
filters = type._meta.node._meta.filter_class
if filters is not None:
for key, value in vars(filters()).items():
kwargs.setdefault(key, value)
super(BaseConnectionField, self).__init__(type, *args, **kwargs)
|
[
"graphene.Int"
] |
[((151, 156), 'graphene.Int', 'Int', ([], {}), '()\n', (154, 156), False, 'from graphene import Int\n')]
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server import util
class Preferences(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, settings: object=None, permissions: object=None, interests: object=None): # noqa: E501
"""Preferences - a model defined in Swagger
:param settings: The settings of this Preferences. # noqa: E501
:type settings: object
:param permissions: The permissions of this Preferences. # noqa: E501
:type permissions: object
:param interests: The interests of this Preferences. # noqa: E501
:type interests: object
"""
self.swagger_types = {
'settings': object,
'permissions': object,
'interests': object
}
self.attribute_map = {
'settings': 'settings',
'permissions': 'permissions',
'interests': 'interests'
}
self._settings = settings
self._permissions = permissions
self._interests = interests
@classmethod
def from_dict(cls, dikt) -> 'Preferences':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The Preferences of this Preferences. # noqa: E501
:rtype: Preferences
"""
return util.deserialize_model(dikt, cls)
@property
def settings(self) -> object:
"""Gets the settings of this Preferences.
:return: The settings of this Preferences.
:rtype: object
"""
return self._settings
@settings.setter
def settings(self, settings: object):
"""Sets the settings of this Preferences.
:param settings: The settings of this Preferences.
:type settings: object
"""
self._settings = settings
@property
def permissions(self) -> object:
"""Gets the permissions of this Preferences.
:return: The permissions of this Preferences.
:rtype: object
"""
return self._permissions
@permissions.setter
def permissions(self, permissions: object):
"""Sets the permissions of this Preferences.
:param permissions: The permissions of this Preferences.
:type permissions: object
"""
self._permissions = permissions
@property
def interests(self) -> object:
"""Gets the interests of this Preferences.
:return: The interests of this Preferences.
:rtype: object
"""
return self._interests
@interests.setter
def interests(self, interests: object):
"""Sets the interests of this Preferences.
:param interests: The interests of this Preferences.
:type interests: object
"""
self._interests = interests
|
[
"swagger_server.util.deserialize_model"
] |
[((1574, 1607), 'swagger_server.util.deserialize_model', 'util.deserialize_model', (['dikt', 'cls'], {}), '(dikt, cls)\n', (1596, 1607), False, 'from swagger_server import util\n')]
|
#
# Copyright (c) 2013-2018 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
import logging
import unittest
import os
import copy
from irma.configuration.config import ConfigurationSection
from irma.configuration.ini import IniConfiguration, TemplatedConfiguration
from irma.common.exceptions import IrmaConfigurationError
# =================
# Logging options
# =================
def enable_logging(level=logging.INFO, handler=None, formatter=None):
log = logging.getLogger()
if formatter is None:
formatter = logging.Formatter("%(asctime)s [%(name)s] " +
"%(levelname)s: %(message)s")
if handler is None:
handler = logging.StreamHandler()
handler.setFormatter(formatter)
log.addHandler(handler)
log.setLevel(level)
# ============
# Test Cases
# ============
class TestIniConfiguration(unittest.TestCase):
def test_ini_config_value(self):
directory = os.path.dirname(os.path.realpath(__file__))
config = IniConfiguration("{0}/test.ini".format(directory))
self.assertEqual(config["foo"].bar, "foobar")
self.assertEqual(config["foo bar"].foo, "foo")
self.assertEqual(config["foo bar"].bar, "bar")
def test_ini_config_types(self):
directory = os.path.dirname(os.path.realpath(__file__))
config = IniConfiguration("{0}/test.ini".format(directory))
self.assertEqual(isinstance(config, IniConfiguration),
True)
self.assertEqual(isinstance(config["foo bar"], ConfigurationSection),
True)
self.assertEqual(isinstance(config["foo bar"].bar, str),
True)
template = {'foo':
[('bar', TemplatedConfiguration.string, None)],
'foo bar':
[('foo', TemplatedConfiguration.string, None),
('bar', TemplatedConfiguration.string, None),
('val', TemplatedConfiguration.integer, 1337)],
'bar':
[('foo1', TemplatedConfiguration.integer, 42),
('foo2', TemplatedConfiguration.string, "Answer"),
('foo3', TemplatedConfiguration.boolean, None),
('foo4', TemplatedConfiguration.boolean, False)
]
}
class TestTemplatedConfiguration(unittest.TestCase):
def test_templated_config_value(self):
directory = os.path.dirname(os.path.realpath(__file__))
template_path = "{0}/test.ini".format(directory)
config = TemplatedConfiguration(template_path, template)
self.assertTrue(isinstance(config, TemplatedConfiguration))
self.assertEqual(config["foo"].bar, "foobar")
self.assertEqual(config["foo bar"].foo, "foo")
self.assertEqual(config["foo bar"].bar, "bar")
self.assertEqual(config["bar"].foo1, 65)
self.assertTrue(config["bar"].foo3)
def test_templated_config_default_value(self):
directory = os.path.dirname(os.path.realpath(__file__))
template_path = "{0}/test.ini".format(directory)
config = TemplatedConfiguration(template_path, template)
self.assertEqual(config["foo bar"].val, 1337)
self.assertEqual(config["bar"].foo2, "Answer")
self.assertFalse(config["bar"].foo4)
def test_templated_config_missing_value(self):
directory = os.path.dirname(os.path.realpath(__file__))
template1 = copy.copy(template)
template1['missingsection'] = [
('missingkey', TemplatedConfiguration.string, None)]
with self.assertRaises(IrmaConfigurationError):
TemplatedConfiguration("{0}/test.ini".format(directory), template1)
def test_templated_config_section_only_default_value(self):
directory = os.path.dirname(os.path.realpath(__file__))
template1 = copy.copy(template)
template1['missingsection'] = [
('missingkey', TemplatedConfiguration.string, "with_def_value")]
config = TemplatedConfiguration("{0}/test.ini".format(directory),
template1)
self.assertTrue(isinstance(config["missingsection"],
ConfigurationSection))
self.assertEqual(config["missingsection"].missingkey,
"with_def_value")
def test_templated_config_value_with_space(self):
directory = os.path.dirname(os.path.realpath(__file__))
template1 = copy.copy(template)
template1['missingsection'] = [
('one missing key',
TemplatedConfiguration.string,
"with_def_value")]
config = TemplatedConfiguration("{0}/test.ini".format(directory),
template1)
self.assertTrue(isinstance(config["missingsection"],
ConfigurationSection))
self.assertEqual(config["missingsection"]["one missing key"],
"with_def_value")
def test_templated_config_wrong_template_tuple_instead_of_list(self):
directory = os.path.dirname(os.path.realpath(__file__))
template1 = copy.copy(template)
template1['missingsection'] = (('key',
TemplatedConfiguration.string,
None))
with self.assertRaises(IrmaConfigurationError):
TemplatedConfiguration("{0}/test.ini".format(directory), template1)
def test_templated_config_wrong_value(self):
directory = os.path.dirname(os.path.realpath(__file__))
template_path = "{0}/test.ini".format(directory)
template1 = copy.copy(template)
template1['WrongVal'] = [('an_int',
TemplatedConfiguration.integer,
None)]
with self.assertRaises(IrmaConfigurationError):
TemplatedConfiguration(template_path, template1)
if __name__ == '__main__':
enable_logging()
unittest.main()
|
[
"unittest.main",
"os.path.realpath",
"logging.StreamHandler",
"copy.copy",
"logging.Formatter",
"irma.configuration.ini.TemplatedConfiguration",
"logging.getLogger"
] |
[((912, 931), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (929, 931), False, 'import logging\n'), ((6428, 6443), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6441, 6443), False, 'import unittest\n'), ((978, 1053), 'logging.Formatter', 'logging.Formatter', (["('%(asctime)s [%(name)s] ' + '%(levelname)s: %(message)s')"], {}), "('%(asctime)s [%(name)s] ' + '%(levelname)s: %(message)s')\n", (995, 1053), False, 'import logging\n'), ((1134, 1157), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (1155, 1157), False, 'import logging\n'), ((2955, 3002), 'irma.configuration.ini.TemplatedConfiguration', 'TemplatedConfiguration', (['template_path', 'template'], {}), '(template_path, template)\n', (2977, 3002), False, 'from irma.configuration.ini import IniConfiguration, TemplatedConfiguration\n'), ((3518, 3565), 'irma.configuration.ini.TemplatedConfiguration', 'TemplatedConfiguration', (['template_path', 'template'], {}), '(template_path, template)\n', (3540, 3565), False, 'from irma.configuration.ini import IniConfiguration, TemplatedConfiguration\n'), ((3856, 3875), 'copy.copy', 'copy.copy', (['template'], {}), '(template)\n', (3865, 3875), False, 'import copy\n'), ((4266, 4285), 'copy.copy', 'copy.copy', (['template'], {}), '(template)\n', (4275, 4285), False, 'import copy\n'), ((4891, 4910), 'copy.copy', 'copy.copy', (['template'], {}), '(template)\n', (4900, 4910), False, 'import copy\n'), ((5575, 5594), 'copy.copy', 'copy.copy', (['template'], {}), '(template)\n', (5584, 5594), False, 'import copy\n'), ((6087, 6106), 'copy.copy', 'copy.copy', (['template'], {}), '(template)\n', (6096, 6106), False, 'import copy\n'), ((1413, 1439), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1429, 1439), False, 'import os\n'), ((1747, 1773), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1763, 1773), False, 'import os\n'), ((2853, 2879), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (2869, 2879), False, 'import os\n'), ((3416, 3442), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (3432, 3442), False, 'import os\n'), ((3808, 3834), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (3824, 3834), False, 'import os\n'), ((4218, 4244), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (4234, 4244), False, 'import os\n'), ((4843, 4869), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (4859, 4869), False, 'import os\n'), ((5527, 5553), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (5543, 5553), False, 'import os\n'), ((5982, 6008), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (5998, 6008), False, 'import os\n'), ((6326, 6374), 'irma.configuration.ini.TemplatedConfiguration', 'TemplatedConfiguration', (['template_path', 'template1'], {}), '(template_path, template1)\n', (6348, 6374), False, 'from irma.configuration.ini import IniConfiguration, TemplatedConfiguration\n')]
|
#!/usr/bin/python3
'''
NAME:
lf_check.py
PURPOSE:
lf_check.py will run a series of tests based on the test TEST_DICTIONARY listed in lf_check_config.ini.
The lf_check_config.ini file is copied from lf_check_config_template.ini and local configuration is made
to the lf_check_config.ini.
EXAMPLE:
lf_check.py
NOTES:
Before using lf_check.py
1. copy lf_check_config_template.ini to the lf_check_config.ini
2. update lf_check_config.ini to enable (TRUE) tests to be run in the TEST_DICTIONARY , the TEST_DICTIONARY needs to be passed in
'''
import datetime
import pprint
import sys
if sys.version_info[0] != 3:
print("This script requires Python3")
exit()
import os
import socket
import logging
import time
from time import sleep
import argparse
import json
import configparser
import subprocess
import csv
import shutil
import os.path
# lf_report is from the parent of the current file
dir_path = os.path.dirname(os.path.realpath(__file__))
parent_dir_path = os.path.abspath(os.path.join(dir_path,os.pardir))
sys.path.insert(0, parent_dir_path)
#sys.path.append('../')
from lf_report import lf_report
sys.path.append('/')
CONFIG_FILE = os.getcwd() + '/lf_check_config.ini'
RUN_CONDITION = 'ENABLE'
# setup logging FORMAT
FORMAT = '%(asctime)s %(name)s %(levelname)s: %(message)s'
# lf_check class contains verificaiton configuration and ocastrates the testing.
class lf_check():
def __init__(self,
_csv_results,
_outfile):
self.lf_mgr_ip = ""
self.lf_mgr_port = ""
self.radio_dict = {}
self.test_dict = {}
path_parent = os.path.dirname(os.getcwd())
os.chdir(path_parent)
self.scripts_wd = os.getcwd()
self.results = ""
self.outfile = _outfile
self.test_result = "Failure"
self.results_col_titles = ["Test","Command","Result","STDOUT","STDERR"]
self.html_results = ""
self.background_green = "background-color:green"
self.background_red = "background-color:red"
self.background_purple = "background-color:purple"
self.http_test_ip = ""
self.ftp_test_ip = ""
self.test_ip = ""
# section TEST_GENERIC
self.radio_lf = ""
self.ssdi = ""
self.ssid_pw = ""
self.security = ""
self.num_sta = ""
self.col_names = ""
self.upstream_port = ""
self.csv_results = _csv_results
self.csv_results_file = ""
self.csv_results_writer = ""
self.csv_results_column_headers = ""
self.logger = logging.getLogger(__name__)
self.test_timeout = 120
self.use_blank_db = "FALSE"
self.use_factory_default_db = "FALSE"
self.use_custom_db = "FALSE"
self.production_run = "FALSE"
self.email_list_production = ""
self.host_ip_production = None
self.email_list_test = ""
self.host_ip_test = None
# NOT complete : will send the email results
def send_results_email(self, report_file=None):
if (report_file is None):
print( "No report file, not sending email.")
return
report_url=report_file.replace('/home/lanforge/', '')
if report_url.startswith('/'):
report_url = report_url[1:]
# Following recommendation
# NOTE: https://stackoverflow.com/questions/24196932/how-can-i-get-the-ip-address-from-nic-in-python
#command = 'echo "$HOSTNAME mail system works!" | mail -s "Test: $HOSTNAME $(date)" chuck.re<EMAIL>'
hostname = socket.gethostname()
ip = socket.gethostbyname(hostname)
message_txt = """Results from {hostname}:\\n
http://{ip}/{report}\\n
NOTE: for now to see stdout and stderr remove /home/lanforge from path.\\n
""".format(hostname=hostname, ip=ip, report=report_url)
mail_subject = "Regression Test [{hostname}] {date}".format(hostname=hostname,
date=datetime.datetime.now())
try:
if self.production_run == "TRUE":
msg = message_txt.format(ip=self.host_ip_production)
command = "echo \"{message}\" | mail -s \"{subject}\" {address}".format(
message=msg,
subject=mail_subject,
ip=self.host_ip_production,
address=self.email_list_production)
else:
msg = message_txt.format(ip=ip)
command = "echo \"{message}\" | mail -s \"{subject}\" {address}".format(
message=msg,
subject=mail_subject,
ip=ip, #self.host_ip_test,
address=self.email_list_test)
print("running:[{}]".format(command))
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
# have email on separate timeout
process.wait(timeout=int(self.test_timeout))
except subprocess.TimeoutExpired:
print("send email timed out")
process.terminate()
def get_csv_results(self):
return self.csv_file.name
def start_csv_results(self):
print("self.csv_results")
self.csv_results_file = open(self.csv_results, "w")
self.csv_results_writer = csv.writer(self.csv_results_file, delimiter=",")
self.csv_results_column_headers = ['Test','Command','Result','STDOUT','STDERR']
self.csv_results_writer.writerow(self.csv_results_column_headers)
self.csv_results_file.flush()
def get_html_results(self):
return self.html_results
def start_html_results(self):
self.html_results += """
<table border="1" class="dataframe">
<thead>
<tr style="text-align: left;">
<th>Test</th>
<th>Command</th>
<th>Result</th>
<th>STDOUT</th>
<th>STDERR</th>
</tr>
</thead>
<tbody>
"""
def finish_html_results(self):
self.html_results += """
</tbody>
</table>
<br>
<br>
<br>
"""
# Functions in this section are/can be overridden by descendants
# This code reads the lf_check_config.ini file to populate the test variables
def read_config_contents(self):
self.logger.info("read_config_contents {}".format(CONFIG_FILE))
config_file = configparser.ConfigParser()
success = True
success = config_file.read(CONFIG_FILE)
self.logger.info("logger worked")
if 'LF_MGR' in config_file.sections():
section = config_file['LF_MGR']
self.lf_mgr_ip = section['LF_MGR_IP']
self.lf_mgr_port = section['LF_MGR_PORT']
self.logger.info("lf_mgr_ip {}".format(self.lf_mgr_ip))
self.logger.info("lf_mgr_port {}".format(self.lf_mgr_port))
if 'TEST_NETWORK' in config_file.sections():
section = config_file['TEST_NETWORK']
self.http_test_ip = section['HTTP_TEST_IP']
self.logger.info("http_test_ip {}".format(self.http_test_ip))
self.ftp_test_ip = section['FTP_TEST_IP']
self.logger.info("ftp_test_ip {}".format(self.ftp_test_ip))
self.test_ip = section['TEST_IP']
self.logger.info("test_ip {}".format(self.test_ip))
if 'TEST_GENERIC' in config_file.sections():
section = config_file['TEST_GENERIC']
self.radio_lf = section['RADIO_USED']
self.logger.info("radio_lf {}".format(self.radio_lf))
self.ssid = section['SSID_USED']
self.logger.info("ssid {}".format(self.ssid))
self.ssid_pw = section['SSID_PW_USED']
self.logger.info("ssid_pw {}".format(self.ssid_pw))
self.security = section['SECURITY_USED']
self.logger.info("secruity {}".format(self.security))
self.num_sta = section['NUM_STA']
self.logger.info("num_sta {}".format(self.num_sta))
self.col_names = section['COL_NAMES']
self.logger.info("col_names {}".format(self.col_names))
self.upstream_port = section['UPSTREAM_PORT']
self.logger.info("upstream_port {}".format(self.upstream_port))
if 'TEST_PARAMETERS' in config_file.sections():
section = config_file['TEST_PARAMETERS']
self.test_timeout = section['TEST_TIMEOUT']
self.use_blank_db = section['LOAD_BLANK_DB']
self.use_factory_default_db = section['LOAD_FACTORY_DEFAULT_DB']
self.use_custom_db = section['LOAD_CUSTOM_DB']
self.custom_db = section['CUSTOM_DB']
self.production_run = section['PRODUCTION_RUN']
self.email_list_production = section['EMAIL_LIST_PRODUCTION']
self.host_ip_production = section['HOST_IP_PRODUCTION']
self.email_list_test = section['EMAIL_LIST_TEST']
self.host_ip_test = section['HOST_IP_TEST']
if 'RADIO_DICTIONARY' in config_file.sections():
section = config_file['RADIO_DICTIONARY']
self.radio_dict = json.loads(section.get('RADIO_DICT', self.radio_dict))
self.logger.info("self.radio_dict {}".format(self.radio_dict))
if 'TEST_DICTIONARY' in config_file.sections():
section = config_file['TEST_DICTIONARY']
# for json replace the \n and \r they are invalid json characters, allows for multiple line args
try:
self.test_dict = json.loads(section.get('TEST_DICT', self.test_dict).replace('\n',' ').replace('\r',' '))
self.logger.info("TEST_DICTIONARY: {}".format(self.test_dict))
except:
self.logger.info("Excpetion loading TEST_DICTIONARY, is there comma after the last entry? Check syntax")
def load_factory_default_db(self):
#self.logger.info("file_wd {}".format(self.scripts_wd))
try:
os.chdir(self.scripts_wd)
#self.logger.info("Current Working Directory {}".format(os.getcwd()))
except:
self.logger.info("failed to change to {}".format(self.scripts_wd))
# no spaces after FACTORY_DFLT
command = "./{} {}".format("scenario.py", "--load FACTORY_DFLT")
process = subprocess.Popen((command).split(' '), shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
# wait for the process to terminate
out, err = process.communicate()
errcode = process.returncode
# Not currently used
def load_blank_db(self):
#self.logger.info("file_wd {}".format(self.scripts_wd))
try:
os.chdir(self.scripts_wd)
#self.logger.info("Current Working Directory {}".format(os.getcwd()))
except:
self.logger.info("failed to change to {}".format(self.scripts_wd))
# no spaces after FACTORY_DFLT
command = "./{} {}".format("scenario.py", "--load BLANK")
process = subprocess.Popen((command).split(' '), shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
def load_custom_db(self,custom_db):
#self.logger.info("file_wd {}".format(self.scripts_wd))
try:
os.chdir(self.scripts_wd)
#self.logger.info("Current Working Directory {}".format(os.getcwd()))
except:
self.logger.info("failed to change to {}".format(self.scripts_wd))
# no spaces after FACTORY_DFLT
command = "./{} {}".format("scenario.py", "--load {}".format(custom_db))
process = subprocess.Popen((command).split(' '), shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
# wait for the process to terminate
out, err = process.communicate()
errcode = process.returncode
def run_script_test(self):
self.start_html_results()
self.start_csv_results()
for test in self.test_dict:
if self.test_dict[test]['enabled'] == "FALSE":
self.logger.info("test: {} skipped".format(test))
# load the default database
elif self.test_dict[test]['enabled'] == "TRUE":
# Make the command replace ment a separate method call.
# loop through radios
for radio in self.radio_dict:
# Replace RADIO, SSID, PASSWD, SECURITY with actual config values (e.g. RADIO_0_CFG to values)
# not "KEY" is just a word to refer to the RADIO define (e.g. RADIO_0_CFG) to get the vlaues
# --num_stations needs to be int not string (no double quotes)
if self.radio_dict[radio]["KEY"] in self.test_dict[test]['args']:
self.test_dict[test]['args'] = self.test_dict[test]['args'].replace(self.radio_dict[radio]["KEY"],'--radio {} --ssid {} --passwd {} --security {} --num_stations {}'
.format(self.radio_dict[radio]['RADIO'],self.radio_dict[radio]['SSID'],self.radio_dict[radio]['PASSWD'],self.radio_dict[radio]['SECURITY'],self.radio_dict[radio]['STATIONS']))
if 'HTTP_TEST_IP' in self.test_dict[test]['args']:
self.test_dict[test]['args'] = self.test_dict[test]['args'].replace('HTTP_TEST_IP',self.http_test_ip)
if 'FTP_TEST_IP' in self.test_dict[test]['args']:
self.test_dict[test]['args'] = self.test_dict[test]['args'].replace('FTP_TEST_IP',self.ftp_test_ip)
if 'TEST_IP' in self.test_dict[test]['args']:
self.test_dict[test]['args'] = self.test_dict[test]['args'].replace('TEST_IP',self.test_ip)
if 'RADIO_USED' in self.test_dict[test]['args']:
self.test_dict[test]['args'] = self.test_dict[test]['args'].replace('RADIO_USED',self.radio_lf)
if 'SSID_USED' in self.test_dict[test]['args']:
self.test_dict[test]['args'] = self.test_dict[test]['args'].replace('SSID_USED',self.ssid)
if 'SSID_PW_USED' in self.test_dict[test]['args']:
self.test_dict[test]['args'] = self.test_dict[test]['args'].replace('SSID_PW_USED',self.ssid_pw)
if 'SECURITY_USED' in self.test_dict[test]['args']:
self.test_dict[test]['args'] = self.test_dict[test]['args'].replace('SECURITY_USED',self.security)
if 'NUM_STA' in self.test_dict[test]['args']:
self.test_dict[test]['args'] = self.test_dict[test]['args'].replace('NUM_STA',self.num_sta)
if 'COL_NAMES' in self.test_dict[test]['args']:
self.test_dict[test]['args'] = self.test_dict[test]['args'].replace('COL_NAMES',self.col_names)
if 'UPSTREAM_PORT' in self.test_dict[test]['args']:
self.test_dict[test]['args'] = self.test_dict[test]['args'].replace('UPSTREAM_PORT',self.col_names)
if self.use_factory_default_db == "TRUE":
self.load_factory_default_db()
sleep(3)
self.logger.info("FACTORY_DFLT loaded between tests with scenario.py --load FACTORY_DFLT")
if self.use_blank_db == "TRUE":
self.load_blank_db()
sleep(1)
self.logger.info("BLANK loaded between tests with scenario.py --load BLANK")
if self.use_custom_db == "TRUE":
try:
self.load_custom_db(self.custom_db)
sleep(1)
self.logger.info("{} loaded between tests with scenario.py --load {}".format(self.custom_db,self.custom_db))
except:
self.logger.info("custom database failed to load check existance and location")
else:
self.logger.info("no db loaded between tests: {}".format(self.use_custom_db))
sleep(1) # the sleep is to allow for the database to stablize
try:
os.chdir(self.scripts_wd)
#self.logger.info("Current Working Directory {}".format(os.getcwd()))
except:
self.logger.info("failed to change to {}".format(self.scripts_wd))
cmd_args = "{}".format(self.test_dict[test]['args'])
command = "./{} {}".format(self.test_dict[test]['command'], cmd_args)
self.logger.info("command: {}".format(command))
self.logger.info("cmd_args {}".format(cmd_args))
if self.outfile is not None:
stdout_log_txt = self.outfile
stdout_log_txt = stdout_log_txt + "-{}-stdout.txt".format(test)
#self.logger.info("stdout_log_txt: {}".format(stdout_log_txt))
stdout_log = open(stdout_log_txt, 'a')
stderr_log_txt = self.outfile
stderr_log_txt = stderr_log_txt + "-{}-stderr.txt".format(test)
#self.logger.info("stderr_log_txt: {}".format(stderr_log_txt))
stderr_log = open(stderr_log_txt, 'a')
print("running {}".format(command))
process = subprocess.Popen((command).split(' '), shell=False, stdout=stdout_log, stderr=stderr_log, universal_newlines=True)
try:
#out, err = process.communicate()
process.wait(timeout=int(self.test_timeout))
except subprocess.TimeoutExpired:
process.terminate()
self.test_result = "TIMEOUT"
#if err:
# self.logger.info("command Test timed out: {}".format(command))
#self.logger.info(stderr_log_txt)
if(self.test_result != "TIMEOUT"):
stderr_log_size = os.path.getsize(stderr_log_txt)
if stderr_log_size > 0 :
self.logger.info("File: {} is not empty: {}".format(stderr_log_txt,str(stderr_log_size)))
self.test_result = "Failure"
background = self.background_red
else:
self.logger.info("File: {} is empty: {}".format(stderr_log_txt,str(stderr_log_size)))
self.test_result = "Success"
background = self.background_green
else:
self.logger.info("TIMEOUT FAILURE, Check LANforge Radios")
self.test_result = "Time Out"
background = self.background_purple
self.html_results += """
<tr><td>""" + str(test) + """</td><td class='scriptdetails'>""" + str(command) + """</td>
<td style="""+ str(background) + """>""" + str(self.test_result) + """
<td><a href=""" + str(stdout_log_txt) + """ target=\"_blank\">STDOUT</a></td>"""
if self.test_result == "Failure":
self.html_results += """<td><a href=""" + str(stderr_log_txt) + """ target=\"_blank\">STDERR</a></td>"""
elif self.test_result == "Time Out":
self.html_results += """<td><a href=""" + str(stderr_log_txt) + """ target=\"_blank\">STDERR</a></td>"""
#self.html_results += """<td></td>"""
else:
self.html_results += """<td></td>"""
self.html_results += """</tr>"""
row = [test,command,self.test_result,stdout_log_txt,stderr_log_txt]
self.csv_results_writer.writerow(row)
self.csv_results_file.flush()
#self.logger.info("row: {}".format(row))
self.logger.info("test: {} executed".format(test))
else:
self.logger.info("enable value {} invalid for test: {}, test skipped".format(self.test_dict[test]['enabled'],test))
self.finish_html_results()
def main():
# arguments
parser = argparse.ArgumentParser(
prog='lf_check.py',
formatter_class=argparse.RawTextHelpFormatter,
epilog='''\
lf_check.py : for running scripts listed in lf_check_config.ini file
''',
description='''\
lf_check.py
-----------
Summary :
---------
for running scripts listed in lf_check_config.ini
''')
parser.add_argument('--outfile', help="--outfile <Output Generic Name> used as base name for all files generated", default="")
parser.add_argument('--logfile', help="--logfile <logfile Name> logging for output of lf_check.py script", default="lf_check.log")
args = parser.parse_args()
# output report.
report = lf_report(_results_dir_name="lf_check",
_output_html="lf_check.html",
_output_pdf="lf-check.pdf")
current_time = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
csv_results = "lf_check{}-{}.csv".format(args.outfile,current_time)
csv_results = report.file_add_path(csv_results)
outfile = "lf_check-{}-{}".format(args.outfile,current_time)
outfile_path = report.file_add_path(outfile)
# lf_check() class created
check = lf_check(_csv_results = csv_results,
_outfile = outfile_path)
# get the git sha
process = subprocess.Popen(["git", "rev-parse", "HEAD"], stdout=subprocess.PIPE)
(commit_hash, err) = process.communicate()
exit_code = process.wait()
git_sha = commit_hash.decode('utf-8','ignore')
# set up logging
logfile = args.logfile[:-4]
print("logfile: {}".format(logfile))
logfile = "{}-{}.log".format(logfile,current_time)
logfile = report.file_add_path(logfile)
print("logfile {}".format(logfile))
formatter = logging.Formatter(FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
file_handler = logging.FileHandler(logfile, "w")
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.addHandler(logging.StreamHandler(sys.stdout)) # allows to logging to file and stdout
logger.info("commit_hash: {}".format(commit_hash))
logger.info("commit_hash2: {}".format(commit_hash.decode('utf-8','ignore')))
check.read_config_contents() # CMR need mode to just print out the test config and not run
check.run_script_test()
# Generate Ouptput reports
report.set_title("LF Check: lf_check.py")
report.build_banner()
report.start_content_div()
report.set_table_title("LF Check Test Results")
report.build_table_title()
report.set_text("git sha: {}".format(git_sha))
report.build_text()
html_results = check.get_html_results()
report.set_custom_html(html_results)
report.build_custom()
html_report = report.write_html_with_timestamp()
print("html report: {}".format(html_report))
report.write_pdf_with_timestamp()
report_path = os.path.dirname(html_report)
parent_report_dir = os.path.dirname(report_path)
# copy results to lastest so someone may see the latest.
lf_check_latest_html = parent_report_dir + "/lf_check_latest.html"
# duplicates html_report file up one directory
lf_check_html_report = parent_report_dir + "/{}.html".format(outfile)
#
banner_src_png = report_path + "/banner.png"
banner_dest_png = parent_report_dir + "/banner.png"
CandelaLogo_src_png = report_path + "/CandelaLogo2-90dpi-200x90-trans.png"
CandelaLogo_dest_png = parent_report_dir + "/CandelaLogo2-90dpi-200x90-trans.png"
report_src_css = report_path + "/report.css"
report_dest_css = parent_report_dir + "/report.css"
custom_src_css = report_path + "/custom.css"
custom_dest_css = parent_report_dir + "/custom.css"
font_src_woff = report_path + "/CenturyGothic.woff"
font_dest_woff = parent_report_dir + "/CenturyGothic.woff"
#pprint.pprint([
# ('banner_src', banner_src_png),
# ('banner_dest', banner_dest_png),
# ('CandelaLogo_src_png', CandelaLogo_src_png),
# ('CandelaLogo_dest_png', CandelaLogo_dest_png),
# ('report_src_css', report_src_css),
# ('custom_src_css', custom_src_css)
#])
# copy one directory above
shutil.copyfile(html_report, lf_check_latest_html)
shutil.copyfile(html_report, lf_check_html_report)
# copy banner and logo
shutil.copyfile(banner_src_png, banner_dest_png)
shutil.copyfile(CandelaLogo_src_png, CandelaLogo_dest_png)
shutil.copyfile(report_src_css, report_dest_css)
shutil.copyfile(custom_src_css, custom_dest_css)
shutil.copyfile(font_src_woff, font_dest_woff)
print("lf_check_latest.html: "+lf_check_latest_html)
print("lf_check_html_report: "+lf_check_html_report)
check.send_results_email(report_file=lf_check_html_report)
if __name__ == '__main__':
main()
|
[
"argparse.ArgumentParser",
"logging.Formatter",
"os.path.join",
"os.chdir",
"sys.path.append",
"logging.FileHandler",
"os.path.dirname",
"socket.gethostbyname",
"socket.gethostname",
"lf_report.lf_report",
"shutil.copyfile",
"configparser.ConfigParser",
"datetime.datetime.now",
"time.localtime",
"subprocess.Popen",
"csv.writer",
"os.path.getsize",
"os.path.realpath",
"logging.StreamHandler",
"time.sleep",
"os.getcwd",
"sys.path.insert",
"logging.getLogger"
] |
[((1024, 1059), 'sys.path.insert', 'sys.path.insert', (['(0)', 'parent_dir_path'], {}), '(0, parent_dir_path)\n', (1039, 1059), False, 'import sys\n'), ((1117, 1137), 'sys.path.append', 'sys.path.append', (['"""/"""'], {}), "('/')\n", (1132, 1137), False, 'import sys\n'), ((928, 954), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (944, 954), False, 'import os\n'), ((990, 1023), 'os.path.join', 'os.path.join', (['dir_path', 'os.pardir'], {}), '(dir_path, os.pardir)\n', (1002, 1023), False, 'import os\n'), ((1153, 1164), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1162, 1164), False, 'import os\n'), ((20553, 20903), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""lf_check.py"""', 'formatter_class': 'argparse.RawTextHelpFormatter', 'epilog': '""" lf_check.py : for running scripts listed in lf_check_config.ini file\n """', 'description': '"""lf_check.py\n-----------\n\nSummary :\n---------\nfor running scripts listed in lf_check_config.ini\n """'}), '(prog=\'lf_check.py\', formatter_class=argparse.\n RawTextHelpFormatter, epilog=\n """ lf_check.py : for running scripts listed in lf_check_config.ini file\n """\n , description=\n """lf_check.py\n-----------\n\nSummary :\n---------\nfor running scripts listed in lf_check_config.ini\n """\n )\n', (20576, 20903), False, 'import argparse\n'), ((21256, 21357), 'lf_report.lf_report', 'lf_report', ([], {'_results_dir_name': '"""lf_check"""', '_output_html': '"""lf_check.html"""', '_output_pdf': '"""lf-check.pdf"""'}), "(_results_dir_name='lf_check', _output_html='lf_check.html',\n _output_pdf='lf-check.pdf')\n", (21265, 21357), False, 'from lf_report import lf_report\n'), ((21875, 21945), 'subprocess.Popen', 'subprocess.Popen', (["['git', 'rev-parse', 'HEAD']"], {'stdout': 'subprocess.PIPE'}), "(['git', 'rev-parse', 'HEAD'], stdout=subprocess.PIPE)\n", (21891, 21945), False, 'import subprocess\n'), ((22326, 22351), 'logging.Formatter', 'logging.Formatter', (['FORMAT'], {}), '(FORMAT)\n', (22343, 22351), False, 'import logging\n'), ((22365, 22392), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (22382, 22392), False, 'import logging\n'), ((22446, 22479), 'logging.FileHandler', 'logging.FileHandler', (['logfile', '"""w"""'], {}), "(logfile, 'w')\n", (22465, 22479), False, 'import logging\n'), ((23479, 23507), 'os.path.dirname', 'os.path.dirname', (['html_report'], {}), '(html_report)\n', (23494, 23507), False, 'import os\n'), ((23532, 23560), 'os.path.dirname', 'os.path.dirname', (['report_path'], {}), '(report_path)\n', (23547, 23560), False, 'import os\n'), ((24778, 24828), 'shutil.copyfile', 'shutil.copyfile', (['html_report', 'lf_check_latest_html'], {}), '(html_report, lf_check_latest_html)\n', (24793, 24828), False, 'import shutil\n'), ((24844, 24894), 'shutil.copyfile', 'shutil.copyfile', (['html_report', 'lf_check_html_report'], {}), '(html_report, lf_check_html_report)\n', (24859, 24894), False, 'import shutil\n'), ((24938, 24986), 'shutil.copyfile', 'shutil.copyfile', (['banner_src_png', 'banner_dest_png'], {}), '(banner_src_png, banner_dest_png)\n', (24953, 24986), False, 'import shutil\n'), ((24999, 25057), 'shutil.copyfile', 'shutil.copyfile', (['CandelaLogo_src_png', 'CandelaLogo_dest_png'], {}), '(CandelaLogo_src_png, CandelaLogo_dest_png)\n', (25014, 25057), False, 'import shutil\n'), ((25065, 25113), 'shutil.copyfile', 'shutil.copyfile', (['report_src_css', 'report_dest_css'], {}), '(report_src_css, report_dest_css)\n', (25080, 25113), False, 'import shutil\n'), ((25126, 25174), 'shutil.copyfile', 'shutil.copyfile', (['custom_src_css', 'custom_dest_css'], {}), '(custom_src_css, custom_dest_css)\n', (25141, 25174), False, 'import shutil\n'), ((25187, 25233), 'shutil.copyfile', 'shutil.copyfile', (['font_src_woff', 'font_dest_woff'], {}), '(font_src_woff, font_dest_woff)\n', (25202, 25233), False, 'import shutil\n'), ((1657, 1678), 'os.chdir', 'os.chdir', (['path_parent'], {}), '(path_parent)\n', (1665, 1678), False, 'import os\n'), ((1705, 1716), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1714, 1716), False, 'import os\n'), ((2582, 2609), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2599, 2609), False, 'import logging\n'), ((3571, 3591), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (3589, 3591), False, 'import socket\n'), ((3605, 3635), 'socket.gethostbyname', 'socket.gethostbyname', (['hostname'], {}), '(hostname)\n', (3625, 3635), False, 'import socket\n'), ((5396, 5444), 'csv.writer', 'csv.writer', (['self.csv_results_file'], {'delimiter': '""","""'}), "(self.csv_results_file, delimiter=',')\n", (5406, 5444), False, 'import csv\n'), ((6730, 6757), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (6755, 6757), False, 'import configparser\n'), ((21455, 21471), 'time.localtime', 'time.localtime', ([], {}), '()\n', (21469, 21471), False, 'import time\n'), ((22579, 22612), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (22600, 22612), False, 'import logging\n'), ((1636, 1647), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1645, 1647), False, 'import os\n'), ((4827, 4942), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'universal_newlines': '(True)'}), '(command, shell=True, stdout=subprocess.PIPE, stderr=\n subprocess.PIPE, universal_newlines=True)\n', (4843, 4942), False, 'import subprocess\n'), ((10335, 10360), 'os.chdir', 'os.chdir', (['self.scripts_wd'], {}), '(self.scripts_wd)\n', (10343, 10360), False, 'import os\n'), ((11060, 11085), 'os.chdir', 'os.chdir', (['self.scripts_wd'], {}), '(self.scripts_wd)\n', (11068, 11085), False, 'import os\n'), ((11642, 11667), 'os.chdir', 'os.chdir', (['self.scripts_wd'], {}), '(self.scripts_wd)\n', (11650, 11667), False, 'import os\n'), ((4006, 4029), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4027, 4029), False, 'import datetime\n'), ((16390, 16398), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (16395, 16398), False, 'from time import sleep\n'), ((15486, 15494), 'time.sleep', 'sleep', (['(3)'], {}), '(3)\n', (15491, 15494), False, 'from time import sleep\n'), ((15715, 15723), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (15720, 15723), False, 'from time import sleep\n'), ((16494, 16519), 'os.chdir', 'os.chdir', (['self.scripts_wd'], {}), '(self.scripts_wd)\n', (16502, 16519), False, 'import os\n'), ((18372, 18403), 'os.path.getsize', 'os.path.getsize', (['stderr_log_txt'], {}), '(stderr_log_txt)\n', (18387, 18403), False, 'import os\n'), ((15979, 15987), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (15984, 15987), False, 'from time import sleep\n')]
|
"""
Name: <NAME>
"""
import heapq
g4 = [('S',['a','S','S']),
('S',[])]
def tdpstep(g, input_categories_parses): # compute all possible next steps from (ws,cs)
global n_steps
(ws,cs,p) = input_categories_parses
if len(cs)>0:
cs1=cs[1:] # copy of predicted categories except cs[0]
p1 = p[:] # copy of rules used so far
nextsteps=[]
for (lhs,rhs) in g:
if lhs == cs[0]:
n_steps += 1
print('expand',lhs,'->',rhs) # for trace
nextsteps.append((ws,rhs+cs1,p1+[[lhs]+rhs]))
if len(ws)>0 and ws[0] == cs[0]:
n_steps += 1
print('scan',ws[0]) # for trace
ws1=ws[1:]
nextsteps.append((ws1,cs1,p1))
return nextsteps
else:
return []
def derive(g,beam,k):
while beam != [] and not (min(beam)[1] == [] and min(beam)[2] == []):
(prob0,ws0,cs0,p0) = heapq.heappop(beam)
nextsteps = tdpstep(g,(ws0,cs0,p0))
print('nextsteps=',nextsteps)
if len(nextsteps) > 0:
prob1 = prob0/float(len(nextsteps))
if -(prob1) > k:
for (ws1,cs1,p1) in nextsteps:
heapq.heappush(beam,(prob1,ws1,cs1,p1))
print ('pushed',(prob1,ws1,cs1)) # for trace
print('|beam|=',len(beam)) # for trace
def parse(g,ws,k):
global n_steps
n_steps = 0
beam = [(-1.,ws,['S'],[])]
heapq.heapify(beam) # make list of derivations into a "min-heap"
while beam != []:
derive(g,beam,k)
if beam == []:
return 'False'
else:
d=heapq.heappop(beam)
print('ll=', d[3])
print('Number of steps are: ' + str(n_steps))
# ans = input('another? ')
# if len(ans)>0 and ans[0]=='n':
# return d[3]
# parse(g4, list('a'), 0.0001)
# parse(g4, list('aa'), 0.0001)
# parse(g4, list('aaa'), 0.0001)
# parse(g4, list('aaaa'), 0.0001)
# parse(g4, list('aaaaa'), 0.0001)
# parse(g4, list('aaaaaa'), 0.0001)
# parse(g4, list('aaaaaaa'), 0.0000001)
############################################################################################
# 3. Number of steps to parse 'a': 7
# Number of steps to parse 'aa': 19
# Number of steps to parse 'aaa': 52
# Number of steps to parse 'aaaa': 150
# Number of steps to parse 'aaaaa': 456
# Number of steps to parse 'aaaaaa': 1446
# Number of steps to parse 'aaaaaaa': 4735
#
# 4. For all values of 'n' greater than or equal to 1, the number of steps required
# to find all parses of the sentence exceed 2^n.
#
#
|
[
"heapq.heappush",
"heapq.heapify",
"heapq.heappop"
] |
[((1458, 1477), 'heapq.heapify', 'heapq.heapify', (['beam'], {}), '(beam)\n', (1471, 1477), False, 'import heapq\n'), ((939, 958), 'heapq.heappop', 'heapq.heappop', (['beam'], {}), '(beam)\n', (952, 958), False, 'import heapq\n'), ((1648, 1667), 'heapq.heappop', 'heapq.heappop', (['beam'], {}), '(beam)\n', (1661, 1667), False, 'import heapq\n'), ((1216, 1259), 'heapq.heappush', 'heapq.heappush', (['beam', '(prob1, ws1, cs1, p1)'], {}), '(beam, (prob1, ws1, cs1, p1))\n', (1230, 1259), False, 'import heapq\n')]
|
from unittest import TestCase
from day7 import TreeCreator
INPUT = '''pbga (66)
xhth (57)
ebii (61)
havc (66)
ktlj (57)
fwft (72) -> ktlj, cntj, xhth
qoyq (66)
padx (45) -> pbga, havc, qoyq
tknk (41) -> ugml, padx, fwft
jptl (61)
ugml (68) -> gyxo, ebii, jptl
gyxo (61)
cntj (57)'''
class TestTreeCreator(TestCase):
def test_create_tree(self):
sut = TreeCreator(INPUT)
sut.create()
self.assertEqual('tknk', sut.get_base_node().name)
def test_find_unbalanced_node(self):
sut = TreeCreator(INPUT)
sut.create()
self.assertEqual('ugml', sut.find_unbalanced_node().name)
self.assertEqual(60, sut.calculate_correct_weight_for_unbalanced_node(sut.find_unbalanced_node()))
|
[
"day7.TreeCreator"
] |
[((366, 384), 'day7.TreeCreator', 'TreeCreator', (['INPUT'], {}), '(INPUT)\n', (377, 384), False, 'from day7 import TreeCreator\n'), ((521, 539), 'day7.TreeCreator', 'TreeCreator', (['INPUT'], {}), '(INPUT)\n', (532, 539), False, 'from day7 import TreeCreator\n')]
|
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk, Pango, Gdk, GLib
import datetime
import ui
import conv
def icon_image(icon_name):
theme = Gtk.IconTheme.get_default()
icon = theme.load_icon(icon_name, -1, Gtk.IconLookupFlags.FORCE_SIZE)
img = Gtk.Image.new_from_pixbuf(icon)
return img
class MainWin(Gtk.Window):
width = 300
height = int(width * 3/2)
def __init__(self):
super().__init__(border_width=0, title="ui test")
self.set_size_request(MainWin.width, MainWin.height)
txt = """Longer note. Now is the time for all good men to come to the aid of the party. Now is the time for all good men to come to the aid of the party. Now is the time for all good men to come to the aid of the party. Now is the time for all good men to come to the aid of the party. Now is the time for all good men to come to the aid of the party."""
txt2 = """1. Longer note. Now is the time for all good men to come to the aid of the party. Now is the time for all good men to come to the aid of the party. Now is the time for all good men to come to the aid of the party. Now is the time for all good men to come to the aid of the party. Now is the time for all good men to come to the aid of the party.
2. Longer note. Now is the time for all good men to come to the aid of the party. Now is the time for all good men to come to the aid of the party. Now is the time for all good men to come to the aid of the party. Now is the time for all good men to come to the aid of the party. Now is the time for all good men to come to the aid of the party.
3. Longer note. Now is the time for all good men to come to the aid of the party. Now is the time for all good men to come to the aid of the party. Now is the time for all good men to come to the aid of the party. Now is the time for all good men to come to the aid of the party. Now is the time for all good men to come to the aid of the party."""
lbl = Gtk.Label()
lbl.set_hexpand(True)
lbl.set_xalign(0)
lbl.set_yalign(0)
lbl.set_line_wrap(True)
lbl.set_ellipsize(Pango.EllipsizeMode.END)
lbl.set_lines(2)
lbl.set_max_width_chars(5)
lbl.set_markup(txt2.strip().split("\n")[0])
self.add(ui.frame(lbl, "heading"))
self.connect("destroy", Gtk.main_quit)
self.show_all()
if __name__ == "__main__":
w = MainWin()
Gtk.main()
|
[
"gi.require_version",
"gi.repository.Gtk.main",
"gi.repository.Gtk.IconTheme.get_default",
"gi.repository.Gtk.Image.new_from_pixbuf",
"ui.frame",
"gi.repository.Gtk.Label"
] |
[((10, 42), 'gi.require_version', 'gi.require_version', (['"""Gtk"""', '"""3.0"""'], {}), "('Gtk', '3.0')\n", (28, 42), False, 'import gi\n'), ((170, 197), 'gi.repository.Gtk.IconTheme.get_default', 'Gtk.IconTheme.get_default', ([], {}), '()\n', (195, 197), False, 'from gi.repository import Gtk, Pango, Gdk, GLib\n'), ((282, 313), 'gi.repository.Gtk.Image.new_from_pixbuf', 'Gtk.Image.new_from_pixbuf', (['icon'], {}), '(icon)\n', (307, 313), False, 'from gi.repository import Gtk, Pango, Gdk, GLib\n'), ((2443, 2453), 'gi.repository.Gtk.main', 'Gtk.main', ([], {}), '()\n', (2451, 2453), False, 'from gi.repository import Gtk, Pango, Gdk, GLib\n'), ((1987, 1998), 'gi.repository.Gtk.Label', 'Gtk.Label', ([], {}), '()\n', (1996, 1998), False, 'from gi.repository import Gtk, Pango, Gdk, GLib\n'), ((2294, 2318), 'ui.frame', 'ui.frame', (['lbl', '"""heading"""'], {}), "(lbl, 'heading')\n", (2302, 2318), False, 'import ui\n')]
|
# coding: utf-8
"""
Modern Logic Api
Manage and version your customer decision logic outside of your codebase # noqa: E501
OpenAPI spec version: 1.0.0
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Customer(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'extra_properties': 'object',
'customer_id': 'str',
'first_name': 'str',
'last_name': 'str',
'email': 'str',
'phone': 'str',
'address_street': 'str',
'address_street2': 'str',
'address_city': 'str',
'address_state_code': 'str',
'address_zip': 'str',
'address_country_code': 'str',
'dob': 'date'
}
attribute_map = {
'extra_properties': 'extraProperties',
'customer_id': 'customerId',
'first_name': 'firstName',
'last_name': 'lastName',
'email': 'email',
'phone': 'phone',
'address_street': 'addressStreet',
'address_street2': 'addressStreet2',
'address_city': 'addressCity',
'address_state_code': 'addressStateCode',
'address_zip': 'addressZip',
'address_country_code': 'addressCountryCode',
'dob': 'dob'
}
def __init__(self, extra_properties=None, customer_id=None, first_name=None, last_name=None, email=None, phone=None, address_street=None, address_street2=None, address_city=None, address_state_code=None, address_zip=None, address_country_code=None, dob=None): # noqa: E501
"""Customer - a model defined in Swagger""" # noqa: E501
self._extra_properties = None
self._customer_id = None
self._first_name = None
self._last_name = None
self._email = None
self._phone = None
self._address_street = None
self._address_street2 = None
self._address_city = None
self._address_state_code = None
self._address_zip = None
self._address_country_code = None
self._dob = None
self.discriminator = None
if extra_properties is not None:
self.extra_properties = extra_properties
if customer_id is not None:
self.customer_id = customer_id
if first_name is not None:
self.first_name = first_name
if last_name is not None:
self.last_name = last_name
if email is not None:
self.email = email
if phone is not None:
self.phone = phone
if address_street is not None:
self.address_street = address_street
if address_street2 is not None:
self.address_street2 = address_street2
if address_city is not None:
self.address_city = address_city
if address_state_code is not None:
self.address_state_code = address_state_code
if address_zip is not None:
self.address_zip = address_zip
if address_country_code is not None:
self.address_country_code = address_country_code
if dob is not None:
self.dob = dob
@property
def extra_properties(self):
"""Gets the extra_properties of this Customer. # noqa: E501
:return: The extra_properties of this Customer. # noqa: E501
:rtype: object
"""
return self._extra_properties
@extra_properties.setter
def extra_properties(self, extra_properties):
"""Sets the extra_properties of this Customer.
:param extra_properties: The extra_properties of this Customer. # noqa: E501
:type: object
"""
self._extra_properties = extra_properties
@property
def customer_id(self):
"""Gets the customer_id of this Customer. # noqa: E501
Way that you uniquely identify customers # noqa: E501
:return: The customer_id of this Customer. # noqa: E501
:rtype: str
"""
return self._customer_id
@customer_id.setter
def customer_id(self, customer_id):
"""Sets the customer_id of this Customer.
Way that you uniquely identify customers # noqa: E501
:param customer_id: The customer_id of this Customer. # noqa: E501
:type: str
"""
self._customer_id = customer_id
@property
def first_name(self):
"""Gets the first_name of this Customer. # noqa: E501
Legal first name of the user being evaluated. # noqa: E501
:return: The first_name of this Customer. # noqa: E501
:rtype: str
"""
return self._first_name
@first_name.setter
def first_name(self, first_name):
"""Sets the first_name of this Customer.
Legal first name of the user being evaluated. # noqa: E501
:param first_name: The first_name of this Customer. # noqa: E501
:type: str
"""
self._first_name = first_name
@property
def last_name(self):
"""Gets the last_name of this Customer. # noqa: E501
Legal last name (surname) of the user being evaluated. # noqa: E501
:return: The last_name of this Customer. # noqa: E501
:rtype: str
"""
return self._last_name
@last_name.setter
def last_name(self, last_name):
"""Sets the last_name of this Customer.
Legal last name (surname) of the user being evaluated. # noqa: E501
:param last_name: The last_name of this Customer. # noqa: E501
:type: str
"""
self._last_name = last_name
@property
def email(self):
"""Gets the email of this Customer. # noqa: E501
Email address provided by user being evaluated. # noqa: E501
:return: The email of this Customer. # noqa: E501
:rtype: str
"""
return self._email
@email.setter
def email(self, email):
"""Sets the email of this Customer.
Email address provided by user being evaluated. # noqa: E501
:param email: The email of this Customer. # noqa: E501
:type: str
"""
self._email = email
@property
def phone(self):
"""Gets the phone of this Customer. # noqa: E501
Phone number of user being evaluated. # noqa: E501
:return: The phone of this Customer. # noqa: E501
:rtype: str
"""
return self._phone
@phone.setter
def phone(self, phone):
"""Sets the phone of this Customer.
Phone number of user being evaluated. # noqa: E501
:param phone: The phone of this Customer. # noqa: E501
:type: str
"""
self._phone = phone
@property
def address_street(self):
"""Gets the address_street of this Customer. # noqa: E501
Home address of user being evaluated # noqa: E501
:return: The address_street of this Customer. # noqa: E501
:rtype: str
"""
return self._address_street
@address_street.setter
def address_street(self, address_street):
"""Sets the address_street of this Customer.
Home address of user being evaluated # noqa: E501
:param address_street: The address_street of this Customer. # noqa: E501
:type: str
"""
self._address_street = address_street
@property
def address_street2(self):
"""Gets the address_street2 of this Customer. # noqa: E501
:return: The address_street2 of this Customer. # noqa: E501
:rtype: str
"""
return self._address_street2
@address_street2.setter
def address_street2(self, address_street2):
"""Sets the address_street2 of this Customer.
:param address_street2: The address_street2 of this Customer. # noqa: E501
:type: str
"""
self._address_street2 = address_street2
@property
def address_city(self):
"""Gets the address_city of this Customer. # noqa: E501
:return: The address_city of this Customer. # noqa: E501
:rtype: str
"""
return self._address_city
@address_city.setter
def address_city(self, address_city):
"""Sets the address_city of this Customer.
:param address_city: The address_city of this Customer. # noqa: E501
:type: str
"""
self._address_city = address_city
@property
def address_state_code(self):
"""Gets the address_state_code of this Customer. # noqa: E501
:return: The address_state_code of this Customer. # noqa: E501
:rtype: str
"""
return self._address_state_code
@address_state_code.setter
def address_state_code(self, address_state_code):
"""Sets the address_state_code of this Customer.
:param address_state_code: The address_state_code of this Customer. # noqa: E501
:type: str
"""
self._address_state_code = address_state_code
@property
def address_zip(self):
"""Gets the address_zip of this Customer. # noqa: E501
:return: The address_zip of this Customer. # noqa: E501
:rtype: str
"""
return self._address_zip
@address_zip.setter
def address_zip(self, address_zip):
"""Sets the address_zip of this Customer.
:param address_zip: The address_zip of this Customer. # noqa: E501
:type: str
"""
self._address_zip = address_zip
@property
def address_country_code(self):
"""Gets the address_country_code of this Customer. # noqa: E501
:return: The address_country_code of this Customer. # noqa: E501
:rtype: str
"""
return self._address_country_code
@address_country_code.setter
def address_country_code(self, address_country_code):
"""Sets the address_country_code of this Customer.
:param address_country_code: The address_country_code of this Customer. # noqa: E501
:type: str
"""
self._address_country_code = address_country_code
@property
def dob(self):
"""Gets the dob of this Customer. # noqa: E501
Date of birth for user being evaluated # noqa: E501
:return: The dob of this Customer. # noqa: E501
:rtype: date
"""
return self._dob
@dob.setter
def dob(self, dob):
"""Sets the dob of this Customer.
Date of birth for user being evaluated # noqa: E501
:param dob: The dob of this Customer. # noqa: E501
:type: date
"""
self._dob = dob
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Customer, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Customer):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"six.iteritems"
] |
[((11197, 11230), 'six.iteritems', 'six.iteritems', (['self.swagger_types'], {}), '(self.swagger_types)\n', (11210, 11230), False, 'import six\n')]
|
from interrogatio.core.exceptions import ValidationError
from interrogatio.validators import Validator
class PythonIdentifierValidator(Validator):
def validate(self, value, context=None):
if not value:
return
if not value.isidentifier():
raise ValidationError('Introduced data is not a valid Python identifier')
|
[
"interrogatio.core.exceptions.ValidationError"
] |
[((291, 358), 'interrogatio.core.exceptions.ValidationError', 'ValidationError', (['"""Introduced data is not a valid Python identifier"""'], {}), "('Introduced data is not a valid Python identifier')\n", (306, 358), False, 'from interrogatio.core.exceptions import ValidationError\n')]
|
import os
import re
import subprocess
from dogen.tools import Tools, Chdir
from dogen.plugin import Plugin
class DistGitPlugin(Plugin):
@staticmethod
def info():
return "dist-git", "Support for dist-git repositories"
@staticmethod
def inject_args(parser):
parser.add_argument('--dist-git-enable', action='store_true', help='Enables dist-git plugin')
parser.add_argument('--dist-git-assume-yes', action='store_true', help='Skip interactive mode and answer all question with "yes"')
parser.add_argument('--dist-git-scratch', action='store_true', help='Scratch build')
parser.add_argument('--dist-git-tech-preview', action='store_true', help='Change the type of image to tech-preview')
return parser
def __init__(self, dogen, args):
super(DistGitPlugin, self).__init__(dogen, args)
if not self.args.dist_git_enable:
return
self.repo = None
self.branch = None
def prepare(self, cfg):
if not self.args.dist_git_enable:
return
dist_git_cfg = cfg.get('dogen', {}).get('plugins', {}).get('dist_git', None)
if dist_git_cfg:
self.repo = dist_git_cfg.get('repo')
self.branch = dist_git_cfg.get('branch')
if not (self.repo and self.branch):
raise Exception("Dit-git plugin was activated, but repository and branch was not correctly provided")
self.git = Git(self.log, self.output, os.path.dirname(self.descriptor), self.repo, self.branch, self.args.dist_git_assume_yes)
self.git.prepare()
self.git.clean()
def before_sources(self, cfg):
if not self.args.dist_git_enable:
return
if not self.args.dist_git_tech_preview:
return
name = cfg.get('name')
family, name = name.split('/')
tech_preview_name = "%s-tech-preview/%s" % (family, name)
self.log.info("Generating tech-preview image, updating image name to: %s" % tech_preview_name)
cfg['name'] = tech_preview_name
def after_sources(self, files):
if not self.args.dist_git_enable:
return
with Chdir(self.output):
self.update_lookaside_cache(files)
self.git.add()
if self.git.stage_modified():
self.git.commit()
self.git.push()
else:
self.log.info("No changes made to the code, committing skipped")
self.build()
def update_lookaside_cache(self, artifacts):
if not artifacts:
return
self.log.info("Updating lookaside cache...")
subprocess.check_output(["rhpkg", "new-sources"] + artifacts.keys())
self.log.info("Update finished.")
def build(self):
if self.args.dist_git_assume_yes or Tools.decision("Do you want to execute a build on OSBS?"):
self.log.info("Executing container build on OSBS...")
cmd = ["rhpkg", "container-build"]
if self.args.dist_git_scratch:
cmd.append('--scratch')
subprocess.call(cmd)
class Git(object):
"""
Git support for target directories
"""
@staticmethod
def repo_info(path):
with Chdir(path):
if subprocess.check_output(["git", "rev-parse", "--is-inside-work-tree"]).strip() != "true":
raise Exception("Directory %s doesn't seem to be a git repository. Please make sure you specified correct path." % path)
name = os.path.basename(subprocess.check_output(["git", "rev-parse", "--show-toplevel"]).strip())
branch = subprocess.check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"]).strip()
commit = subprocess.check_output(["git", "rev-parse", "HEAD"]).strip()
return name, branch, commit
def __init__(self, log, output, source, repo, branch, noninteractive=False):
self.log = log
self.output = output
self.repo = repo
self.branch = branch
self.dockerfile = os.path.join(self.output, "Dockerfile")
self.noninteractive = noninteractive
self.source_repo_name, self.source_repo_branch, self.source_repo_commit = Git.repo_info(source)
def stage_modified(self):
# Check if there are any files in stage (return code 1). If there are no files
# (return code 0) it means that this is a rebuild, so skip committing
if subprocess.call(["git", "diff-index", "--quiet", "--cached", "HEAD"]):
return True
return False
def prepare(self):
if os.path.exists(self.output):
with Chdir(self.output):
self.log.info("Pulling latest changes in repo %s..." % self.repo)
subprocess.check_output(["git", "fetch"])
subprocess.check_output(["git", "checkout", "-f", self.branch], stderr=subprocess.STDOUT)
subprocess.check_output(["git", "reset", "--hard", "origin/%s" % self.branch])
self.log.debug("Changes pulled")
else:
self.log.info("Cloning %s git repository (%s branch)..." % (self.repo, self.branch))
subprocess.check_output(["rhpkg", "-q", "clone", "-b", self.branch, self.repo, self.output])
self.log.debug("Repository %s cloned" % self.repo)
def clean(self):
""" Removes old generated scripts, repos and cct directories """
with Chdir(self.output):
for d in ["scripts", "repos", "cct"]:
if os.path.exists(d):
self.log.info("Removing old '%s' directory" % d)
subprocess.check_output(["git", "rm", "-rf", d])
def add(self):
# Add new Dockerfile
subprocess.check_call(["git", "add", "Dockerfile"])
for d in ["scripts", "repos", "cct"]:
if os.path.exists(os.path.join(self.output, d)):
subprocess.check_call(["git", "add", d])
def commit(self):
commit_msg = "Sync"
if self.source_repo_name:
commit_msg += " with %s" % self.source_repo_name
if self.source_repo_commit:
commit_msg += ", commit %s" % self.source_repo_commit
# Commit the change
self.log.info("Commiting with message: '%s'" % commit_msg)
subprocess.check_output(["git", "commit", "-q", "-m", commit_msg])
untracked = subprocess.check_output(["git", "ls-files", "--others", "--exclude-standard"])
if untracked:
self.log.warn("There are following untracked files: %s. Please review your commit." % ", ".join(untracked.splitlines()))
diffs = subprocess.check_output(["git", "diff-files", "--name-only"])
if diffs:
self.log.warn("There are uncommited changes in following files: '%s'. Please review your commit." % ", ".join(diffs.splitlines()))
if not self.noninteractive:
subprocess.call(["git", "status"])
subprocess.call(["git", "show"])
if not (self.noninteractive or Tools.decision("Are you ok with the changes?")):
subprocess.call(["bash"])
def push(self):
if self.noninteractive or Tools.decision("Do you want to push the commit?"):
print("")
self.log.info("Pushing change to the upstream repository...")
subprocess.check_output(["git", "push", "-q"])
self.log.info("Change pushed.")
|
[
"dogen.tools.Tools.decision",
"dogen.tools.Chdir",
"subprocess.check_output",
"os.path.dirname",
"os.path.exists",
"subprocess.call",
"os.path.join",
"subprocess.check_call"
] |
[((4067, 4106), 'os.path.join', 'os.path.join', (['self.output', '"""Dockerfile"""'], {}), "(self.output, 'Dockerfile')\n", (4079, 4106), False, 'import os\n'), ((4464, 4533), 'subprocess.call', 'subprocess.call', (["['git', 'diff-index', '--quiet', '--cached', 'HEAD']"], {}), "(['git', 'diff-index', '--quiet', '--cached', 'HEAD'])\n", (4479, 4533), False, 'import subprocess\n'), ((4616, 4643), 'os.path.exists', 'os.path.exists', (['self.output'], {}), '(self.output)\n', (4630, 4643), False, 'import os\n'), ((5758, 5809), 'subprocess.check_call', 'subprocess.check_call', (["['git', 'add', 'Dockerfile']"], {}), "(['git', 'add', 'Dockerfile'])\n", (5779, 5809), False, 'import subprocess\n'), ((6329, 6395), 'subprocess.check_output', 'subprocess.check_output', (["['git', 'commit', '-q', '-m', commit_msg]"], {}), "(['git', 'commit', '-q', '-m', commit_msg])\n", (6352, 6395), False, 'import subprocess\n'), ((6417, 6495), 'subprocess.check_output', 'subprocess.check_output', (["['git', 'ls-files', '--others', '--exclude-standard']"], {}), "(['git', 'ls-files', '--others', '--exclude-standard'])\n", (6440, 6495), False, 'import subprocess\n'), ((6669, 6730), 'subprocess.check_output', 'subprocess.check_output', (["['git', 'diff-files', '--name-only']"], {}), "(['git', 'diff-files', '--name-only'])\n", (6692, 6730), False, 'import subprocess\n'), ((1485, 1517), 'os.path.dirname', 'os.path.dirname', (['self.descriptor'], {}), '(self.descriptor)\n', (1500, 1517), False, 'import os\n'), ((2186, 2204), 'dogen.tools.Chdir', 'Chdir', (['self.output'], {}), '(self.output)\n', (2191, 2204), False, 'from dogen.tools import Tools, Chdir\n'), ((2848, 2905), 'dogen.tools.Tools.decision', 'Tools.decision', (['"""Do you want to execute a build on OSBS?"""'], {}), "('Do you want to execute a build on OSBS?')\n", (2862, 2905), False, 'from dogen.tools import Tools, Chdir\n'), ((3115, 3135), 'subprocess.call', 'subprocess.call', (['cmd'], {}), '(cmd)\n', (3130, 3135), False, 'import subprocess\n'), ((3268, 3279), 'dogen.tools.Chdir', 'Chdir', (['path'], {}), '(path)\n', (3273, 3279), False, 'from dogen.tools import Tools, Chdir\n'), ((5191, 5288), 'subprocess.check_output', 'subprocess.check_output', (["['rhpkg', '-q', 'clone', '-b', self.branch, self.repo, self.output]"], {}), "(['rhpkg', '-q', 'clone', '-b', self.branch, self.\n repo, self.output])\n", (5214, 5288), False, 'import subprocess\n'), ((5455, 5473), 'dogen.tools.Chdir', 'Chdir', (['self.output'], {}), '(self.output)\n', (5460, 5473), False, 'from dogen.tools import Tools, Chdir\n'), ((6942, 6976), 'subprocess.call', 'subprocess.call', (["['git', 'status']"], {}), "(['git', 'status'])\n", (6957, 6976), False, 'import subprocess\n'), ((6989, 7021), 'subprocess.call', 'subprocess.call', (["['git', 'show']"], {}), "(['git', 'show'])\n", (7004, 7021), False, 'import subprocess\n'), ((7123, 7148), 'subprocess.call', 'subprocess.call', (["['bash']"], {}), "(['bash'])\n", (7138, 7148), False, 'import subprocess\n'), ((7204, 7253), 'dogen.tools.Tools.decision', 'Tools.decision', (['"""Do you want to push the commit?"""'], {}), "('Do you want to push the commit?')\n", (7218, 7253), False, 'from dogen.tools import Tools, Chdir\n'), ((7363, 7409), 'subprocess.check_output', 'subprocess.check_output', (["['git', 'push', '-q']"], {}), "(['git', 'push', '-q'])\n", (7386, 7409), False, 'import subprocess\n'), ((4662, 4680), 'dogen.tools.Chdir', 'Chdir', (['self.output'], {}), '(self.output)\n', (4667, 4680), False, 'from dogen.tools import Tools, Chdir\n'), ((4780, 4821), 'subprocess.check_output', 'subprocess.check_output', (["['git', 'fetch']"], {}), "(['git', 'fetch'])\n", (4803, 4821), False, 'import subprocess\n'), ((4838, 4932), 'subprocess.check_output', 'subprocess.check_output', (["['git', 'checkout', '-f', self.branch]"], {'stderr': 'subprocess.STDOUT'}), "(['git', 'checkout', '-f', self.branch], stderr=\n subprocess.STDOUT)\n", (4861, 4932), False, 'import subprocess\n'), ((4944, 5022), 'subprocess.check_output', 'subprocess.check_output', (["['git', 'reset', '--hard', 'origin/%s' % self.branch]"], {}), "(['git', 'reset', '--hard', 'origin/%s' % self.branch])\n", (4967, 5022), False, 'import subprocess\n'), ((5544, 5561), 'os.path.exists', 'os.path.exists', (['d'], {}), '(d)\n', (5558, 5561), False, 'import os\n'), ((5887, 5915), 'os.path.join', 'os.path.join', (['self.output', 'd'], {}), '(self.output, d)\n', (5899, 5915), False, 'import os\n'), ((5934, 5974), 'subprocess.check_call', 'subprocess.check_call', (["['git', 'add', d]"], {}), "(['git', 'add', d])\n", (5955, 5974), False, 'import subprocess\n'), ((7062, 7108), 'dogen.tools.Tools.decision', 'Tools.decision', (['"""Are you ok with the changes?"""'], {}), "('Are you ok with the changes?')\n", (7076, 7108), False, 'from dogen.tools import Tools, Chdir\n'), ((3655, 3724), 'subprocess.check_output', 'subprocess.check_output', (["['git', 'rev-parse', '--abbrev-ref', 'HEAD']"], {}), "(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])\n", (3678, 3724), False, 'import subprocess\n'), ((3754, 3807), 'subprocess.check_output', 'subprocess.check_output', (["['git', 'rev-parse', 'HEAD']"], {}), "(['git', 'rev-parse', 'HEAD'])\n", (3777, 3807), False, 'import subprocess\n'), ((5652, 5700), 'subprocess.check_output', 'subprocess.check_output', (["['git', 'rm', '-rf', d]"], {}), "(['git', 'rm', '-rf', d])\n", (5675, 5700), False, 'import subprocess\n'), ((3296, 3366), 'subprocess.check_output', 'subprocess.check_output', (["['git', 'rev-parse', '--is-inside-work-tree']"], {}), "(['git', 'rev-parse', '--is-inside-work-tree'])\n", (3319, 3366), False, 'import subprocess\n'), ((3560, 3624), 'subprocess.check_output', 'subprocess.check_output', (["['git', 'rev-parse', '--show-toplevel']"], {}), "(['git', 'rev-parse', '--show-toplevel'])\n", (3583, 3624), False, 'import subprocess\n')]
|
from sys import path, stdout
path.insert(0, './git_management')
path.insert(0, './gradle_management')
from git_management import git_init
from gradle_management import migrateFromEclipseToAS
from os import path
from os import scandir
from os import remove
import migration
import errno, os, stat, shutil
import subprocess
from ownStyle import GREEN,BLUE,BOLD,GREEN,RED,RESET,CYAN
from shutil import copytree, ignore_patterns
from colorama import init,deinit
def cleanAmberProject(sourceDirectory):
for file in scandir(sourceDirectory):
if path.isdir(file):
cleanAmberProject(file)
else:
if "AmberProblem.txt" in file.name:
os.remove(file)
#Code of the class
init()
#Constants: Application under test
ActionBarCompatSampleDir='D:\\Git\\FormationAndroid2ee\\Formation_ICS_AS\\ActionBarCompatSample'
AmberDir='D:\\Git\\MyProjets\\AmberTeam'
ChronoDir='D:\\Git\\FormationAndroid2ee\\FormationInitiale_InitGui_AS\\ChronoTuto'
ForecastDir='D:\\Git\\MyProjets\\ForecastYahooRest\\ForecastRestWithLibs'
MyLightDir='D:\\Git\\MyProjets\\MyLight'
FtagDir='D:\\Git\\ProjetsExternes\\Tag\\ft_ag_app'
ActionBarCompatSampleTarget='D:\\Git\\Temp\\Res\\ActionBarCompatSample'
AmberTarget='D:\\Git\\Temp\\Res\\AmberTeam'
ChronoTarget='D:\\Git\\Temp\\Res\\ChronoTuto'
ForecastTarget='D:\\Git\\Temp\\Res\\ForecastRestWithLibs'
MyLightTarget='D:\\Git\\Temp\\Res\\MyLight'
FtagTarget='D:\\Git\\Temp\\Res\\ft_ag_app'
#Launch your test on your targets
print(BLUE+"#############################################")
print(GREEN+"#############################################")
print(RED+"#############################################")
print(CYAN+"#############################################")
print(BOLD+"Starting the migration of the elements")
print(BLUE+"#############################################")
print(GREEN+"#############################################")
print(RED+"#############################################")
print(CYAN+"#############################################\n\n")
errorFound=['list of errors']
successFound=['list of working project']
# cleanAmberProject(AmberDir)
# launchTest(FtagDir,FtagTarget)
result=migration.migrate(ActionBarCompatSampleDir,ActionBarCompatSampleTarget)#Works fine
successFound=successFound+result[0]
errorFound=errorFound+result[1]
result=migration.migrate(AmberDir,AmberTarget)#Failed: AndoidxMigration failed with android.support.design and projectName and myGradleGroupd are Res :(
successFound=successFound+result[0]
errorFound=errorFound+result[1]
result=migration.migrate(ChronoDir,ChronoTarget)#Fine
successFound=successFound+result[0]
errorFound=errorFound+result[1]
result=migration.migrate(ForecastDir,ForecastTarget)#Could not find unknown properties versionCode
successFound=successFound+result[0]
errorFound=errorFound+result[1]
result=migration.migrate(MyLightDir,MyLightTarget)#Fine
successFound=successFound+result[0]
errorFound=errorFound+result[1]
print(BLUE+'final result :')
#https://stackoverflow.com/questions/37340049/how-do-i-print-colored-output-to-the-terminal-in-python/37340245
for elem in successFound:
print(GREEN+elem)
for elem in errorFound:
print(RED+elem)
print(RESET)
deinit()
#This is the step2:Pushing every one in GitHub
# git_init.gitInit(targetDir)
|
[
"colorama.init",
"os.remove",
"os.path.isdir",
"os.path.insert",
"colorama.deinit",
"migration.migrate",
"os.scandir"
] |
[((29, 63), 'os.path.insert', 'path.insert', (['(0)', '"""./git_management"""'], {}), "(0, './git_management')\n", (40, 63), False, 'from os import path\n'), ((64, 101), 'os.path.insert', 'path.insert', (['(0)', '"""./gradle_management"""'], {}), "(0, './gradle_management')\n", (75, 101), False, 'from os import path\n'), ((727, 733), 'colorama.init', 'init', ([], {}), '()\n', (731, 733), False, 'from colorama import init, deinit\n'), ((2185, 2257), 'migration.migrate', 'migration.migrate', (['ActionBarCompatSampleDir', 'ActionBarCompatSampleTarget'], {}), '(ActionBarCompatSampleDir, ActionBarCompatSampleTarget)\n', (2202, 2257), False, 'import migration\n'), ((2344, 2384), 'migration.migrate', 'migration.migrate', (['AmberDir', 'AmberTarget'], {}), '(AmberDir, AmberTarget)\n', (2361, 2384), False, 'import migration\n'), ((2566, 2608), 'migration.migrate', 'migration.migrate', (['ChronoDir', 'ChronoTarget'], {}), '(ChronoDir, ChronoTarget)\n', (2583, 2608), False, 'import migration\n'), ((2689, 2735), 'migration.migrate', 'migration.migrate', (['ForecastDir', 'ForecastTarget'], {}), '(ForecastDir, ForecastTarget)\n', (2706, 2735), False, 'import migration\n'), ((2857, 2901), 'migration.migrate', 'migration.migrate', (['MyLightDir', 'MyLightTarget'], {}), '(MyLightDir, MyLightTarget)\n', (2874, 2901), False, 'import migration\n'), ((3220, 3228), 'colorama.deinit', 'deinit', ([], {}), '()\n', (3226, 3228), False, 'from colorama import init, deinit\n'), ((520, 544), 'os.scandir', 'scandir', (['sourceDirectory'], {}), '(sourceDirectory)\n', (527, 544), False, 'from os import scandir\n'), ((557, 573), 'os.path.isdir', 'path.isdir', (['file'], {}), '(file)\n', (567, 573), False, 'from os import path\n'), ((689, 704), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (698, 704), False, 'import errno, os, stat, shutil\n')]
|
from pydub import AudioSegment
import parselmouth
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
def draw_pitch(pitch):
# Extract selected pitch contour, and
# replace unvoiced samples by NaN to not plot
pitch_values = pitch.selected_array['frequency']
pitch_values[pitch_values == 0] = np.nan
plt.plot(pitch.xs(), pitch_values, 'o', markersize=5, color='w')
plt.plot(pitch.xs(), pitch_values, 'o', markersize=2)
plt.grid(False)
plt.ylim(0, pitch.ceiling)
plt.ylabel("fundamental frequency [Hz]")
def draw_spectrogram(spectrogram, dynamic_range=70):
X, Y = spectrogram.x_grid(), spectrogram.y_grid()
sg_db = 10 * np.log10(spectrogram.values)
plt.pcolormesh(X, Y, sg_db, vmin=sg_db.max() -
dynamic_range, cmap='afmhot')
plt.ylim([spectrogram.ymin, spectrogram.ymax])
plt.xlabel("time [s]")
plt.ylabel("frequency [Hz]")
def draw_intensity(intensity):
plt.plot(intensity.xs(), intensity.values.T, linewidth=3, color='w')
plt.plot(intensity.xs(), intensity.values.T, linewidth=1)
plt.grid(False)
plt.ylim(0)
plt.ylabel("intensity [dB]")
if __name__ == '__main__':
sns.set() # Use seaborn's default style to make attractive graphs
# Plot nice figures using Python's "standard" matplotlib library
snd = parselmouth.Sound(
'output.mp3')
# plt.figure()
# plt.plot(snd.xs(), snd.values.T)
# plt.xlim([snd.xmin, snd.xmax])
# plt.xlabel("time [s]")
# plt.ylabel("amplitude")
# # or plt.savefig("sound.png"), or plt.savefig("sound.pdf")
# plt.savefig("sound.png")
pitch = snd.to_pitch()
# If desired, pre-emphasize the sound fragment before calculating the spectrogram
pre_emphasized_snd = snd.copy()
pre_emphasized_snd.pre_emphasize()
spectrogram = pre_emphasized_snd.to_spectrogram(
window_length=0.03, maximum_frequency=8000)
plt.figure()
draw_spectrogram(spectrogram)
plt.twinx()
draw_pitch(pitch)
plt.xlim([snd.xmin, snd.xmax])
plt.savefig("pitch.png")
# sound = AudioSegment.from_mp3(
# '/Users/dimashulhin/Desktop/kyky_original.mp3')
# # get raw audio data as a bytestring
# raw_data = sound.raw_data
# # get the frame rate
# sample_rate = sound.frame_rate
# # get amount of bytes contained in one sample
# sample_size = sound.sample_width
# # get channels
# channels = sound.channels
# beginning = sound[13000:17000]
# print(beginning.raw_data)
|
[
"matplotlib.pyplot.xlim",
"parselmouth.Sound",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.twinx",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel",
"numpy.log10",
"matplotlib.pyplot.xlabel",
"seaborn.set",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.grid"
] |
[((471, 486), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (479, 486), True, 'import matplotlib.pyplot as plt\n'), ((491, 517), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', 'pitch.ceiling'], {}), '(0, pitch.ceiling)\n', (499, 517), True, 'import matplotlib.pyplot as plt\n'), ((522, 562), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""fundamental frequency [Hz]"""'], {}), "('fundamental frequency [Hz]')\n", (532, 562), True, 'import matplotlib.pyplot as plt\n'), ((822, 868), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[spectrogram.ymin, spectrogram.ymax]'], {}), '([spectrogram.ymin, spectrogram.ymax])\n', (830, 868), True, 'import matplotlib.pyplot as plt\n'), ((873, 895), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time [s]"""'], {}), "('time [s]')\n", (883, 895), True, 'import matplotlib.pyplot as plt\n'), ((900, 928), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""frequency [Hz]"""'], {}), "('frequency [Hz]')\n", (910, 928), True, 'import matplotlib.pyplot as plt\n'), ((1101, 1116), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (1109, 1116), True, 'import matplotlib.pyplot as plt\n'), ((1121, 1132), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)'], {}), '(0)\n', (1129, 1132), True, 'import matplotlib.pyplot as plt\n'), ((1137, 1165), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""intensity [dB]"""'], {}), "('intensity [dB]')\n", (1147, 1165), True, 'import matplotlib.pyplot as plt\n'), ((1199, 1208), 'seaborn.set', 'sns.set', ([], {}), '()\n', (1206, 1208), True, 'import seaborn as sns\n'), ((1346, 1377), 'parselmouth.Sound', 'parselmouth.Sound', (['"""output.mp3"""'], {}), "('output.mp3')\n", (1363, 1377), False, 'import parselmouth\n'), ((1934, 1946), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1944, 1946), True, 'import matplotlib.pyplot as plt\n'), ((1985, 1996), 'matplotlib.pyplot.twinx', 'plt.twinx', ([], {}), '()\n', (1994, 1996), True, 'import matplotlib.pyplot as plt\n'), ((2023, 2053), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[snd.xmin, snd.xmax]'], {}), '([snd.xmin, snd.xmax])\n', (2031, 2053), True, 'import matplotlib.pyplot as plt\n'), ((2058, 2082), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""pitch.png"""'], {}), "('pitch.png')\n", (2069, 2082), True, 'import matplotlib.pyplot as plt\n'), ((689, 717), 'numpy.log10', 'np.log10', (['spectrogram.values'], {}), '(spectrogram.values)\n', (697, 717), True, 'import numpy as np\n')]
|
import setuptools
import os
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'reactopya', 'VERSION')) as version_file:
version = version_file.read().strip()
setuptools.setup(
name="reactopya",
version=version,
author="<NAME>",
author_email="<EMAIL>",
description="",
packages=setuptools.find_packages(),
scripts=['bin/reactopya', 'bin/reactopya-server'],
include_package_data=True,
install_requires=[
"jinja2",
"numpy",
"simplejson"
],
classifiers=(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
)
)
|
[
"os.path.realpath",
"setuptools.find_packages"
] |
[((325, 351), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (349, 351), False, 'import setuptools\n'), ((68, 94), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (84, 94), False, 'import os\n')]
|
import sys
sys.path.append('/Users/kolbt/Desktop/compiled/hoomd-blue/build')
import hoomd
from hoomd import md
from hoomd import dem
from hoomd import deprecated
import numpy as np
# Simulation box mesh into grid delimit by particle diameter
# list of mesh indices random number generator to select index
# remove index from list once particle is placed
tsteps = 5000000
dump_freq = 10000
part_perc_a = 50
part_frac_a = float(part_perc_a) / float(100)
pe_a = 80
pe_b = 300
phi = 0.6
part_num = 24102
dumps = tsteps/dump_freq
diameter = 1
# find the box parameters
area_part = np.pi * ((float(diameter)/float(2))**2) * part_num
box_area = area_part / phi
side = int(np.sqrt(box_area))
side = 140
#while side % 10 != 0: # this is sub par... fix it
#side += 1 # or just pick part_num so that this is okay
# initialize system randomly
hoomd.context.initialize()
part_num = 13950
part_a = part_num * part_frac_a # get the total number of A particles
part_a = int(part_a)
part_b = part_num - part_a # get the total number of B particles
mid = int(part_a) # starting point for assigning B particles
snap = hoomd.data.make_snapshot(N = part_num,
box = hoomd.data.boxdim(L=side,
dimensions=2),
particle_types = ['A', 'B'])
part = np.zeros((3))
start_y = -69.5 # box is -70:70 for x and y dimensions
sep_row = 0.90 # distance between particles along x axis
sep_col = 0.78 # distance to increment rows (maintains center to center distance)
ith = 0 # particle counter
m = 0 # incrementer for y value
row = 2 # start on an even row (this determines first x placement in row)
# Places particles in lower left quadrant (-70, -70) - (0, 0)
# while loop that increments y value
while 1:
part[0] = start_y + m
n = 0
# while that increments x value (place row at constant height, y value)
while 1:
# ensures rows are offset from one another
if row % 2 == 0:
start_x = -69.50
else:
start_x = -69.05
part[1] = start_x + n
snap.particles.position[ith] = part
snap.particles.typeid[ith] = 0
ith += 1
n += sep_row
# placing into lower left quadrant
if start_x + n > 0:
break
row += 1
m += sep_col
# ensure particles are limited to lower left quadrant
if -69.5 + m > 0:
break
# Places particles in upper right quadrant (0,0) - (70, 70)
m = 0
row = 2
start_y = 0.5
while 1:
part[0] = start_y + m
n = 0
while 1:
if row % 2 == 0:
start_x = 0.5
else:
start_x = 0.95
part[1] = 0.5 + n
snap.particles.position[ith] = part
snap.particles.typeid[ith] = 1
ith += 1
n += sep_row
if start_x + n > 70:
break
row += 1
m += sep_col
if start_y + m > 70:
break
print(ith)
print(ith)
# now let's get the quaternion and moment of inertia
thetas = np.random.uniform(0, 2*np.pi, (part_num,)) # generate random angles
quats = np.array([np.cos(thetas/2),
np.zeros_like(thetas),
np.zeros_like(thetas),
np.sin(thetas/2)]).T # generate quaternions from the angles
snap.particles.orientation[:] = quats
inertia = float(1)/float(16)
snap.particles.diameter[:] = 1 # set particle diameters
snap.particles.moment_inertia[:] = (inertia, inertia, 0) # set moment of inertia
snap.particles.types = ['A', 'B'] # or 0, 1 in typeid vernacular
####################################
### NOW SET FORCES / INTEGRATORS ###
####################################
# initialize the system
system = hoomd.init.read_snapshot(snap)
all = hoomd.group.all()
gA = hoomd.group.type(type = 'A', update=True)
gB = hoomd.group.type(type = 'B', update=True)
N = len(all)
part_num = N
Na = len(gA)
Nb = len(gB)
print(part_num)
nl = hoomd.md.nlist.cell()
lj = hoomd.md.pair.lj(r_cut=2**(1/6), nlist=nl)
lj.set_params(mode='shift')
lj.pair_coeff.set('A', 'A', epsilon=1.0, sigma=1.0)
lj.pair_coeff.set('A', 'B', epsilon=1.0, sigma=1.0)
lj.pair_coeff.set('B', 'B', epsilon=1.0, sigma=1.0)
angle = np.random.rand(part_num) * 2 * np.pi # random orientation of each particle
if part_perc_a != 0 and part_perc_a != 100:
activity_a = []
for i in range(0,mid):
x = (np.cos(angle[i])) * pe_a
y = (np.sin(angle[i])) * pe_a
z = 0
tuple = (x, y, z)
activity_a.append(tuple)
activity_b = []
for i in range(mid,part_num):
x = (np.cos(angle[i])) * pe_b
y = (np.sin(angle[i])) * pe_b
z = 0
tuple = (x, y, z)
activity_b.append(tuple)
hoomd.md.force.active(group=gA,
seed=123,
f_lst=activity_a,
rotation_diff=3.0,
orientation_link=False)
hoomd.md.force.active(group=gB,
seed=375,
f_lst=activity_b,
rotation_diff=3.0,
orientation_link=False)
else:
if part_perc_a == 0:
activity_b = []
for i in range(0,part_num):
x = (np.cos(angle[i])) * pe_b
y = (np.sin(angle[i])) * pe_b
z = 0
tuple = (x, y, z)
activity_b.append(tuple)
hoomd.md.force.active(group=gB,
seed=375,
f_lst=activity_b,
rotation_diff=3.0,
orientation_link=False)
else:
activity_a = []
for i in range(0,part_num):
x = (np.cos(angle[i])) * pe_a
y = (np.sin(angle[i])) * pe_a
z = 0
tuple = (x, y, z)
activity_a.append(tuple)
hoomd.md.force.active(group=gA,
seed=123,
f_lst=activity_a,
rotation_diff=3.0,
orientation_link=False)
# minimize for no overlaps
fire=hoomd.md.integrate.mode_minimize_fire(group=all,
dt=0.00001,
ftol=1e-2,
Etol=1e-7)
hoomd.run(1000)
# brownian integration
hoomd.md.integrate.mode_standard(dt=0.000002)
bd = hoomd.md.integrate.brownian(group=all, kT=0.5, seed=123)
bd.set_gamma('A', gamma=1.0)
bd.set_gamma_r('A', gamma_r=1.0)
#write dump
hoomd.dump.gsd("hcp_test.gsd", period=1000, group=all, overwrite=True, static=[])
#run
hoomd.run(tsteps)
|
[
"hoomd.md.nlist.cell",
"hoomd.group.type",
"numpy.sin",
"hoomd.dump.gsd",
"sys.path.append",
"numpy.zeros_like",
"hoomd.run",
"hoomd.data.boxdim",
"hoomd.init.read_snapshot",
"hoomd.group.all",
"hoomd.md.pair.lj",
"hoomd.context.initialize",
"hoomd.md.integrate.mode_minimize_fire",
"numpy.cos",
"hoomd.md.integrate.mode_standard",
"numpy.random.uniform",
"hoomd.md.force.active",
"numpy.zeros",
"hoomd.md.integrate.brownian",
"numpy.random.rand",
"numpy.sqrt"
] |
[((11, 76), 'sys.path.append', 'sys.path.append', (['"""/Users/kolbt/Desktop/compiled/hoomd-blue/build"""'], {}), "('/Users/kolbt/Desktop/compiled/hoomd-blue/build')\n", (26, 76), False, 'import sys\n'), ((880, 906), 'hoomd.context.initialize', 'hoomd.context.initialize', ([], {}), '()\n', (904, 906), False, 'import hoomd\n'), ((1434, 1445), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1442, 1445), True, 'import numpy as np\n'), ((3217, 3261), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(2 * np.pi)', '(part_num,)'], {}), '(0, 2 * np.pi, (part_num,))\n', (3234, 3261), True, 'import numpy as np\n'), ((3984, 4014), 'hoomd.init.read_snapshot', 'hoomd.init.read_snapshot', (['snap'], {}), '(snap)\n', (4008, 4014), False, 'import hoomd\n'), ((4022, 4039), 'hoomd.group.all', 'hoomd.group.all', ([], {}), '()\n', (4037, 4039), False, 'import hoomd\n'), ((4045, 4084), 'hoomd.group.type', 'hoomd.group.type', ([], {'type': '"""A"""', 'update': '(True)'}), "(type='A', update=True)\n", (4061, 4084), False, 'import hoomd\n'), ((4092, 4131), 'hoomd.group.type', 'hoomd.group.type', ([], {'type': '"""B"""', 'update': '(True)'}), "(type='B', update=True)\n", (4108, 4131), False, 'import hoomd\n'), ((4209, 4230), 'hoomd.md.nlist.cell', 'hoomd.md.nlist.cell', ([], {}), '()\n', (4228, 4230), False, 'import hoomd\n'), ((4236, 4282), 'hoomd.md.pair.lj', 'hoomd.md.pair.lj', ([], {'r_cut': '(2 ** (1 / 6))', 'nlist': 'nl'}), '(r_cut=2 ** (1 / 6), nlist=nl)\n', (4252, 4282), False, 'import hoomd\n'), ((6409, 6495), 'hoomd.md.integrate.mode_minimize_fire', 'hoomd.md.integrate.mode_minimize_fire', ([], {'group': 'all', 'dt': '(1e-05)', 'ftol': '(0.01)', 'Etol': '(1e-07)'}), '(group=all, dt=1e-05, ftol=0.01, Etol=\n 1e-07)\n', (6446, 6495), False, 'import hoomd\n'), ((6621, 6636), 'hoomd.run', 'hoomd.run', (['(1000)'], {}), '(1000)\n', (6630, 6636), False, 'import hoomd\n'), ((6661, 6703), 'hoomd.md.integrate.mode_standard', 'hoomd.md.integrate.mode_standard', ([], {'dt': '(2e-06)'}), '(dt=2e-06)\n', (6693, 6703), False, 'import hoomd\n'), ((6712, 6768), 'hoomd.md.integrate.brownian', 'hoomd.md.integrate.brownian', ([], {'group': 'all', 'kT': '(0.5)', 'seed': '(123)'}), '(group=all, kT=0.5, seed=123)\n', (6739, 6768), False, 'import hoomd\n'), ((6844, 6929), 'hoomd.dump.gsd', 'hoomd.dump.gsd', (['"""hcp_test.gsd"""'], {'period': '(1000)', 'group': 'all', 'overwrite': '(True)', 'static': '[]'}), "('hcp_test.gsd', period=1000, group=all, overwrite=True,\n static=[])\n", (6858, 6929), False, 'import hoomd\n'), ((6932, 6949), 'hoomd.run', 'hoomd.run', (['tsteps'], {}), '(tsteps)\n', (6941, 6949), False, 'import hoomd\n'), ((668, 685), 'numpy.sqrt', 'np.sqrt', (['box_area'], {}), '(box_area)\n', (675, 685), True, 'import numpy as np\n'), ((4997, 5104), 'hoomd.md.force.active', 'hoomd.md.force.active', ([], {'group': 'gA', 'seed': '(123)', 'f_lst': 'activity_a', 'rotation_diff': '(3.0)', 'orientation_link': '(False)'}), '(group=gA, seed=123, f_lst=activity_a, rotation_diff=\n 3.0, orientation_link=False)\n', (5018, 5104), False, 'import hoomd\n'), ((5208, 5315), 'hoomd.md.force.active', 'hoomd.md.force.active', ([], {'group': 'gB', 'seed': '(375)', 'f_lst': 'activity_b', 'rotation_diff': '(3.0)', 'orientation_link': '(False)'}), '(group=gB, seed=375, f_lst=activity_b, rotation_diff=\n 3.0, orientation_link=False)\n', (5229, 5315), False, 'import hoomd\n'), ((1268, 1307), 'hoomd.data.boxdim', 'hoomd.data.boxdim', ([], {'L': 'side', 'dimensions': '(2)'}), '(L=side, dimensions=2)\n', (1285, 1307), False, 'import hoomd\n'), ((4472, 4496), 'numpy.random.rand', 'np.random.rand', (['part_num'], {}), '(part_num)\n', (4486, 4496), True, 'import numpy as np\n'), ((5683, 5790), 'hoomd.md.force.active', 'hoomd.md.force.active', ([], {'group': 'gB', 'seed': '(375)', 'f_lst': 'activity_b', 'rotation_diff': '(3.0)', 'orientation_link': '(False)'}), '(group=gB, seed=375, f_lst=activity_b, rotation_diff=\n 3.0, orientation_link=False)\n', (5704, 5790), False, 'import hoomd\n'), ((6153, 6260), 'hoomd.md.force.active', 'hoomd.md.force.active', ([], {'group': 'gA', 'seed': '(123)', 'f_lst': 'activity_a', 'rotation_diff': '(3.0)', 'orientation_link': '(False)'}), '(group=gA, seed=123, f_lst=activity_a, rotation_diff=\n 3.0, orientation_link=False)\n', (6174, 6260), False, 'import hoomd\n'), ((3311, 3329), 'numpy.cos', 'np.cos', (['(thetas / 2)'], {}), '(thetas / 2)\n', (3317, 3329), True, 'import numpy as np\n'), ((3347, 3368), 'numpy.zeros_like', 'np.zeros_like', (['thetas'], {}), '(thetas)\n', (3360, 3368), True, 'import numpy as np\n'), ((3388, 3409), 'numpy.zeros_like', 'np.zeros_like', (['thetas'], {}), '(thetas)\n', (3401, 3409), True, 'import numpy as np\n'), ((3429, 3447), 'numpy.sin', 'np.sin', (['(thetas / 2)'], {}), '(thetas / 2)\n', (3435, 3447), True, 'import numpy as np\n'), ((4654, 4670), 'numpy.cos', 'np.cos', (['angle[i]'], {}), '(angle[i])\n', (4660, 4670), True, 'import numpy as np\n'), ((4692, 4708), 'numpy.sin', 'np.sin', (['angle[i]'], {}), '(angle[i])\n', (4698, 4708), True, 'import numpy as np\n'), ((4857, 4873), 'numpy.cos', 'np.cos', (['angle[i]'], {}), '(angle[i])\n', (4863, 4873), True, 'import numpy as np\n'), ((4895, 4911), 'numpy.sin', 'np.sin', (['angle[i]'], {}), '(angle[i])\n', (4901, 4911), True, 'import numpy as np\n'), ((5523, 5539), 'numpy.cos', 'np.cos', (['angle[i]'], {}), '(angle[i])\n', (5529, 5539), True, 'import numpy as np\n'), ((5565, 5581), 'numpy.sin', 'np.sin', (['angle[i]'], {}), '(angle[i])\n', (5571, 5581), True, 'import numpy as np\n'), ((5993, 6009), 'numpy.cos', 'np.cos', (['angle[i]'], {}), '(angle[i])\n', (5999, 6009), True, 'import numpy as np\n'), ((6035, 6051), 'numpy.sin', 'np.sin', (['angle[i]'], {}), '(angle[i])\n', (6041, 6051), True, 'import numpy as np\n')]
|
from intake.middleware import MiddlewareBase
from easyaudit.middleware.easyaudit import clear_request
class ClearRequestMiddleware(MiddlewareBase):
def process_response(self, response):
clear_request()
|
[
"easyaudit.middleware.easyaudit.clear_request"
] |
[((201, 216), 'easyaudit.middleware.easyaudit.clear_request', 'clear_request', ([], {}), '()\n', (214, 216), False, 'from easyaudit.middleware.easyaudit import clear_request\n')]
|
"""server docstring"""
import json
import random
import time
from datetime import datetime
from flask import Flask, Response, render_template, redirect, url_for
from flask_mongoengine import MongoEngine
from sensores.db.models import Sensor, Medition
from sensores.db import util
from .businesslogic import get_sensors_data
from ..db.util import connect_redis
application = Flask(__name__)
application.config.from_object('sensores.server.config')
db = MongoEngine(application)
redis_client = connect_redis()
# register blueprint's
from .views import main as main_blueprint
from .sensors import sensor as sensor_blueprint
application.register_blueprint(main_blueprint)
application.register_blueprint(sensor_blueprint)
random.seed() # Initialize the random number generator
@application.route('/stream/sensors/data')
def stream():
def stream_sensors_data():
for s in Sensor.objects:
meditions = []
for m in Medition.objects(sensor=s.id):
meditions.append({
'fechahora': m.fechahora.strftime('%Y-%m-%d %H:%M:%S'),
'value': m.value
})
json_data = json.dumps({
'sensor': {
'type': s.type,
'name': s.name,
'meditions': meditions
}
})
yield f"data: {json_data}\n\n"
time.sleep(0.6)
return Response(stream_sensors_data(), mimetype='text/event-stream')
if __name__ == '__main__':
util.conectdb()
application.run(debug=True, threaded=True)
|
[
"flask_mongoengine.MongoEngine",
"flask.Flask",
"json.dumps",
"time.sleep",
"sensores.db.models.Medition.objects",
"random.seed",
"sensores.db.util.conectdb"
] |
[((377, 392), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (382, 392), False, 'from flask import Flask, Response, render_template, redirect, url_for\n'), ((455, 479), 'flask_mongoengine.MongoEngine', 'MongoEngine', (['application'], {}), '(application)\n', (466, 479), False, 'from flask_mongoengine import MongoEngine\n'), ((724, 737), 'random.seed', 'random.seed', ([], {}), '()\n', (735, 737), False, 'import random\n'), ((1548, 1563), 'sensores.db.util.conectdb', 'util.conectdb', ([], {}), '()\n', (1561, 1563), False, 'from sensores.db import util\n'), ((957, 986), 'sensores.db.models.Medition.objects', 'Medition.objects', ([], {'sensor': 's.id'}), '(sensor=s.id)\n', (973, 986), False, 'from sensores.db.models import Sensor, Medition\n'), ((1180, 1265), 'json.dumps', 'json.dumps', (["{'sensor': {'type': s.type, 'name': s.name, 'meditions': meditions}}"], {}), "({'sensor': {'type': s.type, 'name': s.name, 'meditions': meditions}}\n )\n", (1190, 1265), False, 'import json\n'), ((1425, 1440), 'time.sleep', 'time.sleep', (['(0.6)'], {}), '(0.6)\n', (1435, 1440), False, 'import time\n')]
|
import matplotlib.pyplot as plt
plt.rcParams['toolbar'] = 'None'
import numpy as np # importando numpy
def genera_montecarlo(N=100000):
plt.figure(figsize=(6,6))
x, y = np.random.uniform(-1, 1, size=(2, N))
interior = (x**2 + y**2) <= 1
pi = interior.sum() * 4 / N
error = abs((pi - np.pi) / pi) * 100
exterior = np.invert(interior)
plt.plot(x[interior], y[interior], 'b.')
plt.plot(x[exterior], y[exterior], 'r.')
plt.plot(0, 0, label='$\hat \pi$ = {:4.4f} \nerror = {:4.4f}%'.format(pi,error), alpha=0, color='g')
plt.axis('square')
plt.legend(frameon=True, framealpha=0.9, fontsize=16)
plt.show()
genera_montecarlo()
|
[
"numpy.random.uniform",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.invert",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.figure"
] |
[((143, 169), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (153, 169), True, 'import matplotlib.pyplot as plt\n'), ((185, 222), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(2, N)'}), '(-1, 1, size=(2, N))\n', (202, 222), True, 'import numpy as np\n'), ((345, 364), 'numpy.invert', 'np.invert', (['interior'], {}), '(interior)\n', (354, 364), True, 'import numpy as np\n'), ((369, 409), 'matplotlib.pyplot.plot', 'plt.plot', (['x[interior]', 'y[interior]', '"""b."""'], {}), "(x[interior], y[interior], 'b.')\n", (377, 409), True, 'import matplotlib.pyplot as plt\n'), ((414, 454), 'matplotlib.pyplot.plot', 'plt.plot', (['x[exterior]', 'y[exterior]', '"""r."""'], {}), "(x[exterior], y[exterior], 'r.')\n", (422, 454), True, 'import matplotlib.pyplot as plt\n'), ((564, 582), 'matplotlib.pyplot.axis', 'plt.axis', (['"""square"""'], {}), "('square')\n", (572, 582), True, 'import matplotlib.pyplot as plt\n'), ((587, 640), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'frameon': '(True)', 'framealpha': '(0.9)', 'fontsize': '(16)'}), '(frameon=True, framealpha=0.9, fontsize=16)\n', (597, 640), True, 'import matplotlib.pyplot as plt\n'), ((645, 655), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (653, 655), True, 'import matplotlib.pyplot as plt\n')]
|
import sys
sys.path.append("../")
sys.path.append("../examples/")
import argparse
from configs import supported
from configs.utils import populate_defaults
import wilds
# Taken from https://sumit-ghosh.com/articles/parsing-dictionary-key-value-pairs-kwargs-argparse-python/
class ParseKwargs(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, dict())
for value in values:
key, value_str = value.split('=')
if value_str.replace('-','').isnumeric():
processed_val = int(value_str)
elif value_str.replace('-','').replace('.','').isnumeric():
processed_val = float(value_str)
elif value_str in ['True', 'true']:
processed_val = True
elif value_str in ['False', 'false']:
processed_val = False
else:
processed_val = value_str
getattr(namespace, self.dest)[key] = processed_val
def parse_bool(v):
if v.lower()=='true':
return True
elif v.lower()=='false':
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def get_config(dataset, algorithm, root_dir):
parser = argparse.ArgumentParser()
# Required arguments
parser.add_argument('-d', '--dataset', choices=wilds.supported_datasets, required=True)
parser.add_argument('--algorithm', required=True, choices=supported.algorithms)
parser.add_argument('--root_dir', required=True,
help='The directory where [dataset]/data can be found (or should be downloaded to, if it does not exist).')
parser.add_argument('--enable_privacy', default=False, action='store_true')
# Dataset
parser.add_argument('--split_scheme', help='Identifies how the train/val/test split is constructed. Choices are dataset-specific.')
parser.add_argument('--dataset_kwargs', nargs='*', action=ParseKwargs, default={})
parser.add_argument('--download', default=False, type=parse_bool, const=True, nargs='?',
help='If true, tries to downloads the dataset if it does not exist in root_dir.')
parser.add_argument('--subsample', default=False, type=parse_bool, const=True, nargs='?',
help='If true, subsample every group to the minimum group size.')
parser.add_argument('--frac', type=float, default=1.0,
help='Convenience parameter that scales all dataset splits down to the specified fraction, for development purposes. Note that this also scales the test set down, so the reported numbers are not comparable with the full test set.')
parser.add_argument('--version', default=None, type=str)
# Loaders
parser.add_argument('--loader_kwargs', nargs='*', action=ParseKwargs, default={})
parser.add_argument('--train_loader', choices=['standard', 'group'])
parser.add_argument('--uniform_over_groups', type=parse_bool, const=True, nargs='?')
parser.add_argument('--distinct_groups', type=parse_bool, const=True, nargs='?')
parser.add_argument('--n_groups_per_batch', type=int)
parser.add_argument('--batch_size', type=int)
parser.add_argument('--eval_loader', choices=['standard'], default='standard')
parser.add_argument('--weighted_uniform_iid', type=parse_bool, const=True, nargs='?')
parser.add_argument('--uniform_iid', type=parse_bool, const=True, nargs='?')
parser.add_argument("--sample_rate", type=float, default=0.001, metavar="SR",
help="sample rate used for batch construction (default: 0.001)",)
# Model
parser.add_argument('--model', choices=supported.models)
parser.add_argument('--model_kwargs', nargs='*', action=ParseKwargs, default={},
help='keyword arguments for model initialization passed as key1=value1 key2=value2')
# Transforms
parser.add_argument('--transform', choices=supported.transforms)
parser.add_argument('--target_resolution', nargs='+', type=int, help='The input resolution that images will be resized to before being passed into the model. For example, use --target_resolution 224 224 for a standard ResNet.')
parser.add_argument('--resize_scale', type=float)
parser.add_argument('--max_token_length', type=int)
# Objective
parser.add_argument('--loss_function', choices = supported.losses)
parser.add_argument('--loss_kwargs', nargs='*', action=ParseKwargs, default={},
help='keyword arguments for loss initialization passed as key1=value1 key2=value2')
# Algorithm
parser.add_argument('--groupby_fields', nargs='+')
parser.add_argument('--group_dro_step_size', type=float)
parser.add_argument('--coral_penalty_weight', type=float)
parser.add_argument('--irm_lambda', type=float)
parser.add_argument('--irm_penalty_anneal_iters', type=int)
parser.add_argument('--algo_log_metric')
# Model selection
parser.add_argument('--val_metric')
parser.add_argument('--val_metric_decreasing', type=parse_bool, const=True, nargs='?')
# Optimization
parser.add_argument('--n_epochs', type=int)
parser.add_argument('--optimizer', choices=supported.optimizers)
parser.add_argument('--lr', type=float)
parser.add_argument('--weight_decay', type=float)
parser.add_argument('--max_grad_norm', type=float)
parser.add_argument('--optimizer_kwargs', nargs='*', action=ParseKwargs, default={})
parser.add_argument('--sigma', type=float, default=1.0)
parser.add_argument('--max_per_sample_grad_norm', type=float, default=1.0)
parser.add_argument('--delta', type=float, default=1e-5)
# Scheduler
parser.add_argument('--scheduler', choices=supported.schedulers)
parser.add_argument('--scheduler_kwargs', nargs='*', action=ParseKwargs, default={})
parser.add_argument('--scheduler_metric_split', choices=['train', 'val'], default='val')
parser.add_argument('--scheduler_metric_name')
# Evaluation
parser.add_argument('--process_outputs_function', choices = supported.process_outputs_functions)
parser.add_argument('--evaluate_all_splits', type=parse_bool, const=True, nargs='?', default=True)
parser.add_argument('--eval_splits', nargs='+', default=[])
parser.add_argument('--eval_only', type=parse_bool, const=True, nargs='?', default=False)
parser.add_argument('--eval_epoch', default=None, type=int, help='If eval_only is set, then eval_epoch allows you to specify evaluating at a particular epoch. By default, it evaluates the best epoch by validation performance.')
# Misc
parser.add_argument('--device', type=int, default=0)
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--log_dir', default='./logs')
parser.add_argument('--log_every', default=50, type=int)
parser.add_argument('--save_step', type=int)
parser.add_argument('--save_best', type=parse_bool, const=True, nargs='?', default=True)
parser.add_argument('--save_last', type=parse_bool, const=True, nargs='?', default=True)
parser.add_argument('--save_pred', type=parse_bool, const=True, nargs='?', default=True)
parser.add_argument('--no_group_logging', type=parse_bool, const=True, nargs='?')
parser.add_argument('--use_wandb', type=parse_bool, const=True, nargs='?', default=False)
parser.add_argument('--progress_bar', type=parse_bool, const=True, nargs='?', default=False)
parser.add_argument('--resume', type=parse_bool, const=True, nargs='?', default=False)
config = parser.parse_args(["--dataset", dataset, "--algorithm", algorithm, "--root_dir", root_dir])
config = populate_defaults(config)
return config
|
[
"sys.path.append",
"configs.utils.populate_defaults",
"argparse.ArgumentParser",
"argparse.ArgumentTypeError"
] |
[((11, 33), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (26, 33), False, 'import sys\n'), ((34, 65), 'sys.path.append', 'sys.path.append', (['"""../examples/"""'], {}), "('../examples/')\n", (49, 65), False, 'import sys\n'), ((1277, 1302), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1300, 1302), False, 'import argparse\n'), ((7669, 7694), 'configs.utils.populate_defaults', 'populate_defaults', (['config'], {}), '(config)\n', (7686, 7694), False, 'from configs.utils import populate_defaults\n'), ((1163, 1216), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['"""Boolean value expected."""'], {}), "('Boolean value expected.')\n", (1189, 1216), False, 'import argparse\n')]
|
"""
The MIT License (MIT)
Copyright (c) 2017 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import tensorflow as tf
from tensorflow.contrib.keras import backend
from tensorflow.contrib.keras import layers as keras_layers
class DLGMLayer(keras_layers.Layer):
"""
This layer is inspired by the paper "Stochastic Backpropagation and
Approximate Inference in Deep Generative Models"
incoming (Lasagne Layer): preceding layer in DLGM
num_units (int): number of output units in this layer
srng (theano RandomState): random number generator
rec_nets (dictionary of lasagne NNs): Neural networks that
paramaterize the recognition model
J (theano symbolic matrix): Input to rec model
k (float): regularization term on generative weights
"""
def __init__(self, incoming, num_units, rec_nets, k,
output_layer=False, extra_noise=0.01,
param_init=tf.random_normal_initializer(0, 0.01),
nonlinearity=tf.nn.relu,
**kwargs):
super(DLGMLayer, self).__init__(**kwargs)
num_inputs = incoming.output_shape[1]
self.num_units = num_units
self.output_layer = output_layer
self.extra_noise = extra_noise
# Initialize generative/decoding Parameters
self.W = self.add_variable(name='W', shape=(num_inputs, num_units),
initializer=param_init)
self.b = self.add_variable(name='b', shape=(num_units,),
initializer=param_init)
self.unc_G = self.add_variable(name='unc_G',
shape=(num_units, num_units),
initializer=param_init)
self.G = (tf.diag(tf.nn.softplus(tf.diag_part(self.unc_G))) +
self.unc_G - tf.matrix_band_part(self.unc_G, 0, -1))
self.nonlinearity = nonlinearity
# regularization term
self.k = k
# Load recognition/encoding Parameters
self.mu_net = rec_nets['mu_net']
self.u_net = rec_nets['u_net']
self.unc_d_net = rec_nets['unc_d_net']
def build(self, incoming, postJ):
rec_params = (self.mu_net.variables +
self.u_net.variables +
self.unc_d_net.variables)
i = 0
for param in rec_params:
self.add_variable(name="param"+str(i), shape=None,
initializer=param)
i += 1
super(DLGMLayer, self).build(incoming)
def calculate_xi(self, postJ):
"""
Calculate xi based on sampled J from posterior
"""
# get output of rec model
self.batch_mu = self.mu_net(postJ)
self.batch_u = self.u_net(postJ)
self.batch_unc_d = self.unc_d_net(postJ)
# add extra dim to batch_u, so it gets treated as column vectors when
# iterated over
self.batch_u = tf.expand_dims(self.batch_u, -1)
def get_cov(acc, inputs):
# convert output of rec model to rank-1 covariance matrix
# use softplus to get positive constrained d, minimum of -15
# since softplus will turn low numbers into 0, which become NaNs
# when inverted
u, unc_d = inputs
d = tf.nn.softplus(tf.maximum(unc_d, -15.0))
D_inv = tf.diag(1.0 / d)
eta = 1.0 / (tf.matmul(tf.matmul(tf.transpose(u), D_inv), u) + 1.0)
C = D_inv - eta*tf.matmul(tf.matmul(tf.matmul(D_inv, u),
tf.transpose(u)), D_inv)
Tr_C = tf.trace(C)
ld_C = tf.log(eta) - tf.reduce_sum(tf.log(d)) # eq 20 in DLGM
# coeff = ((1 - T.sqrt(eta)) / (u.T.dot(D_inv).dot(u)))
# simplified coefficient below is more stable as u -> 0
# original coefficient from paper is above
coeff = eta / (1.0 + tf.sqrt(eta))
R = (tf.sqrt(D_inv) - coeff * tf.matmul
(tf.matmul(tf.matmul(D_inv, u), tf.transpose(u)),
tf.sqrt(D_inv)))
return Tr_C, ld_C, R
(self.batch_Tr_C, self.batch_ld_C, self.batch_R) = tf.scan(
get_cov, [self.batch_u, self.batch_unc_d],
initializer=(0.0, tf.zeros([1, 1]), tf.diag(self.batch_unc_d[0])))
self.batch_xi = (self.batch_mu +
(tf.squeeze(tf.matmul(self.batch_R,
(tf.expand_dims(tf.random_normal(
[tf.shape(self.batch_R)[0],
self.num_units]), -1))))))
def call(self, inputs, add_noise=False, use_rec_model=False):
activation = tf.matmul(self.nonlinearity(inputs), self.W) + self.b
if use_rec_model:
# use sample from rec model
xi = self.batch_xi
if add_noise: # additional noise
xi += (self.extra_noise * tf.random_normal
(tf.shape(self.batch_xi)))
else:
# pure random input
xi = tf.random_normal((tf.shape(inputs)[0], self.num_units))
# we want the mean when training, so don't add noise to
# output of last layer when training.
if not self.output_layer:
activation += tf.matmul(xi, self.G)
elif not add_noise:
activation += tf.matmul(xi, self.G)
return activation
def get_ELBO(self, length):
"""
Get ELBO for this layer
length (theano symbolic int): length of current batch
"""
# KL divergence between posterior and N(0,1) prior
KL_div = (0.5 * (tf.reduce_sum(tf.sqrt(tf.reduce_sum(self.batch_mu**2,
axis=1))) + tf.reduce_sum(self.batch_Tr_C) -
tf.reduce_sum(self.batch_ld_C) - length))
weight_reg = ((0.5 / self.k) *
tf.sqrt(tf.reduce_sum(self.W**2)) *
tf.sqrt(tf.reduce_sum(self.G**2)))
return -(weight_reg + KL_div)
def compute_output_shape(self, input_shape):
return (input_shape[0], self.num_units)
class PKBiasLayer(keras_layers.Layer):
"""
This layer draws different biases (depending on the mode)
from a normal distribution, then adds them to the input
Default modes are as follows:
0: normal, no biases added
1: saline and DLPFC, bias 0 is added
2: saline and DMPFC, bias 1 is added
3: muscimol and DLPFC, biases 0 and 2 are added
4: muscimol and DMPFC, biases 1 and 3 are added
"""
def __init__(self, incoming, params,
param_init=tf.random_normal_initializer(stddev=0.01),
num_biases=4, **kwargs):
super(PKBiasLayer, self).__init__(**kwargs)
num_inputs = incoming.output_shape[1]
self.mode = tf.zeros(num_biases)
self.k = np.cast[backend.floatx()](params['k'])
self.m = self.add_variable(name='m', shape=[num_biases, num_inputs],
initializer=param_init)
self.log_s = self.add_variable(name='log_s',
shape=[num_biases, num_inputs],
initializer=param_init)
# standard deviation will always be positive but optimization over
# log_s can be unconstrained
self.s = tf.exp(self.log_s)
self.draw_biases()
self.draw_on_every_output = True
def build(self, incoming):
if self.draw_on_every_output:
self.draw_biases()
super(PKBiasLayer, self).build(incoming)
def draw_biases(self):
self.biases = self.m + tf.random_normal(shape=self.s.shape,
seed=1234) * self.s
def call(self, inputs):
act_biases = tf.matmul(tf.reshape(tf.cast(
self.mode, backend.floatx()), [1, -1]), self.biases)
return inputs + act_biases
def set_mode(self, mode):
self.mode = mode
def get_ELBO(self, nbatches):
"""
Return the contribution to the ELBO for these biases
Normalized by nbatches (number of batches in dataset)
"""
ELBO = (tf.reduce_sum(-tf.abs(self.biases) / self.k -
tf.log(tf.constant(2.0) * self.k)))
ELBO += tf.reduce_sum(tf.log(self.s))
return ELBO / nbatches
class PKRowBiasLayer(keras_layers.Layer):
"""
This layer draws different biases (depending on the mode)
from a normal distribution, then adds them to the input.
This layer has sparsity at the row level, instead of the individual
sparsity of the PKBiasLayer.
Default modes are as follows:
0: normal, no biases added
1: saline and DLPFC, bias 0 is added
2: saline and DMPFC, bias 1 is added
3: muscimol and DLPFC, biases 0 and 2 are added
4: muscimol and DMPFC, biases 1 and 3 are added
"""
def __init__(self, incoming, params,
param_init=tf.random_normal_initializer(stddev=0.01),
num_biases=4, **kwargs):
super(PKRowBiasLayer, self).__init__(**kwargs)
num_inputs = incoming.output_shape[1]
self.mode = tf.zeros(num_biases)
# parameters on prior
self.a = np.cast[backend.floatx()](params['a']) # shape
self.b = np.cast[backend.floatx()](params['b']) # rate
# learnable posterior parameters
# normal dist over biases
self.mu = self.add_variable(name='mu', shape=[num_biases, num_inputs],
initializer=param_init)
self.unc_sig = self.add_variable(name='unc_sig',
shape=[num_biases, num_inputs],
initializer=param_init)
# gamma over rows
self.alpha = tf.Variable(initial_value=self.a * np.ones(
(num_biases, 1)), name='alpha', dtype=tf.float32)
self.beta = tf.Variable(initial_value=self.b * np.ones(
(num_biases, 1)), name='beta', dtype=tf.float32)
# update for alpha
self.alpha += (num_inputs / 2.0)
# standard deviation will always be positive but optimization over
# unc_sig can be unconstrained
self.sigma = tf.nn.softplus(self.unc_sig)
self.draw_biases()
self.draw_on_every_output = True
def build(self, incoming):
if self.draw_on_every_output:
self.draw_biases()
super(PKRowBiasLayer, self).build(incoming)
def draw_biases(self):
self.gamma = self.mu + tf.random_normal(
shape=self.sigma.shape, seed=1234) * self.sigma
def call(self, input):
act_biases = tf.matmul(tf.reshape(tf.cast(
self.mode, backend.floatx()), [1, -1]), self.gamma)
return input + act_biases
def set_mode(self, mode):
self.mode = mode
def coord_update(self):
self.beta = self.b + 0.5 * tf.reduce_sum(self.mu**2 + self.sigma**2,
axis=1,
keep_dims=True)
def get_ELBO(self, nbatches):
"""
Return the contribution to the ELBO for these biases
Normalized by nbatches (number of batches in dataset)
"""
self.coord_update()
# Log Density
ELBO = (tf.reduce_sum(-0.5 * (self.mu**2 + self.sigma**2) *
(self.alpha / self.beta) + 0.5 * (tf.digamma(self.alpha) -
tf.log(self.beta)) - 0.5 * tf.log(2 * np.pi)))
ELBO += (tf.reduce_sum((self.a - 1) * (tf.digamma(self.alpha) -
tf.log(self.beta)) - self.b * (self.alpha / self.beta) +
self.a * tf.log(self.b) - tf.lgamma(self.a)))
# entropy
ELBO += (tf.reduce_sum(0.5 * tf.log(2 * np.pi) + 0.5 +
tf.log(self.sigma)))
ELBO += (tf.reduce_sum(self.alpha - tf.log(self.beta) +
tf.lgamma(self.alpha) + (1 - self.alpha) *
tf.digamma(self.alpha)))
return ELBO / nbatches
|
[
"tensorflow.trace",
"tensorflow.matrix_band_part",
"tensorflow.reduce_sum",
"tensorflow.maximum",
"numpy.ones",
"tensorflow.diag_part",
"tensorflow.matmul",
"tensorflow.sqrt",
"tensorflow.contrib.keras.backend.floatx",
"tensorflow.abs",
"tensorflow.diag",
"tensorflow.exp",
"tensorflow.constant",
"tensorflow.transpose",
"tensorflow.random_normal_initializer",
"tensorflow.random_normal",
"tensorflow.log",
"tensorflow.digamma",
"tensorflow.expand_dims",
"tensorflow.lgamma",
"tensorflow.shape",
"tensorflow.zeros",
"tensorflow.nn.softplus"
] |
[((1930, 1967), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', (['(0)', '(0.01)'], {}), '(0, 0.01)\n', (1958, 1967), True, 'import tensorflow as tf\n'), ((3970, 4002), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.batch_u', '(-1)'], {}), '(self.batch_u, -1)\n', (3984, 4002), True, 'import tensorflow as tf\n'), ((7644, 7685), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.01)'}), '(stddev=0.01)\n', (7672, 7685), True, 'import tensorflow as tf\n'), ((7848, 7868), 'tensorflow.zeros', 'tf.zeros', (['num_biases'], {}), '(num_biases)\n', (7856, 7868), True, 'import tensorflow as tf\n'), ((8378, 8396), 'tensorflow.exp', 'tf.exp', (['self.log_s'], {}), '(self.log_s)\n', (8384, 8396), True, 'import tensorflow as tf\n'), ((10013, 10054), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.01)'}), '(stddev=0.01)\n', (10041, 10054), True, 'import tensorflow as tf\n'), ((10220, 10240), 'tensorflow.zeros', 'tf.zeros', (['num_biases'], {}), '(num_biases)\n', (10228, 10240), True, 'import tensorflow as tf\n'), ((11295, 11323), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['self.unc_sig'], {}), '(self.unc_sig)\n', (11309, 11323), True, 'import tensorflow as tf\n'), ((2849, 2887), 'tensorflow.matrix_band_part', 'tf.matrix_band_part', (['self.unc_G', '(0)', '(-1)'], {}), '(self.unc_G, 0, -1)\n', (2868, 2887), True, 'import tensorflow as tf\n'), ((4394, 4410), 'tensorflow.diag', 'tf.diag', (['(1.0 / d)'], {}), '(1.0 / d)\n', (4401, 4410), True, 'import tensorflow as tf\n'), ((4642, 4653), 'tensorflow.trace', 'tf.trace', (['C'], {}), '(C)\n', (4650, 4653), True, 'import tensorflow as tf\n'), ((6314, 6335), 'tensorflow.matmul', 'tf.matmul', (['xi', 'self.G'], {}), '(xi, self.G)\n', (6323, 6335), True, 'import tensorflow as tf\n'), ((9357, 9371), 'tensorflow.log', 'tf.log', (['self.s'], {}), '(self.s)\n', (9363, 9371), True, 'import tensorflow as tf\n'), ((4348, 4372), 'tensorflow.maximum', 'tf.maximum', (['unc_d', '(-15.0)'], {}), '(unc_d, -15.0)\n', (4358, 4372), True, 'import tensorflow as tf\n'), ((4673, 4684), 'tensorflow.log', 'tf.log', (['eta'], {}), '(eta)\n', (4679, 4684), True, 'import tensorflow as tf\n'), ((4984, 4998), 'tensorflow.sqrt', 'tf.sqrt', (['D_inv'], {}), '(D_inv)\n', (4991, 4998), True, 'import tensorflow as tf\n'), ((6390, 6411), 'tensorflow.matmul', 'tf.matmul', (['xi', 'self.G'], {}), '(xi, self.G)\n', (6399, 6411), True, 'import tensorflow as tf\n'), ((6981, 7007), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(self.G ** 2)'], {}), '(self.G ** 2)\n', (6994, 7007), True, 'import tensorflow as tf\n'), ((7894, 7910), 'tensorflow.contrib.keras.backend.floatx', 'backend.floatx', ([], {}), '()\n', (7908, 7910), False, 'from tensorflow.contrib.keras import backend\n'), ((8675, 8722), 'tensorflow.random_normal', 'tf.random_normal', ([], {'shape': 'self.s.shape', 'seed': '(1234)'}), '(shape=self.s.shape, seed=1234)\n', (8691, 8722), True, 'import tensorflow as tf\n'), ((10296, 10312), 'tensorflow.contrib.keras.backend.floatx', 'backend.floatx', ([], {}), '()\n', (10310, 10312), False, 'from tensorflow.contrib.keras import backend\n'), ((10361, 10377), 'tensorflow.contrib.keras.backend.floatx', 'backend.floatx', ([], {}), '()\n', (10375, 10377), False, 'from tensorflow.contrib.keras import backend\n'), ((11605, 11656), 'tensorflow.random_normal', 'tf.random_normal', ([], {'shape': 'self.sigma.shape', 'seed': '(1234)'}), '(shape=self.sigma.shape, seed=1234)\n', (11621, 11656), True, 'import tensorflow as tf\n'), ((11980, 12049), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(self.mu ** 2 + self.sigma ** 2)'], {'axis': '(1)', 'keep_dims': '(True)'}), '(self.mu ** 2 + self.sigma ** 2, axis=1, keep_dims=True)\n', (11993, 12049), True, 'import tensorflow as tf\n'), ((12772, 12789), 'tensorflow.lgamma', 'tf.lgamma', (['self.a'], {}), '(self.a)\n', (12781, 12789), True, 'import tensorflow as tf\n'), ((12890, 12908), 'tensorflow.log', 'tf.log', (['self.sigma'], {}), '(self.sigma)\n', (12896, 12908), True, 'import tensorflow as tf\n'), ((4701, 4710), 'tensorflow.log', 'tf.log', (['d'], {}), '(d)\n', (4707, 4710), True, 'import tensorflow as tf\n'), ((4953, 4965), 'tensorflow.sqrt', 'tf.sqrt', (['eta'], {}), '(eta)\n', (4960, 4965), True, 'import tensorflow as tf\n'), ((5308, 5324), 'tensorflow.zeros', 'tf.zeros', (['[1, 1]'], {}), '([1, 1])\n', (5316, 5324), True, 'import tensorflow as tf\n'), ((5326, 5354), 'tensorflow.diag', 'tf.diag', (['self.batch_unc_d[0]'], {}), '(self.batch_unc_d[0])\n', (5333, 5354), True, 'import tensorflow as tf\n'), ((6812, 6842), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.batch_ld_C'], {}), '(self.batch_ld_C)\n', (6825, 6842), True, 'import tensorflow as tf\n'), ((6923, 6949), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(self.W ** 2)'], {}), '(self.W ** 2)\n', (6936, 6949), True, 'import tensorflow as tf\n'), ((8883, 8899), 'tensorflow.contrib.keras.backend.floatx', 'backend.floatx', ([], {}), '()\n', (8897, 8899), False, 'from tensorflow.contrib.keras import backend\n'), ((10894, 10918), 'numpy.ones', 'np.ones', (['(num_biases, 1)'], {}), '((num_biases, 1))\n', (10901, 10918), True, 'import numpy as np\n'), ((11020, 11044), 'numpy.ones', 'np.ones', (['(num_biases, 1)'], {}), '((num_biases, 1))\n', (11027, 11044), True, 'import numpy as np\n'), ((11785, 11801), 'tensorflow.contrib.keras.backend.floatx', 'backend.floatx', ([], {}), '()\n', (11799, 11801), False, 'from tensorflow.contrib.keras import backend\n'), ((12563, 12580), 'tensorflow.log', 'tf.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (12569, 12580), True, 'import tensorflow as tf\n'), ((12992, 13013), 'tensorflow.lgamma', 'tf.lgamma', (['self.alpha'], {}), '(self.alpha)\n', (13001, 13013), True, 'import tensorflow as tf\n'), ((13052, 13074), 'tensorflow.digamma', 'tf.digamma', (['self.alpha'], {}), '(self.alpha)\n', (13062, 13074), True, 'import tensorflow as tf\n'), ((2789, 2813), 'tensorflow.diag_part', 'tf.diag_part', (['self.unc_G'], {}), '(self.unc_G)\n', (2801, 2813), True, 'import tensorflow as tf\n'), ((5104, 5118), 'tensorflow.sqrt', 'tf.sqrt', (['D_inv'], {}), '(D_inv)\n', (5111, 5118), True, 'import tensorflow as tf\n'), ((5999, 6022), 'tensorflow.shape', 'tf.shape', (['self.batch_xi'], {}), '(self.batch_xi)\n', (6007, 6022), True, 'import tensorflow as tf\n'), ((6106, 6122), 'tensorflow.shape', 'tf.shape', (['inputs'], {}), '(inputs)\n', (6114, 6122), True, 'import tensorflow as tf\n'), ((6761, 6791), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.batch_Tr_C'], {}), '(self.batch_Tr_C)\n', (6774, 6791), True, 'import tensorflow as tf\n'), ((9230, 9249), 'tensorflow.abs', 'tf.abs', (['self.biases'], {}), '(self.biases)\n', (9236, 9249), True, 'import tensorflow as tf\n'), ((9298, 9314), 'tensorflow.constant', 'tf.constant', (['(2.0)'], {}), '(2.0)\n', (9309, 9314), True, 'import tensorflow as tf\n'), ((12755, 12769), 'tensorflow.log', 'tf.log', (['self.b'], {}), '(self.b)\n', (12761, 12769), True, 'import tensorflow as tf\n'), ((12847, 12864), 'tensorflow.log', 'tf.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (12853, 12864), True, 'import tensorflow as tf\n'), ((12955, 12972), 'tensorflow.log', 'tf.log', (['self.beta'], {}), '(self.beta)\n', (12961, 12972), True, 'import tensorflow as tf\n'), ((4456, 4471), 'tensorflow.transpose', 'tf.transpose', (['u'], {}), '(u)\n', (4468, 4471), True, 'import tensorflow as tf\n'), ((4539, 4558), 'tensorflow.matmul', 'tf.matmul', (['D_inv', 'u'], {}), '(D_inv, u)\n', (4548, 4558), True, 'import tensorflow as tf\n'), ((4598, 4613), 'tensorflow.transpose', 'tf.transpose', (['u'], {}), '(u)\n', (4610, 4613), True, 'import tensorflow as tf\n'), ((5047, 5066), 'tensorflow.matmul', 'tf.matmul', (['D_inv', 'u'], {}), '(D_inv, u)\n', (5056, 5066), True, 'import tensorflow as tf\n'), ((5068, 5083), 'tensorflow.transpose', 'tf.transpose', (['u'], {}), '(u)\n', (5080, 5083), True, 'import tensorflow as tf\n'), ((12495, 12517), 'tensorflow.digamma', 'tf.digamma', (['self.alpha'], {}), '(self.alpha)\n', (12505, 12517), True, 'import tensorflow as tf\n'), ((12536, 12553), 'tensorflow.log', 'tf.log', (['self.beta'], {}), '(self.beta)\n', (12542, 12553), True, 'import tensorflow as tf\n'), ((6699, 6740), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(self.batch_mu ** 2)'], {'axis': '(1)'}), '(self.batch_mu ** 2, axis=1)\n', (6712, 6740), True, 'import tensorflow as tf\n'), ((12630, 12652), 'tensorflow.digamma', 'tf.digamma', (['self.alpha'], {}), '(self.alpha)\n', (12640, 12652), True, 'import tensorflow as tf\n'), ((12672, 12689), 'tensorflow.log', 'tf.log', (['self.beta'], {}), '(self.beta)\n', (12678, 12689), True, 'import tensorflow as tf\n'), ((5548, 5570), 'tensorflow.shape', 'tf.shape', (['self.batch_R'], {}), '(self.batch_R)\n', (5556, 5570), True, 'import tensorflow as tf\n')]
|
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: <NAME> (<EMAIL>)
#
from __future__ import absolute_import, division, unicode_literals
import math
import sys
from math import sqrt
from mo_dots import Data, Null, coalesce
from mo_future import text
from mo_logs import Log
from mo_math import OR, almost_equal
from mo_math.vendor import strangman
DEBUG = True
DEBUG_STRANGMAN = False
EPSILON = 0.000000001
ABS_EPSILON = sys.float_info.min * 2 # *2 FOR SAFETY
if DEBUG_STRANGMAN:
try:
import numpy as np
from scipy import stats
import scipy
except Exception as e:
DEBUG_STRANGMAN = False
def chisquare(f_obs, f_exp):
try:
py_result = strangman.stats.chisquare(f_obs, f_exp)
except Exception as e:
Log.error("problem with call", e)
if DEBUG_STRANGMAN:
from mo_testing.fuzzytestcase import assertAlmostEqualValue
sp_result = scipy.stats.chisquare(np.array(f_obs), f_exp=np.array(f_exp))
if not assertAlmostEqualValue(
sp_result[0], py_result[0], digits=9
) and assertAlmostEqualValue(sp_result[1], py_result[1], delta=1e-8):
Log.error("problem with stats lib")
return py_result
def Stats2ZeroMoment(stats):
# MODIFIED FROM http://statsmodels.sourceforge.net/devel/_modules/statsmodels/stats/moment_helpers.html
# ADDED count
mc0, mc1, mc2, skew, kurt = (
stats.count,
coalesce(stats.mean, 0),
coalesce(stats.variance, 0),
coalesce(stats.skew, 0),
coalesce(stats.kurtosis, 0),
)
mz0 = mc0
mz1 = mc1 * mc0
mz2 = (mc2 + mc1 * mc1) * mc0
mc3 = coalesce(skew, 0) * (mc2 ** 1.5) # 3rd central moment
mz3 = (mc3 + 3 * mc1 * mc2 + mc1 ** 3) * mc0 # 3rd non-central moment
mc4 = (coalesce(kurt, 0) + 3.0) * (mc2 ** 2.0) # 4th central moment
mz4 = (mc4 + 4 * mc1 * mc3 + 6 * mc1 * mc1 * mc2 + mc1 ** 4) * mc0
m = ZeroMoment(mz0, mz1, mz2, mz3, mz4)
if DEBUG:
from mo_testing.fuzzytestcase import assertAlmostEqualValue
globals()["DEBUG"] = False
try:
v = ZeroMoment2Stats(m)
assertAlmostEqualValue(v.count, stats.count, places=10)
assertAlmostEqualValue(v.mean, stats.mean, places=10)
assertAlmostEqualValue(v.variance, stats.variance, places=10)
assertAlmostEqualValue(v.skew, stats.skew, places=10)
assertAlmostEqualValue(v.kurtosis, stats.kurtosis, places=10)
except Exception as e:
v = ZeroMoment2Stats(m)
Log.error("programmer error")
globals()["DEBUG"] = True
return m
def ZeroMoment2Stats(z_moment):
Z = z_moment.S
N = Z[0]
if N == 0:
return Stats()
mean = Z[1] / N
Z2 = Z[2] / N
Z3 = Z[3] / N
Z4 = Z[4] / N
if N == 1:
variance = None
skew = None
kurtosis = None
else:
if almost_equal(Z2, mean * mean, digits=9):
variance = 0
skew = None
kurtosis = None
else:
variance = Z2 - mean * mean
mc3 = Z3 - (3 * mean * variance + mean ** 3) # 3rd central moment
mc4 = Z4 - (4 * mean * mc3 + 6 * mean * mean * variance + mean ** 4)
skew = mc3 / (variance ** 1.5)
kurtosis = (mc4 / (variance ** 2.0)) - 3.0
stats = Stats(count=N, mean=mean, variance=variance, skew=skew, kurtosis=kurtosis)
if DEBUG:
from mo_testing.fuzzytestcase import assertAlmostEqualValue
globals()["DEBUG"] = False
v = Null
try:
v = Stats2ZeroMoment(stats)
for i in range(5):
assertAlmostEqualValue(v.S[i], Z[i], places=7)
except Exception as e:
Log.error(
"Conversion failed. Programmer error:\nfrom={{from|indent}},\nresult stats={{stats|indent}},\nexpected param={{expected|indent}}",
{"from": Z},
stats=stats,
expected=v.S,
cause=e,
)
globals()["DEBUG"] = True
return stats
class Stats(Data):
def __init__(self, **kwargs):
Data.__init__(self)
self.count = 0
self.mean = None
self.variance = None
self.skew = None
self.kurtosis = None
if "samples" in kwargs:
s = ZeroMoment2Stats(ZeroMoment.new_instance(kwargs["samples"]))
self.count = s.count
self.mean = s.mean
self.variance = s.variance
self.skew = s.skew
self.kurtosis = s.kurtosis
return
if "count" not in kwargs:
self.count = 0
self.mean = None
self.variance = None
self.skew = None
self.kurtosis = None
elif "mean" not in kwargs:
self.count = kwargs["count"]
self.mean = None
self.variance = None
self.skew = None
self.kurtosis = None
elif "variance" not in kwargs and "std" not in kwargs:
self.count = kwargs["count"]
self.mean = kwargs["mean"]
self.variance = 0
self.skew = None
self.kurtosis = None
elif "skew" not in kwargs:
self.count = kwargs["count"]
self.mean = kwargs["mean"]
self.variance = (
kwargs["variance"] if "variance" in kwargs else kwargs["std"] ** 2
)
self.skew = None
self.kurtosis = None
elif "kurtosis" not in kwargs:
self.count = kwargs["count"]
self.mean = kwargs["mean"]
self.variance = (
kwargs["variance"] if "variance" in kwargs else kwargs["std"] ** 2
)
self.skew = kwargs["skew"]
self.kurtosis = None
else:
self.count = kwargs["count"]
self.mean = kwargs["mean"]
self.variance = (
kwargs["variance"] if "variance" in kwargs else kwargs["std"] ** 2
)
self.skew = kwargs["skew"]
self.kurtosis = kwargs["kurtosis"]
@property
def std(self):
return sqrt(self.variance)
class ZeroMoment(object):
"""
ZERO-CENTERED MOMENTS
"""
def __init__(self, *args):
self.S = tuple(args)
def __add__(self, other):
if isinstance(other, ZeroMoment):
return ZeroMoment(*map(add, self.S, other.S))
elif hasattr(other, "__iter__"):
return ZeroMoment(*map(add, self.S, ZeroMoment.new_instance(other)))
elif other == None:
return self
else:
return ZeroMoment(
*map(
add,
self.S,
(
1,
other,
pow(other, 2),
pow(other, 3),
pow(other, 4),
pow(other, 2),
),
)
)
def __sub__(self, other):
if isinstance(other, ZeroMoment):
return ZeroMoment(*map(sub, self.S, other.S))
elif hasattr(other, "__iter__"):
return ZeroMoment(*map(sub, self.S, ZeroMoment.new_instance(other)))
elif other == None:
return self
else:
return ZeroMoment(
*map(
sub, self.S, (1, other, pow(other, 2), pow(other, 3), pow(other, 4))
)
)
@property
def tuple(self):
# RETURN AS ORDERED TUPLE
return self.S
@property
def dict(self):
# RETURN HASH OF SUMS
return {"s" + text(i): m for i, m in enumerate(self.S)}
@staticmethod
def new_instance(values=None):
if values == None:
return ZeroMoment()
vals = [v for v in values if v != None]
return ZeroMoment(
len(vals),
sum(vals),
sum([pow(n, 2) for n in vals]),
sum([pow(n, 3) for n in vals]),
sum([pow(n, 4) for n in vals]),
)
@property
def stats(self, *args, **kwargs):
return ZeroMoment2Stats(self, *args, **kwargs)
def add(a, b):
return coalesce(a, 0) + coalesce(b, 0)
def sub(a, b):
return coalesce(a, 0) - coalesce(b, 0)
def ZeroMoment2dict(z):
# RETURN HASH OF SUMS
return {"s" + text(i): m for i, m in enumerate(z.S)}
def median(values, simple=True, mean_weight=0.0):
"""
RETURN MEDIAN VALUE
IF simple=False THEN IN THE EVENT MULTIPLE INSTANCES OF THE
MEDIAN VALUE, THE MEDIAN IS INTERPOLATED BASED ON ITS POSITION
IN THE MEDIAN RANGE
mean_weight IS TO PICK A MEDIAN VALUE IN THE ODD CASE THAT IS
CLOSER TO THE MEAN (PICK A MEDIAN BETWEEN TWO MODES IN BIMODAL CASE)
"""
if OR(v == None for v in values):
Log.error("median is not ready to handle None")
try:
if not values:
return Null
l = len(values)
_sorted = sorted(values)
middle = int(l / 2)
_median = float(_sorted[middle])
if len(_sorted) == 1:
return _median
if simple:
if l % 2 == 0:
return (_sorted[middle - 1] + _median) / 2
return _median
# FIND RANGE OF THE median
start_index = middle - 1
while start_index > 0 and _sorted[start_index] == _median:
start_index -= 1
start_index += 1
stop_index = middle + 1
while stop_index < l and _sorted[stop_index] == _median:
stop_index += 1
num_middle = stop_index - start_index
if l % 2 == 0:
if num_middle == 1:
return (_sorted[middle - 1] + _median) / 2
else:
return (_median - 0.5) + (middle - start_index) / num_middle
else:
if num_middle == 1:
return (1 - mean_weight) * _median + mean_weight * (
_sorted[middle - 1] + _sorted[middle + 1]
) / 2
else:
return (_median - 0.5) + (middle + 0.5 - start_index) / num_middle
except Exception as e:
Log.error("problem with median of {{values}}", values=values, cause=e)
def percentile(values, percent):
"""
PERCENTILE WITH INTERPOLATION
RETURN VALUE AT, OR ABOVE, percentile OF THE VALUES
snagged from http://code.activestate.com/recipes/511478-finding-the-percentile-of-the-values/
"""
N = sorted(values)
if not N:
return None
k = (len(N) - 1) * percent
f = int(math.floor(k))
c = int(math.ceil(k))
if f == c:
return N[int(k)]
d0 = N[f] * (c - k)
d1 = N[c] * (k - f)
return d0 + d1
zero = Stats()
|
[
"mo_dots.Data.__init__",
"math.sqrt",
"math.ceil",
"mo_math.vendor.strangman.stats.chisquare",
"math.floor",
"mo_testing.fuzzytestcase.assertAlmostEqualValue",
"mo_math.almost_equal",
"mo_dots.coalesce",
"numpy.array",
"mo_future.text",
"mo_logs.Log.error",
"mo_math.OR"
] |
[((9098, 9127), 'mo_math.OR', 'OR', (['(v == None for v in values)'], {}), '(v == None for v in values)\n', (9100, 9127), False, 'from mo_math import OR, almost_equal\n'), ((877, 916), 'mo_math.vendor.strangman.stats.chisquare', 'strangman.stats.chisquare', (['f_obs', 'f_exp'], {}), '(f_obs, f_exp)\n', (902, 916), False, 'from mo_math.vendor import strangman\n'), ((1618, 1641), 'mo_dots.coalesce', 'coalesce', (['stats.mean', '(0)'], {}), '(stats.mean, 0)\n', (1626, 1641), False, 'from mo_dots import Data, Null, coalesce\n'), ((1651, 1678), 'mo_dots.coalesce', 'coalesce', (['stats.variance', '(0)'], {}), '(stats.variance, 0)\n', (1659, 1678), False, 'from mo_dots import Data, Null, coalesce\n'), ((1688, 1711), 'mo_dots.coalesce', 'coalesce', (['stats.skew', '(0)'], {}), '(stats.skew, 0)\n', (1696, 1711), False, 'from mo_dots import Data, Null, coalesce\n'), ((1721, 1748), 'mo_dots.coalesce', 'coalesce', (['stats.kurtosis', '(0)'], {}), '(stats.kurtosis, 0)\n', (1729, 1748), False, 'from mo_dots import Data, Null, coalesce\n'), ((1835, 1852), 'mo_dots.coalesce', 'coalesce', (['skew', '(0)'], {}), '(skew, 0)\n', (1843, 1852), False, 'from mo_dots import Data, Null, coalesce\n'), ((3109, 3148), 'mo_math.almost_equal', 'almost_equal', (['Z2', '(mean * mean)'], {'digits': '(9)'}), '(Z2, mean * mean, digits=9)\n', (3121, 3148), False, 'from mo_math import OR, almost_equal\n'), ((4354, 4373), 'mo_dots.Data.__init__', 'Data.__init__', (['self'], {}), '(self)\n', (4367, 4373), False, 'from mo_dots import Data, Null, coalesce\n'), ((6407, 6426), 'math.sqrt', 'sqrt', (['self.variance'], {}), '(self.variance)\n', (6411, 6426), False, 'from math import sqrt\n'), ((8501, 8515), 'mo_dots.coalesce', 'coalesce', (['a', '(0)'], {}), '(a, 0)\n', (8509, 8515), False, 'from mo_dots import Data, Null, coalesce\n'), ((8518, 8532), 'mo_dots.coalesce', 'coalesce', (['b', '(0)'], {}), '(b, 0)\n', (8526, 8532), False, 'from mo_dots import Data, Null, coalesce\n'), ((8561, 8575), 'mo_dots.coalesce', 'coalesce', (['a', '(0)'], {}), '(a, 0)\n', (8569, 8575), False, 'from mo_dots import Data, Null, coalesce\n'), ((8578, 8592), 'mo_dots.coalesce', 'coalesce', (['b', '(0)'], {}), '(b, 0)\n', (8586, 8592), False, 'from mo_dots import Data, Null, coalesce\n'), ((9137, 9184), 'mo_logs.Log.error', 'Log.error', (['"""median is not ready to handle None"""'], {}), "('median is not ready to handle None')\n", (9146, 9184), False, 'from mo_logs import Log\n'), ((10879, 10892), 'math.floor', 'math.floor', (['k'], {}), '(k)\n', (10889, 10892), False, 'import math\n'), ((10906, 10918), 'math.ceil', 'math.ceil', (['k'], {}), '(k)\n', (10915, 10918), False, 'import math\n'), ((952, 985), 'mo_logs.Log.error', 'Log.error', (['"""problem with call"""', 'e'], {}), "('problem with call', e)\n", (961, 985), False, 'from mo_logs import Log\n'), ((1122, 1137), 'numpy.array', 'np.array', (['f_obs'], {}), '(f_obs)\n', (1130, 1137), True, 'import numpy as np\n'), ((1264, 1327), 'mo_testing.fuzzytestcase.assertAlmostEqualValue', 'assertAlmostEqualValue', (['sp_result[1]', 'py_result[1]'], {'delta': '(1e-08)'}), '(sp_result[1], py_result[1], delta=1e-08)\n', (1286, 1327), False, 'from mo_testing.fuzzytestcase import assertAlmostEqualValue\n'), ((1340, 1375), 'mo_logs.Log.error', 'Log.error', (['"""problem with stats lib"""'], {}), "('problem with stats lib')\n", (1349, 1375), False, 'from mo_logs import Log\n'), ((1976, 1993), 'mo_dots.coalesce', 'coalesce', (['kurt', '(0)'], {}), '(kurt, 0)\n', (1984, 1993), False, 'from mo_dots import Data, Null, coalesce\n'), ((2333, 2388), 'mo_testing.fuzzytestcase.assertAlmostEqualValue', 'assertAlmostEqualValue', (['v.count', 'stats.count'], {'places': '(10)'}), '(v.count, stats.count, places=10)\n', (2355, 2388), False, 'from mo_testing.fuzzytestcase import assertAlmostEqualValue\n'), ((2401, 2454), 'mo_testing.fuzzytestcase.assertAlmostEqualValue', 'assertAlmostEqualValue', (['v.mean', 'stats.mean'], {'places': '(10)'}), '(v.mean, stats.mean, places=10)\n', (2423, 2454), False, 'from mo_testing.fuzzytestcase import assertAlmostEqualValue\n'), ((2467, 2528), 'mo_testing.fuzzytestcase.assertAlmostEqualValue', 'assertAlmostEqualValue', (['v.variance', 'stats.variance'], {'places': '(10)'}), '(v.variance, stats.variance, places=10)\n', (2489, 2528), False, 'from mo_testing.fuzzytestcase import assertAlmostEqualValue\n'), ((2541, 2594), 'mo_testing.fuzzytestcase.assertAlmostEqualValue', 'assertAlmostEqualValue', (['v.skew', 'stats.skew'], {'places': '(10)'}), '(v.skew, stats.skew, places=10)\n', (2563, 2594), False, 'from mo_testing.fuzzytestcase import assertAlmostEqualValue\n'), ((2607, 2668), 'mo_testing.fuzzytestcase.assertAlmostEqualValue', 'assertAlmostEqualValue', (['v.kurtosis', 'stats.kurtosis'], {'places': '(10)'}), '(v.kurtosis, stats.kurtosis, places=10)\n', (2629, 2668), False, 'from mo_testing.fuzzytestcase import assertAlmostEqualValue\n'), ((8663, 8670), 'mo_future.text', 'text', (['i'], {}), '(i)\n', (8667, 8670), False, 'from mo_future import text\n'), ((10468, 10538), 'mo_logs.Log.error', 'Log.error', (['"""problem with median of {{values}}"""'], {'values': 'values', 'cause': 'e'}), "('problem with median of {{values}}', values=values, cause=e)\n", (10477, 10538), False, 'from mo_logs import Log\n'), ((1145, 1160), 'numpy.array', 'np.array', (['f_exp'], {}), '(f_exp)\n', (1153, 1160), True, 'import numpy as np\n'), ((1177, 1237), 'mo_testing.fuzzytestcase.assertAlmostEqualValue', 'assertAlmostEqualValue', (['sp_result[0]', 'py_result[0]'], {'digits': '(9)'}), '(sp_result[0], py_result[0], digits=9)\n', (1199, 1237), False, 'from mo_testing.fuzzytestcase import assertAlmostEqualValue\n'), ((2748, 2777), 'mo_logs.Log.error', 'Log.error', (['"""programmer error"""'], {}), "('programmer error')\n", (2757, 2777), False, 'from mo_logs import Log\n'), ((3863, 3909), 'mo_testing.fuzzytestcase.assertAlmostEqualValue', 'assertAlmostEqualValue', (['v.S[i]', 'Z[i]'], {'places': '(7)'}), '(v.S[i], Z[i], places=7)\n', (3885, 3909), False, 'from mo_testing.fuzzytestcase import assertAlmostEqualValue\n'), ((3953, 4154), 'mo_logs.Log.error', 'Log.error', (['"""Conversion failed. Programmer error:\nfrom={{from|indent}},\nresult stats={{stats|indent}},\nexpected param={{expected|indent}}"""', "{'from': Z}"], {'stats': 'stats', 'expected': 'v.S', 'cause': 'e'}), '(\n """Conversion failed. Programmer error:\nfrom={{from|indent}},\nresult stats={{stats|indent}},\nexpected param={{expected|indent}}"""\n , {\'from\': Z}, stats=stats, expected=v.S, cause=e)\n', (3962, 4154), False, 'from mo_logs import Log\n'), ((7946, 7953), 'mo_future.text', 'text', (['i'], {}), '(i)\n', (7950, 7953), False, 'from mo_future import text\n')]
|
# organize data, read wav to get duration and split train/test
# to a csv file
# author: Max, 2020.08.05
import os
import librosa
from tqdm import tqdm
import pandas as pd
from sklearn.model_selection import StratifiedKFold
def main(root_pth):
if not os.path.exists('df.csv'):
data = []
folds = os.listdir(root_pth)
for idx, fold in enumerate(tqdm(folds)):
wavs = os.listdir(os.path.join(root_pth, fold))
for wav in wavs:
wav_pth = os.path.join(root_pth, fold, wav)
duration = librosa.get_duration(filename=wav_pth)
target = {
'file': wav,
'bird': fold,
'label': idx,
'duration': duration
}
data.append(target)
# if idx == 1:
# break
df = pd.DataFrame(data, columns=['label', 'bird', 'file', 'duration'])
df.to_csv('df.csv', index=False)
df = pd.read_csv('df.csv')
df['fold'] = -1
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
for fold_number, (train_index, val_index) in enumerate(skf.split(X=df.index, y=df['label'])):
df.loc[df.iloc[val_index].index, 'fold'] = fold_number
df.to_csv('df.csv', index=False)
if __name__ == "__main__":
# main('./data/birdsong-recognition/train_audio/')
# https://www.kaggle.com/ttahara/training-birdsong-baseline-resnest50-fast#split-data
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
train_all = pd.read_csv('train_mod.csv')
train_all = train_all.drop(train_all[train_all["resampled_filename"] == 'XC195038.wav'].index)
train_all = train_all.reset_index()
train_all["fold"] = -1
for fold_id, (train_index, val_index) in enumerate(skf.split(train_all, train_all["ebird_code"])):
train_all.iloc[val_index, -1] = fold_id
# # check the propotion
fold_proportion = pd.pivot_table(train_all, index="ebird_code", columns="fold", values="xc_id", aggfunc=len)
print(fold_proportion.shape)
train_all.to_csv('df_mod.csv', index=False)
|
[
"pandas.DataFrame",
"tqdm.tqdm",
"pandas.pivot_table",
"pandas.read_csv",
"os.path.exists",
"sklearn.model_selection.StratifiedKFold",
"os.path.join",
"os.listdir",
"librosa.get_duration"
] |
[((1010, 1031), 'pandas.read_csv', 'pd.read_csv', (['"""df.csv"""'], {}), "('df.csv')\n", (1021, 1031), True, 'import pandas as pd\n'), ((1062, 1120), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(5)', 'shuffle': '(True)', 'random_state': '(42)'}), '(n_splits=5, shuffle=True, random_state=42)\n', (1077, 1120), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((1505, 1563), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(5)', 'shuffle': '(True)', 'random_state': '(42)'}), '(n_splits=5, shuffle=True, random_state=42)\n', (1520, 1563), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((1580, 1608), 'pandas.read_csv', 'pd.read_csv', (['"""train_mod.csv"""'], {}), "('train_mod.csv')\n", (1591, 1608), True, 'import pandas as pd\n'), ((1977, 2072), 'pandas.pivot_table', 'pd.pivot_table', (['train_all'], {'index': '"""ebird_code"""', 'columns': '"""fold"""', 'values': '"""xc_id"""', 'aggfunc': 'len'}), "(train_all, index='ebird_code', columns='fold', values=\n 'xc_id', aggfunc=len)\n", (1991, 2072), True, 'import pandas as pd\n'), ((258, 282), 'os.path.exists', 'os.path.exists', (['"""df.csv"""'], {}), "('df.csv')\n", (272, 282), False, 'import os\n'), ((318, 338), 'os.listdir', 'os.listdir', (['root_pth'], {}), '(root_pth)\n', (328, 338), False, 'import os\n'), ((893, 958), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['label', 'bird', 'file', 'duration']"}), "(data, columns=['label', 'bird', 'file', 'duration'])\n", (905, 958), True, 'import pandas as pd\n'), ((374, 385), 'tqdm.tqdm', 'tqdm', (['folds'], {}), '(folds)\n', (378, 385), False, 'from tqdm import tqdm\n'), ((418, 446), 'os.path.join', 'os.path.join', (['root_pth', 'fold'], {}), '(root_pth, fold)\n', (430, 446), False, 'import os\n'), ((503, 536), 'os.path.join', 'os.path.join', (['root_pth', 'fold', 'wav'], {}), '(root_pth, fold, wav)\n', (515, 536), False, 'import os\n'), ((564, 602), 'librosa.get_duration', 'librosa.get_duration', ([], {'filename': 'wav_pth'}), '(filename=wav_pth)\n', (584, 602), False, 'import librosa\n')]
|
from django.db import models
# Create your models here.
class Location(models.Model):
"""
class facilitates the creation of location objects
"""
location_name = models.CharField(max_length=70)
def __str__(self):
return self.location_name
def save_location(self):
"""
method saves entered location in database
"""
self.save()
def update_location(self, using=None, fields=None, **kwargs):
"""
method updates saved location
"""
if fields is not None:
fields = set(fields)
deferred_fields = self.get_deferred_fields()
if fields.intersection(deferred_fields):
fields = fields.union(deferred_fields)
super().refresh_from_db(using, fields, **kwargs)
def delete_location(self):
"""
method deletes location
"""
self.delete()
class Category(models.Model):
"""
class facilitates the creation of category objects
"""
category_name = models.CharField(max_length=70)
def __str__(self):
return self.category_name
def save_category(self):
"""
method saves added category
"""
self.save()
def update_category(self, using=None, fields=None, **kwargs):
"""
method updates saved category
"""
if fields is not None:
fields = set(fields)
deferred_fields = self.get_deferred_fields()
if fields.intersection(deferred_fields):
fields = fields.union(deferred_fields)
super().refresh_from_db(using, fields, **kwargs)
def delete_category(self):
"""
method deletes saved category
"""
self.delete()
class Image(models.Model):
"""
class facilitates the creation of image objects
"""
image_name = models.CharField(max_length=70)
image_description = models.TextField(max_length=200)
location = models.ForeignKey(Location, on_delete=models.CASCADE)
category = models.ManyToManyField(Category)
pub_date = models.DateField('date published', null=True)
owner = models.CharField(max_length=70, null=True)
image = models.ImageField(upload_to='images/%Y/%m/%d', null=True)
def __str__(self):
return self.image_name
def save_image(self):
"""
method saves added image
"""
self.save()
def update_image(self, using=None, fields=None, **kwargs):
"""
method updates saved category
"""
if fields is not None:
fields = set(fields)
deferred_fields = self.get_deferred_fields()
if fields.intersection(deferred_fields):
fields = fields.union(deferred_fields)
super().refresh_from_db(using, fields, **kwargs)
def delete_image(self):
"""
method deletes saved image
"""
self.delete()
@classmethod
def get_image_by_id(cls, image_id):
"""
method returns image with a particular id
"""
try:
single_image = cls.objects.filter(pk=image_id)
except Image.DoesNotExist:
pass
return single_image
@classmethod
def filter_images_by_location(cls, location_id):
"""
method returns images in a given category
"""
try:
images = cls.objects.filter(location__pk=location_id)
except Image.DoesNotExist:
pass
return images
@classmethod
def search_images_by_category(cls, category_id):
"""
method returns images associated with a particular category
"""
try:
images = cls.objects.filter(category__pk=category_id)
except Image.DoesNotExist:
pass
return images
@classmethod
def search_term_category(cls, search_term):
"""
method returns category specific images
"""
images = cls.objects.filter(category__category_name__icontains=search_term)
return images
|
[
"django.db.models.TextField",
"django.db.models.ManyToManyField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.ImageField",
"django.db.models.DateField"
] |
[((178, 209), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(70)'}), '(max_length=70)\n', (194, 209), False, 'from django.db import models\n'), ((1047, 1078), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(70)'}), '(max_length=70)\n', (1063, 1078), False, 'from django.db import models\n'), ((1892, 1923), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(70)'}), '(max_length=70)\n', (1908, 1923), False, 'from django.db import models\n'), ((1948, 1980), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (1964, 1980), False, 'from django.db import models\n'), ((1996, 2049), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Location'], {'on_delete': 'models.CASCADE'}), '(Location, on_delete=models.CASCADE)\n', (2013, 2049), False, 'from django.db import models\n'), ((2065, 2097), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Category'], {}), '(Category)\n', (2087, 2097), False, 'from django.db import models\n'), ((2113, 2158), 'django.db.models.DateField', 'models.DateField', (['"""date published"""'], {'null': '(True)'}), "('date published', null=True)\n", (2129, 2158), False, 'from django.db import models\n'), ((2171, 2213), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(70)', 'null': '(True)'}), '(max_length=70, null=True)\n', (2187, 2213), False, 'from django.db import models\n'), ((2226, 2283), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""images/%Y/%m/%d"""', 'null': '(True)'}), "(upload_to='images/%Y/%m/%d', null=True)\n", (2243, 2283), False, 'from django.db import models\n')]
|
from excel_text.factory import get_text_function
text = get_text_function({"decimal": ".", "thousands": ",", "raise": True})
|
[
"excel_text.factory.get_text_function"
] |
[((57, 125), 'excel_text.factory.get_text_function', 'get_text_function', (["{'decimal': '.', 'thousands': ',', 'raise': True}"], {}), "({'decimal': '.', 'thousands': ',', 'raise': True})\n", (74, 125), False, 'from excel_text.factory import get_text_function\n')]
|
import os
import typing as ta
from omnibus import lang
from ._registry import register
@lang.cached_nullary
def _load_dot_env() -> ta.Optional[ta.Mapping[str, str]]:
fp = os.path.join(os.path.dirname(os.path.dirname(__file__)), '../../.env')
if not os.path.isfile(fp):
return None
with open(fp, 'r') as f:
buf = f.read()
ret = {}
for line in buf.splitlines():
line = line.strip()
if not line or line.startswith('#'):
continue
k, _, v = line.partition('=')
k = k.strip()
v = v.strip()
ret[k] = v
os.environ[k] = v
return ret
@register
class EnvPlugin:
def pytest_addoption(self, parser):
parser.addoption('--no-dotenv', action='store_true', help='Disables dotenv')
def pytest_configure(self, config):
if not config.option.no_dotenv:
_load_dot_env()
|
[
"os.path.isfile",
"os.path.dirname"
] |
[((261, 279), 'os.path.isfile', 'os.path.isfile', (['fp'], {}), '(fp)\n', (275, 279), False, 'import os\n'), ((208, 233), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (223, 233), False, 'import os\n')]
|
# coding=utf-8
from dateutil.easter import EASTER_WESTERN
from holidata.utils import SmartDayArrow
from .holidays import Locale, Holiday
"""
source: https://www.riksdagen.se/sv/dokument-lagar/dokument/svensk-forfattningssamling/lag-1989253-om-allmanna-helgdagar_sfs-1989-253
source: https://www.riksdagen.se/sv/dokument-lagar/dokument/svensk-forfattningssamling/semesterlag-1977480_sfs-1977-480
"""
class sv_SE(Locale):
"""
01-01: [NF] Nyårsdagen
01-06: [NRF] Trettondedag jul
05-01: [NF] Första maj
06-06: [NF] Nationaldagen
12-24: [NRF] Julafton
12-25: [NRF] Juldagen
12-26: [NRF] Annandag jul
12-31: [NF] Nyårsafton
2 days before Easter: [NRV] Långfredagen
Easter: [NRV] Påskdagen
1 day after Easter: [NRV] Annandag påsk
39 days after Easter: [NRV] Kristi himmelsfärdsdag
49 days after Easter: [NRV] Pingstdagen
"""
locale = "sv-SE"
easter_type = EASTER_WESTERN
def __midsommar(self):
"""
Find the Saturday between 20 and 26 June
"""
return SmartDayArrow(self.year, 6, 19).shift_to_weekday('saturday', order=1, reverse=False)
def holiday_midsommarafton(self):
"""
The day before midsommardagen: [NV] Midsommarafton
"""
return [Holiday(
self.locale,
"",
self.__midsommar().shift(days=-1),
"Midsommarafton",
"NV"
)]
def holiday_midsommardagen(self):
"""
Saturday between 20 and 26 June: [NV] Midsommardagen
"""
return [Holiday(
self.locale,
"",
self.__midsommar(),
"Midsommardagen",
"NV"
)]
def holiday_alla_helgons_dag(self):
"""
Saturday between 31 October and 6 November: [NRV] Alla helgons dag
"""
return [Holiday(
self.locale,
"",
SmartDayArrow(self.year, 10, 30).shift_to_weekday('saturday', order=1, reverse=False),
"Alla helgons dag",
"NRV"
)]
|
[
"holidata.utils.SmartDayArrow"
] |
[((1054, 1085), 'holidata.utils.SmartDayArrow', 'SmartDayArrow', (['self.year', '(6)', '(19)'], {}), '(self.year, 6, 19)\n', (1067, 1085), False, 'from holidata.utils import SmartDayArrow\n'), ((1931, 1963), 'holidata.utils.SmartDayArrow', 'SmartDayArrow', (['self.year', '(10)', '(30)'], {}), '(self.year, 10, 30)\n', (1944, 1963), False, 'from holidata.utils import SmartDayArrow\n')]
|
import json
from datetime import datetime
import pandas as pd
import argparse
import boto3
import os
import itertools
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
from urllib3.exceptions import ProtocolError
class StdOutListener(StreamListener):
def __init__(self, duration):
self.start_date = datetime.utcnow()
self.duration = duration
self.stop = False
self.data = []
def on_data(self, data):
tweet = json.loads(data)
self.data.append(tweet)
if (datetime.utcnow() - self.start_date).total_seconds() > self.duration:
self.stop = True
return False
return True
def on_error(self, status):
if (datetime.utcnow() - self.start_date).total_seconds() > self.duration:
self.stop = True
return False
return True
def set_job():
parser = argparse.ArgumentParser(description='Collect tweets')
parser.add_argument('--duration', type=int, help='Number of seconds before to stop the collecter', default=300)
parser.add_argument('--configuration', type=str, help='Configuration file for the job', default="./configuration.json")
parser.add_argument('--candidates', type=str, help='Configuration file for the job', default="./candidates.json")
args = parser.parse_args()
duration = args.duration
with open(args.configuration) as f:
configuration = json.load(f)
with open(args.candidates) as f:
candidates = json.load(f)
return duration, configuration, candidates
file_extension = ".csv.gz"
if __name__ == '__main__':
duration, configuration, candidates = set_job()
print(datetime.utcnow())
print(f'Will save the tweets for the next {duration} sec')
print(candidates)
filters = [[item["name"]] + item["twitter_account"] for key, item in candidates.items()]
filters = list(itertools.chain.from_iterable(filters))
filters = list(dict.fromkeys(filters))
print("Filters:", filters)
collecter = StdOutListener(duration)
auth = OAuthHandler(configuration["twitter"]["consumer_key"], configuration["twitter"]["consumer_secret"])
auth.set_access_token(configuration["twitter"]["access_token"], configuration["twitter"]["access_token_secret"])
stream = Stream(auth, collecter)
while not collecter.stop:
try:
stream.filter(track=filters, languages=["en","fr"])
except ProtocolError:
continue
dfp_tweets = pd.DataFrame(collecter.data)
file_name = collecter.start_date.strftime('%Y%m%d_%H%M%S') + file_extension
dfp_tweets.to_csv("tmp" + file_extension, index=None)
s3_client = boto3.client('s3', aws_access_key_id=configuration["aws"]["key"], aws_secret_access_key=configuration["aws"]["secret"])
partition = collecter.start_date.strftime('%Y%m%d')
response = s3_client.upload_file("tmp" + file_extension, configuration["aws"]["bucket"], f'data/raw/twitter/{partition}/{file_name}')
print(datetime.utcnow())
print('DONE')
|
[
"pandas.DataFrame",
"json.load",
"argparse.ArgumentParser",
"boto3.client",
"json.loads",
"datetime.datetime.utcnow",
"tweepy.Stream",
"tweepy.OAuthHandler",
"itertools.chain.from_iterable"
] |
[((937, 990), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Collect tweets"""'}), "(description='Collect tweets')\n", (960, 990), False, 'import argparse\n'), ((2113, 2217), 'tweepy.OAuthHandler', 'OAuthHandler', (["configuration['twitter']['consumer_key']", "configuration['twitter']['consumer_secret']"], {}), "(configuration['twitter']['consumer_key'], configuration[\n 'twitter']['consumer_secret'])\n", (2125, 2217), False, 'from tweepy import OAuthHandler\n'), ((2343, 2366), 'tweepy.Stream', 'Stream', (['auth', 'collecter'], {}), '(auth, collecter)\n', (2349, 2366), False, 'from tweepy import Stream\n'), ((2544, 2572), 'pandas.DataFrame', 'pd.DataFrame', (['collecter.data'], {}), '(collecter.data)\n', (2556, 2572), True, 'import pandas as pd\n'), ((2728, 2851), 'boto3.client', 'boto3.client', (['"""s3"""'], {'aws_access_key_id': "configuration['aws']['key']", 'aws_secret_access_key': "configuration['aws']['secret']"}), "('s3', aws_access_key_id=configuration['aws']['key'],\n aws_secret_access_key=configuration['aws']['secret'])\n", (2740, 2851), False, 'import boto3\n'), ((368, 385), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (383, 385), False, 'from datetime import datetime\n'), ((514, 530), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (524, 530), False, 'import json\n'), ((1478, 1490), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1487, 1490), False, 'import json\n'), ((1550, 1562), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1559, 1562), False, 'import json\n'), ((1729, 1746), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1744, 1746), False, 'from datetime import datetime\n'), ((1946, 1984), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['filters'], {}), '(filters)\n', (1975, 1984), False, 'import itertools\n'), ((3052, 3069), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (3067, 3069), False, 'from datetime import datetime\n'), ((575, 592), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (590, 592), False, 'from datetime import datetime\n'), ((764, 781), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (779, 781), False, 'from datetime import datetime\n')]
|
import typing
import hypothesis
from hypothesis import strategies
from src.packages.rescue import Rescue as _Rescue
from src.packages.rat import Rat as _Rat
from src.packages.utils import sanitize, Platforms as _Platforms
import string
# Anything that isn't a whitespace other than the space character, and isn't a control character.
valid_characters = strategies.characters(blacklist_categories=["C", "Zl", "Zp"])
"""
Any character (ASCII / UNICODE) that isn't a control character or whitespace other than ' '.
"""
# Filtered by anything that doesn't make it through the sanitizer.
valid_text = (
strategies.text(valid_characters, min_size=10)
.filter(lambda data: sanitize(data) == data)
.filter(lambda data: data.isprintable())
)
"""
Generates probably valid text.
Shrinks towards smaller words.
"""
valid_word_chars = strategies.characters(blacklist_categories=["C", "Z"])
"""
Characters that are valid to be in a word
"""
valid_word = strategies.text(valid_word_chars, min_size=1)
"""
a single word (no whitespace)
Shrinks towards smaller words.
"""
valid_words = strategies.lists(valid_word, max_size=10)
"""
a list of valid words
Shrinks towards smaller lists and smaller words.
"""
_irc_nick_letters = strategies.characters(
whitelist_characters=f"{string.ascii_letters}{string.digits}" + r"\`_[]{}",
whitelist_categories=())
valid_irc_name = strategies.text(alphabet=_irc_nick_letters, min_size=3).filter(
lambda word: not word[0].isnumeric())
platform = strategies.sampled_from([_Platforms.PS, _Platforms.PC, _Platforms.XB])
""" Some platform """
rescue = strategies.builds(
_Rescue,
uuid=strategies.uuids(version=4), # generate some valid uuid4
client=valid_irc_name, # client should always be defined
# irc nickname may be any valid word, or None.
irc_nickname=strategies.one_of(valid_irc_name, strategies.none()),
platform=platform,
active=strategies.booleans(),
code_red=strategies.booleans(),
board_index=strategies.one_of(strategies.integers(min_value=1), strategies.none())
)
""" Strategy for generating a rescue. Shrinks towards smaller arguments """
def rescues(min_size: int, max_size: int):
""" builds a list of rescues, shrinks towards smaller lists and smaller rescues """
return strategies.lists(rescue, min_size=min_size, max_size=max_size,
unique_by=(
lambda case: case.irc_nickname, lambda case: case.board_index,
lambda case: case.client))
rat = strategies.builds(
_Rat,
uuid=strategies.uuids(version=4),
name=valid_word,
platforms=strategies.one_of(strategies.none(), platform)
)
""" Generates a valid rat object """
rats = strategies.lists(rat)
""" a list of rats """
|
[
"hypothesis.strategies.lists",
"hypothesis.strategies.characters",
"hypothesis.strategies.sampled_from",
"hypothesis.strategies.none",
"hypothesis.strategies.booleans",
"hypothesis.strategies.text",
"hypothesis.strategies.uuids",
"src.packages.utils.sanitize",
"hypothesis.strategies.integers"
] |
[((355, 416), 'hypothesis.strategies.characters', 'strategies.characters', ([], {'blacklist_categories': "['C', 'Zl', 'Zp']"}), "(blacklist_categories=['C', 'Zl', 'Zp'])\n", (376, 416), False, 'from hypothesis import strategies\n'), ((847, 901), 'hypothesis.strategies.characters', 'strategies.characters', ([], {'blacklist_categories': "['C', 'Z']"}), "(blacklist_categories=['C', 'Z'])\n", (868, 901), False, 'from hypothesis import strategies\n'), ((966, 1011), 'hypothesis.strategies.text', 'strategies.text', (['valid_word_chars'], {'min_size': '(1)'}), '(valid_word_chars, min_size=1)\n', (981, 1011), False, 'from hypothesis import strategies\n'), ((1098, 1139), 'hypothesis.strategies.lists', 'strategies.lists', (['valid_word'], {'max_size': '(10)'}), '(valid_word, max_size=10)\n', (1114, 1139), False, 'from hypothesis import strategies\n'), ((1241, 1372), 'hypothesis.strategies.characters', 'strategies.characters', ([], {'whitelist_characters': "(f'{string.ascii_letters}{string.digits}' + '\\\\`_[]{}')", 'whitelist_categories': '()'}), "(whitelist_characters=\n f'{string.ascii_letters}{string.digits}' + '\\\\`_[]{}',\n whitelist_categories=())\n", (1262, 1372), False, 'from hypothesis import strategies\n'), ((1507, 1577), 'hypothesis.strategies.sampled_from', 'strategies.sampled_from', (['[_Platforms.PS, _Platforms.PC, _Platforms.XB]'], {}), '([_Platforms.PS, _Platforms.PC, _Platforms.XB])\n', (1530, 1577), False, 'from hypothesis import strategies\n'), ((2754, 2775), 'hypothesis.strategies.lists', 'strategies.lists', (['rat'], {}), '(rat)\n', (2770, 2775), False, 'from hypothesis import strategies\n'), ((2294, 2466), 'hypothesis.strategies.lists', 'strategies.lists', (['rescue'], {'min_size': 'min_size', 'max_size': 'max_size', 'unique_by': '(lambda case: case.irc_nickname, lambda case: case.board_index, lambda case:\n case.client)'}), '(rescue, min_size=min_size, max_size=max_size, unique_by=(\n lambda case: case.irc_nickname, lambda case: case.board_index, lambda\n case: case.client))\n', (2310, 2466), False, 'from hypothesis import strategies\n'), ((1390, 1445), 'hypothesis.strategies.text', 'strategies.text', ([], {'alphabet': '_irc_nick_letters', 'min_size': '(3)'}), '(alphabet=_irc_nick_letters, min_size=3)\n', (1405, 1445), False, 'from hypothesis import strategies\n'), ((1650, 1677), 'hypothesis.strategies.uuids', 'strategies.uuids', ([], {'version': '(4)'}), '(version=4)\n', (1666, 1677), False, 'from hypothesis import strategies\n'), ((1926, 1947), 'hypothesis.strategies.booleans', 'strategies.booleans', ([], {}), '()\n', (1945, 1947), False, 'from hypothesis import strategies\n'), ((1962, 1983), 'hypothesis.strategies.booleans', 'strategies.booleans', ([], {}), '()\n', (1981, 1983), False, 'from hypothesis import strategies\n'), ((2596, 2623), 'hypothesis.strategies.uuids', 'strategies.uuids', ([], {'version': '(4)'}), '(version=4)\n', (2612, 2623), False, 'from hypothesis import strategies\n'), ((1872, 1889), 'hypothesis.strategies.none', 'strategies.none', ([], {}), '()\n', (1887, 1889), False, 'from hypothesis import strategies\n'), ((2019, 2051), 'hypothesis.strategies.integers', 'strategies.integers', ([], {'min_value': '(1)'}), '(min_value=1)\n', (2038, 2051), False, 'from hypothesis import strategies\n'), ((2053, 2070), 'hypothesis.strategies.none', 'strategies.none', ([], {}), '()\n', (2068, 2070), False, 'from hypothesis import strategies\n'), ((2678, 2695), 'hypothesis.strategies.none', 'strategies.none', ([], {}), '()\n', (2693, 2695), False, 'from hypothesis import strategies\n'), ((604, 650), 'hypothesis.strategies.text', 'strategies.text', (['valid_characters'], {'min_size': '(10)'}), '(valid_characters, min_size=10)\n', (619, 650), False, 'from hypothesis import strategies\n'), ((680, 694), 'src.packages.utils.sanitize', 'sanitize', (['data'], {}), '(data)\n', (688, 694), False, 'from src.packages.utils import sanitize, Platforms as _Platforms\n')]
|
"""Added unique for name in Tiers
Revision ID: <KEY>
Revises: 330568e8928c
Create Date: 2015-02-06 12:05:09.151253
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '330568e8928c'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_unique_constraint(op.f('uq_usagetiers_name'), 'usagetiers', ['name'])
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(op.f('uq_usagetiers_name'), 'usagetiers', type_='unique')
### end Alembic commands ###
|
[
"alembic.op.f"
] |
[((374, 400), 'alembic.op.f', 'op.f', (['"""uq_usagetiers_name"""'], {}), "('uq_usagetiers_name')\n", (378, 400), False, 'from alembic import op\n'), ((565, 591), 'alembic.op.f', 'op.f', (['"""uq_usagetiers_name"""'], {}), "('uq_usagetiers_name')\n", (569, 591), False, 'from alembic import op\n')]
|
from collections import defaultdict
from typing import Union, Set, List
import click
import logging
import networkx as nx
import os
import rdflib
import uuid
from prefixcommons.curie_util import read_remote_jsonld_context
from rdflib import Namespace, URIRef
from rdflib.namespace import RDF, RDFS, OWL
from kgx.prefix_manager import PrefixManager
from kgx.rdf_graph_mixin import RdfGraphMixin
from kgx.transformer import Transformer
from kgx.utils.kgx_utils import get_toolkit
from kgx.utils.rdf_utils import find_category, property_mapping
biolink_prefix_map = read_remote_jsonld_context('https://biolink.github.io/biolink-model/context.jsonld')
# TODO: use OBO IRI from biolink model context once https://github.com/biolink/biolink-model/issues/211 is resolved
OBO = Namespace('http://purl.obolibrary.org/obo/')
OBAN = Namespace(biolink_prefix_map['OBAN'])
PMID = Namespace(biolink_prefix_map['PMID'])
BIOLINK = Namespace(biolink_prefix_map['@vocab'])
DEFAULT_EDGE_LABEL = 'related_to'
class RdfTransformer(RdfGraphMixin, Transformer):
"""
Transformer that parses RDF and loads triples, as nodes and edges, into a networkx.MultiDiGraph
This is the base class which is used to implement other RDF-based transformers.
"""
OWL_PREDICATES = [RDFS.subClassOf, OWL.sameAs, OWL.equivalentClass]
is_about = URIRef('http://purl.obolibrary.org/obo/IAO_0000136')
has_subsequence = URIRef('http://purl.obolibrary.org/obo/RO_0002524')
is_subsequence_of = URIRef('http://purl.obolibrary.org/obo/RO_0002525')
def __init__(self, source_graph: nx.MultiDiGraph = None):
super().__init__(source_graph)
self.ontologies = []
self.prefix_manager = PrefixManager()
self.toolkit = get_toolkit()
def parse(self, filename: str = None, input_format: str = None, provided_by: str = None) -> None:
"""
Parse a file, containing triples, into a rdflib.Graph
The file can be either a 'turtle' file or any other format supported by rdflib.
Parameters
----------
filename : str
File to read from.
input_format : str
The input file format. If None is provided then the format is guessed using rdflib.util.guess_format()
provided_by : str
Define the source providing the input file.
"""
rdfgraph = rdflib.Graph()
if input_format is None:
input_format = rdflib.util.guess_format(filename)
logging.info("Parsing {} with '{}' format".format(filename, input_format))
rdfgraph.parse(filename, format=input_format)
logging.info("{} parsed with {} triples".format(filename, len(rdfgraph)))
# TODO: use source from RDF
if provided_by:
self.graph_metadata['provided_by'] = [provided_by]
else:
if isinstance(filename, str):
self.graph_metadata['provided_by'] = [os.path.basename(filename)]
elif hasattr(filename, 'name'):
self.graph_metadata['provided_by'] = [filename.name]
self.load_networkx_graph(rdfgraph)
self.load_node_attributes(rdfgraph)
self.report()
def add_ontology(self, file: str) -> None:
"""
Load an ontology OWL into a Rdflib.Graph
# TODO: is there better way of pre-loading required ontologies?
"""
ont = rdflib.Graph()
logging.info("Parsing {}".format(file))
ont.parse(file, format=rdflib.util.guess_format(file))
self.ontologies.append(ont)
logging.info("{} parsed with {} triples".format(file, len(ont)))
def load_networkx_graph(self, rdfgraph: rdflib.Graph = None, predicates: Set[URIRef] = None, **kwargs) -> None:
"""
Walk through the rdflib.Graph and load all required triples into networkx.MultiDiGraph
By default this method loads the following predicates,
- RDFS.subClassOf
- OWL.sameAs
- OWL.equivalentClass
- is_about (IAO:0000136)
- has_subsequence (RO:0002524)
- is_subsequence_of (RO:0002525)
This behavior can be overridden by providing a list of rdflib.URIRef that ought to be loaded
via the 'predicates' parameter.
Parameters
----------
rdfgraph: rdflib.Graph
Graph containing nodes and edges
predicates: list
A list of rdflib.URIRef representing predicates to be loaded
kwargs: dict
Any additional arguments
"""
if predicates is None:
predicates = set()
predicates = predicates.union(self.OWL_PREDICATES, [self.is_about, self.is_subsequence_of, self.has_subsequence])
triples = rdfgraph.triples((None, None, None))
logging.info("Loading from rdflib.Graph to networkx.MultiDiGraph")
with click.progressbar(list(triples), label='Progress') as bar:
for s, p, o in bar:
if (p == self.is_about) and (p in predicates):
logging.info("Loading is_about predicate")
# if predicate is 'is_about' then treat object as publication
self.add_node_attribute(o, key=s, value='publications')
elif (p == self.is_subsequence_of) and (p in predicates):
logging.info("Loading is_subsequence_of predicate")
# if predicate is 'is_subsequence_of'
self.add_edge(s, o, self.is_subsequence_of)
elif (p == self.has_subsequence) and (p in predicates):
logging.info("Loading has_subsequence predicate")
# if predicate is 'has_subsequence', interpret the inverse relation 'is_subsequence_of'
self.add_edge(o, s, self.is_subsequence_of)
elif any(p.lower() == x.lower() for x in predicates):
logging.info("Loading {} predicate, additional predicate".format(p))
self.add_edge(s, o, p)
def load_node_attributes(self, rdfgraph: rdflib.Graph) -> None:
"""
This method loads the properties of nodes into networkx.MultiDiGraph
As there can be many values for a single key, all properties are lists by default.
This method assumes that RdfTransformer.load_edges() has been called, and that all nodes
have had their IRI as an attribute.
Parameters
----------
rdfgraph: rdflib.Graph
Graph containing nodes and edges
"""
logging.info("Loading node attributes from rdflib.Graph into networkx.MultiDiGraph")
with click.progressbar(self.graph.nodes(data=True), label='Progress') as bar:
for n, data in bar:
if 'iri' in data:
uriref = URIRef(data['iri'])
else:
provided_by = self.graph_metadata.get('provided_by')
logging.warning("No 'iri' property for {} provided by {}".format(n, provided_by))
continue
for s, p, o in rdfgraph.triples((uriref, None, None)):
if p in property_mapping:
# predicate corresponds to a property on subject
if not (isinstance(s, rdflib.term.BNode) and isinstance(o, rdflib.term.BNode)):
# neither subject nor object is a BNode
self.add_node_attribute(uriref, key=p, value=o)
elif isinstance(o, rdflib.term.Literal):
# object is a Literal
# i.e. predicate corresponds to a property on subject
self.add_node_attribute(uriref, key=p, value=o)
category = find_category(uriref, [rdfgraph] + self.ontologies)
logging.debug("Inferred '{}' as category for node '{}'".format(category, uriref))
if category is not None:
self.add_node_attribute(uriref, key='category', value=category)
class ObanRdfTransformer(RdfTransformer):
"""
Transformer that parses a 'turtle' file and loads triples, as nodes and edges, into a networkx.MultiDiGraph
This Transformer supports OBAN style of modeling where,
- it dereifies OBAN.association triples into a property graph form
- it reifies property graph into OBAN.association triples
"""
def load_networkx_graph(self, rdfgraph: rdflib.Graph = None, predicates: Set[URIRef] = None, **kwargs) -> None:
"""
Walk through the rdflib.Graph and load all triples into networkx.MultiDiGraph
Parameters
----------
rdfgraph: rdflib.Graph
Graph containing nodes and edges
predicates: list
A list of rdflib.URIRef representing predicates to be loaded
kwargs: dict
Any additional arguments
"""
if not predicates:
predicates = set()
predicates = predicates.union(self.OWL_PREDICATES)
for rel in predicates:
triples = rdfgraph.triples((None, rel, None))
with click.progressbar(list(triples), label="Loading relation '{}'".format(rel)) as bar:
for s, p, o in bar:
if not (isinstance(s, rdflib.term.BNode) and isinstance(o, rdflib.term.BNode)):
self.add_edge(s, o, p)
# get all OBAN.associations
associations = rdfgraph.subjects(RDF.type, OBAN.association)
logging.info("Loading from rdflib.Graph into networkx.MultiDiGraph")
with click.progressbar(list(associations), label='Progress') as bar:
for association in bar:
edge_attr = defaultdict(list)
edge_attr['id'].append(str(association))
# dereify OBAN.association
subject = None
object = None
predicate = None
# get all triples for association
for s, p, o in rdfgraph.triples((association, None, None)):
if o.startswith(PMID):
edge_attr['publications'].append(o)
if p in property_mapping or isinstance(o, rdflib.term.Literal):
p = property_mapping.get(p, p)
if p == 'subject':
subject = o
elif p == 'object':
object = o
elif p == 'predicate':
predicate = o
else:
edge_attr[p].append(o)
if predicate is None:
logging.warning("No 'predicate' for OBAN.association {}; defaulting to '{}'".format(association, self.DEFAULT_EDGE_LABEL))
predicate = DEFAULT_EDGE_LABEL
if subject and object:
self.add_edge(subject, object, predicate)
for key, values in edge_attr.items():
for value in values:
self.add_edge_attribute(subject, object, predicate, key=key, value=value)
def uriref(self, identifier: str) -> URIRef:
"""
Generate a rdflib.URIRef for a given string.
Parameters
----------
identifier: str
Identifier as string.
Returns
-------
rdflib.URIRef
URIRef form of the input `identifier`
"""
if identifier in property_mapping:
uri = property_mapping[identifier]
else:
uri = self.prefix_manager.expand(identifier)
return URIRef(uri)
def save_attribute(self, rdfgraph: rdflib.Graph, object_iri: URIRef, key: str, value: Union[List[str], str]) -> None:
"""
Saves a node or edge attributes from networkx.MultiDiGraph into rdflib.Graph
Intended to be used within `ObanRdfTransformer.save()`.
Parameters
----------
rdfgraph: rdflib.Graph
Graph containing nodes and edges
object_iri: rdflib.URIRef
IRI of an object in the graph
key: str
The name of the attribute
value: Union[List[str], str]
The value of the attribute; Can be either a List or just a string
"""
element = self.toolkit.get_element(key)
if element is None:
return
if element.is_a == 'association slot' or element.is_a == 'node property':
if key in property_mapping:
key = property_mapping[key]
else:
key = URIRef('{}{}'.format(BIOLINK, element.name.replace(' ', '_')))
if not isinstance(value, (list, tuple, set)):
value = [value]
for value in value:
if element.range == 'iri type':
value = URIRef('{}{}'.format(BIOLINK, ''.join(value.title().split(' '))))
rdfgraph.add((object_iri, key, rdflib.term.Literal(value)))
def save(self, filename: str = None, output_format: str = "turtle", **kwargs) -> None:
"""
Transform networkx.MultiDiGraph into rdflib.Graph that follow OBAN-style reification and export
this graph as a file (TTL, by default).
Parameters
----------
filename: str
Filename to write to
output_format: str
The output format; default: 'turtle'
kwargs: dict
Any additional arguments
"""
# Make a new rdflib.Graph() instance to generate RDF triples
rdfgraph = rdflib.Graph()
# Register OBAN URL prefix (http://purl.org/oban/) as `OBAN` in the namespace.
rdfgraph.bind('OBAN', str(OBAN))
# <http://purl.obolibrary.org/obo/RO_0002558> is currently stored as OBO:RO_0002558 rather than RO:0002558
# because of the bug in rdflib. See https://github.com/RDFLib/rdflib/issues/632
rdfgraph.bind('OBO', str(OBO))
rdfgraph.bind('biolink', str(BIOLINK))
# saving all nodes
for n, data in self.graph.nodes(data=True):
if 'iri' not in n:
uriRef = self.uriref(n)
else:
uriRef = URIRef(data['iri'])
for key, value in data.items():
if key not in ['id', 'iri']:
self.save_attribute(rdfgraph, uriRef, key=key, value=value)
# saving all edges
for u, v, data in self.graph.edges(data=True):
if 'relation' not in data:
raise Exception('Relation is a required edge property in the biolink model, edge {} --> {}'.format(u, v))
if 'id' in data and data['id'] is not None:
assoc_id = URIRef(data['id'])
else:
# generating a UUID for association
assoc_id = URIRef('urn:uuid:{}'.format(uuid.uuid4()))
rdfgraph.add((assoc_id, RDF.type, OBAN.association))
rdfgraph.add((assoc_id, OBAN.association_has_subject, self.uriref(u)))
rdfgraph.add((assoc_id, OBAN.association_has_predicate, self.uriref(data['relation'])))
rdfgraph.add((assoc_id, OBAN.association_has_object, self.uriref(v)))
for key, value in data.items():
if key not in ['subject', 'relation', 'object']:
self.save_attribute(rdfgraph, assoc_id, key=key, value=value)
# Serialize the graph into the file.
rdfgraph.serialize(destination=filename, format=output_format)
class RdfOwlTransformer(RdfTransformer):
"""
Transformer that parses an OWL ontology in RDF, while retaining class-class relationships.
"""
def load_networkx_graph(self, rdfgraph: rdflib.Graph = None, predicates: Set[URIRef] = None, **kwargs) -> None:
"""
Walk through the rdflib.Graph and load all triples into networkx.MultiDiGraph
Parameters
----------
rdfgraph: rdflib.Graph
Graph containing nodes and edges
predicates: list
A list of rdflib.URIRef representing predicates to be loaded
kwargs: dict
Any additional arguments
"""
triples = rdfgraph.triples((None, RDFS.subClassOf, None))
logging.info("Loading from rdflib.Graph to networkx.MultiDiGraph")
with click.progressbar(list(triples), label='Progress') as bar:
for s, p, o in bar:
# ignoring blank nodes
if isinstance(s, rdflib.term.BNode):
continue
pred = None
parent = None
# TODO: does this block load all relevant bits from an OWL?
if isinstance(o, rdflib.term.BNode):
# C SubClassOf R some D
for x in rdfgraph.objects(o, OWL.onProperty):
pred = x
for x in rdfgraph.objects(o, OWL.someValuesFrom):
parent = x
if pred is None or parent is None:
logging.warning("Do not know how to handle BNode: {}".format(o))
continue
else:
# C SubClassOf D (C and D are named classes)
pred = p
parent = o
self.add_edge(s, parent, pred)
relations = rdfgraph.subjects(RDF.type, OWL.ObjectProperty)
logging.info("Loading relations")
with click.progressbar(relations, label='Progress') as bar:
for relation in bar:
for _, p, o in rdfgraph.triples((relation, None, None)):
if o.startswith('http://purl.obolibrary.org/obo/RO_'):
self.add_edge(relation, o, p)
else:
self.add_node_attribute(relation, key=p, value=o)
self.add_node_attribute(relation, key='category', value='relation')
|
[
"prefixcommons.curie_util.read_remote_jsonld_context",
"kgx.utils.kgx_utils.get_toolkit",
"rdflib.Graph",
"click.progressbar",
"kgx.utils.rdf_utils.find_category",
"os.path.basename",
"uuid.uuid4",
"kgx.prefix_manager.PrefixManager",
"rdflib.term.Literal",
"rdflib.URIRef",
"rdflib.Namespace",
"logging.info",
"rdflib.util.guess_format",
"collections.defaultdict",
"kgx.utils.rdf_utils.property_mapping.get"
] |
[((566, 655), 'prefixcommons.curie_util.read_remote_jsonld_context', 'read_remote_jsonld_context', (['"""https://biolink.github.io/biolink-model/context.jsonld"""'], {}), "(\n 'https://biolink.github.io/biolink-model/context.jsonld')\n", (592, 655), False, 'from prefixcommons.curie_util import read_remote_jsonld_context\n'), ((774, 818), 'rdflib.Namespace', 'Namespace', (['"""http://purl.obolibrary.org/obo/"""'], {}), "('http://purl.obolibrary.org/obo/')\n", (783, 818), False, 'from rdflib import Namespace, URIRef\n'), ((826, 863), 'rdflib.Namespace', 'Namespace', (["biolink_prefix_map['OBAN']"], {}), "(biolink_prefix_map['OBAN'])\n", (835, 863), False, 'from rdflib import Namespace, URIRef\n'), ((871, 908), 'rdflib.Namespace', 'Namespace', (["biolink_prefix_map['PMID']"], {}), "(biolink_prefix_map['PMID'])\n", (880, 908), False, 'from rdflib import Namespace, URIRef\n'), ((919, 958), 'rdflib.Namespace', 'Namespace', (["biolink_prefix_map['@vocab']"], {}), "(biolink_prefix_map['@vocab'])\n", (928, 958), False, 'from rdflib import Namespace, URIRef\n'), ((1334, 1386), 'rdflib.URIRef', 'URIRef', (['"""http://purl.obolibrary.org/obo/IAO_0000136"""'], {}), "('http://purl.obolibrary.org/obo/IAO_0000136')\n", (1340, 1386), False, 'from rdflib import Namespace, URIRef\n'), ((1409, 1460), 'rdflib.URIRef', 'URIRef', (['"""http://purl.obolibrary.org/obo/RO_0002524"""'], {}), "('http://purl.obolibrary.org/obo/RO_0002524')\n", (1415, 1460), False, 'from rdflib import Namespace, URIRef\n'), ((1485, 1536), 'rdflib.URIRef', 'URIRef', (['"""http://purl.obolibrary.org/obo/RO_0002525"""'], {}), "('http://purl.obolibrary.org/obo/RO_0002525')\n", (1491, 1536), False, 'from rdflib import Namespace, URIRef\n'), ((1698, 1713), 'kgx.prefix_manager.PrefixManager', 'PrefixManager', ([], {}), '()\n', (1711, 1713), False, 'from kgx.prefix_manager import PrefixManager\n'), ((1737, 1750), 'kgx.utils.kgx_utils.get_toolkit', 'get_toolkit', ([], {}), '()\n', (1748, 1750), False, 'from kgx.utils.kgx_utils import get_toolkit\n'), ((2366, 2380), 'rdflib.Graph', 'rdflib.Graph', ([], {}), '()\n', (2378, 2380), False, 'import rdflib\n'), ((3389, 3403), 'rdflib.Graph', 'rdflib.Graph', ([], {}), '()\n', (3401, 3403), False, 'import rdflib\n'), ((4804, 4870), 'logging.info', 'logging.info', (['"""Loading from rdflib.Graph to networkx.MultiDiGraph"""'], {}), "('Loading from rdflib.Graph to networkx.MultiDiGraph')\n", (4816, 4870), False, 'import logging\n'), ((6570, 6659), 'logging.info', 'logging.info', (['"""Loading node attributes from rdflib.Graph into networkx.MultiDiGraph"""'], {}), "(\n 'Loading node attributes from rdflib.Graph into networkx.MultiDiGraph')\n", (6582, 6659), False, 'import logging\n'), ((9556, 9624), 'logging.info', 'logging.info', (['"""Loading from rdflib.Graph into networkx.MultiDiGraph"""'], {}), "('Loading from rdflib.Graph into networkx.MultiDiGraph')\n", (9568, 9624), False, 'import logging\n'), ((11730, 11741), 'rdflib.URIRef', 'URIRef', (['uri'], {}), '(uri)\n', (11736, 11741), False, 'from rdflib import Namespace, URIRef\n'), ((13690, 13704), 'rdflib.Graph', 'rdflib.Graph', ([], {}), '()\n', (13702, 13704), False, 'import rdflib\n'), ((16361, 16427), 'logging.info', 'logging.info', (['"""Loading from rdflib.Graph to networkx.MultiDiGraph"""'], {}), "('Loading from rdflib.Graph to networkx.MultiDiGraph')\n", (16373, 16427), False, 'import logging\n'), ((17536, 17569), 'logging.info', 'logging.info', (['"""Loading relations"""'], {}), "('Loading relations')\n", (17548, 17569), False, 'import logging\n'), ((2442, 2476), 'rdflib.util.guess_format', 'rdflib.util.guess_format', (['filename'], {}), '(filename)\n', (2466, 2476), False, 'import rdflib\n'), ((17583, 17629), 'click.progressbar', 'click.progressbar', (['relations'], {'label': '"""Progress"""'}), "(relations, label='Progress')\n", (17600, 17629), False, 'import click\n'), ((3483, 3513), 'rdflib.util.guess_format', 'rdflib.util.guess_format', (['file'], {}), '(file)\n', (3507, 3513), False, 'import rdflib\n'), ((7806, 7857), 'kgx.utils.rdf_utils.find_category', 'find_category', (['uriref', '([rdfgraph] + self.ontologies)'], {}), '(uriref, [rdfgraph] + self.ontologies)\n', (7819, 7857), False, 'from kgx.utils.rdf_utils import find_category, property_mapping\n'), ((9766, 9783), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (9777, 9783), False, 'from collections import defaultdict\n'), ((14318, 14337), 'rdflib.URIRef', 'URIRef', (["data['iri']"], {}), "(data['iri'])\n", (14324, 14337), False, 'from rdflib import Namespace, URIRef\n'), ((14836, 14854), 'rdflib.URIRef', 'URIRef', (["data['id']"], {}), "(data['id'])\n", (14842, 14854), False, 'from rdflib import Namespace, URIRef\n'), ((2931, 2957), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (2947, 2957), False, 'import os\n'), ((5058, 5100), 'logging.info', 'logging.info', (['"""Loading is_about predicate"""'], {}), "('Loading is_about predicate')\n", (5070, 5100), False, 'import logging\n'), ((6836, 6855), 'rdflib.URIRef', 'URIRef', (["data['iri']"], {}), "(data['iri'])\n", (6842, 6855), False, 'from rdflib import Namespace, URIRef\n'), ((5353, 5404), 'logging.info', 'logging.info', (['"""Loading is_subsequence_of predicate"""'], {}), "('Loading is_subsequence_of predicate')\n", (5365, 5404), False, 'import logging\n'), ((10321, 10347), 'kgx.utils.rdf_utils.property_mapping.get', 'property_mapping.get', (['p', 'p'], {}), '(p, p)\n', (10341, 10347), False, 'from kgx.utils.rdf_utils import find_category, property_mapping\n'), ((13076, 13102), 'rdflib.term.Literal', 'rdflib.term.Literal', (['value'], {}), '(value)\n', (13095, 13102), False, 'import rdflib\n'), ((14980, 14992), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (14990, 14992), False, 'import uuid\n'), ((5619, 5668), 'logging.info', 'logging.info', (['"""Loading has_subsequence predicate"""'], {}), "('Loading has_subsequence predicate')\n", (5631, 5668), False, 'import logging\n')]
|
#
# Copyright (c) 2016 Nordic Semiconductor ASA
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of Nordic Semiconductor ASA nor the names of other
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# 4. This software must only be used in or with a processor manufactured by Nordic
# Semiconductor ASA, or in or with a processor manufactured by a third party that
# is used in combination with a processor manufactured by Nordic Semiconductor.
#
# 5. Any software provided in binary or object form under this license must not be
# reverse engineered, decompiled, modified and/or disassembled.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import importlib
from blatann.nrf.nrf_dll_load import driver
UNIT_0_625_MS = 625 # Unit used for scanning and advertising parameters
UNIT_1_25_MS = 1250 # Unit used for connection interval parameters
UNIT_10_MS = 10000 # Unit used for supervision timeout parameter
def msec_to_units(time_ms, resolution):
"""Convert milliseconds to BLE specific time units."""
units = time_ms * 1000 / resolution
return int(units)
def units_to_msec(units, resolution):
"""Convert BLE specific units to milliseconds."""
time_ms = units * float(resolution) / 1000
return time_ms
def char_array_to_list(array_pointer, length):
"""Convert char_array to python list."""
data_array = driver.char_array.frompointer(array_pointer)
data_list = _populate_list(data_array, length)
return data_list
def uint8_array_to_list(array_pointer, length):
"""Convert uint8_array to python list."""
data_array = driver.uint8_array.frompointer(array_pointer)
data_list = _populate_list(data_array, length)
return data_list
def uint16_array_to_list(array_pointer, length):
"""Convert uint16_array to python list."""
data_array = driver.uint16_array.frompointer(array_pointer)
data_list = _populate_list(data_array, length)
return data_list
def service_array_to_list(array_pointer, length):
"""Convert ble_gattc_service_array to python list."""
data_array = driver.ble_gattc_service_array.frompointer(array_pointer)
data_list = _populate_list(data_array, length)
return data_list
def include_array_to_list(array_pointer, length):
"""Convert ble_gattc_include_array to python list."""
data_array = driver.ble_gattc_include_array.frompointer(array_pointer)
data_list = _populate_list(data_array, length)
return data_list
def ble_gattc_char_array_to_list(array_pointer, length):
"""Convert ble_gattc_char_array to python list."""
data_array = driver.ble_gattc_char_array.frompointer(array_pointer)
data_list = _populate_list(data_array, length)
return data_list
def desc_array_to_list(array_pointer, length):
"""Convert ble_gattc_desc_array to python list."""
data_array = driver.ble_gattc_desc_array.frompointer(array_pointer)
data_list = _populate_list(data_array, length)
return data_list
def ble_gattc_attr_info16_array_to_list(array_pointer, length):
"""Convert ble_gattc_attr_info16_array to python list"""
data_array = driver.ble_gattc_attr_info16_array.frompointer(array_pointer)
data_list = _populate_array(data_array, length)
return data_list
def ble_gattc_attr_info128_array_to_list(array_pointer, length):
"""Convert ble_gattc_attr_info128_array to python list"""
data_array = driver.ble_gattc_attr_info128_array.frompointer(array_pointer)
data_list = _populate_array(data_array, length)
return data_list
def handle_value_array_to_list(array_pointer, length):
"""Convert ble_gattc_handle_value_array to python list."""
data_array = driver.ble_gattc_handle_value_array.frompointer(array_pointer)
data_list = _populate_list(data_array, length)
return data_list
def attr_info_array_to_list(array_pointer, length):
"""Convert ble_gattc_attr_info_array to python list."""
data_array = driver.ble_gattc_attr_info_array.frompointer(array_pointer)
data_list = _populate_list(data_array, length)
return data_list
def attr_info16_array_to_list(array_pointer, length):
"""Convert ble_gattc_attr_info16_array to python list."""
data_array = driver.ble_gattc_attr_info16_array.frompointer(array_pointer)
data_list = _populate_list(data_array, length)
return data_list
def attr_info128_array_to_list(array_pointer, length):
"""Convert ble_gattc_attr_info128_array to python list."""
data_array = driver.ble_gattc_attr_info128_array.frompointer(array_pointer)
data_list = _populate_list(data_array, length)
return data_list
def serial_port_desc_array_to_list(array_pointer, length):
"""Convert sd_rpc_serial_port_desc_array to python list."""
data_array = driver.sd_rpc_serial_port_desc_array.frompointer(array_pointer)
data_list = _populate_list(data_array, length)
return data_list
def _populate_list(data_array, length):
data_list = []
for i in range(0, length):
data_list.append(data_array[i])
return data_list
def list_to_char_array(data_list):
"""Convert python list to char_array."""
data_array = _populate_array(data_list, driver.char_array)
return data_array
def list_to_uint8_array(data_list):
"""Convert python list to uint8_array."""
data_array = _populate_array(data_list, driver.uint8_array)
return data_array
def list_to_uint16_array(data_list):
"""Convert python list to uint16_array."""
data_array = _populate_array(data_list, driver.uint16_array)
return data_array
def list_to_service_array(data_list):
"""Convert python list to ble_gattc_service_array."""
data_array = _populate_array(data_list, driver.ble_gattc_service_array)
return data_array
def list_to_include_array(data_list):
"""Convert python list to ble_gattc_include_array."""
data_array = _populate_array(data_list, driver.ble_gattc_include_array)
return data_array
def list_to_ble_gattc_char_array(data_list):
"""Convert python list to ble_gattc_char_array."""
data_array = _populate_array(data_list, driver.ble_gattc_char_array)
return data_array
def list_to_desc_array(data_list):
"""Convert python list to ble_gattc_desc_array."""
data_array = _populate_array(data_list, driver.ble_gattc_desc_array)
return data_array
def list_to_handle_value_array(data_list):
"""Convert python list to ble_gattc_handle_value_array."""
data_array = _populate_array(data_list, driver.ble_gattc_handle_value_array)
return data_array
def list_to_serial_port_desc_array(data_list):
"""Convert python list to sd_rpc_serial_port_desc_array."""
data_array = _populate_array(data_list, driver.sd_rpc_serial_port_desc_array)
return data_array
def _populate_array(data_list, array_type):
length = len(data_list)
data_array = array_type(length)
for i in range(0, length):
data_array[i] = data_list[i]
return data_array
|
[
"blatann.nrf.nrf_dll_load.driver.sd_rpc_serial_port_desc_array.frompointer",
"blatann.nrf.nrf_dll_load.driver.ble_gattc_desc_array.frompointer",
"blatann.nrf.nrf_dll_load.driver.ble_gattc_attr_info16_array.frompointer",
"blatann.nrf.nrf_dll_load.driver.char_array.frompointer",
"blatann.nrf.nrf_dll_load.driver.uint16_array.frompointer",
"blatann.nrf.nrf_dll_load.driver.ble_gattc_handle_value_array.frompointer",
"blatann.nrf.nrf_dll_load.driver.ble_gattc_include_array.frompointer",
"blatann.nrf.nrf_dll_load.driver.ble_gattc_char_array.frompointer",
"blatann.nrf.nrf_dll_load.driver.ble_gattc_attr_info128_array.frompointer",
"blatann.nrf.nrf_dll_load.driver.ble_gattc_service_array.frompointer",
"blatann.nrf.nrf_dll_load.driver.uint8_array.frompointer",
"blatann.nrf.nrf_dll_load.driver.ble_gattc_attr_info_array.frompointer"
] |
[((2697, 2741), 'blatann.nrf.nrf_dll_load.driver.char_array.frompointer', 'driver.char_array.frompointer', (['array_pointer'], {}), '(array_pointer)\n', (2726, 2741), False, 'from blatann.nrf.nrf_dll_load import driver\n'), ((2927, 2972), 'blatann.nrf.nrf_dll_load.driver.uint8_array.frompointer', 'driver.uint8_array.frompointer', (['array_pointer'], {}), '(array_pointer)\n', (2957, 2972), False, 'from blatann.nrf.nrf_dll_load import driver\n'), ((3160, 3206), 'blatann.nrf.nrf_dll_load.driver.uint16_array.frompointer', 'driver.uint16_array.frompointer', (['array_pointer'], {}), '(array_pointer)\n', (3191, 3206), False, 'from blatann.nrf.nrf_dll_load import driver\n'), ((3406, 3463), 'blatann.nrf.nrf_dll_load.driver.ble_gattc_service_array.frompointer', 'driver.ble_gattc_service_array.frompointer', (['array_pointer'], {}), '(array_pointer)\n', (3448, 3463), False, 'from blatann.nrf.nrf_dll_load import driver\n'), ((3663, 3720), 'blatann.nrf.nrf_dll_load.driver.ble_gattc_include_array.frompointer', 'driver.ble_gattc_include_array.frompointer', (['array_pointer'], {}), '(array_pointer)\n', (3705, 3720), False, 'from blatann.nrf.nrf_dll_load import driver\n'), ((3924, 3978), 'blatann.nrf.nrf_dll_load.driver.ble_gattc_char_array.frompointer', 'driver.ble_gattc_char_array.frompointer', (['array_pointer'], {}), '(array_pointer)\n', (3963, 3978), False, 'from blatann.nrf.nrf_dll_load import driver\n'), ((4172, 4226), 'blatann.nrf.nrf_dll_load.driver.ble_gattc_desc_array.frompointer', 'driver.ble_gattc_desc_array.frompointer', (['array_pointer'], {}), '(array_pointer)\n', (4211, 4226), False, 'from blatann.nrf.nrf_dll_load import driver\n'), ((4443, 4504), 'blatann.nrf.nrf_dll_load.driver.ble_gattc_attr_info16_array.frompointer', 'driver.ble_gattc_attr_info16_array.frompointer', (['array_pointer'], {}), '(array_pointer)\n', (4489, 4504), False, 'from blatann.nrf.nrf_dll_load import driver\n'), ((4724, 4786), 'blatann.nrf.nrf_dll_load.driver.ble_gattc_attr_info128_array.frompointer', 'driver.ble_gattc_attr_info128_array.frompointer', (['array_pointer'], {}), '(array_pointer)\n', (4771, 4786), False, 'from blatann.nrf.nrf_dll_load import driver\n'), ((4997, 5059), 'blatann.nrf.nrf_dll_load.driver.ble_gattc_handle_value_array.frompointer', 'driver.ble_gattc_handle_value_array.frompointer', (['array_pointer'], {}), '(array_pointer)\n', (5044, 5059), False, 'from blatann.nrf.nrf_dll_load import driver\n'), ((5263, 5322), 'blatann.nrf.nrf_dll_load.driver.ble_gattc_attr_info_array.frompointer', 'driver.ble_gattc_attr_info_array.frompointer', (['array_pointer'], {}), '(array_pointer)\n', (5307, 5322), False, 'from blatann.nrf.nrf_dll_load import driver\n'), ((5530, 5591), 'blatann.nrf.nrf_dll_load.driver.ble_gattc_attr_info16_array.frompointer', 'driver.ble_gattc_attr_info16_array.frompointer', (['array_pointer'], {}), '(array_pointer)\n', (5576, 5591), False, 'from blatann.nrf.nrf_dll_load import driver\n'), ((5801, 5863), 'blatann.nrf.nrf_dll_load.driver.ble_gattc_attr_info128_array.frompointer', 'driver.ble_gattc_attr_info128_array.frompointer', (['array_pointer'], {}), '(array_pointer)\n', (5848, 5863), False, 'from blatann.nrf.nrf_dll_load import driver\n'), ((6078, 6141), 'blatann.nrf.nrf_dll_load.driver.sd_rpc_serial_port_desc_array.frompointer', 'driver.sd_rpc_serial_port_desc_array.frompointer', (['array_pointer'], {}), '(array_pointer)\n', (6126, 6141), False, 'from blatann.nrf.nrf_dll_load import driver\n')]
|
"""
This whole file is dedicated to the creation of spreadsheets. It's existence is for marketing related
stuff. It does involve programming, but it's more related to marketing.
Note: This code is not accessible through normal runtime
"""
from datetime import date, timedelta
from .models import User
def get_user(token_id):
return User.objects.get(token_id=token_id)
# To get the token id of the user, just go to the admin page and search for it's email
def get_user_metrics(token_id):
user = get_user(token_id)
print(user.schedules_set.all())
# This function needs the date of the first day of the week to figure it out the week you actually want
def get_user_weekly_metrics(token_id, day, month, year):
user = get_user(token_id)
start_date = date(year, month, day)
end_date = start_date + timedelta(days=7)
print(user.schedules_set.filter(date_and_time__date__gte=start_date, date_and_time__date__lte=end_date))
def get_total_schedules_from_two_accounts(first_account_token_id, second_account_token_id):
first_user = get_user(first_account_token_id)
second_user = get_user(second_account_token_id)
print(f'Total schedules of the first user: \n{first_user.schedules_set.all()}')
print(f'Total schedules of the second user: \n{second_user.schedules_set.all()}')
# This function gets the average of the newest and oldest schedule, so it's not as precise as a monthly average.
def get_user_entire_average_schedule_by_day(token_id):
user = get_user(token_id)
dates = user.schedules_set.all().order_by('date_and_time')
if dates.count() < 2:
print('Not enough schedules to create a metric')
return
newest_date = dates.first().date_and_time
oldest_date = dates.last().date_and_time
time_difference = newest_date - oldest_date
print(f'Newest date: {newest_date}')
print(f'Oldest date: {oldest_date}')
print(f'Time difference: {time_difference}')
print(f'Are they equal ? {newest_date == oldest_date}')
if abs(time_difference.days) <= 0:
print('The first and last date are the same. Cannot divide by zero. Considering time difference as 1.')
time_difference += timedelta(days=1)
average = dates.count() / abs(time_difference.days)
print(f'Average is: {average}')
# This function gets the average of the newest and oldest schedule of the month
def get_user_monthly_average_schedule_by_day(token_id, year, month):
user = get_user(token_id)
start_date = date(year, month, 1)
end_date = start_date + timedelta(days=30)
dates = user.schedules_set.filter(
date_and_time__date__gte=start_date,
date_and_time__date__lte=end_date
)
average = dates.count() / 30
print(f'Average is: {average}')
# This function is just a test, I'm just trying to see if I can call two users with a single database query
def call_three_users_at_the_same_time(start_time, end_time, first_user_token_id, second_user_token_id, third_user_token_id):
# Getting three users with only one query
users = User.objects.filter(token_id__in=[first_user_token_id, second_user_token_id, third_user_token_id])
for i in range(users.count()):
# If the date is already scheduled. AKA if the 'surgeon' doesn't have time
if users[i].schedules_set.filter(date_and_time__gte=start_time, date_and_time__lte=end_time):
print('The crew is occupied! Sorry <3')
return
print('The crew is all available!')
|
[
"datetime.date",
"datetime.timedelta"
] |
[((775, 797), 'datetime.date', 'date', (['year', 'month', 'day'], {}), '(year, month, day)\n', (779, 797), False, 'from datetime import date, timedelta\n'), ((2507, 2527), 'datetime.date', 'date', (['year', 'month', '(1)'], {}), '(year, month, 1)\n', (2511, 2527), False, 'from datetime import date, timedelta\n'), ((826, 843), 'datetime.timedelta', 'timedelta', ([], {'days': '(7)'}), '(days=7)\n', (835, 843), False, 'from datetime import date, timedelta\n'), ((2196, 2213), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (2205, 2213), False, 'from datetime import date, timedelta\n'), ((2556, 2574), 'datetime.timedelta', 'timedelta', ([], {'days': '(30)'}), '(days=30)\n', (2565, 2574), False, 'from datetime import date, timedelta\n')]
|
import sys
import schema_pa
class ArgsParser:
def parse(self, argv):
self.config = schema_pa.parse("schema",
"Test schema", argv)
if __name__ == "__main__":
parser = ArgsParser()
parser.parse(sys.argv[1:])
print(parser.config)
|
[
"schema_pa.parse"
] |
[((97, 143), 'schema_pa.parse', 'schema_pa.parse', (['"""schema"""', '"""Test schema"""', 'argv'], {}), "('schema', 'Test schema', argv)\n", (112, 143), False, 'import schema_pa\n')]
|
import warnings
import torch.nn as nn
from skssl.utils.initialization import linear_init
from skssl.utils.torchextend import identity
__all__ = ["MLP", "get_uninitialized_mlp"]
def get_uninitialized_mlp(**kwargs):
return lambda *args, **kargs2: MLP(*args, **kwargs, **kargs2)
class MLP(nn.Module):
"""General MLP class.
Parameters
----------
input_size: int
output_size: int
hidden_size: int, optional
Number of hidden neurones.
n_hidden_layers: int, optional
Number of hidden layers.
activation: torch.nn.modules.activation, optional
Unitialized activation class.
bias: bool, optional
Whether to use biaises in the hidden layers.
dropout: float, optional
Dropout rate.
"""
def __init__(self, input_size, output_size,
hidden_size=32,
n_hidden_layers=1,
activation=nn.ReLU,
bias=True,
dropout=0):
super().__init__()
self.input_size = input_size
self.output_size = output_size
self.hidden_size = hidden_size
self.n_hidden_layers = n_hidden_layers
if self.hidden_size < min(self.output_size, self.input_size):
self.hidden_size = min(self.output_size, self.input_size)
txt = "hidden_size={} smaller than output={} and input={}. Setting it to {}."
warnings.warn(txt.format(hidden_size, output_size, input_size, self.hidden_size))
self.dropout = (nn.Dropout(p=dropout) if dropout > 0 else identity)
self.activation = activation() # cannot be a function from Functional but class
self.to_hidden = nn.Linear(self.input_size, self.hidden_size, bias=bias)
self.linears = nn.ModuleList([nn.Linear(self.hidden_size, self.hidden_size, bias=bias)
for _ in range(self.n_hidden_layers - 1)])
self.out = nn.Linear(self.hidden_size, self.output_size, bias=bias)
self.reset_parameters()
def forward(self, x):
out = self.to_hidden(x)
out = self.activation(out)
out = self.dropout(out)
for linear in self.linears:
out = linear(out)
out = self.activation(out)
out = self.dropout(out)
out = self.out(out)
return out
def reset_parameters(self):
linear_init(self.to_hidden, activation=self.activation)
for lin in self.linears:
linear_init(lin, activation=self.activation)
linear_init(self.out)
|
[
"torch.nn.Dropout",
"skssl.utils.initialization.linear_init",
"torch.nn.Linear"
] |
[((1695, 1750), 'torch.nn.Linear', 'nn.Linear', (['self.input_size', 'self.hidden_size'], {'bias': 'bias'}), '(self.input_size, self.hidden_size, bias=bias)\n', (1704, 1750), True, 'import torch.nn as nn\n'), ((1946, 2002), 'torch.nn.Linear', 'nn.Linear', (['self.hidden_size', 'self.output_size'], {'bias': 'bias'}), '(self.hidden_size, self.output_size, bias=bias)\n', (1955, 2002), True, 'import torch.nn as nn\n'), ((2393, 2448), 'skssl.utils.initialization.linear_init', 'linear_init', (['self.to_hidden'], {'activation': 'self.activation'}), '(self.to_hidden, activation=self.activation)\n', (2404, 2448), False, 'from skssl.utils.initialization import linear_init\n'), ((2547, 2568), 'skssl.utils.initialization.linear_init', 'linear_init', (['self.out'], {}), '(self.out)\n', (2558, 2568), False, 'from skssl.utils.initialization import linear_init\n'), ((1528, 1549), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (1538, 1549), True, 'import torch.nn as nn\n'), ((2494, 2538), 'skssl.utils.initialization.linear_init', 'linear_init', (['lin'], {'activation': 'self.activation'}), '(lin, activation=self.activation)\n', (2505, 2538), False, 'from skssl.utils.initialization import linear_init\n'), ((1789, 1845), 'torch.nn.Linear', 'nn.Linear', (['self.hidden_size', 'self.hidden_size'], {'bias': 'bias'}), '(self.hidden_size, self.hidden_size, bias=bias)\n', (1798, 1845), True, 'import torch.nn as nn\n')]
|
from discord.ext import commands
from util.data.user_data import UserData
from util.decorators import delete_original
class Preferences(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(name="botdms", aliases=["botdm"])
@commands.cooldown(1, 2, commands.BucketType.user)
@delete_original()
async def dms(self, ctx, *, enabled: bool):
"""
Control whether or not the bot will DM you with certain commands/functions.
Example: Disabling DMs will prevent bot from DMing reactor role gives/takes.
Usage: botdms False
"""
result = UserData(str(ctx.author.id)).booleans.set("dm_enabled", enabled)
await ctx.send(f"{ctx.author.mention}, bot DMs have been **{'enabled' if result else 'disabled'}**.",
delete_after=10)
def setup(bot):
bot.add_cog(Preferences(bot))
|
[
"discord.ext.commands.cooldown",
"discord.ext.commands.command",
"util.decorators.delete_original"
] |
[((212, 262), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""botdms"""', 'aliases': "['botdm']"}), "(name='botdms', aliases=['botdm'])\n", (228, 262), False, 'from discord.ext import commands\n'), ((268, 317), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(2)', 'commands.BucketType.user'], {}), '(1, 2, commands.BucketType.user)\n', (285, 317), False, 'from discord.ext import commands\n'), ((323, 340), 'util.decorators.delete_original', 'delete_original', ([], {}), '()\n', (338, 340), False, 'from util.decorators import delete_original\n')]
|
import plotly.graph_objects as go
from plotly.subplots import make_subplots
def plot_freq_labels(data, template="plotly"):
X = ["Non Hate Speech", "Hate Speech"]
Y = data["label"].value_counts().values
fig = go.Figure()
fig.add_trace(
go.Bar(
x=X,
y=Y,
text=Y,
textposition="auto",
marker_color=["lightblue", "royalblue"],
hovertemplate="Label: %{x} <br>Count: %{y}",
)
)
fig.update_layout(
title="Labels frequency",
xaxis_title="Labels",
yaxis_title="Counts",
template=template,
)
return fig
def plot_word_hist(data, template="plotly"):
fig = go.Figure()
fig.add_trace(
go.Histogram(
x=data.word_count_before.values,
marker_color="royalblue",
name="Before cleaning",
)
)
fig.add_trace(
go.Histogram(
x=data.word_count.values,
marker_color="lightblue",
name="After cleaning",
)
)
fig.update_layout(
title="Words distribution",
xaxis_title="Number of words",
yaxis_title="Number of sentences",
barmode="stack",
template=template,
)
fig.update_xaxes(range=[0, 50])
return fig
def plot_most_common_words(df, template="plotly"):
X = df.words
Y = df.freq
fig = go.Figure()
fig.add_trace(
go.Bar(
x=X,
y=Y,
hovertemplate="Word: %{x} <br>Count: %{y}",
marker_color="royalblue",
)
)
fig.update_layout(
title="Top 20 most common Words in the entire dataset ",
xaxis_title="Word",
yaxis_title="Count",
xaxis_tickangle=290,
template=template,
)
return fig
def plot_top_20_pos(df, x_col="", title="", template="plotly"):
fig = make_subplots(specs=[[{"secondary_y": True}]])
fig.add_trace(
go.Bar(
x=df[x_col],
y=df.Freq_No_Hate,
name="Freq. Not Hate Speech",
yaxis="y",
offsetgroup=1,
marker_color="lightblue",
),
secondary_y=False,
)
fig.add_trace(
go.Bar(
x=df[x_col],
y=df.Freq_Hate_Speech,
name="Freq. Hate Speech",
yaxis="y2",
offsetgroup=2,
marker_color="royalblue",
),
secondary_y=True,
)
fig.update_xaxes(title_text=x_col, tickangle = 290)
fig.update_yaxes(title_text="Count", secondary_y=False)
fig.update_layout(
title=title, template=template, yaxis2=dict(overlaying="y", side="right")
)
fig.update_layout(barmode="group")
return fig
def plot_top_pos_general(df, x_col=None, y_col=None, title="", template="plotly"):
fig = make_subplots(specs=[[{"secondary_y": True}]])
fig.add_trace(
go.Bar(
x=df[x_col[0]],
y=df[y_col[0]],
name=x_col[0],
yaxis="y",
offsetgroup=1,
marker_color="lightblue",
hovertemplate="<b>Total</b><br>POS: %{x} <br>Rel. freq.: %{y}",
),
secondary_y=False,
)
fig.add_trace(
go.Bar(
x=df[x_col[1]],
y=df[y_col[1]],
name=x_col[1],
yaxis="y2",
offsetgroup=2,
marker_color="royalblue",
hovertemplate="<b>Hate Speech</b><br>POS: %{x} <br>Rel. freq.: %{y}",
),
secondary_y=True,
)
fig.update_xaxes(title_text="POS", tickangle=290)
fig.update_yaxes(title_text="Relative Frequency", secondary_y=False)
fig.update_layout(
title=title, template=template, yaxis2=dict(overlaying="y", side="right")
)
fig.update_layout(barmode="group")
return fig
|
[
"plotly.subplots.make_subplots",
"plotly.graph_objects.Figure",
"plotly.graph_objects.Bar",
"plotly.graph_objects.Histogram"
] |
[((223, 234), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (232, 234), True, 'import plotly.graph_objects as go\n'), ((708, 719), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (717, 719), True, 'import plotly.graph_objects as go\n'), ((1415, 1426), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (1424, 1426), True, 'import plotly.graph_objects as go\n'), ((1907, 1953), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'specs': "[[{'secondary_y': True}]]"}), "(specs=[[{'secondary_y': True}]])\n", (1920, 1953), False, 'from plotly.subplots import make_subplots\n'), ((2864, 2910), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'specs': "[[{'secondary_y': True}]]"}), "(specs=[[{'secondary_y': True}]])\n", (2877, 2910), False, 'from plotly.subplots import make_subplots\n'), ((263, 398), 'plotly.graph_objects.Bar', 'go.Bar', ([], {'x': 'X', 'y': 'Y', 'text': 'Y', 'textposition': '"""auto"""', 'marker_color': "['lightblue', 'royalblue']", 'hovertemplate': '"""Label: %{x} <br>Count: %{y}"""'}), "(x=X, y=Y, text=Y, textposition='auto', marker_color=['lightblue',\n 'royalblue'], hovertemplate='Label: %{x} <br>Count: %{y}')\n", (269, 398), True, 'import plotly.graph_objects as go\n'), ((748, 847), 'plotly.graph_objects.Histogram', 'go.Histogram', ([], {'x': 'data.word_count_before.values', 'marker_color': '"""royalblue"""', 'name': '"""Before cleaning"""'}), "(x=data.word_count_before.values, marker_color='royalblue',\n name='Before cleaning')\n", (760, 847), True, 'import plotly.graph_objects as go\n'), ((924, 1016), 'plotly.graph_objects.Histogram', 'go.Histogram', ([], {'x': 'data.word_count.values', 'marker_color': '"""lightblue"""', 'name': '"""After cleaning"""'}), "(x=data.word_count.values, marker_color='lightblue', name=\n 'After cleaning')\n", (936, 1016), True, 'import plotly.graph_objects as go\n'), ((1455, 1546), 'plotly.graph_objects.Bar', 'go.Bar', ([], {'x': 'X', 'y': 'Y', 'hovertemplate': '"""Word: %{x} <br>Count: %{y}"""', 'marker_color': '"""royalblue"""'}), "(x=X, y=Y, hovertemplate='Word: %{x} <br>Count: %{y}', marker_color=\n 'royalblue')\n", (1461, 1546), True, 'import plotly.graph_objects as go\n'), ((1982, 2107), 'plotly.graph_objects.Bar', 'go.Bar', ([], {'x': 'df[x_col]', 'y': 'df.Freq_No_Hate', 'name': '"""Freq. Not Hate Speech"""', 'yaxis': '"""y"""', 'offsetgroup': '(1)', 'marker_color': '"""lightblue"""'}), "(x=df[x_col], y=df.Freq_No_Hate, name='Freq. Not Hate Speech', yaxis=\n 'y', offsetgroup=1, marker_color='lightblue')\n", (1988, 2107), True, 'import plotly.graph_objects as go\n'), ((2248, 2374), 'plotly.graph_objects.Bar', 'go.Bar', ([], {'x': 'df[x_col]', 'y': 'df.Freq_Hate_Speech', 'name': '"""Freq. Hate Speech"""', 'yaxis': '"""y2"""', 'offsetgroup': '(2)', 'marker_color': '"""royalblue"""'}), "(x=df[x_col], y=df.Freq_Hate_Speech, name='Freq. Hate Speech', yaxis=\n 'y2', offsetgroup=2, marker_color='royalblue')\n", (2254, 2374), True, 'import plotly.graph_objects as go\n'), ((2939, 3117), 'plotly.graph_objects.Bar', 'go.Bar', ([], {'x': 'df[x_col[0]]', 'y': 'df[y_col[0]]', 'name': 'x_col[0]', 'yaxis': '"""y"""', 'offsetgroup': '(1)', 'marker_color': '"""lightblue"""', 'hovertemplate': '"""<b>Total</b><br>POS: %{x} <br>Rel. freq.: %{y}"""'}), "(x=df[x_col[0]], y=df[y_col[0]], name=x_col[0], yaxis='y',\n offsetgroup=1, marker_color='lightblue', hovertemplate=\n '<b>Total</b><br>POS: %{x} <br>Rel. freq.: %{y}')\n", (2945, 3117), True, 'import plotly.graph_objects as go\n'), ((3266, 3451), 'plotly.graph_objects.Bar', 'go.Bar', ([], {'x': 'df[x_col[1]]', 'y': 'df[y_col[1]]', 'name': 'x_col[1]', 'yaxis': '"""y2"""', 'offsetgroup': '(2)', 'marker_color': '"""royalblue"""', 'hovertemplate': '"""<b>Hate Speech</b><br>POS: %{x} <br>Rel. freq.: %{y}"""'}), "(x=df[x_col[1]], y=df[y_col[1]], name=x_col[1], yaxis='y2',\n offsetgroup=2, marker_color='royalblue', hovertemplate=\n '<b>Hate Speech</b><br>POS: %{x} <br>Rel. freq.: %{y}')\n", (3272, 3451), True, 'import plotly.graph_objects as go\n')]
|
from Bubot.Helpers.ExtException import ExtException, KeyNotFound
from Bubot_CoAP.resources.resource import Resource
class OcfResource(Resource):
def __init__(self, name, coap_server=None, visible=True, observable=True, allow_children=True):
super().__init__(name, coap_server=None, visible=True, observable=True, allow_children=True)
self._data = {}
self._href = name
self.actual_content_type = "application/vnd.ocf+cbor"
self.content_type = "application/vnd.ocf+cbor"
self.device = None
pass
@classmethod
def init_from_config(cls, device, href, config):
self = cls(href)
self.device = device
self.data = config
return self
@property
def data(self):
return self._data
@data.setter
def data(self, value):
self._data = value
@property
def payload(self):
return self._data
# @payload.setter
# def payload(self, value):
# self._data = value
def get_attr(self, *args):
try:
return self.data[args[0]]
except KeyError:
try:
return args[1]
except IndexError:
raise KeyNotFound(
action='OcfDevice.get_param',
detail=f'{args[0]} ({self.__class__.__name__}{self._href})'
) from None
def set_attr(self, name, value):
self.data[name] = value
@property
def resource_type(self):
return self._data.get('rt', [])
@property
def interface_type(self):
return self._data.get('if', [])
def get_link(self, request_address):
return {
'anchor': f'ocf://{self.device.get_device_id()}',
'href': self._href,
'eps': self.device.transport_layer.get_eps(request_address[0] if request_address else None),
'rt': self.get_attr('rt', []),
'if': self.get_attr('if', []),
'n': self.get_attr('n', ''),
'p': self.get_attr('p', dict(bm=0)),
}
async def render_GET(self, request):
self.device.log.debug(
f'{self.__class__.__name__} get {self._href} {request.query} from {request.source} to {request.destination} ')
return self
|
[
"Bubot.Helpers.ExtException.KeyNotFound"
] |
[((1217, 1324), 'Bubot.Helpers.ExtException.KeyNotFound', 'KeyNotFound', ([], {'action': '"""OcfDevice.get_param"""', 'detail': 'f"""{args[0]} ({self.__class__.__name__}{self._href})"""'}), "(action='OcfDevice.get_param', detail=\n f'{args[0]} ({self.__class__.__name__}{self._href})')\n", (1228, 1324), False, 'from Bubot.Helpers.ExtException import ExtException, KeyNotFound\n')]
|
from django.shortcuts import render, get_object_or_404, redirect
from .models import Post
from django.views.generic import ListView, DetailView
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.contrib.auth.models import User
from django.db import IntegrityError
from django.contrib.auth import login, logout, authenticate
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
class HomePageView(ListView):
model = Post
template_name = 'home.html'
paginate_by = 4
class PostDetailView(LoginRequiredMixin, DetailView):
login_url = '/login/'
model = Post
template_name = 'post_detail.html'
def AboutPageView(request):
return render(request, 'about.html')
@login_required
def ContactPageView(request):
return render(request, 'contact.html')
def signupuser(request):
if request.method == 'GET':
return render(request, 'signupuser.html', {'form': UserCreationForm()})
else:
#create a new user
if request.POST['password1'] == request.POST['password2']:
try:
user = User.objects.create_user(request.POST['username'], password = request.POST['password1'])
user.save()
login(request, user)
return redirect('home')
except IntegrityError:
return render(request, 'signupuser.html',{'forms':UserCreationForm(),'error':"That user name has been taken. Please try someother username"})
else:
return render(request, 'signupuser.html',{'forms':UserCreationForm(), 'error':'password did not match'})
def loginuser(request):
if request.method == 'GET':
return render(request, 'loginuser.html', {'form':AuthenticationForm()})
else:
user = authenticate(request,username = request.POST['username'],password = request.POST['password'])
if user is None:
return render(request, 'loginuser.html', {'form':AuthenticationForm,'error':'Username and password did not match'})
else:
login(request,user)
return redirect('home')
@login_required
def logoutuser(request):
if request.method == "POST":
logout(request)
return redirect('home')
|
[
"django.shortcuts.redirect",
"django.contrib.auth.forms.AuthenticationForm",
"django.contrib.auth.models.User.objects.create_user",
"django.contrib.auth.logout",
"django.contrib.auth.forms.UserCreationForm",
"django.contrib.auth.authenticate",
"django.shortcuts.render",
"django.contrib.auth.login"
] |
[((758, 787), 'django.shortcuts.render', 'render', (['request', '"""about.html"""'], {}), "(request, 'about.html')\n", (764, 787), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((846, 877), 'django.shortcuts.render', 'render', (['request', '"""contact.html"""'], {}), "(request, 'contact.html')\n", (852, 877), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((1842, 1938), 'django.contrib.auth.authenticate', 'authenticate', (['request'], {'username': "request.POST['username']", 'password': "request.POST['password']"}), "(request, username=request.POST['username'], password=request.\n POST['password'])\n", (1854, 1938), False, 'from django.contrib.auth import login, logout, authenticate\n'), ((2254, 2269), 'django.contrib.auth.logout', 'logout', (['request'], {}), '(request)\n', (2260, 2269), False, 'from django.contrib.auth import login, logout, authenticate\n'), ((2285, 2301), 'django.shortcuts.redirect', 'redirect', (['"""home"""'], {}), "('home')\n", (2293, 2301), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((1980, 2095), 'django.shortcuts.render', 'render', (['request', '"""loginuser.html"""', "{'form': AuthenticationForm, 'error': 'Username and password did not match'}"], {}), "(request, 'loginuser.html', {'form': AuthenticationForm, 'error':\n 'Username and password did not match'})\n", (1986, 2095), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((2115, 2135), 'django.contrib.auth.login', 'login', (['request', 'user'], {}), '(request, user)\n', (2120, 2135), False, 'from django.contrib.auth import login, logout, authenticate\n'), ((2154, 2170), 'django.shortcuts.redirect', 'redirect', (['"""home"""'], {}), "('home')\n", (2162, 2170), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((997, 1015), 'django.contrib.auth.forms.UserCreationForm', 'UserCreationForm', ([], {}), '()\n', (1013, 1015), False, 'from django.contrib.auth.forms import UserCreationForm, AuthenticationForm\n'), ((1162, 1253), 'django.contrib.auth.models.User.objects.create_user', 'User.objects.create_user', (["request.POST['username']"], {'password': "request.POST['password1']"}), "(request.POST['username'], password=request.POST[\n 'password1'])\n", (1186, 1253), False, 'from django.contrib.auth.models import User\n'), ((1295, 1315), 'django.contrib.auth.login', 'login', (['request', 'user'], {}), '(request, user)\n', (1300, 1315), False, 'from django.contrib.auth import login, logout, authenticate\n'), ((1339, 1355), 'django.shortcuts.redirect', 'redirect', (['"""home"""'], {}), "('home')\n", (1347, 1355), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((1794, 1814), 'django.contrib.auth.forms.AuthenticationForm', 'AuthenticationForm', ([], {}), '()\n', (1812, 1814), False, 'from django.contrib.auth.forms import UserCreationForm, AuthenticationForm\n'), ((1625, 1643), 'django.contrib.auth.forms.UserCreationForm', 'UserCreationForm', ([], {}), '()\n', (1641, 1643), False, 'from django.contrib.auth.forms import UserCreationForm, AuthenticationForm\n'), ((1457, 1475), 'django.contrib.auth.forms.UserCreationForm', 'UserCreationForm', ([], {}), '()\n', (1473, 1475), False, 'from django.contrib.auth.forms import UserCreationForm, AuthenticationForm\n')]
|
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterable,
List,
Optional,
Sequence,
Tuple,
Union,
)
import numpy as np
import scipy.sparse as sps
from irspack.definitions import DenseScoreArray, UserIndexArray
from irspack.utils._util_cpp import retrieve_recommend_from_score
from irspack.utils.threading import get_n_threads
if TYPE_CHECKING:
# We should move this module out of "utils".
from irspack.recommenders import BaseRecommender
class IDMappedRecommender:
"""A utility class that helps mapping user/item ids to index, retrieving recommendation score,
and making a recommendation.
Args:
recommender:
The backend base recommender which transforms user/item ids.
user_ids:
user_ids which correspods to the rows of ``recommender.X_train_all``.
item_ids:
item_ids which correspods to the columns of ``recommender.X_train_all``.
Raises:
ValueError: When recommender and user_ids/item_ids are inconsistent.
ValueError: When there is a duplicate in user_ids.
ValueError: When there is a duplicate in item_ids.
"""
def __init__(
self, recommender: "BaseRecommender", user_ids: List[Any], item_ids: List[Any]
):
if (recommender.n_users != len(user_ids)) or (
recommender.n_items != len(item_ids)
):
raise ValueError(
"The recommender and user/item ids have inconsistent lengths."
)
self.recommender = recommender
self.user_ids = user_ids
self.item_ids = item_ids
self.user_id_to_index = {user_id: i for i, user_id in enumerate(user_ids)}
self.item_id_to_index = {item_id: i for i, item_id in enumerate(item_ids)}
def _item_id_list_to_index_list(self, ids: Iterable[Any]) -> List[int]:
return [self.item_id_to_index[id] for id in ids if id in self.item_id_to_index]
def _user_profile_to_data_col(
self, profile: Union[List[Any], Dict[Any, float]]
) -> Tuple[List[float], List[int]]:
data: List[float]
cols: List[int]
# data: np.ndarray
if isinstance(profile, list):
cols = self._item_id_list_to_index_list(profile)
data = [1.0] * len(cols)
else:
data = []
cols = []
for id, score in profile.items():
if id in self.item_id_to_index:
data.append(score)
cols.append(self.item_id_to_index[id])
return data, cols
def _list_of_user_profile_to_matrix(
self, users_info: Sequence[Union[List[Any], Dict[Any, float]]]
) -> sps.csr_matrix:
data: List[float] = []
indptr: List[int] = [0]
col: List[int] = []
indptr_current = 0
for user_info in users_info:
data_u, col_u = self._user_profile_to_data_col(user_info)
data.extend(data_u)
col.extend(col_u)
indptr_current += len(col_u)
indptr.append(indptr_current)
result = sps.csr_matrix(
(data, col, indptr), shape=(len(users_info), len(self.item_ids))
)
return result
def get_recommendation_for_known_user_id(
self,
user_id: Any,
cutoff: int = 20,
allowed_item_ids: Optional[List[Any]] = None,
forbidden_item_ids: Optional[List[Any]] = None,
) -> List[Tuple[Any, float]]:
"""Retrieve recommendation result for a known user.
Args:
user_id:
The target user ID.
cutoff:
Maximal number of recommendations allowed.
allowed_item_ids:
If not ``None``, recommend the items within this list.
If ``None``, all known item ids can be recommended (except for those in ``item_ids`` argument).
Defaults to ``None``.
forbidden_item_ids:
If not ``None``, never recommend the items within the list. Defaults to None.
Raises:
RuntimeError: When user_id is not in ``self.user_ids``.
Returns:
A List of tuples consisting of ``(item_id, score)``.
"""
if user_id not in self.user_ids:
raise RuntimeError(f"User with user_id {user_id} not found.")
user_index: UserIndexArray = np.asarray(
[self.user_id_to_index[user_id]], dtype=np.int64
)
score = self.recommender.get_score_remove_seen(user_index)[0, :]
return self._score_to_recommended_items(
score,
cutoff=cutoff,
allowed_item_ids=allowed_item_ids,
forbidden_item_ids=forbidden_item_ids,
)
def get_recommendation_for_new_user(
self,
user_profile: Union[List[Any], Dict[Any, float]],
cutoff: int = 20,
allowed_item_ids: Optional[List[Any]] = None,
forbidden_item_ids: Optional[List[Any]] = None,
) -> List[Tuple[Any, float]]:
"""Retrieve recommendation result for a previously unseen user using item ids with which he or she interacted.
Args:
user_profile:
User's profile given either as a list of item ids the user had a cotact or a item id-rating dict.
Previously unseen item ID will be ignored.
cutoff:
Maximal number of recommendations allowed.
allowed_item_ids:
If not ``None``, recommend the items within this list.
If ``None``, all known item ids can be recommended (except for those in ``item_ids`` argument).
Defaults to ``None``.
forbidden_item_ids:
If not ``None``, never recommend the items within the list. Defaults to None.
Returns:
A List of tuples consisting of ``(item_id, score)``.
"""
data, cols = self._user_profile_to_data_col(user_profile)
X_input = sps.csr_matrix(
(data, cols, [0, len(cols)]), shape=(1, len(self.item_ids))
)
score = self.recommender.get_score_cold_user_remove_seen(X_input)[0]
return self._score_to_recommended_items(
score,
cutoff,
allowed_item_ids=allowed_item_ids,
forbidden_item_ids=forbidden_item_ids,
)
def get_recommendation_for_new_user_batch(
self,
user_profiles: Sequence[Union[List[Any], Dict[Any, float]]],
cutoff: int = 20,
allowed_item_ids: Optional[List[List[Any]]] = None,
forbidden_item_ids: Optional[List[List[Any]]] = None,
n_threads: Optional[int] = None,
) -> List[List[Tuple[Any, float]]]:
"""Retrieve recommendation result for a previously unseen users using item ids with which they have interacted.
Args:
user_profiles:
A list of user profiles.
Each profile should be either the item ids the user had a cotact, or item-rating dict.
Previously unseen item IDs will be ignored.
cutoff:
Maximal number of recommendations allowed.
allowed_item_ids:
If not ``None``, defines "a list of list of recommendable item IDs"
and ``len(allowed_item_ids)`` must be equal to ``len(item_ids)``.
Defaults to ``None``.
forbidden_item_ids:
If not ``None``, defines "a list of list of forbidden item IDs"
and ``len(allowed_item_ids)`` must be equal to ``len(item_ids)``
Defaults to ``None``.
Returns:
A list of list of tuples consisting of ``(item_id, score)``.
Each internal list corresponds to the recommender's recommendation output.
"""
X_input = self._list_of_user_profile_to_matrix(user_profiles)
score = self.recommender.get_score_cold_user_remove_seen(X_input)
return self._score_to_recommended_items_batch(
score,
cutoff,
allowed_item_ids=allowed_item_ids,
forbidden_item_ids=forbidden_item_ids,
n_threads=get_n_threads(n_threads=n_threads),
)
def _score_to_recommended_items(
self,
score: DenseScoreArray,
cutoff: int,
allowed_item_ids: Optional[List[Any]] = None,
forbidden_item_ids: Optional[List[Any]] = None,
) -> List[Tuple[Any, float]]:
if allowed_item_ids is not None:
allowed_item_indices = np.asarray(
self._item_id_list_to_index_list(allowed_item_ids), dtype=np.int64
)
high_score_inds = allowed_item_indices[
score[allowed_item_indices].argsort()[::-1]
]
else:
high_score_inds = score.argsort()[::-1]
recommendations: List[Tuple[Any, float]] = []
for i in high_score_inds:
i_int = int(i)
score_this = score[i_int]
item_id = self.item_ids[i_int]
if np.isinf(score_this):
continue
if forbidden_item_ids is not None:
if item_id in forbidden_item_ids:
continue
recommendations.append((item_id, float(score_this)))
if len(recommendations) >= cutoff:
break
return recommendations
def _score_to_recommended_items_batch(
self,
score: DenseScoreArray,
cutoff: int,
allowed_item_ids: Optional[List[List[Any]]] = None,
forbidden_item_ids: Optional[List[List[Any]]] = None,
n_threads: int = 1,
) -> List[List[Tuple[Any, float]]]:
if forbidden_item_ids is not None:
assert len(forbidden_item_ids) == score.shape[0]
if allowed_item_ids is not None:
assert len(allowed_item_ids) == score.shape[0]
allowed_item_indices: List[List[int]] = []
if allowed_item_ids is not None:
allowed_item_indices = [
self._item_id_list_to_index_list(_) for _ in allowed_item_ids
]
if forbidden_item_ids is not None:
for u, forbidden_ids_per_user in enumerate(forbidden_item_ids):
score[
u, self._item_id_list_to_index_list(forbidden_ids_per_user)
] = -np.inf
raw_result = retrieve_recommend_from_score(
score,
allowed_item_indices,
cutoff,
n_threads=n_threads,
)
return [
[
(self.item_ids[item_index], score)
for item_index, score in user_wise_raw_result
]
for user_wise_raw_result in raw_result
]
|
[
"irspack.utils.threading.get_n_threads",
"numpy.asarray",
"irspack.utils._util_cpp.retrieve_recommend_from_score",
"numpy.isinf"
] |
[((4405, 4465), 'numpy.asarray', 'np.asarray', (['[self.user_id_to_index[user_id]]'], {'dtype': 'np.int64'}), '([self.user_id_to_index[user_id]], dtype=np.int64)\n', (4415, 4465), True, 'import numpy as np\n'), ((10426, 10517), 'irspack.utils._util_cpp.retrieve_recommend_from_score', 'retrieve_recommend_from_score', (['score', 'allowed_item_indices', 'cutoff'], {'n_threads': 'n_threads'}), '(score, allowed_item_indices, cutoff,\n n_threads=n_threads)\n', (10455, 10517), False, 'from irspack.utils._util_cpp import retrieve_recommend_from_score\n'), ((9089, 9109), 'numpy.isinf', 'np.isinf', (['score_this'], {}), '(score_this)\n', (9097, 9109), True, 'import numpy as np\n'), ((8206, 8240), 'irspack.utils.threading.get_n_threads', 'get_n_threads', ([], {'n_threads': 'n_threads'}), '(n_threads=n_threads)\n', (8219, 8240), False, 'from irspack.utils.threading import get_n_threads\n')]
|
# -*- coding: utf-8 -*-
"""
session component module.
"""
from pyrin.application.decorators import component
from pyrin.security.session import SessionPackage
from pyrin.security.session.manager import SessionManager
from pyrin.application.structs import Component
@component(SessionPackage.COMPONENT_NAME)
class SessionComponent(Component, SessionManager):
"""
session component class.
"""
pass
|
[
"pyrin.application.decorators.component"
] |
[((269, 309), 'pyrin.application.decorators.component', 'component', (['SessionPackage.COMPONENT_NAME'], {}), '(SessionPackage.COMPONENT_NAME)\n', (278, 309), False, 'from pyrin.application.decorators import component\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-25 22:03
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Testings', '0003_collection'),
]
operations = [
migrations.RenameField(
model_name='collection',
old_name='products',
new_name='product',
),
]
|
[
"django.db.migrations.RenameField"
] |
[((285, 377), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""collection"""', 'old_name': '"""products"""', 'new_name': '"""product"""'}), "(model_name='collection', old_name='products',\n new_name='product')\n", (307, 377), False, 'from django.db import migrations\n')]
|
import os
from celery.schedules import crontab
CELERY_BROKER_URL='amqp://guest@localhost//'
CELERY_RESULT_BACKEND = 'mongodb://localhost:27017/'
CELERY_MONGODB_BACKEND_SETTINGS = {
'database': 'wowgicflaskapp',
'taskmeta_collection': 'my_taskmeta_collection',
}
#CELERY_ACCEPT_CONTENT = ['pickle', 'json']
#CELERY_TASK_SERIALIZER='json'
#CELERY_RESULT_SERIALIZER='json'
#CELERY_TIMEZONE='Europe/Oslo'
CELERY_ENABLE_UTC=True
IP = os.uname()[1]
PORT = 8080
NEO4J_IP='127.0.0.1'
MONGODB_HOST = '127.0.0.1'
MONGODB_PORT = '27017'
MONGODB_USERNAME = 'admin'
MONGODB_PASSWORD = '<PASSWORD>'
LOGGER_NAME='wowgic_dev'
CELERYBEAT_SCHEDULE = {# Executes every Monday morning at 7:30 A.M
'getAllInterestNode_every15mins': {
'task': 'tasks.getAllInterestNode',
'schedule': crontab(minute='*/15'),
},
}
|
[
"os.uname",
"celery.schedules.crontab"
] |
[((450, 460), 'os.uname', 'os.uname', ([], {}), '()\n', (458, 460), False, 'import os\n'), ((796, 818), 'celery.schedules.crontab', 'crontab', ([], {'minute': '"""*/15"""'}), "(minute='*/15')\n", (803, 818), False, 'from celery.schedules import crontab\n')]
|
import boto3
#class Command:
# def __init__(self, description,
class Config:
dynamodb = boto3.resource('dynamodb')
def __init__(self, table_name):
self.table = self.dynamodb.Table(table_name)
def get(self, key):
response = self.table.get_item(Key = {'Key' : key}, ConsistentRead = True)
return response['Item']['Value']
def put(self, key, value):
self.table.put_item(Item = {'Key' : key, 'Value' : value})
config = Config('BonkBotConfig')
# using the singleton pattern
def get_instance():
return config
|
[
"boto3.resource"
] |
[((98, 124), 'boto3.resource', 'boto3.resource', (['"""dynamodb"""'], {}), "('dynamodb')\n", (112, 124), False, 'import boto3\n')]
|
import requests
from pprint import pprint
def find_vacancies(parameters):
URL = 'https://www.cbr-xml-daily.ru/daily_json.js'
response = requests.get(URL).json()
usd_rate = response['Valute']['USD']['Value']
euro_rate = response['Valute']['EUR']['Value']
URL_HH = 'https://api.hh.ru/vacancies'
min_salary = list()
max_salary = list()
response = requests.get(URL_HH, params=parameters).json()
pages = response['pages']
vacancies_count = response['found']
for page in range(pages + 1):
params = {'text': parameters.get('text'),
'only_with_salary': parameters.get('only_with_salary'),
'per_page': parameters.get('per_page'),
'page': page}
response = requests.get(URL_HH, params=params).json()
for item in response['items']:
salfrom = item['salary']['from']
salto = item['salary']['to']
salcurr = item['salary']['currency']
if salcurr == 'RUR':
if salfrom is not None:
min_salary.append(salfrom)
if salto is not None:
max_salary.append(salto)
elif salcurr == 'USD':
if salfrom is not None:
min_salary.append(int(salfrom * usd_rate))
if salto is not None:
max_salary.append(int(salto * usd_rate))
elif salcurr == 'EUR':
if salfrom is not None:
min_salary.append(int(salfrom * euro_rate))
if salto is not None:
max_salary.append(int(salto * euro_rate))
data = {
'average_salary': f'{sum(min_salary) // len(min_salary)} - {sum(max_salary) // len(max_salary)}',
'vacancies_count': vacancies_count
}
return data
|
[
"requests.get"
] |
[((146, 163), 'requests.get', 'requests.get', (['URL'], {}), '(URL)\n', (158, 163), False, 'import requests\n'), ((381, 420), 'requests.get', 'requests.get', (['URL_HH'], {'params': 'parameters'}), '(URL_HH, params=parameters)\n', (393, 420), False, 'import requests\n'), ((768, 803), 'requests.get', 'requests.get', (['URL_HH'], {'params': 'params'}), '(URL_HH, params=params)\n', (780, 803), False, 'import requests\n')]
|
"""
Example to read a FITS file.
Created on Jul 9, 2019
Be aware that hdus.close () needs to be called to limit the number of open files at a given time.
@author: skwok
"""
import astropy.io.fits as pf
from astropy.utils.exceptions import AstropyWarning
import warnings
import numpy as np
from keckdrpframework.models.arguments import Arguments
from keckdrpframework.primitives.base_primitive import BasePrimitive
def open_nowarning(filename):
with warnings.catch_warnings():
warnings.simplefilter("ignore", AstropyWarning)
return pf.open(filename, memmap=False)
class SimpleFitsReader_LRIS(BasePrimitive):
def __init__(self, action, context):
"""
Initializes the super class.
"""
BasePrimitive.__init__(self, action, context)
def _perform(self):
"""
Expects action.args.name as fits file name
Returns HDUs or (later) data model
"""
name = self.action.args.name
self.logger.debug(f"Reading {name}")
out_args = Arguments()
out_args.name = name
out_args.img = self.readData(name)
return out_args
def readData(self, name, cutout=True):
"""
Reads FITS file, mostly from KECK instruments.
If there are multiple HDUs, the image is assembled according to
the kewyrods DETSEC and DATASEC.
Otherwise hdus[0].data is returned.
If cutout is TRUE, then only the none-zero portion is returned.
"""
with open_nowarning(name) as hdus:
if len(hdus) == 1:
return hdus[0].data
else:
imgBuf = hdus[1].data
for hdu in hdus[2:]:
imgBuf = np.concatenate((imgBuf, hdu.data), 1)
return imgBuf
|
[
"warnings.simplefilter",
"keckdrpframework.primitives.base_primitive.BasePrimitive.__init__",
"keckdrpframework.models.arguments.Arguments",
"warnings.catch_warnings",
"astropy.io.fits.open",
"numpy.concatenate"
] |
[((460, 485), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (483, 485), False, 'import warnings\n'), ((495, 542), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'AstropyWarning'], {}), "('ignore', AstropyWarning)\n", (516, 542), False, 'import warnings\n'), ((558, 589), 'astropy.io.fits.open', 'pf.open', (['filename'], {'memmap': '(False)'}), '(filename, memmap=False)\n', (565, 589), True, 'import astropy.io.fits as pf\n'), ((746, 791), 'keckdrpframework.primitives.base_primitive.BasePrimitive.__init__', 'BasePrimitive.__init__', (['self', 'action', 'context'], {}), '(self, action, context)\n', (768, 791), False, 'from keckdrpframework.primitives.base_primitive import BasePrimitive\n'), ((1036, 1047), 'keckdrpframework.models.arguments.Arguments', 'Arguments', ([], {}), '()\n', (1045, 1047), False, 'from keckdrpframework.models.arguments import Arguments\n'), ((1741, 1778), 'numpy.concatenate', 'np.concatenate', (['(imgBuf, hdu.data)', '(1)'], {}), '((imgBuf, hdu.data), 1)\n', (1755, 1778), True, 'import numpy as np\n')]
|
""" timg_denoise.py
"""
import numpy as np
import torch
import torch.nn as nn
class Timg_DenoiseNet_LinT_1Layer(nn.Module):
def __init__(self):
super(Timg_DenoiseNet_LinT_1Layer, self).__init__()
self.C = 64
self.K = 13
self.centre = 3/255.0
self.scale = 2.0
self.conv1 = nn.Conv2d(1, self.C, self.K, padding=self.K//2)
self.norm1 = nn.BatchNorm2d(self.C)
self.relu1 = nn.ReLU()
self.comb = nn.Conv2d(self.C, 1, 1)
# just need time to be above the minimum
self.fix_range_t = nn.Threshold(1/255.0, 1/255.0)
# nn.init.dirac_(self.conv1.weight)
def forward(self, t):
t = self.scale * (t - self.centre)
t = self.conv1(t)
t = self.relu1(t)
t = self.comb(t)
t = self.fix_range_t(t)
return t
class Timg_DenoiseNet_LinT(nn.Module):
def __init__(self, Tmin=1e-3, Tmax=1e3):
super(Timg_DenoiseNet_LinT, self).__init__()
self.C = 64
self.Tmin = Tmin
self.Tmax = Tmax
self.Tmid = 1
self.Tscale = self.Tmid - self.Tmin
self.conv1 = nn.Conv2d(1, self.C, 5, padding=2)
self.relu1 = nn.ReLU()
self.conv2 = nn.Conv2d(self.C, self.C, 5, padding=2)
self.bn2 = nn.BatchNorm2d(self.C)
self.relu2 = nn.ReLU()
self.conv3 = nn.Conv2d(self.C, self.C, 5, padding=2)
self.bn3 = nn.BatchNorm2d(self.C)
self.relu3 = nn.ReLU()
self.conv4 = nn.Conv2d(self.C, self.C, 5, padding=2)
self.bn4 = nn.BatchNorm2d(self.C)
self.relu4 = nn.ReLU()
self.conv5 = nn.Conv2d(self.C, self.C, 5, padding=2)
self.bn5 = nn.BatchNorm2d(self.C)
self.relu5 = nn.ReLU()
self.comb = nn.Conv2d(self.C, 1, 1)
self.fix_range1 = nn.Hardtanh(min_val=self.Tmin, max_val=self.Tmax)
self.fix_range2 = nn.Hardtanh(min_val=0, max_val=1)
def forward(self, t):
t = (1.0/self.Tscale) * (t - self.Tmid)
t = self.conv1(t)
t = self.relu1(t)
t = self.conv2(t)
t = self.bn2(t)
t = self.relu2(t)
t = self.conv3(t)
t = self.bn3(t)
t = self.relu3(t)
t = self.conv4(t)
t = self.bn4(t)
t = self.relu4(t)
t = self.conv5(t)
t = self.bn5(t)
t = self.relu5(t)
t = self.comb(t)
t = self.Tmid + (self.Tscale * t)
t = self.fix_range1(t)
y = torch.pow(t, -1)
y = self.fix_range2(y)
return y
class Timg_DenoiseNet(nn.Module):
def __init__(self, Tmin=1e-3, Tmax=1e3):
super(Timg_DenoiseNet, self).__init__()
self.C = 64
self.Tmin = np.log(Tmin)
self.Tmax = np.log(Tmax)
# self.conv1 = nn.Conv2d(1, self.C, 3, padding=1)
self.conv1 = nn.Conv2d(1, self.C, 5, padding=2)
# self.conv1 = nn.Conv2d(1, self.C, 7, padding=3)
# self.conv1 = nn.Conv2d(1, self.C, 9, padding=4)
# self.conv1 = nn.Conv2d(1, self.C, 11, padding=5)
# self.conv1 = nn.Conv2d(1, self.C, 13, padding=6)
# self.conv1 = nn.Conv2d(1, self.C, 15, padding=7)
# self.conv1 = nn.Conv2d(1, self.C, 17, padding=8)
# self.conv1 = nn.Conv2d(1, self.C, 19, padding=9)
# self.conv1 = nn.Conv2d(1, self.C, 21, padding=10)
self.relu1 = nn.ReLU()
# self.conv2 = nn.Conv2d(self.C, self.C, 3, padding=1)
self.conv2 = nn.Conv2d(self.C, self.C, 5, padding=2)
self.bn2 = nn.BatchNorm2d(self.C)
self.relu2 = nn.ReLU()
# self.conv3 = nn.Conv2d(self.C, self.C, 3, padding=1)
self.conv3 = nn.Conv2d(self.C, self.C, 5, padding=2)
self.bn3 = nn.BatchNorm2d(self.C)
self.relu3 = nn.ReLU()
# self.conv4 = nn.Conv2d(self.C, self.C, 3, padding=1)
self.conv4 = nn.Conv2d(self.C, self.C, 5, padding=2)
self.bn4 = nn.BatchNorm2d(self.C)
self.relu4 = nn.ReLU()
# self.conv5 = nn.Conv2d(self.C, self.C, 3, padding=1)
self.conv5 = nn.Conv2d(self.C, self.C, 5, padding=2)
self.bn5 = nn.BatchNorm2d(self.C)
self.relu5 = nn.ReLU()
self.comb = nn.Conv2d(self.C, 1, 1)
self.fix_range1 = nn.Hardtanh(min_val=self.Tmin, max_val=self.Tmax)
self.fix_range2 = nn.Hardtanh(min_val=0, max_val=1)
def forward(self, t):
logt = torch.log(t)
logt = self.conv1(logt)
logt = self.relu1(logt)
logt = self.conv2(logt)
logt = self.bn2(logt)
logt = self.relu2(logt)
logt = self.conv3(logt)
logt = self.bn3(logt)
logt = self.relu3(logt)
logt = self.conv4(logt)
logt = self.bn4(logt)
logt = self.relu4(logt)
logt = self.conv5(logt)
logt = self.bn5(logt)
logt = self.relu5(logt)
logt = self.comb(logt)
logt = self.fix_range1(logt)
t = torch.exp(logt)
y = torch.pow(t, -1)
y = self.fix_range2(y)
return y
|
[
"torch.nn.ReLU",
"numpy.log",
"torch.nn.Conv2d",
"torch.nn.Threshold",
"torch.exp",
"torch.nn.BatchNorm2d",
"torch.pow",
"torch.log",
"torch.nn.Hardtanh"
] |
[((327, 376), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', 'self.C', 'self.K'], {'padding': '(self.K // 2)'}), '(1, self.C, self.K, padding=self.K // 2)\n', (336, 376), True, 'import torch.nn as nn\n'), ((396, 418), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['self.C'], {}), '(self.C)\n', (410, 418), True, 'import torch.nn as nn\n'), ((440, 449), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (447, 449), True, 'import torch.nn as nn\n'), ((471, 494), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.C', '(1)', '(1)'], {}), '(self.C, 1, 1)\n', (480, 494), True, 'import torch.nn as nn\n'), ((571, 605), 'torch.nn.Threshold', 'nn.Threshold', (['(1 / 255.0)', '(1 / 255.0)'], {}), '(1 / 255.0, 1 / 255.0)\n', (583, 605), True, 'import torch.nn as nn\n'), ((1142, 1176), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', 'self.C', '(5)'], {'padding': '(2)'}), '(1, self.C, 5, padding=2)\n', (1151, 1176), True, 'import torch.nn as nn\n'), ((1198, 1207), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1205, 1207), True, 'import torch.nn as nn\n'), ((1230, 1269), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.C', 'self.C', '(5)'], {'padding': '(2)'}), '(self.C, self.C, 5, padding=2)\n', (1239, 1269), True, 'import torch.nn as nn\n'), ((1289, 1311), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['self.C'], {}), '(self.C)\n', (1303, 1311), True, 'import torch.nn as nn\n'), ((1333, 1342), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1340, 1342), True, 'import torch.nn as nn\n'), ((1365, 1404), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.C', 'self.C', '(5)'], {'padding': '(2)'}), '(self.C, self.C, 5, padding=2)\n', (1374, 1404), True, 'import torch.nn as nn\n'), ((1424, 1446), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['self.C'], {}), '(self.C)\n', (1438, 1446), True, 'import torch.nn as nn\n'), ((1468, 1477), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1475, 1477), True, 'import torch.nn as nn\n'), ((1500, 1539), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.C', 'self.C', '(5)'], {'padding': '(2)'}), '(self.C, self.C, 5, padding=2)\n', (1509, 1539), True, 'import torch.nn as nn\n'), ((1559, 1581), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['self.C'], {}), '(self.C)\n', (1573, 1581), True, 'import torch.nn as nn\n'), ((1603, 1612), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1610, 1612), True, 'import torch.nn as nn\n'), ((1635, 1674), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.C', 'self.C', '(5)'], {'padding': '(2)'}), '(self.C, self.C, 5, padding=2)\n', (1644, 1674), True, 'import torch.nn as nn\n'), ((1694, 1716), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['self.C'], {}), '(self.C)\n', (1708, 1716), True, 'import torch.nn as nn\n'), ((1738, 1747), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1745, 1747), True, 'import torch.nn as nn\n'), ((1769, 1792), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.C', '(1)', '(1)'], {}), '(self.C, 1, 1)\n', (1778, 1792), True, 'import torch.nn as nn\n'), ((1820, 1869), 'torch.nn.Hardtanh', 'nn.Hardtanh', ([], {'min_val': 'self.Tmin', 'max_val': 'self.Tmax'}), '(min_val=self.Tmin, max_val=self.Tmax)\n', (1831, 1869), True, 'import torch.nn as nn\n'), ((1896, 1929), 'torch.nn.Hardtanh', 'nn.Hardtanh', ([], {'min_val': '(0)', 'max_val': '(1)'}), '(min_val=0, max_val=1)\n', (1907, 1929), True, 'import torch.nn as nn\n'), ((2478, 2494), 'torch.pow', 'torch.pow', (['t', '(-1)'], {}), '(t, -1)\n', (2487, 2494), False, 'import torch\n'), ((2712, 2724), 'numpy.log', 'np.log', (['Tmin'], {}), '(Tmin)\n', (2718, 2724), True, 'import numpy as np\n'), ((2745, 2757), 'numpy.log', 'np.log', (['Tmax'], {}), '(Tmax)\n', (2751, 2757), True, 'import numpy as np\n'), ((2838, 2872), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', 'self.C', '(5)'], {'padding': '(2)'}), '(1, self.C, 5, padding=2)\n', (2847, 2872), True, 'import torch.nn as nn\n'), ((3365, 3374), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3372, 3374), True, 'import torch.nn as nn\n'), ((3460, 3499), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.C', 'self.C', '(5)'], {'padding': '(2)'}), '(self.C, self.C, 5, padding=2)\n', (3469, 3499), True, 'import torch.nn as nn\n'), ((3519, 3541), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['self.C'], {}), '(self.C)\n', (3533, 3541), True, 'import torch.nn as nn\n'), ((3563, 3572), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3570, 3572), True, 'import torch.nn as nn\n'), ((3658, 3697), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.C', 'self.C', '(5)'], {'padding': '(2)'}), '(self.C, self.C, 5, padding=2)\n', (3667, 3697), True, 'import torch.nn as nn\n'), ((3717, 3739), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['self.C'], {}), '(self.C)\n', (3731, 3739), True, 'import torch.nn as nn\n'), ((3761, 3770), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3768, 3770), True, 'import torch.nn as nn\n'), ((3856, 3895), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.C', 'self.C', '(5)'], {'padding': '(2)'}), '(self.C, self.C, 5, padding=2)\n', (3865, 3895), True, 'import torch.nn as nn\n'), ((3915, 3937), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['self.C'], {}), '(self.C)\n', (3929, 3937), True, 'import torch.nn as nn\n'), ((3959, 3968), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3966, 3968), True, 'import torch.nn as nn\n'), ((4054, 4093), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.C', 'self.C', '(5)'], {'padding': '(2)'}), '(self.C, self.C, 5, padding=2)\n', (4063, 4093), True, 'import torch.nn as nn\n'), ((4113, 4135), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['self.C'], {}), '(self.C)\n', (4127, 4135), True, 'import torch.nn as nn\n'), ((4157, 4166), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4164, 4166), True, 'import torch.nn as nn\n'), ((4188, 4211), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.C', '(1)', '(1)'], {}), '(self.C, 1, 1)\n', (4197, 4211), True, 'import torch.nn as nn\n'), ((4239, 4288), 'torch.nn.Hardtanh', 'nn.Hardtanh', ([], {'min_val': 'self.Tmin', 'max_val': 'self.Tmax'}), '(min_val=self.Tmin, max_val=self.Tmax)\n', (4250, 4288), True, 'import torch.nn as nn\n'), ((4315, 4348), 'torch.nn.Hardtanh', 'nn.Hardtanh', ([], {'min_val': '(0)', 'max_val': '(1)'}), '(min_val=0, max_val=1)\n', (4326, 4348), True, 'import torch.nn as nn\n'), ((4391, 4403), 'torch.log', 'torch.log', (['t'], {}), '(t)\n', (4400, 4403), False, 'import torch\n'), ((4931, 4946), 'torch.exp', 'torch.exp', (['logt'], {}), '(logt)\n', (4940, 4946), False, 'import torch\n'), ((4959, 4975), 'torch.pow', 'torch.pow', (['t', '(-1)'], {}), '(t, -1)\n', (4968, 4975), False, 'import torch\n')]
|
#!/usr/bin/env python
#
# Copyright 2016 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from lxml import etree
import structlog
from netconf.nc_rpc.rpc import Rpc
import netconf.nc_common.error as ncerror
from netconf.constants import Constants as C
from netconf.utils import filter_tag_match
from twisted.internet.defer import inlineCallbacks, returnValue
import dicttoxml
from simplejson import dumps, load
log = structlog.get_logger()
class Get(Rpc):
def __init__(self, request, grpc_client, session):
super(Get, self).__init__(request, grpc_client, session)
self._validate_parameters()
@inlineCallbacks
def execute(self):
if self.rpc_response.is_error:
returnValue(self.rpc_response)
log.info('get-request', session=self.session.session_id,
request=self.request)
rpc = self.get_voltha_rpc(self.request)
if not rpc:
log.info('unsupported-request', request=self.request)
self.rpc_response.is_error = True
self.rpc_response.node = ncerror.BadMsg(self.request)
return
# Invoke voltha via the grpc client
res_dict = yield self.grpc_client.invoke_voltha_api(rpc)
# convert dict to xml
xml = dicttoxml.dicttoxml(res_dict, attr_type=True)
log.info('voltha-info', res=res_dict, xml=xml)
root_elem = self.get_root_element(xml)
# Build the yang response
self.rpc_response.node = self.rpc_response.build_yang_response(
root_elem, self.request)
self.rpc_response.is_error = False
returnValue(self.rpc_response)
def _validate_parameters(self):
log.info('validate-parameters', session=self.session.session_id)
# Validate the GET command
if self.request:
try:
if self.request['command'] != 'get':
self.rpc_response.is_error = True
self.rpc_response.node = ncerror.BadMsg('No GET in get '
'request')
if self.request.has_key('filter'):
if not self.request.has_key('class'):
self.rpc_response.is_error = True
self.rpc_response.node = ncerror.BadMsg(
'Missing filter sub-element')
except Exception as e:
self.rpc_response.is_error = True
self.rpc_response.node = ncerror.BadMsg(self.request)
return
def get_voltha_rpc(self, request):
if request.has_key('class'):
rpcs = self.rpc_request_mapping.get(request['class'])
if rpcs is None:
return None
for rpc in rpcs:
if request.has_key('subclass'):
# search first for subclass
if rpc['subclass'] and request['subclass'] == rpc[
'subclass']:
return rpc['rpc']
# If we are here then no subclass exists. Just return the rpc
# associated with theNone subclass
for rpc in rpcs:
if rpc['subclass'] is None:
return rpc['rpc']
return None
# Supported Get Methods
rpc_request_mapping = {
'Voltha': [
{'subclass': None,
'rpc': 'VolthaGlobalService-GetVoltha'
}],
'VolthaInstance': [
{'subclass': None,
'rpc': 'VolthaLocalService-GetVolthaInstance'
},
{'subclass': 'health',
'rpc': 'VolthaLocalService-GetHealth'
},
{'subclass': 'adapters',
'rpc': 'VolthaLocalService-ListAdapters'
},
{'subclass': 'logical_devices',
'rpc': 'VolthaLocalService-ListLogicalDevices'
},
{'subclass': 'devices',
'rpc': 'VolthaLocalService-ListDevices'
},
{'subclass': 'device_types',
'rpc': 'VolthaLocalService-ListDeviceTypes'
},
{'subclass': 'device_groups',
'rpc': 'VolthaLocalService-ListDeviceGroups'
},
],
'VolthaInstances': [
{'subclass': None,
'rpc': 'VolthaGlobalService-ListVolthaInstances'
}],
}
|
[
"twisted.internet.defer.returnValue",
"netconf.nc_common.error.BadMsg",
"dicttoxml.dicttoxml",
"structlog.get_logger"
] |
[((950, 972), 'structlog.get_logger', 'structlog.get_logger', ([], {}), '()\n', (970, 972), False, 'import structlog\n'), ((1800, 1845), 'dicttoxml.dicttoxml', 'dicttoxml.dicttoxml', (['res_dict'], {'attr_type': '(True)'}), '(res_dict, attr_type=True)\n', (1819, 1845), False, 'import dicttoxml\n'), ((2145, 2175), 'twisted.internet.defer.returnValue', 'returnValue', (['self.rpc_response'], {}), '(self.rpc_response)\n', (2156, 2175), False, 'from twisted.internet.defer import inlineCallbacks, returnValue\n'), ((1243, 1273), 'twisted.internet.defer.returnValue', 'returnValue', (['self.rpc_response'], {}), '(self.rpc_response)\n', (1254, 1273), False, 'from twisted.internet.defer import inlineCallbacks, returnValue\n'), ((1597, 1625), 'netconf.nc_common.error.BadMsg', 'ncerror.BadMsg', (['self.request'], {}), '(self.request)\n', (1611, 1625), True, 'import netconf.nc_common.error as ncerror\n'), ((2515, 2554), 'netconf.nc_common.error.BadMsg', 'ncerror.BadMsg', (['"""No GET in get request"""'], {}), "('No GET in get request')\n", (2529, 2554), True, 'import netconf.nc_common.error as ncerror\n'), ((3036, 3064), 'netconf.nc_common.error.BadMsg', 'ncerror.BadMsg', (['self.request'], {}), '(self.request)\n', (3050, 3064), True, 'import netconf.nc_common.error as ncerror\n'), ((2835, 2879), 'netconf.nc_common.error.BadMsg', 'ncerror.BadMsg', (['"""Missing filter sub-element"""'], {}), "('Missing filter sub-element')\n", (2849, 2879), True, 'import netconf.nc_common.error as ncerror\n')]
|
from queue import PriorityQueue
from components import *
class AStar:
"""
A star algorithm implementation
f(n) = g(n) + h(n)
"""
def __init__(self):
self.paths = [
KEY_RIGHT,
KEY_LEFT,
KEY_UP,
KEY_DOWN
]
self.invalid = {
KEY_UP: KEY_DOWN,
KEY_DOWN: KEY_UP,
KEY_LEFT: KEY_RIGHT,
KEY_RIGHT: KEY_LEFT
}
self.moves = 0
def collides(self, headPosition, snake):
""" Check for body collision on the next step """
return any([body.position == headPosition for body in snake.body[: -1]])
def getDistances(self, goal, current, snake):
""" Finding distance for each path """
distances = PriorityQueue()
self.moves += 1
for path in self.paths:
x = None
y = None
goal_x = goal.x
goal_y = goal.y
if path is KEY_UP:
x = current.x
y = current.y - 1
elif path is KEY_DOWN:
x = current.x
y = current.y + 1
elif path is KEY_RIGHT:
x = current.x + 1
y = current.y
elif path is KEY_LEFT:
x = current.x - 1
y = current.y
if self.collides((x, y), snake):
continue
gn = self.moves
hn = abs(x - goal_x) + abs(y - goal_y)
fn = gn + hn
# add to queue
distances.put((fn, path))
return distances
def getKey(self, food, snake):
""" Returns the next step """
if snake.head.x == food.x and snake.head.y:
self.moves = 0
return snake.direction
distances = self.getDistances(food, snake.head, snake)
if distances.qsize() == 0:
return snake.direction
return distances.get()[1]
|
[
"queue.PriorityQueue"
] |
[((782, 797), 'queue.PriorityQueue', 'PriorityQueue', ([], {}), '()\n', (795, 797), False, 'from queue import PriorityQueue\n')]
|
from config import MONGO_CONNECTION_URL,MONGO_DB_SCHEMA
from config import REDIS_SERVER_HOST,REDIS_SERVER_PORT
from app import server
from pymongo import MongoClient
from anuvaad_auditor.loghandler import log_info, log_exception
from utilities import AppContext
from flask import g
import redis
client = MongoClient(MONGO_CONNECTION_URL)
def get_db():
# log_info("Establishing connection with mongo", AppContext.getContext())
return client[MONGO_DB_SCHEMA]
def get_redis():
if 'redisdb' not in g:
# log_info("Establishing connection with redis store", AppContext.getContext())
g.redisdb = redis.Redis(host=REDIS_SERVER_HOST, port=REDIS_SERVER_PORT, db=4)
return g.redisdb
# def get_db():
# with server.app_context():
# if 'mongodb' not in g:
# log_info("Establishing connection with mongo", AppContext.getContext())
# client = MongoClient(MONGO_CONNECTION_URL)
# g.mongodb = client[MONGO_DB_SCHEMA]
# return g.mongodb
|
[
"pymongo.MongoClient",
"redis.Redis"
] |
[((304, 337), 'pymongo.MongoClient', 'MongoClient', (['MONGO_CONNECTION_URL'], {}), '(MONGO_CONNECTION_URL)\n', (315, 337), False, 'from pymongo import MongoClient\n'), ((620, 685), 'redis.Redis', 'redis.Redis', ([], {'host': 'REDIS_SERVER_HOST', 'port': 'REDIS_SERVER_PORT', 'db': '(4)'}), '(host=REDIS_SERVER_HOST, port=REDIS_SERVER_PORT, db=4)\n', (631, 685), False, 'import redis\n')]
|
import logging
from app import db
from app.models import Inventory, Datacenter, Rack, Item
import random
import string
from datetime import datetime
log = logging.getLogger(__name__)
DC_RACK_MAX = 20
ITEM_MAX = 1000
cities = ["Lisbon", "Porto", "Madrid", "Barcelona", "Frankfurt", "London"]
models = ["Server MX", "Server MY", "Server DL380", "Server x440", "Server x460"]
datacenters = list()
def get_random_name(names_list, size=1):
return names_list[random.randrange(0, len(names_list))]
def serial_generator(size=6, chars=string.ascii_uppercase + string.digits):
return "".join(random.choice(chars) for _ in range(size))
for city in cities:
datacenter = Datacenter()
datacenter.name = "DC %s" % city
datacenter.address = city
datacenters.append(datacenter)
db.session.add(datacenter)
log.info(datacenter)
try:
db.session.commit()
for num in range(1, DC_RACK_MAX):
rack = Rack()
rack.num = num
rack.datacenter = datacenter
db.session.add(rack)
except Exception as e:
log.error("Creating Datacenter: %s", e)
db.session.rollback()
for i in range(1, ITEM_MAX):
item = Item()
item.serial_number = serial_generator()
item.model = get_random_name(models)
db.session.add(item)
log.info(item)
try:
db.session.commit()
except Exception as e:
log.error("Creating Item: %s", e)
db.session.rollback()
|
[
"app.models.Rack",
"app.db.session.rollback",
"random.choice",
"app.db.session.commit",
"app.models.Item",
"app.db.session.add",
"logging.getLogger",
"app.models.Datacenter"
] |
[((156, 183), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (173, 183), False, 'import logging\n'), ((681, 693), 'app.models.Datacenter', 'Datacenter', ([], {}), '()\n', (691, 693), False, 'from app.models import Inventory, Datacenter, Rack, Item\n'), ((800, 826), 'app.db.session.add', 'db.session.add', (['datacenter'], {}), '(datacenter)\n', (814, 826), False, 'from app import db\n'), ((1205, 1211), 'app.models.Item', 'Item', ([], {}), '()\n', (1209, 1211), False, 'from app.models import Inventory, Datacenter, Rack, Item\n'), ((1301, 1321), 'app.db.session.add', 'db.session.add', (['item'], {}), '(item)\n', (1315, 1321), False, 'from app import db\n'), ((869, 888), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (886, 888), False, 'from app import db\n'), ((1358, 1377), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1375, 1377), False, 'from app import db\n'), ((599, 619), 'random.choice', 'random.choice', (['chars'], {}), '(chars)\n', (612, 619), False, 'import random\n'), ((950, 956), 'app.models.Rack', 'Rack', ([], {}), '()\n', (954, 956), False, 'from app.models import Inventory, Datacenter, Rack, Item\n'), ((1037, 1057), 'app.db.session.add', 'db.session.add', (['rack'], {}), '(rack)\n', (1051, 1057), False, 'from app import db\n'), ((1142, 1163), 'app.db.session.rollback', 'db.session.rollback', ([], {}), '()\n', (1161, 1163), False, 'from app import db\n'), ((1455, 1476), 'app.db.session.rollback', 'db.session.rollback', ([], {}), '()\n', (1474, 1476), False, 'from app import db\n')]
|
from ansible_task_executor import AnsibleTaskExecutor
import os
class HOST(object):
def __init__(self, ip, user, password, subnet=None, role=None):
self.ip = ip
self.user = user
self.password = password
self.role = role
self.cpu = None
self.memory = None
self.disk = None
self.os_type = None
self.subnet = subnet
self.ansible_inventory = "{} ansible_ssh_user={} ansible_ssh_pass={} role={}".format(ip, user, password, role)
self.executor = AnsibleTaskExecutor()
self.proxy = os.getenv('https_proxy')
def install(self):
if self.role == "compute":
result_code, callback = self.executor.execute('install-host.yml', self.ansible_inventory,
extra_vars={
"proxy_env": {'https_proxy': self.proxy},
"subnet": self.subnet
})
elif self.role == "container":
result_code, callback = self.executor.execute('install-container-host.yml', self.ansible_inventory,
extra_vars={
'https_proxy': self.proxy,
"subnet": self.subnet
})
else:
raise Exception("host role not supported")
if result_code:
raise Exception(callback.get_all_result())
for event in callback.host_ok:
if event['task'] == "Print total memory size" and event['host'] == self.ip:
self.memory = event['result']['msg']
elif event['task'] == "Print total cpu count" and event['host'] == self.ip:
self.cpu = event['result']['msg']
elif event['task'] == "Print os type" and event['host'] == self.ip:
self.os_type = event['result']['msg']
elif event['task'] == "Print virt vol disk usage" and event['host'] == self.ip:
self.disk = int(event['result']['msg'])
else:
pass
return self.cpu, self.memory, self.disk, self.os_type
def static_routes(self, routes):
result_code, callback = self.executor.execute('route.yml', self.ansible_inventory,
extra_vars={"routes": routes})
if result_code:
raise Exception(callback.get_failed_result())
def get_info(self):
"""
Get host cpu/mem/disk usage
:return:
cpu, mem, disk, engine_status(0,1)
"""
result_code, callback = self.executor.execute('check-host.yml', self.ansible_inventory,
extra_vars={"role": self.role})
if result_code:
raise Exception(callback.get_failed_result())
for event in callback.host_ok:
if event['task'] == "Print total memory avail" and event['host'] == self.ip:
memory_avail = event['result']['msg']
elif event['task'] == "Print cpu load usage" and event['host'] == self.ip:
cpu_load = event['result']['msg']
elif event['task'] == "Print virt vol disk usage" and event['host'] == self.ip:
disk_usage = event['result']['msg']
elif event['task'] == "Check engine liveness" and event['host'] == self.ip:
engine_status = event['result']['rc']
else:
pass
return memory_avail, cpu_load, disk_usage, engine_status
def port_dnat(self, rules):
"""
Set iptables rules
:return:
none
"""
result_code, callback = self.executor.execute('iptables.yml', self.ansible_inventory,
extra_vars={"rules": rules})
if result_code:
raise Exception(callback.get_failed_result())
class MultiHOST(HOST):
def __init__(self, hosts):
self.ansible_inventory = ""
for h in hosts:
if len(h) != 4:
continue
self.ansible_inventory += "{} ansible_ssh_user={} ansible_ssh_pass={} role={}".format(
h[0], h[1], h[2], h[3]) + "\n"
self.executor = AnsibleTaskExecutor()
|
[
"ansible_task_executor.AnsibleTaskExecutor",
"os.getenv"
] |
[((535, 556), 'ansible_task_executor.AnsibleTaskExecutor', 'AnsibleTaskExecutor', ([], {}), '()\n', (554, 556), False, 'from ansible_task_executor import AnsibleTaskExecutor\n'), ((578, 602), 'os.getenv', 'os.getenv', (['"""https_proxy"""'], {}), "('https_proxy')\n", (587, 602), False, 'import os\n'), ((4537, 4558), 'ansible_task_executor.AnsibleTaskExecutor', 'AnsibleTaskExecutor', ([], {}), '()\n', (4556, 4558), False, 'from ansible_task_executor import AnsibleTaskExecutor\n')]
|
#!/usr/bin/env python
from __future__ import division, absolute_import, print_function
import numpy as np
import jams.const as const
def tcherkez(Rstar, Phi=0.3, T=0.056,
a2=1.0012, a3=1.0058, a4=1.0161,
t1=0.9924, t2=1.0008, g=20e-3,
RG=False, Rchl=False, Rcyt=False, fullmodel=True):
"""
Calculates the Tcherkez model of 13C-discrimiantion in the Calvin cycle.
Definition
----------
def tcherkez(Rstar, Phi=0.3, T=0.056, a2=1.0012, a3=1.0058, a4=1.0161,
t1=0.9924, t2=1.0008, g=20e-3
RG=False, Rchl=False, Rcyt=False, fullmodel=True):
Input
-----
Rstar Isotope ratio of assimilated carbon, e.g. of Farquhar et al. (1982) model
Optional Input
--------------
Phi Vo/Vc: ratio of carboxylateion to oxygentation of Rubisco (default: 0.3)
T Relative flux of starch synthesis [mol(C6 of starch)/mol(CO2 assimilated)] (default: 0.056)
a2 Inverse fractionation associated with aldolase
for the C-2 position of FBP (Fructose-1,6-bisphosphate) (default: 1.0012)
a3 Same for C-3 of FBP (default: 1.0058)
a4 Same for C-4 of FBP (default: 1.0161)
t1 Inverse fractionation associated with trankelotase
for C-1 in E4P (erythrose-4-phosphate) and R5P (ribose-5-phosphate) (default: 0.9924)
t2 Same for C-2 of X5P (xylulose-5-phosphate) (default: 1.0008)
g Isotope discrimination of photorespiratory decarboxylation of Gly (Glycine) (default: 20e-3)
RG If True, output isotope ratio of G3P (3-phosphoglyceraldehyde
or glyceraldehyde-3-phosphate) (default: False)
Rchl If True, output isotope ratio of chloroplastic hexoses and transitory starch (default: False)
Rcyt If True, output isotope ratio of cytoplasmic hexoses (default: False)
fullmodel If True, output RG, Rchl and Rcyt (default: True)
Output
------
RG, Rchl, Rcyt if fullmodel=True
Restrictions
------------
If at least one of RG, Rchl or Rcyt is given then fullmode=False.
References
----------
<NAME>, <NAME>, <NAME> & <NAME>, Theoretical considerations about carbon isotope
distribution in glucose of C3 plants, Functional Plant Biology 31, 857-877, 2004
<NAME>, <NAME>, <NAME>, <NAME> & <NAME>, Experimental evidence for diel variations
of the carbon isotope composition in leaf, stem and phloem sap organic matter in Ricinus communis,
Plant, Cell and Environment 31, 941-953, 2008
Examples
--------
>>> a = -4.4e-3
>>> b = -27e-3
>>> ca = 353e-6
>>> ci = 0.7*ca
>>> Delta = a+(b-a)*ci/ca
>>> delta_a1 = -8e-3
>>> Ra1 = (delta_a1+1.)*const.R13VPDB
>>> Rstar1 = (1.-Delta)*Ra1
>>> from autostring import astr
>>> print(astr((np.array(tcherkez(Rstar1, Phi=0.3, T=0.056))/const.R13VPDB-1.)*1000.,3,pp=True))
['12.764' '17.125' '12.978']
>>> delta_a2 = -7.8e-3
>>> Ra2 = (delta_a2+1.)*const.R13VPDB
>>> Rstar2 = (1.-Delta)*Ra2
>>> R1 = (np.array(tcherkez([Rstar1, Rstar2], Rcyt=True))/const.R13VPDB-1.)*1000.
>>> print(astr(R1,3,pp=True))
[['12.978' '13.182']]
>>> R1, R2 = tcherkez([Rstar1, Rstar2], Rchl=True, Rcyt=True)
>>> print(astr((R1/const.R13VPDB-1.)*1000.,3,pp=True))
['17.125' '17.330']
>>> print(astr((R2/const.R13VPDB-1.)*1000.,3,pp=True))
['12.978' '13.182']
License
-------
This file is part of the JAMS Python package, distributed under the MIT
License. The JAMS Python package originates from the former UFZ Python library,
Department of Computational Hydrosystems, Helmholtz Centre for Environmental
Research - UFZ, Leipzig, Germany.
Copyright (c) 2012-2013 <NAME> - mc (at) macu (dot) de
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
History
-------
Written, MC, Jan 2012
Modified, MC, Feb 2013 - ported to Python 3
"""
#
if (RG | Rchl | Rcyt):
fullmodel = False
if fullmodel:
RG = True
Rchl = True
Rcyt = True
#
a2tilde = (1.+0.5*Phi-T) / ((2.*a2+1.)/3.+Phi*(2.*a2-0.5)/3.+T*(a2-2.))
a3tilde = (1.+0.5*Phi-T) / ((2.*a3+1.)/3.+Phi*(2.*a3-0.5)/3.+T*(a3-2.))
t1tilde = (1.+3.*T)/(t1+3.*T)*t1
t2tilde = (1.+3.*T)/(t2+3.*T)*t2
eps = a3*a3tilde
epsdash = (t1tilde+1.5*Phi)*a3*a3tilde/(3.*(1.+0.5*Phi-(1.+t2tilde)*a2*a2tilde/3.))
iRG = np.array(Rstar) / (1.+Phi*(0.5-(1.+g)/(2.+g)*(eps+2.*a2*a2tilde*epsdash)/3.)+T*(a4-1.))
iRchl = 1./6.*(epsdash*(1.+(a2*a2tilde*t2tilde)/t2)+eps*(2.+t1tilde/t1)+a4) * iRG
iRcyt = 1./6.*(2.*eps+3.*(a2+1.)/(a2+2.)*epsdash*a2tilde+3.*a3tilde/(2.+a3)*(a3+2.*a4/(1.+a4))) * iRG
out = []
if RG:
out += [iRG]
if Rchl:
out += [iRchl]
if Rcyt:
out += [iRcyt]
return out
if __name__ == '__main__':
import doctest
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
|
[
"numpy.array",
"doctest.testmod"
] |
[((6383, 6440), 'doctest.testmod', 'doctest.testmod', ([], {'optionflags': 'doctest.NORMALIZE_WHITESPACE'}), '(optionflags=doctest.NORMALIZE_WHITESPACE)\n', (6398, 6440), False, 'import doctest\n'), ((5918, 5933), 'numpy.array', 'np.array', (['Rstar'], {}), '(Rstar)\n', (5926, 5933), True, 'import numpy as np\n')]
|
import re
import typing as t
from .typed import StringTyped
class RegexDescriptor(StringTyped):
def __init__(self, *args, pattern: t.Union[str, re.Pattern], **kwargs) -> None:
super().__init__(*args, **kwargs)
if isinstance(pattern, str):
pattern = re.compile(pattern)
self.pattern = pattern
def __set__(self, instance: object, value: str) -> None:
if not self.pattern.match(value):
raise ValueError("String must match the regex pattern.")
super().__set__(instance, value)
|
[
"re.compile"
] |
[((285, 304), 're.compile', 're.compile', (['pattern'], {}), '(pattern)\n', (295, 304), False, 'import re\n')]
|
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/DSTU2/substance.html
Release: DSTU2
Version: 1.0.2
Revision: 7202
"""
from typing import List as ListType
from pydantic import Field
from . import domainresource, fhirtypes
from .backboneelement import BackboneElement
class Substance(domainresource.DomainResource):
"""A homogeneous material with a definite composition
A homogeneous material with a definite composition.
"""
resource_type = Field("Substance", const=True)
identifier: ListType[fhirtypes.IdentifierType] = Field(
None,
alias="identifier",
title="List of Unique identifier (represented as 'dict' in JSON)",
description="Unique identifier for the substance",
element_property=True,
)
category: ListType[fhirtypes.CodeableConceptType] = Field(
None,
alias="category",
title="List of Type `CodeableConcept` (represented as `dict` in JSON).",
description="What class/type of substance this is",
element_property=True,
)
code: fhirtypes.CodeableConceptType = Field(
None,
alias="code",
title="Type `CodeableConcept` (represented as `dict` in JSON).",
description="What substance this is",
element_property=True,
)
description: fhirtypes.String = Field(
None,
alias="description",
title="Type `String` (represented as `dict` in JSON)",
description="Textual description of the substance, comments",
element_property=True,
)
instance: ListType[fhirtypes.SubstanceInstanceType] = Field(
None,
alias="instance",
title="List of Type `SubstanceInstance` (represented as `dict` in JSON).",
description="If this describes a specific package/container of the substance",
element_property=True,
)
ingredient: ListType[fhirtypes.SubstanceIngredientType] = Field(
None,
alias="ingredient",
title="List of Type `SubstanceIngredient` (represented as `dict` in JSON).",
description="Composition information about the substance",
element_property=True,
)
class SubstanceInstance(BackboneElement):
"""If this describes a specific package/container of the substance
If this describes a specific package/container of the substance.
"""
resource_type = Field("SubstanceInstance", const=True)
identifier: fhirtypes.IdentifierType = Field(
None,
alias="identifier",
title="Identifier of the package/container",
description=(
"Identifier associated with the package/container"
" (usually a label affixed directly)"
),
element_property=True,
)
expiry: fhirtypes.DateTime = Field(
None,
alias="expiry",
title="When no longer valid to use",
description=(
"When the substance is no longer valid to use. "
"For some substances, a single arbitrary date is used for expiry."
),
element_property=True,
)
quantity: fhirtypes.QuantityType = Field(
None,
alias="quantity",
title=(
"Type `Quantity` referencing `SimpleQuantity` (represented as `dict` in "
"JSON)."
),
description="Amount of substance in the package",
element_property=True,
)
class SubstanceIngredient(BackboneElement):
"""Composition information about the substance
A substance can be composed of other substances.
"""
resource_type = Field("SubstanceIngredient", const=True)
quantity: fhirtypes.RatioType = Field(
None,
alias="quantity",
title="Type `Ratio` (represented as `dict` in JSON).",
description="Optional amount (concentration)",
element_property=True,
)
substance: fhirtypes.ReferenceType = Field(
None,
alias="substance",
title=(
"`Reference` items referencing `Substance` (represented as `dict` in"
" JSON)"
),
description="A component of the substance",
enum_reference_types=["Substance"],
element_property=True,
)
|
[
"pydantic.Field"
] |
[((470, 500), 'pydantic.Field', 'Field', (['"""Substance"""'], {'const': '(True)'}), "('Substance', const=True)\n", (475, 500), False, 'from pydantic import Field\n'), ((555, 736), 'pydantic.Field', 'Field', (['None'], {'alias': '"""identifier"""', 'title': '"""List of Unique identifier (represented as \'dict\' in JSON)"""', 'description': '"""Unique identifier for the substance"""', 'element_property': '(True)'}), '(None, alias=\'identifier\', title=\n "List of Unique identifier (represented as \'dict\' in JSON)",\n description=\'Unique identifier for the substance\', element_property=True)\n', (560, 736), False, 'from pydantic import Field\n'), ((832, 1018), 'pydantic.Field', 'Field', (['None'], {'alias': '"""category"""', 'title': '"""List of Type `CodeableConcept` (represented as `dict` in JSON)."""', 'description': '"""What class/type of substance this is"""', 'element_property': '(True)'}), "(None, alias='category', title=\n 'List of Type `CodeableConcept` (represented as `dict` in JSON).',\n description='What class/type of substance this is', element_property=True)\n", (837, 1018), False, 'from pydantic import Field\n'), ((1100, 1261), 'pydantic.Field', 'Field', (['None'], {'alias': '"""code"""', 'title': '"""Type `CodeableConcept` (represented as `dict` in JSON)."""', 'description': '"""What substance this is"""', 'element_property': '(True)'}), "(None, alias='code', title=\n 'Type `CodeableConcept` (represented as `dict` in JSON).', description=\n 'What substance this is', element_property=True)\n", (1105, 1261), False, 'from pydantic import Field\n'), ((1336, 1518), 'pydantic.Field', 'Field', (['None'], {'alias': '"""description"""', 'title': '"""Type `String` (represented as `dict` in JSON)"""', 'description': '"""Textual description of the substance, comments"""', 'element_property': '(True)'}), "(None, alias='description', title=\n 'Type `String` (represented as `dict` in JSON)', description=\n 'Textual description of the substance, comments', element_property=True)\n", (1341, 1518), False, 'from pydantic import Field\n'), ((1615, 1839), 'pydantic.Field', 'Field', (['None'], {'alias': '"""instance"""', 'title': '"""List of Type `SubstanceInstance` (represented as `dict` in JSON)."""', 'description': '"""If this describes a specific package/container of the substance"""', 'element_property': '(True)'}), "(None, alias='instance', title=\n 'List of Type `SubstanceInstance` (represented as `dict` in JSON).',\n description=\n 'If this describes a specific package/container of the substance',\n element_property=True)\n", (1620, 1839), False, 'from pydantic import Field\n'), ((1932, 2135), 'pydantic.Field', 'Field', (['None'], {'alias': '"""ingredient"""', 'title': '"""List of Type `SubstanceIngredient` (represented as `dict` in JSON)."""', 'description': '"""Composition information about the substance"""', 'element_property': '(True)'}), "(None, alias='ingredient', title=\n 'List of Type `SubstanceIngredient` (represented as `dict` in JSON).',\n description='Composition information about the substance',\n element_property=True)\n", (1937, 2135), False, 'from pydantic import Field\n'), ((2384, 2422), 'pydantic.Field', 'Field', (['"""SubstanceInstance"""'], {'const': '(True)'}), "('SubstanceInstance', const=True)\n", (2389, 2422), False, 'from pydantic import Field\n'), ((2467, 2679), 'pydantic.Field', 'Field', (['None'], {'alias': '"""identifier"""', 'title': '"""Identifier of the package/container"""', 'description': '"""Identifier associated with the package/container (usually a label affixed directly)"""', 'element_property': '(True)'}), "(None, alias='identifier', title='Identifier of the package/container',\n description=\n 'Identifier associated with the package/container (usually a label affixed directly)'\n , element_property=True)\n", (2472, 2679), False, 'from pydantic import Field\n'), ((2786, 3013), 'pydantic.Field', 'Field', (['None'], {'alias': '"""expiry"""', 'title': '"""When no longer valid to use"""', 'description': '"""When the substance is no longer valid to use. For some substances, a single arbitrary date is used for expiry."""', 'element_property': '(True)'}), "(None, alias='expiry', title='When no longer valid to use',\n description=\n 'When the substance is no longer valid to use. For some substances, a single arbitrary date is used for expiry.'\n , element_property=True)\n", (2791, 3013), False, 'from pydantic import Field\n'), ((3126, 3325), 'pydantic.Field', 'Field', (['None'], {'alias': '"""quantity"""', 'title': '"""Type `Quantity` referencing `SimpleQuantity` (represented as `dict` in JSON)."""', 'description': '"""Amount of substance in the package"""', 'element_property': '(True)'}), "(None, alias='quantity', title=\n 'Type `Quantity` referencing `SimpleQuantity` (represented as `dict` in JSON).'\n , description='Amount of substance in the package', element_property=True)\n", (3131, 3325), False, 'from pydantic import Field\n'), ((3583, 3623), 'pydantic.Field', 'Field', (['"""SubstanceIngredient"""'], {'const': '(True)'}), "('SubstanceIngredient', const=True)\n", (3588, 3623), False, 'from pydantic import Field\n'), ((3661, 3825), 'pydantic.Field', 'Field', (['None'], {'alias': '"""quantity"""', 'title': '"""Type `Ratio` (represented as `dict` in JSON)."""', 'description': '"""Optional amount (concentration)"""', 'element_property': '(True)'}), "(None, alias='quantity', title=\n 'Type `Ratio` (represented as `dict` in JSON).', description=\n 'Optional amount (concentration)', element_property=True)\n", (3666, 3825), False, 'from pydantic import Field\n'), ((3905, 4136), 'pydantic.Field', 'Field', (['None'], {'alias': '"""substance"""', 'title': '"""`Reference` items referencing `Substance` (represented as `dict` in JSON)"""', 'description': '"""A component of the substance"""', 'enum_reference_types': "['Substance']", 'element_property': '(True)'}), "(None, alias='substance', title=\n '`Reference` items referencing `Substance` (represented as `dict` in JSON)'\n , description='A component of the substance', enum_reference_types=[\n 'Substance'], element_property=True)\n", (3910, 4136), False, 'from pydantic import Field\n')]
|
from django.utils import translation
from bitcaster.config import settings
from bitcaster.utils.language import get_attr
class UserLanguageMiddleware:
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
if request.user and request.user.is_authenticated:
translation.activate(request.user.language)
response = self.get_response(request)
# FIXME: here user can be Application due TriggerKeyAuthentication
if get_attr(request, 'user.is_authenticated') and request.user.is_authenticated:
response.set_cookie(settings.LANGUAGE_COOKIE_NAME, request.user.language)
return response
|
[
"django.utils.translation.activate",
"bitcaster.utils.language.get_attr"
] |
[((338, 381), 'django.utils.translation.activate', 'translation.activate', (['request.user.language'], {}), '(request.user.language)\n', (358, 381), False, 'from django.utils import translation\n'), ((515, 557), 'bitcaster.utils.language.get_attr', 'get_attr', (['request', '"""user.is_authenticated"""'], {}), "(request, 'user.is_authenticated')\n", (523, 557), False, 'from bitcaster.utils.language import get_attr\n')]
|
#!/usr/bin/env python
import time
import sys
millis = int(round(time.time() * 1000))
sys.stdout.write("~/.ros/rtabmap_test_" + str(millis)+ '.db')
|
[
"time.time"
] |
[((64, 75), 'time.time', 'time.time', ([], {}), '()\n', (73, 75), False, 'import time\n')]
|
"""Admin pages for schemes models.
On default generates list view admins for all models
"""
from django.contrib.admin import StackedInline, register
from espressodb.base.admin import register_admins
from espressodb.base.admin import ListViewAdmin as LVA
from strops.schemes.models import (
ExpansionScheme,
ExpansionParameter,
ExpansionOrder,
OperatorRelation,
)
class ExpansionParameterInline(StackedInline):
model = ExpansionParameter
extra = 1
@register(ExpansionScheme)
class ExpansionSchemeAdmin(LVA):
inlines = (ExpansionParameterInline,)
class ExpansionOrderInline(StackedInline):
model = ExpansionOrder
extra = 1
register_admins(
"strops.schemes",
exclude_models=["ExpansionScheme", "OperatorRelation", "ExpansionOrder"],
)
@register(OperatorRelation)
class OperatorRelationAdmin(LVA):
inlines = (ExpansionOrderInline,)
autocomplete_fields = ("source", "target")
|
[
"django.contrib.admin.register",
"espressodb.base.admin.register_admins"
] |
[((479, 504), 'django.contrib.admin.register', 'register', (['ExpansionScheme'], {}), '(ExpansionScheme)\n', (487, 504), False, 'from django.contrib.admin import StackedInline, register\n'), ((668, 779), 'espressodb.base.admin.register_admins', 'register_admins', (['"""strops.schemes"""'], {'exclude_models': "['ExpansionScheme', 'OperatorRelation', 'ExpansionOrder']"}), "('strops.schemes', exclude_models=['ExpansionScheme',\n 'OperatorRelation', 'ExpansionOrder'])\n", (683, 779), False, 'from espressodb.base.admin import register_admins\n'), ((790, 816), 'django.contrib.admin.register', 'register', (['OperatorRelation'], {}), '(OperatorRelation)\n', (798, 816), False, 'from django.contrib.admin import StackedInline, register\n')]
|
"""Setup"""
import os
from setuptools import setup, find_packages
# figure out the version
# about = {}
# here = os.path.abspath(os.path.dirname(__file__))
# with open(os.path.join(here, "synapsemonitor", "__version__.py")) as f:
# exec(f.read(), about)
with open("README.md", "r") as fh:
long_description = fh.read()
setup(name='synapsemonitor',
# version=about["__version__"],
version="0.0.2",
description='Synapse monitoring',
url='https://github.com/Sage-Bionetworks/synapseMonitor',
author='<NAME>, <NAME>',
author_email='<EMAIL>',
long_description=long_description,
long_description_content_type="text/markdown",
license='Apache',
packages=find_packages(),
zip_safe=False,
python_requires='>=3.6, <3.9',
entry_points={'console_scripts': ['synapsemonitor = synapsemonitor.__main__:main']},
install_requires=['synapseclient', 'pandas'])
|
[
"setuptools.find_packages"
] |
[((717, 732), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (730, 732), False, 'from setuptools import setup, find_packages\n')]
|
import urllib.request
import shutil
import gzip
import json
import re
import os
from collections import defaultdict
from scholarmetrics import hindex
from tqdm import tqdm
from app.dblp_parser import parse_dblp, parse_dblp_person, get_dblp_country
from app.myfunctions import get_dblp_url
URL = 'http://dblp.org/xml/'
basedir = os.path.abspath(os.path.dirname(__file__))
DATA_PATH = basedir + '/data/'
STATIC_PATH = basedir + '/app/static/'
def download_dblp() -> None:
"""
Downloads the DBLP dataset and saves it into the data_path, which is usually ./data.
:return:
"""
source_gz = URL + 'dblp.xml.gz'
source_dtd = URL + 'dblp.dtd'
target_gz = DATA_PATH + 'dblp.xml.gz'
target_dtd = DATA_PATH + 'dblp.dtd'
print(' Downloading file ' + source_gz)
with urllib.request.urlopen(source_gz) as response, open(target_gz, 'wb') as fh:
shutil.copyfileobj(response, fh)
print(' Downloading file ' + source_dtd)
with urllib.request.urlopen(source_dtd) as response, open(target_dtd, 'wb') as fh:
shutil.copyfileobj(response, fh)
print(' Download finish!')
print()
def unzip_dblp() -> None:
"""
Unzips the downloaded DBLP dataset.
:return:
"""
source = DATA_PATH + 'dblp.xml.gz'
target = DATA_PATH + 'dblp.xml'
with gzip.open(source, 'rb') as f_in:
with open(target, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
print()
def extract_publications():
"""
Parses the DBLP XML file to json, which can be used by this pipeline.
:return:
"""
source = DATA_PATH + 'dblp.xml'
target = DATA_PATH + 'dblp.json'
parse_dblp(source, target)
print()
def extract_ai_publications() -> list:
"""
Using the venue file (`./app/static/ai_venues.json`) to extract all publications from these respective venues.
:return:
"""
source = DATA_PATH + 'dblp.json'
source_venues = STATIC_PATH + 'ai_venues.json'
target_pubs = DATA_PATH + 'ai_dblp.json'
authors = set()
with open(source_venues, "r", encoding="utf-8") as f:
tmp = json.load(f)
# Create a dict for all instances
venues = dict(pair for d in tmp.values() for pair in d.items())
venues_set = set()
for k, v in venues.items():
venues_set.add(k)
venues_set.update(v)
def get_disambiguated_venue(venue_name: str):
if venue_name in venues:
return venue_name
else:
for k, v in venues.items():
if venue_name in v:
return k
print(' Parsing ' + source)
with open(target_pubs, "w", encoding="utf-8") as out_f:
with open(source, "r", encoding="utf-8") as in_f:
for line in tqdm(in_f):
line = json.loads(line)
if line['booktitle']:
curr_venue = line['booktitle'][0]
elif line['journal']:
curr_venue = line['journal'][0]
curr_venue = re.sub(" \([0-9]+\)$", "", curr_venue)
if curr_venue in venues_set:
line['venue'] = get_disambiguated_venue(curr_venue)
json.dump(line, out_f)
out_f.write("\n")
authors.update(line['author'])
print(' Parse finish! File ai_dblp.json created!')
print()
return list(authors)
def download_semantic_scholar_if_needed(semantic_scholar_path: str, default_count: int = 184, download: bool = False):
"""
Well, as the name says.
:param semantic_scholar_path:
:param default_count:
:param download:
:return:
"""
sem_url = "https://s3-us-west-2.amazonaws.com/ai2-s2-research-public/open-corpus/2020-04-10/"
if not os.path.exists(semantic_scholar_path):
os.mkdir(semantic_scholar_path)
download = True
if download:
print(" Downloading semantic scholar first. ")
with urllib.request.urlopen(sem_url + "manifest.txt") as response, open(semantic_scholar_path + "manifest.txt", 'wb') as fh:
shutil.copyfileobj(response, fh)
with open(semantic_scholar_path + "/manifest.txt", "r") as f:
for line in tqdm(f, total=default_count):
line = line.strip()
with urllib.request.urlopen(sem_url + line) as response, open(
semantic_scholar_path + line, 'wb') as fh:
shutil.copyfileobj(response, fh)
if "s2-corpus-" in line:
with gzip.open(semantic_scholar_path + line, 'rb') as f_in:
with open(semantic_scholar_path + line[:-3], 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(semantic_scholar_path + line)
def match_semantic_scholar(download: bool = False):
"""
Firstly, downloads the Semantic Scholar. Then tries to match all publications and extracts the citations.
:param download:
:return:
"""
source = DATA_PATH + 'ai_dblp.json'
target = DATA_PATH + 'ai_dataset.json'
source_persons = DATA_PATH + 'persons.json'
semantic_scholar_path = DATA_PATH + "semantic_scholar/"
download_semantic_scholar_if_needed(semantic_scholar_path, download=download)
def de_list(x, parse_int: bool = False):
if isinstance(x, list):
if parse_int:
return int(x[0])
return x[0]
if parse_int:
return int(x)
return x
def get_doi(line) -> str:
"""
Get doi for a given line of the data, useful for semantic_scholar matching"
"""
if "ee" in line:
for x in de_list(line["ee"]):
if "doi" in x:
return x.replace("https://doi.org/", "")
with open(source_persons, encoding="utf-8") as file:
persons = [json.loads(line) for line in file]
# Put all author names into set
authors = dict()
for person in persons:
if isinstance(person["author"], list):
for auth in person['author']:
authors[auth] = person['author'][0]
with open(source, "r", encoding="utf-8") as f:
pubs = f.readlines()
pubs = [json.loads(x) for x in pubs]
for pub in pubs:
tmp = pub['author']
for name in pub['author']:
if name in authors:
tmp.append(authors[name])
tmp.remove(name)
pub['author'] = tmp
removed_indices = set()
titles = defaultdict(list)
[titles[x['title'][0].strip(".").lower()].append(i) for i, x in enumerate(pubs)]
files = [file_path for file_path in os.listdir(semantic_scholar_path) if "s2-corpus-" in file_path]
counter = 1
with open(target, 'w', encoding="utf-8") as out_f:
for file_path in files:
print("Reading file ... (", str(counter), "/", str(len(files)), ")")
with open(semantic_scholar_path + file_path, 'r', encoding="utf-8") as in_f:
for line in in_f:
line = json.loads(line)
curr_title = de_list(line['title']).strip().lower()
if curr_title in titles:
index = None
for i in titles[curr_title]:
pub = pubs[i]
doi = get_doi(pub)
if doi and "doi" in line and line["doi"]:
if doi == line["doi"]:
index = i
break
elif "year" in line and de_list(pub["year"], True) == de_list(line["year"], True):
if line["venue"] == "ArXiv":
if pub["journal"] and de_list(pub["journal"]) == "CoRR":
index = i
break
elif pub["journal"] and de_list(pub["journal"]) == "CoRR":
continue
else:
index = i
break
if index and index not in removed_indices:
if 'in_citations' not in pub:
pub['inCitations'] = len(line['inCitations'])
json.dump(pub, out_f)
out_f.write("\n")
removed_indices.add(index)
counter += 1
for i, pub in enumerate(pubs):
if i not in removed_indices:
json.dump(pub, out_f)
out_f.write("\n")
print(' Parse finish! File ai_dataset.json created!')
print()
def extract_persons(author_list: list) -> None:
"""
Extracting all author information from DBLP, as affiliations etc.
:param author_list:
:return:
"""
source = DATA_PATH + 'dblp.xml'
target = DATA_PATH + 'persons'
print(' Parsing ' + source)
parse_dblp_person(source, target, author_list)
print(' Parse finish! File persons.json created!')
print()
def parse_countries() -> None:
"""
Parses country information from the DBLp into the file 'author_countries.json'.
:return: The file 'author_countries.json'
"""
source_country = STATIC_PATH + 'countries_domain.txt'
source_person = DATA_PATH + 'persons.json'
target = DATA_PATH + 'author_countries.json'
print(' Parsing ' + source_person)
countries = get_dblp_country(source_person, source_country)
with open(target, "w", encoding="utf-8") as f:
for line in countries:
json.dump(line, f)
f.write("\n")
print(' Parse finish! File author_countries.json created!')
print()
def pipeline_prepare_db() -> None:
"""
'*** Starting pipeline process to prepare PyCSRankings Database ***'
Careful, it will download the semantic scholar, which is up to 240 GB large.
:return: The files 'ai_dataset.json', 'persons.json' and 'author_countries.json' in the 'data' folder.
"""
print('**** Starting pipeline process to prepare PyCSRankings Database ****')
print()
print('Process 01 - Download DBLP data')
download_dblp()
print('Process 02 - Unzipping DBLP data')
unzip_dblp()
print('Process 03 - Create dblp.json')
extract_publications()
print('Process 04 - Create ai_article.json')
author_list = extract_ai_publications()
print('Process 05 - Create persons.json')
extract_persons(author_list)
print('Process 06 - Create author_countries.json')
parse_countries()
print('Process 07 - Match with Semantic Scholar')
# Be warned. This will download the semantic scholar dataset, which is rather large.
match_semantic_scholar()
print('*** Pipeline process to prepare PyCSRankings Database Finished! ***')
if __name__ == '__main__':
pipeline_prepare_db()
|
[
"os.mkdir",
"tqdm.tqdm",
"app.dblp_parser.parse_dblp",
"gzip.open",
"json.load",
"json.loads",
"json.dump",
"os.remove",
"os.path.dirname",
"os.path.exists",
"collections.defaultdict",
"app.dblp_parser.get_dblp_country",
"shutil.copyfileobj",
"app.dblp_parser.parse_dblp_person",
"os.listdir",
"re.sub"
] |
[((347, 372), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (362, 372), False, 'import os\n'), ((1662, 1688), 'app.dblp_parser.parse_dblp', 'parse_dblp', (['source', 'target'], {}), '(source, target)\n', (1672, 1688), False, 'from app.dblp_parser import parse_dblp, parse_dblp_person, get_dblp_country\n'), ((6533, 6550), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (6544, 6550), False, 'from collections import defaultdict\n'), ((9109, 9155), 'app.dblp_parser.parse_dblp_person', 'parse_dblp_person', (['source', 'target', 'author_list'], {}), '(source, target, author_list)\n', (9126, 9155), False, 'from app.dblp_parser import parse_dblp, parse_dblp_person, get_dblp_country\n'), ((9616, 9663), 'app.dblp_parser.get_dblp_country', 'get_dblp_country', (['source_person', 'source_country'], {}), '(source_person, source_country)\n', (9632, 9663), False, 'from app.dblp_parser import parse_dblp, parse_dblp_person, get_dblp_country\n'), ((885, 917), 'shutil.copyfileobj', 'shutil.copyfileobj', (['response', 'fh'], {}), '(response, fh)\n', (903, 917), False, 'import shutil\n'), ((1060, 1092), 'shutil.copyfileobj', 'shutil.copyfileobj', (['response', 'fh'], {}), '(response, fh)\n', (1078, 1092), False, 'import shutil\n'), ((1320, 1343), 'gzip.open', 'gzip.open', (['source', '"""rb"""'], {}), "(source, 'rb')\n", (1329, 1343), False, 'import gzip\n'), ((2112, 2124), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2121, 2124), False, 'import json\n'), ((3764, 3801), 'os.path.exists', 'os.path.exists', (['semantic_scholar_path'], {}), '(semantic_scholar_path)\n', (3778, 3801), False, 'import os\n'), ((3811, 3842), 'os.mkdir', 'os.mkdir', (['semantic_scholar_path'], {}), '(semantic_scholar_path)\n', (3819, 3842), False, 'import os\n'), ((6244, 6257), 'json.loads', 'json.loads', (['x'], {}), '(x)\n', (6254, 6257), False, 'import json\n'), ((1407, 1438), 'shutil.copyfileobj', 'shutil.copyfileobj', (['f_in', 'f_out'], {}), '(f_in, f_out)\n', (1425, 1438), False, 'import shutil\n'), ((2751, 2761), 'tqdm.tqdm', 'tqdm', (['in_f'], {}), '(in_f)\n', (2755, 2761), False, 'from tqdm import tqdm\n'), ((4086, 4118), 'shutil.copyfileobj', 'shutil.copyfileobj', (['response', 'fh'], {}), '(response, fh)\n', (4104, 4118), False, 'import shutil\n'), ((4213, 4241), 'tqdm.tqdm', 'tqdm', (['f'], {'total': 'default_count'}), '(f, total=default_count)\n', (4217, 4241), False, 'from tqdm import tqdm\n'), ((5892, 5908), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (5902, 5908), False, 'import json\n'), ((6676, 6709), 'os.listdir', 'os.listdir', (['semantic_scholar_path'], {}), '(semantic_scholar_path)\n', (6686, 6709), False, 'import os\n'), ((9758, 9776), 'json.dump', 'json.dump', (['line', 'f'], {}), '(line, f)\n', (9767, 9776), False, 'import json\n'), ((2786, 2802), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (2796, 2802), False, 'import json\n'), ((3014, 3054), 're.sub', 're.sub', (['""" \\\\([0-9]+\\\\)$"""', '""""""', 'curr_venue'], {}), "(' \\\\([0-9]+\\\\)$', '', curr_venue)\n", (3020, 3054), False, 'import re\n'), ((8698, 8719), 'json.dump', 'json.dump', (['pub', 'out_f'], {}), '(pub, out_f)\n', (8707, 8719), False, 'import json\n'), ((3190, 3212), 'json.dump', 'json.dump', (['line', 'out_f'], {}), '(line, out_f)\n', (3199, 3212), False, 'import json\n'), ((4445, 4477), 'shutil.copyfileobj', 'shutil.copyfileobj', (['response', 'fh'], {}), '(response, fh)\n', (4463, 4477), False, 'import shutil\n'), ((4764, 4803), 'os.remove', 'os.remove', (['(semantic_scholar_path + line)'], {}), '(semantic_scholar_path + line)\n', (4773, 4803), False, 'import os\n'), ((7074, 7090), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (7084, 7090), False, 'import json\n'), ((4544, 4589), 'gzip.open', 'gzip.open', (['(semantic_scholar_path + line)', '"""rb"""'], {}), "(semantic_scholar_path + line, 'rb')\n", (4553, 4589), False, 'import gzip\n'), ((4712, 4743), 'shutil.copyfileobj', 'shutil.copyfileobj', (['f_in', 'f_out'], {}), '(f_in, f_out)\n', (4730, 4743), False, 'import shutil\n'), ((8454, 8475), 'json.dump', 'json.dump', (['pub', 'out_f'], {}), '(pub, out_f)\n', (8463, 8475), False, 'import json\n')]
|
import random
import numpy as np
import cv2
import torch
import torch.utils.data as data
import logging
from . import util
class LQGTDataset3D(data.Dataset):
'''
Read LQ (Low Quality, here is LR) and GT vti file pairs.
If only GT image is provided, generate LQ vti on-the-fly.
The pair is ensured by 'sorted' function, so please check the name convention.
'''
logger = logging.getLogger('base')
def __init__(self, opt):
super(LQGTDataset3D, self).__init__()
self.opt = opt
self.paths_GT = None, None
if opt['set_type'] == 'vtk':
self.paths_GT = util.get_vtk_paths(opt['dataroot_GT'])
# self.paths_LQ = util.get_vtk_paths(opt['dataroot_LQ'])
elif opt['set_type'] == 'tecplot':
self.paths_GT = util.get_tecplot_paths(opt['dataroot_GT'])
# self.paths_LQ = util.get_tecplot_paths(opt['dataroot_LQ'])
else:
ex = Exception("Type '%s' is not supported" % opt['type'])
raise ex
assert self.paths_GT, 'Error: GT path is empty.'
# if self.paths_LQ and self.paths_GT:
# assert len(self.paths_LQ) == len(
# self.paths_GT
# ), 'GT and LQ datasets have different number of images - {}, {}.'.format(
# len(self.paths_LQ), len(self.paths_GT))
self.random_scale_list = [1]
def __getitem__(self, index):
# cv2.setNumThreads(0)
scale = self.opt['scale']
GT_size = self.opt['GT_size']
attr_id = self.opt.get('attr_id', 0)
# get GT image
GT_path = self.paths_GT[index]
vti_GT_generator = util.getTensorGenerator(GT_path, self.opt['data_type'])
vti_GT, component_GT = vti_GT_generator.get_array_by_id(attr_id)
print('origin GT shape: {}'.format(vti_GT.shape))
if self.opt['phase'] != 'train':
vti_GT = util.modcrop_3d(vti_GT, scale)
# if self.paths_LQ:
# LQ_path = self.paths_LQ[index]
# vti_LQ_generator = util.getTensorGenerator(LQ_path)
# vti_LQ_generator.set_type(self.opt['type'])
# vti_LQ, component_LQ = vti_LQ_generator.get_array_by_id(attr_id)
# else:
# if self.opt['phase'] == 'train':
# # random_scale = random.choice(self.random_scale_list)
# # Z_s, Y_s, X_s = vti_GT.shape
#
# # def _mod(n, random_scale, scale, thres):
# # rlt = int(n * random_scale)
# # rlt = (rlt // scale) * scale
# # return thres if rlt < thres else rlt
#
# # Z_s = _mod(Z_s, random_scale, scale, GT_size)
# # Y_s = _mod(Y_s, random_scale, scale, GT_size)
# # X_s = _mod(X_s, random_scale, scale, GT_size)
# vti_GT = util.resize_3d(arr=np.copy(vti_GT), newsize=GT_size)
#
# # using matlab imresize3
# vti_LQ = util.imresize3_np(vti_GT, 1 / scale, True)
# if vti_LQ.ndim != 3:
# ex = Exception("Error: dims not right")
# raise ex
if self.opt['phase'] == 'train':
Z, Y, X = vti_GT.shape
if Z < GT_size or Y < GT_size or X < GT_size:
vti_GT = util.resize_3d(np.copy(vti_GT), newsize=GT_size)
elif Z > GT_size or Y > GT_size or X > GT_size:
vti_GT = util.modcrop_3d(vti_GT, scale)
# using matlab imresize3
# vti_LQ = util.imresize3_np(vti_GT, 1 / scale, True)
# if vti_LQ.ndim != 2:
# ex = Exception("Error: dims not right")
# raise ex
# Z, Y, X = vti_LQ.shape
# LQ_size = GT_size // scale
#
# # randomly crop
# rnd_Z = random.randint(0, max(0, Z - LQ_size))
# rnd_Y = random.randint(0, max(0, Y - LQ_size))
# rnd_X = random.randint(0, max(0, X - LQ_size))
# vti_LQ = vti_LQ[rnd_Z: rnd_Z + LQ_size, rnd_Y: rnd_Y + LQ_size, rnd_X: rnd_X + LQ_size]
# rnd_Z_GT, rnd_Y_GT, rnd_X_GT = int(rnd_Z * scale), int(rnd_Y * scale), int(rnd_X * scale)
# vti_GT = vti_GT[rnd_Z_GT: rnd_Z_GT + GT_size, rnd_Y_GT: rnd_Y_GT + GT_size, rnd_X_GT: rnd_X_GT + GT_size]
# ZYX to XYZ
vti_GT = torch.from_numpy(np.ascontiguousarray(vti_GT)).float().unsqueeze(0)
print("vti_GT size: {}".format(vti_GT.size()))
# vti_LQ = torch.from_numpy(np.ascontiguousarray(vti_LQ)).float().unsqueeze(0)
# if LQ_path is None:
# LQ_path = GT_path
return {'GT': vti_GT, 'GT_path': GT_path}
def __len__(self):
return len(self.paths_GT)
|
[
"numpy.copy",
"numpy.ascontiguousarray",
"logging.getLogger"
] |
[((396, 421), 'logging.getLogger', 'logging.getLogger', (['"""base"""'], {}), "('base')\n", (413, 421), False, 'import logging\n'), ((3340, 3355), 'numpy.copy', 'np.copy', (['vti_GT'], {}), '(vti_GT)\n', (3347, 3355), True, 'import numpy as np\n'), ((4421, 4449), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['vti_GT'], {}), '(vti_GT)\n', (4441, 4449), True, 'import numpy as np\n')]
|
import multiprocessing
import threading
import numpy as np
import os
import shutil
import matplotlib.pyplot as plt
import sys
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.multiprocessing as mp
from shared_adam import SharedAdam
import math, os
import cv2
import torchvision.transforms as transforms
import imageio
os.environ["OMP_NUM_THREADS"] = "1"
device=torch.device("cuda")
np.set_printoptions(precision=4,suppress=True)
simulation_dir = '../simulation'
sys.path.insert(0, simulation_dir)
from Wrench_Manipulation_Env import RobotEnv
ExName = "Wrench_Manipulation"
sys.path.insert(0,'../external/bullet3.git/build_cmake/examples/pybullet')
import pybullet
def v_wrap(np_array,dtype=np.float32):
if np_array.dtype != dtype:
np_array = np_array.astype(dtype)
return torch.from_numpy(np_array).to(device)
def push_and_pull(opt, lnet, gnet, done, s_, bs, ba, br, bdone, gamma):
if done:
v_s_ = 0.
else:
v_s_ = lnet.forward(v_wrap(s_[None,:]))[-1].data.cpu().numpy()[0,0]
buffer_v_target = []
for r, termination in zip(br[::-1], bdone[::-1]):
if termination:
v_s_ = 0
v_s_ = r + gamma * v_s_
buffer_v_target.append(v_s_)
buffer_v_target.reverse()
loss = lnet.loss_func(
v_wrap(np.vstack(bs)),
v_wrap(np.vstack(ba)),
v_wrap(np.array(buffer_v_target)[:, None]))
opt.zero_grad()
loss.backward()
nn.utils.clip_grad_norm(lnet.parameters(),1.0)
for lp, gp in zip(lnet.parameters(), gnet.parameters()):
gp._grad = lp.grad
opt.step()
# pull global parameters
lnet.load_state_dict(gnet.state_dict())
MAX_EP = 15000
UPDATE_GLOBAL_ITER = 10
GAMMA = 0.9
ENTROPY_BETA = 0.001
def set_init(layers):
for layer in layers:
nn.init.normal_(layer.weight, mean=0., std=0.01)
nn.init.constant_(layer.bias, 0.)
class ACNet(nn.Module):
def __init__(self):
super(ACNet, self).__init__()
self.distribution = torch.distributions.Normal
self.block1 = nn.Sequential(
nn.Conv2d(in_channels=3,out_channels=32,kernel_size=(3,3),stride=(2,2),padding=(1,1),bias=True),
nn.ReLU(),
nn.BatchNorm2d(32),
)
# 60, 80
self.block2 = nn.Sequential(
nn.Conv2d(in_channels=32,out_channels=32,kernel_size=(3,3),stride=(2,2),padding=(1,1),bias=True),
nn.ReLU(),
nn.BatchNorm2d(32),
)
# 30, 40
self.block3 = nn.Sequential(
nn.Conv2d(in_channels=32,out_channels=64,kernel_size=(3,3),stride=(2,2),padding=(1,1),bias=True),
nn.ReLU(),
nn.BatchNorm2d(64),
)
# 15, 20
self.block4 = nn.Sequential(
nn.Conv2d(in_channels=64,out_channels=64,kernel_size=(3,3),stride=(2,2),padding=(1,1),bias=True),
nn.ReLU(),
nn.BatchNorm2d(64),
)
# 8, 10
self.block5 = nn.Sequential(
nn.Conv2d(in_channels=64,out_channels=128,kernel_size=(3,3),stride=(2,2),padding=(1,1),bias=True),
nn.ReLU(),
nn.BatchNorm2d(128),
)
# 4, 5
self.block6 = nn.Sequential(
nn.Conv2d(in_channels=128,out_channels=128,kernel_size=(3,3),stride=(2,2),padding=(1,1),bias=True),
nn.ReLU(),
nn.BatchNorm2d(128),
)
# 2, 3
self.fc_a = nn.Sequential(
nn.Linear(2 * 3 * 128, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 24),
nn.ReLU()
)
self.fc_s = nn.Sequential(
nn.Linear(2 * 3 * 128, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 24),
nn.ReLU()
)
self.fc_v = nn.Sequential(
nn.Linear(2 * 3 * 128, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 24),
nn.ReLU()
)
self.mu_layer = nn.Linear(24,6)
self.sigma_layer = nn.Linear(24,6)
self.v_layer = nn.Linear(24,1)
set_init([self.mu_layer, self.sigma_layer, self.v_layer])
def forward(self, im):
im = im.view(-1, 120, 160, 3)
im = im.permute(0,3,1,2)
im = self.block1(im)
im = self.block2(im)
im = self.block3(im)
im = self.block4(im)
im = self.block5(im)
im = self.block6(im)
im = im.reshape(-1, 2 * 3 * 128)
x_a = self.fc_a(im)
mu = self.mu_layer(x_a)
mu = F.tanh(mu)
x_s = self.fc_s(im)
sigma = self.sigma_layer(x_s)
sigma = F.softplus(sigma) * 0.06 + 0.005
x_v= self.fc_v(im)
values = self.v_layer(x_v)
return mu, sigma, values
def choose_action(self, s):
self.training = False
mu, sigma, _ = self.forward(s)
m = self.distribution(mu.view(-1,).data, sigma.view(-1,).data)
return m.sample().cpu().numpy(), mu.cpu().detach().numpy(), sigma.cpu().detach().numpy()
def loss_func(self, s, a, v_t):
self.train()
mu, sigma, values = self.forward(s)
td = v_t - values
c_loss = td.pow(2)
m = self.distribution(mu, sigma)
log_prob = m.log_prob(a)
entropy = 0.5 + 0.5 * math.log(2 * math.pi) + torch.log(m.scale)
exp_v = log_prob * td.detach() + ENTROPY_BETA * entropy
a_loss = -exp_v
total_loss = (a_loss + c_loss).mean()
return total_loss
class Worker(mp.Process):
def __init__(self, gnet, opt, global_ep, global_ep_r, res_queue, wid, SAVE_TOP_DIR):
super(Worker, self).__init__()
print("wid %d" % wid)
self.wid = wid
self.step = 0
self.g_ep, self.g_ep_r, self.res_queue = global_ep, global_ep_r, res_queue
self.gnet, self.opt = gnet, opt
self.random_seed = 42 + self.wid + int(np.log(self.wid * 100 + 1))
print("random_seed",self.random_seed,"self.wid",self.wid)
np.random.seed(self.random_seed)
self.lnet = ACNet().to(device)
self.init_step = 0
self.SAVE_TOP_DIR = SAVE_TOP_DIR
def run(self):
mean=np.array([0.485, 0.456, 0.406])
std=np.array([0.229, 0.224, 0.225])
mean = np.reshape(mean,(1,1,3))
std = np.reshape(std,(1,1,3))
self.start_pos = [-0.1,-0.4,0.5]
self.dt = 1./30.0
if self.wid == 0:
self.p_id = pybullet.connect(pybullet.GUI)
else:
self.p_id = pybullet.connect(pybullet.DIRECT)
action_dir = os.path.join(self.SAVE_TOP_DIR,"action.npy")
fixture_action = np.zeros((3,))
self.env = RobotEnv(worker_id=self.wid,p_id=pybullet,dt=self.dt,maxSteps=20,fixture_offset=fixture_action)
total_step = 1 + self.init_step
suc_check = 0
reward_check = 0
episode_check = 0
sigma_check1 = 0
sigma_check2 = 0
total_episode = 0
buffer_s, buffer_a, buffer_r, buffer_done = [], [], [], []
while total_step < MAX_EP:
observation = self.env.reset()
observation = observation/255.0
observation = (observation - mean)/std
observation = np.reshape(observation,(-1,))
while True:
action, mu_r, sigma_r = self.lnet.choose_action(v_wrap(observation[None,:]))
action[:3] = action[:3].clip(-0.03,0.03)
action[3:] = action[3:].clip(-0.05,0.05)
#
# if action[2] > 0.005:
#w action[2] = 0.005
observation_next, reward, done, suc = self.env.step(action)
observation_next = observation_next/255.0
observation_next = (observation_next - mean)/std
recordGif = False
if recordGif and total_step > 10:
imageio.mimsave('pokingSthSlightly.gif',self.env.obs_list)
return
observation_next = np.reshape(observation_next,(-1,))
buffer_s.append(observation)
buffer_r.append(reward)
buffer_a.append(action)
buffer_done.append(done)
if total_step % (UPDATE_GLOBAL_ITER + self.wid) == 0 or done:
push_and_pull(self.opt, self.lnet, self.gnet, done, observation_next, buffer_s, buffer_a, buffer_r, buffer_done, GAMMA)
buffer_s, buffer_a, buffer_r, buffer_done = [], [], [], []
if done:
suc_check += suc
episode_check += 1
total_episode += 1
observation = observation_next
total_step += 1
reward_check += reward
if total_step % 100 == 0:
current_performance = float(suc_check)/episode_check
avg_sigma1 = sigma_check1 / 100.0
avg_sigma2 = sigma_check2 / 100.0
if self.wid == 0:
print(self.SAVE_TOP_DIR,"total step %d, avg suc %f, avg reward %f" % (total_step, suc_check / 100.0, reward_check / 100.0))
save_path = os.path.join(self.SAVE_TOP_DIR,str(total_step)+'model.pth.tar')
if self.wid == 0 and int(total_step) % 1000 == 0:
print("saving to",save_path)
torch.save(self.gnet.state_dict(), save_path)
suc_check = 0
episode_check = 0
sigma_check1 = 0.0
sigma_check2 = 0.0
if done:
break
reward_dir = os.path.join(self.SAVE_TOP_DIR,"reward.txt")
np.savetxt(reward_dir,np.array([reward_check/100.0]),fmt='%f')
print("finising the learning!")
torch.cuda.empty_cache()
print("empyting the cache!")
sys.exit()
os._exit(1)
if __name__ == "__main__":
ExName = 'optimal'#sys.argv[1]
#print(ExName)
SAVE_TOP_DIR = os.path.join('./wrench/',ExName)
if not os.path.exists(SAVE_TOP_DIR):
os.makedirs(SAVE_TOP_DIR)
mp.set_start_method('spawn')
gnet = ACNet() # global network
## loading
Load_model_id = '2000'
Load_path = os.path.join(SAVE_TOP_DIR,Load_model_id + 'model.pth.tar')
#checkpoint = torch.load(Load_path)
#gnet.load_state_dict(checkpoint)
gnet.to(device)
gnet.share_memory()
opt = SharedAdam(gnet.parameters(),lr=0.0001)
global_ep, global_ep_r, res_queue = mp.Value('i',0), mp.Value('d',0.), mp.Queue()
workers = [Worker(gnet, opt, global_ep, global_ep_r, res_queue, i, SAVE_TOP_DIR) for i in range(1)]
[w.start() for w in workers]
res = []
for worker in workers:
worker.init_step = 0
[w.join() for w in workers]
|
[
"numpy.random.seed",
"torch.nn.init.constant_",
"torch.device",
"pybullet.connect",
"torch.nn.functional.tanh",
"os.path.join",
"imageio.mimsave",
"numpy.set_printoptions",
"os.path.exists",
"Wrench_Manipulation_Env.RobotEnv",
"numpy.reshape",
"torch.nn.Linear",
"math.log",
"torch.log",
"torch.nn.Conv2d",
"torch.multiprocessing.set_start_method",
"torch.nn.BatchNorm2d",
"torch.multiprocessing.Value",
"sys.exit",
"torch.from_numpy",
"numpy.vstack",
"torch.nn.ReLU",
"os.makedirs",
"numpy.log",
"numpy.zeros",
"sys.path.insert",
"torch.nn.init.normal_",
"numpy.array",
"os._exit",
"torch.cuda.empty_cache",
"torch.multiprocessing.Queue",
"torch.nn.functional.softplus"
] |
[((410, 430), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (422, 430), False, 'import torch\n'), ((432, 479), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(4)', 'suppress': '(True)'}), '(precision=4, suppress=True)\n', (451, 479), True, 'import numpy as np\n'), ((512, 546), 'sys.path.insert', 'sys.path.insert', (['(0)', 'simulation_dir'], {}), '(0, simulation_dir)\n', (527, 546), False, 'import sys\n'), ((624, 699), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../external/bullet3.git/build_cmake/examples/pybullet"""'], {}), "(0, '../external/bullet3.git/build_cmake/examples/pybullet')\n", (639, 699), False, 'import sys\n'), ((9137, 9170), 'os.path.join', 'os.path.join', (['"""./wrench/"""', 'ExName'], {}), "('./wrench/', ExName)\n", (9149, 9170), False, 'import math, os\n'), ((9242, 9270), 'torch.multiprocessing.set_start_method', 'mp.set_start_method', (['"""spawn"""'], {}), "('spawn')\n", (9261, 9270), True, 'import torch.multiprocessing as mp\n'), ((9362, 9421), 'os.path.join', 'os.path.join', (['SAVE_TOP_DIR', "(Load_model_id + 'model.pth.tar')"], {}), "(SAVE_TOP_DIR, Load_model_id + 'model.pth.tar')\n", (9374, 9421), False, 'import math, os\n'), ((1759, 1808), 'torch.nn.init.normal_', 'nn.init.normal_', (['layer.weight'], {'mean': '(0.0)', 'std': '(0.01)'}), '(layer.weight, mean=0.0, std=0.01)\n', (1774, 1808), True, 'import torch.nn as nn\n'), ((1816, 1850), 'torch.nn.init.constant_', 'nn.init.constant_', (['layer.bias', '(0.0)'], {}), '(layer.bias, 0.0)\n', (1833, 1850), True, 'import torch.nn as nn\n'), ((3792, 3808), 'torch.nn.Linear', 'nn.Linear', (['(24)', '(6)'], {}), '(24, 6)\n', (3801, 3808), True, 'import torch.nn as nn\n'), ((3831, 3847), 'torch.nn.Linear', 'nn.Linear', (['(24)', '(6)'], {}), '(24, 6)\n', (3840, 3847), True, 'import torch.nn as nn\n'), ((3866, 3882), 'torch.nn.Linear', 'nn.Linear', (['(24)', '(1)'], {}), '(24, 1)\n', (3875, 3882), True, 'import torch.nn as nn\n'), ((4285, 4295), 'torch.nn.functional.tanh', 'F.tanh', (['mu'], {}), '(mu)\n', (4291, 4295), True, 'import torch.nn.functional as F\n'), ((5618, 5650), 'numpy.random.seed', 'np.random.seed', (['self.random_seed'], {}), '(self.random_seed)\n', (5632, 5650), True, 'import numpy as np\n'), ((5776, 5807), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (5784, 5807), True, 'import numpy as np\n'), ((5816, 5847), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (5824, 5847), True, 'import numpy as np\n'), ((5859, 5886), 'numpy.reshape', 'np.reshape', (['mean', '(1, 1, 3)'], {}), '(mean, (1, 1, 3))\n', (5869, 5886), True, 'import numpy as np\n'), ((5894, 5920), 'numpy.reshape', 'np.reshape', (['std', '(1, 1, 3)'], {}), '(std, (1, 1, 3))\n', (5904, 5920), True, 'import numpy as np\n'), ((6129, 6174), 'os.path.join', 'os.path.join', (['self.SAVE_TOP_DIR', '"""action.npy"""'], {}), "(self.SAVE_TOP_DIR, 'action.npy')\n", (6141, 6174), False, 'import math, os\n'), ((6195, 6209), 'numpy.zeros', 'np.zeros', (['(3,)'], {}), '((3,))\n', (6203, 6209), True, 'import numpy as np\n'), ((6228, 6331), 'Wrench_Manipulation_Env.RobotEnv', 'RobotEnv', ([], {'worker_id': 'self.wid', 'p_id': 'pybullet', 'dt': 'self.dt', 'maxSteps': '(20)', 'fixture_offset': 'fixture_action'}), '(worker_id=self.wid, p_id=pybullet, dt=self.dt, maxSteps=20,\n fixture_offset=fixture_action)\n', (6236, 6331), False, 'from Wrench_Manipulation_Env import RobotEnv\n'), ((8799, 8844), 'os.path.join', 'os.path.join', (['self.SAVE_TOP_DIR', '"""reward.txt"""'], {}), "(self.SAVE_TOP_DIR, 'reward.txt')\n", (8811, 8844), False, 'import math, os\n'), ((8952, 8976), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (8974, 8976), False, 'import torch\n'), ((9014, 9024), 'sys.exit', 'sys.exit', ([], {}), '()\n', (9022, 9024), False, 'import sys\n'), ((9029, 9040), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (9037, 9040), False, 'import math, os\n'), ((9179, 9207), 'os.path.exists', 'os.path.exists', (['SAVE_TOP_DIR'], {}), '(SAVE_TOP_DIR)\n', (9193, 9207), False, 'import math, os\n'), ((9213, 9238), 'os.makedirs', 'os.makedirs', (['SAVE_TOP_DIR'], {}), '(SAVE_TOP_DIR)\n', (9224, 9238), False, 'import math, os\n'), ((9624, 9640), 'torch.multiprocessing.Value', 'mp.Value', (['"""i"""', '(0)'], {}), "('i', 0)\n", (9632, 9640), True, 'import torch.multiprocessing as mp\n'), ((9641, 9659), 'torch.multiprocessing.Value', 'mp.Value', (['"""d"""', '(0.0)'], {}), "('d', 0.0)\n", (9649, 9659), True, 'import torch.multiprocessing as mp\n'), ((9659, 9669), 'torch.multiprocessing.Queue', 'mp.Queue', ([], {}), '()\n', (9667, 9669), True, 'import torch.multiprocessing as mp\n'), ((832, 858), 'torch.from_numpy', 'torch.from_numpy', (['np_array'], {}), '(np_array)\n', (848, 858), False, 'import torch\n'), ((1285, 1298), 'numpy.vstack', 'np.vstack', (['bs'], {}), '(bs)\n', (1294, 1298), True, 'import numpy as np\n'), ((1312, 1325), 'numpy.vstack', 'np.vstack', (['ba'], {}), '(ba)\n', (1321, 1325), True, 'import numpy as np\n'), ((2022, 2129), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(3)', 'out_channels': '(32)', 'kernel_size': '(3, 3)', 'stride': '(2, 2)', 'padding': '(1, 1)', 'bias': '(True)'}), '(in_channels=3, out_channels=32, kernel_size=(3, 3), stride=(2, 2),\n padding=(1, 1), bias=True)\n', (2031, 2129), True, 'import torch.nn as nn\n'), ((2125, 2134), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2132, 2134), True, 'import torch.nn as nn\n'), ((2142, 2160), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(32)'], {}), '(32)\n', (2156, 2160), True, 'import torch.nn as nn\n'), ((2220, 2329), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(32)', 'out_channels': '(32)', 'kernel_size': '(3, 3)', 'stride': '(2, 2)', 'padding': '(1, 1)', 'bias': '(True)'}), '(in_channels=32, out_channels=32, kernel_size=(3, 3), stride=(2, 2\n ), padding=(1, 1), bias=True)\n', (2229, 2329), True, 'import torch.nn as nn\n'), ((2324, 2333), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2331, 2333), True, 'import torch.nn as nn\n'), ((2341, 2359), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(32)'], {}), '(32)\n', (2355, 2359), True, 'import torch.nn as nn\n'), ((2419, 2528), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(32)', 'out_channels': '(64)', 'kernel_size': '(3, 3)', 'stride': '(2, 2)', 'padding': '(1, 1)', 'bias': '(True)'}), '(in_channels=32, out_channels=64, kernel_size=(3, 3), stride=(2, 2\n ), padding=(1, 1), bias=True)\n', (2428, 2528), True, 'import torch.nn as nn\n'), ((2523, 2532), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2530, 2532), True, 'import torch.nn as nn\n'), ((2540, 2558), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (2554, 2558), True, 'import torch.nn as nn\n'), ((2618, 2727), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(64)', 'out_channels': '(64)', 'kernel_size': '(3, 3)', 'stride': '(2, 2)', 'padding': '(1, 1)', 'bias': '(True)'}), '(in_channels=64, out_channels=64, kernel_size=(3, 3), stride=(2, 2\n ), padding=(1, 1), bias=True)\n', (2627, 2727), True, 'import torch.nn as nn\n'), ((2722, 2731), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2729, 2731), True, 'import torch.nn as nn\n'), ((2739, 2757), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (2753, 2757), True, 'import torch.nn as nn\n'), ((2816, 2926), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(64)', 'out_channels': '(128)', 'kernel_size': '(3, 3)', 'stride': '(2, 2)', 'padding': '(1, 1)', 'bias': '(True)'}), '(in_channels=64, out_channels=128, kernel_size=(3, 3), stride=(2, \n 2), padding=(1, 1), bias=True)\n', (2825, 2926), True, 'import torch.nn as nn\n'), ((2921, 2930), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2928, 2930), True, 'import torch.nn as nn\n'), ((2938, 2957), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (2952, 2957), True, 'import torch.nn as nn\n'), ((3015, 3125), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(128)', 'out_channels': '(128)', 'kernel_size': '(3, 3)', 'stride': '(2, 2)', 'padding': '(1, 1)', 'bias': '(True)'}), '(in_channels=128, out_channels=128, kernel_size=(3, 3), stride=(2,\n 2), padding=(1, 1), bias=True)\n', (3024, 3125), True, 'import torch.nn as nn\n'), ((3121, 3130), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3128, 3130), True, 'import torch.nn as nn\n'), ((3138, 3157), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (3152, 3157), True, 'import torch.nn as nn\n'), ((3217, 3244), 'torch.nn.Linear', 'nn.Linear', (['(2 * 3 * 128)', '(128)'], {}), '(2 * 3 * 128, 128)\n', (3226, 3244), True, 'import torch.nn as nn\n'), ((3256, 3265), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3263, 3265), True, 'import torch.nn as nn\n'), ((3277, 3295), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(64)'], {}), '(128, 64)\n', (3286, 3295), True, 'import torch.nn as nn\n'), ((3307, 3316), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3314, 3316), True, 'import torch.nn as nn\n'), ((3328, 3345), 'torch.nn.Linear', 'nn.Linear', (['(64)', '(24)'], {}), '(64, 24)\n', (3337, 3345), True, 'import torch.nn as nn\n'), ((3357, 3366), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3364, 3366), True, 'import torch.nn as nn\n'), ((3415, 3442), 'torch.nn.Linear', 'nn.Linear', (['(2 * 3 * 128)', '(128)'], {}), '(2 * 3 * 128, 128)\n', (3424, 3442), True, 'import torch.nn as nn\n'), ((3454, 3463), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3461, 3463), True, 'import torch.nn as nn\n'), ((3475, 3493), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(64)'], {}), '(128, 64)\n', (3484, 3493), True, 'import torch.nn as nn\n'), ((3505, 3514), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3512, 3514), True, 'import torch.nn as nn\n'), ((3526, 3543), 'torch.nn.Linear', 'nn.Linear', (['(64)', '(24)'], {}), '(64, 24)\n', (3535, 3543), True, 'import torch.nn as nn\n'), ((3555, 3564), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3562, 3564), True, 'import torch.nn as nn\n'), ((3614, 3641), 'torch.nn.Linear', 'nn.Linear', (['(2 * 3 * 128)', '(128)'], {}), '(2 * 3 * 128, 128)\n', (3623, 3641), True, 'import torch.nn as nn\n'), ((3653, 3662), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3660, 3662), True, 'import torch.nn as nn\n'), ((3674, 3692), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(64)'], {}), '(128, 64)\n', (3683, 3692), True, 'import torch.nn as nn\n'), ((3704, 3713), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3711, 3713), True, 'import torch.nn as nn\n'), ((3725, 3742), 'torch.nn.Linear', 'nn.Linear', (['(64)', '(24)'], {}), '(64, 24)\n', (3734, 3742), True, 'import torch.nn as nn\n'), ((3754, 3763), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3761, 3763), True, 'import torch.nn as nn\n'), ((4989, 5007), 'torch.log', 'torch.log', (['m.scale'], {}), '(m.scale)\n', (4998, 5007), False, 'import torch\n'), ((6017, 6047), 'pybullet.connect', 'pybullet.connect', (['pybullet.GUI'], {}), '(pybullet.GUI)\n', (6033, 6047), False, 'import pybullet\n'), ((6076, 6109), 'pybullet.connect', 'pybullet.connect', (['pybullet.DIRECT'], {}), '(pybullet.DIRECT)\n', (6092, 6109), False, 'import pybullet\n'), ((6724, 6754), 'numpy.reshape', 'np.reshape', (['observation', '(-1,)'], {}), '(observation, (-1,))\n', (6734, 6754), True, 'import numpy as np\n'), ((8870, 8902), 'numpy.array', 'np.array', (['[reward_check / 100.0]'], {}), '([reward_check / 100.0])\n', (8878, 8902), True, 'import numpy as np\n'), ((1339, 1364), 'numpy.array', 'np.array', (['buffer_v_target'], {}), '(buffer_v_target)\n', (1347, 1364), True, 'import numpy as np\n'), ((4366, 4383), 'torch.nn.functional.softplus', 'F.softplus', (['sigma'], {}), '(sigma)\n', (4376, 4383), True, 'import torch.nn.functional as F\n'), ((5524, 5550), 'numpy.log', 'np.log', (['(self.wid * 100 + 1)'], {}), '(self.wid * 100 + 1)\n', (5530, 5550), True, 'import numpy as np\n'), ((7402, 7437), 'numpy.reshape', 'np.reshape', (['observation_next', '(-1,)'], {}), '(observation_next, (-1,))\n', (7412, 7437), True, 'import numpy as np\n'), ((4965, 4986), 'math.log', 'math.log', (['(2 * math.pi)'], {}), '(2 * math.pi)\n', (4973, 4986), False, 'import math, os\n'), ((7289, 7348), 'imageio.mimsave', 'imageio.mimsave', (['"""pokingSthSlightly.gif"""', 'self.env.obs_list'], {}), "('pokingSthSlightly.gif', self.env.obs_list)\n", (7304, 7348), False, 'import imageio\n')]
|
# test via
# python protest.py dm2_download_megatest.py
from pygr import nlmsa_utils
import pygr.Data
import os, tempfile, time
def rm_recursive(top):
'recursively remove top and everything in it!'
for root, dirs, files in os.walk(top, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
os.rmdir(top)
#'http://biodb.bioinformatics.ucla.edu/PYGRDATA/dm2_multiz9way.txt.gz',
class NLMSADownload_Test(object):
'''try to save and build via download catalog auto-constructed from biodb site'''
def __init__(self,url='http://biodb.bioinformatics.ucla.edu/PYGRDATA/',
testDir = tempfile.gettempdir()):
self.url = url
import random
self.testDir = os.path.join(testDir,'test%d' % random.randint(1,99999))
self.pygrdatapath = ','.join([self.testDir,
'http://biodb2.bioinformatics.ucla.edu:5000'])
def setup(self):
'create pygr.Data entries for all NLMSAs on biodb/PYGRDATA site'
os.mkdir(self.testDir)
pygr.Data.update(self.pygrdatapath) # set our desired path
from pygr.apps.catalog_downloads import save_NLMSA_downloaders
save_NLMSA_downloaders(self.url)
## def setup(self):
## 'create pygr.Data entries for building the target NLMSA'
## os.mkdir(self.testDir)
## pygrData = get_pygr_data_path(self.pygrdatapath)
## source = pygrData.SourceURL(self.url)
## source.__doc__ = 'textdump of NLMSA to test'
## pygrData.Bio.MSA.UCSC.dm2_multiz9way.txt = source
## msaref = nlmsa_utils.NLMSABuilder(source)
## msaref.__doc__ = 'NLMSA builder to test'
## pygrData.Bio.MSA.UCSC.dm2_multiz9way = msaref
## pygrData.save()
def download_test(self):
'test building the NLMSA, and a simple query'
os.environ['PYGRDATADOWNLOAD'] = self.testDir
os.environ['PYGRDATABUILDDIR'] = self.testDir
t = time.time()
pygr.Data.Bio.MSA.UCSC.dm2_multiz9way() # build it!
t1 = time.time() - t # 1st build time
pygr.Data.clear_cache() # reload rsrc db
t = time.time()
msa = pygr.Data.Bio.MSA.UCSC.dm2_multiz9way() # already built
t2 = time.time() - t # 2nd request time
assert t2 < t1/3., 'second request took too long!'
chr4 = msa.seqDict['dm2.chr4']
result = msa[chr4[:10000]]
assert len(result) == 9
def teardown(self):
'clean up our temporary directory'
rm_recursive(self.testDir)
|
[
"os.mkdir",
"random.randint",
"tempfile.gettempdir",
"os.walk",
"time.time",
"pygr.apps.catalog_downloads.save_NLMSA_downloaders",
"os.rmdir",
"os.path.join"
] |
[((233, 260), 'os.walk', 'os.walk', (['top'], {'topdown': '(False)'}), '(top, topdown=False)\n', (240, 260), False, 'import os, tempfile, time\n'), ((414, 427), 'os.rmdir', 'os.rmdir', (['top'], {}), '(top)\n', (422, 427), False, 'import os, tempfile, time\n'), ((724, 745), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (743, 745), False, 'import os, tempfile, time\n'), ((1105, 1127), 'os.mkdir', 'os.mkdir', (['self.testDir'], {}), '(self.testDir)\n', (1113, 1127), False, 'import os, tempfile, time\n'), ((1274, 1306), 'pygr.apps.catalog_downloads.save_NLMSA_downloaders', 'save_NLMSA_downloaders', (['self.url'], {}), '(self.url)\n', (1296, 1306), False, 'from pygr.apps.catalog_downloads import save_NLMSA_downloaders\n'), ((2051, 2062), 'time.time', 'time.time', ([], {}), '()\n', (2060, 2062), False, 'import os, tempfile, time\n'), ((2230, 2241), 'time.time', 'time.time', ([], {}), '()\n', (2239, 2241), False, 'import os, tempfile, time\n'), ((2136, 2147), 'time.time', 'time.time', ([], {}), '()\n', (2145, 2147), False, 'import os, tempfile, time\n'), ((2325, 2336), 'time.time', 'time.time', ([], {}), '()\n', (2334, 2336), False, 'import os, tempfile, time\n'), ((311, 335), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (323, 335), False, 'import os, tempfile, time\n'), ((384, 408), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (396, 408), False, 'import os, tempfile, time\n'), ((848, 872), 'random.randint', 'random.randint', (['(1)', '(99999)'], {}), '(1, 99999)\n', (862, 872), False, 'import random\n')]
|
from math import floor, log2
from operator import itemgetter
def argmin(*args):
if len(args) == 1:
iterable = args[0]
else:
iterable = args
return min((j, i) for i, j in enumerate(iterable))[1]
def greatest_pow2(n):
return 2 ** floor(log2(n))
def inverse_index(a):
return {v: k for k, v in enumerate(a)}
def inverse_index_array(a):
ia = [None] * len(a)
for i, v in enumerate(a):
ia[v] = i
return ia
|
[
"math.log2"
] |
[((270, 277), 'math.log2', 'log2', (['n'], {}), '(n)\n', (274, 277), False, 'from math import floor, log2\n')]
|
# -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERsteps.pyCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from lettuce import core
from lettuce.terrain import after
from lettuce.terrain import before
from lettuce.plugins.reporter import Reporter
class NameReporter(Reporter):
def print_scenario_running(self, scenario):
self.wrt('%s ... ' % scenario.name)
def print_scenario_ran(self, scenario):
if scenario.passed:
self.wrt("OK")
elif scenario.failed:
reason = self.scenarios_and_its_fails[scenario]
if isinstance(reason.exception, AssertionError):
self.wrt("FAILED")
else:
self.wrt("ERROR")
self.wrt("\n")
reporter = NameReporter()
before.each_scenario(reporter.print_scenario_running)
after.each_scenario(reporter.print_scenario_ran)
after.each_step(reporter.store_failed_step)
after.all(reporter.print_end)
def print_no_features_found(where):
where = core.fs.relpath(where)
if not where.startswith(os.sep):
where = '.%s%s' % (os.sep, where)
reporter.wrt('Oops!\n')
reporter.wrt('could not find features at %s\n' % where)
|
[
"lettuce.terrain.after.each_step",
"lettuce.terrain.after.each_scenario",
"lettuce.terrain.before.each_scenario",
"lettuce.core.fs.relpath",
"lettuce.terrain.after.all"
] |
[((1431, 1484), 'lettuce.terrain.before.each_scenario', 'before.each_scenario', (['reporter.print_scenario_running'], {}), '(reporter.print_scenario_running)\n', (1451, 1484), False, 'from lettuce.terrain import before\n'), ((1485, 1533), 'lettuce.terrain.after.each_scenario', 'after.each_scenario', (['reporter.print_scenario_ran'], {}), '(reporter.print_scenario_ran)\n', (1504, 1533), False, 'from lettuce.terrain import after\n'), ((1534, 1577), 'lettuce.terrain.after.each_step', 'after.each_step', (['reporter.store_failed_step'], {}), '(reporter.store_failed_step)\n', (1549, 1577), False, 'from lettuce.terrain import after\n'), ((1578, 1607), 'lettuce.terrain.after.all', 'after.all', (['reporter.print_end'], {}), '(reporter.print_end)\n', (1587, 1607), False, 'from lettuce.terrain import after\n'), ((1658, 1680), 'lettuce.core.fs.relpath', 'core.fs.relpath', (['where'], {}), '(where)\n', (1673, 1680), False, 'from lettuce import core\n')]
|
#!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2021 ominocutherium
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# Part of ominocutherium's godot gamejam skeleton project.
import os
class ConfigFileInfo:
docs_default_files = []
all_build_objects = []
additional_packages_to_build = []
default_build_info_object = None
__current_build_info_object = None
itch_user = ""
itch_game_name = ""
gut_test_dirs = []
git_primary_branch_name = ""
def __handle_docs_defaults_line(self,line:str):
self.docs_default_files.append(line.split()[1])
def __handle_export_exclude_line(self,line:str):
if self.__current_build_info_object != None:
self.__current_build_info_object.remove_globs.append(line[:-1].split(None,1)[1])
def __handle_export_include_line(self,line:str):
if self.__current_build_info_object != None:
self.__current_build_info_object.add_globs.append(line[:-1].split(None,1)[1])
def __handle_build_info_line(self,line:str):
data = line[:-1].split()
if data[3] == "assets":
self.__current_build_info_object = AssetPackBuildInfo()
if len(data) > 4:
self.__current_build_info_object.pack_name = data[4]
if len(data) > 5 and data[5] == "dlc":
self.__current_build_info_object.add_to_all_platform_packs = False
else:
self.__current_build_info_object = PlatformBuildInfo()
self.__current_build_info_object.platform_template_name = line[:-1].split(None,3)[3]
self.all_build_objects.append(self.__current_build_info_object)
self.__current_build_info_object.itch_channel_name = data[1]
self.__current_build_info_object.build_dir = data[2]
def __handle_itch_config_line(self,line:str):
data = line[:-1].split()
if len(data) > 2:
self.itch_user = data[1]
self.itch_game_name = data[2]
def __handle_test_dir_line(self,line:str):
self.gut_test_dirs.append(line[:-1].split()[1])
def __handle_git_primary_branch_name(self,line:str):
self.git_primary_branch_name = line[:-1].split()[1]
handlers_for_keys = {
"docs_defaults":__handle_docs_defaults_line,
"export_include":__handle_export_include_line,
"export_exclude":__handle_export_exclude_line,
"build_info":__handle_build_info_line,
"itch_config":__handle_itch_config_line,
"git_primary_branchname":__handle_git_primary_branch_name,
"test_dir":__handle_test_dir_line,
}
def __init__(self):
self.docs_default_files = []
self.additional_packages_to_build = []
self.__current_build_info_object = DefaultBuildInfo()
self.default_build_info_object = self.__current_build_info_object
self.all_build_objects = [self.__current_build_info_object]
self.gut_test_dirs = []
def read_config(self):
if os.path.exists(os.path.join('automation','config.txt')):
with open(os.path.join('automation','config.txt')) as config_file:
for line in config_file:
line_without_newline = line[:-1]
split_line = line_without_newline.split()
if len(split_line) > 1 and split_line[0] in self.handlers_for_keys:
self.handlers_for_keys[split_line[0]](self,line)
class BuildInfo():
# have all of the state but none of the behavior of build_game.BuildInfo
# in build_game, BuildInfo copies from this BuildInfo on initialization
build_dir = ""
build_type = ""
platform_template_name = "" # only for game exports, not asset packs
itch_channel_name = ""
files_included : list = []
add_globs : list = []
remove_globs : list = []
def __init__(self):
self.add_globs = []
self.remove_globs = []
self.files_included = []
class PlatformBuildInfo(BuildInfo):
build_type = "platform"
class DefaultBuildInfo(BuildInfo):
build_type = "default"
class AssetPackBuildInfo(BuildInfo):
# have all of the state but none of the behavior of build_asset_packs.AssetPackBuildInfo
# in build_asset_packs, AssetPackBuildInfo copies from this BuildInfo on initialization
build_type = "asset_pack"
pack_name = ""
add_to_all_platform_packs : bool = True
def read_config():
config_info = ConfigFileInfo()
config_info.read_config()
return config_info
|
[
"os.path.join"
] |
[((4078, 4118), 'os.path.join', 'os.path.join', (['"""automation"""', '"""config.txt"""'], {}), "('automation', 'config.txt')\n", (4090, 4118), False, 'import os\n'), ((4142, 4182), 'os.path.join', 'os.path.join', (['"""automation"""', '"""config.txt"""'], {}), "('automation', 'config.txt')\n", (4154, 4182), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('hosts', '0002_sshconfig'),
('projects', '0002_auto_20140912_1509'),
]
operations = [
migrations.AddField(
model_name='configuration',
name='value_ssh_key',
field=models.ForeignKey(verbose_name=b'Value', blank=True, to='hosts.SSHConfig', null=True),
),
migrations.AlterField(
model_name='configuration',
name='data_type',
field=models.CharField(default=b'string', max_length=10, null=True, blank=True, choices=[(b'boolean', b'Boolean'), (b'number', b'Number'), (b'string', b'String'), (b'ssk_key', b'SSH Key')]),
),
]
|
[
"django.db.models.ForeignKey",
"django.db.models.CharField"
] |
[((402, 491), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'verbose_name': "b'Value'", 'blank': '(True)', 'to': '"""hosts.SSHConfig"""', 'null': '(True)'}), "(verbose_name=b'Value', blank=True, to='hosts.SSHConfig',\n null=True)\n", (419, 491), False, 'from django.db import models, migrations\n'), ((619, 810), 'django.db.models.CharField', 'models.CharField', ([], {'default': "b'string'", 'max_length': '(10)', 'null': '(True)', 'blank': '(True)', 'choices': "[(b'boolean', b'Boolean'), (b'number', b'Number'), (b'string', b'String'),\n (b'ssk_key', b'SSH Key')]"}), "(default=b'string', max_length=10, null=True, blank=True,\n choices=[(b'boolean', b'Boolean'), (b'number', b'Number'), (b'string',\n b'String'), (b'ssk_key', b'SSH Key')])\n", (635, 810), False, 'from django.db import models, migrations\n')]
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
def reorder_image(img, input_order='HWC'):
"""Reorder images to 'HWC' order.
If the input_order is (h, w), return (h, w, 1);
If the input_order is (c, h, w), return (h, w, c);
If the input_order is (h, w, c), return as it is.
Args:
img (ndarray): Input image.
input_order (str): Whether the input order is 'HWC' or 'CHW'.
If the input image shape is (h, w), input_order will not have
effects. Default: 'HWC'.
Returns:
ndarray: reordered image.
"""
if input_order not in ['HWC', 'CHW']:
raise ValueError(
f'Wrong input_order {input_order}. Supported input_orders are '
"'HWC' and 'CHW'")
if len(img.shape) == 2:
img = img[..., None]
return img
if input_order == 'CHW':
img = img.transpose(1, 2, 0)
return img
def bgr2ycbcr(img, y_only=False):
"""Convert a BGR image to YCbCr image.
The bgr version of rgb2ycbcr.
It implements the ITU-R BT.601 conversion for standard-definition
television. See more details in
https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.
It differs from a similar function in cv2.cvtColor: `BGR <-> YCrCb`.
In OpenCV, it implements a JPEG conversion. See more details in
https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion.
Args:
img (ndarray): The input image. It accepts:
1. np.uint8 type with range [0, 255];
2. np.float32 type with range [0, 1].
y_only (bool): Whether to only return Y channel. Default: False.
Returns:
ndarray: The converted YCbCr image. The output image has the same type
and range as input image.
"""
img_type = img.dtype
#img = _convert_input_type_range(img)
if y_only:
out_img = np.dot(img, [24.966, 128.553, 65.481]) + 16.0
else:
out_img = np.matmul(
img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786],
[65.481, -37.797, 112.0]]) + [16, 128, 128]
#out_img = _convert_output_type_range(out_img, img_type)
return out_img
def to_y_channel(img):
"""Change to Y channel of YCbCr.
Args:
img (ndarray): Images with range [0, 255].
Returns:
(ndarray): Images with range [0, 255] (float type) without round.
"""
img = img.astype(np.float32) / 255.
if img.ndim == 3 and img.shape[2] == 3:
img = bgr2ycbcr(img, y_only=True)
img = img[..., None]
return img * 255.
|
[
"numpy.dot",
"numpy.matmul"
] |
[((2452, 2490), 'numpy.dot', 'np.dot', (['img', '[24.966, 128.553, 65.481]'], {}), '(img, [24.966, 128.553, 65.481])\n', (2458, 2490), True, 'import numpy as np\n'), ((2526, 2628), 'numpy.matmul', 'np.matmul', (['img', '[[24.966, 112.0, -18.214], [128.553, -74.203, -93.786], [65.481, -37.797, \n 112.0]]'], {}), '(img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786], [\n 65.481, -37.797, 112.0]])\n', (2535, 2628), True, 'import numpy as np\n')]
|
import time
import random
import os
from GPS import gps
from GPS import redisHelper
from GPS import helper
from GPS import args
from GPS import postProcess
# Parse the command line arguments, then, if provided, parse the arguments in
# the scenario file. Then adds default values for paramaters without definitions
# Finally, validates all argument definitions, checks that needed files and
# directories exist, and then checks to make sure that all required arguements
# received definitions.
argument_parser = args.ArgumentParser()
arguments, skipped_lines = argument_parser.parse_arguments()
# Everything GPS does should be done from within the experiment directory
# (which defaults to the current working directory)
with helper.cd(arguments['experiment_dir']):
# Connect to the redis database
R = redisHelper.connect(host=arguments['redis_host'],
port=arguments['redis_port'],
dbid=arguments['redis_dbid'])
# Clear all old state from the current database
redisHelper.deleteDB(R)
# Create a random ID for this GPS run
gpsID = helper.generateID()
# Make the output directory if it does not already exist
helper.mkdir(arguments['output_dir'])
# Now create a directory inside of that one for the files from
# this particular GPS run. If this directory already exists, rename it to
# something else
output_dir = '{}/gps-run-{}'.format(arguments['output_dir'], gpsID)
arguments['output_dir'] = output_dir
moved = False
if helper.isDir(output_dir):
random_id = helper.generateID()
os.system('mv {output_dir} {output_dir}-{random_id}'
''.format(output_dir=output_dir, random_id=random_id))
moved = True
helper.mkdir(output_dir)
# Get a logger
logger = gps.getLogger('{}/gps.log'.format(output_dir), arguments['verbose'])
# Announce the start of the run
logger.info('Starting new GPS run with GPS ID {}'.format(gpsID))
# And record a warning, if needed.
if moved:
logger.warning('Moved old GPS log files to directory {}-{}'
''.format(output_dir, random_id))
# Issue a warning about skipped lines in the scenario file
if len(skipped_lines) > 0:
for line in skipped_lines:
logger.warning("GPS skipped the following unrecognized line '{}' "
"in the scenario file".format(line))
# Update the random seed, if needed
if arguments['seed'] <= 0:
arguments['seed'] = random.randrange(0,999999)
# Create a new scenario file in the log location with all of GPS's parameters
# instantiated to their final values. The workers will use this file to set up,
# and it is useful to have for later for debugging purposes as well.
scenario_file = os.path.abspath(os.path.expanduser(os.path.expandvars('{}/scenario.txt'.format(output_dir))))
argument_parser.create_scenario_file(scenario_file, arguments)
R.set('scenarioFile:' + str(gpsID), scenario_file)
R.set('readyCount:' + str(gpsID), 0)
# Signal to the workers that the master is ready.
R.set('gpsID', gpsID)
try:
#Wait until all of the slaves are ready
ready = False
logger.info('Waiting until all workers are ready...')
oldReadyCount = -1
while(not ready):
time.sleep(1)
readyCount = redisHelper.getReadyCount(gpsID,R)
if(readyCount != oldReadyCount):
logger.info("There are {} out of a minimum of {} workers ready..."
"".format(readyCount, arguments['minimum_workers']))
oldReadyCount = readyCount
ready = readyCount >= arguments['minimum_workers']
readyCount = redisHelper.getReadyCount(gpsID,R)
logger.info("There are {} out of a minimum of {} workers ready..."
"".format(readyCount, arguments['minimum_workers']))
logger.info("GPS Master process is starting.")
pbest, decisionSeq, incumbentTrace, cpuTime, wallTime = gps.gps(arguments, gpsID)
end_master_time = time.time()
R.set('incumbent:' + str(gpsID),pbest)
finally:
R.set('cancel:' + str(gpsID),'True')
if arguments['post_process_incumbent']:
logger.info('Beginning GPS post-processing of configuration runs to select as the incumbent the '
'configuration that has the best performance on the largest number of instances. This '
'should only take a few seconds and helps protect against mistakes made by GPS due to '
'parameter interactions.')
# Create a new post-processing selector
selector = postProcess.Selector(
min_instances=arguments['post_process_min_runs'],
alpha=arguments['post_process_alpha'],
n_permutations=arguments['post_process_n_permutations'],
multiple_test_correction=arguments['post_process_multiple_test_correction'],
logger=logger)
# Add the data from the current scenario
logger.info(arguments['output_dir'])
selector.add_scenarios(arguments['output_dir'])
# And select the best configuration
incumbent, num_runs, estimated_runtime = selector.extract_best()
logger.info("The final incumbent after post-processing all of the configuration runs was evaluated "
" on {0} unique instances and has an estimated running time of {1:.2f} seconds."
"".format(num_runs, estimated_runtime))
logger.info("Final Incumbent: {}".format(incumbent))
if gps.getParamString(pbest) != incumbent:
incumbent_logger = gps.getLogger(arguments['output_dir'] + '/traj.csv', verbose=1, console=False,
format_='%(message)s', logger_name='incumbent_logger_post_process')
incumbent_logger.info('{cpu_time},{train_perf},{wall_time},{inc_id},{ac_time},{config}'
''.format(cpu_time=cpuTime,
train_perf=estimated_runtime,
wall_time=wallTime + time.time() - end_master_time,
inc_id=-1,
ac_time=-1,
config=incumbent.replace(' -',',').replace(' ','=')[1:]))
|
[
"GPS.gps.gps",
"GPS.helper.cd",
"GPS.redisHelper.deleteDB",
"GPS.helper.generateID",
"GPS.helper.mkdir",
"GPS.gps.getLogger",
"GPS.redisHelper.getReadyCount",
"GPS.gps.getParamString",
"GPS.args.ArgumentParser",
"time.time",
"GPS.redisHelper.connect",
"time.sleep",
"random.randrange",
"GPS.postProcess.Selector",
"GPS.helper.isDir"
] |
[((517, 538), 'GPS.args.ArgumentParser', 'args.ArgumentParser', ([], {}), '()\n', (536, 538), False, 'from GPS import args\n'), ((732, 770), 'GPS.helper.cd', 'helper.cd', (["arguments['experiment_dir']"], {}), "(arguments['experiment_dir'])\n", (741, 770), False, 'from GPS import helper\n'), ((816, 930), 'GPS.redisHelper.connect', 'redisHelper.connect', ([], {'host': "arguments['redis_host']", 'port': "arguments['redis_port']", 'dbid': "arguments['redis_dbid']"}), "(host=arguments['redis_host'], port=arguments[\n 'redis_port'], dbid=arguments['redis_dbid'])\n", (835, 930), False, 'from GPS import redisHelper\n'), ((1038, 1061), 'GPS.redisHelper.deleteDB', 'redisHelper.deleteDB', (['R'], {}), '(R)\n', (1058, 1061), False, 'from GPS import redisHelper\n'), ((1117, 1136), 'GPS.helper.generateID', 'helper.generateID', ([], {}), '()\n', (1134, 1136), False, 'from GPS import helper\n'), ((1203, 1240), 'GPS.helper.mkdir', 'helper.mkdir', (["arguments['output_dir']"], {}), "(arguments['output_dir'])\n", (1215, 1240), False, 'from GPS import helper\n'), ((1545, 1569), 'GPS.helper.isDir', 'helper.isDir', (['output_dir'], {}), '(output_dir)\n', (1557, 1569), False, 'from GPS import helper\n'), ((1771, 1795), 'GPS.helper.mkdir', 'helper.mkdir', (['output_dir'], {}), '(output_dir)\n', (1783, 1795), False, 'from GPS import helper\n'), ((1591, 1610), 'GPS.helper.generateID', 'helper.generateID', ([], {}), '()\n', (1608, 1610), False, 'from GPS import helper\n'), ((2554, 2581), 'random.randrange', 'random.randrange', (['(0)', '(999999)'], {}), '(0, 999999)\n', (2570, 2581), False, 'import random\n'), ((3832, 3867), 'GPS.redisHelper.getReadyCount', 'redisHelper.getReadyCount', (['gpsID', 'R'], {}), '(gpsID, R)\n', (3857, 3867), False, 'from GPS import redisHelper\n'), ((4144, 4169), 'GPS.gps.gps', 'gps.gps', (['arguments', 'gpsID'], {}), '(arguments, gpsID)\n', (4151, 4169), False, 'from GPS import gps\n'), ((4196, 4207), 'time.time', 'time.time', ([], {}), '()\n', (4205, 4207), False, 'import time\n'), ((4798, 5070), 'GPS.postProcess.Selector', 'postProcess.Selector', ([], {'min_instances': "arguments['post_process_min_runs']", 'alpha': "arguments['post_process_alpha']", 'n_permutations': "arguments['post_process_n_permutations']", 'multiple_test_correction': "arguments['post_process_multiple_test_correction']", 'logger': 'logger'}), "(min_instances=arguments['post_process_min_runs'],\n alpha=arguments['post_process_alpha'], n_permutations=arguments[\n 'post_process_n_permutations'], multiple_test_correction=arguments[\n 'post_process_multiple_test_correction'], logger=logger)\n", (4818, 5070), False, 'from GPS import postProcess\n'), ((3406, 3419), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3416, 3419), False, 'import time\n'), ((3445, 3480), 'GPS.redisHelper.getReadyCount', 'redisHelper.getReadyCount', (['gpsID', 'R'], {}), '(gpsID, R)\n', (3470, 3480), False, 'from GPS import redisHelper\n'), ((5727, 5752), 'GPS.gps.getParamString', 'gps.getParamString', (['pbest'], {}), '(pbest)\n', (5745, 5752), False, 'from GPS import gps\n'), ((5798, 5949), 'GPS.gps.getLogger', 'gps.getLogger', (["(arguments['output_dir'] + '/traj.csv')"], {'verbose': '(1)', 'console': '(False)', 'format_': '"""%(message)s"""', 'logger_name': '"""incumbent_logger_post_process"""'}), "(arguments['output_dir'] + '/traj.csv', verbose=1, console=\n False, format_='%(message)s', logger_name='incumbent_logger_post_process')\n", (5811, 5949), False, 'from GPS import gps\n'), ((6283, 6294), 'time.time', 'time.time', ([], {}), '()\n', (6292, 6294), False, 'import time\n')]
|
import sys
sys.path.append('/home/kenta/pinky')
from itertools import groupby
import time
from database import session
from model import Motion, Promise
def run_loop():
while True:
filepath = '/home/kenta/pinky/demon/test.log'
log_file = open(filepath,'a')
matching()
try:
pass
# log_file.write(time.ctime()+"\n")
finally:
log_file.close()
time.sleep(5)
def matching():
all_motion = session.query(Motion).all()
user_motion = {}
delete_motion_list = []
all_motion.sort(key=lambda tmp_motion: tmp_motion.user_id)
for user_id, motions in groupby(all_motion, key=lambda tmp_motion: tmp_motion.user_id):
tmp_motion_list = []
for motion in motions:
tmp_motion_list.append(motion)
user_motion[user_id] = tmp_motion_list
user_id_list = []
print(user_motion)
for user_id in user_motion:
if len(user_motion[user_id]) >= 2:
delete_motion_list += user_motion[user_id]
user_id_list.append(user_id)
print(user_id_list)
print('delete_motion_list: ', delete_motion_list)
matching_results = []
for i in range(len(user_id_list) - 1):
firstA = user_motion[user_id_list[i]][0].created_at
lastA = user_motion[user_id_list[i]][1].created_at
for j in range(i + 1, len(user_id_list)):
firstB = user_motion[user_id_list[j]][0].created_at
lastB = user_motion[user_id_list[j]][1].created_at
if abs(firstA - firstB).total_seconds() <= 5 and abs(lastA - lastB).total_seconds() <= 5:
# マッチング結果
if user_motion[user_id_list[i]][0].promise_id is None:
matching_results.append({'promise_id': user_motion[user_id_list[j]][0].promise_id, 'slave_user_id': user_id_list[i]})
else:
matching_results.append({'promise_id': user_motion[user_id_list[i]][0].promise_id, 'slave_user_id': user_id_list[j]})
print(user_id_list[i], user_id_list[j])
print(matching_results)
updates = []
for result in matching_results:
promise = session.query(Promise).filter(Promise.id == result['promise_id']).one_or_none()
promise.slave_user_id = result['slave_user_id']
updates.append(promise)
session.bulk_save_objects(updates)
for motion in delete_motion_list:
print('*****************')
print(motion.created_at, motion.user_id)
print('*****************')
session.delete(motion)
session.commit()
session.close()
if __name__ == '__main__':
run_loop()
|
[
"sys.path.append",
"itertools.groupby",
"database.session.delete",
"database.session.query",
"time.sleep",
"database.session.commit",
"database.session.bulk_save_objects",
"database.session.close"
] |
[((11, 47), 'sys.path.append', 'sys.path.append', (['"""/home/kenta/pinky"""'], {}), "('/home/kenta/pinky')\n", (26, 47), False, 'import sys\n'), ((653, 715), 'itertools.groupby', 'groupby', (['all_motion'], {'key': '(lambda tmp_motion: tmp_motion.user_id)'}), '(all_motion, key=lambda tmp_motion: tmp_motion.user_id)\n', (660, 715), False, 'from itertools import groupby\n'), ((2370, 2404), 'database.session.bulk_save_objects', 'session.bulk_save_objects', (['updates'], {}), '(updates)\n', (2395, 2404), False, 'from database import session\n'), ((2599, 2615), 'database.session.commit', 'session.commit', ([], {}), '()\n', (2613, 2615), False, 'from database import session\n'), ((2620, 2635), 'database.session.close', 'session.close', ([], {}), '()\n', (2633, 2635), False, 'from database import session\n'), ((2571, 2593), 'database.session.delete', 'session.delete', (['motion'], {}), '(motion)\n', (2585, 2593), False, 'from database import session\n'), ((435, 448), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (445, 448), False, 'import time\n'), ((484, 505), 'database.session.query', 'session.query', (['Motion'], {}), '(Motion)\n', (497, 505), False, 'from database import session\n'), ((2196, 2218), 'database.session.query', 'session.query', (['Promise'], {}), '(Promise)\n', (2209, 2218), False, 'from database import session\n')]
|
"""Classes describing a FreeBSD Port and the various structures."""
from abc import ABCMeta, abstractmethod
from io import StringIO
from itertools import groupby
from math import ceil, floor
from pathlib import Path
from typing import (Any, Callable, Dict, Generic, IO, Iterable, Iterator, List, Optional, Set, Tuple, TypeVar, Union,
cast)
from .dependency import Dependency
from .make import MakeDict, make, make_vars
from .platform import Platform
from .uses import Uses
from ..utilities import Orderable
__all__ = ["Port", "PortError", "PortStub"]
T = TypeVar("T", covariant=True) # pylint: disable=C0103
def peek(file: IO[Any], length: int) -> str:
pos = file.tell()
value = file.read(length)
file.seek(pos)
return value
class PortValue(Orderable, Generic[T], metaclass=ABCMeta): # pylint: disable=E1136
def __init__(self, section: int, order: int = 1) -> None:
super().__init__()
self.order = order
self.section = section
@abstractmethod
def __get__(self, instance: "Port", owner: type) -> T:
raise NotImplementedError()
@property
def _key(self) -> Tuple[int, int]:
return self.section, self.order
@abstractmethod
def generate(self, value: Union[str, List[str], "PortObject"]) -> Iterable[Tuple[str, Iterable[str]]]:
raise NotImplementedError()
@abstractmethod
def load(self, obj: "Port", variables: MakeDict) -> None:
raise NotImplementedError()
class PortVar(PortValue[Optional[str]]): # pylint: disable=E1136
def __init__(self, section: int, order: int, name: str) -> None:
super().__init__(section, order)
self.name = name
def __delete__(self, instance: "Port") -> None:
instance.del_value(self)
def __get__(self, instance: "Port", owner: type) -> Optional[str]:
value = instance.uses.get_variable(self.name)
if value is None:
if instance.has_value(self):
return cast(str, instance.get_value(self))
return None
else:
assert len(value) == 1 and isinstance(value[0], str)
return value[0]
def __set__(self, obj: "Port", value: str) -> None:
obj.set_value(self, value)
def generate(self, value: Union[str, List[str], "PortObject"]) -> Iterable[Tuple[str, Iterable[str]]]:
assert isinstance(value, str)
return (self.name, (value,)),
def load(self, obj: "Port", variables: MakeDict) -> None:
if self.name in variables:
value = variables.pop_value(self.name, combine=True)
assert value is not None
self.__set__(obj, value)
class PortVarList(PortValue[List[str]]): # pylint: disable=E1136
def __init__(self, section: int, order: int, name: str) -> None:
super().__init__(section, order)
self._setter: Callable[[Port, List[str]], List[str]] = lambda x, y: y
self.name = name
def __get__(self, instance: "Port", owner: type) -> List[str]:
value = instance.uses.get_variable(self.name)
if value is None:
if not instance.has_value(self):
self.__set__(instance, [])
value = cast(List[str], instance.get_value(self))
assert isinstance(value, list)
return value
def __set__(self, obj: "Port", value: List[str]) -> None:
obj.set_value(self, self._setter(obj, value))
def generate(self, value: Union[str, List[str], "PortObject"]) -> Iterable[Tuple[str, Iterable[str]]]:
assert isinstance(value, list)
return (self.name, value),
def load(self, obj: "Port", variables: MakeDict) -> None:
if self.name in variables:
self.__set__(obj, variables.pop(self.name))
def setter(self, setter: Callable[["Port", List[str]], List[str]]) -> "PortVarList":
self._setter = setter
return self
class PortObject(object, metaclass=ABCMeta): # pylint: disable=E1136
@abstractmethod
def generate(self) -> Iterable[Tuple[str, Iterable[str]]]:
raise NotImplementedError()
@abstractmethod
def load(self, variables: MakeDict) -> None:
raise NotImplementedError()
T2 = TypeVar("T2", bound=PortObject)
class PortObj(PortValue[T2]): # pylint: disable=E1136
def __init__(self, section: int, factory: Callable[[], T2]) -> None:
super().__init__(section)
self.factory = factory
def __get__(self, instance: "Port", owner: type) -> T2:
if not instance.has_value(self):
instance.set_value(self, self.factory())
return cast(T2, instance.get_value(self))
def generate(self, value: Union[str, List[str], PortObject]) -> Iterable[Tuple[str, Iterable[str]]]:
# pylint: disable=no-self-use
return cast(T2, value).generate()
def load(self, obj: "Port", variables: MakeDict) -> None:
self.__get__(obj, Port).load(variables)
class PortLicense(PortObject, Iterable[str]):
def __init__(self) -> None:
super().__init__()
self._licenses: Set[str] = set()
self.combination: Optional[str] = None
self.file: Optional[str] = None
def __iter__(self) -> Iterator[str]:
return iter(self._licenses)
def add(self, license_type: str) -> "PortLicense":
self._licenses.add(license_type)
return self
def generate(self) -> Iterable[Tuple[str, Iterable[str]]]:
yield ("LICENSE", sorted(self._licenses))
if self.combination is not None:
yield ("LICENSE_COMB", (self.combination,))
if self.file is not None:
yield ("LICENSE_FILE", (self.file,))
def load(self, variables: MakeDict) -> None:
if "LICENSE" in variables:
for license_type in variables.pop("LICENSE"):
self.add(license_type)
self.combination = variables.pop_value("LICENSE_COMB", default=None)
self.file = variables.pop_value("LICENSE_FILE", default=None)
class PortDepends(PortObject):
# pylint: disable=too-few-public-methods
class Collection(object):
def __init__(self, name: str) -> None:
self.name = name
self._depends: List[Dependency] = []
def __iter__(self) -> Iterator[Dependency]:
return iter(self._depends)
def add(self, dependency: Dependency) -> None:
if dependency not in self._depends:
self._depends.append(dependency)
else:
raise KeyError("%s: dependency '%s' already registered" % (self.name, dependency))
def __init__(self) -> None:
super().__init__()
self._depends: List[PortDepends.Collection] = []
self.build = self._make_depends("BUILD_DEPENDS")
self.lib = self._make_depends("LIB_DEPENDS")
self.run = self._make_depends("RUN_DEPENDS")
self.test = self._make_depends("TEST_DEPENDS")
def _make_depends(self, name: str,) -> "PortDepends.Collection":
depends = PortDepends.Collection(name)
self._depends.append(depends)
return depends
def generate(self) -> Iterable[Tuple[str, Iterable[str]]]:
return ((i.name, (str(d) + "\n" for d in sorted(i))) for i in self._depends if any(i))
def load(self, variables: MakeDict) -> None:
for depends in self._depends:
for depend in variables.pop(depends.name, default=[]):
depends.add(Dependency.create(depend))
class PortBroken(PortObject):
class Category(object):
def __init__(self, arch: str = None, opsys: str = None, osrel: str = None) -> None:
self.arch = arch
self.opsys = opsys
self.osrel = osrel
def __eq__(self, other: object) -> bool:
if isinstance(other, PortBroken.Category):
return self.arch == other.arch and self.opsys == other.opsys and self.osrel == other.osrel
return False
def __hash__(self) -> int:
return hash(str(self))
def __str__(self) -> str:
subcat: List[str] = []
if self.opsys is not None:
subcat.append(self.opsys)
if self.osrel is not None:
subcat.append(self.osrel)
if self.arch is not None:
subcat.append(self.arch)
elif self.arch is not None:
subcat.append(self.arch)
if subcat:
return "BROKEN_" + "_".join(subcat)
else:
return "BROKEN"
@staticmethod
def create(makevar: str) -> "PortBroken.Category":
subcat = makevar.split("_")[1:]
arch = None
opsys = None
osrel = None
if len(subcat) > 1:
opsys = subcat[0]
osrel = subcat[1]
if len(subcat) == 3:
arch = subcat[2]
elif len(subcat) == 1:
if subcat[0] == "FreeBSD":
opsys = subcat[0]
else:
arch = subcat[0]
return PortBroken.Category(arch, opsys, osrel)
def __init__(self) -> None:
super().__init__()
self.reasons: Dict[PortBroken.Category, str] = {}
def generate(self) -> Iterable[Tuple[str, Iterable[str]]]:
broken: Dict[str, str] = {}
for category, reason in self.reasons.items():
broken[str(category)] = reason
for category_name in sorted(broken.keys()):
yield (category_name, (broken[category_name],))
def load(self, variables: MakeDict) -> None:
for variable in variables.variables:
if variable.startswith("BROKEN"):
self.reasons[PortBroken.Category.create(variable)] = " ".join(variables.pop(variable))
class PortUses(PortObject):
def __init__(self) -> None:
super().__init__()
self._uses: Dict[type, Uses] = {}
def __contains__(self, item: Union[type, str]) -> bool:
if isinstance(item, str):
item = Uses.get(item)
return item in self._uses
def __getitem__(self, item: Union[type, str]) -> Uses:
if isinstance(item, str):
item = Uses.get(item)
if item not in self._uses:
self._uses[item] = item()
return self._uses[item]
def get_variable(self, name: str) -> Optional[List[str]]:
values = [v for v in (u.get_variable(name) for u in list(self._uses.values())) if v is not None]
if len(values) > 1:
raise PortError("PortUses: multiple uses define value for variable '%s'" % name)
return values[0] if values else None
def generate(self) -> Iterable[Tuple[str, Iterable[str]]]:
yield ("USES", (str(u) for u in sorted(self._uses.values())))
for uses in sorted(self._uses.values()):
yield from uses.generate()
def load(self, variables: MakeDict) -> None:
for use in variables.pop("USES", default=[]):
uses_var = use.split(":")
assert 1 <= len(uses_var) <= 2
name = uses_var[0]
args = uses_var[1].split(",") if len(uses_var) == 2 else []
uses = self[name]
for arg in args:
uses.add(arg)
uses.load(variables)
class PortError(Exception):
pass
class PortStub(object):
def __init__(self, category: str, name: str, portdir: Optional[Path] = None) -> None:
self.category = category
self.name = name
self._portdir = portdir
def __repr__(self) -> str:
return "<Port: %s>" % self.origin
@property
def portdir(self) -> Path:
if self._portdir is None:
from ports.core.ports import Ports
return Ports.dir / self.category / self.name
return self._portdir
@property
def origin(self) -> str:
return "%s/%s" % (self.category, self.name)
class Port(PortStub):
portname = PortVar(1, 1, "PORTNAME")
portversion = PortVar(1, 2, "PORTVERSION")
distversion = PortVar(1, 4, "DISTVERSION")
portrevision = PortVar(1, 6, "PORTREVISION")
categories = PortVarList(1, 8, "CATEGORIES")
pkgnameprefix = PortVar(1, 12, "PKGNAMEPREFIX")
distname = PortVar(1, 14, "DISTNAME")
maintainer = PortVar(2, 1, "MAINTAINER")
comment = PortVar(2, 2, "COMMENT")
license = PortObj(3, PortLicense)
depends = PortObj(4, PortDepends)
broken = PortObj(5, PortBroken)
uses = PortObj(6, PortUses)
no_arch = PortVar(7, 1, "NO_ARCH")
def __init__(self, category: str, name: str, portdir: Optional[Path]) -> None:
self._values: Dict[PortValue, Union[str, List[str], PortObject]] = {}
self.categories = [category]
super().__init__(category, name, portdir)
self.changelog: Dict[str, List[str]] = {}
self.maintainer = Platform.address
self.portname = name
self.description: Optional[str] = None
self.website: Optional[str] = None
@property # type: ignore
def category(self) -> str: # type: ignore
return self.categories[0]
@category.setter
def category(self, value: str) -> None: # type: ignore
categories = self.categories
if value in categories:
categories.remove(value)
self.categories = [value] + categories
@categories.setter
def categories(self, categories: List[str]) -> List[str]:
if not categories:
raise PortError("Port: invalid categories, must start with: %s" % self.category)
return categories
@property
def descr(self) -> Path:
return self.portdir / "pkg-descr"
@property
def pkgname(self) -> str:
return "%s%s" % (self.pkgnameprefix or "", self.portname)
@property
def version(self) -> str:
if self.distversion is not None:
return self.distversion
assert self.portversion is not None
return self.portversion
@staticmethod
def _gen_footer(makefile: StringIO) -> None:
makefile.write("\n.include <bsd.port.mk>\n")
def _gen_header(self, makefile: StringIO) -> None:
port_makefile = self.portdir / "Makefile"
metadata: List[str] = []
if port_makefile.exists():
with port_makefile.open("rU") as makefile_file:
for line in iter(makefile_file.readline, ""):
if line.startswith("# Created by") or line.startswith("# $FreeBSD"):
metadata.append(line)
if peek(makefile_file, 1) != "#":
break
else:
metadata.append("# $FreeBSD$\n")
makefile.writelines(metadata)
def _gen_sections(self, makefile: StringIO) -> None:
for _, items in groupby(sorted(list(self._values.items()), key=lambda k: k[0]), lambda k: k[0].section):
values = [j for i in items for j in i[0].generate(i[1])]
if not values:
continue
tabs = max(2, int(ceil(max(len(n[0]) for n in values) + 1.0) / Platform.tab_width))
makefile.write("\n")
for name, value in values:
needed_tabs = tabs - int(floor((len(name) + 1.0) / Platform.tab_width))
makefile.write("%s=%s" % (name, "\t" * needed_tabs))
width = tabs * Platform.tab_width
first_line = True
for i in value:
next_line = i[-1] == "\n"
i = i.rstrip("\n")
if not first_line:
if width == -1 or width + len(i) + 1 > Platform.page_width:
makefile.write(" \\\n%s" % ("\t" * tabs))
width = tabs * Platform.tab_width
else:
makefile.write(" ")
width += 1
first_line = False
makefile.write(i)
if next_line:
width = -1
else:
width += len(i)
makefile.write("\n")
def _gen_distinfo(self) -> None:
make(self.portdir, 'makesum')
def _gen_descr(self) -> None:
if self.description is None:
if self.descr.exists():
self.descr.unlink()
else:
with self.descr.open("w") as descr:
width = 0
for word in self.description.split():
next_line = word[-1] == "\n"
word = word.rstrip("\n")
if width == -1 or width + len(word) + 1 > 79:
descr.write("\n")
width = 0
elif width:
descr.write(" ")
width += 1
descr.write(word)
if next_line:
width = -1
else:
width += len(word)
descr.write("\n")
if self.website is not None:
descr.write("\nWWW: %s\n" % self.website)
def _gen_plist(self) -> None:
raise NotImplementedError("Generic Port does not know how to create pkg-plist")
def generate(self) -> None:
makefile = StringIO()
self._gen_header(makefile)
self._gen_sections(makefile)
self._gen_footer(makefile)
with open(self.portdir / "Makefile", "w") as portmakefile:
portmakefile.write(makefile.getvalue())
self._gen_distinfo()
self._gen_descr()
self._gen_plist()
def load(self) -> None:
variables = make_vars(self.portdir)
bases = [type(self)]
i = 0
while i < len(bases):
bases.extend(j for j in bases[i].__bases__ if j not in bases)
for var in list(vars(bases[i]).values()):
if isinstance(var, PortValue):
var.load(self, variables)
i += 1
if not variables.all_popped:
# TODO: remove once all R-cran ports have been verified
print("Unloaded variables for %s:" % self.name, variables)
assert variables.all_popped
if self.descr.exists():
with self.descr.open() as descr:
lines = descr.readlines()
if lines[-1].startswith("WWW"):
self.website = lines[-1].split()[1]
lines.pop()
if lines[-1] == "\n":
lines.pop()
self.description = " ".join(l.strip() for l in lines)
def del_value(self, port_value: PortValue) -> None:
if port_value in self._values:
del self._values[port_value]
def get_value(self, port_value: PortValue) -> Union[str, List[str], PortObject]:
return self._values[port_value]
def has_value(self, port_value: PortValue) -> bool:
return port_value in self._values
def set_value(self, port_value: PortValue, value: Union[str, List[str], PortObject]) -> None:
self._values[port_value] = value
|
[
"typing.cast",
"typing.TypeVar",
"io.StringIO"
] |
[((578, 606), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {'covariant': '(True)'}), "('T', covariant=True)\n", (585, 606), False, 'from typing import Any, Callable, Dict, Generic, IO, Iterable, Iterator, List, Optional, Set, Tuple, TypeVar, Union, cast\n'), ((4214, 4245), 'typing.TypeVar', 'TypeVar', (['"""T2"""'], {'bound': 'PortObject'}), "('T2', bound=PortObject)\n", (4221, 4245), False, 'from typing import Any, Callable, Dict, Generic, IO, Iterable, Iterator, List, Optional, Set, Tuple, TypeVar, Union, cast\n'), ((17402, 17412), 'io.StringIO', 'StringIO', ([], {}), '()\n', (17410, 17412), False, 'from io import StringIO\n'), ((4805, 4820), 'typing.cast', 'cast', (['T2', 'value'], {}), '(T2, value)\n', (4809, 4820), False, 'from typing import Any, Callable, Dict, Generic, IO, Iterable, Iterator, List, Optional, Set, Tuple, TypeVar, Union, cast\n')]
|
import os
import pytest
import numpy as np
import easy_dna as dna
def test_extract_from_input(tmpdir):
parts = []
for i in range(10):
part_id = "part_%s" % ("ABCDEFGHAB"[i]) # id is nonunique on purpose
alias = "part_%d" % i # alias is unique
part_length = np.random.randint(1000, 1500)
sequence = dna.random_dna_sequence(part_length)
record = dna.sequence_to_biopython_record(sequence, id=part_id)
record.name = part_id
dna.annotate_record(record, label=part_id, alias=alias)
parts.append(record)
constructs = []
for position_of_last_part in [8, 10]:
# 8: parts A-H; 10: parts A--H and A, B again
construct_record = sum(parts[1:position_of_last_part], parts[0])
construct_record.id = "construct_%02d" % (position_of_last_part)
construct_record.name = construct_record.id
constructs.append(construct_record)
target_dir = os.path.join(str(tmpdir), "test_dir")
records_dict = dna.extract_from_input(
construct_list=constructs, output_path=target_dir
)
assert records_dict["processed_report"]["shared_with"].count() == 16
with pytest.raises(TypeError):
dna.extract_from_input(output_path=target_dir)
|
[
"easy_dna.annotate_record",
"easy_dna.sequence_to_biopython_record",
"easy_dna.random_dna_sequence",
"pytest.raises",
"numpy.random.randint",
"easy_dna.extract_from_input"
] |
[((1009, 1082), 'easy_dna.extract_from_input', 'dna.extract_from_input', ([], {'construct_list': 'constructs', 'output_path': 'target_dir'}), '(construct_list=constructs, output_path=target_dir)\n', (1031, 1082), True, 'import easy_dna as dna\n'), ((293, 322), 'numpy.random.randint', 'np.random.randint', (['(1000)', '(1500)'], {}), '(1000, 1500)\n', (310, 322), True, 'import numpy as np\n'), ((342, 378), 'easy_dna.random_dna_sequence', 'dna.random_dna_sequence', (['part_length'], {}), '(part_length)\n', (365, 378), True, 'import easy_dna as dna\n'), ((396, 450), 'easy_dna.sequence_to_biopython_record', 'dna.sequence_to_biopython_record', (['sequence'], {'id': 'part_id'}), '(sequence, id=part_id)\n', (428, 450), True, 'import easy_dna as dna\n'), ((489, 544), 'easy_dna.annotate_record', 'dna.annotate_record', (['record'], {'label': 'part_id', 'alias': 'alias'}), '(record, label=part_id, alias=alias)\n', (508, 544), True, 'import easy_dna as dna\n'), ((1180, 1204), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1193, 1204), False, 'import pytest\n'), ((1214, 1260), 'easy_dna.extract_from_input', 'dna.extract_from_input', ([], {'output_path': 'target_dir'}), '(output_path=target_dir)\n', (1236, 1260), True, 'import easy_dna as dna\n')]
|
# Generated by Django 3.2.7 on 2021-11-30 00:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('workOrder', '0005_remove_workorder_forg'),
]
operations = [
migrations.AddField(
model_name='workorder',
name='FORG',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='workorder',
name='auditType',
field=models.CharField(default='', max_length=256),
),
]
|
[
"django.db.models.CharField",
"django.db.models.BooleanField"
] |
[((340, 374), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (359, 374), False, 'from django.db import migrations, models\n'), ((500, 544), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'max_length': '(256)'}), "(default='', max_length=256)\n", (516, 544), False, 'from django.db import migrations, models\n')]
|
from django.contrib import admin
# Register your models here.
from blog.models import Post
admin.site.register(Post)
|
[
"django.contrib.admin.site.register"
] |
[((94, 119), 'django.contrib.admin.site.register', 'admin.site.register', (['Post'], {}), '(Post)\n', (113, 119), False, 'from django.contrib import admin\n')]
|
#!/usr/bin/env python3
""" Basic python object resource pool.
"""
import copy
import time
import traceback
from threading import RLock, Thread
from contextlib import contextmanager
# Callback attribute name when adding a return callback to an object
CALLBACK_ATTRIBUTE = 'resource_pool_return_callback'
class AllResourcesRemoved(Exception):
""" Raised when all recources in the pool have been removed.
"""
class ObjectAlreadyInPool(Exception):
""" Raised when adding an object that is already in the pool.
"""
class ObjectNotInPool(Exception):
""" Raise when operations are performed for an object that is
not part of the resource pool.
"""
class ResourcePool(object):
def __init__(self, objects, return_callback=None):
"""
Instantiate with a list of objects you want in the resource pool.
'return_callback' is a function or method that can be used to
perform some action on an object before it is returned to the
pool but without making the process that returned the object
needing to wait for that function to be run.
This is useful for performing a time consumeing "factory reset"
(or similar) on an object before it is returned to the pool but
without holding up the process that used the resource.
The callback function, if specified should just take the object as an
argument and success is measured by no exceptions being raised. If
an exception is raised by the callback then the object will be removed
from the pool rather than being returned as an available resource.
"""
# used to track the original pool of resources, not used yet
self._objects = objects
self._removed = {}
for o in self._objects:
self._removed[id(o)] = False
# create another list with the same object references:
# copy.copy() only copies the references so the two lists are
# separate lists that point to the same objects
self._available = copy.copy(objects)
self._lock = RLock()
self._return_callback = return_callback
def all_removed(self):
return all(self._removed[id(o)] for o in self._objects)
def _get_active(self):
""" returns the list of objects that haven't been removed """
return [o for o in self._objects if not self._removed[id(o)]]
active = property(_get_active)
def _get_active_size(self):
return len(self.active)
active_size = property(_get_active_size)
def add(self, obj):
"""
Adds new objects to the pool, 'obj' can be a single object or a list of
objects and new objects are added to the end of the available resources.
"""
if type(obj) is not list:
obj = [obj]
with self._lock:
for o in obj:
if o in self._objects:
raise ObjectAlreadyInPool("Object is already in the pool.")
self._objects.append(o)
self._available.append(o)
self._removed[id(o)] = False
def remove(self, obj):
"""
Removes an object from the pool so that it can't be handed out as an
available resource again. If the object passed in is not in the pool
an ObjectNotInPool exception is raised.
"""
with self._lock:
if obj not in self._objects:
raise ObjectNotInPool("Object is not in the list of pool objects.")
# mark the resource as deleted
self._removed[id(obj)] = True
# if it is currently in the available set, remove it
self._available = [o for o in self._available if o is not obj]
if self.all_removed():
raise AllResourcesRemoved(
"All resources have been removed. "
"Further use of the resource pool is void.")
def get_resource_unmanaged(self, block=True):
"""
Gets a resource from the pool but in an "unmanaged" fashion. It is
up to you to return the resource to the pool by calling
return_resource().
Return value is an object from the pool but see the note below.
NOTE:
You should consider using get_resource() instead in a 'with' statement
as this will handle returning the resource automatically. eg:
with pool.get_resrouce() as r:
do_stuff(r)
The resource will be automatically returned upon exiting the 'with'
block.
"""
# if the pool is empty, wait for an object to be returned to the
# pool
obj = None
while True:
with self._lock:
if self.all_removed():
raise AllResourcesRemoved(
"All resources have been removed. Further use of "
"the resource pool is void unless new resources are"
"added.")
if self._available:
obj = self._available.pop(0)
if obj or (not block):
break
time.sleep(0.1)
return obj
def return_resource(self, obj, force=False):
""" Returns a resource to the pool but if:
- obj has a property named 'resource_pool_return_callback' and it is
not None
OR
- self._return_callback is not None
then start a thread that calls that callback before returning the resource
to the pool. This allows the calling process to not have to wait for that
pre-return-to-pool operation (eg. factory reset of a device that is being
tested).
NOTE: the callback added as a property to the object gets precedence
over the one specified for the pool.
NOTE: the callback property is stripped from the obj during the return
process.
"""
if (not obj) or (obj not in self._objects):
raise ObjectNotInPool("Object {} not a member of the pool".format(str(obj)))
if not force:
callback = None
if hasattr(obj, CALLBACK_ATTRIBUTE) and \
getattr(obj, CALLBACK_ATTRIBUTE) is not None:
callback = getattr(obj, CALLBACK_ATTRIBUTE)
# strip the callback attribute from the object
delattr(obj, CALLBACK_ATTRIBUTE)
elif self._return_callback:
callback = self._return_callback
if callback:
thread = Thread(target=self._run_return_callback, args=(obj, callback))
thread.setName("return_obj_{}".format(id(obj)))
thread.start()
return
with self._lock:
if not self._removed[id(obj)]:
self._available.append(obj)
def _run_return_callback(self, obj, callback):
""" This should only really be called by self.return_resource() and is intended
to be run in a thread to perform some pre-returnn-to-pool process without
the process that used the resource having to wait for that operation to occur.
If running the callback raises an exception the resource will be removed from
the pool.
"""
try:
callback(obj)
self.return_resource(obj, force=True)
except Exception:
traceback.print_exc()
self.remove(obj)
@contextmanager
def get_resource(self, block=True):
"""
Intended to be used in a 'with' statement or a contextlib.ExitStack.
Returns an object from the pool and waits if necessary. If 'block' is
False, then None is returned if the pool has been depleted.
Example useage:
with get_resrouce() as r:
do_stuff(r)
# at this point, outside the with block, the resource has
# been returned to the pool.
"""
obj = None
try:
obj = self.get_resource_unmanaged(block=block)
yield obj
finally:
if obj:
self.return_resource(obj)
|
[
"threading.Thread",
"traceback.print_exc",
"threading.RLock",
"copy.copy",
"time.sleep"
] |
[((2057, 2075), 'copy.copy', 'copy.copy', (['objects'], {}), '(objects)\n', (2066, 2075), False, 'import copy\n'), ((2097, 2104), 'threading.RLock', 'RLock', ([], {}), '()\n', (2102, 2104), False, 'from threading import RLock, Thread\n'), ((5179, 5194), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (5189, 5194), False, 'import time\n'), ((6606, 6668), 'threading.Thread', 'Thread', ([], {'target': 'self._run_return_callback', 'args': '(obj, callback)'}), '(target=self._run_return_callback, args=(obj, callback))\n', (6612, 6668), False, 'from threading import RLock, Thread\n'), ((7453, 7474), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (7472, 7474), False, 'import traceback\n')]
|
# -*- coding:utf-8 -*-
import logging
from toybox.simpleapi import run
from pyramid_rpc.jsonrpc import jsonrpc_method
# please: pip install pyramid_rpc
# see also: http://docs.pylonsproject.org/projects/pyramid_rpc/en/latest/jsonrpc.html
"""
python ./jsonrpc_server.py
$ echo '{"id": "1", "params": {"name": "foo"}, "method": "say_hello", "jsonrpc": "2.0"}' | http POST :8080/api
{
"id": "1",
"jsonrpc": "2.0",
"result": "hello, foo"
}
"""
@jsonrpc_method(endpoint='api')
def say_hello(request, name):
return 'hello, {}'.format(name)
def includeme(config):
config.include('pyramid_rpc.jsonrpc')
config.add_jsonrpc_endpoint('api', '/api')
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
run.include(includeme)
run(port=8080)
|
[
"pyramid_rpc.jsonrpc.jsonrpc_method",
"toybox.simpleapi.run",
"logging.basicConfig",
"toybox.simpleapi.run.include"
] |
[((456, 486), 'pyramid_rpc.jsonrpc.jsonrpc_method', 'jsonrpc_method', ([], {'endpoint': '"""api"""'}), "(endpoint='api')\n", (470, 486), False, 'from pyramid_rpc.jsonrpc import jsonrpc_method\n'), ((700, 740), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (719, 740), False, 'import logging\n'), ((745, 767), 'toybox.simpleapi.run.include', 'run.include', (['includeme'], {}), '(includeme)\n', (756, 767), False, 'from toybox.simpleapi import run\n'), ((772, 786), 'toybox.simpleapi.run', 'run', ([], {'port': '(8080)'}), '(port=8080)\n', (775, 786), False, 'from toybox.simpleapi import run\n')]
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^(?P<slug>[a-z0-9-_]+?)-(?P<product_id>[0-9]+)/$',
views.product_details, name='details'),
url(r'^category/(?P<path>[a-z0-9-_/]+?)-(?P<category_id>[0-9]+)/$',
views.category_index, name='category'),
url(r'^brand/(?P<path>[a-z0-9-_/]+?)-(?P<brand_id>[0-9]+)/$',
views.brand_index, name='brand'),
url(r'^tags/(?P<path>[a-z0-9-_/]+?)-(?P<tag_id>[0-9]+)/$',
views.tags_index, name='tags'),
url(r'^tags/render/$',
views.tags_render, name='tags_render'),
url(r'^sale/$',
views.get_all_discounted_product, name='sale_product'),
url(r'^sale/render/$',
views.render_discounted_product, name='sale_render'),
url(r'(?P<product_id>[0-9]+)/similar/$',
views.render_similar_product, name="similar-products"),
url(r'(?P<product_id>[0-9]+)/similar/all/$',
views.all_similar_product, name="all-similar-products"),
url(r'(?P<product_id>[0-9]+)/similar/all/render/$',
views.render_all_similar_product, name="render-all-similar-products"),
url(r'^recommendation/all/$',
views.all_recommendation, name="all-recommended-products"),
url(r'^recommendation/all/render/$',
views.get_render_all_recommendation, name="render-all-recommended-products"),
url(r'(?P<slug>[a-z0-9-_]+?)-(?P<product_id>[0-9]+)/add/$',
views.product_add_to_cart, name="add-to-cart"),
url(r'^collection/(?P<slug>[a-z0-9-_/]+?)-(?P<pk>[0-9]+)/$',
views.collection_index, name='collection')]
api_urlpatterns = [
url(r'^product/similar/(?P<product_id>[0-9]+)/$',
views.get_similar_product, name='similarproduct'),
url(r'^update/rating/$',
views.update_product_rating, name='update_rating'),
url(r'^recommender/arc/(?P<mode>[a-z0-9-_/]+?)/(?P<limit>[0-9]+)/$',
views.get_arc_recommendation, name='arcrecommendaion'),
url(r'^recommendation/hybrid/$',
views.get_recommendation, name='hybridrecommendation'),
url(r'^recommendation/partial/render/$',
views.render_recommendation, name='renderhomerecommendation'),
url(r'^recommendation/evaluate/$',
views.evaluate_recommendation, name="evaluate_recommendation"),
]
|
[
"django.conf.urls.url"
] |
[((75, 174), 'django.conf.urls.url', 'url', (['"""^(?P<slug>[a-z0-9-_]+?)-(?P<product_id>[0-9]+)/$"""', 'views.product_details'], {'name': '"""details"""'}), "('^(?P<slug>[a-z0-9-_]+?)-(?P<product_id>[0-9]+)/$', views.\n product_details, name='details')\n", (78, 174), False, 'from django.conf.urls import url\n'), ((184, 294), 'django.conf.urls.url', 'url', (['"""^category/(?P<path>[a-z0-9-_/]+?)-(?P<category_id>[0-9]+)/$"""', 'views.category_index'], {'name': '"""category"""'}), "('^category/(?P<path>[a-z0-9-_/]+?)-(?P<category_id>[0-9]+)/$', views.\n category_index, name='category')\n", (187, 294), False, 'from django.conf.urls import url\n'), ((304, 402), 'django.conf.urls.url', 'url', (['"""^brand/(?P<path>[a-z0-9-_/]+?)-(?P<brand_id>[0-9]+)/$"""', 'views.brand_index'], {'name': '"""brand"""'}), "('^brand/(?P<path>[a-z0-9-_/]+?)-(?P<brand_id>[0-9]+)/$', views.\n brand_index, name='brand')\n", (307, 402), False, 'from django.conf.urls import url\n'), ((412, 504), 'django.conf.urls.url', 'url', (['"""^tags/(?P<path>[a-z0-9-_/]+?)-(?P<tag_id>[0-9]+)/$"""', 'views.tags_index'], {'name': '"""tags"""'}), "('^tags/(?P<path>[a-z0-9-_/]+?)-(?P<tag_id>[0-9]+)/$', views.tags_index,\n name='tags')\n", (415, 504), False, 'from django.conf.urls import url\n'), ((515, 575), 'django.conf.urls.url', 'url', (['"""^tags/render/$"""', 'views.tags_render'], {'name': '"""tags_render"""'}), "('^tags/render/$', views.tags_render, name='tags_render')\n", (518, 575), False, 'from django.conf.urls import url\n'), ((590, 659), 'django.conf.urls.url', 'url', (['"""^sale/$"""', 'views.get_all_discounted_product'], {'name': '"""sale_product"""'}), "('^sale/$', views.get_all_discounted_product, name='sale_product')\n", (593, 659), False, 'from django.conf.urls import url\n'), ((674, 748), 'django.conf.urls.url', 'url', (['"""^sale/render/$"""', 'views.render_discounted_product'], {'name': '"""sale_render"""'}), "('^sale/render/$', views.render_discounted_product, name='sale_render')\n", (677, 748), False, 'from django.conf.urls import url\n'), ((763, 862), 'django.conf.urls.url', 'url', (['"""(?P<product_id>[0-9]+)/similar/$"""', 'views.render_similar_product'], {'name': '"""similar-products"""'}), "('(?P<product_id>[0-9]+)/similar/$', views.render_similar_product, name=\n 'similar-products')\n", (766, 862), False, 'from django.conf.urls import url\n'), ((872, 976), 'django.conf.urls.url', 'url', (['"""(?P<product_id>[0-9]+)/similar/all/$"""', 'views.all_similar_product'], {'name': '"""all-similar-products"""'}), "('(?P<product_id>[0-9]+)/similar/all/$', views.all_similar_product, name\n ='all-similar-products')\n", (875, 976), False, 'from django.conf.urls import url\n'), ((986, 1111), 'django.conf.urls.url', 'url', (['"""(?P<product_id>[0-9]+)/similar/all/render/$"""', 'views.render_all_similar_product'], {'name': '"""render-all-similar-products"""'}), "('(?P<product_id>[0-9]+)/similar/all/render/$', views.\n render_all_similar_product, name='render-all-similar-products')\n", (989, 1111), False, 'from django.conf.urls import url\n'), ((1121, 1213), 'django.conf.urls.url', 'url', (['"""^recommendation/all/$"""', 'views.all_recommendation'], {'name': '"""all-recommended-products"""'}), "('^recommendation/all/$', views.all_recommendation, name=\n 'all-recommended-products')\n", (1124, 1213), False, 'from django.conf.urls import url\n'), ((1223, 1339), 'django.conf.urls.url', 'url', (['"""^recommendation/all/render/$"""', 'views.get_render_all_recommendation'], {'name': '"""render-all-recommended-products"""'}), "('^recommendation/all/render/$', views.get_render_all_recommendation,\n name='render-all-recommended-products')\n", (1226, 1339), False, 'from django.conf.urls import url\n'), ((1350, 1460), 'django.conf.urls.url', 'url', (['"""(?P<slug>[a-z0-9-_]+?)-(?P<product_id>[0-9]+)/add/$"""', 'views.product_add_to_cart'], {'name': '"""add-to-cart"""'}), "('(?P<slug>[a-z0-9-_]+?)-(?P<product_id>[0-9]+)/add/$', views.\n product_add_to_cart, name='add-to-cart')\n", (1353, 1460), False, 'from django.conf.urls import url\n'), ((1470, 1577), 'django.conf.urls.url', 'url', (['"""^collection/(?P<slug>[a-z0-9-_/]+?)-(?P<pk>[0-9]+)/$"""', 'views.collection_index'], {'name': '"""collection"""'}), "('^collection/(?P<slug>[a-z0-9-_/]+?)-(?P<pk>[0-9]+)/$', views.\n collection_index, name='collection')\n", (1473, 1577), False, 'from django.conf.urls import url\n'), ((1608, 1710), 'django.conf.urls.url', 'url', (['"""^product/similar/(?P<product_id>[0-9]+)/$"""', 'views.get_similar_product'], {'name': '"""similarproduct"""'}), "('^product/similar/(?P<product_id>[0-9]+)/$', views.get_similar_product,\n name='similarproduct')\n", (1611, 1710), False, 'from django.conf.urls import url\n'), ((1721, 1795), 'django.conf.urls.url', 'url', (['"""^update/rating/$"""', 'views.update_product_rating'], {'name': '"""update_rating"""'}), "('^update/rating/$', views.update_product_rating, name='update_rating')\n", (1724, 1795), False, 'from django.conf.urls import url\n'), ((1810, 1937), 'django.conf.urls.url', 'url', (['"""^recommender/arc/(?P<mode>[a-z0-9-_/]+?)/(?P<limit>[0-9]+)/$"""', 'views.get_arc_recommendation'], {'name': '"""arcrecommendaion"""'}), "('^recommender/arc/(?P<mode>[a-z0-9-_/]+?)/(?P<limit>[0-9]+)/$', views.\n get_arc_recommendation, name='arcrecommendaion')\n", (1813, 1937), False, 'from django.conf.urls import url\n'), ((1947, 2038), 'django.conf.urls.url', 'url', (['"""^recommendation/hybrid/$"""', 'views.get_recommendation'], {'name': '"""hybridrecommendation"""'}), "('^recommendation/hybrid/$', views.get_recommendation, name=\n 'hybridrecommendation')\n", (1950, 2038), False, 'from django.conf.urls import url\n'), ((2048, 2154), 'django.conf.urls.url', 'url', (['"""^recommendation/partial/render/$"""', 'views.render_recommendation'], {'name': '"""renderhomerecommendation"""'}), "('^recommendation/partial/render/$', views.render_recommendation, name=\n 'renderhomerecommendation')\n", (2051, 2154), False, 'from django.conf.urls import url\n'), ((2164, 2265), 'django.conf.urls.url', 'url', (['"""^recommendation/evaluate/$"""', 'views.evaluate_recommendation'], {'name': '"""evaluate_recommendation"""'}), "('^recommendation/evaluate/$', views.evaluate_recommendation, name=\n 'evaluate_recommendation')\n", (2167, 2265), False, 'from django.conf.urls import url\n')]
|
import unittest
from jarr.controllers.feed_builder import FeedBuilderController as FBC
from jarr.lib.enums import FeedType
class ConstructFeedFromTest(unittest.TestCase):
@property
def jdh_feed(self):
return {'icon_url': 'https://www.journalduhacker.net/assets/jdh-ico-31'
'1c23d65a3a9928889718838e2626c0665d83712d488713c9a'
'6c2ba2c676c0e.ico',
'link': 'https://www.journalduhacker.net/rss',
'links': ['https://www.journalduhacker.net/rss',
'https://www.journalduhacker.net/comments.rss'],
'site_link': 'https://www.journalduhacker.net/',
'title': 'Journal du hacker',
'feed_type': FeedType.classic}
def test_url(self):
jh = FBC('https://www.journalduhacker.net/').construct()
self.assertEqual(self.jdh_feed, jh)
def test_url_non_https(self):
jh = FBC('http://journalduhacker.net/').construct()
self.assertEqual(self.jdh_feed, jh)
def test_url_rss(self):
jdh_feed = self.jdh_feed
jh = FBC('http://journalduhacker.net/rss').construct()
self.assertEqual(jdh_feed, jh)
def test_joies_du_code(self):
self.maxDiff = None
joi = FBC('https://lesjoiesducode.fr/feed').construct()
joi.pop('icon_url')
self.assertEqual(
{'feed_type': FeedType.classic,
'link': 'https://lesjoiesducode.fr/feed',
'site_link': 'https://lesjoiesducode.fr',
'title': 'Les Joies du Code – Humour de développeurs '
': gifs, memes, blagues'}, joi)
def test_apod_from_site(self):
nasa = FBC('http://apod.nasa.gov/').construct()
self.assertEqual(
{'icon_url': 'https://apod.nasa.gov/favicon.ico',
'feed_type': FeedType.classic,
'site_link': 'https://apod.nasa.gov/apod/astropix.html',
'title': 'Astronomy Picture of the Day'}, nasa)
def test_apod_from_feed(self):
nasa = FBC('http://apod.nasa.gov/apod.rss').construct()
self.assertEqual(
{'description': 'Astronomy Picture of the Day',
'feed_type': FeedType.classic,
'icon_url': 'https://apod.nasa.gov/favicon.ico',
'link': 'https://apod.nasa.gov/apod.rss',
'site_link': 'https://apod.nasa.gov/',
'title': 'APOD'}, nasa)
def test_reddit_from_site(self):
reddit = FBC('https://www.reddit.com/r/france/').construct()
self.assertEqual({
'description': 'La France et les Français.',
'feed_type': FeedType.reddit,
'icon_url': 'https://www.redditstatic.com/desktop2x/'
'img/favicon/android-icon-192x192.png',
'site_link': 'https://www.reddit.com/r/france/',
'link': 'https://www.reddit.com/r/france/.rss',
'title': 'France'}, reddit)
def test_reddit_from_feed(self):
reddit = FBC('https://www.reddit.com/r/france/.rss').construct()
self.assertEqual(
{'description': 'La France et les Français.',
'feed_type': FeedType.reddit,
'icon_url': 'https://www.redditstatic.com/desktop2x/'
'img/favicon/android-icon-192x192.png',
'link': 'https://www.reddit.com/r/france/.rss',
'site_link': 'https://www.reddit.com/r/france/',
'title': 'France'}, reddit)
def test_instagram(self):
insta = FBC('http://www.instagram.com/jaesivsm/').construct()
self.assertEqual('jaesivsm', insta['link'])
self.assertEqual(FeedType.instagram, insta['feed_type'])
def test_twitter(self):
feed = FBC('http://twitter.com/jaesivsm/').construct()
self.assertEqual('jaesivsm', feed['link'])
self.assertEqual(FeedType.twitter, feed['feed_type'])
def test_soundcloud(self):
soundcloud = FBC('//soundcloud.com/popotes-podcast/').construct()
self.assertEqual({
'feed_type': FeedType.soundcloud,
'icon_url': 'https://a-v2.sndcdn.com/assets/'
'images/sc-icons/favicon-2cadd14bdb.ico',
'link': 'popotes-podcast',
'site_link': 'https://soundcloud.com/popotes-podcast/',
'title': 'SoundCloud'}, soundcloud)
def test_youtube(self):
yt_channel = 'www.youtube.com/channel/UCOWsWZTiXkbvQvtWO9RA0gA'
feed = FBC(yt_channel).construct()
self.assertEqual(FeedType.classic, feed['feed_type'])
self.assertEqual('https://www.youtube.com/feeds/videos.xml'
'?channel_id=UCOWsWZTiXkbvQvtWO9RA0gA', feed['link'])
self.assertEqual('BenzaieLive', feed['title'])
def test_json(self):
feed = FBC('https://daringfireball.net/feeds/json').construct()
self.assertEqual({'feed_type': FeedType.json,
'icon_url': 'https://daringfireball.net/'
'graphics/favicon-64.png',
'link': 'https://daringfireball.net/feeds/json',
'links': ['https://daringfireball.net/feeds/main',
'https://daringfireball.net/feeds/json'],
'site_link': 'https://daringfireball.net/',
'title': 'Daring Fireball'}, feed)
|
[
"jarr.controllers.feed_builder.FeedBuilderController"
] |
[((821, 860), 'jarr.controllers.feed_builder.FeedBuilderController', 'FBC', (['"""https://www.journalduhacker.net/"""'], {}), "('https://www.journalduhacker.net/')\n", (824, 860), True, 'from jarr.controllers.feed_builder import FeedBuilderController as FBC\n'), ((965, 999), 'jarr.controllers.feed_builder.FeedBuilderController', 'FBC', (['"""http://journalduhacker.net/"""'], {}), "('http://journalduhacker.net/')\n", (968, 999), True, 'from jarr.controllers.feed_builder import FeedBuilderController as FBC\n'), ((1131, 1168), 'jarr.controllers.feed_builder.FeedBuilderController', 'FBC', (['"""http://journalduhacker.net/rss"""'], {}), "('http://journalduhacker.net/rss')\n", (1134, 1168), True, 'from jarr.controllers.feed_builder import FeedBuilderController as FBC\n'), ((1297, 1334), 'jarr.controllers.feed_builder.FeedBuilderController', 'FBC', (['"""https://lesjoiesducode.fr/feed"""'], {}), "('https://lesjoiesducode.fr/feed')\n", (1300, 1334), True, 'from jarr.controllers.feed_builder import FeedBuilderController as FBC\n'), ((1739, 1767), 'jarr.controllers.feed_builder.FeedBuilderController', 'FBC', (['"""http://apod.nasa.gov/"""'], {}), "('http://apod.nasa.gov/')\n", (1742, 1767), True, 'from jarr.controllers.feed_builder import FeedBuilderController as FBC\n'), ((2110, 2146), 'jarr.controllers.feed_builder.FeedBuilderController', 'FBC', (['"""http://apod.nasa.gov/apod.rss"""'], {}), "('http://apod.nasa.gov/apod.rss')\n", (2113, 2146), True, 'from jarr.controllers.feed_builder import FeedBuilderController as FBC\n'), ((2574, 2613), 'jarr.controllers.feed_builder.FeedBuilderController', 'FBC', (['"""https://www.reddit.com/r/france/"""'], {}), "('https://www.reddit.com/r/france/')\n", (2577, 2613), True, 'from jarr.controllers.feed_builder import FeedBuilderController as FBC\n'), ((3098, 3141), 'jarr.controllers.feed_builder.FeedBuilderController', 'FBC', (['"""https://www.reddit.com/r/france/.rss"""'], {}), "('https://www.reddit.com/r/france/.rss')\n", (3101, 3141), True, 'from jarr.controllers.feed_builder import FeedBuilderController as FBC\n'), ((3624, 3665), 'jarr.controllers.feed_builder.FeedBuilderController', 'FBC', (['"""http://www.instagram.com/jaesivsm/"""'], {}), "('http://www.instagram.com/jaesivsm/')\n", (3627, 3665), True, 'from jarr.controllers.feed_builder import FeedBuilderController as FBC\n'), ((3839, 3874), 'jarr.controllers.feed_builder.FeedBuilderController', 'FBC', (['"""http://twitter.com/jaesivsm/"""'], {}), "('http://twitter.com/jaesivsm/')\n", (3842, 3874), True, 'from jarr.controllers.feed_builder import FeedBuilderController as FBC\n'), ((4053, 4093), 'jarr.controllers.feed_builder.FeedBuilderController', 'FBC', (['"""//soundcloud.com/popotes-podcast/"""'], {}), "('//soundcloud.com/popotes-podcast/')\n", (4056, 4093), True, 'from jarr.controllers.feed_builder import FeedBuilderController as FBC\n'), ((4562, 4577), 'jarr.controllers.feed_builder.FeedBuilderController', 'FBC', (['yt_channel'], {}), '(yt_channel)\n', (4565, 4577), True, 'from jarr.controllers.feed_builder import FeedBuilderController as FBC\n'), ((4895, 4939), 'jarr.controllers.feed_builder.FeedBuilderController', 'FBC', (['"""https://daringfireball.net/feeds/json"""'], {}), "('https://daringfireball.net/feeds/json')\n", (4898, 4939), True, 'from jarr.controllers.feed_builder import FeedBuilderController as FBC\n')]
|
# coding=utf-8
import tensorflow as tf
import numpy as np
import helpers
tf.reset_default_graph()
sess = tf.InteractiveSession()
PAD = 0
EOS = 1
vocab_size = 10
input_embedding_size = 20
encoder_hidden_units = 512
decoder_hidden_units = encoder_hidden_units * 2
# define inputs
encoder_input = tf.placeholder(shape=(None, None), dtype=tf.int32, name='encoder_inputs')
# encoder_input_length = tf.placeholder(shape=(None,), dtype=tf.int32, name='encoder_input_length')
decoder_targets = tf.placeholder(shape=(None, None), dtype=tf.int32, name='decoder_target')
decoder_input = tf.placeholder(shape=(None, None), dtype=tf.int32, name='decoder_input')
# define embedding layer
embedding = tf.Variable(tf.random_uniform([vocab_size, input_embedding_size], -1.0, 1.0), dtype=tf.float32)
encoder_inputs_embedded = tf.nn.embedding_lookup(embedding, encoder_input, name='encoder_inputs_embedded')
decoder_inputs_embedded = tf.nn.embedding_lookup(embedding, decoder_input, name='decoder_inputs_embedded')
encoder_cell_fw = tf.contrib.rnn.LSTMCell(encoder_hidden_units)
encoder_cell_bw = tf.contrib.rnn.LSTMCell(encoder_hidden_units)
((encoder_fw_outputs,
encoder_bw_outputs),
(encoder_fw_final_state,
encoder_bw_final_state)) = (tf.nn.bidirectional_dynamic_rnn(cell_fw=encoder_cell_fw,
cell_bw=encoder_cell_bw,
inputs=encoder_inputs_embedded,
dtype=tf.float32,
time_major=False))
encoder_outputs = tf.concat((encoder_fw_outputs, encoder_bw_outputs), 2)
encoder_final_state_c = tf.concat((encoder_fw_final_state.c, encoder_bw_final_state.c), 1)
encoder_final_state_h = tf.concat((encoder_fw_final_state.h, encoder_bw_final_state.h), 1)
encoder_final_state = tf.contrib.rnn.LSTMStateTuple(c=encoder_final_state_c,
h=encoder_final_state_h)
decoder_cell = tf.contrib.rnn.LSTMCell(decoder_hidden_units)
encoder_max_time, batch_size = tf.unstack(tf.shape(encoder_input))
print(encoder_max_time)
print(batch_size)
# decoder_length = encoder_input_length + 3
decoder_outputs, decoder_final_state = tf.nn.dynamic_rnn(decoder_cell, encoder_outputs,
initial_state=encoder_final_state, dtype=tf.float32)
decoder_logits = tf.contrib.layers.linear(decoder_outputs, vocab_size)
decoder_prediction = tf.argmax(decoder_logits, 2)
stepwise_cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
labels=tf.one_hot(decoder_targets, depth=vocab_size, dtype=tf.float32),
logits=decoder_logits)
loss = tf.reduce_mean(stepwise_cross_entropy)
train_op = tf.train.AdamOptimizer().minimize(loss)
sess.run(tf.global_variables_initializer())
batch_ = [[6], [3, 4], [9, 8, 7]]
batch_, batch_length_ = helpers.batch(batch_)
print('batch_encoded:\n' + str(batch_))
din_, dlen_ = helpers.batch(np.ones(shape=(3, 1), dtype=np.int32), max_sequence_length=4)
print('decoder inputs:\n' + str(din_))
pred_ = sess.run(decoder_prediction,
feed_dict={
encoder_input: batch_,
decoder_input: din_,})
print('decoder predictions:\n' + str(pred_))
|
[
"tensorflow.reset_default_graph",
"numpy.ones",
"tensorflow.contrib.layers.linear",
"tensorflow.nn.bidirectional_dynamic_rnn",
"tensorflow.InteractiveSession",
"tensorflow.one_hot",
"tensorflow.contrib.rnn.LSTMStateTuple",
"tensorflow.concat",
"tensorflow.placeholder",
"helpers.batch",
"tensorflow.nn.embedding_lookup",
"tensorflow.global_variables_initializer",
"tensorflow.reduce_mean",
"tensorflow.random_uniform",
"tensorflow.nn.dynamic_rnn",
"tensorflow.argmax",
"tensorflow.shape",
"tensorflow.contrib.rnn.LSTMCell",
"tensorflow.train.AdamOptimizer"
] |
[((75, 99), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (97, 99), True, 'import tensorflow as tf\n'), ((107, 130), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (128, 130), True, 'import tensorflow as tf\n'), ((298, 371), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '(None, None)', 'dtype': 'tf.int32', 'name': '"""encoder_inputs"""'}), "(shape=(None, None), dtype=tf.int32, name='encoder_inputs')\n", (312, 371), True, 'import tensorflow as tf\n'), ((490, 563), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '(None, None)', 'dtype': 'tf.int32', 'name': '"""decoder_target"""'}), "(shape=(None, None), dtype=tf.int32, name='decoder_target')\n", (504, 563), True, 'import tensorflow as tf\n'), ((580, 652), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '(None, None)', 'dtype': 'tf.int32', 'name': '"""decoder_input"""'}), "(shape=(None, None), dtype=tf.int32, name='decoder_input')\n", (594, 652), True, 'import tensorflow as tf\n'), ((814, 899), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embedding', 'encoder_input'], {'name': '"""encoder_inputs_embedded"""'}), "(embedding, encoder_input, name='encoder_inputs_embedded'\n )\n", (836, 899), True, 'import tensorflow as tf\n'), ((921, 1006), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embedding', 'decoder_input'], {'name': '"""decoder_inputs_embedded"""'}), "(embedding, decoder_input, name='decoder_inputs_embedded'\n )\n", (943, 1006), True, 'import tensorflow as tf\n'), ((1021, 1066), 'tensorflow.contrib.rnn.LSTMCell', 'tf.contrib.rnn.LSTMCell', (['encoder_hidden_units'], {}), '(encoder_hidden_units)\n', (1044, 1066), True, 'import tensorflow as tf\n'), ((1085, 1130), 'tensorflow.contrib.rnn.LSTMCell', 'tf.contrib.rnn.LSTMCell', (['encoder_hidden_units'], {}), '(encoder_hidden_units)\n', (1108, 1130), True, 'import tensorflow as tf\n'), ((1233, 1391), 'tensorflow.nn.bidirectional_dynamic_rnn', 'tf.nn.bidirectional_dynamic_rnn', ([], {'cell_fw': 'encoder_cell_fw', 'cell_bw': 'encoder_cell_bw', 'inputs': 'encoder_inputs_embedded', 'dtype': 'tf.float32', 'time_major': '(False)'}), '(cell_fw=encoder_cell_fw, cell_bw=\n encoder_cell_bw, inputs=encoder_inputs_embedded, dtype=tf.float32,\n time_major=False)\n', (1264, 1391), True, 'import tensorflow as tf\n'), ((1651, 1705), 'tensorflow.concat', 'tf.concat', (['(encoder_fw_outputs, encoder_bw_outputs)', '(2)'], {}), '((encoder_fw_outputs, encoder_bw_outputs), 2)\n', (1660, 1705), True, 'import tensorflow as tf\n'), ((1731, 1797), 'tensorflow.concat', 'tf.concat', (['(encoder_fw_final_state.c, encoder_bw_final_state.c)', '(1)'], {}), '((encoder_fw_final_state.c, encoder_bw_final_state.c), 1)\n', (1740, 1797), True, 'import tensorflow as tf\n'), ((1822, 1888), 'tensorflow.concat', 'tf.concat', (['(encoder_fw_final_state.h, encoder_bw_final_state.h)', '(1)'], {}), '((encoder_fw_final_state.h, encoder_bw_final_state.h), 1)\n', (1831, 1888), True, 'import tensorflow as tf\n'), ((1911, 1990), 'tensorflow.contrib.rnn.LSTMStateTuple', 'tf.contrib.rnn.LSTMStateTuple', ([], {'c': 'encoder_final_state_c', 'h': 'encoder_final_state_h'}), '(c=encoder_final_state_c, h=encoder_final_state_h)\n', (1940, 1990), True, 'import tensorflow as tf\n'), ((2060, 2105), 'tensorflow.contrib.rnn.LSTMCell', 'tf.contrib.rnn.LSTMCell', (['decoder_hidden_units'], {}), '(decoder_hidden_units)\n', (2083, 2105), True, 'import tensorflow as tf\n'), ((2301, 2407), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['decoder_cell', 'encoder_outputs'], {'initial_state': 'encoder_final_state', 'dtype': 'tf.float32'}), '(decoder_cell, encoder_outputs, initial_state=\n encoder_final_state, dtype=tf.float32)\n', (2318, 2407), True, 'import tensorflow as tf\n'), ((2478, 2531), 'tensorflow.contrib.layers.linear', 'tf.contrib.layers.linear', (['decoder_outputs', 'vocab_size'], {}), '(decoder_outputs, vocab_size)\n', (2502, 2531), True, 'import tensorflow as tf\n'), ((2553, 2581), 'tensorflow.argmax', 'tf.argmax', (['decoder_logits', '(2)'], {}), '(decoder_logits, 2)\n', (2562, 2581), True, 'import tensorflow as tf\n'), ((2760, 2798), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['stepwise_cross_entropy'], {}), '(stepwise_cross_entropy)\n', (2774, 2798), True, 'import tensorflow as tf\n'), ((2955, 2976), 'helpers.batch', 'helpers.batch', (['batch_'], {}), '(batch_)\n', (2968, 2976), False, 'import helpers\n'), ((703, 767), 'tensorflow.random_uniform', 'tf.random_uniform', (['[vocab_size, input_embedding_size]', '(-1.0)', '(1.0)'], {}), '([vocab_size, input_embedding_size], -1.0, 1.0)\n', (720, 767), True, 'import tensorflow as tf\n'), ((2149, 2172), 'tensorflow.shape', 'tf.shape', (['encoder_input'], {}), '(encoder_input)\n', (2157, 2172), True, 'import tensorflow as tf\n'), ((2860, 2893), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2891, 2893), True, 'import tensorflow as tf\n'), ((3046, 3083), 'numpy.ones', 'np.ones', ([], {'shape': '(3, 1)', 'dtype': 'np.int32'}), '(shape=(3, 1), dtype=np.int32)\n', (3053, 3083), True, 'import numpy as np\n'), ((2660, 2723), 'tensorflow.one_hot', 'tf.one_hot', (['decoder_targets'], {'depth': 'vocab_size', 'dtype': 'tf.float32'}), '(decoder_targets, depth=vocab_size, dtype=tf.float32)\n', (2670, 2723), True, 'import tensorflow as tf\n'), ((2810, 2834), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {}), '()\n', (2832, 2834), True, 'import tensorflow as tf\n')]
|
import math
import pygame as pg
from collections import OrderedDict
from data.core import tools, constants
from data.components.labels import Button, ButtonGroup
from data.components.special_buttons import GameButton, NeonButton
from data.components.animation import Animation
from data.components.state_machine import _State
class LobbyScreen(_State):
"""
This state represents the lobby where the player can choose
which game they want to play or view their high scores. This is also
the exit point for the game.
"""
per_page = 6
def __init__(self, controller):
super(LobbyScreen, self).__init__(controller)
self.animations = pg.sprite.Group()
def update_screen_buttons(self, games):
screen_rect = pg.Rect((0, 0), constants.RENDER_SIZE)
number_of_pages = int(math.ceil(len(games) / float(self.per_page)))
self.loop_length = constants.RENDER_SIZE[0] * number_of_pages
self.game_buttons = self.make_game_pages(games, screen_rect, self.per_page)
nav_buttons = self.make_navigation_buttons(screen_rect)
main_buttons = self.make_main_buttons(screen_rect)
self.buttons = ButtonGroup(nav_buttons, main_buttons)
def make_game_pages(self, games, screen_rect, per):
games_list = list(games.keys())
groups = (games_list[i:i+per] for i in range(0, len(games), per))
columns = 3
width, height = GameButton.width, GameButton.height
spacer_x, spacer_y = 50, 80
start_x = (screen_rect.w - width * columns - spacer_x * (columns-1))//2
start_y = screen_rect.top + 105
step_x, step_y = width + spacer_x, height + spacer_y
buttons = ButtonGroup()
for offset,group in enumerate(groups):
offset *= constants.RENDER_SIZE[0]
for i,game in enumerate(group):
y, x = divmod(i, columns)
pos = (start_x + step_x * x + offset, start_y + step_y * y)
GameButton(pos, game, games[game], self.change_state, buttons)
return buttons
def make_navigation_buttons(self, screen_rect):
sheet = constants.GFX["nav_buttons"]
size = (53, 50)
y = 530
from_center = 15
icons = tools.strip_from_sheet(sheet, (0, 0), size, 4)
buttons = ButtonGroup()
l_kwargs = {"idle_image" : icons[0], "hover_image" : icons[1],
"call" : self.scroll_page, "args" : 1,
"bindings" : [pg.K_LEFT, pg.K_KP4]}
r_kwargs = {"idle_image" : icons[2], "hover_image" : icons[3],
"call" : self.scroll_page, "args" : -1,
"bindings" : [pg.K_RIGHT, pg.K_KP6]}
left = Button(((0, y), size), buttons, **l_kwargs)
left.rect.right = screen_rect.centerx - from_center
right = Button(((0, y), size), buttons, **r_kwargs)
right.rect.x = screen_rect.centerx + from_center
return buttons
def make_main_buttons(self, screen_rect):
buttons = ButtonGroup()
pos = (9, screen_rect.bottom-(NeonButton.height+11))
NeonButton(pos, "Credits", 32, self.change_state, "credits", buttons)
pos = (screen_rect.right-(NeonButton.width+10),
screen_rect.bottom-(NeonButton.height+11))
NeonButton(pos, "High Scores", 28, self.change_state, "high_scores",
buttons)
pos = (screen_rect.centerx-(NeonButton.width//2),
screen_rect.bottom-(NeonButton.height+11))
NeonButton(pos, "Exit", 32, self.exit_game, None,
buttons, bindings=[pg.K_ESCAPE])
rect_style = (screen_rect.left, screen_rect.top, 150, 95)
return buttons
def scroll_page(self, mag):
if not self.animations and len(self.game_buttons) > self.per_page:
for game in self.game_buttons:
self.normalize_scroll(game, mag)
fx, fy = game.rect.x+constants.RENDER_SIZE[0]*mag, game.rect.y
ani = Animation(x=fx, y=fy, duration=350.0,
transition='in_out_quint', round_values=True)
ani.start(game.rect)
self.animations.add(ani)
constants.SFX["cardplace4"].play()
def normalize_scroll(self, game, mag):
if game.rect.x < 0 and mag == -1:
game.rect.x += self.loop_length
elif game.rect.x >= constants.RENDER_SIZE[0] and mag == 1:
game.rect.x -= self.loop_length
def startup(self, persistent):
super(LobbyScreen, self).startup(persistent)
games = self.controller.game_thumbs
self.update_screen_buttons(games)
def exit_game(self, *args):
self.done = True
self.quit = True
def change_state(self, next_state):
self.done = True
self.next = next_state
def get_event(self, event, scale=(1,1)):
if event.type == pg.QUIT:
self.exit_game()
else:
self.buttons.get_event(event)
self.game_buttons.get_event(event)
def update(self, surface, keys, current_time, dt, scale):
mouse_pos = tools.scaled_mouse_pos(scale)
self.buttons.update(mouse_pos)
self.game_buttons.update(mouse_pos)
self.animations.update(dt)
self.draw(surface)
def draw(self, surface):
rect = surface.get_rect()
surface.fill(constants.BACKGROUND_BASE)
self.buttons.draw(surface)
for button in self.game_buttons:
if button.rect.colliderect(rect):
button.draw(surface)
|
[
"data.core.tools.strip_from_sheet",
"data.components.labels.Button",
"pygame.Rect",
"data.components.special_buttons.GameButton",
"data.components.labels.ButtonGroup",
"pygame.sprite.Group",
"data.components.special_buttons.NeonButton",
"data.components.animation.Animation",
"data.core.tools.scaled_mouse_pos"
] |
[((677, 694), 'pygame.sprite.Group', 'pg.sprite.Group', ([], {}), '()\n', (692, 694), True, 'import pygame as pg\n'), ((762, 800), 'pygame.Rect', 'pg.Rect', (['(0, 0)', 'constants.RENDER_SIZE'], {}), '((0, 0), constants.RENDER_SIZE)\n', (769, 800), True, 'import pygame as pg\n'), ((1177, 1215), 'data.components.labels.ButtonGroup', 'ButtonGroup', (['nav_buttons', 'main_buttons'], {}), '(nav_buttons, main_buttons)\n', (1188, 1215), False, 'from data.components.labels import Button, ButtonGroup\n'), ((1702, 1715), 'data.components.labels.ButtonGroup', 'ButtonGroup', ([], {}), '()\n', (1713, 1715), False, 'from data.components.labels import Button, ButtonGroup\n'), ((2253, 2299), 'data.core.tools.strip_from_sheet', 'tools.strip_from_sheet', (['sheet', '(0, 0)', 'size', '(4)'], {}), '(sheet, (0, 0), size, 4)\n', (2275, 2299), False, 'from data.core import tools, constants\n'), ((2318, 2331), 'data.components.labels.ButtonGroup', 'ButtonGroup', ([], {}), '()\n', (2329, 2331), False, 'from data.components.labels import Button, ButtonGroup\n'), ((2722, 2765), 'data.components.labels.Button', 'Button', (['((0, y), size)', 'buttons'], {}), '(((0, y), size), buttons, **l_kwargs)\n', (2728, 2765), False, 'from data.components.labels import Button, ButtonGroup\n'), ((2842, 2885), 'data.components.labels.Button', 'Button', (['((0, y), size)', 'buttons'], {}), '(((0, y), size), buttons, **r_kwargs)\n', (2848, 2885), False, 'from data.components.labels import Button, ButtonGroup\n'), ((3031, 3044), 'data.components.labels.ButtonGroup', 'ButtonGroup', ([], {}), '()\n', (3042, 3044), False, 'from data.components.labels import Button, ButtonGroup\n'), ((3114, 3183), 'data.components.special_buttons.NeonButton', 'NeonButton', (['pos', '"""Credits"""', '(32)', 'self.change_state', '"""credits"""', 'buttons'], {}), "(pos, 'Credits', 32, self.change_state, 'credits', buttons)\n", (3124, 3183), False, 'from data.components.special_buttons import GameButton, NeonButton\n'), ((3306, 3383), 'data.components.special_buttons.NeonButton', 'NeonButton', (['pos', '"""High Scores"""', '(28)', 'self.change_state', '"""high_scores"""', 'buttons'], {}), "(pos, 'High Scores', 28, self.change_state, 'high_scores', buttons)\n", (3316, 3383), False, 'from data.components.special_buttons import GameButton, NeonButton\n'), ((3527, 3614), 'data.components.special_buttons.NeonButton', 'NeonButton', (['pos', '"""Exit"""', '(32)', 'self.exit_game', 'None', 'buttons'], {'bindings': '[pg.K_ESCAPE]'}), "(pos, 'Exit', 32, self.exit_game, None, buttons, bindings=[pg.\n K_ESCAPE])\n", (3537, 3614), False, 'from data.components.special_buttons import GameButton, NeonButton\n'), ((5151, 5180), 'data.core.tools.scaled_mouse_pos', 'tools.scaled_mouse_pos', (['scale'], {}), '(scale)\n', (5173, 5180), False, 'from data.core import tools, constants\n'), ((1988, 2050), 'data.components.special_buttons.GameButton', 'GameButton', (['pos', 'game', 'games[game]', 'self.change_state', 'buttons'], {}), '(pos, game, games[game], self.change_state, buttons)\n', (1998, 2050), False, 'from data.components.special_buttons import GameButton, NeonButton\n'), ((4019, 4106), 'data.components.animation.Animation', 'Animation', ([], {'x': 'fx', 'y': 'fy', 'duration': '(350.0)', 'transition': '"""in_out_quint"""', 'round_values': '(True)'}), "(x=fx, y=fy, duration=350.0, transition='in_out_quint',\n round_values=True)\n", (4028, 4106), False, 'from data.components.animation import Animation\n')]
|
# Copyright (c) 2020 <NAME>
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
from os import listdir, path
from time import sleep, strftime
from PIL import Image, ImageDraw
from threading import Thread
from math import floor
class pic:
def __init__(self, fpath, cropFactor, featherFactor):
self.name = path.basename(fpath)
self.fpath = fpath
self.im = Image.open(fpath)
self.size = self.im.size
self.coords = self.name.lstrip('0123456789_')
self.coords = self.coords.lstrip('(')
self.coords = self.coords.rstrip(').jpg')
self.coords = self.coords.split(',')
self.coords = [float(i) for i in self.coords]
self.mtime = path.getmtime(fpath)
self.featherFactor = featherFactor
self.cTuple = (round((self.size[0]-self.size[0]*cropFactor)/2), # LEFT
round((self.size[1]-self.size[1]*cropFactor)/2), # TOP
round((self.size[0]+self.size[0]*cropFactor)/2), # RITE
round((self.size[1]+self.size[1]*cropFactor)/2)) # BOTM
self.cSize = (self.cTuple[2]-self.cTuple[0],
self.cTuple[3]-self.cTuple[1])
self.cim = self.im.crop(self.cTuple)
def getFMask(self):
""" Returns an edge feather mask to be used in paste function.
"""
if self.featherFactor < 0.01:
mask = Image.new('L', self.cSize, color=255)
return mask
mask = Image.new('L', self.cSize, color=255)
draw = ImageDraw.Draw(mask)
x0, y0 = 0, 0
x1, y1 = self.cSize
# print(f'Crop Tuple: {(x1, y1)}')
feather = round(self.cSize[1] * self.featherFactor)
# print(f'Feather Pixels: {feather}')
for i in range(round(self.cSize[1]/2)):
x1, y1 = x1-1, y1-1
alpha = 255 if i > feather else round(255*(feather/(i+1))**(-1))
draw.rectangle([x0, y0, x1, y1], outline=alpha)
x0, y0 = x0+1, y0+1
return mask
def closePIL(self):
"""closes the im and cim PIL objects
"""
self.im.close()
self.cim.close()
def getSkull(self):
"""adds a semi-transparent 8-bit image (skull) to cropped
PIL image. For use on last pic in list when fPath is marked
with [DEAD]
"""
skull = Image.open('.\\files\\gfx\\skull.png')
skull = skull.resize(self.cSize)
self.cim.paste(skull, mask=skull)
def getYAH(self):
"""adds a semi-transparent 8-bit image ("you are here" marker)
to cropped PIL image. For use on last pic in list when fPath
is NOT marked with [DEAD]
"""
yah = Image.open('.\\files\\gfx\\yah.png')
yah = yah.resize(self.cSize)
self.cim.paste(yah, mask=yah)
def stitch2(rawPath,
destPath='',
cropFactor=0.75,
featherFactor=0.15,
marks=True):
piclist = []
batches = listdir(rawPath)
cnt = 0
tooBig = False
dstPath = destPath
if dstPath == '':
dstPath = rawPath.replace('raws', 'maps', 1)
fullPath = dstPath + '\\' + '[MAP]_' + strftime("%Y%m%d-%H%M%S") + '.png'
print(f'Getting images from {rawPath}')
for i in batches:
names = listdir(rawPath + '\\' + i)
paths = [rawPath + '\\' + i + '\\' + j for j in names]
for k in range(len(names)):
cnt += 1
print(f'Images found: {cnt}', end='\r')
piclist.append(pic(paths[k], cropFactor, featherFactor))
piclist.sort(key=lambda i: i.mtime)
if rawPath.find('DEAD') != -1 and marks:
piclist[-1].getSkull()
elif marks:
piclist[-1].getYAH()
# This next section calculates the bounds of the final map. It
# may run into trouble in the future with image overwrites, as
# currently I'm not doing anything to prevent them.
xCoordMax = max(i.coords[0] for i in piclist)
xCoordMin = min(i.coords[0] for i in piclist)
yCoordMax = max(i.coords[1] for i in piclist)
yCoordMin = min(i.coords[1] for i in piclist)
xMaxPad = round(next(
((i.size[0]/2) for i in piclist if i.coords[0] == xCoordMax)
))
xMinPad = round(next(
((i.size[0]/2) for i in piclist if i.coords[0] == xCoordMin)
))
yMaxPad = round(next(
((i.size[1]/2) for i in piclist if i.coords[1] == yCoordMax)
))
yMinPad = round(next(
((i.size[1]/2) for i in piclist if i.coords[1] == yCoordMin)
))
mapWidth = round(xCoordMax - xCoordMin) + xMaxPad + xMinPad
mapHeight = round(yCoordMax - yCoordMin) + yMaxPad + yMinPad
print(f'\nYou have explored an area {mapWidth} pixels wide ' +
f'and {mapHeight} pixels deep.')
sleep(1)
if mapWidth > 65535 or mapHeight > 65535:
tooBig = True
ratio = 65535 / max(mapWidth, mapHeight)
mapWidth = floor(mapWidth*ratio-10) # subtract 10 just to be sure
mapHeight = floor(mapHeight*ratio-10)
print(f"That's too many, downscaling by {round(ratio*100, 3)}%")
print('(Limit is 65535px on either axis)')
sleep(1)
print(f'New output dimensions: {mapWidth} x {mapHeight}')
sleep(1)
print('Building canvas')
bigMap = Image.new('RGB', (mapWidth, mapHeight), color=0)
if tooBig:
for i in piclist:
print(f'Applying image {piclist.index(i)+1} of {len(piclist)}',
end='\r')
nWidth = round(i.cSize[0]*ratio)
nHeight = round(i.cSize[1]*ratio)
resized = i.cim.resize((nWidth, nHeight))
resizedMask = i.getFMask().resize((nWidth, nHeight))
targ = (round((i.coords[0]-xCoordMin) * ratio),
round((i.coords[1]-yCoordMin) * ratio))
bigMap.paste(resized, targ, resizedMask)
i.closePIL()
else:
for i in piclist:
print(f'Applying image {piclist.index(i)+1} of {len(piclist)}',
end='\r')
targ = (round(i.coords[0] - xCoordMin),
round(i.coords[1] - yCoordMin))
bigMap.paste(i.cim, targ, i.getFMask())
i.closePIL()
# Main thread to save map
saver = Thread(
target=bigMap.save,
args=(fullPath,),
kwargs={'subsampling': 0, 'quality': 100})
def fEllipsis():
"""makes the ellipsis move while file is being saved, so user
doesn't get bored.
"""
print('\n', end='\r')
while saver.is_alive():
print('Drawing map ', end='\r')
sleep(0.3)
print('Drawing map. ', end='\r')
sleep(0.3)
print('Drawing map.. ', end='\r')
sleep(0.3)
print('Drawing map...', end='\r')
sleep(0.3)
ellipsis = Thread(target=fEllipsis)
saver.start()
ellipsis.start()
saver.join()
ellipsis.join()
print('\nMap Complete')
print(f'Path: {fullPath}')
return(fullPath)
|
[
"threading.Thread",
"PIL.Image.new",
"os.path.basename",
"math.floor",
"time.strftime",
"time.sleep",
"PIL.Image.open",
"os.path.getmtime",
"PIL.ImageDraw.Draw",
"os.listdir"
] |
[((3023, 3039), 'os.listdir', 'listdir', (['rawPath'], {}), '(rawPath)\n', (3030, 3039), False, 'from os import listdir, path\n'), ((4870, 4878), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (4875, 4878), False, 'from time import sleep, strftime\n'), ((5384, 5432), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(mapWidth, mapHeight)'], {'color': '(0)'}), "('RGB', (mapWidth, mapHeight), color=0)\n", (5393, 5432), False, 'from PIL import Image, ImageDraw\n'), ((6350, 6441), 'threading.Thread', 'Thread', ([], {'target': 'bigMap.save', 'args': '(fullPath,)', 'kwargs': "{'subsampling': 0, 'quality': 100}"}), "(target=bigMap.save, args=(fullPath,), kwargs={'subsampling': 0,\n 'quality': 100})\n", (6356, 6441), False, 'from threading import Thread\n'), ((6960, 6984), 'threading.Thread', 'Thread', ([], {'target': 'fEllipsis'}), '(target=fEllipsis)\n', (6966, 6984), False, 'from threading import Thread\n'), ((358, 378), 'os.path.basename', 'path.basename', (['fpath'], {}), '(fpath)\n', (371, 378), False, 'from os import listdir, path\n'), ((424, 441), 'PIL.Image.open', 'Image.open', (['fpath'], {}), '(fpath)\n', (434, 441), False, 'from PIL import Image, ImageDraw\n'), ((745, 765), 'os.path.getmtime', 'path.getmtime', (['fpath'], {}), '(fpath)\n', (758, 765), False, 'from os import listdir, path\n'), ((1521, 1558), 'PIL.Image.new', 'Image.new', (['"""L"""', 'self.cSize'], {'color': '(255)'}), "('L', self.cSize, color=255)\n", (1530, 1558), False, 'from PIL import Image, ImageDraw\n'), ((1574, 1594), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['mask'], {}), '(mask)\n', (1588, 1594), False, 'from PIL import Image, ImageDraw\n'), ((2401, 2439), 'PIL.Image.open', 'Image.open', (['""".\\\\files\\\\gfx\\\\skull.png"""'], {}), "('.\\\\files\\\\gfx\\\\skull.png')\n", (2411, 2439), False, 'from PIL import Image, ImageDraw\n'), ((2746, 2782), 'PIL.Image.open', 'Image.open', (['""".\\\\files\\\\gfx\\\\yah.png"""'], {}), "('.\\\\files\\\\gfx\\\\yah.png')\n", (2756, 2782), False, 'from PIL import Image, ImageDraw\n'), ((3330, 3357), 'os.listdir', 'listdir', (["(rawPath + '\\\\' + i)"], {}), "(rawPath + '\\\\' + i)\n", (3337, 3357), False, 'from os import listdir, path\n'), ((5015, 5043), 'math.floor', 'floor', (['(mapWidth * ratio - 10)'], {}), '(mapWidth * ratio - 10)\n', (5020, 5043), False, 'from math import floor\n'), ((5091, 5120), 'math.floor', 'floor', (['(mapHeight * ratio - 10)'], {}), '(mapHeight * ratio - 10)\n', (5096, 5120), False, 'from math import floor\n'), ((5249, 5257), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (5254, 5257), False, 'from time import sleep, strftime\n'), ((5332, 5340), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (5337, 5340), False, 'from time import sleep, strftime\n'), ((1444, 1481), 'PIL.Image.new', 'Image.new', (['"""L"""', 'self.cSize'], {'color': '(255)'}), "('L', self.cSize, color=255)\n", (1453, 1481), False, 'from PIL import Image, ImageDraw\n'), ((3212, 3237), 'time.strftime', 'strftime', (['"""%Y%m%d-%H%M%S"""'], {}), "('%Y%m%d-%H%M%S')\n", (3220, 3237), False, 'from time import sleep, strftime\n'), ((6726, 6736), 'time.sleep', 'sleep', (['(0.3)'], {}), '(0.3)\n', (6731, 6736), False, 'from time import sleep, strftime\n'), ((6795, 6805), 'time.sleep', 'sleep', (['(0.3)'], {}), '(0.3)\n', (6800, 6805), False, 'from time import sleep, strftime\n'), ((6864, 6874), 'time.sleep', 'sleep', (['(0.3)'], {}), '(0.3)\n', (6869, 6874), False, 'from time import sleep, strftime\n'), ((6933, 6943), 'time.sleep', 'sleep', (['(0.3)'], {}), '(0.3)\n', (6938, 6943), False, 'from time import sleep, strftime\n')]
|
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
import open3d as o3d
from random_geometry_points.plane import Plane
def mean_map_entropy(pc_map, map_tips=None, KNN_RAD=1):
MIN_KNN = 5
map_tree = o3d.geometry.KDTreeFlann(pc_map)
points = np.asarray(pc_map.points)
metric = []
for i in range(points.shape[0]):
point = points[i]
[k, idx, _] = map_tree.search_radius_vector_3d(point, KNN_RAD)
if len(idx) > MIN_KNN:
cov = np.cov(points[idx].T)
det = np.linalg.det(2 * np.pi * np.e * cov)
if det > 0:
metric.append(0.5 * np.log(det))
return 0 if len(metric) == 0 else np.mean(metric)
def mean_plane_variance(pc_map, map_tips=None, KNN_RAD=1):
MIN_KNN = 5
map_tree = o3d.geometry.KDTreeFlann(pc_map)
points = np.asarray(pc_map.points)
metric = []
for i in range(points.shape[0]):
point = points[i]
[k, idx, _] = map_tree.search_radius_vector_3d(point, KNN_RAD)
if len(idx) > MIN_KNN:
cov = np.cov(points[idx].T)
eigenvalues = np.linalg.eig(cov)[0]
metric.append(min(eigenvalues))
return 0 if len(metric) == 0 else np.mean(metric)
def orth_mme(pc_map, map_tips, knn_rad=0.5):
map_tree = o3d.geometry.KDTreeFlann(pc_map)
points = np.asarray(pc_map.points)
orth_axes_stats = []
orth_list = map_tips['orth_list']
for k, chosen_points in enumerate(orth_list):
metric = []
plane_error = []
for i in range(chosen_points.shape[0]):
point = chosen_points[i]
[_, idx, _] = map_tree.search_radius_vector_3d(point, knn_rad)
if len(idx) > 5:
metric.append(mme(points[idx]))
avg_metric = np.mean(metric)
orth_axes_stats.append(avg_metric)
return np.sum(orth_axes_stats)
def orth_mpv(pc_map, map_tips, knn_rad=1):
map_tree = o3d.geometry.KDTreeFlann(pc_map)
points = np.asarray(pc_map.points)
orth_axes_stats = []
orth_list = map_tips['orth_list']
for k, chosen_points in enumerate(orth_list):
metric = []
plane_error = []
for i in range(chosen_points.shape[0]):
point = chosen_points[i]
[_, idx, _] = map_tree.search_radius_vector_3d(point, knn_rad)
if len(idx) > 5:
metric.append(mpv(points[idx]))
avg_metric = np.median(metric)
orth_axes_stats.append(avg_metric)
return np.sum(orth_axes_stats)
def mme(points):
cov = np.cov(points.T)
det = np.linalg.det(2 * np.pi * np.e * cov)
return 0.5 * np.log(det) if det > 0 else -math.inf
def mpv(points):
cov = np.cov(points.T)
eigenvalues = np.linalg.eig(cov)[0]
return min(eigenvalues)
def rpe(T_gt, T_est):
seq_len = len(T_gt)
err = 0
for i in range(seq_len):
for j in range(seq_len):
d_gt = T_gt[i] @ np.linalg.inv(T_gt[j])
d_est = T_est[i] @ np.linalg.inv(T_est[j])
dt = d_est[:3, 3] - d_gt[:3, 3]
err += np.linalg.norm(dt) ** 2
return err
|
[
"numpy.sum",
"numpy.log",
"numpy.median",
"numpy.asarray",
"open3d.geometry.KDTreeFlann",
"numpy.linalg.eig",
"numpy.mean",
"numpy.linalg.inv",
"numpy.linalg.norm",
"numpy.linalg.det",
"numpy.cov"
] |
[((797, 829), 'open3d.geometry.KDTreeFlann', 'o3d.geometry.KDTreeFlann', (['pc_map'], {}), '(pc_map)\n', (821, 829), True, 'import open3d as o3d\n'), ((843, 868), 'numpy.asarray', 'np.asarray', (['pc_map.points'], {}), '(pc_map.points)\n', (853, 868), True, 'import numpy as np\n'), ((1367, 1399), 'open3d.geometry.KDTreeFlann', 'o3d.geometry.KDTreeFlann', (['pc_map'], {}), '(pc_map)\n', (1391, 1399), True, 'import open3d as o3d\n'), ((1413, 1438), 'numpy.asarray', 'np.asarray', (['pc_map.points'], {}), '(pc_map.points)\n', (1423, 1438), True, 'import numpy as np\n'), ((1870, 1902), 'open3d.geometry.KDTreeFlann', 'o3d.geometry.KDTreeFlann', (['pc_map'], {}), '(pc_map)\n', (1894, 1902), True, 'import open3d as o3d\n'), ((1916, 1941), 'numpy.asarray', 'np.asarray', (['pc_map.points'], {}), '(pc_map.points)\n', (1926, 1941), True, 'import numpy as np\n'), ((2441, 2464), 'numpy.sum', 'np.sum', (['orth_axes_stats'], {}), '(orth_axes_stats)\n', (2447, 2464), True, 'import numpy as np\n'), ((2525, 2557), 'open3d.geometry.KDTreeFlann', 'o3d.geometry.KDTreeFlann', (['pc_map'], {}), '(pc_map)\n', (2549, 2557), True, 'import open3d as o3d\n'), ((2571, 2596), 'numpy.asarray', 'np.asarray', (['pc_map.points'], {}), '(pc_map.points)\n', (2581, 2596), True, 'import numpy as np\n'), ((3115, 3138), 'numpy.sum', 'np.sum', (['orth_axes_stats'], {}), '(orth_axes_stats)\n', (3121, 3138), True, 'import numpy as np\n'), ((3168, 3184), 'numpy.cov', 'np.cov', (['points.T'], {}), '(points.T)\n', (3174, 3184), True, 'import numpy as np\n'), ((3195, 3232), 'numpy.linalg.det', 'np.linalg.det', (['(2 * np.pi * np.e * cov)'], {}), '(2 * np.pi * np.e * cov)\n', (3208, 3232), True, 'import numpy as np\n'), ((3317, 3333), 'numpy.cov', 'np.cov', (['points.T'], {}), '(points.T)\n', (3323, 3333), True, 'import numpy as np\n'), ((1258, 1273), 'numpy.mean', 'np.mean', (['metric'], {}), '(metric)\n', (1265, 1273), True, 'import numpy as np\n'), ((1792, 1807), 'numpy.mean', 'np.mean', (['metric'], {}), '(metric)\n', (1799, 1807), True, 'import numpy as np\n'), ((2365, 2380), 'numpy.mean', 'np.mean', (['metric'], {}), '(metric)\n', (2372, 2380), True, 'import numpy as np\n'), ((3037, 3054), 'numpy.median', 'np.median', (['metric'], {}), '(metric)\n', (3046, 3054), True, 'import numpy as np\n'), ((3352, 3370), 'numpy.linalg.eig', 'np.linalg.eig', (['cov'], {}), '(cov)\n', (3365, 3370), True, 'import numpy as np\n'), ((1068, 1089), 'numpy.cov', 'np.cov', (['points[idx].T'], {}), '(points[idx].T)\n', (1074, 1089), True, 'import numpy as np\n'), ((1108, 1145), 'numpy.linalg.det', 'np.linalg.det', (['(2 * np.pi * np.e * cov)'], {}), '(2 * np.pi * np.e * cov)\n', (1121, 1145), True, 'import numpy as np\n'), ((1639, 1660), 'numpy.cov', 'np.cov', (['points[idx].T'], {}), '(points[idx].T)\n', (1645, 1660), True, 'import numpy as np\n'), ((3250, 3261), 'numpy.log', 'np.log', (['det'], {}), '(det)\n', (3256, 3261), True, 'import numpy as np\n'), ((1687, 1705), 'numpy.linalg.eig', 'np.linalg.eig', (['cov'], {}), '(cov)\n', (1700, 1705), True, 'import numpy as np\n'), ((3553, 3575), 'numpy.linalg.inv', 'np.linalg.inv', (['T_gt[j]'], {}), '(T_gt[j])\n', (3566, 3575), True, 'import numpy as np\n'), ((3607, 3630), 'numpy.linalg.inv', 'np.linalg.inv', (['T_est[j]'], {}), '(T_est[j])\n', (3620, 3630), True, 'import numpy as np\n'), ((3694, 3712), 'numpy.linalg.norm', 'np.linalg.norm', (['dt'], {}), '(dt)\n', (3708, 3712), True, 'import numpy as np\n'), ((1206, 1217), 'numpy.log', 'np.log', (['det'], {}), '(det)\n', (1212, 1217), True, 'import numpy as np\n')]
|
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.python.ops.gen_image_ops import resize_nearest_neighbor
@tf.function
def kp2gaussian(kp_value, spatial_size, kp_variance):
"""
Transform a keypoint into gaussian like representation
"""
mean = kp_value # B, 10, 2
coordinate_grid = make_coordinate_grid(spatial_size, mean.dtype) # 64, 64, 2
grid_shape = tf.shape(coordinate_grid)
coordinate_grid = tf.reshape(
coordinate_grid,
[1, grid_shape[0], grid_shape[1], 1, 2]
) # 1, 64, 64, 1, 2
# repeats = # B, 1, 1, 10, 1
coordinate_grid = tf.tile(coordinate_grid, [tf.shape(mean)[0], 1, 1, mean.shape[1], 1]) # B, 64, 64, 10, 2
# Preprocess kp shape
mean = tf.reshape(mean, [tf.shape(mean)[0], 1, 1, mean.shape[1], mean.shape[2]]) # B, 1, 1, 10, 2
mean_sub = (coordinate_grid - mean) # B, 64, 64, 10, 2
out = tf.exp(-0.5 * tf.reduce_sum(mean_sub ** 2, -1) / kp_variance) # B, 64, 64, 10
return out
def make_coordinate_grid(spatial_size, type):
"""
Create a meshgrid [-1,1] x [-1,1] of given spatial_size.
"""
h, w = spatial_size[0], spatial_size[1]
x = tf.range(w, dtype=type)
y = tf.range(h, dtype=type)
x = (2. * (x / (tf.cast(w, type) - 1.)) - 1.)
y = (2. * (y / (tf.cast(h, type) - 1.)) - 1.)
yy = tf.repeat(tf.reshape(y, [-1, 1]), w, 1)
xx = tf.repeat(tf.reshape(x, [1, -1]), h, 0)
# yy = y.view(-1, 1).repeat(1, w)
# xx = x.view(1, -1).repeat(h, 1)
meshed = tf.concat([tf.expand_dims(xx, 2), tf.expand_dims(yy, 2)], 2)
return meshed
class ResBlock2d(layers.Layer):
"""
Res block, preserve spatial resolution.
"""
def __init__(self, in_features, kernel_size, padding):
super(ResBlock2d, self).__init__()
self.conv1 = layers.Conv2D(in_features, kernel_size=kernel_size, padding='same')
self.conv2 = layers.Conv2D(in_features, kernel_size=kernel_size, padding='same')
self.norm1 = layers.BatchNormalization()
self.norm2 = layers.BatchNormalization()
def call(self, x, **kwargs):
out = self.norm1(x)
out = tf.keras.activations.relu(out)
out = self.conv1(out)
out = self.norm2(out)
out = tf.keras.activations.relu(out)
out = self.conv2(out)
out += x
return out
class UpBlock2d(layers.Layer):
"""
Upsampling block for use in decoder.
"""
def __init__(self, out_features, kernel_size=(3, 3), groups=1):
super(UpBlock2d, self).__init__()
self.conv = layers.Conv2D(out_features, kernel_size=kernel_size, padding='same')
self.norm = layers.BatchNormalization()
self.up = layers.UpSampling2D()
def call(self, x, **kwargs):
out = self.up(x)
out = self.conv(out)
out = self.norm(out)
out = tf.keras.activations.relu(out)
return out
class DownBlock2d(layers.Layer):
"""
Downsampling block for use in encoder.
"""
def __init__(self, out_features, kernel_size=3):
super(DownBlock2d, self).__init__()
self.conv = layers.Conv2D(out_features, kernel_size=kernel_size, padding='same')
self.norm = layers.BatchNormalization()
self.pool = layers.AvgPool2D(pool_size=(2, 2))
def call(self, x, **kwargs):
out = self.conv(x)
out = self.norm(out)
out = tf.keras.activations.relu(out)
out = self.pool(out)
return out
class SameBlock2d(layers.Layer):
"""
Simple block, preserve spatial resolution.
"""
def __init__(self, out_features, kernel_size=3):
super(SameBlock2d, self).__init__()
self.conv = layers.Conv2D(out_features, kernel_size=kernel_size, padding='same')
self.norm = layers.BatchNormalization()
def call(self, x, **kwargs):
out = self.conv(x)
out = self.norm(out)
out = tf.keras.activations.relu(out)
return out
class Encoder(layers.Layer):
"""
Hourglass Encoder
"""
def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
super(Encoder, self).__init__()
self.num_blocks = num_blocks
# down_blocks = []
for i in range(num_blocks):
block = DownBlock2d(
min(max_features, block_expansion * (2 ** (i + 1))),
kernel_size=3
)
setattr(self, f'down_block{i}', block)
# down_blocks.append(block)
# self.down_blocks = tf.keras.Sequential(down_blocks)
def call(self, x, **kwargs):
outs = [x]
for i in range(self.num_blocks):
res = getattr(self, f'down_block{i}')(outs[-1])
outs.append(res)
return outs
class Decoder(layers.Layer):
"""
Hourglass Decoder
"""
def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
super(Decoder, self).__init__()
# up_blocks = []
self.num_blocks = num_blocks
for i in range(num_blocks)[::-1]:
out_filters = min(max_features, block_expansion * (2 ** i))
block = UpBlock2d(out_filters, kernel_size=3)
setattr(self, f'up_block{i}', block)
# up_blocks.append()
# self.up_blocks = nn.ModuleList(up_blocks)
self.out_filters = block_expansion + in_features
def call(self, x, **kwargs):
out = x.pop()
for i in range(self.num_blocks)[::-1]:
out = getattr(self, f'up_block{i}')(out)
skip = x.pop()
out = tf.concat([out, skip], axis=3)
return out
class Hourglass(layers.Layer):
"""
Hourglass architecture.
"""
def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
super(Hourglass, self).__init__()
self.encoder = Encoder(block_expansion, in_features, num_blocks, max_features)
self.decoder = Decoder(block_expansion, in_features, num_blocks, max_features)
self.out_filters = self.decoder.out_filters
def call(self, x, **kwargs):
return self.decoder(self.encoder(x))
class AntiAliasInterpolation2d(layers.Layer):
"""
Band-limited downsampling, for better preservation of the input signal.
"""
def __init__(self, channels, scale):
super(AntiAliasInterpolation2d, self).__init__()
sigma = (1 / scale - 1) / 2
kernel_size = 2 * round(sigma * 4) + 1
self.ka = kernel_size // 2
self.kb = self.ka - 1 if kernel_size % 2 == 0 else self.ka
kernel_size = [kernel_size, kernel_size]
sigma = [sigma, sigma]
# The gaussian kernel is the product of the
# gaussian function of each dimension.
kernel = 1
meshgrids = tf.meshgrid(
[tf.range(kernel_size[0], dtype=tf.float32)],
[tf.range(kernel_size[1], dtype=tf.float32)],
)
for size, std, mgrid in zip(kernel_size, sigma, meshgrids):
mean = (size - 1) / 2
kernel *= tf.exp(-(mgrid - mean) ** 2 / (2 * std ** 2))
# Make sure sum of values in gaussian kernel equals 1.
kernel = kernel / tf.reduce_sum(kernel)
# Reshape to depthwise convolutional weight
kernel = tf.reshape(kernel, [1, 1, *kernel.shape])
kernel = tf.repeat(kernel, channels, 0)
kernel = tf.transpose(tf.constant(kernel, name='kernel'), [2, 3, 1, 0])
self.kernel = tf.Variable(tf.tile(kernel, [1, 1, 1, 1]), trainable=False)
self.groups = channels
self.scale = scale
self.kernels = tf.split(self.kernel, self.groups, axis=3)
def call(self, input, **kwargs):
if self.scale == 1.0:
return input
padded = tf.keras.backend.spatial_2d_padding(input, ((self.ka, self.kb), (self.ka, self.kb)))
# split & concat - to work on CPU
splitted = tf.split(padded, 3, axis=3)
parts = []
for i in range(3):
parts.append(tf.nn.conv2d(splitted[i], self.kernels[i], strides=1, padding='VALID'))
out = tf.concat([*parts], axis=3)
# out = tf.nn.conv2d(padded, self.kernel, strides=1, padding='VALID')
size = (tf.cast(out.shape[1] * self.scale, tf.int32), tf.cast(out.shape[2] * self.scale, tf.int32))
# out = tf.image.resize(out, size, method=tf.image.ResizeMethod.BILINEAR)
out = resize_nearest_neighbor(out, size)
return out
|
[
"tensorflow.reduce_sum",
"tensorflow.reshape",
"tensorflow.keras.backend.spatial_2d_padding",
"tensorflow.nn.conv2d",
"tensorflow.repeat",
"tensorflow.split",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.python.ops.gen_image_ops.resize_nearest_neighbor",
"tensorflow.concat",
"tensorflow.cast",
"tensorflow.exp",
"tensorflow.keras.layers.UpSampling2D",
"tensorflow.range",
"tensorflow.constant",
"tensorflow.tile",
"tensorflow.expand_dims",
"tensorflow.keras.layers.AvgPool2D",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.activations.relu",
"tensorflow.shape"
] |
[((408, 433), 'tensorflow.shape', 'tf.shape', (['coordinate_grid'], {}), '(coordinate_grid)\n', (416, 433), True, 'import tensorflow as tf\n'), ((456, 524), 'tensorflow.reshape', 'tf.reshape', (['coordinate_grid', '[1, grid_shape[0], grid_shape[1], 1, 2]'], {}), '(coordinate_grid, [1, grid_shape[0], grid_shape[1], 1, 2])\n', (466, 524), True, 'import tensorflow as tf\n'), ((1187, 1210), 'tensorflow.range', 'tf.range', (['w'], {'dtype': 'type'}), '(w, dtype=type)\n', (1195, 1210), True, 'import tensorflow as tf\n'), ((1219, 1242), 'tensorflow.range', 'tf.range', (['h'], {'dtype': 'type'}), '(h, dtype=type)\n', (1227, 1242), True, 'import tensorflow as tf\n'), ((1364, 1386), 'tensorflow.reshape', 'tf.reshape', (['y', '[-1, 1]'], {}), '(y, [-1, 1])\n', (1374, 1386), True, 'import tensorflow as tf\n'), ((1413, 1435), 'tensorflow.reshape', 'tf.reshape', (['x', '[1, -1]'], {}), '(x, [1, -1])\n', (1423, 1435), True, 'import tensorflow as tf\n'), ((1831, 1898), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['in_features'], {'kernel_size': 'kernel_size', 'padding': '"""same"""'}), "(in_features, kernel_size=kernel_size, padding='same')\n", (1844, 1898), False, 'from tensorflow.keras import layers\n'), ((1920, 1987), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['in_features'], {'kernel_size': 'kernel_size', 'padding': '"""same"""'}), "(in_features, kernel_size=kernel_size, padding='same')\n", (1933, 1987), False, 'from tensorflow.keras import layers\n'), ((2009, 2036), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (2034, 2036), False, 'from tensorflow.keras import layers\n'), ((2058, 2085), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (2083, 2085), False, 'from tensorflow.keras import layers\n'), ((2162, 2192), 'tensorflow.keras.activations.relu', 'tf.keras.activations.relu', (['out'], {}), '(out)\n', (2187, 2192), True, 'import tensorflow as tf\n'), ((2267, 2297), 'tensorflow.keras.activations.relu', 'tf.keras.activations.relu', (['out'], {}), '(out)\n', (2292, 2297), True, 'import tensorflow as tf\n'), ((2586, 2654), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['out_features'], {'kernel_size': 'kernel_size', 'padding': '"""same"""'}), "(out_features, kernel_size=kernel_size, padding='same')\n", (2599, 2654), False, 'from tensorflow.keras import layers\n'), ((2675, 2702), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (2700, 2702), False, 'from tensorflow.keras import layers\n'), ((2721, 2742), 'tensorflow.keras.layers.UpSampling2D', 'layers.UpSampling2D', ([], {}), '()\n', (2740, 2742), False, 'from tensorflow.keras import layers\n'), ((2874, 2904), 'tensorflow.keras.activations.relu', 'tf.keras.activations.relu', (['out'], {}), '(out)\n', (2899, 2904), True, 'import tensorflow as tf\n'), ((3136, 3204), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['out_features'], {'kernel_size': 'kernel_size', 'padding': '"""same"""'}), "(out_features, kernel_size=kernel_size, padding='same')\n", (3149, 3204), False, 'from tensorflow.keras import layers\n'), ((3225, 3252), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (3250, 3252), False, 'from tensorflow.keras import layers\n'), ((3273, 3307), 'tensorflow.keras.layers.AvgPool2D', 'layers.AvgPool2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (3289, 3307), False, 'from tensorflow.keras import layers\n'), ((3412, 3442), 'tensorflow.keras.activations.relu', 'tf.keras.activations.relu', (['out'], {}), '(out)\n', (3437, 3442), True, 'import tensorflow as tf\n'), ((3707, 3775), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['out_features'], {'kernel_size': 'kernel_size', 'padding': '"""same"""'}), "(out_features, kernel_size=kernel_size, padding='same')\n", (3720, 3775), False, 'from tensorflow.keras import layers\n'), ((3796, 3823), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (3821, 3823), False, 'from tensorflow.keras import layers\n'), ((3928, 3958), 'tensorflow.keras.activations.relu', 'tf.keras.activations.relu', (['out'], {}), '(out)\n', (3953, 3958), True, 'import tensorflow as tf\n'), ((7296, 7337), 'tensorflow.reshape', 'tf.reshape', (['kernel', '[1, 1, *kernel.shape]'], {}), '(kernel, [1, 1, *kernel.shape])\n', (7306, 7337), True, 'import tensorflow as tf\n'), ((7355, 7385), 'tensorflow.repeat', 'tf.repeat', (['kernel', 'channels', '(0)'], {}), '(kernel, channels, 0)\n', (7364, 7385), True, 'import tensorflow as tf\n'), ((7631, 7673), 'tensorflow.split', 'tf.split', (['self.kernel', 'self.groups'], {'axis': '(3)'}), '(self.kernel, self.groups, axis=3)\n', (7639, 7673), True, 'import tensorflow as tf\n'), ((7785, 7873), 'tensorflow.keras.backend.spatial_2d_padding', 'tf.keras.backend.spatial_2d_padding', (['input', '((self.ka, self.kb), (self.ka, self.kb))'], {}), '(input, ((self.ka, self.kb), (self.ka,\n self.kb)))\n', (7820, 7873), True, 'import tensorflow as tf\n'), ((7932, 7959), 'tensorflow.split', 'tf.split', (['padded', '(3)'], {'axis': '(3)'}), '(padded, 3, axis=3)\n', (7940, 7959), True, 'import tensorflow as tf\n'), ((8117, 8144), 'tensorflow.concat', 'tf.concat', (['[*parts]'], {'axis': '(3)'}), '([*parts], axis=3)\n', (8126, 8144), True, 'import tensorflow as tf\n'), ((8428, 8462), 'tensorflow.python.ops.gen_image_ops.resize_nearest_neighbor', 'resize_nearest_neighbor', (['out', 'size'], {}), '(out, size)\n', (8451, 8462), False, 'from tensorflow.python.ops.gen_image_ops import resize_nearest_neighbor\n'), ((1544, 1565), 'tensorflow.expand_dims', 'tf.expand_dims', (['xx', '(2)'], {}), '(xx, 2)\n', (1558, 1565), True, 'import tensorflow as tf\n'), ((1567, 1588), 'tensorflow.expand_dims', 'tf.expand_dims', (['yy', '(2)'], {}), '(yy, 2)\n', (1581, 1588), True, 'import tensorflow as tf\n'), ((5603, 5633), 'tensorflow.concat', 'tf.concat', (['[out, skip]'], {'axis': '(3)'}), '([out, skip], axis=3)\n', (5612, 5633), True, 'import tensorflow as tf\n'), ((7069, 7114), 'tensorflow.exp', 'tf.exp', (['(-(mgrid - mean) ** 2 / (2 * std ** 2))'], {}), '(-(mgrid - mean) ** 2 / (2 * std ** 2))\n', (7075, 7114), True, 'import tensorflow as tf\n'), ((7205, 7226), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['kernel'], {}), '(kernel)\n', (7218, 7226), True, 'import tensorflow as tf\n'), ((7417, 7451), 'tensorflow.constant', 'tf.constant', (['kernel'], {'name': '"""kernel"""'}), "(kernel, name='kernel')\n", (7428, 7451), True, 'import tensorflow as tf\n'), ((7501, 7530), 'tensorflow.tile', 'tf.tile', (['kernel', '[1, 1, 1, 1]'], {}), '(kernel, [1, 1, 1, 1])\n', (7508, 7530), True, 'import tensorflow as tf\n'), ((8240, 8284), 'tensorflow.cast', 'tf.cast', (['(out.shape[1] * self.scale)', 'tf.int32'], {}), '(out.shape[1] * self.scale, tf.int32)\n', (8247, 8284), True, 'import tensorflow as tf\n'), ((8286, 8330), 'tensorflow.cast', 'tf.cast', (['(out.shape[2] * self.scale)', 'tf.int32'], {}), '(out.shape[2] * self.scale, tf.int32)\n', (8293, 8330), True, 'import tensorflow as tf\n'), ((649, 663), 'tensorflow.shape', 'tf.shape', (['mean'], {}), '(mean)\n', (657, 663), True, 'import tensorflow as tf\n'), ((769, 783), 'tensorflow.shape', 'tf.shape', (['mean'], {}), '(mean)\n', (777, 783), True, 'import tensorflow as tf\n'), ((929, 961), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(mean_sub ** 2)', '(-1)'], {}), '(mean_sub ** 2, -1)\n', (942, 961), True, 'import tensorflow as tf\n'), ((6832, 6874), 'tensorflow.range', 'tf.range', (['kernel_size[0]'], {'dtype': 'tf.float32'}), '(kernel_size[0], dtype=tf.float32)\n', (6840, 6874), True, 'import tensorflow as tf\n'), ((6890, 6932), 'tensorflow.range', 'tf.range', (['kernel_size[1]'], {'dtype': 'tf.float32'}), '(kernel_size[1], dtype=tf.float32)\n', (6898, 6932), True, 'import tensorflow as tf\n'), ((8031, 8101), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['splitted[i]', 'self.kernels[i]'], {'strides': '(1)', 'padding': '"""VALID"""'}), "(splitted[i], self.kernels[i], strides=1, padding='VALID')\n", (8043, 8101), True, 'import tensorflow as tf\n'), ((1264, 1280), 'tensorflow.cast', 'tf.cast', (['w', 'type'], {}), '(w, type)\n', (1271, 1280), True, 'import tensorflow as tf\n'), ((1314, 1330), 'tensorflow.cast', 'tf.cast', (['h', 'type'], {}), '(h, type)\n', (1321, 1330), True, 'import tensorflow as tf\n')]
|
import os, sys
import unittest
import tlsh
from logging import *
sys.path.append(os.path.join(os.path.dirname(__file__),'UT'))
# from ut_trendx_predictor import TrendXPredictorTestCase
from ut_dna_manager import DNAManagerTestCase
from ut_housecallx_report import HouseCallXReportTestCase
from ut_trendx_wrapper import TrendXWrapperTestCase
from ut_pe_generator import PEGeneratorTestCase
from ut_workflow import AdversaryWorkflowTestCase
from ut_tlsh import TLSHTestCase
from ut_adversary_tlsh import TLSHAdversaryTestCase
def suite():
suite = unittest.TestSuite()
# TrendXPredictor Test Cases
# suite.addTest(TrendXPredictorTestCase("test_script_adversary"))
# suite.addTest(TrendXPredictorTestCase("test_pe_adversary"))
# # DNAManager Test Cases
# suite.addTest(DNAManagerTestCase("test_load_dna"))
# suite.addTest(DNAManagerTestCase("test_load_dna_random"))
# suite.addTest(DNAManagerTestCase("test_generate_random_indexes"))
# # HouseCallXReport Test Cases
# suite.addTest(HouseCallXReportTestCase("test_get_scores"))
# # TrendX Wrapper Test Cases
# if sys.version_info.major >= 3:
# suite.addTest(TrendXWrapperTestCase("test_scan_pe_file"))
# suite.addTest(TrendXWrapperTestCase("test_scan_pe_dir"))
# suite.addTest(TrendXWrapperTestCase("test_scan_pe_list"))
# else:
# suite.addTest(TrendXWrapperTestCase("test_scan_script_file"))
# suite.addTest(TrendXWrapperTestCase("test_scan_script_dir"))
# suite.addTest(TrendXWrapperTestCase("test_scan_script_list"))
# # PEGenerator Test Cases
# if sys.version_info.major >= 3:
# suite.addTest(PEGeneratorTestCase("test_generate"))
# # suite.addTest(PEGeneratorRandomTestCase("test_generate_imports"))
# else:
# pass
# AdversaryWorkflow Test Case
if sys.version_info.major >= 3:
# suite.addTest(AdversaryWorkflowTestCase("test_process_trendx_pe_bruteforce"))
# suite.addTest(AdversaryWorkflowTestCase("test_process_trendx_script_bruteforce"))
# suite.addTest(AdversaryWorkflowTestCase("test_process_trendx_pe_ga"))
# suite.addTest(AdversaryWorkflowTestCase("test_process_trendx_script_ga"))
# suite.addTest(AdversaryWorkflowTestCase("test_process_tlsh_pe_bruteforce"))
# suite.addTest(AdversaryWorkflowTestCase("test_process_tlsh_script_bruteforce"))
# suite.addTest(AdversaryWorkflowTestCase("test_process_tlsh_pe_ga"))
# suite.addTest(AdversaryWorkflowTestCase("test_process_tlsh_script_ga"))
suite.addTest(AdversaryWorkflowTestCase("test_start"))
# suite.addTest(AdversaryWorkflowTestCase("test_attack"))
else:
pass
# # TLSH Test Case
# suite.addTest(TLSHTestCase("test_get_tlsh"))
# suite.addTest(TLSHTestCase("test_gen_csv_for_file"))
# suite.addTest(TLSHTestCase("test_gen_csv_for_dir"))
# suite.addTest(TLSHTestCase("test_gen_and_scan_csv"))
# suite.addTest(TLSHTestCase("test_tlsh_wrapper_scan"))
# # TLSHGAAdversary Test Case
# suite.addTest(TLSHAdversaryTestCase("test_process_tlsh_pe_ga"))
return suite
if __name__ == "__main__":
basicConfig(filename='adversary_ml_ut_{}.log'.format(os.getpid()), format='[%(asctime)s][%(process)d.%(thread)d][%(levelname)s] - %(message)s', level=DEBUG)
unittest.main(defaultTest = 'suite')
|
[
"unittest.main",
"ut_workflow.AdversaryWorkflowTestCase",
"os.getpid",
"unittest.TestSuite",
"os.path.dirname"
] |
[((569, 589), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (587, 589), False, 'import unittest\n'), ((3420, 3454), 'unittest.main', 'unittest.main', ([], {'defaultTest': '"""suite"""'}), "(defaultTest='suite')\n", (3433, 3454), False, 'import unittest\n'), ((100, 125), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (115, 125), False, 'import os, sys\n'), ((2642, 2681), 'ut_workflow.AdversaryWorkflowTestCase', 'AdversaryWorkflowTestCase', (['"""test_start"""'], {}), "('test_start')\n", (2667, 2681), False, 'from ut_workflow import AdversaryWorkflowTestCase\n'), ((3311, 3322), 'os.getpid', 'os.getpid', ([], {}), '()\n', (3320, 3322), False, 'import os, sys\n')]
|
# Copyright 2013, AnsibleWorks Inc.
# <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import exceptions
class BaseCommand(object):
def __init__(self, toplevel):
self.toplevel = toplevel
self.name = "BASE-COMMAND"
def run(self, args):
raise exceptions.NotImplementedError()
|
[
"exceptions.NotImplementedError"
] |
[((791, 823), 'exceptions.NotImplementedError', 'exceptions.NotImplementedError', ([], {}), '()\n', (821, 823), False, 'import exceptions\n')]
|
from flask import Flask, send_from_directory, jsonify
import os
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
CLIENT_DIR = os.path.join(BASE_DIR, "client", "dist")
STATIC_DIR = os.path.join(BASE_DIR, "static")
app = Flask(__name__)
app.secret_key = "Developer Key"
@app.route('/')
def index():
return send_from_directory(CLIENT_DIR, "index.html")
@app.route('/<path:filename>')
def client_app(filename):
return send_from_directory(CLIENT_DIR, filename)
@app.route('/static/<path:filename>')
def static_files(filename):
return send_from_directory(STATIC_DIR, filename)
@app.route('/api/items')
def data():
reply = {
"results": [
"Item 1",
"Item 2",
]
}
return jsonify(result=reply)
|
[
"os.path.dirname",
"flask.Flask",
"flask.jsonify",
"flask.send_from_directory",
"os.path.join"
] |
[((132, 172), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""client"""', '"""dist"""'], {}), "(BASE_DIR, 'client', 'dist')\n", (144, 172), False, 'import os\n'), ((186, 218), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""static"""'], {}), "(BASE_DIR, 'static')\n", (198, 218), False, 'import os\n'), ((226, 241), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (231, 241), False, 'from flask import Flask, send_from_directory, jsonify\n'), ((92, 117), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (107, 117), False, 'import os\n'), ((317, 362), 'flask.send_from_directory', 'send_from_directory', (['CLIENT_DIR', '"""index.html"""'], {}), "(CLIENT_DIR, 'index.html')\n", (336, 362), False, 'from flask import Flask, send_from_directory, jsonify\n'), ((433, 474), 'flask.send_from_directory', 'send_from_directory', (['CLIENT_DIR', 'filename'], {}), '(CLIENT_DIR, filename)\n', (452, 474), False, 'from flask import Flask, send_from_directory, jsonify\n'), ((554, 595), 'flask.send_from_directory', 'send_from_directory', (['STATIC_DIR', 'filename'], {}), '(STATIC_DIR, filename)\n', (573, 595), False, 'from flask import Flask, send_from_directory, jsonify\n'), ((730, 751), 'flask.jsonify', 'jsonify', ([], {'result': 'reply'}), '(result=reply)\n', (737, 751), False, 'from flask import Flask, send_from_directory, jsonify\n')]
|
from flask import Flask, request
import argparse
import time
import threading
from p4_controller import P4Controller
from state_allocator import EntryManager
app = Flask(__name__)
p4_controller = None
entry_manager = None
@app.route('/test')
def test():
return "Hello from switch control plane controller~!"
@app.route('/config_pipeline', methods=["POST"])
def config_pipeline():
data = request.get_json()
operation_type = data.get("type")
if operation_type == "insert":
entries = []
entry_infos = data.get("entry_infos")
for entry_info in entry_infos:
entries.append(p4_controller.build_table_entry(entry_info))
p4_controller.insert_table_entries(entries)
return "OK"
@app.route("/config_route", methods=["POST"])
def config_route():
data = request.get_json()
chain_id = data.get("chain_id")
chain_length = data.get("chain_length")
output_port = data.get("output_port")
operation_type = data.get("type")
if operation_type == "insert":
p4_controller.insert_route(chain_id, chain_length, output_port)
return str(int(time.time() * 1000))
@app.route("/new_nf", methods=["POST"])
def new_nf():
data = request.get_json()
ip_addr = data.get("ip_addr")
port = data.get("port")
entry_manager.add_grpc_client("%s:%s" % (ip_addr, port))
def main(p4info_file_path, tofino_config_fpath, server_port, max_rule,
polling_time, enable_topk, debug):
global p4_controller
p4_controller = P4Controller(p4info_file_path, tofino_config_fpath, debug)
if max_rule > 0 or polling_time > 0:
global entry_manager
entry_manager = EntryManager(
p4_controller, [],
max_rule, polling_time, enable_topk)
entry_manager.start()
app.run(host="0.0.0.0", port=server_port)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='P4SFC switch controller')
parser.add_argument('--p4info',
help='p4info proto in text format from p4c',
type=str,
action="store",
required=False,
default='../../build/p4info.pb.txt')
parser.add_argument('--tofino-config',
help='Tofino config file from p4c',
type=str,
action="store",
required=False,
default='../../build/device_config')
parser.add_argument('--server-port',
help='port for RESTful API',
type=str,
action="store",
required=False,
default=8090)
parser.add_argument('--max-rule',
help='The maximum number of rules a switch can hold',
type=int,
action="store",
required=False,
default=1000)
parser.add_argument(
'--polling-time',
help='The polling time (in seconds) for poll the entries',
type=float,
action="store",
required=False,
default=30)
parser.add_argument('--enable-topk',
dest='enable_topk',
action='store_true')
parser.add_argument('--disable-topk',
dest='enable_topk',
action='store_false')
parser.add_argument('--debug', dest='debug', action='store_true')
parser.set_defaults(enable_topk=True)
parser.set_defaults(debug=False)
args = parser.parse_args()
main(args.p4info, args.tofino_config, args.server_port, args.max_rule,
args.polling_time, args.enable_topk, args.debug)
|
[
"argparse.ArgumentParser",
"flask.Flask",
"p4_controller.P4Controller",
"time.time",
"state_allocator.EntryManager",
"flask.request.get_json"
] |
[((165, 180), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (170, 180), False, 'from flask import Flask, request\n'), ((400, 418), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (416, 418), False, 'from flask import Flask, request\n'), ((818, 836), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (834, 836), False, 'from flask import Flask, request\n'), ((1213, 1231), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (1229, 1231), False, 'from flask import Flask, request\n'), ((1517, 1575), 'p4_controller.P4Controller', 'P4Controller', (['p4info_file_path', 'tofino_config_fpath', 'debug'], {}), '(p4info_file_path, tofino_config_fpath, debug)\n', (1529, 1575), False, 'from p4_controller import P4Controller\n'), ((1884, 1946), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""P4SFC switch controller"""'}), "(description='P4SFC switch controller')\n", (1907, 1946), False, 'import argparse\n'), ((1671, 1739), 'state_allocator.EntryManager', 'EntryManager', (['p4_controller', '[]', 'max_rule', 'polling_time', 'enable_topk'], {}), '(p4_controller, [], max_rule, polling_time, enable_topk)\n', (1683, 1739), False, 'from state_allocator import EntryManager\n'), ((1125, 1136), 'time.time', 'time.time', ([], {}), '()\n', (1134, 1136), False, 'import time\n')]
|
from docutils import nodes
import sphinx.domains.std
# Borrowed from: http://stackoverflow.com/questions/13848328/sphinx-references-to-other-sections-containing-section-number-and-section-title
class CustomStandardDomain(sphinx.domains.std.StandardDomain):
def __init__(self, env):
env.settings['footnote_references'] = 'superscript'
sphinx.domains.std.StandardDomain.__init__(self, env)
def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
res = super(CustomStandardDomain, self).resolve_xref(env, fromdocname, builder, typ, target, node, contnode)
if res is None:
return res
if typ == 'ref' and not node['refexplicit']:
docname, labelid, sectname = self.data['labels'].get(target, ('','',''))
res['refdocname'] = docname
return res
def doctree_resolved(app, doctree, docname):
secnums = app.builder.env.toc_secnumbers
for node in doctree.traverse(nodes.reference):
if 'refdocname' in node:
refdocname = node['refdocname']
if refdocname in secnums:
secnum = secnums[refdocname]
emphnode = node.children[0]
textnode = emphnode.children[0]
toclist = app.builder.env.tocs[refdocname]
anchorname = None
for refnode in toclist.traverse(nodes.reference):
if refnode.astext() == textnode.astext():
anchorname = refnode['anchorname']
if anchorname is None:
continue
sec_number = secnum[anchorname]
chapter = sec_number[0]
section = None
if len(sec_number) > 1:
section = ".".join(map(str, sec_number[1:]))
if section:
node.replace(emphnode, nodes.Text("Chapter %s Section %s - %s" % (chapter, section, textnode)))
else:
node.replace(emphnode, nodes.Text("Chapter %s - %s" % (chapter, textnode)))
def setup(app):
app.override_domain(CustomStandardDomain)
app.connect('doctree-resolved', doctree_resolved)
return {'version': "1.0", 'parallel_read_safe': True}
|
[
"docutils.nodes.Text"
] |
[((1583, 1654), 'docutils.nodes.Text', 'nodes.Text', (["('Chapter %s Section %s - %s' % (chapter, section, textnode))"], {}), "('Chapter %s Section %s - %s' % (chapter, section, textnode))\n", (1593, 1654), False, 'from docutils import nodes\n'), ((1694, 1745), 'docutils.nodes.Text', 'nodes.Text', (["('Chapter %s - %s' % (chapter, textnode))"], {}), "('Chapter %s - %s' % (chapter, textnode))\n", (1704, 1745), False, 'from docutils import nodes\n')]
|
import torch
from torch.utils.data import TensorDataset
from torchvision import datasets, transforms
from base import BaseDataLoader, BaseDataLoader_2
from torch.utils.data import DataLoader
import numpy as np
import pandas as pd
from .utils import readmts_uci_har, transform_labels
class MnistDataLoader(BaseDataLoader):
"""
MNIST data loading demo using BaseDataLoader
"""
def __init__(self, data_dir, batch_size, shuffle=True, validation_split=0.0, num_workers=1, training=True):
trsfm = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
self.data_dir = data_dir
self.dataset = datasets.MNIST(self.data_dir, train=training, download=True, transform=trsfm)
super().__init__(self.dataset, batch_size, shuffle, validation_split, num_workers)
class HumanActivityRecognitionDataLoader2(BaseDataLoader):
"""
HumanActivityRecognition data loading demo using BaseDataLoader
"""
def __init__(self, data_dir, batch_size, shuffle=True, validation_split=0.0, test_split=0.0, num_workers=1,
training=True):
x_train, y_train, x_test, y_test = readmts_uci_har(data_dir)
x_train = x_train.swapaxes(1, 2)
x_test = x_test.swapaxes(1, 2)
y_train, y_test = transform_labels(y_train, y_test)
for i in range(len(x_train)):
for j in range(len(x_test)):
c = (x_train[i] == x_test[j])
d = c.all()
if d:
break
X = np.concatenate((x_train, x_test))
Y = np.concatenate((y_train, y_test))
X = torch.from_numpy(X).float()
Y = torch.from_numpy(Y)
dataset = TensorDataset(X, Y)
super().__init__(dataset, batch_size, shuffle, validation_split, test_split, num_workers, normalization=True)
class HumanActivityRecognitionDataLoader1(BaseDataLoader):
"""
HumanActivityRecognition data loading demo using BaseDataLoader
"""
def __init__(self, data_dir, batch_size, shuffle=True, validation_split=0.0, test_split=0.0, num_workers=1,
training=True):
INPUT_SIGNAL_TYPES = [
"body_acc_x_",
"body_acc_y_",
"body_acc_z_",
"body_gyro_x_",
"body_gyro_y_",
"body_gyro_z_",
"total_acc_x_",
"total_acc_y_",
"total_acc_z_"
]
# Output classes to learn how to classify
LABELS = [
"WALKING",
"WALKING_UPSTAIRS",
"WALKING_DOWNSTAIRS",
"SITTING",
"STANDING",
"LAYING"
]
DATASET_PATH = data_dir
TRAIN = "/train/"
TEST = "/test/"
# Load "X" (the neural network's training and testing inputs)
def load_X(X_signals_paths):
"""
Given attribute (train or test) of feature, read all 9 features into an
np ndarray of shape [sample_sequence_idx, time_step, feature_num]
argument: X_signals_paths str attribute of feature: 'train' or 'test'
return: np ndarray, tensor of features
"""
X_signals = []
for signal_type_path in X_signals_paths:
file = open(signal_type_path, 'rb')
# Read dataset from disk, dealing with text files' syntax
X_signals.append(
[np.array(serie, dtype=np.float32) for serie in [
row.replace(b' ', b' ').strip().split(b' ') for row in file
]]
)
file.close()
return np.transpose(np.array(X_signals), (1, 2, 0))
X_train_signals_paths = [
DATASET_PATH + TRAIN + "Inertial Signals/" + signal + "train.txt" for signal in INPUT_SIGNAL_TYPES
]
X_test_signals_paths = [
DATASET_PATH + TEST + "Inertial Signals/" + signal + "test.txt" for signal in INPUT_SIGNAL_TYPES
]
x_train = load_X(X_train_signals_paths)
x_test = load_X(X_test_signals_paths)
x_train = x_train.swapaxes(1, 2)
x_test = x_test.swapaxes(1, 2)
# Load "y" (the neural network's training and testing outputs)
def load_y(y_path):
"""
Read Y file of values to be predicted
argument: y_path str attibute of Y: 'train' or 'test'
return: Y ndarray / tensor of the 6 one_hot labels of each sample
"""
file = open(y_path, 'rb')
# Read dataset from disk, dealing with text file's syntax
y_ = np.array(
[elem for elem in [
row.replace(b' ', b' ').strip().split(b' ') for row in file
]],
dtype=np.int32
)
file.close()
# Substract 1 to each output class for friendly 0-based indexing
# return one_hot(y_ - 1)
return y_ - 1
y_train_path = DATASET_PATH + TRAIN + "y_train.txt"
y_test_path = DATASET_PATH + TEST + "y_test.txt"
y_train = load_y(y_train_path)
y_test = load_y(y_test_path)
X = np.concatenate((x_train, x_test))
Y = np.concatenate((y_train, y_test))
Y = Y.reshape((len(Y), ))
X = torch.from_numpy(X).float()
Y = torch.from_numpy(Y).long()
dataset = TensorDataset(X, Y)
super().__init__(dataset, batch_size, shuffle, validation_split, test_split, num_workers, normalization=True)
class HumanActivityRecognitionDataLoader3(BaseDataLoader_2):
"""
HumanActivityRecognition data loading demo using BaseDataLoader
"""
def __init__(self, data_dir, batch_size, shuffle=True, validation_split=0.0, test_split=0.0, num_workers=1,
training=True):
INPUT_SIGNAL_TYPES = [
"body_acc_x_",
"body_acc_y_",
"body_acc_z_",
"body_gyro_x_",
"body_gyro_y_",
"body_gyro_z_",
"total_acc_x_",
"total_acc_y_",
"total_acc_z_"
]
# Output classes to learn how to classify
LABELS = [
"WALKING",
"WALKING_UPSTAIRS",
"WALKING_DOWNSTAIRS",
"SITTING",
"STANDING",
"LAYING"
]
DATASET_PATH = data_dir
TRAIN = "/train/"
TEST = "/test/"
# Load "X" (the neural network's training and testing inputs)
def load_X(X_signals_paths):
"""
Given attribute (train or test) of feature, read all 9 features into an
np ndarray of shape [sample_sequence_idx, time_step, feature_num]
argument: X_signals_paths str attribute of feature: 'train' or 'test'
return: np ndarray, tensor of features
"""
X_signals = []
for signal_type_path in X_signals_paths:
file = open(signal_type_path, 'rb')
# Read dataset from disk, dealing with text files' syntax
X_signals.append(
[np.array(serie, dtype=np.float32) for serie in [
row.replace(b' ', b' ').strip().split(b' ') for row in file
]]
)
file.close()
return np.transpose(np.array(X_signals), (1, 2, 0))
X_train_signals_paths = [
DATASET_PATH + TRAIN + "Inertial Signals/" + signal + "train.txt" for signal in INPUT_SIGNAL_TYPES
]
X_test_signals_paths = [
DATASET_PATH + TEST + "Inertial Signals/" + signal + "test.txt" for signal in INPUT_SIGNAL_TYPES
]
x_train = load_X(X_train_signals_paths)
x_test = load_X(X_test_signals_paths)
x_train = x_train.swapaxes(1, 2)
x_test = x_test.swapaxes(1, 2)
# Load "y" (the neural network's training and testing outputs)
def load_y(y_path):
"""
Read Y file of values to be predicted
argument: y_path str attibute of Y: 'train' or 'test'
return: Y ndarray / tensor of the 6 one_hot labels of each sample
"""
file = open(y_path, 'rb')
# Read dataset from disk, dealing with text file's syntax
y_ = np.array(
[elem for elem in [
row.replace(b' ', b' ').strip().split(b' ') for row in file
]],
dtype=np.int32
)
file.close()
# Substract 1 to each output class for friendly 0-based indexing
# return one_hot(y_ - 1)
return y_ - 1
y_train_path = DATASET_PATH + TRAIN + "y_train.txt"
y_test_path = DATASET_PATH + TEST + "y_test.txt"
y_train = load_y(y_train_path)
y_test = load_y(y_test_path)
X = np.concatenate((x_train, x_test))
Y = np.concatenate((y_train, y_test))
n_train = len(x_train)
n_test = len(x_test) // 2
n_val = n_test
idx_full = np.arange(len(X))
test_idx = idx_full[-n_test:]
valid_idx = idx_full[-(n_test+n_val):-n_test]
train_idx = idx_full[:n_train]
Y = Y.reshape((len(Y), ))
X = torch.from_numpy(X).float()
Y = torch.from_numpy(Y).long()
dataset = TensorDataset(X, Y)
super().__init__(dataset, batch_size, shuffle, train_idx, valid_idx, test_idx, num_workers, normalization=True)
class HumanActivityRecognitionDataLoader(BaseDataLoader_2):
"""
HumanActivityRecognition data loading demo using BaseDataLoader
"""
def __init__(self, data_dir, batch_size, shuffle=True, validation_split=0.0, test_split=0.0, num_workers=1,
training=True):
INPUT_SIGNAL_TYPES = [
"body_acc_x_",
"body_acc_y_",
"body_acc_z_",
"body_gyro_x_",
"body_gyro_y_",
"body_gyro_z_",
"total_acc_x_",
"total_acc_y_",
"total_acc_z_"
]
# Output classes to learn how to classify
LABELS = [
"WALKING",
"WALKING_UPSTAIRS",
"WALKING_DOWNSTAIRS",
"SITTING",
"STANDING",
"LAYING"
]
DATASET_PATH = data_dir
TRAIN = "/train/"
TEST = "/test/"
# Load "X" (the neural network's training and testing inputs)
def load_X(X_signals_paths):
"""
Given attribute (train or test) of feature, read all 9 features into an
np ndarray of shape [sample_sequence_idx, time_step, feature_num]
argument: X_signals_paths str attribute of feature: 'train' or 'test'
return: np ndarray, tensor of features
"""
X_signals = []
for signal_type_path in X_signals_paths:
file = open(signal_type_path, 'rb')
# Read dataset from disk, dealing with text files' syntax
X_signals.append(
[np.array(serie, dtype=np.float32) for serie in [
row.replace(b' ', b' ').strip().split(b' ') for row in file
]]
)
file.close()
return np.transpose(np.array(X_signals), (1, 2, 0))
X_train_signals_paths = [
DATASET_PATH + TRAIN + "Inertial Signals/" + signal + "train.txt" for signal in INPUT_SIGNAL_TYPES
]
X_test_signals_paths = [
DATASET_PATH + TEST + "Inertial Signals/" + signal + "test.txt" for signal in INPUT_SIGNAL_TYPES
]
x_train = load_X(X_train_signals_paths)
x_test = load_X(X_test_signals_paths)
x_train = x_train.swapaxes(1, 2)
x_test = x_test.swapaxes(1, 2)
# Load "y" (the neural network's training and testing outputs)
def load_y(y_path):
"""
Read Y file of values to be predicted
argument: y_path str attibute of Y: 'train' or 'test'
return: Y ndarray / tensor of the 6 one_hot labels of each sample
"""
file = open(y_path, 'rb')
# Read dataset from disk, dealing with text file's syntax
y_ = np.array(
[elem for elem in [
row.replace(b' ', b' ').strip().split(b' ') for row in file
]],
dtype=np.int32
)
file.close()
# Substract 1 to each output class for friendly 0-based indexing
# return one_hot(y_ - 1)
return y_ - 1
y_train_path = DATASET_PATH + TRAIN + "y_train.txt"
y_test_path = DATASET_PATH + TEST + "y_test.txt"
y_train = load_y(y_train_path)
y_test = load_y(y_test_path)
X = np.concatenate((x_train, x_test))
Y = np.concatenate((y_train, y_test))
n_train = len(x_train)
n_test = len(x_test)
n_val = n_test
idx_full = np.arange(len(X))
test_idx = idx_full[-n_test:]
train_idx = idx_full[:n_train]
np.random.seed(123)
np.random.shuffle(train_idx)
valid_idx = train_idx[-n_test//2:]
train_idx = train_idx[:-n_test//2]
Y = Y.reshape((len(Y), ))
X = torch.from_numpy(X).float()
Y = torch.from_numpy(Y).long()
dataset = TensorDataset(X, Y)
super().__init__(dataset, batch_size, shuffle, train_idx, valid_idx, test_idx, num_workers, normalization=True)
|
[
"numpy.random.seed",
"numpy.random.shuffle",
"torchvision.transforms.ToTensor",
"numpy.array",
"torch.utils.data.TensorDataset",
"torchvision.transforms.Normalize",
"torchvision.datasets.MNIST",
"numpy.concatenate",
"torch.from_numpy"
] |
[((694, 771), 'torchvision.datasets.MNIST', 'datasets.MNIST', (['self.data_dir'], {'train': 'training', 'download': '(True)', 'transform': 'trsfm'}), '(self.data_dir, train=training, download=True, transform=trsfm)\n', (708, 771), False, 'from torchvision import datasets, transforms\n'), ((1583, 1616), 'numpy.concatenate', 'np.concatenate', (['(x_train, x_test)'], {}), '((x_train, x_test))\n', (1597, 1616), True, 'import numpy as np\n'), ((1629, 1662), 'numpy.concatenate', 'np.concatenate', (['(y_train, y_test)'], {}), '((y_train, y_test))\n', (1643, 1662), True, 'import numpy as np\n'), ((1716, 1735), 'torch.from_numpy', 'torch.from_numpy', (['Y'], {}), '(Y)\n', (1732, 1735), False, 'import torch\n'), ((1755, 1774), 'torch.utils.data.TensorDataset', 'TensorDataset', (['X', 'Y'], {}), '(X, Y)\n', (1768, 1774), False, 'from torch.utils.data import TensorDataset\n'), ((5289, 5322), 'numpy.concatenate', 'np.concatenate', (['(x_train, x_test)'], {}), '((x_train, x_test))\n', (5303, 5322), True, 'import numpy as np\n'), ((5335, 5368), 'numpy.concatenate', 'np.concatenate', (['(y_train, y_test)'], {}), '((y_train, y_test))\n', (5349, 5368), True, 'import numpy as np\n'), ((5503, 5522), 'torch.utils.data.TensorDataset', 'TensorDataset', (['X', 'Y'], {}), '(X, Y)\n', (5516, 5522), False, 'from torch.utils.data import TensorDataset\n'), ((9038, 9071), 'numpy.concatenate', 'np.concatenate', (['(x_train, x_test)'], {}), '((x_train, x_test))\n', (9052, 9071), True, 'import numpy as np\n'), ((9084, 9117), 'numpy.concatenate', 'np.concatenate', (['(y_train, y_test)'], {}), '((y_train, y_test))\n', (9098, 9117), True, 'import numpy as np\n'), ((9510, 9529), 'torch.utils.data.TensorDataset', 'TensorDataset', (['X', 'Y'], {}), '(X, Y)\n', (9523, 9529), False, 'from torch.utils.data import TensorDataset\n'), ((13047, 13080), 'numpy.concatenate', 'np.concatenate', (['(x_train, x_test)'], {}), '((x_train, x_test))\n', (13061, 13080), True, 'import numpy as np\n'), ((13093, 13126), 'numpy.concatenate', 'np.concatenate', (['(y_train, y_test)'], {}), '((y_train, y_test))\n', (13107, 13126), True, 'import numpy as np\n'), ((13335, 13354), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (13349, 13354), True, 'import numpy as np\n'), ((13363, 13391), 'numpy.random.shuffle', 'np.random.shuffle', (['train_idx'], {}), '(train_idx)\n', (13380, 13391), True, 'import numpy as np\n'), ((13613, 13632), 'torch.utils.data.TensorDataset', 'TensorDataset', (['X', 'Y'], {}), '(X, Y)\n', (13626, 13632), False, 'from torch.utils.data import TensorDataset\n'), ((549, 570), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (568, 570), False, 'from torchvision import datasets, transforms\n'), ((584, 626), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.1307,)', '(0.3081,)'], {}), '((0.1307,), (0.3081,))\n', (604, 626), False, 'from torchvision import datasets, transforms\n'), ((1676, 1695), 'torch.from_numpy', 'torch.from_numpy', (['X'], {}), '(X)\n', (1692, 1695), False, 'import torch\n'), ((3747, 3766), 'numpy.array', 'np.array', (['X_signals'], {}), '(X_signals)\n', (3755, 3766), True, 'import numpy as np\n'), ((5417, 5436), 'torch.from_numpy', 'torch.from_numpy', (['X'], {}), '(X)\n', (5433, 5436), False, 'import torch\n'), ((5457, 5476), 'torch.from_numpy', 'torch.from_numpy', (['Y'], {}), '(Y)\n', (5473, 5476), False, 'import torch\n'), ((7496, 7515), 'numpy.array', 'np.array', (['X_signals'], {}), '(X_signals)\n', (7504, 7515), True, 'import numpy as np\n'), ((9424, 9443), 'torch.from_numpy', 'torch.from_numpy', (['X'], {}), '(X)\n', (9440, 9443), False, 'import torch\n'), ((9464, 9483), 'torch.from_numpy', 'torch.from_numpy', (['Y'], {}), '(Y)\n', (9480, 9483), False, 'import torch\n'), ((11505, 11524), 'numpy.array', 'np.array', (['X_signals'], {}), '(X_signals)\n', (11513, 11524), True, 'import numpy as np\n'), ((13527, 13546), 'torch.from_numpy', 'torch.from_numpy', (['X'], {}), '(X)\n', (13543, 13546), False, 'import torch\n'), ((13567, 13586), 'torch.from_numpy', 'torch.from_numpy', (['Y'], {}), '(Y)\n', (13583, 13586), False, 'import torch\n'), ((3510, 3543), 'numpy.array', 'np.array', (['serie'], {'dtype': 'np.float32'}), '(serie, dtype=np.float32)\n', (3518, 3543), True, 'import numpy as np\n'), ((7259, 7292), 'numpy.array', 'np.array', (['serie'], {'dtype': 'np.float32'}), '(serie, dtype=np.float32)\n', (7267, 7292), True, 'import numpy as np\n'), ((11268, 11301), 'numpy.array', 'np.array', (['serie'], {'dtype': 'np.float32'}), '(serie, dtype=np.float32)\n', (11276, 11301), True, 'import numpy as np\n')]
|
#-------------------------------------------------------------------------------
#
# Aggregated Magnetic Model
#
# Author: <NAME> <<EMAIL>>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2018 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
from collections import namedtuple
from numpy import inf, zeros, asarray
from .._pymm import (
GEOCENTRIC_SPHERICAL, GEODETIC_ABOVE_WGS84, GEOCENTRIC_CARTESIAN,
convert, vrot_sph2geod, vrot_sph2cart,
)
from .model import GeomagneticModel
Component = namedtuple("_Component", ["model", "scale", "parameters"])
def _validity_overlap(validity1, validity2):
start1, end1 = validity1
start2, end2 = validity2
start = max(start1, start2)
end = min(end1, end2)
if end < start:
return -inf, -inf
return start, end
class ComposedGeomagneticModel(GeomagneticModel):
""" Composed Earth magnetic field model aggregating multiple models
into one.
"""
def __init__(self, *models):
self._parameters = set()
self._components = []
self._validity = (-inf, inf)
for model in models:
self.push(model)
def push(self, model, scale=1.0, **parameters):
""" Add model. """
self._parameters.update(model.parameters)
self._validity = _validity_overlap(self.validity, model.validity)
self._components.append(Component(model, scale, parameters))
@property
def validity(self):
return self._validity
@property
def parameters(self):
""" required parameters. """
return tuple(self._parameters)
def eval(self, time, location,
input_coordinate_system=GEOCENTRIC_SPHERICAL,
output_coordinate_system=GEOCENTRIC_SPHERICAL,
**options):
# convert input coordinates to spherical coordinates
coord_sph = convert(
location, input_coordinate_system, GEOCENTRIC_SPHERICAL
)
# get output dimension
time = asarray(time)
location = asarray(location)
if time.ndim > (location.ndim - 1):
shape = time.shape
else:
shape = location.shape[:-1]
result = zeros(shape + (3,))
final_scale = options.pop("scale", None)
for model, scale, params in self._components:
args = options.copy()
args.update(params)
result += model.eval(time, coord_sph, scale=scale, **args)
# rotate result to the desired coordinate frame
if output_coordinate_system == GEODETIC_ABOVE_WGS84:
if input_coordinate_system == GEODETIC_ABOVE_WGS84:
coord_out = location
else:
coord_out = convert(
coord_sph, GEOCENTRIC_SPHERICAL, GEODETIC_ABOVE_WGS84
)
result = vrot_sph2geod(result, coord_out[..., 0] - coord_sph[..., 0])
elif output_coordinate_system == GEOCENTRIC_CARTESIAN:
result = vrot_sph2cart(result, coord_sph[..., 0], coord_sph[..., 1])
# apply the final scale
if final_scale is not None:
result *= final_scale
return result
|
[
"numpy.asarray",
"numpy.zeros",
"collections.namedtuple"
] |
[((1678, 1736), 'collections.namedtuple', 'namedtuple', (['"""_Component"""', "['model', 'scale', 'parameters']"], {}), "('_Component', ['model', 'scale', 'parameters'])\n", (1688, 1736), False, 'from collections import namedtuple\n'), ((3161, 3174), 'numpy.asarray', 'asarray', (['time'], {}), '(time)\n', (3168, 3174), False, 'from numpy import inf, zeros, asarray\n'), ((3194, 3211), 'numpy.asarray', 'asarray', (['location'], {}), '(location)\n', (3201, 3211), False, 'from numpy import inf, zeros, asarray\n'), ((3359, 3378), 'numpy.zeros', 'zeros', (['(shape + (3,))'], {}), '(shape + (3,))\n', (3364, 3378), False, 'from numpy import inf, zeros, asarray\n')]
|
"""
Author: Igor
Date: 2020.06.07
"""
import logging
import sys
from datetime import datetime
from logging.handlers import TimedRotatingFileHandler
LOG_FORMATTING_STRING = logging.Formatter('%(asctime)s - %(module)s - %(filename)s - '
'%(lineno)d - %(levelname)s - %(message)s')
def get_file_handler():
file_name = 'log/error.log.{0}'.format(datetime.utcnow().strftime("%Y%m%d"))
file_handler = TimedRotatingFileHandler(filename=file_name, when='midnight')
file_handler.setFormatter(LOG_FORMATTING_STRING)
file_handler.setLevel(logging.WARN)
file_handler.suffix = "%Y%m%d"
return file_handler
def get_console_handler():
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(LOG_FORMATTING_STRING)
console_handler.setLevel(logging.DEBUG)
return console_handler
def get_logger(logger_name):
"""Returns a named logger that includes console logger that prints everything
starting from 'debug' level and file logger that prints only the sensible errors starting
from 'warning' level.
How to use:
(behind the scene application has a global variable log that
points to this logger)
from children import log
log.warn('Test warn')
log.debug('Test debug')
log.error('Test error')
You can put these output errors in any place of your code. It is recommended to avoid
using it for the errors that are easily recoverable, for example not enough params in
user input.
"""
logger = logging.getLogger(logger_name)
logger.setLevel(logging.DEBUG)
logger.propagate = False
logger.addHandler(get_console_handler())
logger.addHandler(get_file_handler())
return logger
|
[
"logging.StreamHandler",
"logging.Formatter",
"datetime.datetime.utcnow",
"logging.handlers.TimedRotatingFileHandler",
"logging.getLogger"
] |
[((174, 288), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(module)s - %(filename)s - %(lineno)d - %(levelname)s - %(message)s"""'], {}), "(\n '%(asctime)s - %(module)s - %(filename)s - %(lineno)d - %(levelname)s - %(message)s'\n )\n", (191, 288), False, 'import logging\n'), ((450, 511), 'logging.handlers.TimedRotatingFileHandler', 'TimedRotatingFileHandler', ([], {'filename': 'file_name', 'when': '"""midnight"""'}), "(filename=file_name, when='midnight')\n", (474, 511), False, 'from logging.handlers import TimedRotatingFileHandler\n'), ((715, 748), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (736, 748), False, 'import logging\n'), ((1549, 1579), 'logging.getLogger', 'logging.getLogger', (['logger_name'], {}), '(logger_name)\n', (1566, 1579), False, 'import logging\n'), ((393, 410), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (408, 410), False, 'from datetime import datetime\n')]
|
import operator
import sqlalchemy
import pandas as pd
import numpy as np
from math import ceil
DEFAULT_VARCHAR_LENGTH=100
def get_detected_column_types(df):
""" Get data type of each columns ('DATETIME', 'NUMERIC' or 'STRING')
Parameters:
df (df): pandas dataframe
Returns
df (df): dataframe that all datatypes are converted (df)
"""
assert isinstance(df, pd.DataFrame), 'Parameter must be DataFrame'
for c in df.columns:
# Convert column to string
col_data = df[c].map(str)
col_data = col_data.replace("NaT", None)
col_data = col_data.replace("NaN", None)
# Check NULL column
if(df[c].isnull().values.all()):
continue
# Check DATETIME
try:
# Check if it's able to convert column to datetime
# if column is datetime, then skip to convert
if 'datetime' in str(col_data.dtype):
continue
df[c] = pd.to_datetime(col_data)
continue
except ValueError:
pass
# Check NUMERIC
try:
# Drop NaN rows
series = df[c].dropna()
# if column_name is int or float, then skip to convert
if 'int' in str(col_data.dtype) or 'float' in str(col_data.dtype):
continue
# Check if it can be converted to numeric
df[c] = pd.to_numeric(series)
except ValueError:
pass
return df
def get_max_length_columns(df):
""" find maximum length of value in each column and ceil it
Parameters:
df (df): dataframe
Returns
arr_max_len_columns (array): array of length for each column
arr_max_decimal (array): array of maximum decimal for float, double, and decimal datatype, otherwise its value is zero
"""
assert isinstance(df, pd.DataFrame), 'Parameter must be DataFrame'
measurer = np.vectorize(len)
arr_max_len_columns = []
arr_max_decimal = []
for i, x in enumerate(measurer(df.values.astype(str)).max(axis=0)):
if 'float' in str(df.iloc[:, i].dtype):
col_data = df.iloc[:, i].map(str).str.extract('\.(.*)')
max_decimal = measurer(col_data.values.astype(str)).max(axis=0)[0]
arr_max_decimal.append(max_decimal)
else:
arr_max_decimal.append(0)
arr_max_len_columns.append(ceil(x / 10) * 10)
return arr_max_len_columns, arr_max_decimal
def convert_df_datatype_to_sqlalchemy_datatype(df):
""" convert dataframe's data type into SQLAlchemy's data type
Parameters:
df (df): dataframe
Returns:
dtype_dict (dict): dict of data type of each column in SQLAlchemy standard
"""
assert isinstance(df, pd.DataFrame), 'Parameter must be DataFrame'
arr_max_len_columns, arr_max_decimal = get_max_length_columns(df)
dtype_dict = {}
for i, col_name in enumerate(df.columns):
if(df[col_name].isnull().values.all()):
dtype_dict[col_name] = sqlalchemy.types.VARCHAR(DEFAULT_VARCHAR_LENGTH)
elif 'bool' in str(df[col_name].dtype):
# Compatible with SQL-Server and MySQL, since MySQL doesn't have BOOLEAN.
dtype_dict[col_name] = sqlalchemy.types.INTEGER()
elif 'int' in str(df[col_name].dtype):
dtype_dict[col_name] = sqlalchemy.types.INTEGER()
elif 'float' in str(df[col_name].dtype):
if df[col_name].dropna().apply(float.is_integer).all():
dtype_dict[col_name] = sqlalchemy.types.INTEGER()
else:
dtype_dict[col_name] = sqlalchemy.types.DECIMAL(precision=arr_max_len_columns[i], scale=arr_max_decimal[i])
elif 'datetime' in str(df[col_name].dtype):
dtype_dict[col_name] = sqlalchemy.types.DateTime()
elif 'object' in str(df[col_name].dtype):
# check the limit of varhcar, if the length exeeds, then use TEXT
if arr_max_len_columns[i] > 1000:
dtype_dict[col_name] = sqlalchemy.types.Text()
else:
dtype_dict[col_name] = sqlalchemy.types.VARCHAR(length=arr_max_len_columns[i])
else:
dtype_dict[col_name] = sqlalchemy.types.VARCHAR(length=arr_max_len_columns[i])
return dtype_dict
def get_datatype_each_col(df):
""" main function to call sub-function in order to find data type and data length for each column
Parameters:
df (df): dataframe
Returns:
dtype_dict (dict): dict of data type of each column in SQLAlchemy standard (dict)
"""
assert isinstance(df, pd.DataFrame), 'Parameter must be DataFrame'
df = get_detected_column_types(df)
dtype_dict = convert_df_datatype_to_sqlalchemy_datatype(df)
del df
return dtype_dict
|
[
"numpy.vectorize",
"sqlalchemy.types.DateTime",
"math.ceil",
"sqlalchemy.types.INTEGER",
"sqlalchemy.types.VARCHAR",
"pandas.to_datetime",
"sqlalchemy.types.Text",
"sqlalchemy.types.DECIMAL",
"pandas.to_numeric"
] |
[((1950, 1967), 'numpy.vectorize', 'np.vectorize', (['len'], {}), '(len)\n', (1962, 1967), True, 'import numpy as np\n'), ((983, 1007), 'pandas.to_datetime', 'pd.to_datetime', (['col_data'], {}), '(col_data)\n', (997, 1007), True, 'import pandas as pd\n'), ((1422, 1443), 'pandas.to_numeric', 'pd.to_numeric', (['series'], {}), '(series)\n', (1435, 1443), True, 'import pandas as pd\n'), ((3058, 3106), 'sqlalchemy.types.VARCHAR', 'sqlalchemy.types.VARCHAR', (['DEFAULT_VARCHAR_LENGTH'], {}), '(DEFAULT_VARCHAR_LENGTH)\n', (3082, 3106), False, 'import sqlalchemy\n'), ((2427, 2439), 'math.ceil', 'ceil', (['(x / 10)'], {}), '(x / 10)\n', (2431, 2439), False, 'from math import ceil\n'), ((3276, 3302), 'sqlalchemy.types.INTEGER', 'sqlalchemy.types.INTEGER', ([], {}), '()\n', (3300, 3302), False, 'import sqlalchemy\n'), ((3385, 3411), 'sqlalchemy.types.INTEGER', 'sqlalchemy.types.INTEGER', ([], {}), '()\n', (3409, 3411), False, 'import sqlalchemy\n'), ((3568, 3594), 'sqlalchemy.types.INTEGER', 'sqlalchemy.types.INTEGER', ([], {}), '()\n', (3592, 3594), False, 'import sqlalchemy\n'), ((3652, 3741), 'sqlalchemy.types.DECIMAL', 'sqlalchemy.types.DECIMAL', ([], {'precision': 'arr_max_len_columns[i]', 'scale': 'arr_max_decimal[i]'}), '(precision=arr_max_len_columns[i], scale=\n arr_max_decimal[i])\n', (3676, 3741), False, 'import sqlalchemy\n'), ((3824, 3851), 'sqlalchemy.types.DateTime', 'sqlalchemy.types.DateTime', ([], {}), '()\n', (3849, 3851), False, 'import sqlalchemy\n'), ((4251, 4306), 'sqlalchemy.types.VARCHAR', 'sqlalchemy.types.VARCHAR', ([], {'length': 'arr_max_len_columns[i]'}), '(length=arr_max_len_columns[i])\n', (4275, 4306), False, 'import sqlalchemy\n'), ((4065, 4088), 'sqlalchemy.types.Text', 'sqlalchemy.types.Text', ([], {}), '()\n', (4086, 4088), False, 'import sqlalchemy\n'), ((4146, 4201), 'sqlalchemy.types.VARCHAR', 'sqlalchemy.types.VARCHAR', ([], {'length': 'arr_max_len_columns[i]'}), '(length=arr_max_len_columns[i])\n', (4170, 4201), False, 'import sqlalchemy\n')]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.