| code (string, lengths 22 to 1.05M) | apis (list, lengths 1 to 3.31k) | extract_api (string, lengths 75 to 3.25M) |
---|---|---|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import markupfield.fields
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
migrations.CreateModel(
name='Planet',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('url', models.URLField(unique=True)),
('title', models.CharField(max_length=100)),
('date', models.DateTimeField(default=django.utils.timezone.now, db_index=True)),
],
options={
'ordering': ['-date'],
},
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=100)),
('slug', models.SlugField(unique_for_date=b'date')),
('date', models.DateTimeField(default=django.utils.timezone.now, db_index=True)),
('body', markupfield.fields.MarkupField(rendered_field=True)),
('body_markup_type', models.CharField(default=b'markdown', max_length=30, choices=[(b'', b'--'), (b'html', 'HTML'), (b'plain', 'Plain'), (b'markdown', 'Markdown'), (b'restructuredtext', 'Restructured Text')])),
('_body_rendered', models.TextField(editable=False)),
('author', models.ForeignKey(editable=False, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-date'],
},
),
    ]
|
[
"django.db.models.URLField",
"django.db.models.TextField",
"django.db.migrations.swappable_dependency",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.SlugField",
"django.db.models.AutoField",
"django.db.models.DateTimeField"
] |
[((265, 322), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (296, 322), False, 'from django.db import models, migrations\n'), ((453, 546), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (469, 546), False, 'from django.db import models, migrations\n'), ((569, 597), 'django.db.models.URLField', 'models.URLField', ([], {'unique': '(True)'}), '(unique=True)\n', (584, 597), False, 'from django.db import models, migrations\n'), ((626, 658), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (642, 658), False, 'from django.db import models, migrations\n'), ((686, 756), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'django.utils.timezone.now', 'db_index': '(True)'}), '(default=django.utils.timezone.now, db_index=True)\n', (706, 756), False, 'from django.db import models, migrations\n'), ((962, 1055), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (978, 1055), False, 'from django.db import models, migrations\n'), ((1080, 1112), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1096, 1112), False, 'from django.db import models, migrations\n'), ((1140, 1181), 'django.db.models.SlugField', 'models.SlugField', ([], {'unique_for_date': "b'date'"}), "(unique_for_date=b'date')\n", (1156, 1181), False, 'from django.db import models, migrations\n'), ((1209, 1279), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'django.utils.timezone.now', 'db_index': '(True)'}), '(default=django.utils.timezone.now, db_index=True)\n', (1229, 1279), False, 'from django.db import models, migrations\n'), ((1398, 1594), 'django.db.models.CharField', 'models.CharField', ([], {'default': "b'markdown'", 'max_length': '(30)', 'choices': "[(b'', b'--'), (b'html', 'HTML'), (b'plain', 'Plain'), (b'markdown',\n 'Markdown'), (b'restructuredtext', 'Restructured Text')]"}), "(default=b'markdown', max_length=30, choices=[(b'', b'--'),\n (b'html', 'HTML'), (b'plain', 'Plain'), (b'markdown', 'Markdown'), (\n b'restructuredtext', 'Restructured Text')])\n", (1414, 1594), False, 'from django.db import models, migrations\n'), ((1623, 1655), 'django.db.models.TextField', 'models.TextField', ([], {'editable': '(False)'}), '(editable=False)\n', (1639, 1655), False, 'from django.db import models, migrations\n'), ((1685, 1747), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'editable': '(False)', 'to': 'settings.AUTH_USER_MODEL'}), '(editable=False, to=settings.AUTH_USER_MODEL)\n', (1702, 1747), False, 'from django.db import models, migrations\n')]
|
# -*- coding: utf-8 -*-
"""
@Time : 2021/8/24 13:00
@Auth : apecode
@File : notice.py
@IDE  : PyCharm
@Blog : https://liiuyangxiong.cn
"""
import json
import time
from email.mime.text import MIMEText
from email.header import Header
from smtplib import SMTP_SSL
import requests
import config
class Notice:
    def __init__(self, admin: dict, account: dict):
        self.admin = admin,  # trailing comma: self.admin is a 1-tuple, accessed as self.admin[0] below
        self.account = account

    def send(self, content):
        # Dispatch on the account's "notice" setting: empty/"local" -> local file,
        # "mail" -> SMTP mail, anything else -> PushPlus.
        if self.account.get("notice") == "" or self.account.get("notice") == "local":
            return Notice.saveLocal(content)
        elif self.account.get("notice") == "mail":
            if self.admin[0]["mail"]["sendMail"] == "" and self.admin[0]["mail"]["authCode"] == "":
                print("Sender mailbox is not configured; falling back to a local record")
                Notice.saveLocal(content)
            else:
                self.send_mail(content)
        else:
            self.sendPushPlus(content)
        print(content)

    def send_mail(self, message: str):
        try:
            host_server = self.admin[0]["mail"]["smtpServer"]
            # sender's mailbox
            sendMail = self.admin[0]["mail"]["sendMail"]
            # mailbox authorization code
            authCode = self.admin[0]["mail"]["authCode"]
            # recipient's mailbox
            receiver = self.account.get("mail")
            # mail subject
            mail_title = "Yiban " + time.strftime("%Y-%m-%d", time.localtime(int(time.time()))) + " sign-in report"
            # log in over SSL
            smtp = SMTP_SSL(host_server)
            smtp.ehlo(host_server)
            smtp.login(sendMail, authCode)
            msg = MIMEText(message, "html", 'utf-8')
            msg["Subject"] = Header(mail_title, 'utf-8')
            msg["From"] = sendMail
            msg["To"] = receiver
            smtp.sendmail(sendMail, receiver, msg.as_string())
            smtp.quit()
            return True
        except Exception as e:
            print(e)
            return False

    # send a notification through PushPlus
    def sendPushPlus(self, content: str):
        url = 'https://www.pushplus.plus/send'
        headers = {"Content-Type": "application/json"}
        data = json.dumps({
            "token": self.account.get("pushToken"),
            "title": "Yiban sign-in notification",
            "content": content,
            "template": "txt"
        })
        response = requests.post(url=url, data=data, headers=headers).json()
        if response['code'] == 200:
            return Notice.log(f"{self.account.get('mobile')}\tPush Plus push succeeded!\n")
        else:
            print("Sending failed; falling back to a local record")
            Notice.saveLocal(content)
            return Notice.log(f"{self.account.get('mobile')}\tPush Plus push failed! Reason: {response['msg']}\n")

    @staticmethod
    def log(message: str):
        with open(file="data/logs.log", mode="a+", encoding="utf-8") as f:
            f.write(message)
            print(message)

    @staticmethod
    def saveLocal(message):
        with open("data/result.log", mode="a+", encoding="utf-8") as w:
            w.write(message)
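# Hypothetical usage sketch (not part of the original script), showing the dict shapes
# this class expects; real values would come from the project's config:
#   admin = {"mail": {"smtpServer": "smtp.example.com", "sendMail": "bot@example.com", "authCode": "..."}}
#   account = {"notice": "mail", "mail": "user@example.com", "pushToken": "", "mobile": "..."}
#   Notice(admin, account).send("sign-in succeeded")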
|
[
"email.header.Header",
"smtplib.SMTP_SSL",
"email.mime.text.MIMEText",
"time.time",
"requests.post"
] |
[((1497, 1518), 'smtplib.SMTP_SSL', 'SMTP_SSL', (['host_server'], {}), '(host_server)\n', (1505, 1518), False, 'from smtplib import SMTP_SSL\n'), ((1620, 1654), 'email.mime.text.MIMEText', 'MIMEText', (['message', '"""html"""', '"""utf-8"""'], {}), "(message, 'html', 'utf-8')\n", (1628, 1654), False, 'from email.mime.text import MIMEText\n'), ((1685, 1712), 'email.header.Header', 'Header', (['mail_title', '"""utf-8"""'], {}), "(mail_title, 'utf-8')\n", (1691, 1712), False, 'from email.header import Header\n'), ((2354, 2404), 'requests.post', 'requests.post', ([], {'url': 'url', 'data': 'data', 'headers': 'headers'}), '(url=url, data=data, headers=headers)\n', (2367, 2404), False, 'import requests\n'), ((1435, 1446), 'time.time', 'time.time', ([], {}), '()\n', (1444, 1446), False, 'import time\n')]
|
#!/usr/bin/env python
"""
This calls test_executable_caller as it should be called for the test to work.
"""
import subprocess
if __name__ == '__main__':
    process = subprocess.Popen(
        ['python', 'test_executable_caller.py', 'test_executable_callee.py'],
        shell=False,
        universal_newlines=True,
    )
    exit_status = process.wait()
|
[
"subprocess.Popen"
] |
[((171, 299), 'subprocess.Popen', 'subprocess.Popen', (["['python', 'test_executable_caller.py', 'test_executable_callee.py']"], {'shell': '(False)', 'universal_newlines': '(True)'}), "(['python', 'test_executable_caller.py',\n 'test_executable_callee.py'], shell=False, universal_newlines=True)\n", (187, 299), False, 'import subprocess\n')]
|
import datetime
from django.contrib.auth.models import User
from quiz.models import (
AnswerUser,
Category,
Grade,
Question,
QuestionScore,
Quiz,
Statistic,
SubCategory,
ThemeScore,
)
import pytest
### FIXTURES ###
@pytest.fixture
def category_m(db):
return Category.objects.create(category="m")
@pytest.fixture
def sub_category_n(db, category_m):
return SubCategory.objects.create(category=category_m, sub_category="n")
@pytest.fixture
def user_A(db):
return User.objects.create_user(username="A")
@pytest.fixture
def quiz_q(db, category_m, sub_category_n, user_A):
date = datetime.datetime.now()
return Quiz.objects.create(
title="title",
description="Long description",
creator=user_A,
category=category_m,
category_name="m",
sub_category=sub_category_n,
created=date,
random_order=False,
difficulty=1,
)
@pytest.fixture
def question_q(db, quiz_q):
return Question.objects.create(
quiz=quiz_q,
difficulty=1,
order=1,
figure=None,
content="question",
explanation=None,
theme1="t1",
theme2="t2",
theme3="t3",
)
@pytest.fixture
def answerUser(db, question_q, user_A):
a = AnswerUser.objects.create(correct=True)
a.save()
a.question.add(question_q)
a.user.add(user_A)
return a
@pytest.fixture
def stats_s(db, quiz_q):
return Statistic.objects.create(
quiz=quiz_q, number_participants=10, mean=15, easy=5, medium=5, difficult=5
)
@pytest.fixture
def grade_g(db, stats_s):
return Grade.objects.create(grade=5, number=10, statistics=stats_s)
@pytest.fixture
def questionScore_qs(db, stats_s, question_q):
return QuestionScore.objects.create(
question=question_q, statistics=stats_s, score=5
)
@pytest.fixture
def themeScore_ts(db, stats_s, quiz_q):
return ThemeScore.objects.create(
theme="t1", score=5, statistics=stats_s, quiz=quiz_q
)
### TESTS ###
def test_category(category_m):
assert isinstance(category_m, Category)
assert category_m.category == "m"
assert str(category_m) == "m"
def test_sub_category(category_m, sub_category_n):
assert sub_category_n.sub_category == "n"
assert sub_category_n.category == category_m
assert isinstance(sub_category_n, SubCategory)
assert str(sub_category_n) == "n (m)"
def test_quiz(quiz_q, user_A, category_m, sub_category_n):
date = datetime.datetime.now()
assert quiz_q.title == "title"
assert quiz_q.description == "Long description"
assert quiz_q.creator == user_A
assert quiz_q.category == category_m
assert quiz_q.sub_category == sub_category_n
assert isinstance(quiz_q.created, datetime.datetime)
assert quiz_q.created.year == date.year
assert quiz_q.created.month == date.month
assert quiz_q.created.day == date.day
assert quiz_q.random_order == False
assert quiz_q.difficulty == 1
assert str(quiz_q) == "title"
def test_question(quiz_q, question_q):
assert question_q.quiz == quiz_q
assert question_q.difficulty == 1
assert question_q.order == 1
assert question_q.figure == None
assert question_q.content == "question"
assert question_q.explanation == None
assert question_q.theme1 == "t1"
assert question_q.theme2 == "t2"
assert question_q.theme3 == "t3"
assert str(question_q) == "question"
def test_answerUser(answerUser, question_q, user_A):
assert answerUser.correct == True
assert answerUser.question.get(pk=question_q.id) == question_q
assert answerUser.user.get(pk=user_A.id) == user_A
def test_statistic(stats_s, quiz_q):
assert stats_s.quiz == quiz_q
assert stats_s.number_participants == 10
assert stats_s.mean == 15
assert stats_s.easy == 5
assert stats_s.medium == 5
assert stats_s.difficult == 5
def test_grade(grade_g, stats_s):
assert grade_g.grade == 5
assert grade_g.number == 10
assert grade_g.statistics == stats_s
def test_questionScore(stats_s, question_q, questionScore_qs):
assert questionScore_qs.question == question_q
assert questionScore_qs.statistics == stats_s
assert questionScore_qs.score == 5
def test_themeScore(themeScore_ts, stats_s, quiz_q):
assert themeScore_ts.theme == "t1"
assert themeScore_ts.score == 5
assert themeScore_ts.statistics == stats_s
assert themeScore_ts.quiz == quiz_q
|
[
"quiz.models.Grade.objects.create",
"quiz.models.Question.objects.create",
"quiz.models.ThemeScore.objects.create",
"quiz.models.Category.objects.create",
"quiz.models.AnswerUser.objects.create",
"quiz.models.QuestionScore.objects.create",
"django.contrib.auth.models.User.objects.create_user",
"quiz.models.Quiz.objects.create",
"quiz.models.Statistic.objects.create",
"quiz.models.SubCategory.objects.create",
"datetime.datetime.now"
] |
[((305, 342), 'quiz.models.Category.objects.create', 'Category.objects.create', ([], {'category': '"""m"""'}), "(category='m')\n", (328, 342), False, 'from quiz.models import AnswerUser, Category, Grade, Question, QuestionScore, Quiz, Statistic, SubCategory, ThemeScore\n'), ((408, 473), 'quiz.models.SubCategory.objects.create', 'SubCategory.objects.create', ([], {'category': 'category_m', 'sub_category': '"""n"""'}), "(category=category_m, sub_category='n')\n", (434, 473), False, 'from quiz.models import AnswerUser, Category, Grade, Question, QuestionScore, Quiz, Statistic, SubCategory, ThemeScore\n'), ((519, 557), 'django.contrib.auth.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': '"""A"""'}), "(username='A')\n", (543, 557), False, 'from django.contrib.auth.models import User\n'), ((639, 662), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (660, 662), False, 'import datetime\n'), ((674, 883), 'quiz.models.Quiz.objects.create', 'Quiz.objects.create', ([], {'title': '"""title"""', 'description': '"""Long description"""', 'creator': 'user_A', 'category': 'category_m', 'category_name': '"""m"""', 'sub_category': 'sub_category_n', 'created': 'date', 'random_order': '(False)', 'difficulty': '(1)'}), "(title='title', description='Long description', creator=\n user_A, category=category_m, category_name='m', sub_category=\n sub_category_n, created=date, random_order=False, difficulty=1)\n", (693, 883), False, 'from quiz.models import AnswerUser, Category, Grade, Question, QuestionScore, Quiz, Statistic, SubCategory, ThemeScore\n'), ((1010, 1168), 'quiz.models.Question.objects.create', 'Question.objects.create', ([], {'quiz': 'quiz_q', 'difficulty': '(1)', 'order': '(1)', 'figure': 'None', 'content': '"""question"""', 'explanation': 'None', 'theme1': '"""t1"""', 'theme2': '"""t2"""', 'theme3': '"""t3"""'}), "(quiz=quiz_q, difficulty=1, order=1, figure=None,\n content='question', explanation=None, theme1='t1', theme2='t2', theme3='t3'\n )\n", (1033, 1168), False, 'from quiz.models import AnswerUser, Category, Grade, Question, QuestionScore, Quiz, Statistic, SubCategory, ThemeScore\n'), ((1305, 1344), 'quiz.models.AnswerUser.objects.create', 'AnswerUser.objects.create', ([], {'correct': '(True)'}), '(correct=True)\n', (1330, 1344), False, 'from quiz.models import AnswerUser, Category, Grade, Question, QuestionScore, Quiz, Statistic, SubCategory, ThemeScore\n'), ((1479, 1585), 'quiz.models.Statistic.objects.create', 'Statistic.objects.create', ([], {'quiz': 'quiz_q', 'number_participants': '(10)', 'mean': '(15)', 'easy': '(5)', 'medium': '(5)', 'difficult': '(5)'}), '(quiz=quiz_q, number_participants=10, mean=15, easy\n =5, medium=5, difficult=5)\n', (1503, 1585), False, 'from quiz.models import AnswerUser, Category, Grade, Question, QuestionScore, Quiz, Statistic, SubCategory, ThemeScore\n'), ((1650, 1710), 'quiz.models.Grade.objects.create', 'Grade.objects.create', ([], {'grade': '(5)', 'number': '(10)', 'statistics': 'stats_s'}), '(grade=5, number=10, statistics=stats_s)\n', (1670, 1710), False, 'from quiz.models import AnswerUser, Category, Grade, Question, QuestionScore, Quiz, Statistic, SubCategory, ThemeScore\n'), ((1787, 1865), 'quiz.models.QuestionScore.objects.create', 'QuestionScore.objects.create', ([], {'question': 'question_q', 'statistics': 'stats_s', 'score': '(5)'}), '(question=question_q, statistics=stats_s, score=5)\n', (1815, 1865), False, 'from quiz.models import AnswerUser, Category, Grade, Question, QuestionScore, Quiz, 
Statistic, SubCategory, ThemeScore\n'), ((1949, 2028), 'quiz.models.ThemeScore.objects.create', 'ThemeScore.objects.create', ([], {'theme': '"""t1"""', 'score': '(5)', 'statistics': 'stats_s', 'quiz': 'quiz_q'}), "(theme='t1', score=5, statistics=stats_s, quiz=quiz_q)\n", (1974, 2028), False, 'from quiz.models import AnswerUser, Category, Grade, Question, QuestionScore, Quiz, Statistic, SubCategory, ThemeScore\n'), ((2521, 2544), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2542, 2544), False, 'import datetime\n')]
|
'''
# Method 1 for computing the hypotenuse (without the math module)
co = float(input('length of the opposite leg: '))
ca = float(input('length of the adjacent leg: '))
h = ((co ** 2) + (ca ** 2)) ** (1/2)
print(f'the hypotenuse is {h:.2f}')
'''
# Method 2 for computing the hypotenuse (with the math module)
import math
co = float(input('length of the opposite leg: '))
ca = float(input('length of the adjacent leg: '))
h = math.hypot(co, ca)
print(f'the hypotenuse is {h:.2f}')
|
[
"math.hypot"
] |
[((430, 448), 'math.hypot', 'math.hypot', (['co', 'ca'], {}), '(co, ca)\n', (440, 448), False, 'import math\n')]
|
import csv
import json
from ofxtools.Parser import OFXTree
"""
statement schema:
{
"id":int,
"ref_no": int,
"date": string,
"account": int, account code,
"isincome": bool,
"countinbudget": bool,
"payee": string,
"notes": {
"bank": string,
"personal": string
},
"categories": [strings],
"totalamount": float,
"splits":[
{
"amount": float,
"categories": [],
"linked_transaction": int,
"countinbudget": bool
}
]
}
"""
def fetch_all_transactions():
cc = import_cc_statement()
acct = import_acct_statement()
return cc + acct
def convert_acct_fitid(id):
if "-" in id:
return id.split("-")[0]
else:
return id.split(".")[0]
def convert_date_to_ISO(datestring):
return "{}-{}-{}T{}:{}:{}Z".format(
datestring[:4],
datestring[4:6],
datestring[6:8],
datestring[8:10],
datestring[10:12],
datestring[12:14],
)
def import_acct_statement():
print("Importing BOFA Account Statement")
parser = OFXTree()
parser.parse("./data/stmt.qfx")
transactions_root = parser.find(".//BANKTRANLIST")[:]
transactions = []
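    # The first two children of BANKTRANLIST give the statement date range; the
    # remaining children are the individual transactions, hence the [2:] slice below.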
for trans in transactions_root[2:]:
transactions.append({
"id": 0,
"ref_no": int(convert_acct_fitid(trans[3].text)),
"date": convert_date_to_ISO(trans[1].text),
"account": "BOFA_CHECKING",
"payee": trans[4].text,
"notes": {
"bank": "",
"personal": ""
},
"categories": [],
"totalamount": float(trans[2].text),
"splits": []
})
return transactions
def import_cc_statement():
print("Importing BOFA CC Statement")
parser = OFXTree()
parser.parse("./data/currentTransaction_1626.qfx")
transactions_root = parser.find(".//BANKTRANLIST")[:]
id = 0
transactions = []
for trans in transactions_root[2:]:
transactions.append({
"id": id,
"ref_no": int(trans[3].text),
"date": convert_date_to_ISO(trans[1].text),
"account": "BOFA_CASHREWARDS_CC",
"payee": trans[6].text,
"notes": {
"bank": "",
"personal": ""
},
"categories": [],
"totalamount": float(trans[2].text),
"splits": []
})
return transactions
def fetch_acct_info():
print("Updating Account Information")
accounts = []
parser = OFXTree()
parser.parse('./data/currentTransaction_1626.qfx')
accounts.append({
"name": "BOFA_CASHREWARDS_CC",
"id": 2,
"balance": parser.find(".//BALAMT").text,
"last_updated": convert_date_to_ISO(parser.find(".//DTASOF").text)
})
parser.parse('./data/stmt.qfx')
accounts.append({
"name": "BOFA_CHECKING",
"id": 0,
"balance": parser.find(".//BALAMT").text,
"last_updated": convert_date_to_ISO(parser.find(".//DTASOF").text)
})
return accounts
if __name__ == '__main__':
accts = fetch_acct_info()
for acct in accts:
print(acct)
|
[
"ofxtools.Parser.OFXTree"
] |
[((999, 1008), 'ofxtools.Parser.OFXTree', 'OFXTree', ([], {}), '()\n', (1006, 1008), False, 'from ofxtools.Parser import OFXTree\n'), ((1631, 1640), 'ofxtools.Parser.OFXTree', 'OFXTree', ([], {}), '()\n', (1638, 1640), False, 'from ofxtools.Parser import OFXTree\n'), ((2292, 2301), 'ofxtools.Parser.OFXTree', 'OFXTree', ([], {}), '()\n', (2299, 2301), False, 'from ofxtools.Parser import OFXTree\n')]
|
import os
from setuptools import setup
README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name="django-trello-webhooks",
version="0.3",
packages=[
'trello_webhooks',
'trello_webhooks.management',
'trello_webhooks.management.commands',
'trello_webhooks.migrations',
'trello_webhooks.templatetags',
'trello_webhooks.tests',
],
install_requires=['django>=1.7.1'],
include_package_data=True,
description='Django Trello Webhooks - Trello callback integration for Django.',
long_description=README,
url='https://github.com/yunojuno/django-trello-webhooks',
author='<NAME>',
author_email='<EMAIL>',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
|
[
"os.path.abspath",
"os.path.dirname",
"setuptools.setup"
] |
[((238, 1204), 'setuptools.setup', 'setup', ([], {'name': '"""django-trello-webhooks"""', 'version': '"""0.3"""', 'packages': "['trello_webhooks', 'trello_webhooks.management',\n 'trello_webhooks.management.commands', 'trello_webhooks.migrations',\n 'trello_webhooks.templatetags', 'trello_webhooks.tests']", 'install_requires': "['django>=1.7.1']", 'include_package_data': '(True)', 'description': '"""Django Trello Webhooks - Trello callback integration for Django."""', 'long_description': 'README', 'url': '"""https://github.com/yunojuno/django-trello-webhooks"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'maintainer': '"""<NAME>"""', 'maintainer_email': '"""<EMAIL>"""', 'classifiers': "['Environment :: Web Environment', 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent', 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content']"}), "(name='django-trello-webhooks', version='0.3', packages=[\n 'trello_webhooks', 'trello_webhooks.management',\n 'trello_webhooks.management.commands', 'trello_webhooks.migrations',\n 'trello_webhooks.templatetags', 'trello_webhooks.tests'],\n install_requires=['django>=1.7.1'], include_package_data=True,\n description=\n 'Django Trello Webhooks - Trello callback integration for Django.',\n long_description=README, url=\n 'https://github.com/yunojuno/django-trello-webhooks', author='<NAME>',\n author_email='<EMAIL>', maintainer='<NAME>', maintainer_email='<EMAIL>',\n classifiers=['Environment :: Web Environment', 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent', 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content'])\n", (243, 1204), False, 'from setuptools import setup\n'), ((197, 222), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (212, 222), False, 'import os\n'), ((67, 92), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (82, 92), False, 'import os\n')]
|
from lightfm import LightFM
from lightfm.datasets import fetch_movielens
from pgvector.sqlalchemy import Vector
from sqlalchemy import create_engine, text, Column, Float, Integer, String
from sqlalchemy.orm import declarative_base, Session
engine = create_engine('postgresql+psycopg2://localhost/pgvector_example', future=True)
with engine.connect() as conn:
conn.execute(text('CREATE EXTENSION IF NOT EXISTS vector'))
conn.commit()
Base = declarative_base()
class User(Base):
__tablename__ = 'user'
id = Column(Integer, primary_key=True)
factors = Column(Vector(20))
class Item(Base):
__tablename__ = 'item'
id = Column(Integer, primary_key=True)
title = Column(String)
factors = Column(Vector(20))
bias = Column(Float)
Base.metadata.drop_all(engine)
Base.metadata.create_all(engine)
data = fetch_movielens(min_rating=5.0)
model = LightFM(loss='warp', no_components=20)
model.fit(data['train'], epochs=30)
user_biases, user_factors = model.get_user_representations()
item_biases, item_factors = model.get_item_representations()
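# Each factors row is a 20-dimensional latent vector (matching Vector(20) on the models
# above); item biases are stored in their own column so they can be subtracted at query time.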
users = [dict(id=i, factors=factors) for i, factors in enumerate(user_factors)]
items = [dict(id=i, title=data['item_labels'][i], factors=factors, bias=item_biases[i].item()) for i, factors in enumerate(item_factors)]
session = Session(engine)
session.bulk_insert_mappings(User, users)
session.bulk_insert_mappings(Item, items)
session.commit()
user = session.query(User).get(1)
# subtract item bias for negative inner product
items = session.query(Item).order_by(Item.factors.max_inner_product(user.factors) - Item.bias).limit(5).all()
print('user-based recs:', [item.title for item in items])
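# Item-based recommendations: nearest neighbours of a given item by cosine distance,
# excluding the item itself.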
item = session.query(Item).filter(Item.title == 'Star Wars (1977)').first()
items = session.query(Item).filter(Item.id != item.id).order_by(Item.factors.cosine_distance(item.factors)).limit(5).all()
print('item-based recs:', [item.title for item in items])
|
[
"pgvector.sqlalchemy.Vector",
"sqlalchemy.orm.declarative_base",
"lightfm.LightFM",
"sqlalchemy.orm.Session",
"sqlalchemy.text",
"sqlalchemy.Column",
"sqlalchemy.create_engine",
"lightfm.datasets.fetch_movielens"
] |
[((250, 328), 'sqlalchemy.create_engine', 'create_engine', (['"""postgresql+psycopg2://localhost/pgvector_example"""'], {'future': '(True)'}), "('postgresql+psycopg2://localhost/pgvector_example', future=True)\n", (263, 328), False, 'from sqlalchemy import create_engine, text, Column, Float, Integer, String\n'), ((450, 468), 'sqlalchemy.orm.declarative_base', 'declarative_base', ([], {}), '()\n', (466, 468), False, 'from sqlalchemy.orm import declarative_base, Session\n'), ((843, 874), 'lightfm.datasets.fetch_movielens', 'fetch_movielens', ([], {'min_rating': '(5.0)'}), '(min_rating=5.0)\n', (858, 874), False, 'from lightfm.datasets import fetch_movielens\n'), ((883, 921), 'lightfm.LightFM', 'LightFM', ([], {'loss': '"""warp"""', 'no_components': '(20)'}), "(loss='warp', no_components=20)\n", (890, 921), False, 'from lightfm import LightFM\n'), ((1311, 1326), 'sqlalchemy.orm.Session', 'Session', (['engine'], {}), '(engine)\n', (1318, 1326), False, 'from sqlalchemy.orm import declarative_base, Session\n'), ((526, 559), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (532, 559), False, 'from sqlalchemy import create_engine, text, Column, Float, Integer, String\n'), ((650, 683), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (656, 683), False, 'from sqlalchemy import create_engine, text, Column, Float, Integer, String\n'), ((696, 710), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (702, 710), False, 'from sqlalchemy import create_engine, text, Column, Float, Integer, String\n'), ((755, 768), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (761, 768), False, 'from sqlalchemy import create_engine, text, Column, Float, Integer, String\n'), ((377, 422), 'sqlalchemy.text', 'text', (['"""CREATE EXTENSION IF NOT EXISTS vector"""'], {}), "('CREATE EXTENSION IF NOT EXISTS vector')\n", (381, 422), False, 'from sqlalchemy import create_engine, text, Column, Float, Integer, String\n'), ((581, 591), 'pgvector.sqlalchemy.Vector', 'Vector', (['(20)'], {}), '(20)\n', (587, 591), False, 'from pgvector.sqlalchemy import Vector\n'), ((732, 742), 'pgvector.sqlalchemy.Vector', 'Vector', (['(20)'], {}), '(20)\n', (738, 742), False, 'from pgvector.sqlalchemy import Vector\n')]
|
# coding: utf-8
"""
BillForward REST API
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class ProfilesApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def get_all_profiles(self, **kwargs):
"""
Returns a collection of all profiles. By default 10 values are returned. Records are returned in natural order
{\"nickname\":\"Get all profiles\",\"response\":\"getProfileAll.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all_profiles(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] organizations: A list of organizations used to restrict the scope of API calls.
:param int offset: The offset from the first profile to return.
:param int records: The maximum number of profiles to return.
:param str order_by: Specify a field used to order the result set.
        :param str order: The direction of any ordering, either ASC or DESC.
:return: ProfilePagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_all_profiles_with_http_info(**kwargs)
else:
(data) = self.get_all_profiles_with_http_info(**kwargs)
return data
def get_all_profiles_with_http_info(self, **kwargs):
"""
Returns a collection of all profiles. By default 10 values are returned. Records are returned in natural order
{\"nickname\":\"Get all profiles\",\"response\":\"getProfileAll.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all_profiles_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] organizations: A list of organizations used to restrict the scope of API calls.
:param int offset: The offset from the first profile to return.
:param int records: The maximum number of profiles to return.
:param str order_by: Specify a field used to order the result set.
        :param str order: The direction of any ordering, either ASC or DESC.
:return: ProfilePagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['organizations', 'offset', 'records', 'order_by', 'order']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_all_profiles" % key
)
params[key] = val
del params['kwargs']
resource_path = '/profiles'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'organizations' in params:
query_params['organizations'] = params['organizations']
if 'offset' in params:
query_params['offset'] = params['offset']
if 'records' in params:
query_params['records'] = params['records']
if 'order_by' in params:
query_params['order_by'] = params['order_by']
if 'order' in params:
query_params['order'] = params['order']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ProfilePagedMetadata',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def get_profile(self, profile_id, **kwargs):
"""
Returns a single profile, specified by the ID parameter.
{\"nickname\":\"Retrieve an existing profile\",\"response\":\"getProfileByID.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_profile(profile_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str profile_id: ID of the Profile. (required)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:return: ProfilePagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_profile_with_http_info(profile_id, **kwargs)
else:
(data) = self.get_profile_with_http_info(profile_id, **kwargs)
return data
def get_profile_with_http_info(self, profile_id, **kwargs):
"""
Returns a single profile, specified by the ID parameter.
{\"nickname\":\"Retrieve an existing profile\",\"response\":\"getProfileByID.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_profile_with_http_info(profile_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str profile_id: ID of the Profile. (required)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:return: ProfilePagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['profile_id', 'organizations']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_profile" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'profile_id' is set
if ('profile_id' not in params) or (params['profile_id'] is None):
raise ValueError("Missing the required parameter `profile_id` when calling `get_profile`")
resource_path = '/profiles/{profile-ID}'.replace('{format}', 'json')
path_params = {}
if 'profile_id' in params:
path_params['profile-ID'] = params['profile_id']
query_params = {}
if 'organizations' in params:
query_params['organizations'] = params['organizations']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['text/plain'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ProfilePagedMetadata',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def get_profile_by_account_id(self, account_id, **kwargs):
"""
Returns a collection of profiles, specified by the account-ID parameter. By default 10 values are returned. Records are returned in natural order
{\"nickname\":\"Retrieve by account\",\"response\":\"getProfileByAccountID.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_profile_by_account_id(account_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str account_id: The account-ID of the profile. (required)
:param list[str] organizations: A list of organizations used to restrict the scope of API calls.
:param int offset: The offset from the first profile to return.
:param int records: The maximum number of profiles to return.
:param str order_by: Specify a field used to order the result set.
        :param str order: The direction of any ordering, either ASC or DESC.
:return: ProfilePagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_profile_by_account_id_with_http_info(account_id, **kwargs)
else:
(data) = self.get_profile_by_account_id_with_http_info(account_id, **kwargs)
return data
def get_profile_by_account_id_with_http_info(self, account_id, **kwargs):
"""
Returns a collection of profiles, specified by the account-ID parameter. By default 10 values are returned. Records are returned in natural order
{\"nickname\":\"Retrieve by account\",\"response\":\"getProfileByAccountID.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_profile_by_account_id_with_http_info(account_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str account_id: The account-ID of the profile. (required)
:param list[str] organizations: A list of organizations used to restrict the scope of API calls.
:param int offset: The offset from the first profile to return.
:param int records: The maximum number of profiles to return.
:param str order_by: Specify a field used to order the result set.
        :param str order: The direction of any ordering, either ASC or DESC.
:return: ProfilePagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'organizations', 'offset', 'records', 'order_by', 'order']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_profile_by_account_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params) or (params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_profile_by_account_id`")
resource_path = '/profiles/account/{account-ID}'.replace('{format}', 'json')
path_params = {}
if 'account_id' in params:
path_params['account-ID'] = params['account_id']
query_params = {}
if 'organizations' in params:
query_params['organizations'] = params['organizations']
if 'offset' in params:
query_params['offset'] = params['offset']
if 'records' in params:
query_params['records'] = params['records']
if 'order_by' in params:
query_params['order_by'] = params['order_by']
if 'order' in params:
query_params['order'] = params['order']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['text/plain'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ProfilePagedMetadata',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def get_profile_by_email_address(self, email, **kwargs):
"""
Returns a single profile, specified by the email parameter.
{\"nickname\":\"Retrieve by e-mail\",\"response\":\"getProfileByEmail.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_profile_by_email_address(email, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str email: The email address of the profile. (required)
:param list[str] organizations: A list of organizations used to restrict the scope of API calls.
:param int offset: The offset from the first profile to return.
:param int records: The maximum number of profiles to return.
:param str order_by: Specify a field used to order the result set.
        :param str order: The direction of any ordering, either ASC or DESC.
:param bool include_retired: Whether retired profiles should be returned.
:return: ProfilePagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_profile_by_email_address_with_http_info(email, **kwargs)
else:
(data) = self.get_profile_by_email_address_with_http_info(email, **kwargs)
return data
def get_profile_by_email_address_with_http_info(self, email, **kwargs):
"""
Returns a single profile, specified by the email parameter.
{\"nickname\":\"Retrieve by e-mail\",\"response\":\"getProfileByEmail.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_profile_by_email_address_with_http_info(email, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str email: The email address of the profile. (required)
:param list[str] organizations: A list of organizations used to restrict the scope of API calls.
:param int offset: The offset from the first profile to return.
:param int records: The maximum number of profiles to return.
:param str order_by: Specify a field used to order the result set.
        :param str order: The direction of any ordering, either ASC or DESC.
:param bool include_retired: Whether retired profiles should be returned.
:return: ProfilePagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['email', 'organizations', 'offset', 'records', 'order_by', 'order', 'include_retired']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_profile_by_email_address" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'email' is set
if ('email' not in params) or (params['email'] is None):
raise ValueError("Missing the required parameter `email` when calling `get_profile_by_email_address`")
resource_path = '/profiles/email/{email}'.replace('{format}', 'json')
path_params = {}
if 'email' in params:
path_params['email'] = params['email']
query_params = {}
if 'organizations' in params:
query_params['organizations'] = params['organizations']
if 'offset' in params:
query_params['offset'] = params['offset']
if 'records' in params:
query_params['records'] = params['records']
if 'order_by' in params:
query_params['order_by'] = params['order_by']
if 'order' in params:
query_params['order'] = params['order']
if 'include_retired' in params:
query_params['include_retired'] = params['include_retired']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['text/plain'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ProfilePagedMetadata',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def update_profile(self, request, **kwargs):
"""
Update a profile
{\"nickname\":\"Update a profile\",\"request\":\"updateProfileRequest.html\",\"response\":\"updateProfileResponse.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_profile(request, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param UpdateProfileRequest request: The profile object to be updated. (required)
:return: ProfilePagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_profile_with_http_info(request, **kwargs)
else:
(data) = self.update_profile_with_http_info(request, **kwargs)
return data
def update_profile_with_http_info(self, request, **kwargs):
"""
Update a profile
{\"nickname\":\"Update a profile\",\"request\":\"updateProfileRequest.html\",\"response\":\"updateProfileResponse.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_profile_with_http_info(request, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param UpdateProfileRequest request: The profile object to be updated. (required)
:return: ProfilePagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['request']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_profile" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'request' is set
if ('request' not in params) or (params['request'] is None):
raise ValueError("Missing the required parameter `request` when calling `update_profile`")
resource_path = '/profiles'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'request' in params:
body_params = params['request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['text/xml', 'application/xml', 'application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ProfilePagedMetadata',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
|
[
"six.iteritems"
] |
[((4492, 4519), 'six.iteritems', 'iteritems', (["params['kwargs']"], {}), "(params['kwargs'])\n", (4501, 4519), False, 'from six import iteritems\n'), ((9178, 9205), 'six.iteritems', 'iteritems', (["params['kwargs']"], {}), "(params['kwargs'])\n", (9187, 9205), False, 'from six import iteritems\n'), ((14778, 14805), 'six.iteritems', 'iteritems', (["params['kwargs']"], {}), "(params['kwargs'])\n", (14787, 14805), False, 'from six import iteritems\n'), ((20740, 20767), 'six.iteritems', 'iteritems', (["params['kwargs']"], {}), "(params['kwargs'])\n", (20749, 20767), False, 'from six import iteritems\n'), ((25708, 25735), 'six.iteritems', 'iteritems', (["params['kwargs']"], {}), "(params['kwargs'])\n", (25717, 25735), False, 'from six import iteritems\n')]
|
from django.contrib import admin
from . import models
@admin.register(models.Reviewer)
class ReviewerAdmin(admin.ModelAdmin):
autocomplete_fields = ["user"]
list_display = ["username", "email"]
search_fields = ["username__istartswith"]
@admin.register(models.Album)
class AlbumAdmin(admin.ModelAdmin):
list_display = ["title", "artist_id", "created_at", "created_by"]
ordering = ["title"]
list_per_page = 30
prepopulated_fields = {
"slug": ["title"]
}
list_select_related = ["artist_id", "created_by"]
autocomplete_fields = ["artist_id"]
search_fields = ["title"]
@admin.register(models.Artist)
class ArtistAdmin(admin.ModelAdmin):
list_display = ["name", "created_at", "created_by"]
ordering = ["name"]
list_per_page = 30
prepopulated_fields = {
"slug": ["name"]
}
search_fields = ['name__istartswith']
@admin.register(models.Genre)
class GenreAdmin(admin.ModelAdmin):
search_fields = ['name__istartswith']
@admin.register(models.AlbumGenre)
class AlbumGenreAdmin(admin.ModelAdmin):
list_display = ["__str__", "album_id", "genre_id"]
autocomplete_fields = ["album_id", "genre_id"]
@admin.register(models.AlbumLink)
class AlbumLinkAdmin(admin.ModelAdmin):
list_display = ["__str__", "album_id"]
autocomplete_fields = ["album_id"]
@admin.register(models.AlbumOfTheYear)
class AlbumOfTheYear(admin.ModelAdmin):
list_display = ["__str__", "album_id"]
autocomplete_fields = ["album_id"]
@admin.register(models.Track)
class TrackAdmin(admin.ModelAdmin):
list_display = ["__str__", "album_id"]
@admin.register(models.Review)
class ReviewAdmin(admin.ModelAdmin):
autocomplete_fields = ["album_id", "reviewer_id"]
list_display = ["__str__", "album_id", "reviewer_id"]
@admin.register(models.FavoriteReviewerArtist)
class FavoriteReviewerArtistAdmin(admin.ModelAdmin):
autocomplete_fields = ["artist_id", "reviewer_id"]
list_display = ["artist_id", "reviewer_id"]
@admin.register(models.ReviewerLink)
class ReviewerLinkAdmin(admin.ModelAdmin):
autocomplete_fields = ["reviewer_id"]
list_display = ["reviewer_id", "service_name"]
|
[
"django.contrib.admin.register"
] |
[((57, 88), 'django.contrib.admin.register', 'admin.register', (['models.Reviewer'], {}), '(models.Reviewer)\n', (71, 88), False, 'from django.contrib import admin\n'), ((253, 281), 'django.contrib.admin.register', 'admin.register', (['models.Album'], {}), '(models.Album)\n', (267, 281), False, 'from django.contrib import admin\n'), ((623, 652), 'django.contrib.admin.register', 'admin.register', (['models.Artist'], {}), '(models.Artist)\n', (637, 652), False, 'from django.contrib import admin\n'), ((897, 925), 'django.contrib.admin.register', 'admin.register', (['models.Genre'], {}), '(models.Genre)\n', (911, 925), False, 'from django.contrib import admin\n'), ((1007, 1040), 'django.contrib.admin.register', 'admin.register', (['models.AlbumGenre'], {}), '(models.AlbumGenre)\n', (1021, 1040), False, 'from django.contrib import admin\n'), ((1191, 1223), 'django.contrib.admin.register', 'admin.register', (['models.AlbumLink'], {}), '(models.AlbumLink)\n', (1205, 1223), False, 'from django.contrib import admin\n'), ((1349, 1386), 'django.contrib.admin.register', 'admin.register', (['models.AlbumOfTheYear'], {}), '(models.AlbumOfTheYear)\n', (1363, 1386), False, 'from django.contrib import admin\n'), ((1512, 1540), 'django.contrib.admin.register', 'admin.register', (['models.Track'], {}), '(models.Track)\n', (1526, 1540), False, 'from django.contrib import admin\n'), ((1623, 1652), 'django.contrib.admin.register', 'admin.register', (['models.Review'], {}), '(models.Review)\n', (1637, 1652), False, 'from django.contrib import admin\n'), ((1805, 1850), 'django.contrib.admin.register', 'admin.register', (['models.FavoriteReviewerArtist'], {}), '(models.FavoriteReviewerArtist)\n', (1819, 1850), False, 'from django.contrib import admin\n'), ((2010, 2045), 'django.contrib.admin.register', 'admin.register', (['models.ReviewerLink'], {}), '(models.ReviewerLink)\n', (2024, 2045), False, 'from django.contrib import admin\n')]
|
import mxnet as mx
from mxnet import nd, autograd
import numpy as np
###################################
# X, y - training data
# n - number of data points in dataset
# Py - desired label distribution
###################################
def tweak_dist(X, y, num_labels, n, Py):
shape = (n, *X.shape[1:])
Xshift = np.zeros(shape)
yshift = np.zeros(n, dtype=np.int8)
# get indices for each label
    indices_by_label = [(y==k).nonzero()[0] for k in range(num_labels)]
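    # draw n labels from the desired distribution Py: multinomial(1, Py) yields a one-hot
    # row per sample, and argmax recovers the sampled label index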
labels = np.argmax(
np.random.multinomial(1, Py, n), axis=1)
for i in range(n):
# sample an example from X with replacement
idx = np.random.choice(indices_by_label[labels[i]])
Xshift[i] = X[idx]
yshift[i] = y[idx]
return Xshift, yshift
def tweak_one(X, y, num_labels, n, knockout_label, p):
# create Py
# call down to tweak_dist
Py = np.full(num_labels, (1.-p)/(num_labels-1))
Py[knockout_label] = p
print(Py)
return tweak_dist(X, y, num_labels, n, Py)
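if __name__ == "__main__":
    # Minimal self-contained demo (not part of the original module): synthetic data with
    # 10 labels; resample 500 points so that label 3 makes up only ~1% of the result.
    X_demo = np.random.rand(1000, 8)
    y_demo = np.random.randint(0, 10, size=1000)
    Xs, ys = tweak_one(X_demo, y_demo, num_labels=10, n=500, knockout_label=3, p=0.01)
    print(np.bincount(ys, minlength=10))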
|
[
"numpy.full",
"numpy.random.multinomial",
"numpy.zeros",
"numpy.random.choice"
] |
[((326, 341), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (334, 341), True, 'import numpy as np\n'), ((355, 381), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'np.int8'}), '(n, dtype=np.int8)\n', (363, 381), True, 'import numpy as np\n'), ((899, 948), 'numpy.full', 'np.full', (['num_labels', '((1.0 - p) / (num_labels - 1))'], {}), '(num_labels, (1.0 - p) / (num_labels - 1))\n', (906, 948), True, 'import numpy as np\n'), ((517, 548), 'numpy.random.multinomial', 'np.random.multinomial', (['(1)', 'Py', 'n'], {}), '(1, Py, n)\n', (538, 548), True, 'import numpy as np\n'), ((656, 701), 'numpy.random.choice', 'np.random.choice', (['indices_by_label[labels[i]]'], {}), '(indices_by_label[labels[i]])\n', (672, 701), True, 'import numpy as np\n')]
|
from selenium_controller.github import Github
from selenium_controller.gitlab import Gitlab
from utils.shell_executor.executor import execute_now
def main():
github = Github()
gitlab = Gitlab()
new_tags = list()
execute_now('git fetch --all')
gitlab_versions = gitlab.fetch_gitlab_map_versions()
github_tags = github.fetch_github_available_docker_versions()
github_tags = [t.replace('v', '').replace('.m1', '') for t in github_tags]
    # collect the GitLab versions that are not yet present in the GitHub tags (each GitHub tag embeds a GitLab version)
for gitlab_version in gitlab_versions:
if gitlab_version not in github_tags and int(gitlab_version.split('.')[0]) > 12:
new_tags.append(gitlab_version)
for tag in new_tags:
github.create_new_branch(tag)
if __name__ == "__main__":
main()
|
[
"utils.shell_executor.executor.execute_now",
"selenium_controller.gitlab.Gitlab",
"selenium_controller.github.Github"
] |
[((173, 181), 'selenium_controller.github.Github', 'Github', ([], {}), '()\n', (179, 181), False, 'from selenium_controller.github import Github\n'), ((195, 203), 'selenium_controller.gitlab.Gitlab', 'Gitlab', ([], {}), '()\n', (201, 203), False, 'from selenium_controller.gitlab import Gitlab\n'), ((232, 262), 'utils.shell_executor.executor.execute_now', 'execute_now', (['"""git fetch --all"""'], {}), "('git fetch --all')\n", (243, 262), False, 'from utils.shell_executor.executor import execute_now\n')]
|
# -*-coding:utf-8 -*-
from openstack import connection
# create connection
username = "xxxxxx"
password = "<PASSWORD>"
projectId = "xxxxxxxxxxxxxxxxxxxxxxxxxxxx" # tenant ID
userDomainId = "xxxxxxxxxxxxxxxxxxxxxxxxxxxx" # user account ID
auth_url = "xxxxxxxxxxxxxxxxxxxxxxxxxxxx" # endpoint url
conn = connection.Connection(auth_url=auth_url,
user_domain_id=userDomainId,
project_id=projectId,
username=username,
password=password)
def create_server_tags(server_id):
data = {
"tags": [
{
"key": "key1",
"value": "value1"
},
{
"key": "key2",
"value": "value3"
}
]
}
conn.ecs.create_server_tags(server_id, **data)
def delete_server_tags(server_id):
data = {
"tags": [
{
"key": "key1",
"value": "value1"
}
]
}
conn.ecs.delete_server_tags(server_id, **data)
def get_server_tags(server_id):
tags = conn.ecs.get_server_tags(server_id)
for tag in tags:
print(tag.key, tag.value)
def get_project_tags():
tags = conn.ecs.get_project_tags()
for tag in tags:
print(tag.key, tag.values)
if __name__ == "__main__":
server_id = "b0a9d2b4-2cae-4b66-a6ba-6af70f3bd7f8"
create_server_tags(server_id)
get_server_tags(server_id)
delete_server_tags(server_id)
get_project_tags()
|
[
"openstack.connection.Connection"
] |
[((312, 445), 'openstack.connection.Connection', 'connection.Connection', ([], {'auth_url': 'auth_url', 'user_domain_id': 'userDomainId', 'project_id': 'projectId', 'username': 'username', 'password': 'password'}), '(auth_url=auth_url, user_domain_id=userDomainId,\n project_id=projectId, username=username, password=password)\n', (333, 445), False, 'from openstack import connection\n')]
|
#!/usr/bin/env python3
import os
from subprocess import CalledProcessError, run
from typing import Dict, List, Union
import json
from pathlib import Path
import click
__dir__ = Path(__file__).parent.absolute()
def github_repo_name() -> str:
if repo_full := os.environ.get("GITHUB_REPOSITORY"):
return repo_full.split("/")[1]
else:
return ""
def git_list_changes() -> List[str]:
return run(
["git", "log", "-1", "--name-only", "--pretty="],
check=True,
capture_output=True,
text=True,
).stdout.splitlines()
def git_branch_name() -> str:
if fullref := os.environ.get("GITHUB_REF", ""):
return fullref[len("refs/heads/") :]
else:
return ""
def target_branch() -> str:
if git_branch_name() == "staging":
return "release"
else:
return "staging"
def git_commit_title() -> str:
return run(
["git", "log", "-1", r"--pretty=format:%s"],
check=True,
capture_output=True,
text=True,
).stdout.splitlines()[0]
def git_short_sha() -> str:
if fullsha := os.environ.get("GITHUB_SHA", ""):
return fullsha[:7]
else:
return ""
def is_dev_branch() -> bool:
return git_branch_name() not in ["release", "staging"]
def ci_yaml_changed() -> bool:
return ".github/workflows/ci.yml" in git_list_changes()
def docker_tag() -> str:
return f"{git_branch_name()}-{git_short_sha()}"
def docker_stack_name() -> str:
return f"{github_repo_name()}-{git_branch_name()}-{git_short_sha()}"
def should_upload_package() -> bool:
return git_branch_name() == "release"
def should_upload_image() -> bool:
return git_branch_name() in ["release", "staging"]
def package_version() -> str:
with open("package.json", "rb") as content:
package = json.load(content)
return package["version"]
def pr_body() -> str:
if target_branch() == "staging":
return 'To merge into the staging branch, please use "Rebase and merge", or "Squash and merge".'
    elif target_branch() == "release":
return 'To merge into the release branch, please use "Create a merge commit".'
return ""
def overwrite_path() -> str:
return ":".join(
[
str(__dir__),
os.environ["PATH"],
]
)
def get_env() -> Dict[str, Union[str, bool]]:
return {
"PROJECT_NAME": github_repo_name(),
"DOCKER_TAG": docker_tag(),
"CI_YAML_CHANGED": ci_yaml_changed(),
"IS_DEV_BRANCH": is_dev_branch(),
"BRANCH_NAME": git_branch_name(),
"TARGET_BRANCH": target_branch(),
"COMMIT_TITLE": git_commit_title(),
"SHOULD_UPLOAD_PACKAGE": should_upload_package(),
"SHOULD_UPLOAD_IMAGE": should_upload_image(),
"PACKAGE_VERSION": package_version(),
"PATH": overwrite_path(),
"PR_BODY": pr_body(),
}
@click.command()
@click.option("-w", "--write", is_flag=True)
def main(write):
content = ""
for key, val in get_env().items():
if write:
content += f"{key}={val}\n"
else:
content += f"{key}={val.__repr__()}\n"
if write:
with open(os.environ["GITHUB_ENV"], "a") as env_file:
env_file.write(content)
else:
print(content, end="")
if __name__ == "__main__":
try:
main()
except CalledProcessError as err:
exit(err.stdout + err.stderr)
|
[
"subprocess.run",
"json.load",
"click.option",
"click.command",
"os.environ.get",
"pathlib.Path"
] |
[((2916, 2931), 'click.command', 'click.command', ([], {}), '()\n', (2929, 2931), False, 'import click\n'), ((2933, 2976), 'click.option', 'click.option', (['"""-w"""', '"""--write"""'], {'is_flag': '(True)'}), "('-w', '--write', is_flag=True)\n", (2945, 2976), False, 'import click\n'), ((266, 301), 'os.environ.get', 'os.environ.get', (['"""GITHUB_REPOSITORY"""'], {}), "('GITHUB_REPOSITORY')\n", (280, 301), False, 'import os\n'), ((627, 659), 'os.environ.get', 'os.environ.get', (['"""GITHUB_REF"""', '""""""'], {}), "('GITHUB_REF', '')\n", (641, 659), False, 'import os\n'), ((1110, 1142), 'os.environ.get', 'os.environ.get', (['"""GITHUB_SHA"""', '""""""'], {}), "('GITHUB_SHA', '')\n", (1124, 1142), False, 'import os\n'), ((1839, 1857), 'json.load', 'json.load', (['content'], {}), '(content)\n', (1848, 1857), False, 'import json\n'), ((180, 194), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (184, 194), False, 'from pathlib import Path\n'), ((420, 521), 'subprocess.run', 'run', (["['git', 'log', '-1', '--name-only', '--pretty=']"], {'check': '(True)', 'capture_output': '(True)', 'text': '(True)'}), "(['git', 'log', '-1', '--name-only', '--pretty='], check=True,\n capture_output=True, text=True)\n", (423, 521), False, 'from subprocess import CalledProcessError, run\n'), ((907, 1003), 'subprocess.run', 'run', (["['git', 'log', '-1', '--pretty=format:%s']"], {'check': '(True)', 'capture_output': '(True)', 'text': '(True)'}), "(['git', 'log', '-1', '--pretty=format:%s'], check=True, capture_output=\n True, text=True)\n", (910, 1003), False, 'from subprocess import CalledProcessError, run\n')]
|
from django.db import models
from django.db.models.signals import post_save
from . import tasks
### Define Querysets
class TwitterProfileQuerySet(models.QuerySet):
def search(self, query):
return self.filter(name__icontains=query)
class TaskQuerySet(models.QuerySet):
def search(self, query):
return self.filter(query__icontains=query)
def pending(self):
return self.filter(status='PD')
def done(self):
return self.filter(status='DN')
### Define Models
class TwitterProfile(models.Model):
class Meta:
ordering = ('popularity', 'name')
tw_id = models.PositiveIntegerField(unique=True)
name = models.CharField(max_length=200)
description = models.TextField(blank=True, null=True)
image = models.URLField(blank=True, null=True)
popularity = models.PositiveIntegerField(blank=True, default=0)
objects = models.Manager()
custom = TwitterProfileQuerySet.as_manager()
    def __str__(self):
        return self.name
def update_(self, tw_user):
update_fields = []
if self.name != tw_user.name:
self.name = tw_user.name
update_fields.append('name')
if self.description != tw_user.description:
self.description = tw_user.description
update_fields.append('description')
if self.image != tw_user.profile_image_url:
self.image = tw_user.profile_image_url
update_fields.append('image')
if self.popularity != tw_user.followers_count:
self.popularity = tw_user.followers_count
update_fields.append('popularity')
if update_fields:
self.save(update_fields=update_fields)
class Task(models.Model):
class Meta:
ordering = ('query', )
PENDING = 'PD'
DONE = 'DN'
STATUS = (
(PENDING, 'Pending'),
(DONE, 'Done')
)
query = models.CharField(max_length=100)
status = models.CharField(max_length=2, choices=STATUS, default=PENDING)
objects = models.Manager()
custom = TaskQuerySet.as_manager()
def __str__(self):
return "%s -> Status: %s" % (self.query, self.get_status_display())
def update_to_done(self):
        if self.status != self.DONE:
self.status = self.DONE
self.save()
@staticmethod
def run(**kwargs):
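        # post_save receiver: queue the twitter_scraper task for newly created Task objects (or when triggered explicitly via "from_view")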
if kwargs.get('created', False) or 'from_view' in kwargs:
tasks.twitter_scraper.delay(kwargs['instance'].id)
# Signals
post_save.connect(Task.run, Task)
|
[
"django.db.models.URLField",
"django.db.models.TextField",
"django.db.models.CharField",
"django.db.models.PositiveIntegerField",
"django.db.models.Manager",
"django.db.models.signals.post_save.connect"
] |
[((2508, 2541), 'django.db.models.signals.post_save.connect', 'post_save.connect', (['Task.run', 'Task'], {}), '(Task.run, Task)\n', (2525, 2541), False, 'from django.db.models.signals import post_save\n'), ((619, 659), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'unique': '(True)'}), '(unique=True)\n', (646, 659), False, 'from django.db import models\n'), ((671, 703), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (687, 703), False, 'from django.db import models\n'), ((722, 761), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (738, 761), False, 'from django.db import models\n'), ((774, 812), 'django.db.models.URLField', 'models.URLField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (789, 812), False, 'from django.db import models\n'), ((830, 880), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'blank': '(True)', 'default': '(0)'}), '(blank=True, default=0)\n', (857, 880), False, 'from django.db import models\n'), ((896, 912), 'django.db.models.Manager', 'models.Manager', ([], {}), '()\n', (910, 912), False, 'from django.db import models\n'), ((1910, 1942), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1926, 1942), False, 'from django.db import models\n'), ((1956, 2019), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(2)', 'choices': 'STATUS', 'default': 'PENDING'}), '(max_length=2, choices=STATUS, default=PENDING)\n', (1972, 2019), False, 'from django.db import models\n'), ((2035, 2051), 'django.db.models.Manager', 'models.Manager', ([], {}), '()\n', (2049, 2051), False, 'from django.db import models\n')]
|
from __future__ import print_function
import FWCore.ParameterSet.Config as cms
from Configuration.AlCa.autoCond import autoCond
process = cms.Process("TEST")
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(100) )
process.source = cms.Source("EmptyIOVSource",
lastValue = cms.uint64(3),
timetype = cms.string('runnumber'),
firstValue = cms.uint64(1),
interval = cms.uint64(1)
)
from CondCore.ESSources.GlobalTag import GlobalTag
# Prepare the list of globalTags
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
globalTag = GlobalTag(autoCond['run2_data'],"frontier://FrontierProd/CMS_CONDITIONS")
process.GlobalTag.connect = cms.string(globalTag.connect())
process.GlobalTag.globaltag = globalTag.gt()
print("Final connection string =", process.GlobalTag.connect)
print("Final globalTag =", process.GlobalTag.globaltag)
process.path = cms.Path()
|
[
"FWCore.ParameterSet.Config.string",
"FWCore.ParameterSet.Config.untracked.int32",
"FWCore.ParameterSet.Config.uint64",
"CondCore.ESSources.GlobalTag.GlobalTag",
"FWCore.ParameterSet.Config.Process",
"FWCore.ParameterSet.Config.Path"
] |
[((140, 159), 'FWCore.ParameterSet.Config.Process', 'cms.Process', (['"""TEST"""'], {}), "('TEST')\n", (151, 159), True, 'import FWCore.ParameterSet.Config as cms\n'), ((737, 811), 'CondCore.ESSources.GlobalTag.GlobalTag', 'GlobalTag', (["autoCond['run2_data']", '"""frontier://FrontierProd/CMS_CONDITIONS"""'], {}), "(autoCond['run2_data'], 'frontier://FrontierProd/CMS_CONDITIONS')\n", (746, 811), False, 'from CondCore.ESSources.GlobalTag import GlobalTag\n'), ((1052, 1062), 'FWCore.ParameterSet.Config.Path', 'cms.Path', ([], {}), '()\n', (1060, 1062), True, 'import FWCore.ParameterSet.Config as cms\n'), ((209, 233), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', (['(100)'], {}), '(100)\n', (228, 233), True, 'import FWCore.ParameterSet.Config as cms\n'), ((327, 340), 'FWCore.ParameterSet.Config.uint64', 'cms.uint64', (['(3)'], {}), '(3)\n', (337, 340), True, 'import FWCore.ParameterSet.Config as cms\n'), ((385, 408), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""runnumber"""'], {}), "('runnumber')\n", (395, 408), True, 'import FWCore.ParameterSet.Config as cms\n'), ((455, 468), 'FWCore.ParameterSet.Config.uint64', 'cms.uint64', (['(1)'], {}), '(1)\n', (465, 468), True, 'import FWCore.ParameterSet.Config as cms\n'), ((513, 526), 'FWCore.ParameterSet.Config.uint64', 'cms.uint64', (['(1)'], {}), '(1)\n', (523, 526), True, 'import FWCore.ParameterSet.Config as cms\n')]
|
from django.contrib.messages.views import SuccessMessageMixin
from django.db.models import Q
from django.shortcuts import render
from django.views.generic import DetailView, ListView, UpdateView
from .forms import StaffUpdateForm
from .models import Staff
class SearchSearchView(ListView):
model = Staff
paginate_by = 10
queryset = Staff.objects.all()
def get_queryset(self):
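        # OR together case-insensitive "contains" filters across the searchable Staff fields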
query = self.request.GET.get("q")
if query:
return Staff.objects.filter(
Q(supplier_name__icontains=query)
| Q(tags__icontains=query)
| Q(email__icontains=query)
| Q(phone__icontains=query)
| Q(description__icontains=query)
| Q(address__icontains=query)
| Q(district__icontains=query)
)
else:
return Staff.objects.all()
class StaffListView(ListView):
model = Staff
class StaffDetailView(DetailView):
model = Staff
class StaffUpdateView(UpdateView):
model = Staff
form_class = StaffUpdateForm
template_name_suffix = "_update_form"
|
[
"django.db.models.Q"
] |
[((792, 820), 'django.db.models.Q', 'Q', ([], {'district__icontains': 'query'}), '(district__icontains=query)\n', (793, 820), False, 'from django.db.models import Q\n'), ((746, 773), 'django.db.models.Q', 'Q', ([], {'address__icontains': 'query'}), '(address__icontains=query)\n', (747, 773), False, 'from django.db.models import Q\n'), ((696, 727), 'django.db.models.Q', 'Q', ([], {'description__icontains': 'query'}), '(description__icontains=query)\n', (697, 727), False, 'from django.db.models import Q\n'), ((652, 677), 'django.db.models.Q', 'Q', ([], {'phone__icontains': 'query'}), '(phone__icontains=query)\n', (653, 677), False, 'from django.db.models import Q\n'), ((608, 633), 'django.db.models.Q', 'Q', ([], {'email__icontains': 'query'}), '(email__icontains=query)\n', (609, 633), False, 'from django.db.models import Q\n'), ((513, 546), 'django.db.models.Q', 'Q', ([], {'supplier_name__icontains': 'query'}), '(supplier_name__icontains=query)\n', (514, 546), False, 'from django.db.models import Q\n'), ((565, 589), 'django.db.models.Q', 'Q', ([], {'tags__icontains': 'query'}), '(tags__icontains=query)\n', (566, 589), False, 'from django.db.models import Q\n')]
|
#
# Copyright 2015-2016 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
import json
import subprocess
import codecs
import traceback
import flask
from gevent.wsgi import WSGIServer
proxy = flask.Flask(__name__)
proxy.debug = False
@proxy.route("/init", methods=['POST'])
def init():
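    # the action code is stored on the flask module attribute "g", used here as a crude process-wide global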
flask.g = None
payload = flask.request.get_json(force=True,silent=True)
if not payload or not isinstance(payload, dict):
flask.abort(403)
message = payload.get("value", {})
if "code" in message:
# store the code
flask.g = message["code"]
return ('OK', 200)
else:
flask.abort(403)
@proxy.route("/run", methods=['POST'])
def run():
message = flask.request.get_json(force=True,silent=True)
if not message or not isinstance(message, dict):
flask.abort(403)
if not "value" in message:
flask.abort(403)
value = message["value"]
if not isinstance(value, dict):
flask.abort(403)
# initialize the namespace for the execution
namespace = {}
result = None
try:
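        # run the stored action code, bind the request payload as "param", then call main(param)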
exec(flask.g, namespace)
exec("param = " + json.dumps(value), namespace)
exec("fun = main(param)", namespace)
result = namespace['fun']
except Exception:
traceback.print_exc(file = sys.stderr)
sys.stdout.flush()
sys.stderr.flush()
if result and isinstance(result, dict):
response = flask.jsonify(result)
response.status_code = 200
return response
else:
response = flask.jsonify({ "error": "the action did not return a dictionary", "action_output": result })
response.status_code = 502
return response
# start server in a forever loop
if __name__ == "__main__":
PORT = int(os.getenv("FLASK_PROXY_PORT", 8080))
server = WSGIServer(('', PORT), proxy, log=None)
server.serve_forever()
|
[
"traceback.print_exc",
"flask.Flask",
"flask.abort",
"gevent.wsgi.WSGIServer",
"json.dumps",
"flask.jsonify",
"sys.stdout.flush",
"sys.stderr.flush",
"os.getenv",
"flask.request.get_json"
] |
[((724, 745), 'flask.Flask', 'flask.Flask', (['__name__'], {}), '(__name__)\n', (735, 745), False, 'import flask\n'), ((852, 899), 'flask.request.get_json', 'flask.request.get_json', ([], {'force': '(True)', 'silent': '(True)'}), '(force=True, silent=True)\n', (874, 899), False, 'import flask\n'), ((1229, 1276), 'flask.request.get_json', 'flask.request.get_json', ([], {'force': '(True)', 'silent': '(True)'}), '(force=True, silent=True)\n', (1251, 1276), False, 'import flask\n'), ((1841, 1859), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1857, 1859), False, 'import sys\n'), ((1864, 1882), 'sys.stderr.flush', 'sys.stderr.flush', ([], {}), '()\n', (1880, 1882), False, 'import sys\n'), ((2336, 2375), 'gevent.wsgi.WSGIServer', 'WSGIServer', (["('', PORT)", 'proxy'], {'log': 'None'}), "(('', PORT), proxy, log=None)\n", (2346, 2375), False, 'from gevent.wsgi import WSGIServer\n'), ((960, 976), 'flask.abort', 'flask.abort', (['(403)'], {}), '(403)\n', (971, 976), False, 'import flask\n'), ((1147, 1163), 'flask.abort', 'flask.abort', (['(403)'], {}), '(403)\n', (1158, 1163), False, 'import flask\n'), ((1338, 1354), 'flask.abort', 'flask.abort', (['(403)'], {}), '(403)\n', (1349, 1354), False, 'import flask\n'), ((1395, 1411), 'flask.abort', 'flask.abort', (['(403)'], {}), '(403)\n', (1406, 1411), False, 'import flask\n'), ((1487, 1503), 'flask.abort', 'flask.abort', (['(403)'], {}), '(403)\n', (1498, 1503), False, 'import flask\n'), ((1946, 1967), 'flask.jsonify', 'flask.jsonify', (['result'], {}), '(result)\n', (1959, 1967), False, 'import flask\n'), ((2056, 2151), 'flask.jsonify', 'flask.jsonify', (["{'error': 'the action did not return a dictionary', 'action_output': result}"], {}), "({'error': 'the action did not return a dictionary',\n 'action_output': result})\n", (2069, 2151), False, 'import flask\n'), ((2286, 2321), 'os.getenv', 'os.getenv', (['"""FLASK_PROXY_PORT"""', '(8080)'], {}), "('FLASK_PROXY_PORT', 8080)\n", (2295, 2321), False, 'import os\n'), ((1798, 1834), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'sys.stderr'}), '(file=sys.stderr)\n', (1817, 1834), False, 'import traceback\n'), ((1659, 1676), 'json.dumps', 'json.dumps', (['value'], {}), '(value)\n', (1669, 1676), False, 'import json\n')]
|
# +
from model import common
import torch.nn as nn
import torch
from torch.autograd import Variable
import numpy.random as npr
import numpy as np
import torch.nn.functional as F
import random
import math
def make_model(args, parent=False):
return SRResNet(args)
class SRResNet(nn.Module):
def __init__(self, args, conv=common.default_conv):
super(SRResNet, self).__init__()
n_resblocks = 5
n_feats = 64
kernel_size = 3
scale = args.scale[0]
act = nn.PReLU()
self.sub_mean = common.MeanShift(args.rgb_range)
self.add_mean = common.MeanShift(args.rgb_range, sign=1)
# define head module
m_head = [
nn.Conv2d(3, 64, kernel_size=9, padding=4),
act
]
# define body module
m_body = [
common.ResBlock(
conv, n_feats, kernel_size, bn=True, act=act, res_scale=args.res_scale
) for _ in range(n_resblocks)
]
m_body.append(conv(n_feats, n_feats, kernel_size))
m_body.append(nn.BatchNorm2d(n_feats))
# define tail module
m_tail = [
common.Upsampler(conv, scale, n_feats, act='prelu'),
nn.Conv2d(n_feats, 3, kernel_size=9, padding=4)
]
self.head = nn.Sequential(*m_head)
self.body = nn.Sequential(*m_body)
self.tail = nn.Sequential(*m_tail)
def forward(self, x, flag=False, hr=None):
x = self.sub_mean(x)
x = self.head(x)
res = self.body(x)
res += x
x = self.tail[0](res)
if flag:
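            # gradient-guided dropout: rank the features entering the final conv by the
            # gradient of an L1(sr, hr) loss, zero out the most salient spatial positions
            # or channels, and apply the resulting mask to only part of the batch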
self.eval()
x_new = x.clone().detach()
x_new = Variable(x_new.data, requires_grad=True).cuda()
num_batch, num_channel, H, W = x_new.shape
HW = H*W
sr = self.tail[-1](x_new)
criterion = nn.L1Loss()
loss = criterion(sr, hr)
self.zero_grad()
loss.backward()
grads_val = x_new.grad.clone().detach()
grad_channel_mean = torch.mean(grads_val.view(num_batch, num_channel, -1), dim=2)
channel_mean = grad_channel_mean
grad_channel_mean = grad_channel_mean.view(num_batch, num_channel, 1, 1)
spatial_mean = torch.sum(x_new * grad_channel_mean, 1)
spatial_mean = spatial_mean.view(num_batch, HW)
self.zero_grad()
choose_one = random.randint(0,9)
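            # pick spatial dropout or channel dropout with equal probability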
if choose_one <= 4:
# ---------------------------- spatial -----------------------
spatial_drop_num = math.ceil(HW * 1 / 3.0)
th18_mask_value = torch.sort(spatial_mean, dim=1, descending=True)[0][:, spatial_drop_num]
                th18_mask_value = th18_mask_value.view(num_batch, 1).expand(num_batch, HW)
mask_all_cuda = torch.where(spatial_mean > th18_mask_value, torch.zeros(spatial_mean.shape).cuda(),
torch.ones(spatial_mean.shape).cuda())
                mask_all = mask_all_cuda.reshape(num_batch, H, W).view(num_batch, 1, H, W)
else:
# -------------------------- channel ----------------------------
vector_thresh_percent = math.ceil(num_channel * 1 / 3.2)
vector_thresh_value = torch.sort(channel_mean, dim=1, descending=True)[0][:, vector_thresh_percent]
vector_thresh_value = vector_thresh_value.view(num_batch, 1).expand(num_batch, num_channel)
vector = torch.where(channel_mean > vector_thresh_value,
torch.zeros(channel_mean.shape).cuda(),
torch.ones(channel_mean.shape).cuda())
mask_all = vector.view(num_batch, num_channel, 1, 1)
mask_all[int(num_batch/3):,:,:,:] = 1
self.train()
mask_all = Variable(mask_all, requires_grad=True)
x = x * mask_all
x = self.tail[-1](x)
x = self.add_mean(x)
return x
def load_state_dict(self, state_dict, strict=True):
own_state = self.state_dict()
for name, param in state_dict.items():
if name in own_state:
if isinstance(param, nn.Parameter):
param = param.data
try:
own_state[name].copy_(param)
except Exception:
if name.find('tail') == -1:
raise RuntimeError('While copying the parameter named {}, '
'whose dimensions in the model are {} and '
'whose dimensions in the checkpoint are {}.'
.format(name, own_state[name].size(), param.size()))
elif strict:
if name.find('tail') == -1:
raise KeyError('unexpected key "{}" in state_dict'
.format(name))
|
[
"torch.nn.PReLU",
"torch.ones",
"random.randint",
"torch.nn.Sequential",
"model.common.ResBlock",
"torch.nn.L1Loss",
"torch.autograd.Variable",
"torch.nn.Conv2d",
"math.ceil",
"torch.nn.BatchNorm2d",
"model.common.Upsampler",
"torch.zeros",
"torch.sum",
"torch.sort",
"model.common.MeanShift"
] |
[((508, 518), 'torch.nn.PReLU', 'nn.PReLU', ([], {}), '()\n', (516, 518), True, 'import torch.nn as nn\n'), ((552, 584), 'model.common.MeanShift', 'common.MeanShift', (['args.rgb_range'], {}), '(args.rgb_range)\n', (568, 584), False, 'from model import common\n'), ((609, 649), 'model.common.MeanShift', 'common.MeanShift', (['args.rgb_range'], {'sign': '(1)'}), '(args.rgb_range, sign=1)\n', (625, 649), False, 'from model import common\n'), ((1309, 1331), 'torch.nn.Sequential', 'nn.Sequential', (['*m_head'], {}), '(*m_head)\n', (1322, 1331), True, 'import torch.nn as nn\n'), ((1352, 1374), 'torch.nn.Sequential', 'nn.Sequential', (['*m_body'], {}), '(*m_body)\n', (1365, 1374), True, 'import torch.nn as nn\n'), ((1395, 1417), 'torch.nn.Sequential', 'nn.Sequential', (['*m_tail'], {}), '(*m_tail)\n', (1408, 1417), True, 'import torch.nn as nn\n'), ((711, 753), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(64)'], {'kernel_size': '(9)', 'padding': '(4)'}), '(3, 64, kernel_size=9, padding=4)\n', (720, 753), True, 'import torch.nn as nn\n'), ((842, 934), 'model.common.ResBlock', 'common.ResBlock', (['conv', 'n_feats', 'kernel_size'], {'bn': '(True)', 'act': 'act', 'res_scale': 'args.res_scale'}), '(conv, n_feats, kernel_size, bn=True, act=act, res_scale=\n args.res_scale)\n', (857, 934), False, 'from model import common\n'), ((1079, 1102), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['n_feats'], {}), '(n_feats)\n', (1093, 1102), True, 'import torch.nn as nn\n'), ((1165, 1216), 'model.common.Upsampler', 'common.Upsampler', (['conv', 'scale', 'n_feats'], {'act': '"""prelu"""'}), "(conv, scale, n_feats, act='prelu')\n", (1181, 1216), False, 'from model import common\n'), ((1230, 1277), 'torch.nn.Conv2d', 'nn.Conv2d', (['n_feats', '(3)'], {'kernel_size': '(9)', 'padding': '(4)'}), '(n_feats, 3, kernel_size=9, padding=4)\n', (1239, 1277), True, 'import torch.nn as nn\n'), ((1912, 1923), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (1921, 1923), True, 'import torch.nn as nn\n'), ((2334, 2373), 'torch.sum', 'torch.sum', (['(x_new * grad_channel_mean)', '(1)'], {}), '(x_new * grad_channel_mean, 1)\n', (2343, 2373), False, 'import torch\n'), ((2501, 2521), 'random.randint', 'random.randint', (['(0)', '(9)'], {}), '(0, 9)\n', (2515, 2521), False, 'import random\n'), ((3972, 4010), 'torch.autograd.Variable', 'Variable', (['mask_all'], {'requires_grad': '(True)'}), '(mask_all, requires_grad=True)\n', (3980, 4010), False, 'from torch.autograd import Variable\n'), ((2667, 2690), 'math.ceil', 'math.ceil', (['(HW * 1 / 3.0)'], {}), '(HW * 1 / 3.0)\n', (2676, 2690), False, 'import math\n'), ((3322, 3354), 'math.ceil', 'math.ceil', (['(num_channel * 1 / 3.2)'], {}), '(num_channel * 1 / 3.2)\n', (3331, 3354), False, 'import math\n'), ((1713, 1753), 'torch.autograd.Variable', 'Variable', (['x_new.data'], {'requires_grad': '(True)'}), '(x_new.data, requires_grad=True)\n', (1721, 1753), False, 'from torch.autograd import Variable\n'), ((2725, 2773), 'torch.sort', 'torch.sort', (['spatial_mean'], {'dim': '(1)', 'descending': '(True)'}), '(spatial_mean, dim=1, descending=True)\n', (2735, 2773), False, 'import torch\n'), ((3393, 3441), 'torch.sort', 'torch.sort', (['channel_mean'], {'dim': '(1)', 'descending': '(True)'}), '(channel_mean, dim=1, descending=True)\n', (3403, 3441), False, 'import torch\n'), ((2968, 2999), 'torch.zeros', 'torch.zeros', (['spatial_mean.shape'], {}), '(spatial_mean.shape)\n', (2979, 2999), False, 'import torch\n'), ((3052, 3082), 'torch.ones', 'torch.ones', (['spatial_mean.shape'], {}), 
'(spatial_mean.shape)\n', (3062, 3082), False, 'import torch\n'), ((3689, 3720), 'torch.zeros', 'torch.zeros', (['channel_mean.shape'], {}), '(channel_mean.shape)\n', (3700, 3720), False, 'import torch\n'), ((3766, 3796), 'torch.ones', 'torch.ones', (['channel_mean.shape'], {}), '(channel_mean.shape)\n', (3776, 3796), False, 'import torch\n')]
|
from django.http import HttpRequest
from django.test import SimpleTestCase
from django.urls import reverse
from .. import views
class HomePageTests(SimpleTestCase):
def test_home_page_status_code(self):
response = self.client.get("/")
self.assertEqual(response.status_code, 200)
def test_view_url_by_name(self):
response = self.client.get(reverse("home"))
self.assertEqual(response.status_code, 200)
def test_view_uses_correct_template(self):
response = self.client.get(reverse("home"))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "home.html")
def test_home_page_contains_correct_html(self):
response = self.client.get("/")
self.assertContains(
response, '<h1 class="display-4">Roster Wizard</h1>'
)
def test_home_page_does_not_contain_incorrect_html(self):
response = self.client.get("/")
self.assertNotContains(
response, "Hi there! I should not be on the page."
)
|
[
"django.urls.reverse"
] |
[((375, 390), 'django.urls.reverse', 'reverse', (['"""home"""'], {}), "('home')\n", (382, 390), False, 'from django.urls import reverse\n'), ((527, 542), 'django.urls.reverse', 'reverse', (['"""home"""'], {}), "('home')\n", (534, 542), False, 'from django.urls import reverse\n')]
|
"""
Test functions for GEE
External comparisons are to R. The statsmodels GEE implementation
should generally agree with the R GEE implementation for the
independence and exchangeable correlation structures. For other
correlation structures, the details of the correlation estimation
differ among implementations and the results will not agree exactly.
"""
from __future__ import print_function
from statsmodels.compat import lrange
import numpy as np
import os
from numpy.testing import assert_almost_equal
from statsmodels.genmod.generalized_estimating_equations import (GEE,
GEEMargins, Multinomial)
from statsmodels.genmod.families import Gaussian, Binomial, Poisson
from statsmodels.genmod.dependence_structures import (Exchangeable,
Independence, GlobalOddsRatio, Autoregressive, Nested)
import pandas as pd
import statsmodels.formula.api as sm
def load_data(fname, icept=True):
"""
Load a data set from the results directory. The data set should
be a CSV file with the following format:
Column 0: Group indicator
Column 1: endog variable
Columns 2-end: exog variables
If `icept` is True, an intercept is prepended to the exog
variables.
"""
cur_dir = os.path.dirname(os.path.abspath(__file__))
Z = np.genfromtxt(os.path.join(cur_dir, 'results', fname),
delimiter=",")
group = Z[:,0]
endog = Z[:,1]
exog = Z[:,2:]
if icept:
exog = np.concatenate((np.ones((exog.shape[0],1)), exog),
axis=1)
return endog,exog,group
class TestGEE(object):
def test_margins(self):
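        # simulate clustered binary responses, fit a binomial GEE with exchangeable correlation, and smoke-test GEEMargins.summary()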
n = 300
exog = np.random.normal(size=(n, 4))
exog[:,0] = 1
exog[:,1] = 1*(exog[:,2] < 0)
group = np.kron(np.arange(n/4), np.ones(4))
time = np.zeros((n, 1))
beta = np.r_[0, 1, -1, 0.5]
lpr = np.dot(exog, beta)
prob = 1 / (1 + np.exp(-lpr))
endog = 1*(np.random.uniform(size=n) < prob)
fa = Binomial()
ex = Exchangeable()
md = GEE(endog, exog, group, time, fa, ex)
mdf = md.fit()
marg = GEEMargins(mdf, ())
marg.summary()
# This is in the release announcement for version 0.6.
def test_poisson_epil(self):
cur_dir = os.path.dirname(os.path.abspath(__file__))
fname = os.path.join(cur_dir, "results", "epil.csv")
data = pd.read_csv(fname)
fam = Poisson()
ind = Independence()
md1 = GEE.from_formula("y ~ age + trt + base", data,
groups=data["subject"], cov_struct=ind,
family=fam)
mdf1 = md1.fit()
# Coefficients should agree with GLM
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod import families
md2 = GLM.from_formula("y ~ age + trt + base", data,
family=families.Poisson())
mdf2 = md2.fit(scale="X2")
assert_almost_equal(mdf1.params, mdf2.params, decimal=6)
assert_almost_equal(mdf1.scale, mdf2.scale, decimal=6)
# TODO: why does this test fail?
def t_est_missing(self):
Y = np.random.normal(size=100)
X1 = np.random.normal(size=100)
X2 = np.random.normal(size=100)
X3 = np.random.normal(size=100)
groups = np.kron(lrange(20), np.ones(5))
Y[0] = np.nan
Y[5:7] = np.nan
X2[10:12] = np.nan
D = pd.DataFrame({"Y": Y, "X1": X1, "X2": X2, "X3": X3,
"groups": groups})
md = GEE.from_formula("Y ~ X1 + X2 + X3", D, None,
groups=D["groups"], missing='drop')
mdf = md.fit()
assert(len(md.endog) == 95)
assert(md.exog.shape) == (95,4)
def test_default_time(self):
"""
Check that the time defaults work correctly.
"""
endog,exog,group = load_data("gee_logistic_1.csv")
# Time values for the autoregressive model
T = np.zeros(len(endog))
idx = set(group)
for ii in idx:
jj = np.flatnonzero(group == ii)
T[jj] = lrange(len(jj))
family = Binomial()
va = Autoregressive()
md1 = GEE(endog, exog, group, family=family, cov_struct=va)
mdf1 = md1.fit()
md2 = GEE(endog, exog, group, time=T, family=family,
cov_struct=va)
mdf2 = md2.fit()
assert_almost_equal(mdf1.params, mdf2.params, decimal=6)
assert_almost_equal(mdf1.standard_errors(),
mdf2.standard_errors(), decimal=6)
def test_logistic(self):
"""
R code for comparing results:
library(gee)
Z = read.csv("results/gee_logistic_1.csv", header=FALSE)
Y = Z[,2]
Id = Z[,1]
X1 = Z[,3]
X2 = Z[,4]
X3 = Z[,5]
mi = gee(Y ~ X1 + X2 + X3, id=Id, family=binomial,
corstr="independence")
smi = summary(mi)
u = coefficients(smi)
cfi = paste(u[,1], collapse=",")
sei = paste(u[,4], collapse=",")
me = gee(Y ~ X1 + X2 + X3, id=Id, family=binomial,
corstr="exchangeable")
sme = summary(me)
u = coefficients(sme)
cfe = paste(u[,1], collapse=",")
see = paste(u[,4], collapse=",")
ma = gee(Y ~ X1 + X2 + X3, id=Id, family=binomial,
corstr="AR-M")
sma = summary(ma)
u = coefficients(sma)
cfa = paste(u[,1], collapse=",")
sea = paste(u[,4], collapse=",")
sprintf("cf = [[%s],[%s],[%s]]", cfi, cfe, cfa)
sprintf("se = [[%s],[%s],[%s]]", sei, see, sea)
"""
endog,exog,group = load_data("gee_logistic_1.csv")
# Time values for the autoregressive model
T = np.zeros(len(endog))
idx = set(group)
for ii in idx:
jj = np.flatnonzero(group == ii)
T[jj] = lrange(len(jj))
family = Binomial()
ve = Exchangeable()
vi = Independence()
va = Autoregressive()
# From R gee
cf = [[0.0167272965285882,1.13038654425893,
-1.86896345082962,1.09397608331333],
[0.0178982283915449,1.13118798191788,
-1.86133518416017,1.08944256230299],
[0.0109621937947958,1.13226505028438,
-1.88278757333046,1.09954623769449]]
se = [[0.127291720283049,0.166725808326067,
0.192430061340865,0.173141068839597],
[0.127045031730155,0.165470678232842,
0.192052750030501,0.173174779369249],
[0.127240302296444,0.170554083928117,
0.191045527104503,0.169776150974586]]
for j,v in enumerate((vi,ve,va)):
md = GEE(endog, exog, group, T, family, v)
mdf = md.fit()
if id(v) != id(va):
assert_almost_equal(mdf.params, cf[j], decimal=6)
assert_almost_equal(mdf.standard_errors(), se[j],
decimal=6)
# Test with formulas
D = np.concatenate((endog[:,None], group[:,None], exog[:,1:]),
axis=1)
D = pd.DataFrame(D)
D.columns = ["Y","Id",] + ["X%d" % (k+1)
for k in range(exog.shape[1]-1)]
for j,v in enumerate((vi,ve)):
md = GEE.from_formula("Y ~ X1 + X2 + X3", D, None,
groups=D.loc[:,"Id"],
family=family, cov_struct=v)
mdf = md.fit()
assert_almost_equal(mdf.params, cf[j], decimal=6)
assert_almost_equal(mdf.standard_errors(), se[j],
decimal=6)
# Check for run-time exceptions in summary
# print(mdf.summary())
def test_autoregressive(self):
dep_params_true = [0, 0.589208623896, 0.559823804948]
params_true = [[1.08043787, 1.12709319, 0.90133927],
[0.9613677, 1.05826987, 0.90832055],
[1.05370439, 0.96084864, 0.93923374]]
np.random.seed(342837482)
num_group = 100
ar_param = 0.5
k = 3
ga = Gaussian()
for gsize in 1,2,3:
ix = np.arange(gsize)[:,None] - np.arange(gsize)[None,:]
ix = np.abs(ix)
cmat = ar_param ** ix
cmat_r = np.linalg.cholesky(cmat)
endog = []
exog = []
groups = []
for i in range(num_group):
x = np.random.normal(size=(gsize,k))
exog.append(x)
expval = x.sum(1)
errors = np.dot(cmat_r, np.random.normal(size=gsize))
endog.append(expval + errors)
groups.append(i*np.ones(gsize))
endog = np.concatenate(endog)
groups = np.concatenate(groups)
exog = np.concatenate(exog, axis=0)
ar = Autoregressive()
md = GEE(endog, exog, groups, family=ga, cov_struct = ar)
mdf = md.fit()
assert_almost_equal(ar.dep_params, dep_params_true[gsize-1])
assert_almost_equal(mdf.params, params_true[gsize-1])
def test_post_estimation(self):
family = Gaussian()
endog,exog,group = load_data("gee_linear_1.csv")
ve = Exchangeable()
md = GEE(endog, exog, group, None, family, ve)
mdf = md.fit()
assert_almost_equal(np.dot(exog, mdf.params),
mdf.fittedvalues)
assert_almost_equal(endog - np.dot(exog, mdf.params),
mdf.resid)
def test_linear(self):
"""
library(gee)
Z = read.csv("results/gee_linear_1.csv", header=FALSE)
Y = Z[,2]
Id = Z[,1]
X1 = Z[,3]
X2 = Z[,4]
X3 = Z[,5]
mi = gee(Y ~ X1 + X2 + X3, id=Id, family=gaussian,
corstr="independence", tol=1e-8, maxit=100)
smi = summary(mi)
u = coefficients(smi)
cfi = paste(u[,1], collapse=",")
sei = paste(u[,4], collapse=",")
me = gee(Y ~ X1 + X2 + X3, id=Id, family=gaussian,
corstr="exchangeable", tol=1e-8, maxit=100)
sme = summary(me)
u = coefficients(sme)
cfe = paste(u[,1], collapse=",")
see = paste(u[,4], collapse=",")
sprintf("cf = [[%s],[%s]]", cfi, cfe)
sprintf("se = [[%s],[%s]]", sei, see)
"""
family = Gaussian()
endog,exog,group = load_data("gee_linear_1.csv")
vi = Independence()
ve = Exchangeable()
# From R gee
cf = [[-0.01850226507491,0.81436304278962,
-1.56167635393184,0.794239361055003],
[-0.0182920577154767,0.814898414022467,
-1.56194040106201,0.793499517527478]]
se = [[0.0440733554189401,0.0479993639119261,
0.0496045952071308,0.0479467597161284],
[0.0440369906460754,0.0480069787567662,
0.049519758758187,0.0479760443027526]]
for j,v in enumerate((vi, ve)):
md = GEE(endog, exog, group, None, family, v)
mdf = md.fit()
assert_almost_equal(mdf.params, cf[j], decimal=10)
assert_almost_equal(mdf.standard_errors(), se[j],
decimal=10)
# Test with formulas
D = np.concatenate((endog[:,None], group[:,None], exog[:,1:]),
axis=1)
D = pd.DataFrame(D)
D.columns = ["Y","Id",] + ["X%d" % (k+1)
for k in range(exog.shape[1]-1)]
for j,v in enumerate((vi,ve)):
md = GEE.from_formula("Y ~ X1 + X2 + X3", D, None,
groups=D.loc[:,"Id"],
family=family, cov_struct=v)
mdf = md.fit()
assert_almost_equal(mdf.params, cf[j], decimal=10)
assert_almost_equal(mdf.standard_errors(), se[j],
decimal=10)
def test_linear_constrained(self):
family = Gaussian()
exog = np.random.normal(size=(300,4))
exog[:,0] = 1
endog = np.dot(exog, np.r_[1, 1, 0, 0.2]) +\
np.random.normal(size=300)
group = np.kron(np.arange(100), np.r_[1,1,1])
vi = Independence()
ve = Exchangeable()
L = np.r_[[[0, 0, 0, 1]]]
R = np.r_[0,]
for j,v in enumerate((vi,ve)):
md = GEE(endog, exog, group, None, family, v,
constraint=(L,R))
mdf = md.fit()
assert_almost_equal(mdf.params[3], 0, decimal=10)
def test_nested_linear(self):
family = Gaussian()
endog,exog,group = load_data("gee_nested_linear_1.csv")
group_n = []
for i in range(endog.shape[0]//10):
group_n.extend([0,]*5)
group_n.extend([1,]*5)
group_n = np.array(group_n)[:,None]
dp = Independence()
md = GEE(endog, exog, group, None, family, dp)
mdf1 = md.fit()
# From statsmodels.GEE (not an independent test)
cf = np.r_[-0.1671073 , 1.00467426, -2.01723004, 0.97297106]
se = np.r_[0.08629606, 0.04058653, 0.04067038, 0.03777989]
assert_almost_equal(mdf1.params, cf, decimal=6)
assert_almost_equal(mdf1.standard_errors(), se,
decimal=6)
ne = Nested()
md = GEE(endog, exog, group, None, family, ne,
dep_data=group_n)
mdf2 = md.fit(start_params=mdf1.params)
# From statsmodels.GEE (not an independent test)
cf = np.r_[-0.16655319, 1.02183688, -2.00858719, 1.00101969]
se = np.r_[0.08632616, 0.02913582, 0.03114428, 0.02893991]
assert_almost_equal(mdf2.params, cf, decimal=6)
assert_almost_equal(mdf2.standard_errors(), se,
decimal=6)
def test_ordinal(self):
family = Binomial()
endog, exog, groups = load_data("gee_ordinal_1.csv",
icept=False)
v = GlobalOddsRatio("ordinal")
md = GEE(endog, exog, groups, None, family, v)
md.setup_ordinal()
mdf = md.fit()
cf = np.r_[1.09238131, 0.02148193, -0.39879146, -0.01855666,
0.02983409, 1.18123172, 0.01845318, -1.10233886]
se = np.r_[0.10878752, 0.10326078, 0.11171241, 0.05488705,
0.05995019, 0.0916574, 0.05951445, 0.08539281]
assert_almost_equal(mdf.params, cf, decimal=5)
assert_almost_equal(mdf.bse, se, decimal=5)
def test_nominal(self):
family = Multinomial(3)
endog, exog, groups = load_data("gee_nominal_1.csv",
icept=False)
# Test with independence correlation
v = Independence()
md = GEE(endog, exog, groups, None, family, v)
md.setup_nominal()
mdf1 = md.fit()
# From statsmodels.GEE (not an independent test)
cf1 = np.r_[0.44944752, 0.45569985, -0.92007064, -0.46766728]
se1 = np.r_[0.09801821, 0.07718842, 0.13229421, 0.08544553]
assert_almost_equal(mdf1.params, cf1, decimal=5)
assert_almost_equal(mdf1.standard_errors(), se1, decimal=5)
# Test with global odds ratio dependence
v = GlobalOddsRatio("nominal")
md = GEE(endog, exog, groups, None, family, v)
md.setup_nominal()
mdf2 = md.fit(start_params=mdf1.params)
# From statsmodels.GEE (not an independent test)
cf2 = np.r_[0.45397549, 0.42278345, -0.91997131, -0.50115943]
se2 = np.r_[0.09646057, 0.07405713, 0.1324629 , 0.09025019]
assert_almost_equal(mdf2.params, cf2, decimal=5)
assert_almost_equal(mdf2.standard_errors(), se2, decimal=5)
def test_poisson(self):
"""
library(gee)
Z = read.csv("results/gee_poisson_1.csv", header=FALSE)
Y = Z[,2]
Id = Z[,1]
X1 = Z[,3]
X2 = Z[,4]
X3 = Z[,5]
X4 = Z[,6]
X5 = Z[,7]
mi = gee(Y ~ X1 + X2 + X3 + X4 + X5, id=Id, family=poisson,
corstr="independence", scale.fix=TRUE)
smi = summary(mi)
u = coefficients(smi)
cfi = paste(u[,1], collapse=",")
sei = paste(u[,4], collapse=",")
me = gee(Y ~ X1 + X2 + X3 + X4 + X5, id=Id, family=poisson,
corstr="exchangeable", scale.fix=TRUE)
sme = summary(me)
u = coefficients(sme)
cfe = paste(u[,1], collapse=",")
see = paste(u[,4], collapse=",")
sprintf("cf = [[%s],[%s]]", cfi, cfe)
sprintf("se = [[%s],[%s]]", sei, see)
"""
family = Poisson()
endog,exog,group_n = load_data("gee_poisson_1.csv")
vi = Independence()
ve = Exchangeable()
# From R gee
cf = [[-0.0364450410793481,-0.0543209391301178,
0.0156642711741052,0.57628591338724,
-0.00465659951186211,-0.477093153099256],
[-0.0315615554826533,-0.0562589480840004,
0.0178419412298561,0.571512795340481,
-0.00363255566297332,-0.475971696727736]]
se = [[0.0611309237214186,0.0390680524493108,
0.0334234174505518,0.0366860768962715,
0.0304758505008105,0.0316348058881079],
[0.0610840153582275,0.0376887268649102,
0.0325168379415177,0.0369786751362213,
0.0296141014225009,0.0306115470200955]]
for j,v in enumerate((vi,ve)):
md = GEE(endog, exog, group_n, None, family, v)
mdf = md.fit()
assert_almost_equal(mdf.params, cf[j], decimal=5)
assert_almost_equal(mdf.standard_errors(), se[j],
decimal=6)
# Test with formulas
D = np.concatenate((endog[:,None], group_n[:,None],
exog[:,1:]), axis=1)
D = pd.DataFrame(D)
D.columns = ["Y","Id",] + ["X%d" % (k+1)
for k in range(exog.shape[1]-1)]
for j,v in enumerate((vi,ve)):
md = GEE.from_formula("Y ~ X1 + X2 + X3 + X4 + X5", D,
None, groups=D.loc[:,"Id"],
family=family, cov_struct=v)
mdf = md.fit()
assert_almost_equal(mdf.params, cf[j], decimal=5)
assert_almost_equal(mdf.standard_errors(), se[j],
decimal=6)
# print(mdf.params)
def test_compare_OLS(self):
"""
Gaussian GEE with independence correlation should agree
exactly with OLS for parameter estimates and standard errors
derived from the naive covariance estimate.
"""
vs = Independence()
family = Gaussian()
Y = np.random.normal(size=100)
X1 = np.random.normal(size=100)
X2 = np.random.normal(size=100)
X3 = np.random.normal(size=100)
groups = np.kron(lrange(20), np.ones(5))
D = pd.DataFrame({"Y": Y, "X1": X1, "X2": X2, "X3": X3})
md = GEE.from_formula("Y ~ X1 + X2 + X3", D, None,
groups=groups, family=family,
cov_struct=vs)
mdf = md.fit()
ols = sm.ols("Y ~ X1 + X2 + X3", data=D).fit()
assert_almost_equal(ols.params.values, mdf.params, decimal=10)
se = mdf.standard_errors(covariance_type="naive")
assert_almost_equal(ols.bse, se, decimal=10)
naive_tvalues = mdf.params / \
np.sqrt(np.diag(mdf.naive_covariance))
assert_almost_equal(naive_tvalues, ols.tvalues, decimal=10)
def test_compare_logit(self):
vs = Independence()
family = Binomial()
Y = 1*(np.random.normal(size=100) < 0)
X1 = np.random.normal(size=100)
X2 = np.random.normal(size=100)
X3 = np.random.normal(size=100)
groups = np.random.randint(0, 4, size=100)
D = pd.DataFrame({"Y": Y, "X1": X1, "X2": X2, "X3": X3})
md = GEE.from_formula("Y ~ X1 + X2 + X3", D, None, groups=groups,
family=family, cov_struct=vs).fit()
sml = sm.logit("Y ~ X1 + X2 + X3", data=D).fit(disp=False)
assert_almost_equal(sml.params.values, md.params, decimal=10)
def test_compare_poisson(self):
vs = Independence()
family = Poisson()
Y = np.ceil(-np.log(np.random.uniform(size=100)))
X1 = np.random.normal(size=100)
X2 = np.random.normal(size=100)
X3 = np.random.normal(size=100)
groups = np.random.randint(0, 4, size=100)
D = pd.DataFrame({"Y": Y, "X1": X1, "X2": X2, "X3": X3})
md = GEE.from_formula("Y ~ X1 + X2 + X3", D, None, groups=groups,
family=family, cov_struct=vs).fit()
sml = sm.poisson("Y ~ X1 + X2 + X3", data=D).fit(disp=False)
assert_almost_equal(sml.params.values, md.params, decimal=10)
if __name__=="__main__":
import nose
nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
exit=False)
|
[
"numpy.random.seed",
"numpy.abs",
"pandas.read_csv",
"numpy.ones",
"statsmodels.genmod.generalized_estimating_equations.Multinomial",
"numpy.random.randint",
"numpy.arange",
"numpy.exp",
"numpy.random.normal",
"statsmodels.genmod.families.Gaussian",
"numpy.diag",
"os.path.join",
"pandas.DataFrame",
"os.path.abspath",
"numpy.testing.assert_almost_equal",
"statsmodels.compat.lrange",
"statsmodels.genmod.dependence_structures.Autoregressive",
"statsmodels.genmod.dependence_structures.GlobalOddsRatio",
"statsmodels.formula.api.ols",
"statsmodels.genmod.generalized_estimating_equations.GEEMargins",
"numpy.linalg.cholesky",
"statsmodels.genmod.dependence_structures.Independence",
"nose.runmodule",
"statsmodels.genmod.generalized_estimating_equations.GEE.from_formula",
"numpy.dot",
"statsmodels.genmod.generalized_estimating_equations.GEE",
"numpy.concatenate",
"numpy.random.uniform",
"statsmodels.formula.api.poisson",
"statsmodels.genmod.dependence_structures.Exchangeable",
"numpy.flatnonzero",
"numpy.zeros",
"statsmodels.genmod.families.Poisson",
"numpy.array",
"statsmodels.formula.api.logit",
"statsmodels.genmod.families.Binomial",
"statsmodels.genmod.dependence_structures.Nested"
] |
[((21322, 21409), 'nose.runmodule', 'nose.runmodule', ([], {'argv': "[__file__, '-vvs', '-x', '--pdb', '--pdb-failure']", 'exit': '(False)'}), "(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],\n exit=False)\n", (21336, 21409), False, 'import nose\n'), ((1230, 1255), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1245, 1255), False, 'import os\n'), ((1279, 1318), 'os.path.join', 'os.path.join', (['cur_dir', '"""results"""', 'fname'], {}), "(cur_dir, 'results', fname)\n", (1291, 1318), False, 'import os\n'), ((1650, 1679), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(n, 4)'}), '(size=(n, 4))\n', (1666, 1679), True, 'import numpy as np\n'), ((1808, 1824), 'numpy.zeros', 'np.zeros', (['(n, 1)'], {}), '((n, 1))\n', (1816, 1824), True, 'import numpy as np\n'), ((1876, 1894), 'numpy.dot', 'np.dot', (['exog', 'beta'], {}), '(exog, beta)\n', (1882, 1894), True, 'import numpy as np\n'), ((2001, 2011), 'statsmodels.genmod.families.Binomial', 'Binomial', ([], {}), '()\n', (2009, 2011), False, 'from statsmodels.genmod.families import Gaussian, Binomial, Poisson\n'), ((2025, 2039), 'statsmodels.genmod.dependence_structures.Exchangeable', 'Exchangeable', ([], {}), '()\n', (2037, 2039), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((2054, 2091), 'statsmodels.genmod.generalized_estimating_equations.GEE', 'GEE', (['endog', 'exog', 'group', 'time', 'fa', 'ex'], {}), '(endog, exog, group, time, fa, ex)\n', (2057, 2091), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((2131, 2150), 'statsmodels.genmod.generalized_estimating_equations.GEEMargins', 'GEEMargins', (['mdf', '()'], {}), '(mdf, ())\n', (2141, 2150), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((2346, 2390), 'os.path.join', 'os.path.join', (['cur_dir', '"""results"""', '"""epil.csv"""'], {}), "(cur_dir, 'results', 'epil.csv')\n", (2358, 2390), False, 'import os\n'), ((2406, 2424), 'pandas.read_csv', 'pd.read_csv', (['fname'], {}), '(fname)\n', (2417, 2424), True, 'import pandas as pd\n'), ((2440, 2449), 'statsmodels.genmod.families.Poisson', 'Poisson', ([], {}), '()\n', (2447, 2449), False, 'from statsmodels.genmod.families import Gaussian, Binomial, Poisson\n'), ((2464, 2478), 'statsmodels.genmod.dependence_structures.Independence', 'Independence', ([], {}), '()\n', (2476, 2478), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((2493, 2595), 'statsmodels.genmod.generalized_estimating_equations.GEE.from_formula', 'GEE.from_formula', (['"""y ~ age + trt + base"""', 'data'], {'groups': "data['subject']", 'cov_struct': 'ind', 'family': 'fam'}), "('y ~ age + trt + base', data, groups=data['subject'],\n cov_struct=ind, family=fam)\n", (2509, 2595), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((3005, 3061), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mdf1.params', 'mdf2.params'], {'decimal': '(6)'}), '(mdf1.params, mdf2.params, decimal=6)\n', (3024, 3061), False, 'from numpy.testing import assert_almost_equal\n'), ((3070, 3124), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mdf1.scale', 'mdf2.scale'], {'decimal': '(6)'}), '(mdf1.scale, mdf2.scale, decimal=6)\n', (3089, 3124), False, 'from numpy.testing import 
assert_almost_equal\n'), ((3207, 3233), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100)'}), '(size=100)\n', (3223, 3233), True, 'import numpy as np\n'), ((3247, 3273), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100)'}), '(size=100)\n', (3263, 3273), True, 'import numpy as np\n'), ((3287, 3313), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100)'}), '(size=100)\n', (3303, 3313), True, 'import numpy as np\n'), ((3327, 3353), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100)'}), '(size=100)\n', (3343, 3353), True, 'import numpy as np\n'), ((3490, 3560), 'pandas.DataFrame', 'pd.DataFrame', (["{'Y': Y, 'X1': X1, 'X2': X2, 'X3': X3, 'groups': groups}"], {}), "({'Y': Y, 'X1': X1, 'X2': X2, 'X3': X3, 'groups': groups})\n", (3502, 3560), True, 'import pandas as pd\n'), ((3601, 3687), 'statsmodels.genmod.generalized_estimating_equations.GEE.from_formula', 'GEE.from_formula', (['"""Y ~ X1 + X2 + X3"""', 'D', 'None'], {'groups': "D['groups']", 'missing': '"""drop"""'}), "('Y ~ X1 + X2 + X3', D, None, groups=D['groups'], missing=\n 'drop')\n", (3617, 3687), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((4216, 4226), 'statsmodels.genmod.families.Binomial', 'Binomial', ([], {}), '()\n', (4224, 4226), False, 'from statsmodels.genmod.families import Gaussian, Binomial, Poisson\n'), ((4240, 4256), 'statsmodels.genmod.dependence_structures.Autoregressive', 'Autoregressive', ([], {}), '()\n', (4254, 4256), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((4273, 4326), 'statsmodels.genmod.generalized_estimating_equations.GEE', 'GEE', (['endog', 'exog', 'group'], {'family': 'family', 'cov_struct': 'va'}), '(endog, exog, group, family=family, cov_struct=va)\n', (4276, 4326), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((4367, 4428), 'statsmodels.genmod.generalized_estimating_equations.GEE', 'GEE', (['endog', 'exog', 'group'], {'time': 'T', 'family': 'family', 'cov_struct': 'va'}), '(endog, exog, group, time=T, family=family, cov_struct=va)\n', (4370, 4428), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((4481, 4537), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mdf1.params', 'mdf2.params'], {'decimal': '(6)'}), '(mdf1.params, mdf2.params, decimal=6)\n', (4500, 4537), False, 'from numpy.testing import assert_almost_equal\n'), ((6039, 6049), 'statsmodels.genmod.families.Binomial', 'Binomial', ([], {}), '()\n', (6047, 6049), False, 'from statsmodels.genmod.families import Gaussian, Binomial, Poisson\n'), ((6063, 6077), 'statsmodels.genmod.dependence_structures.Exchangeable', 'Exchangeable', ([], {}), '()\n', (6075, 6077), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((6091, 6105), 'statsmodels.genmod.dependence_structures.Independence', 'Independence', ([], {}), '()\n', (6103, 6105), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((6119, 6135), 'statsmodels.genmod.dependence_structures.Autoregressive', 'Autoregressive', ([], {}), '()\n', (6133, 6135), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((7163, 7232), 
'numpy.concatenate', 'np.concatenate', (['(endog[:, None], group[:, None], exog[:, 1:])'], {'axis': '(1)'}), '((endog[:, None], group[:, None], exog[:, 1:]), axis=1)\n', (7177, 7232), True, 'import numpy as np\n'), ((7269, 7284), 'pandas.DataFrame', 'pd.DataFrame', (['D'], {}), '(D)\n', (7281, 7284), True, 'import pandas as pd\n'), ((8198, 8223), 'numpy.random.seed', 'np.random.seed', (['(342837482)'], {}), '(342837482)\n', (8212, 8223), True, 'import numpy as np\n'), ((8300, 8310), 'statsmodels.genmod.families.Gaussian', 'Gaussian', ([], {}), '()\n', (8308, 8310), False, 'from statsmodels.genmod.families import Gaussian, Binomial, Poisson\n'), ((9371, 9381), 'statsmodels.genmod.families.Gaussian', 'Gaussian', ([], {}), '()\n', (9379, 9381), False, 'from statsmodels.genmod.families import Gaussian, Binomial, Poisson\n'), ((9453, 9467), 'statsmodels.genmod.dependence_structures.Exchangeable', 'Exchangeable', ([], {}), '()\n', (9465, 9467), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((9482, 9523), 'statsmodels.genmod.generalized_estimating_equations.GEE', 'GEE', (['endog', 'exog', 'group', 'None', 'family', 've'], {}), '(endog, exog, group, None, family, ve)\n', (9485, 9523), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((10612, 10622), 'statsmodels.genmod.families.Gaussian', 'Gaussian', ([], {}), '()\n', (10620, 10622), False, 'from statsmodels.genmod.families import Gaussian, Binomial, Poisson\n'), ((10695, 10709), 'statsmodels.genmod.dependence_structures.Independence', 'Independence', ([], {}), '()\n', (10707, 10709), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((10723, 10737), 'statsmodels.genmod.dependence_structures.Exchangeable', 'Exchangeable', ([], {}), '()\n', (10735, 10737), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((11527, 11596), 'numpy.concatenate', 'np.concatenate', (['(endog[:, None], group[:, None], exog[:, 1:])'], {'axis': '(1)'}), '((endog[:, None], group[:, None], exog[:, 1:]), axis=1)\n', (11541, 11596), True, 'import numpy as np\n'), ((11633, 11648), 'pandas.DataFrame', 'pd.DataFrame', (['D'], {}), '(D)\n', (11645, 11648), True, 'import pandas as pd\n'), ((12241, 12251), 'statsmodels.genmod.families.Gaussian', 'Gaussian', ([], {}), '()\n', (12249, 12251), False, 'from statsmodels.genmod.families import Gaussian, Binomial, Poisson\n'), ((12268, 12299), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(300, 4)'}), '(size=(300, 4))\n', (12284, 12299), True, 'import numpy as np\n'), ((12481, 12495), 'statsmodels.genmod.dependence_structures.Independence', 'Independence', ([], {}), '()\n', (12493, 12495), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((12509, 12523), 'statsmodels.genmod.dependence_structures.Exchangeable', 'Exchangeable', ([], {}), '()\n', (12521, 12523), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((12861, 12871), 'statsmodels.genmod.families.Gaussian', 'Gaussian', ([], {}), '()\n', (12869, 12871), False, 'from statsmodels.genmod.families import Gaussian, Binomial, Poisson\n'), ((13131, 13145), 
'statsmodels.genmod.dependence_structures.Independence', 'Independence', ([], {}), '()\n', (13143, 13145), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((13159, 13200), 'statsmodels.genmod.generalized_estimating_equations.GEE', 'GEE', (['endog', 'exog', 'group', 'None', 'family', 'dp'], {}), '(endog, exog, group, None, family, dp)\n', (13162, 13200), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((13432, 13479), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mdf1.params', 'cf'], {'decimal': '(6)'}), '(mdf1.params, cf, decimal=6)\n', (13451, 13479), False, 'from numpy.testing import assert_almost_equal\n'), ((13589, 13597), 'statsmodels.genmod.dependence_structures.Nested', 'Nested', ([], {}), '()\n', (13595, 13597), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((13611, 13670), 'statsmodels.genmod.generalized_estimating_equations.GEE', 'GEE', (['endog', 'exog', 'group', 'None', 'family', 'ne'], {'dep_data': 'group_n'}), '(endog, exog, group, None, family, ne, dep_data=group_n)\n', (13614, 13670), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((13943, 13990), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mdf2.params', 'cf'], {'decimal': '(6)'}), '(mdf2.params, cf, decimal=6)\n', (13962, 13990), False, 'from numpy.testing import assert_almost_equal\n'), ((14134, 14144), 'statsmodels.genmod.families.Binomial', 'Binomial', ([], {}), '()\n', (14142, 14144), False, 'from statsmodels.genmod.families import Gaussian, Binomial, Poisson\n'), ((14273, 14299), 'statsmodels.genmod.dependence_structures.GlobalOddsRatio', 'GlobalOddsRatio', (['"""ordinal"""'], {}), "('ordinal')\n", (14288, 14299), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((14314, 14355), 'statsmodels.genmod.generalized_estimating_equations.GEE', 'GEE', (['endog', 'exog', 'groups', 'None', 'family', 'v'], {}), '(endog, exog, groups, None, family, v)\n', (14317, 14355), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((14691, 14737), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mdf.params', 'cf'], {'decimal': '(5)'}), '(mdf.params, cf, decimal=5)\n', (14710, 14737), False, 'from numpy.testing import assert_almost_equal\n'), ((14746, 14789), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mdf.bse', 'se'], {'decimal': '(5)'}), '(mdf.bse, se, decimal=5)\n', (14765, 14789), False, 'from numpy.testing import assert_almost_equal\n'), ((14838, 14852), 'statsmodels.genmod.generalized_estimating_equations.Multinomial', 'Multinomial', (['(3)'], {}), '(3)\n', (14849, 14852), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((15026, 15040), 'statsmodels.genmod.dependence_structures.Independence', 'Independence', ([], {}), '()\n', (15038, 15040), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((15054, 15095), 'statsmodels.genmod.generalized_estimating_equations.GEE', 'GEE', (['endog', 'exog', 'groups', 'None', 'family', 'v'], {}), '(endog, exog, groups, None, family, v)\n', (15057, 15095), False, 'from 
statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((15355, 15403), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mdf1.params', 'cf1'], {'decimal': '(5)'}), '(mdf1.params, cf1, decimal=5)\n', (15374, 15403), False, 'from numpy.testing import assert_almost_equal\n'), ((15534, 15560), 'statsmodels.genmod.dependence_structures.GlobalOddsRatio', 'GlobalOddsRatio', (['"""nominal"""'], {}), "('nominal')\n", (15549, 15560), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((15574, 15615), 'statsmodels.genmod.generalized_estimating_equations.GEE', 'GEE', (['endog', 'exog', 'groups', 'None', 'family', 'v'], {}), '(endog, exog, groups, None, family, v)\n', (15577, 15615), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((15899, 15947), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mdf2.params', 'cf2'], {'decimal': '(5)'}), '(mdf2.params, cf2, decimal=5)\n', (15918, 15947), False, 'from numpy.testing import assert_almost_equal\n'), ((16924, 16933), 'statsmodels.genmod.families.Poisson', 'Poisson', ([], {}), '()\n', (16931, 16933), False, 'from statsmodels.genmod.families import Gaussian, Binomial, Poisson\n'), ((17009, 17023), 'statsmodels.genmod.dependence_structures.Independence', 'Independence', ([], {}), '()\n', (17021, 17023), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((17037, 17051), 'statsmodels.genmod.dependence_structures.Exchangeable', 'Exchangeable', ([], {}), '()\n', (17049, 17051), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((18071, 18142), 'numpy.concatenate', 'np.concatenate', (['(endog[:, None], group_n[:, None], exog[:, 1:])'], {'axis': '(1)'}), '((endog[:, None], group_n[:, None], exog[:, 1:]), axis=1)\n', (18085, 18142), True, 'import numpy as np\n'), ((18180, 18195), 'pandas.DataFrame', 'pd.DataFrame', (['D'], {}), '(D)\n', (18192, 18195), True, 'import pandas as pd\n'), ((19035, 19049), 'statsmodels.genmod.dependence_structures.Independence', 'Independence', ([], {}), '()\n', (19047, 19049), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((19067, 19077), 'statsmodels.genmod.families.Gaussian', 'Gaussian', ([], {}), '()\n', (19075, 19077), False, 'from statsmodels.genmod.families import Gaussian, Binomial, Poisson\n'), ((19091, 19117), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100)'}), '(size=100)\n', (19107, 19117), True, 'import numpy as np\n'), ((19131, 19157), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100)'}), '(size=100)\n', (19147, 19157), True, 'import numpy as np\n'), ((19171, 19197), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100)'}), '(size=100)\n', (19187, 19197), True, 'import numpy as np\n'), ((19211, 19237), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100)'}), '(size=100)\n', (19227, 19237), True, 'import numpy as np\n'), ((19300, 19352), 'pandas.DataFrame', 'pd.DataFrame', (["{'Y': Y, 'X1': X1, 'X2': X2, 'X3': X3}"], {}), "({'Y': Y, 'X1': X1, 'X2': X2, 'X3': X3})\n", (19312, 19352), True, 'import pandas as pd\n'), ((19367, 19461), 'statsmodels.genmod.generalized_estimating_equations.GEE.from_formula', 'GEE.from_formula', 
(['"""Y ~ X1 + X2 + X3"""', 'D', 'None'], {'groups': 'groups', 'family': 'family', 'cov_struct': 'vs'}), "('Y ~ X1 + X2 + X3', D, None, groups=groups, family=family,\n cov_struct=vs)\n", (19383, 19461), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((19606, 19668), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['ols.params.values', 'mdf.params'], {'decimal': '(10)'}), '(ols.params.values, mdf.params, decimal=10)\n', (19625, 19668), False, 'from numpy.testing import assert_almost_equal\n'), ((19736, 19780), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['ols.bse', 'se'], {'decimal': '(10)'}), '(ols.bse, se, decimal=10)\n', (19755, 19780), False, 'from numpy.testing import assert_almost_equal\n'), ((19880, 19939), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['naive_tvalues', 'ols.tvalues'], {'decimal': '(10)'}), '(naive_tvalues, ols.tvalues, decimal=10)\n', (19899, 19939), False, 'from numpy.testing import assert_almost_equal\n'), ((19990, 20004), 'statsmodels.genmod.dependence_structures.Independence', 'Independence', ([], {}), '()\n', (20002, 20004), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((20022, 20032), 'statsmodels.genmod.families.Binomial', 'Binomial', ([], {}), '()\n', (20030, 20032), False, 'from statsmodels.genmod.families import Gaussian, Binomial, Poisson\n'), ((20094, 20120), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100)'}), '(size=100)\n', (20110, 20120), True, 'import numpy as np\n'), ((20134, 20160), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100)'}), '(size=100)\n', (20150, 20160), True, 'import numpy as np\n'), ((20174, 20200), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100)'}), '(size=100)\n', (20190, 20200), True, 'import numpy as np\n'), ((20218, 20251), 'numpy.random.randint', 'np.random.randint', (['(0)', '(4)'], {'size': '(100)'}), '(0, 4, size=100)\n', (20235, 20251), True, 'import numpy as np\n'), ((20265, 20317), 'pandas.DataFrame', 'pd.DataFrame', (["{'Y': Y, 'X1': X1, 'X2': X2, 'X3': X3}"], {}), "({'Y': Y, 'X1': X1, 'X2': X2, 'X3': X3})\n", (20277, 20317), True, 'import pandas as pd\n'), ((20537, 20598), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sml.params.values', 'md.params'], {'decimal': '(10)'}), '(sml.params.values, md.params, decimal=10)\n', (20556, 20598), False, 'from numpy.testing import assert_almost_equal\n'), ((20651, 20665), 'statsmodels.genmod.dependence_structures.Independence', 'Independence', ([], {}), '()\n', (20663, 20665), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((20683, 20692), 'statsmodels.genmod.families.Poisson', 'Poisson', ([], {}), '()\n', (20690, 20692), False, 'from statsmodels.genmod.families import Gaussian, Binomial, Poisson\n'), ((20765, 20791), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100)'}), '(size=100)\n', (20781, 20791), True, 'import numpy as np\n'), ((20805, 20831), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100)'}), '(size=100)\n', (20821, 20831), True, 'import numpy as np\n'), ((20845, 20871), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100)'}), '(size=100)\n', (20861, 20871), True, 'import numpy as np\n'), ((20889, 20922), 'numpy.random.randint', 'np.random.randint', (['(0)', '(4)'], {'size': '(100)'}), '(0, 4, 
size=100)\n', (20906, 20922), True, 'import numpy as np\n'), ((20936, 20988), 'pandas.DataFrame', 'pd.DataFrame', (["{'Y': Y, 'X1': X1, 'X2': X2, 'X3': X3}"], {}), "({'Y': Y, 'X1': X1, 'X2': X2, 'X3': X3})\n", (20948, 20988), True, 'import pandas as pd\n'), ((21210, 21271), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sml.params.values', 'md.params'], {'decimal': '(10)'}), '(sml.params.values, md.params, decimal=10)\n', (21229, 21271), False, 'from numpy.testing import assert_almost_equal\n'), ((1765, 1781), 'numpy.arange', 'np.arange', (['(n / 4)'], {}), '(n / 4)\n', (1774, 1781), True, 'import numpy as np\n'), ((1781, 1791), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (1788, 1791), True, 'import numpy as np\n'), ((2303, 2328), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (2318, 2328), False, 'import os\n'), ((3379, 3389), 'statsmodels.compat.lrange', 'lrange', (['(20)'], {}), '(20)\n', (3385, 3389), False, 'from statsmodels.compat import lrange\n'), ((3391, 3401), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (3398, 3401), True, 'import numpy as np\n'), ((4134, 4161), 'numpy.flatnonzero', 'np.flatnonzero', (['(group == ii)'], {}), '(group == ii)\n', (4148, 4161), True, 'import numpy as np\n'), ((5957, 5984), 'numpy.flatnonzero', 'np.flatnonzero', (['(group == ii)'], {}), '(group == ii)\n', (5971, 5984), True, 'import numpy as np\n'), ((6845, 6882), 'statsmodels.genmod.generalized_estimating_equations.GEE', 'GEE', (['endog', 'exog', 'group', 'T', 'family', 'v'], {}), '(endog, exog, group, T, family, v)\n', (6848, 6882), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((7459, 7561), 'statsmodels.genmod.generalized_estimating_equations.GEE.from_formula', 'GEE.from_formula', (['"""Y ~ X1 + X2 + X3"""', 'D', 'None'], {'groups': "D.loc[:, 'Id']", 'family': 'family', 'cov_struct': 'v'}), "('Y ~ X1 + X2 + X3', D, None, groups=D.loc[:, 'Id'], family\n =family, cov_struct=v)\n", (7475, 7561), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((7667, 7716), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mdf.params', 'cf[j]'], {'decimal': '(6)'}), '(mdf.params, cf[j], decimal=6)\n', (7686, 7716), False, 'from numpy.testing import assert_almost_equal\n'), ((8427, 8437), 'numpy.abs', 'np.abs', (['ix'], {}), '(ix)\n', (8433, 8437), True, 'import numpy as np\n'), ((8493, 8517), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['cmat'], {}), '(cmat)\n', (8511, 8517), True, 'import numpy as np\n'), ((8930, 8951), 'numpy.concatenate', 'np.concatenate', (['endog'], {}), '(endog)\n', (8944, 8951), True, 'import numpy as np\n'), ((8973, 8995), 'numpy.concatenate', 'np.concatenate', (['groups'], {}), '(groups)\n', (8987, 8995), True, 'import numpy as np\n'), ((9015, 9043), 'numpy.concatenate', 'np.concatenate', (['exog'], {'axis': '(0)'}), '(exog, axis=0)\n', (9029, 9043), True, 'import numpy as np\n'), ((9062, 9078), 'statsmodels.genmod.dependence_structures.Autoregressive', 'Autoregressive', ([], {}), '()\n', (9076, 9078), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((9096, 9146), 'statsmodels.genmod.generalized_estimating_equations.GEE', 'GEE', (['endog', 'exog', 'groups'], {'family': 'ga', 'cov_struct': 'ar'}), '(endog, exog, groups, family=ga, cov_struct=ar)\n', (9099, 9146), False, 'from 
statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((9189, 9251), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['ar.dep_params', 'dep_params_true[gsize - 1]'], {}), '(ar.dep_params, dep_params_true[gsize - 1])\n', (9208, 9251), False, 'from numpy.testing import assert_almost_equal\n'), ((9262, 9317), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mdf.params', 'params_true[gsize - 1]'], {}), '(mdf.params, params_true[gsize - 1])\n', (9281, 9317), False, 'from numpy.testing import assert_almost_equal\n'), ((9576, 9600), 'numpy.dot', 'np.dot', (['exog', 'mdf.params'], {}), '(exog, mdf.params)\n', (9582, 9600), True, 'import numpy as np\n'), ((11248, 11288), 'statsmodels.genmod.generalized_estimating_equations.GEE', 'GEE', (['endog', 'exog', 'group', 'None', 'family', 'v'], {}), '(endog, exog, group, None, family, v)\n', (11251, 11288), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((11328, 11378), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mdf.params', 'cf[j]'], {'decimal': '(10)'}), '(mdf.params, cf[j], decimal=10)\n', (11347, 11378), False, 'from numpy.testing import assert_almost_equal\n'), ((11822, 11924), 'statsmodels.genmod.generalized_estimating_equations.GEE.from_formula', 'GEE.from_formula', (['"""Y ~ X1 + X2 + X3"""', 'D', 'None'], {'groups': "D.loc[:, 'Id']", 'family': 'family', 'cov_struct': 'v'}), "('Y ~ X1 + X2 + X3', D, None, groups=D.loc[:, 'Id'], family\n =family, cov_struct=v)\n", (11838, 11924), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((12026, 12076), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mdf.params', 'cf[j]'], {'decimal': '(10)'}), '(mdf.params, cf[j], decimal=10)\n', (12045, 12076), False, 'from numpy.testing import assert_almost_equal\n'), ((12337, 12370), 'numpy.dot', 'np.dot', (['exog', 'np.r_[1, 1, 0, 0.2]'], {}), '(exog, np.r_[1, 1, 0, 0.2])\n', (12343, 12370), True, 'import numpy as np\n'), ((12386, 12412), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(300)'}), '(size=300)\n', (12402, 12412), True, 'import numpy as np\n'), ((12437, 12451), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (12446, 12451), True, 'import numpy as np\n'), ((12638, 12697), 'statsmodels.genmod.generalized_estimating_equations.GEE', 'GEE', (['endog', 'exog', 'group', 'None', 'family', 'v'], {'constraint': '(L, R)'}), '(endog, exog, group, None, family, v, constraint=(L, R))\n', (12641, 12697), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((12757, 12806), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mdf.params[3]', '(0)'], {'decimal': '(10)'}), '(mdf.params[3], 0, decimal=10)\n', (12776, 12806), False, 'from numpy.testing import assert_almost_equal\n'), ((13091, 13108), 'numpy.array', 'np.array', (['group_n'], {}), '(group_n)\n', (13099, 13108), True, 'import numpy as np\n'), ((17792, 17834), 'statsmodels.genmod.generalized_estimating_equations.GEE', 'GEE', (['endog', 'exog', 'group_n', 'None', 'family', 'v'], {}), '(endog, exog, group_n, None, family, v)\n', (17795, 17834), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((17874, 17923), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mdf.params', 'cf[j]'], {'decimal': '(5)'}), '(mdf.params, cf[j], decimal=5)\n', (17893, 17923), 
False, 'from numpy.testing import assert_almost_equal\n'), ((18370, 18481), 'statsmodels.genmod.generalized_estimating_equations.GEE.from_formula', 'GEE.from_formula', (['"""Y ~ X1 + X2 + X3 + X4 + X5"""', 'D', 'None'], {'groups': "D.loc[:, 'Id']", 'family': 'family', 'cov_struct': 'v'}), "('Y ~ X1 + X2 + X3 + X4 + X5', D, None, groups=D.loc[:,\n 'Id'], family=family, cov_struct=v)\n", (18386, 18481), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((18588, 18637), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mdf.params', 'cf[j]'], {'decimal': '(5)'}), '(mdf.params, cf[j], decimal=5)\n', (18607, 18637), False, 'from numpy.testing import assert_almost_equal\n'), ((19263, 19273), 'statsmodels.compat.lrange', 'lrange', (['(20)'], {}), '(20)\n', (19269, 19273), False, 'from statsmodels.compat import lrange\n'), ((19275, 19285), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (19282, 19285), True, 'import numpy as np\n'), ((1461, 1488), 'numpy.ones', 'np.ones', (['(exog.shape[0], 1)'], {}), '((exog.shape[0], 1))\n', (1468, 1488), True, 'import numpy as np\n'), ((1919, 1931), 'numpy.exp', 'np.exp', (['(-lpr)'], {}), '(-lpr)\n', (1925, 1931), True, 'import numpy as np\n'), ((1953, 1978), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'n'}), '(size=n)\n', (1970, 1978), True, 'import numpy as np\n'), ((2941, 2959), 'statsmodels.genmod.families.Poisson', 'families.Poisson', ([], {}), '()\n', (2957, 2959), False, 'from statsmodels.genmod import families\n'), ((6958, 7007), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mdf.params', 'cf[j]'], {'decimal': '(6)'}), '(mdf.params, cf[j], decimal=6)\n', (6977, 7007), False, 'from numpy.testing import assert_almost_equal\n'), ((8647, 8680), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(gsize, k)'}), '(size=(gsize, k))\n', (8663, 8680), True, 'import numpy as np\n'), ((9684, 9708), 'numpy.dot', 'np.dot', (['exog', 'mdf.params'], {}), '(exog, mdf.params)\n', (9690, 9708), True, 'import numpy as np\n'), ((19556, 19590), 'statsmodels.formula.api.ols', 'sm.ols', (['"""Y ~ X1 + X2 + X3"""'], {'data': 'D'}), "('Y ~ X1 + X2 + X3', data=D)\n", (19562, 19590), True, 'import statsmodels.formula.api as sm\n'), ((19841, 19870), 'numpy.diag', 'np.diag', (['mdf.naive_covariance'], {}), '(mdf.naive_covariance)\n', (19848, 19870), True, 'import numpy as np\n'), ((20049, 20075), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100)'}), '(size=100)\n', (20065, 20075), True, 'import numpy as np\n'), ((20332, 20426), 'statsmodels.genmod.generalized_estimating_equations.GEE.from_formula', 'GEE.from_formula', (['"""Y ~ X1 + X2 + X3"""', 'D', 'None'], {'groups': 'groups', 'family': 'family', 'cov_struct': 'vs'}), "('Y ~ X1 + X2 + X3', D, None, groups=groups, family=family,\n cov_struct=vs)\n", (20348, 20426), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((20475, 20511), 'statsmodels.formula.api.logit', 'sm.logit', (['"""Y ~ X1 + X2 + X3"""'], {'data': 'D'}), "('Y ~ X1 + X2 + X3', data=D)\n", (20483, 20511), True, 'import statsmodels.formula.api as sm\n'), ((21003, 21097), 'statsmodels.genmod.generalized_estimating_equations.GEE.from_formula', 'GEE.from_formula', (['"""Y ~ X1 + X2 + X3"""', 'D', 'None'], {'groups': 'groups', 'family': 'family', 'cov_struct': 'vs'}), "('Y ~ X1 + X2 + X3', D, None, groups=groups, family=family,\n cov_struct=vs)\n", (21019, 21097), False, 'from 
statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((21146, 21184), 'statsmodels.formula.api.poisson', 'sm.poisson', (['"""Y ~ X1 + X2 + X3"""'], {'data': 'D'}), "('Y ~ X1 + X2 + X3', data=D)\n", (21156, 21184), True, 'import statsmodels.formula.api as sm\n'), ((8358, 8374), 'numpy.arange', 'np.arange', (['gsize'], {}), '(gsize)\n', (8367, 8374), True, 'import numpy as np\n'), ((8385, 8401), 'numpy.arange', 'np.arange', (['gsize'], {}), '(gsize)\n', (8394, 8401), True, 'import numpy as np\n'), ((8785, 8813), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'gsize'}), '(size=gsize)\n', (8801, 8813), True, 'import numpy as np\n'), ((20722, 20749), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(100)'}), '(size=100)\n', (20739, 20749), True, 'import numpy as np\n'), ((8893, 8907), 'numpy.ones', 'np.ones', (['gsize'], {}), '(gsize)\n', (8900, 8907), True, 'import numpy as np\n')]
|
# Generated by Django 2.2.20 on 2021-07-29 15:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('company', '0019_auto_20210512_1114'),
]
operations = [
migrations.AlterField(
model_name='company',
name='address_area_abbrev_name',
field=models.CharField(blank=True, max_length=255, verbose_name='State (Abbreviated)'),
),
migrations.AlterField(
model_name='company',
name='registered_address_area_abbrev_name',
field=models.CharField(blank=True, max_length=255, verbose_name='State (Abbreviated)'),
),
]
|
[
"django.db.models.CharField"
] |
[((356, 441), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(255)', 'verbose_name': '"""State (Abbreviated)"""'}), "(blank=True, max_length=255, verbose_name='State (Abbreviated)'\n )\n", (372, 441), False, 'from django.db import migrations, models\n'), ((588, 673), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(255)', 'verbose_name': '"""State (Abbreviated)"""'}), "(blank=True, max_length=255, verbose_name='State (Abbreviated)'\n )\n", (604, 673), False, 'from django.db import migrations, models\n')]
|
from flask import Flask, render_template
app = Flask(__name__)
@app.route("/")
def index():
headline = "Hello World"
return render_template("index.html", headline=headline)
@app.route("/<string:name>")
def say_name(name):
return render_template("index.html", name=name)
if __name__ == "__main__":
app.run(debug=True)
|
[
"flask.Flask",
"flask.render_template"
] |
[((48, 63), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (53, 63), False, 'from flask import Flask, render_template\n'), ((135, 183), 'flask.render_template', 'render_template', (['"""index.html"""'], {'headline': 'headline'}), "('index.html', headline=headline)\n", (150, 183), False, 'from flask import Flask, render_template\n'), ((246, 286), 'flask.render_template', 'render_template', (['"""index.html"""'], {'name': 'name'}), "('index.html', name=name)\n", (261, 286), False, 'from flask import Flask, render_template\n')]
|
#!/usr/bin/env python3
"""Reddit Bot Common Routines
Contains common Reddit bot functions such as keyword comment retrieval,
processed comment caching, and comment posting.
Allows bot authors to concentrate on writing their custom bot functions.
"""
from collections import deque
from os import mkdir
import re
import signal
import sys
from time import sleep
import praw
from config import (
CACHE_FILE,
CACHE_SIZE,
KEYWORD,
RETRIEVAL_LIMIT,
SITE_NAME,
SUBREDDITS,
)
class RedditBot:
"""Superclass for Reddit bots which adds common bot routines.
Parameters
----------
site_name : str, optional
Initializes praw under site_name within praw.ini.
Defaults to config.SITE_NAME.
See: https://praw.readthedocs.io/en/latest/getting_started
/configuration/prawini.html#choosing-a-site
keyword : str, optional
Comment trigger word.
Defaults to config.KEYWORD.
retrieval_limit : int, optional
Maximum number of comments to retrieve at a time.
Defaults to config.RETRIEVAL_LIMIT.
See: https://praw.readthedocs.io/en/latest/code_overview/models
/subreddit.html#praw.models.Subreddit.comments
subreddits : str, optional
Subreddits to retrieve comments from.
Defaults to config.SUBREDDITS.
See: https://praw.readthedocs.io/en/latest/code_overview/models
/subreddit.html#subreddit
"""
def __init__(self,
site_name=SITE_NAME,
keyword=KEYWORD,
retrieval_limit=RETRIEVAL_LIMIT,
subreddits=SUBREDDITS,
):
print("Initializing bot...")
self.keyword = re.compile(keyword+r' ([ \w]+)', re.I)
self.reddit = None
self.retrieval_limit = retrieval_limit
self.site_name = site_name
self.subreddits = subreddits
self.username = site_name
self.processed_comments = self.read_cache(CACHE_FILE)
signal.signal(signal.SIGINT, self.bot_exit)
def authenticate(self, max_attempts=-1, seconds_between_attempts=60):
"""Authenticates SITE_NAME with Reddit.
Sets self.reddit and self.username on success.
Parameters
----------
max_attempts : int, optional
Maximum number of authentication attempts before failure.
Defaults to -1 (infinite attempts).
seconds_between_attempts : int, optional
Seconds to wait between authentication attempts.
Defaults to 60.
"""
attempt = 0
while attempt != max_attempts:
try:
print("Authenticating as {}...".format(self.site_name))
self.reddit = praw.Reddit(self.site_name)
self.username = self.reddit.user.me()
print("Successfully authenticated as {}".format(self.username))
return
except praw.exceptions.APIException as error:
print("Unable to authenticate:", error)
print("Retrying in {} "
"seconds".format(seconds_between_attempts))
sleep(seconds_between_attempts)
attempt += 1
raise RuntimeError('Failed to authenticate after {} '
'attempts'.format(max_attempts))
def retrieve_comments(self):
"""Retrieves comments from subreddits, filters for keyword trigger, and
excludes processed comments.
Returns
-------
generator
Dict of reddit.Comment and query.
"""
try:
print("Retrieving {} comments...".format(self.retrieval_limit))
comments = self.reddit.subreddit(self.subreddits).comments(
limit=self.retrieval_limit
)
for comment in comments:
if (comment.author != self.username
and comment not in self.processed_comments
#and not self.has_already_replied(comment)
#and not self.is_summon_chain(comment)
):
query = self.keyword.search(comment.body.lower())
if query:
self.processed_comments.append(comment.id)
yield {'comment': comment, 'query' : query.group(1)}
except praw.exceptions.APIException as error:
print("API Error:", error)
raise
except AttributeError as error:
print(error)
print("Unable to retrieve comments.")
raise
def submit_comment(self, target, comment):
"""Submit comment to target submission or comment.
Parameters
----------
target : reddit.submission object or reddit.comment object
Target Reddit submission or comment.
comment : str
Comment to post.
Returns
-------
object
reddit.comment of newly created comment.
"""
try:
if target.author != self.username:
print("Posting reply...")
return target.reply(comment)
except praw.exceptions.APIException as error:
print("API Error:", error)
raise
@staticmethod
def read_cache(file):
"""Opens and reads file, converting contents to \n separated list.
        Creates the cache file if it does not exist.
Parameters
----------
file : str
Location of cache file.
Returns
-------
collections.deque
Contents of cache file, limited to config.CACHE_SIZE
"""
try:
print("Loading cache file into memory...")
with open(file, 'r') as data:
cache = data.read()
mem_cache = deque(cache.split('\n'), CACHE_SIZE)
print("Cache loaded.")
except FileNotFoundError:
print("Cache file not found.")
print("Creating cache directory...")
try:
path = ''
for subdirectory in file.split('/')[:-1]:
path += subdirectory + '/'
mkdir(path)
print("Cache directory created.")
except IOError as error:
print(error)
print("Unable to create cache file")
mem_cache = deque([], CACHE_SIZE)
return mem_cache
@staticmethod
def write_cache(file, mem_cache):
"""Writes list into file, converting list to \n separated contents.
Overwrites original cache file.
        Creates the cache file if it does not exist.
Parameters
----------
file : str
Location of cache file.
mem_cache : list or deque
Items in memory cache
"""
try:
print("Saving memory into cache file...")
with open(file, 'w') as cache_file:
try:
cache_file.write(mem_cache.popleft())
for entry in mem_cache:
cache_file.write('\n'+entry)
# avoid adding \n to end of file so that we don't get empty
# entries in deque when next loaded
print("Cache saved")
except IndexError:
print("No items in cache")
except IOError as error:
print(error)
print("Unable to create cache file")
def bot_exit(self, *args, **kwargs):
"""Saves self.processed_comments into cache file before exiting."""
# pylint: disable=unused-argument
print("\nStopping bot...")
self.write_cache(CACHE_FILE, self.processed_comments)
print("Bot stopped")
sys.exit()
def is_summon_chain(self, target):
"""Checks if parent comment of target is from self.
Used to prevent infinite reply loop caused by another bot.
Parameters
----------
target : reddit.comment object
Target Reddit comment.
Returns
-------
bool
True if parent comment of target is from bot. False otherwise.
"""
        return not target.is_root and target.parent().author == self.username
def has_already_replied(self, target):
"""Checks if target comment has already been replied by bot.
Used to prevent multiple replies to the same request.
Parameters
----------
target : reddit.comment object
Target Reddit comment.
Returns
-------
bool
            True if the bot has already replied to target. False otherwise.
"""
try:
# implement replace_more()?
target.refresh()
for reply in target.replies.list():
if reply.author == self.username:
print("Comment already processed.")
return True
print("Processing comment...")
return False
except praw.exceptions.APIException as error:
print("API Error:", error)
# Failsafe
return True
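# ---------------------------------------------------------------------------
# Hedged usage sketch (editor's addition, not part of the original module): a
# minimal subclass driving the common routines above. The subclass name, reply
# text, and 30-second polling interval are illustrative assumptions only.
if __name__ == '__main__':
    class EchoBot(RedditBot):
        """Toy bot that replies with the text captured after the keyword."""

        def run(self):
            self.authenticate(max_attempts=5)
            while True:
                for item in self.retrieve_comments():
                    self.submit_comment(item['comment'],
                                        'You said: {}'.format(item['query']))
                sleep(30)

    EchoBot().run()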
|
[
"os.mkdir",
"time.sleep",
"sys.exit",
"signal.signal",
"praw.Reddit",
"collections.deque",
"re.compile"
] |
[((1729, 1769), 're.compile', 're.compile', (["(keyword + ' ([ \\\\w]+)')", 're.I'], {}), "(keyword + ' ([ \\\\w]+)', re.I)\n", (1739, 1769), False, 'import re\n'), ((2018, 2061), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'self.bot_exit'], {}), '(signal.SIGINT, self.bot_exit)\n', (2031, 2061), False, 'import signal\n'), ((7863, 7873), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7871, 7873), False, 'import sys\n'), ((2763, 2790), 'praw.Reddit', 'praw.Reddit', (['self.site_name'], {}), '(self.site_name)\n', (2774, 2790), False, 'import praw\n'), ((6471, 6492), 'collections.deque', 'deque', (['[]', 'CACHE_SIZE'], {}), '([], CACHE_SIZE)\n', (6476, 6492), False, 'from collections import deque\n'), ((3184, 3215), 'time.sleep', 'sleep', (['seconds_between_attempts'], {}), '(seconds_between_attempts)\n', (3189, 3215), False, 'from time import sleep\n'), ((6266, 6277), 'os.mkdir', 'mkdir', (['path'], {}), '(path)\n', (6271, 6277), False, 'from os import mkdir\n')]
|
from __future__ import unicode_literals
import logging
root_logger = logging.getLogger('autotweet')
logging.basicConfig(
format='%(asctime)s {%(module)s:%(levelname)s}: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
def set_level(level):
root_logger.setLevel(level)
get_logger = root_logger.getChild
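# Hedged usage sketch (editor's addition): obtaining a child logger and raising
# verbosity; the child-logger name 'example' is illustrative only.
if __name__ == '__main__':
    set_level(logging.DEBUG)
    get_logger('example').debug('debug logging enabled')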
|
[
"logging.basicConfig",
"logging.getLogger"
] |
[((71, 101), 'logging.getLogger', 'logging.getLogger', (['"""autotweet"""'], {}), "('autotweet')\n", (88, 101), False, 'import logging\n'), ((103, 223), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s {%(module)s:%(levelname)s}: %(message)s"""', 'datefmt': '"""%Y-%m-%d %H:%M:%S"""'}), "(format=\n '%(asctime)s {%(module)s:%(levelname)s}: %(message)s', datefmt=\n '%Y-%m-%d %H:%M:%S')\n", (122, 223), False, 'import logging\n')]
|
############################################## README #################################################
# This calculates threshold for an image depending upon its spiking activity.
########################################################################################################
import numpy as np
from snn.neuron import neuron
import random
from matplotlib import pyplot as plt
from snn.recep_field import rf
from snn.spike_train import encode
from snn.rl import rl
from snn.rl import update
from snn.reconstruct import reconst_weights
from snn.parameters import param as par
import os
from PIL import Image  # needed for Image.open in the __main__ block below
def threshold(train):
tu = np.shape(train[0])[0]
thresh = 0
for i in range(tu):
simul_active = sum(train[:,i])
if simul_active>thresh:
thresh = simul_active
return (thresh/3)*par.scale
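# Hedged worked example (editor's addition): for a synthetic 3-neuron spike train
# whose column sums (simultaneously active neurons per time step) are 1, 3 and 1,
# thresh becomes 3 and threshold() returns (3/3)*par.scale, i.e. par.scale.
#   demo = np.array([[0, 1, 1], [1, 1, 0], [0, 1, 0]])
#   threshold(demo)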
if __name__ == '__main__':
# img = cv2.imread("mnist1/" + str(1) + ".png", 0)
    img = np.array(Image.open("mnist1/" + str(1) + ".png").convert('L'))  # grayscale, matching cv2.imread(..., 0)
print(img)
# pot = rf(img)
# train = np.array(encode(pot))
# print threshold(train)
|
[
"numpy.shape"
] |
[((630, 648), 'numpy.shape', 'np.shape', (['train[0]'], {}), '(train[0])\n', (638, 648), True, 'import numpy as np\n')]
|
# Generated by Django 3.1 on 2020-11-10 00:11
from __future__ import unicode_literals
from django.db import migrations, models
import csv
from datetime import datetime
def load_initial_data(apps, schema_editor):
Authority = apps.get_model("accounts", "Authority")
with open("assets/authority/authority_names.csv", 'r') as f:
reader = csv.reader(f)
header = next(reader)
authoritys = []
for row in reader:
authority = Authority.objects.create(authority_abbrev = row[0], authority_name = row[1])
#print(authority)
def reverse_func(apps, schema_editor):
    Authority = apps.get_model("accounts", "Authority")
Authority.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.RunPython(load_initial_data, reverse_func),
]
|
[
"django.db.migrations.RunPython",
"csv.reader"
] |
[((342, 355), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (352, 355), False, 'import csv\n'), ((790, 843), 'django.db.migrations.RunPython', 'migrations.RunPython', (['load_initial_data', 'reverse_func'], {}), '(load_initial_data, reverse_func)\n', (810, 843), False, 'from django.db import migrations, models\n')]
|
# all the data from train data set, k-fold validation
import numpy as np
import onnxruntime
import torch
from pandas import read_csv
from tensorflow.python.keras.utils.np_utils import to_categorical
from sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score
# load a single file as a numpy array
def load_file(filepath):
dataframe = read_csv(filepath, header=None, delim_whitespace=True)
return dataframe.values
# load a list of files into a 3D array of [samples, timesteps, features]
def load_group(filenames, prefix=''):
loaded = list()
for name in filenames:
data = load_file(prefix + name)
loaded.append(data)
# stack group so that features are the 3rd dimension
loaded = np.dstack(loaded)
return loaded
# load a dataset group, such as train or test
def load_dataset_group(group, prefix=''):
filepath = prefix + group + '/Inertial Signals/'
# load all 9 files as a single array
filenames = list()
# total acceleration
filenames += ['total_acc_x_' + group + '.txt', 'total_acc_y_' + group + '.txt', 'total_acc_z_' + group + '.txt']
# body acceleration
filenames += ['body_acc_x_' + group + '.txt', 'body_acc_y_' + group + '.txt', 'body_acc_z_' + group + '.txt']
# body gyroscope
filenames += ['body_gyro_x_' + group + '.txt', 'body_gyro_y_' + group + '.txt', 'body_gyro_z_' + group + '.txt']
# load input data
X = load_group(filenames, filepath)
# load class output
y = load_file(prefix + group + '/y_' + group + '.txt')
return X, y
# load the dataset, returns train and test X and y elements
def load_dataset(prefix=''):
# load all train
trainX, trainy = load_dataset_group('train', prefix + 'UCI HAR Dataset/')
# print(trainX.shape, trainy.shape)
# load all test
testX, testy = load_dataset_group('test', prefix + 'UCI HAR Dataset/')
# print(testX.shape, testy.shape)
# zero-offset class values
trainy = trainy - 1
testy = testy - 1
# one hot encode y
trainy = to_categorical(trainy)
testy = to_categorical(testy)
print(trainX.shape, trainy.shape, testX.shape, testy.shape)
return trainX, trainy, testX, testy
# summarize scores
def summarize_results(scores):
print('scores:', scores)
mean, std = np.mean(scores), np.std(scores)
return [mean, std]
# run an experiment
def run_experiment(repeats=10):
# load data
trainX, trainy, testX, testy = load_dataset()
# sess = onnxruntime.InferenceSession('./models/model1.onnx')
sess = onnxruntime.InferenceSession('./cnn-pytorch.onnx')
for i in sess.get_inputs():
print(i.name)
print(i.shape)
for i in sess.get_outputs():
print(i.name)
print(i.shape)
# y_predict = sess.run(None, {sess.get_inputs()[0].name: testX.astype(np.float32)})
testX = np.transpose(testX, (0, 2, 1))
    # keep shuffle off so the two loaders stay aligned when zipped below
    testX = torch.utils.data.DataLoader(testX, batch_size=32, shuffle=False, num_workers=0)
    testy = torch.utils.data.DataLoader(testy, batch_size=32, shuffle=False, num_workers=0)
for features, labels in zip(testX, testy):
y_predict = sess.run(None, {sess.get_inputs()[0].name: features.float().numpy()})
print('y_predict', y_predict)
# y_predict = np.array(y_predict)
# y_predict = np.argmax(y_predict, axis=2)
# testy = labels
# y_true = np.reshape(testy, [-1])
# y_pred = np.reshape(y_predict, [-1])
# accuracy = accuracy_score(y_true, y_pred)
# precision = precision_score(y_true, y_pred, average='macro')
# recall = recall_score(y_true, y_pred, average='macro')
# f1score = f1_score(y_true, y_pred, average='macro')
# print(accuracy, precision, recall, f1score)
run_experiment()
|
[
"numpy.dstack",
"torch.utils.data.DataLoader",
"pandas.read_csv",
"numpy.std",
"numpy.transpose",
"tensorflow.python.keras.utils.np_utils.to_categorical",
"onnxruntime.InferenceSession",
"numpy.mean"
] |
[((365, 419), 'pandas.read_csv', 'read_csv', (['filepath'], {'header': 'None', 'delim_whitespace': '(True)'}), '(filepath, header=None, delim_whitespace=True)\n', (373, 419), False, 'from pandas import read_csv\n'), ((746, 763), 'numpy.dstack', 'np.dstack', (['loaded'], {}), '(loaded)\n', (755, 763), True, 'import numpy as np\n'), ((2044, 2066), 'tensorflow.python.keras.utils.np_utils.to_categorical', 'to_categorical', (['trainy'], {}), '(trainy)\n', (2058, 2066), False, 'from tensorflow.python.keras.utils.np_utils import to_categorical\n'), ((2079, 2100), 'tensorflow.python.keras.utils.np_utils.to_categorical', 'to_categorical', (['testy'], {}), '(testy)\n', (2093, 2100), False, 'from tensorflow.python.keras.utils.np_utils import to_categorical\n'), ((2554, 2604), 'onnxruntime.InferenceSession', 'onnxruntime.InferenceSession', (['"""./cnn-pytorch.onnx"""'], {}), "('./cnn-pytorch.onnx')\n", (2582, 2604), False, 'import onnxruntime\n'), ((2860, 2890), 'numpy.transpose', 'np.transpose', (['testX', '(0, 2, 1)'], {}), '(testX, (0, 2, 1))\n', (2872, 2890), True, 'import numpy as np\n'), ((2903, 2981), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testX'], {'batch_size': '(32)', 'shuffle': '(True)', 'num_workers': '(0)'}), '(testX, batch_size=32, shuffle=True, num_workers=0)\n', (2930, 2981), False, 'import torch\n'), ((2994, 3072), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testy'], {'batch_size': '(32)', 'shuffle': '(True)', 'num_workers': '(0)'}), '(testy, batch_size=32, shuffle=True, num_workers=0)\n', (3021, 3072), False, 'import torch\n'), ((2302, 2317), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (2309, 2317), True, 'import numpy as np\n'), ((2319, 2333), 'numpy.std', 'np.std', (['scores'], {}), '(scores)\n', (2325, 2333), True, 'import numpy as np\n')]
|
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Div
from django import forms
from django.core.exceptions import ObjectDoesNotExist
from django.core.validators import RegexValidator, ValidationError
from django.forms import Form
from phonenumber_field.formfields import PhoneNumberField
from project_core.models import PersonTitle, Gender, PhysicalPerson, PersonPosition, Contact, CareerStage
from project_core.utils.orcid import orcid_div, field_set_read_only
from .utils import organisations_name_autocomplete, get_field_information
from ..utils.utils import create_person_position
from ..widgets import XDSoftYearMonthPickerInput
HELP_TEXTS_HEAD_OF_YOUR_RESEARCH = {'orcid': 'Enter the ORCID iD (e.g.: 0000-0002-1825-0097).<br>'
'Please ask head of research unit if unknown',
'first_name': 'Populated from ORCID iD',
'surname': 'Populated from ORCID iD',
'academic_title': 'Mandatory if ORCID iD is entered'}
class PersonForm(Form):
def __init__(self, *args, **kwargs):
self.person_position = kwargs.pop('person_position', None)
self._only_basic_fields = kwargs.pop('only_basic_fields', False)
self._all_fields_are_optional = kwargs.pop('all_fields_are_optional', False)
help_texts = kwargs.pop('help_texts', {})
career_stage_queryset = kwargs.pop('career_stages_queryset', None)
super().__init__(*args, **kwargs)
orcid_initial = first_name_initial = surname_initial = organisations_initial = group_initial = \
academic_title_initial = email_initial = phone_initial = gender_initial = career_stage_initial = phd_date_initial = None
if self.person_position:
orcid_initial = self.person_position.person.orcid
first_name_initial = self.person_position.person.first_name
surname_initial = self.person_position.person.surname
organisations_initial = self.person_position.organisation_names.all()
group_initial = self.person_position.group
academic_title_initial = self.person_position.academic_title
career_stage_initial = self.person_position.career_stage
gender_initial = self.person_position.person.gender
email_initial = self.person_position.main_email()
phone_initial = self.person_position.main_phone()
if self.person_position.person.phd_date:
# In the database is always saved as yyyy-mm (validator in the model) but it's visualized as mm-yyyy
phd_date_parts = self.person_position.person.phd_date.split('-')
phd_date_initial = f'{phd_date_parts[1]}-{phd_date_parts[0]}'
self.fields['orcid'] = forms.CharField(initial=orcid_initial,
**get_field_information(PhysicalPerson, 'orcid', label='ORCID iD',
required=True,
help_text='Enter your ORCID iD (e.g.: 0000-0002-1825-0097).<br>'
'Please create an <a href="https://orcid.org">ORCID iD</a> if you do not already have one'))
self.fields['academic_title'] = forms.ModelChoiceField(queryset=PersonTitle.objects.all(),
initial=academic_title_initial,
required=not self._only_basic_fields)
self.fields['first_name'] = forms.CharField(initial=first_name_initial,
label='First name(s)',
help_text='Your name is populated from your ORCID record. If you would like to change it please amend it in <a href="https://orcid.org/login">ORCID</a>')
self.fields['surname'] = forms.CharField(initial=surname_initial,
label='Surname(s)',
help_text='Your surname is populated from your ORCID record. If you would like to change it please amend it in <a href="https://orcid.org/login">ORCID</a>')
field_set_read_only([self.fields['first_name'], self.fields['surname']])
if self._only_basic_fields == False:
self.fields['gender'] = forms.ModelChoiceField(queryset=Gender.objects.all(),
initial=gender_initial)
if career_stage_queryset is None:
career_stage_queryset = CareerStage.objects.all().order_by('list_order', 'name')
self.fields['career_stage'] = forms.ModelChoiceField(
queryset=career_stage_queryset,
initial=career_stage_initial)
self.fields['email'] = forms.EmailField(initial=email_initial,
help_text='Please write a valid email address. You will receive a confirmation email when saving and submitting your application form. This email address will also be used for communication purposes')
self.fields['phone'] = PhoneNumberField(initial=phone_initial,
help_text='Phone number e.g.: +41222222222 . Extension can be added with xNN at the end')
self.fields['phd_date'] = forms.CharField(initial=phd_date_initial,
label='Date of PhD',
help_text='Where applicable, please enter the date on which you were awarded, or expect to be awarded your PhD (use the format mm-yyyy)',
required=False,
widget=XDSoftYearMonthPickerInput,
validators=[RegexValidator(regex='^[0-9]{2}-[0-9]{4}$',
message='Format is mm-yyyy',
code='Invalid format')])
self.fields['organisation_names'] = organisations_name_autocomplete(initial=organisations_initial,
help_text='Please select the organisation(s) to which you are affiliated for the purposes of this proposal.')
self.fields['group'] = forms.CharField(initial=group_initial,
help_text='Please type the names of the group(s) or laboratories to which you are affiliated for the purposes of this proposal',
label='Group / lab',
required=False)
# If adding fields here: see below to remove them from the self.helper.layout
used_help_texts = []
for field_str, field in self.fields.items():
if self._all_fields_are_optional:
field.required = False
if field_str in help_texts:
self.fields[field_str].help_text = help_texts[field_str]
used_help_texts.append(field_str)
if len(used_help_texts) != len(help_texts):
print('Unused help texts:', help_texts.keys() - used_help_texts)
self.helper = FormHelper(self)
self.helper.form_tag = False
self.helper.layout = Layout(
orcid_div('orcid'),
Div(
Div('first_name', css_class='col-4'),
Div('surname', css_class='col-4'),
Div('academic_title', css_class='col-2'),
Div('gender', css_class='col-2'),
css_class='row'
),
Div(
Div('career_stage', css_class='col-8'),
Div('phd_date', css_class='col-4'),
css_class='row'
),
Div(
Div('email', css_class='col-6'),
Div('phone', css_class='col-6'),
css_class='row'
),
Div(
Div('organisation_names', css_class='col-12'),
css_class='row'
),
Div(
Div('group', css_class='col-12'),
css_class='row'
),
)
if self._only_basic_fields:
# The Layout always includes all the fields. Now it's better to remove the fields that don't exist
# to avoid django-crispy-forms warnings (not fatal)
PersonForm._delete_field_from_layout(self.helper.layout.fields, 'gender')
PersonForm._delete_field_from_layout(self.helper.layout.fields, 'career_stage')
PersonForm._delete_field_from_layout(self.helper.layout.fields, 'email')
PersonForm._delete_field_from_layout(self.helper.layout.fields, 'phone')
PersonForm._delete_field_from_layout(self.helper.layout.fields, 'phd_date')
PersonForm._delete_field_from_layout(self.helper.layout.fields, 'organisation_names')
PersonForm._delete_field_from_layout(self.helper.layout.fields, 'group')
@staticmethod
def _delete_field_from_layout(container, field_str):
for item in container:
if type(item) == Div:
PersonForm._delete_field_from_layout(item, field_str)
elif type(item) == str and item == field_str:
container.remove(field_str)
def get_person_positions(self):
""" Matches and returns the person_position from the database. """
try:
physical_person = PhysicalPerson.objects.get(
orcid=self.cleaned_data['orcid']
)
except ObjectDoesNotExist:
# Non-existing PHysicalPerson so it doesn't have any PersonPositions associated
return []
person_positions = PersonPosition.objects.filter(
person=physical_person,
academic_title=self.cleaned_data['academic_title'],
group=self.cleaned_data['group'],
career_stage=self.cleaned_data['career_stage']
)
return person_positions
def clean_phd_date(self):
if 'phd_date' not in self.cleaned_data:
return None
if self.cleaned_data['phd_date'] == '':
return None
# It has the correct format mm-yyyy because the field has a validator
# In the DB it's always yyyy-mm because the model has this validator (consistent with general mysql date format)
month, year = self.cleaned_data['phd_date'].split('-')
month_int = int(month)
if month_int < 1 or month_int > 12:
raise ValidationError(f'Invalid month: {month}', code='invalid', params={'value': month})
return f'{year}-{month}'
def clean(self):
cd = super().clean()
if self.errors:
# If there are errors they might be related to orcid (e.g. using the example
# ORCID iD, so cd['orcid'] doesn't exist. At this point we don't do further cleaning:
# the user needs to fix the errors in the form before further cleaning is done.
return cd
# If ORCID iD is filled in: other fields are mandatory
if self._all_fields_are_optional and cd['orcid']:
for field_str, field in self.fields.items():
if field_str not in cd or not cd[field_str]: # It needs to be in cd and have a value
self.add_error(field_str, 'Mandatory field if ORCiD iD is filled in')
if self._all_fields_are_optional and not cd['orcid']:
for field_str, field in self.fields.items():
if field_str in cd and cd[field_str]:
self.add_error(field_str, 'It cannot contain any information if ORCiD ID is empty')
return cd
def save_person(self):
cd = self.cleaned_data
person_position = create_person_position(cd['orcid'], cd['first_name'], cd['surname'],
gender=cd.get('gender', None), phd_date=cd.get('phd_date', None),
academic_title=cd.get('academic_title'), group=cd.get('group'),
career_stage=cd.get('career_stage'),
organisation_names=cd.get('organisation_names', []))
if cd.get('email', None):
# Should this be in the model?
# TODO: discuss how to replace emails
email_contact = person_position.main_email_model()
if email_contact is None:
email_contact = Contact()
email_contact.method = Contact.EMAIL
email_contact.person_position = person_position
email_contact.entry = cd.get('email')
email_contact.save()
if cd.get('phone', None):
# Like before, should this be in the model and consolidated?
# TODO: discuss how to replace phones and handling of multiple phones
phone_contact = person_position.main_phone_model()
if phone_contact is None:
phone_contact = Contact()
phone_contact.method = Contact.PHONE
phone_contact.person_position = person_position
phone_contact.entry = cd.get('phone').as_international
phone_contact.save()
return person_position
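# Hedged usage sketch (editor's addition): how a view might bind and persist this
# form; `request` and the keyword arguments shown are illustrative assumptions.
#
#   form = PersonForm(request.POST, all_fields_are_optional=True)
#   if form.is_valid():
#       person_position = form.save_person()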
|
[
"project_core.models.PhysicalPerson.objects.get",
"django.forms.ModelChoiceField",
"django.forms.EmailField",
"django.core.validators.ValidationError",
"crispy_forms.layout.Div",
"crispy_forms.helper.FormHelper",
"project_core.models.CareerStage.objects.all",
"project_core.utils.orcid.field_set_read_only",
"project_core.models.PersonPosition.objects.filter",
"project_core.utils.orcid.orcid_div",
"phonenumber_field.formfields.PhoneNumberField",
"project_core.models.Gender.objects.all",
"django.core.validators.RegexValidator",
"project_core.models.Contact",
"project_core.models.PersonTitle.objects.all",
"django.forms.CharField"
] |
[((3745, 3979), 'django.forms.CharField', 'forms.CharField', ([], {'initial': 'first_name_initial', 'label': '"""First name(s)"""', 'help_text': '"""Your name is populated from your ORCID record. If you would like to change it please amend it in <a href="https://orcid.org/login">ORCID</a>"""'}), '(initial=first_name_initial, label=\'First name(s)\',\n help_text=\n \'Your name is populated from your ORCID record. If you would like to change it please amend it in <a href="https://orcid.org/login">ORCID</a>\'\n )\n', (3760, 3979), False, 'from django import forms\n'), ((4104, 4331), 'django.forms.CharField', 'forms.CharField', ([], {'initial': 'surname_initial', 'label': '"""Surname(s)"""', 'help_text': '"""Your surname is populated from your ORCID record. If you would like to change it please amend it in <a href="https://orcid.org/login">ORCID</a>"""'}), '(initial=surname_initial, label=\'Surname(s)\', help_text=\n \'Your surname is populated from your ORCID record. If you would like to change it please amend it in <a href="https://orcid.org/login">ORCID</a>\'\n )\n', (4119, 4331), False, 'from django import forms\n'), ((4429, 4501), 'project_core.utils.orcid.field_set_read_only', 'field_set_read_only', (["[self.fields['first_name'], self.fields['surname']]"], {}), "([self.fields['first_name'], self.fields['surname']])\n", (4448, 4501), False, 'from project_core.utils.orcid import orcid_div, field_set_read_only\n'), ((7678, 7694), 'crispy_forms.helper.FormHelper', 'FormHelper', (['self'], {}), '(self)\n', (7688, 7694), False, 'from crispy_forms.helper import FormHelper\n'), ((10232, 10428), 'project_core.models.PersonPosition.objects.filter', 'PersonPosition.objects.filter', ([], {'person': 'physical_person', 'academic_title': "self.cleaned_data['academic_title']", 'group': "self.cleaned_data['group']", 'career_stage': "self.cleaned_data['career_stage']"}), "(person=physical_person, academic_title=self.\n cleaned_data['academic_title'], group=self.cleaned_data['group'],\n career_stage=self.cleaned_data['career_stage'])\n", (10261, 10428), False, 'from project_core.models import PersonTitle, Gender, PhysicalPerson, PersonPosition, Contact, CareerStage\n'), ((4908, 4997), 'django.forms.ModelChoiceField', 'forms.ModelChoiceField', ([], {'queryset': 'career_stage_queryset', 'initial': 'career_stage_initial'}), '(queryset=career_stage_queryset, initial=\n career_stage_initial)\n', (4930, 4997), False, 'from django import forms\n'), ((5062, 5312), 'django.forms.EmailField', 'forms.EmailField', ([], {'initial': 'email_initial', 'help_text': '"""Please write a valid email address. You will receive a confirmation email when saving and submitting your application form. This email address will also be used for communication purposes"""'}), "(initial=email_initial, help_text=\n 'Please write a valid email address. You will receive a confirmation email when saving and submitting your application form. This email address will also be used for communication purposes'\n )\n", (5078, 5312), False, 'from django import forms\n'), ((5391, 5530), 'phonenumber_field.formfields.PhoneNumberField', 'PhoneNumberField', ([], {'initial': 'phone_initial', 'help_text': '"""Phone number e.g.: +41222222222 . Extension can be added with xNN at the end"""'}), "(initial=phone_initial, help_text=\n 'Phone number e.g.: +41222222222 . 
Extension can be added with xNN at the end'\n )\n", (5407, 5530), False, 'from phonenumber_field.formfields import PhoneNumberField\n'), ((6744, 6958), 'django.forms.CharField', 'forms.CharField', ([], {'initial': 'group_initial', 'help_text': '"""Please type the names of the group(s) or laboratories to which you are affiliated for the purposes of this proposal"""', 'label': '"""Group / lab"""', 'required': '(False)'}), "(initial=group_initial, help_text=\n 'Please type the names of the group(s) or laboratories to which you are affiliated for the purposes of this proposal'\n , label='Group / lab', required=False)\n", (6759, 6958), False, 'from django import forms\n'), ((7782, 7800), 'project_core.utils.orcid.orcid_div', 'orcid_div', (['"""orcid"""'], {}), "('orcid')\n", (7791, 7800), False, 'from project_core.utils.orcid import orcid_div, field_set_read_only\n'), ((9964, 10024), 'project_core.models.PhysicalPerson.objects.get', 'PhysicalPerson.objects.get', ([], {'orcid': "self.cleaned_data['orcid']"}), "(orcid=self.cleaned_data['orcid'])\n", (9990, 10024), False, 'from project_core.models import PersonTitle, Gender, PhysicalPerson, PersonPosition, Contact, CareerStage\n'), ((11044, 11131), 'django.core.validators.ValidationError', 'ValidationError', (['f"""Invalid month: {month}"""'], {'code': '"""invalid"""', 'params': "{'value': month}"}), "(f'Invalid month: {month}', code='invalid', params={'value':\n month})\n", (11059, 11131), False, 'from django.core.validators import RegexValidator, ValidationError\n'), ((3485, 3510), 'project_core.models.PersonTitle.objects.all', 'PersonTitle.objects.all', ([], {}), '()\n', (3508, 3510), False, 'from project_core.models import PersonTitle, Gender, PhysicalPerson, PersonPosition, Contact, CareerStage\n'), ((7835, 7871), 'crispy_forms.layout.Div', 'Div', (['"""first_name"""'], {'css_class': '"""col-4"""'}), "('first_name', css_class='col-4')\n", (7838, 7871), False, 'from crispy_forms.layout import Layout, Div\n'), ((7889, 7922), 'crispy_forms.layout.Div', 'Div', (['"""surname"""'], {'css_class': '"""col-4"""'}), "('surname', css_class='col-4')\n", (7892, 7922), False, 'from crispy_forms.layout import Layout, Div\n'), ((7940, 7980), 'crispy_forms.layout.Div', 'Div', (['"""academic_title"""'], {'css_class': '"""col-2"""'}), "('academic_title', css_class='col-2')\n", (7943, 7980), False, 'from crispy_forms.layout import Layout, Div\n'), ((7998, 8030), 'crispy_forms.layout.Div', 'Div', (['"""gender"""'], {'css_class': '"""col-2"""'}), "('gender', css_class='col-2')\n", (8001, 8030), False, 'from crispy_forms.layout import Layout, Div\n'), ((8112, 8150), 'crispy_forms.layout.Div', 'Div', (['"""career_stage"""'], {'css_class': '"""col-8"""'}), "('career_stage', css_class='col-8')\n", (8115, 8150), False, 'from crispy_forms.layout import Layout, Div\n'), ((8168, 8202), 'crispy_forms.layout.Div', 'Div', (['"""phd_date"""'], {'css_class': '"""col-4"""'}), "('phd_date', css_class='col-4')\n", (8171, 8202), False, 'from crispy_forms.layout import Layout, Div\n'), ((8284, 8315), 'crispy_forms.layout.Div', 'Div', (['"""email"""'], {'css_class': '"""col-6"""'}), "('email', css_class='col-6')\n", (8287, 8315), False, 'from crispy_forms.layout import Layout, Div\n'), ((8333, 8364), 'crispy_forms.layout.Div', 'Div', (['"""phone"""'], {'css_class': '"""col-6"""'}), "('phone', css_class='col-6')\n", (8336, 8364), False, 'from crispy_forms.layout import Layout, Div\n'), ((8446, 8491), 'crispy_forms.layout.Div', 'Div', (['"""organisation_names"""'], {'css_class': 
'"""col-12"""'}), "('organisation_names', css_class='col-12')\n", (8449, 8491), False, 'from crispy_forms.layout import Layout, Div\n'), ((8573, 8605), 'crispy_forms.layout.Div', 'Div', (['"""group"""'], {'css_class': '"""col-12"""'}), "('group', css_class='col-12')\n", (8576, 8605), False, 'from crispy_forms.layout import Layout, Div\n'), ((13040, 13049), 'project_core.models.Contact', 'Contact', ([], {}), '()\n', (13047, 13049), False, 'from project_core.models import PersonTitle, Gender, PhysicalPerson, PersonPosition, Contact, CareerStage\n'), ((13567, 13576), 'project_core.models.Contact', 'Contact', ([], {}), '()\n', (13574, 13576), False, 'from project_core.models import PersonTitle, Gender, PhysicalPerson, PersonPosition, Contact, CareerStage\n'), ((4616, 4636), 'project_core.models.Gender.objects.all', 'Gender.objects.all', ([], {}), '()\n', (4634, 4636), False, 'from project_core.models import PersonTitle, Gender, PhysicalPerson, PersonPosition, Contact, CareerStage\n'), ((4808, 4833), 'project_core.models.CareerStage.objects.all', 'CareerStage.objects.all', ([], {}), '()\n', (4831, 4833), False, 'from project_core.models import PersonTitle, Gender, PhysicalPerson, PersonPosition, Contact, CareerStage\n'), ((6146, 6245), 'django.core.validators.RegexValidator', 'RegexValidator', ([], {'regex': '"""^[0-9]{2}-[0-9]{4}$"""', 'message': '"""Format is mm-yyyy"""', 'code': '"""Invalid format"""'}), "(regex='^[0-9]{2}-[0-9]{4}$', message='Format is mm-yyyy',\n code='Invalid format')\n", (6160, 6245), False, 'from django.core.validators import RegexValidator, ValidationError\n')]
|
# class derived from a GridLayout with a bunch of widgets
from PyQt5.QtWidgets import QLabel, QGridLayout, QLineEdit, \
QVBoxLayout, QHBoxLayout, QComboBox, QPushButton, QCheckBox
import numpy as np
import pandas as pd
class ParamWidget(QGridLayout):
'''
	Grid layout collecting the input boxes and labels used to edit a parameter set.
'''
def __init__(self, paramTyp, param, projMan = None, parent = None):
'''
Build the boxes.
Parameters
----------
paramTyp: dictionary
Defining types of parameters in the set.
param: dictionary
The parameters in the set read from paramMan.
projMan: Project
			Project management class, used to access the raw data.
Attributes
----------
param: dictionary
Parameter set managed by this grid widget.
err: bool
Whether there's an error in the parameters.
		senderList: list
			Input widgets (or [begin, end] widget pairs for range parameters),
			kept in the same order as the entries in paramTyp.
'''
super().__init__(parent)
self.err = False
self.param = param
self.paramTyp = paramTyp
self.projMan = projMan
self.senderList = []
for i, (k, v) in enumerate(paramTyp.items()):
self.addWidget(QLabel(k), i, 0)
val = self.param[k]
if v == "protocol" and projMan != None:
cb = QComboBox()
cb.currentTextChanged.connect(lambda x, ind = k, typ = v: \
self.updateParam(ind, typ, x))
self.addWidget(cb, i, 1)
self.senderList.append(cb)
elif v == "int" or v == "float":
le = QLineEdit()
le.textEdited.connect(lambda x, ind = k, typ = v:
self.updateParam(ind, typ, x))
self.addWidget(le, i, 1)
self.senderList.append(le)
elif v == "intr" or v == "floatr":
le0 = QLineEdit()
le1 = QLineEdit()
le0.textEdited.connect(lambda x, ind = k, typ = v: \
self.updateParam(ind, typ, x, begin = True))
le1.textEdited.connect(lambda x, ind = k, typ = v:
self.updateParam(ind, typ, x, begin = False))
twoHB = QHBoxLayout()
twoHB.addWidget(le0)
twoHB.addWidget(QLabel("to"))
twoHB.addWidget(le1)
self.addLayout(twoHB, i, 1)
self.senderList.append([le0, le1])
elif v == "intl" or v == "floatl" or v == "strl":
le = QLineEdit()
le.textEdited.connect(lambda x, ind = k, typ = v: \
self.updateParam(ind, typ, x))
btn = QPushButton("...")
lstHB = QHBoxLayout()
lstHB.addWidget(le)
lstHB.addWidget(btn)
self.addLayout(lstHB, i, 1)
self.senderList.append(le)
elif v == "bool":
cb = QCheckBox()
cb.stateChanged.connect(lambda x, ind = k, typ = v: \
self.updateParam(ind, typ, x))
self.addWidget(cb, i, 1)
self.senderList.append(cb)
elif "combo" in v:
options = v.split(',')[1:]
cb = QComboBox()
for j in options:
cb.addItem(j)
cb.currentTextChanged.connect(lambda x, ind = k, typ = v: \
self.updateParam(ind, typ, x))
cb.setCurrentIndex(0)
self.addWidget(cb, i, 1)
self.senderList.append(cb)
else:
print("Unknown parameter type.")
self.updateDisp()
self.updateDisp(param)
def updateDisp(self, param = None):
'''
After parameter changes due to importing or change of protocols,
update display of parameters.
Parameters
----------
param: dictionary, optional
			New parameters. Default is None, in which case only the protocol lists are refreshed.
'''
if param == None:
for i, (k, v) in enumerate(self.paramTyp.items()):
if v == "protocol" and self.projMan != None:
cb = self.senderList[i]
cb.clear()
pt = self.projMan.getProtocols()
for j in pt:
cb.addItem(j)
if len(pt):
cb.setCurrentIndex(0)
else:
self.err = True
else:
self.param = param
for i, (k, v) in enumerate(self.paramTyp.items()):
val = param[k]
if v == "protocol" and self.projMan != None:
cb = self.senderList[i]
cb.clear()
pt = self.projMan.getProtocols()
for j in pt:
cb.addItem(j)
if len(pt):
cb.setCurrentIndex(0)
else:
self.err = True
elif v == "int" or v == "float":
if v == "int" or (1e-3 < abs(val) and abs(val) < 1e3):
ds = str(val)
else:
ds = "{:.3e}".format(val)
le = self.senderList[i]
le.setText(ds)
elif v == "intr" or v == "floatr":
le0, le1 = self.senderList[i]
if v == "intr" or (1e-3 < abs(val[0]) and abs(val[0]) < 1e3):
ds = str(val[0])
else:
ds = "{:.3e}".format(val[0])
le0.setText(ds)
if v == "intr" or (1e-3 < abs(val[1]) and abs(val[1]) < 1e3):
ds = str(val[1])
else:
ds = "{:.3e}".format(val[1])
le1.setText(ds)
elif v == "intl" or v == "floatl":
if len(val):
if v == "intl" or (1e-3 < min(map(abs, val)) and \
max(map(abs, val)) < 1e3):
ds = ", ".join(map(str, val))
else:
ds = ", ".join(["{:.3e}".format(d) for d in val])
else:
ds = ''
le = self.senderList[i]
le.setText(ds)
elif v == "strl":
if len(val):
ds = ", ".join(val)
else:
ds = ''
le = self.senderList[i]
le.setText(ds)
elif v == "bool":
cb = self.senderList[i]
cb.setChecked(val)
elif "combo" in v:
cb = self.senderList[i]
cb.setCurrentText(val)
else:
print("Unknown parameter type")
print(v, val)
self.update()
def updateParam(self, ind, typ, val, **kargs):
'''
		Update an individual parameter in the profile using the value taken
		from its input widget.
Parameters
----------
ind: string
Key of the individual parameter to be set.
typ: string
Type of the individual parameter.
val: string
Text out of the input widget with the value.
**kargs:
			Extra arguments used by some parameter types.
			- begin: bool
				Whether the edited value comes from the first (begin) box of a
				range parameter.
'''
try:
self.err = False
self.sender().setStyleSheet("background:#FFFFFF;")
if typ == "int":
self.param[ind] = int(val)
elif typ == "float":
self.param[ind] = float(val)
elif typ == "intr":
if kargs["begin"]:
self.param[ind][0] = int(val)
else:
self.param[ind][1] = int(val)
elif typ == "floatr":
if kargs["begin"]:
self.param[ind][0] = float(val)
else:
self.param[ind][1] = float(val)
elif typ == "intl":
if len(val):
self.param[ind] = list(map(int, val.split(',')))
else:
self.param[ind] = []
elif typ == "floatl":
if len(val):
self.param[ind] = list(map(float, val.split(',')))
else:
self.param[ind] = []
elif typ == "strl":
if len(val):
self.param[ind] = [d.strip() for d in val.split(',')]
else:
self.param[ind] = []
elif typ == "protocol":
self.param[ind] = val
elif typ == "bool":
self.param[ind] = bool(val)
elif "combo" in typ:
self.param[ind] = val
else:
print("Unknown parameter type")
except ValueError:
self.sender().setStyleSheet("background:#FF0000;")
self.err = True
def getParam(self):
'''
Get parameters managed in this widget.
'''
if not self.err:
return self.param
else:
return None
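# ----------------------------------------------------------------------------
# Usage sketch (not part of the original class): a minimal, hypothetical example
# of hosting a ParamWidget inside a bare QWidget. The parameter names, types and
# values below are invented for illustration; projMan is left as None, so no
# "protocol" entries are used.
# ----------------------------------------------------------------------------
if __name__ == "__main__":
	import sys
	from PyQt5.QtWidgets import QApplication, QWidget
	app = QApplication(sys.argv)
	# one entry per supported type tag handled in ParamWidget.__init__
	paramTyp = {"threshold": "float", "window": "intr", "channels": "intl",
			"normalize": "bool", "mode": "combo,fast,accurate"}
	param = {"threshold": 0.5, "window": [0, 100], "channels": [1, 2],
			"normalize": False, "mode": "fast"}
	host = QWidget()
	host.setLayout(ParamWidget(paramTyp, param))
	host.show()
	sys.exit(app.exec_())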
|
[
"PyQt5.QtWidgets.QComboBox",
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtWidgets.QLineEdit",
"PyQt5.QtWidgets.QHBoxLayout",
"PyQt5.QtWidgets.QPushButton",
"PyQt5.QtWidgets.QCheckBox"
] |
[((1038, 1047), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['k'], {}), '(k)\n', (1044, 1047), False, 'from PyQt5.QtWidgets import QLabel, QGridLayout, QLineEdit, QVBoxLayout, QHBoxLayout, QComboBox, QPushButton, QCheckBox\n'), ((1130, 1141), 'PyQt5.QtWidgets.QComboBox', 'QComboBox', ([], {}), '()\n', (1139, 1141), False, 'from PyQt5.QtWidgets import QLabel, QGridLayout, QLineEdit, QVBoxLayout, QHBoxLayout, QComboBox, QPushButton, QCheckBox\n'), ((1348, 1359), 'PyQt5.QtWidgets.QLineEdit', 'QLineEdit', ([], {}), '()\n', (1357, 1359), False, 'from PyQt5.QtWidgets import QLabel, QGridLayout, QLineEdit, QVBoxLayout, QHBoxLayout, QComboBox, QPushButton, QCheckBox\n'), ((1560, 1571), 'PyQt5.QtWidgets.QLineEdit', 'QLineEdit', ([], {}), '()\n', (1569, 1571), False, 'from PyQt5.QtWidgets import QLabel, QGridLayout, QLineEdit, QVBoxLayout, QHBoxLayout, QComboBox, QPushButton, QCheckBox\n'), ((1582, 1593), 'PyQt5.QtWidgets.QLineEdit', 'QLineEdit', ([], {}), '()\n', (1591, 1593), False, 'from PyQt5.QtWidgets import QLabel, QGridLayout, QLineEdit, QVBoxLayout, QHBoxLayout, QComboBox, QPushButton, QCheckBox\n'), ((1821, 1834), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (1832, 1834), False, 'from PyQt5.QtWidgets import QLabel, QGridLayout, QLineEdit, QVBoxLayout, QHBoxLayout, QComboBox, QPushButton, QCheckBox\n'), ((1880, 1892), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""to"""'], {}), "('to')\n", (1886, 1892), False, 'from PyQt5.QtWidgets import QLabel, QGridLayout, QLineEdit, QVBoxLayout, QHBoxLayout, QComboBox, QPushButton, QCheckBox\n'), ((2052, 2063), 'PyQt5.QtWidgets.QLineEdit', 'QLineEdit', ([], {}), '()\n', (2061, 2063), False, 'from PyQt5.QtWidgets import QLabel, QGridLayout, QLineEdit, QVBoxLayout, QHBoxLayout, QComboBox, QPushButton, QCheckBox\n'), ((2167, 2185), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""..."""'], {}), "('...')\n", (2178, 2185), False, 'from PyQt5.QtWidgets import QLabel, QGridLayout, QLineEdit, QVBoxLayout, QHBoxLayout, QComboBox, QPushButton, QCheckBox\n'), ((2198, 2211), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (2209, 2211), False, 'from PyQt5.QtWidgets import QLabel, QGridLayout, QLineEdit, QVBoxLayout, QHBoxLayout, QComboBox, QPushButton, QCheckBox\n'), ((2354, 2365), 'PyQt5.QtWidgets.QCheckBox', 'QCheckBox', ([], {}), '()\n', (2363, 2365), False, 'from PyQt5.QtWidgets import QLabel, QGridLayout, QLineEdit, QVBoxLayout, QHBoxLayout, QComboBox, QPushButton, QCheckBox\n'), ((2583, 2594), 'PyQt5.QtWidgets.QComboBox', 'QComboBox', ([], {}), '()\n', (2592, 2594), False, 'from PyQt5.QtWidgets import QLabel, QGridLayout, QLineEdit, QVBoxLayout, QHBoxLayout, QComboBox, QPushButton, QCheckBox\n')]
|
from django.conf.urls import url, include
from django.contrib import admin
from rest_framework.documentation import include_docs_urls
from auth_api import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^docs/', include_docs_urls(title='Todo API', description='RESTful API for Todo')),
url(r'^$', views.api_root),
url(r'^', include('users.urls', namespace='users')),
url(r'^', include('todos.urls', namespace='todos')),
]
|
[
"rest_framework.documentation.include_docs_urls",
"django.conf.urls.include",
"django.conf.urls.url"
] |
[((183, 214), 'django.conf.urls.url', 'url', (['"""^admin/"""', 'admin.site.urls'], {}), "('^admin/', admin.site.urls)\n", (186, 214), False, 'from django.conf.urls import url, include\n'), ((315, 340), 'django.conf.urls.url', 'url', (['"""^$"""', 'views.api_root'], {}), "('^$', views.api_root)\n", (318, 340), False, 'from django.conf.urls import url, include\n'), ((236, 307), 'rest_framework.documentation.include_docs_urls', 'include_docs_urls', ([], {'title': '"""Todo API"""', 'description': '"""RESTful API for Todo"""'}), "(title='Todo API', description='RESTful API for Todo')\n", (253, 307), False, 'from rest_framework.documentation import include_docs_urls\n'), ((357, 397), 'django.conf.urls.include', 'include', (['"""users.urls"""'], {'namespace': '"""users"""'}), "('users.urls', namespace='users')\n", (364, 397), False, 'from django.conf.urls import url, include\n'), ((414, 454), 'django.conf.urls.include', 'include', (['"""todos.urls"""'], {'namespace': '"""todos"""'}), "('todos.urls', namespace='todos')\n", (421, 454), False, 'from django.conf.urls import url, include\n')]
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.mgmt.core import ARMPipelineClient
from azure.profiles import KnownProfiles, ProfileDefinition
from azure.profiles.multiapiclient import MultiApiClientMixin
from msrest import Deserializer, Serializer
from ._configuration import AzureMachineLearningWorkspacesConfiguration
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.credentials import TokenCredential
class _SDKClient(object):
def __init__(self, *args, **kwargs):
"""This is a fake class to support current implemetation of MultiApiClientMixin."
Will be removed in final version of multiapi azure-core based client
"""
pass
class AzureMachineLearningWorkspaces(MultiApiClientMixin, _SDKClient):
"""These APIs allow end users to operate on Azure Machine Learning Workspace resources.
    This client contains multiple API versions, to help you deal with all of the Azure clouds
(Azure Stack, Azure Government, Azure China, etc.).
By default, it uses the latest API version available on public Azure.
For production, you should stick to a particular api-version and/or profile.
The profile sets a mapping between an operation group and its API version.
The api-version parameter sets the default API version if the operation
group is not described in the profile.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:param api_version: API version to use if no profile is provided, or if missing in profile.
:type api_version: str
:param base_url: Service URL
:type base_url: str
:param profile: A profile definition, from KnownProfiles to dict.
:type profile: azure.profiles.KnownProfiles
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
DEFAULT_API_VERSION = '2022-05-01'
_PROFILE_TAG = "azure.mgmt.machinelearningservices.AzureMachineLearningWorkspaces"
LATEST_PROFILE = ProfileDefinition({
_PROFILE_TAG: {
None: DEFAULT_API_VERSION,
'assets': '1.0.0',
'async_operations': 'v1.0',
'batch_job_deployment': '2020-09-01-dataplanepreview',
'batch_job_endpoint': '2020-09-01-dataplanepreview',
'data_call': '1.5.0',
'data_container': '1.5.0',
'data_version': '1.5.0',
'dataset_containers': '2021-10-01',
'dataset_controller_v2': '1.5.0',
'dataset_v2': '1.5.0',
'dataset_versions': '2021-10-01',
'datasets_v1': '1.5.0',
'delete': 'v1.0',
'events': 'v1.0',
'experiments': 'v1.0',
'extensive_model': '1.0.0',
'get_operation_status': '1.5.0',
'metric': 'v1.0',
'migration': '1.0.0',
'models': '1.0.0',
'registry_management_non_workspace': 'v1.0',
'run': 'v1.0',
'run_artifacts': 'v1.0',
'runs': 'v1.0',
'spans': 'v1.0',
'temporary_data_references': '2021-10-01-dataplanepreview',
}},
_PROFILE_TAG + " latest"
)
def __init__(
self,
credential, # type: "TokenCredential"
subscription_id, # type: str
api_version=None, # type: Optional[str]
base_url="https://management.azure.com", # type: str
profile=KnownProfiles.default, # type: KnownProfiles
**kwargs # type: Any
):
self._config = AzureMachineLearningWorkspacesConfiguration(credential, subscription_id, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
super(AzureMachineLearningWorkspaces, self).__init__(
api_version=api_version,
profile=profile
)
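    # Usage sketch (illustrative only, not generated code): pin an explicit API
    # version when constructing the client so that operation groups resolve
    # predictably. The credential class is assumed to come from azure-identity,
    # and the subscription id below is a placeholder.
    #
    #   from azure.identity import DefaultAzureCredential
    #   client = AzureMachineLearningWorkspaces(
    #       credential=DefaultAzureCredential(),
    #       subscription_id="<subscription-id>",
    #       api_version="2022-05-01",
    #   )
    #   # Operation groups such as client.compute or client.jobs then resolve to
    #   # the v2022_05_01 operations modules, and the matching model classes can
    #   # be inspected via AzureMachineLearningWorkspaces.models("2022-05-01").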
@classmethod
def _models_dict(cls, api_version):
return {k: v for k, v in cls.models(api_version).__dict__.items() if isinstance(v, type)}
@classmethod
def models(cls, api_version=DEFAULT_API_VERSION):
"""Module depends on the API version:
* 1.5.0: :mod:`dataset_dataplane.models<azure.mgmt.machinelearningservices.dataset_dataplane.models>`
* 1.0.0: :mod:`model_dataplane.models<azure.mgmt.machinelearningservices.model_dataplane.models>`
* v1.0: :mod:`registry_discovery.models<azure.mgmt.machinelearningservices.registry_discovery.models>`
* v1.0: :mod:`runhistory.models<azure.mgmt.machinelearningservices.runhistory.models>`
* 2020-09-01-dataplanepreview: :mod:`v2020_09_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.models>`
* 2021-10-01: :mod:`v2021_10_01.models<azure.mgmt.machinelearningservices.v2021_10_01.models>`
* 2021-10-01-dataplanepreview: :mod:`v2021_10_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.models>`
* 2022-01-01-preview: :mod:`v2022_01_01_preview.models<azure.mgmt.machinelearningservices.v2022_01_01_preview.models>`
* 2022-02-01-preview: :mod:`v2022_02_01_preview.models<azure.mgmt.machinelearningservices.v2022_02_01_preview.models>`
* 2022-05-01: :mod:`v2022_05_01.models<azure.mgmt.machinelearningservices.v2022_05_01.models>`
"""
if api_version == '1.5.0':
from .dataset_dataplane import models
return models
elif api_version == '1.0.0':
from .model_dataplane import models
return models
elif api_version == 'v1.0':
from .registry_discovery import models
return models
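        # NOTE: the second 'v1.0' branch below is unreachable; 'v1.0' is already
        # matched by the registry_discovery branch above, so the runhistory models
        # are never returned from this method.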
elif api_version == 'v1.0':
from .runhistory import models
return models
elif api_version == '2020-09-01-dataplanepreview':
from .v2020_09_01_dataplanepreview import models
return models
elif api_version == '2021-10-01':
from .v2021_10_01 import models
return models
elif api_version == '2021-10-01-dataplanepreview':
from .v2021_10_01_dataplanepreview import models
return models
elif api_version == '2022-01-01-preview':
from .v2022_01_01_preview import models
return models
elif api_version == '2022-02-01-preview':
from .v2022_02_01_preview import models
return models
elif api_version == '2022-05-01':
from .v2022_05_01 import models
return models
raise ValueError("API version {} is not available".format(api_version))
@property
def assets(self):
"""Instance depends on the API version:
* 1.0.0: :class:`AssetsOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.AssetsOperations>`
"""
api_version = self._get_api_version('assets')
if api_version == '1.0.0':
from .model_dataplane.operations import AssetsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'assets'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def async_operations(self):
"""Instance depends on the API version:
* v1.0: :class:`AsyncOperationsOperations<azure.mgmt.machinelearningservices.registry_discovery.operations.AsyncOperationsOperations>`
"""
api_version = self._get_api_version('async_operations')
if api_version == 'v1.0':
from .registry_discovery.operations import AsyncOperationsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'async_operations'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def batch_deployments(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.BatchDeploymentsOperations>`
* 2022-02-01-preview: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.BatchDeploymentsOperations>`
* 2022-05-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.BatchDeploymentsOperations>`
"""
api_version = self._get_api_version('batch_deployments')
if api_version == '2021-10-01':
from .v2021_10_01.operations import BatchDeploymentsOperations as OperationClass
elif api_version == '2022-02-01-preview':
from .v2022_02_01_preview.operations import BatchDeploymentsOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import BatchDeploymentsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'batch_deployments'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def batch_endpoints(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.BatchEndpointsOperations>`
* 2022-02-01-preview: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.BatchEndpointsOperations>`
* 2022-05-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.BatchEndpointsOperations>`
"""
api_version = self._get_api_version('batch_endpoints')
if api_version == '2021-10-01':
from .v2021_10_01.operations import BatchEndpointsOperations as OperationClass
elif api_version == '2022-02-01-preview':
from .v2022_02_01_preview.operations import BatchEndpointsOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import BatchEndpointsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'batch_endpoints'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def batch_job_deployment(self):
"""Instance depends on the API version:
* 2020-09-01-dataplanepreview: :class:`BatchJobDeploymentOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.operations.BatchJobDeploymentOperations>`
"""
api_version = self._get_api_version('batch_job_deployment')
if api_version == '2020-09-01-dataplanepreview':
from .v2020_09_01_dataplanepreview.operations import BatchJobDeploymentOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'batch_job_deployment'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def batch_job_endpoint(self):
"""Instance depends on the API version:
* 2020-09-01-dataplanepreview: :class:`BatchJobEndpointOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.operations.BatchJobEndpointOperations>`
"""
api_version = self._get_api_version('batch_job_endpoint')
if api_version == '2020-09-01-dataplanepreview':
from .v2020_09_01_dataplanepreview.operations import BatchJobEndpointOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'batch_job_endpoint'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def code_containers(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.CodeContainersOperations>`
* 2021-10-01-dataplanepreview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.CodeContainersOperations>`
* 2022-02-01-preview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.CodeContainersOperations>`
* 2022-05-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.CodeContainersOperations>`
"""
api_version = self._get_api_version('code_containers')
if api_version == '2021-10-01':
from .v2021_10_01.operations import CodeContainersOperations as OperationClass
elif api_version == '2021-10-01-dataplanepreview':
from .v2021_10_01_dataplanepreview.operations import CodeContainersOperations as OperationClass
elif api_version == '2022-02-01-preview':
from .v2022_02_01_preview.operations import CodeContainersOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import CodeContainersOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'code_containers'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def code_versions(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.CodeVersionsOperations>`
* 2021-10-01-dataplanepreview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.CodeVersionsOperations>`
* 2022-02-01-preview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.CodeVersionsOperations>`
* 2022-05-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.CodeVersionsOperations>`
"""
api_version = self._get_api_version('code_versions')
if api_version == '2021-10-01':
from .v2021_10_01.operations import CodeVersionsOperations as OperationClass
elif api_version == '2021-10-01-dataplanepreview':
from .v2021_10_01_dataplanepreview.operations import CodeVersionsOperations as OperationClass
elif api_version == '2022-02-01-preview':
from .v2022_02_01_preview.operations import CodeVersionsOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import CodeVersionsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'code_versions'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def component_containers(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComponentContainersOperations>`
* 2021-10-01-dataplanepreview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ComponentContainersOperations>`
* 2022-02-01-preview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ComponentContainersOperations>`
* 2022-05-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComponentContainersOperations>`
"""
api_version = self._get_api_version('component_containers')
if api_version == '2021-10-01':
from .v2021_10_01.operations import ComponentContainersOperations as OperationClass
elif api_version == '2021-10-01-dataplanepreview':
from .v2021_10_01_dataplanepreview.operations import ComponentContainersOperations as OperationClass
elif api_version == '2022-02-01-preview':
from .v2022_02_01_preview.operations import ComponentContainersOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import ComponentContainersOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'component_containers'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def component_versions(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComponentVersionsOperations>`
* 2021-10-01-dataplanepreview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ComponentVersionsOperations>`
* 2022-02-01-preview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ComponentVersionsOperations>`
* 2022-05-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComponentVersionsOperations>`
"""
api_version = self._get_api_version('component_versions')
if api_version == '2021-10-01':
from .v2021_10_01.operations import ComponentVersionsOperations as OperationClass
elif api_version == '2021-10-01-dataplanepreview':
from .v2021_10_01_dataplanepreview.operations import ComponentVersionsOperations as OperationClass
elif api_version == '2022-02-01-preview':
from .v2022_02_01_preview.operations import ComponentVersionsOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import ComponentVersionsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'component_versions'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def compute(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComputeOperations>`
* 2022-01-01-preview: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.ComputeOperations>`
* 2022-05-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComputeOperations>`
"""
api_version = self._get_api_version('compute')
if api_version == '2021-10-01':
from .v2021_10_01.operations import ComputeOperations as OperationClass
elif api_version == '2022-01-01-preview':
from .v2022_01_01_preview.operations import ComputeOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import ComputeOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'compute'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def data_call(self):
"""Instance depends on the API version:
* 1.5.0: :class:`DataCallOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataCallOperations>`
"""
api_version = self._get_api_version('data_call')
if api_version == '1.5.0':
from .dataset_dataplane.operations import DataCallOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'data_call'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def data_container(self):
"""Instance depends on the API version:
* 1.5.0: :class:`DataContainerOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataContainerOperations>`
"""
api_version = self._get_api_version('data_container')
if api_version == '1.5.0':
from .dataset_dataplane.operations import DataContainerOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'data_container'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def data_containers(self):
"""Instance depends on the API version:
* 2022-02-01-preview: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DataContainersOperations>`
* 2022-05-01: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DataContainersOperations>`
"""
api_version = self._get_api_version('data_containers')
if api_version == '2022-02-01-preview':
from .v2022_02_01_preview.operations import DataContainersOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import DataContainersOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'data_containers'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def data_version(self):
"""Instance depends on the API version:
* 1.5.0: :class:`DataVersionOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataVersionOperations>`
"""
api_version = self._get_api_version('data_version')
if api_version == '1.5.0':
from .dataset_dataplane.operations import DataVersionOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'data_version'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def data_versions(self):
"""Instance depends on the API version:
* 2022-02-01-preview: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DataVersionsOperations>`
* 2022-05-01: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DataVersionsOperations>`
"""
api_version = self._get_api_version('data_versions')
if api_version == '2022-02-01-preview':
from .v2022_02_01_preview.operations import DataVersionsOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import DataVersionsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'data_versions'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def dataset_containers(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`DatasetContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatasetContainersOperations>`
"""
api_version = self._get_api_version('dataset_containers')
if api_version == '2021-10-01':
from .v2021_10_01.operations import DatasetContainersOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'dataset_containers'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def dataset_controller_v2(self):
"""Instance depends on the API version:
* 1.5.0: :class:`DatasetControllerV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetControllerV2Operations>`
"""
api_version = self._get_api_version('dataset_controller_v2')
if api_version == '1.5.0':
from .dataset_dataplane.operations import DatasetControllerV2Operations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'dataset_controller_v2'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def dataset_v2(self):
"""Instance depends on the API version:
* 1.5.0: :class:`DatasetV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetV2Operations>`
"""
api_version = self._get_api_version('dataset_v2')
if api_version == '1.5.0':
from .dataset_dataplane.operations import DatasetV2Operations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'dataset_v2'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def dataset_versions(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`DatasetVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatasetVersionsOperations>`
"""
api_version = self._get_api_version('dataset_versions')
if api_version == '2021-10-01':
from .v2021_10_01.operations import DatasetVersionsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'dataset_versions'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def datasets_v1(self):
"""Instance depends on the API version:
* 1.5.0: :class:`DatasetsV1Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetsV1Operations>`
"""
api_version = self._get_api_version('datasets_v1')
if api_version == '1.5.0':
from .dataset_dataplane.operations import DatasetsV1Operations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'datasets_v1'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def datastores(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatastoresOperations>`
* 2022-02-01-preview: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DatastoresOperations>`
* 2022-05-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DatastoresOperations>`
"""
api_version = self._get_api_version('datastores')
if api_version == '2021-10-01':
from .v2021_10_01.operations import DatastoresOperations as OperationClass
elif api_version == '2022-02-01-preview':
from .v2022_02_01_preview.operations import DatastoresOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import DatastoresOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'datastores'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def delete(self):
"""Instance depends on the API version:
* 1.5.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DeleteOperations>`
* v1.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.runhistory.operations.DeleteOperations>`
"""
api_version = self._get_api_version('delete')
if api_version == '1.5.0':
from .dataset_dataplane.operations import DeleteOperations as OperationClass
elif api_version == 'v1.0':
from .runhistory.operations import DeleteOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'delete'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def environment_containers(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.EnvironmentContainersOperations>`
* 2021-10-01-dataplanepreview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.EnvironmentContainersOperations>`
* 2022-02-01-preview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.EnvironmentContainersOperations>`
* 2022-05-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.EnvironmentContainersOperations>`
"""
api_version = self._get_api_version('environment_containers')
if api_version == '2021-10-01':
from .v2021_10_01.operations import EnvironmentContainersOperations as OperationClass
elif api_version == '2021-10-01-dataplanepreview':
from .v2021_10_01_dataplanepreview.operations import EnvironmentContainersOperations as OperationClass
elif api_version == '2022-02-01-preview':
from .v2022_02_01_preview.operations import EnvironmentContainersOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import EnvironmentContainersOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'environment_containers'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def environment_versions(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.EnvironmentVersionsOperations>`
* 2021-10-01-dataplanepreview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.EnvironmentVersionsOperations>`
* 2022-02-01-preview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.EnvironmentVersionsOperations>`
* 2022-05-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.EnvironmentVersionsOperations>`
"""
api_version = self._get_api_version('environment_versions')
if api_version == '2021-10-01':
from .v2021_10_01.operations import EnvironmentVersionsOperations as OperationClass
elif api_version == '2021-10-01-dataplanepreview':
from .v2021_10_01_dataplanepreview.operations import EnvironmentVersionsOperations as OperationClass
elif api_version == '2022-02-01-preview':
from .v2022_02_01_preview.operations import EnvironmentVersionsOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import EnvironmentVersionsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'environment_versions'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def events(self):
"""Instance depends on the API version:
* v1.0: :class:`EventsOperations<azure.mgmt.machinelearningservices.runhistory.operations.EventsOperations>`
"""
api_version = self._get_api_version('events')
if api_version == 'v1.0':
from .runhistory.operations import EventsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'events'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def experiments(self):
"""Instance depends on the API version:
* v1.0: :class:`ExperimentsOperations<azure.mgmt.machinelearningservices.runhistory.operations.ExperimentsOperations>`
"""
api_version = self._get_api_version('experiments')
if api_version == 'v1.0':
from .runhistory.operations import ExperimentsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'experiments'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def extensive_model(self):
"""Instance depends on the API version:
* 1.0.0: :class:`ExtensiveModelOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.ExtensiveModelOperations>`
"""
api_version = self._get_api_version('extensive_model')
if api_version == '1.0.0':
from .model_dataplane.operations import ExtensiveModelOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'extensive_model'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def get_operation_status(self):
"""Instance depends on the API version:
* 1.5.0: :class:`GetOperationStatusOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.GetOperationStatusOperations>`
"""
api_version = self._get_api_version('get_operation_status')
if api_version == '1.5.0':
from .dataset_dataplane.operations import GetOperationStatusOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'get_operation_status'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def jobs(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.JobsOperations>`
* 2022-02-01-preview: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.JobsOperations>`
* 2022-05-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.JobsOperations>`
"""
api_version = self._get_api_version('jobs')
if api_version == '2021-10-01':
from .v2021_10_01.operations import JobsOperations as OperationClass
elif api_version == '2022-02-01-preview':
from .v2022_02_01_preview.operations import JobsOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import JobsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'jobs'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def metric(self):
"""Instance depends on the API version:
* v1.0: :class:`MetricOperations<azure.mgmt.machinelearningservices.runhistory.operations.MetricOperations>`
"""
api_version = self._get_api_version('metric')
if api_version == 'v1.0':
from .runhistory.operations import MetricOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'metric'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def migration(self):
"""Instance depends on the API version:
* 1.0.0: :class:`MigrationOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.MigrationOperations>`
"""
api_version = self._get_api_version('migration')
if api_version == '1.0.0':
from .model_dataplane.operations import MigrationOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'migration'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def model_containers(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ModelContainersOperations>`
* 2021-10-01-dataplanepreview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ModelContainersOperations>`
* 2022-02-01-preview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ModelContainersOperations>`
* 2022-05-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ModelContainersOperations>`
"""
api_version = self._get_api_version('model_containers')
if api_version == '2021-10-01':
from .v2021_10_01.operations import ModelContainersOperations as OperationClass
elif api_version == '2021-10-01-dataplanepreview':
from .v2021_10_01_dataplanepreview.operations import ModelContainersOperations as OperationClass
elif api_version == '2022-02-01-preview':
from .v2022_02_01_preview.operations import ModelContainersOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import ModelContainersOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'model_containers'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def model_versions(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ModelVersionsOperations>`
* 2021-10-01-dataplanepreview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ModelVersionsOperations>`
* 2022-02-01-preview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ModelVersionsOperations>`
* 2022-05-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ModelVersionsOperations>`
"""
api_version = self._get_api_version('model_versions')
if api_version == '2021-10-01':
from .v2021_10_01.operations import ModelVersionsOperations as OperationClass
elif api_version == '2021-10-01-dataplanepreview':
from .v2021_10_01_dataplanepreview.operations import ModelVersionsOperations as OperationClass
elif api_version == '2022-02-01-preview':
from .v2022_02_01_preview.operations import ModelVersionsOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import ModelVersionsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'model_versions'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def models(self):
"""Instance depends on the API version:
* 1.0.0: :class:`ModelsOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.ModelsOperations>`
"""
api_version = self._get_api_version('models')
if api_version == '1.0.0':
from .model_dataplane.operations import ModelsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'models'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def online_deployments(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.OnlineDeploymentsOperations>`
* 2022-02-01-preview: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.OnlineDeploymentsOperations>`
* 2022-05-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.OnlineDeploymentsOperations>`
"""
api_version = self._get_api_version('online_deployments')
if api_version == '2021-10-01':
from .v2021_10_01.operations import OnlineDeploymentsOperations as OperationClass
elif api_version == '2022-02-01-preview':
from .v2022_02_01_preview.operations import OnlineDeploymentsOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import OnlineDeploymentsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'online_deployments'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def online_endpoints(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.OnlineEndpointsOperations>`
* 2022-02-01-preview: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.OnlineEndpointsOperations>`
* 2022-05-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.OnlineEndpointsOperations>`
"""
api_version = self._get_api_version('online_endpoints')
if api_version == '2021-10-01':
from .v2021_10_01.operations import OnlineEndpointsOperations as OperationClass
elif api_version == '2022-02-01-preview':
from .v2022_02_01_preview.operations import OnlineEndpointsOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import OnlineEndpointsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'online_endpoints'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def operations(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`Operations<azure.mgmt.machinelearningservices.v2021_10_01.operations.Operations>`
* 2022-01-01-preview: :class:`Operations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.Operations>`
* 2022-05-01: :class:`Operations<azure.mgmt.machinelearningservices.v2022_05_01.operations.Operations>`
"""
api_version = self._get_api_version('operations')
if api_version == '2021-10-01':
from .v2021_10_01.operations import Operations as OperationClass
elif api_version == '2022-01-01-preview':
from .v2022_01_01_preview.operations import Operations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import Operations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'operations'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def private_endpoint_connections(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.PrivateEndpointConnectionsOperations>`
* 2022-01-01-preview: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.PrivateEndpointConnectionsOperations>`
* 2022-05-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.PrivateEndpointConnectionsOperations>`
"""
api_version = self._get_api_version('private_endpoint_connections')
if api_version == '2021-10-01':
from .v2021_10_01.operations import PrivateEndpointConnectionsOperations as OperationClass
elif api_version == '2022-01-01-preview':
from .v2022_01_01_preview.operations import PrivateEndpointConnectionsOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import PrivateEndpointConnectionsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'private_endpoint_connections'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def private_link_resources(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.PrivateLinkResourcesOperations>`
* 2022-01-01-preview: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.PrivateLinkResourcesOperations>`
* 2022-05-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.PrivateLinkResourcesOperations>`
"""
api_version = self._get_api_version('private_link_resources')
if api_version == '2021-10-01':
from .v2021_10_01.operations import PrivateLinkResourcesOperations as OperationClass
elif api_version == '2022-01-01-preview':
from .v2022_01_01_preview.operations import PrivateLinkResourcesOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import PrivateLinkResourcesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'private_link_resources'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def quotas(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.QuotasOperations>`
* 2022-01-01-preview: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.QuotasOperations>`
* 2022-05-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.QuotasOperations>`
"""
api_version = self._get_api_version('quotas')
if api_version == '2021-10-01':
from .v2021_10_01.operations import QuotasOperations as OperationClass
elif api_version == '2022-01-01-preview':
from .v2022_01_01_preview.operations import QuotasOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import QuotasOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'quotas'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def registry_management_non_workspace(self):
"""Instance depends on the API version:
* v1.0: :class:`RegistryManagementNonWorkspaceOperations<azure.mgmt.machinelearningservices.registry_discovery.operations.RegistryManagementNonWorkspaceOperations>`
"""
api_version = self._get_api_version('registry_management_non_workspace')
if api_version == 'v1.0':
from .registry_discovery.operations import RegistryManagementNonWorkspaceOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'registry_management_non_workspace'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def run(self):
"""Instance depends on the API version:
* v1.0: :class:`RunOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunOperations>`
"""
api_version = self._get_api_version('run')
if api_version == 'v1.0':
from .runhistory.operations import RunOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'run'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def run_artifacts(self):
"""Instance depends on the API version:
* v1.0: :class:`RunArtifactsOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunArtifactsOperations>`
"""
api_version = self._get_api_version('run_artifacts')
if api_version == 'v1.0':
from .runhistory.operations import RunArtifactsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'run_artifacts'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def runs(self):
"""Instance depends on the API version:
* v1.0: :class:`RunsOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunsOperations>`
"""
api_version = self._get_api_version('runs')
if api_version == 'v1.0':
from .runhistory.operations import RunsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'runs'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def spans(self):
"""Instance depends on the API version:
* v1.0: :class:`SpansOperations<azure.mgmt.machinelearningservices.runhistory.operations.SpansOperations>`
"""
api_version = self._get_api_version('spans')
if api_version == 'v1.0':
from .runhistory.operations import SpansOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'spans'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def temporary_data_references(self):
"""Instance depends on the API version:
* 2021-10-01-dataplanepreview: :class:`TemporaryDataReferencesOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.TemporaryDataReferencesOperations>`
"""
api_version = self._get_api_version('temporary_data_references')
if api_version == '2021-10-01-dataplanepreview':
from .v2021_10_01_dataplanepreview.operations import TemporaryDataReferencesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'temporary_data_references'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def usages(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.UsagesOperations>`
* 2022-01-01-preview: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.UsagesOperations>`
* 2022-05-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.UsagesOperations>`
"""
api_version = self._get_api_version('usages')
if api_version == '2021-10-01':
from .v2021_10_01.operations import UsagesOperations as OperationClass
elif api_version == '2022-01-01-preview':
from .v2022_01_01_preview.operations import UsagesOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import UsagesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'usages'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def virtual_machine_sizes(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.VirtualMachineSizesOperations>`
* 2022-01-01-preview: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.VirtualMachineSizesOperations>`
* 2022-05-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.VirtualMachineSizesOperations>`
"""
api_version = self._get_api_version('virtual_machine_sizes')
if api_version == '2021-10-01':
from .v2021_10_01.operations import VirtualMachineSizesOperations as OperationClass
elif api_version == '2022-01-01-preview':
from .v2022_01_01_preview.operations import VirtualMachineSizesOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import VirtualMachineSizesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'virtual_machine_sizes'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def workspace_connections(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspaceConnectionsOperations>`
* 2022-01-01-preview: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspaceConnectionsOperations>`
* 2022-05-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspaceConnectionsOperations>`
"""
api_version = self._get_api_version('workspace_connections')
if api_version == '2021-10-01':
from .v2021_10_01.operations import WorkspaceConnectionsOperations as OperationClass
elif api_version == '2022-01-01-preview':
from .v2022_01_01_preview.operations import WorkspaceConnectionsOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import WorkspaceConnectionsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'workspace_connections'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def workspace_features(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspaceFeaturesOperations>`
* 2022-01-01-preview: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspaceFeaturesOperations>`
* 2022-05-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspaceFeaturesOperations>`
"""
api_version = self._get_api_version('workspace_features')
if api_version == '2021-10-01':
from .v2021_10_01.operations import WorkspaceFeaturesOperations as OperationClass
elif api_version == '2022-01-01-preview':
from .v2022_01_01_preview.operations import WorkspaceFeaturesOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import WorkspaceFeaturesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'workspace_features'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def workspaces(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspacesOperations>`
* 2022-01-01-preview: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspacesOperations>`
* 2022-05-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspacesOperations>`
"""
api_version = self._get_api_version('workspaces')
if api_version == '2021-10-01':
from .v2021_10_01.operations import WorkspacesOperations as OperationClass
elif api_version == '2022-01-01-preview':
from .v2022_01_01_preview.operations import WorkspacesOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import WorkspacesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'workspaces'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
def close(self):
self._client.close()
def __enter__(self):
self._client.__enter__()
return self
def __exit__(self, *exc_details):
self._client.__exit__(*exc_details)
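# A brief usage sketch (illustrative only; the client class and credential below are assumptions,
# not taken from this file):
#
#   from azure.identity import DefaultAzureCredential
#   client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), subscription_id,
#                                               api_version="2022-05-01")
#   client.workspaces   # resolves to the 2022-05-01 WorkspacesOperations listed above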
|
[
"azure.profiles.ProfileDefinition",
"azure.mgmt.core.ARMPipelineClient"
] |
[((2704, 3578), 'azure.profiles.ProfileDefinition', 'ProfileDefinition', (["{_PROFILE_TAG: {None: DEFAULT_API_VERSION, 'assets': '1.0.0',\n 'async_operations': 'v1.0', 'batch_job_deployment':\n '2020-09-01-dataplanepreview', 'batch_job_endpoint':\n '2020-09-01-dataplanepreview', 'data_call': '1.5.0', 'data_container':\n '1.5.0', 'data_version': '1.5.0', 'dataset_containers': '2021-10-01',\n 'dataset_controller_v2': '1.5.0', 'dataset_v2': '1.5.0',\n 'dataset_versions': '2021-10-01', 'datasets_v1': '1.5.0', 'delete':\n 'v1.0', 'events': 'v1.0', 'experiments': 'v1.0', 'extensive_model':\n '1.0.0', 'get_operation_status': '1.5.0', 'metric': 'v1.0', 'migration':\n '1.0.0', 'models': '1.0.0', 'registry_management_non_workspace': 'v1.0',\n 'run': 'v1.0', 'run_artifacts': 'v1.0', 'runs': 'v1.0', 'spans': 'v1.0',\n 'temporary_data_references': '2021-10-01-dataplanepreview'}}", "(_PROFILE_TAG + ' latest')"], {}), "({_PROFILE_TAG: {None: DEFAULT_API_VERSION, 'assets':\n '1.0.0', 'async_operations': 'v1.0', 'batch_job_deployment':\n '2020-09-01-dataplanepreview', 'batch_job_endpoint':\n '2020-09-01-dataplanepreview', 'data_call': '1.5.0', 'data_container':\n '1.5.0', 'data_version': '1.5.0', 'dataset_containers': '2021-10-01',\n 'dataset_controller_v2': '1.5.0', 'dataset_v2': '1.5.0',\n 'dataset_versions': '2021-10-01', 'datasets_v1': '1.5.0', 'delete':\n 'v1.0', 'events': 'v1.0', 'experiments': 'v1.0', 'extensive_model':\n '1.0.0', 'get_operation_status': '1.5.0', 'metric': 'v1.0', 'migration':\n '1.0.0', 'models': '1.0.0', 'registry_management_non_workspace': 'v1.0',\n 'run': 'v1.0', 'run_artifacts': 'v1.0', 'runs': 'v1.0', 'spans': 'v1.0',\n 'temporary_data_references': '2021-10-01-dataplanepreview'}}, \n _PROFILE_TAG + ' latest')\n", (2721, 3578), False, 'from azure.profiles import KnownProfiles, ProfileDefinition\n'), ((4342, 4409), 'azure.mgmt.core.ARMPipelineClient', 'ARMPipelineClient', ([], {'base_url': 'base_url', 'config': 'self._config'}), '(base_url=base_url, config=self._config, **kwargs)\n', (4359, 4409), False, 'from azure.mgmt.core import ARMPipelineClient\n')]
|
#!/usr/bin/python3 -B
# Copyright (c) 2020 <NAME>
# See README for details
# ================================================================
import sys
import os
import stat
import importlib
import pprint
from Gen_Bytevec_Mux_BSV import *
from Gen_Bytevec_Mux_C import *
pp = pprint.PrettyPrinter()
# ================================================================
def mkHelp_text (argv):
return "Usage: " + argv [0] + " <spec_file.py>" + '''
<spec_file.py> should be a Python source file defining three variables:
C_to_BSV_structs
BSV_to_C_structs
package_name
The first two are lists of 'struct specs', each of which has the following form:
{ 'struct_name': "Foo",
'fields' : [ { 'field_name' : 'fieldfoo', 'width_bits': width },
...
{ 'field_name' : 'fieldfoo', 'width_bits': width } ]}
Struct names should be globally unique.
Field names should be unique within a struct.
It is ok for a field-width to be 0 (e.g., unused 'user' field in an AXI channel).
Generates three output files:
package_name.bsv
package_name.h
package_name.c
The C/BSV code contains:
Struct defs for each struct, where each field has type:
BSV: Bit #(w) where w is the specified bit-width
C: uint8_t, uint16_t, uint32_t or uint64_t, as appropriate, if width <= 64 bits,
uint8_t [..] if wider
A 'state' struct containing queues and communication 'credits' for each struct type,
Functions for C application code to enqueue each type of send-struct into a pending queue
Functions for C application code to dequeue each type of receive-struct from a pending queue
A function for the C application code to encode an already
queued send-struct into a bytevec ready for transmission
A function for the C application code to decode a received
bytevec into a queued receive-struct
'''
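# A minimal example spec file (hypothetical struct and field names, shown only to illustrate the
# format described in the help text above):
#
#   package_name = "Example_Bytevec"
#
#   C_to_BSV_structs = [
#       { 'struct_name': "Req",
#         'fields'     : [ { 'field_name': 'addr', 'width_bits': 32 },
#                          { 'field_name': 'data', 'width_bits': 64 } ]} ]
#
#   BSV_to_C_structs = [
#       { 'struct_name': "Rsp",
#         'fields'     : [ { 'field_name': 'data', 'width_bits': 64 },
#                          { 'field_name': 'ok',   'width_bits': 1 } ]} ]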
# ================================================================
def main (argv = None):
if ((len (argv) != 2)
or (argv [1] == "-h")
or (argv [1] == "--help")):
sys.stdout.write (mkHelp_text (argv))
return 0
spec_filename = argv [1]
if spec_filename.endswith (".py"):
spec_filename = spec_filename [:-3]
try:
# Warning:
        #   This dynamic import of the spec file is fragile (it only works if both
        #   this Python executable and spec_filename.py are in the current dir).
        #   Study importlib examples where there is some notion of 'finding' from a path etc.
spec = importlib.import_module (spec_filename) # ("type_specs")
except:
sys.stdout.write ("ERROR: unable to import module '{:s}'\n".format (spec_filename))
sys.exit (1)
sys.stdout.write ("Spec file imported: '{:s}'\n".format (spec_filename))
package_name = spec.package_name
sys.stdout.write ("Package name: '{:s}'\n".format (package_name))
# Compute all necessary byte-widths for transmission and C structs
# Each of the 'field' structs extends with 'width_bytes' and 'dimension'
sys.stdout.write ("Computing all necessary byte-widths for packet formats and C structs.\n")
C_to_BSV_structs = [compute_width_bytes (s) for s in spec.C_to_BSV_structs]
BSV_to_C_structs = [compute_width_bytes (s) for s in spec.BSV_to_C_structs]
# Data structure for different parts of a packet: C to BSV
max_C_to_BSV_struct_bytes = max ([ s ['size_bytes'] for s in C_to_BSV_structs ])
C_to_BSV_packet_bytes = { 'packet_len' : 1,
'num_credits' : len (BSV_to_C_structs),
'channel_id' : 1,
'payload' : max_C_to_BSV_struct_bytes }
# Data structure for different parts of a packet: BSV to C
max_BSV_to_C_struct_bytes = max ([ s ['size_bytes'] for s in BSV_to_C_structs ])
BSV_to_C_packet_bytes = { 'packet_len' : 1,
'num_credits' : len (C_to_BSV_structs),
'channel_id' : 1,
'payload' : max_BSV_to_C_struct_bytes }
# Generate the .bsv file
Gen_BSV (spec_filename,
package_name,
C_to_BSV_structs, C_to_BSV_packet_bytes,
BSV_to_C_structs, BSV_to_C_packet_bytes)
# Generate .h and .c files
Gen_C (spec_filename,
package_name,
C_to_BSV_structs, C_to_BSV_packet_bytes,
BSV_to_C_structs, BSV_to_C_packet_bytes)
return 0
# ================================================================
# This is a struct spec -> struct spec function
# In struct_spec_in, each field spec has attributes 'field_name' and 'width_bits'
# In struct_spec_out, we add attributes 'width_bytes' and 'dimension'
# and we add struct attribute 'size_bytes' for total # of bytes
# Fields <= 64b wide, fit in C scalars (uint8_t/uint16_t/uint32_t/uint64_t)
# have dimension 1 and width_bytes of 1,2,4 or 8
# Larger fields are represented in C as uint8_t [N]
# have dimension N and width_bytes 1
def compute_width_bytes (struct_spec_in):
fields_out = []
size_bytes = 0
for f in struct_spec_in ['fields']:
field_name = f ['field_name']
width_bits = f ['width_bits']
width_bytes = 0
        dimension   = 1
if (width_bits == 0):
width_bytes = 0
elif (width_bits <= 8):
width_bytes = 1
elif (width_bits <= 16):
width_bytes = 2
elif (width_bits <= 32):
width_bytes = 4
elif (width_bits <= 64):
width_bytes = 8
else:
width_bytes = 1
dimension = (width_bits + 7) // 8
field_out = {'field_name' : field_name,
'width_bits' : width_bits,
'width_bytes': width_bytes,
'dimension' : dimension}
fields_out.append (field_out)
size_bytes += width_bytes * dimension
struct_spec_out = {'struct_name': struct_spec_in ['struct_name'],
'fields' : fields_out,
'size_bytes' : size_bytes}
return struct_spec_out
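# Worked example (illustrative), applying the rules above:
#   { 'field_name': 'addr', 'width_bits': 32 }
#     -> { 'field_name': 'addr', 'width_bits': 32, 'width_bytes': 4, 'dimension': 1 }
#   { 'field_name': 'blob', 'width_bits': 200 }
#     -> { 'field_name': 'blob', 'width_bits': 200, 'width_bytes': 1, 'dimension': 25 }
# and the struct's 'size_bytes' is the sum of width_bytes * dimension over its fields.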
# ================================================================
# For non-interactive invocations, call main() and use its return value
# as the exit code.
if __name__ == '__main__':
sys.exit (main (sys.argv))
|
[
"sys.stdout.write",
"pprint.PrettyPrinter",
"sys.exit",
"importlib.import_module"
] |
[((285, 307), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {}), '()\n', (305, 307), False, 'import pprint\n'), ((3243, 3339), 'sys.stdout.write', 'sys.stdout.write', (['"""Computing all necessary byte-widths for packet formats and C structs.\n"""'], {}), "(\n 'Computing all necessary byte-widths for packet formats and C structs.\\n')\n", (3259, 3339), False, 'import sys\n'), ((2715, 2753), 'importlib.import_module', 'importlib.import_module', (['spec_filename'], {}), '(spec_filename)\n', (2738, 2753), False, 'import importlib\n'), ((2887, 2898), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2895, 2898), False, 'import sys\n')]
|
# Copyright 2017-2020 Lawrence Livermore National Security, LLC and other
# CallFlow Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: MIT
import pandas as pd
class RankHistogram:
def __init__(self, state, name):
self.graph = state.new_gf.graph
self.df = state.new_gf.df
self.entire_df = state.new_entire_gf.df
self.name = name
self.entry_funcs = {}
self.result = self.run()
def run(self):
ret = []
module = self.name.split("=")[0]
func_in_module = self.df[self.df.module == module]["name"].unique().tolist()
for idx, func in enumerate(func_in_module):
ret.append(
{
"name": func,
"time (inc)": self.df.loc[self.df["name"] == func][
"time (inc)"
].tolist(),
"time": self.df.loc[self.df["name"] == func]["time"].tolist(),
"rank": self.df.loc[self.df["name"] == func]["rank"].tolist(),
"dataset": self.df.loc[self.df["name"] == func]["dataset"].tolist(),
}
)
ret_df = pd.DataFrame(ret)
return ret_df.to_json(orient="columns")
|
[
"pandas.DataFrame"
] |
[((1210, 1227), 'pandas.DataFrame', 'pd.DataFrame', (['ret'], {}), '(ret)\n', (1222, 1227), True, 'import pandas as pd\n')]
|
import os
def read_test_case(file_path):
"""
reads one test case from file.
returns contents of test case
Parameters
----------
file_path : str
the path of the test case file to read.
Returns
-------
list
a list of contents of the test case.
"""
file = open(file_path, "r")
number = int(file.readline().strip())
case = list()
for i in range(number):
case.append(file.readline().strip())
file.close()
return case
def load_test_cases(dir, file_name):
"""
    loads all test cases listed in an index file.
    returns a dict mapping each test-case name to its contents.
Parameters
----------
dir : str
directory of the files to load.
file_name : str
the name of the file that contains all
test case files name to read.
Returns
-------
dict
a dict of contents of all test cases.
"""
path = os.path.join(dir, file_name)
test_cases_files = open(path, "r")
test_cases = dict()
for file_name in test_cases_files.readlines():
case_name = file_name.strip().split(".")[0]
file_path = os.path.join(dir, file_name.strip())
test_cases[case_name] = read_test_case(file_path)
test_cases_files.close()
return test_cases
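# A minimal usage sketch (the directory and index-file names below are hypothetical): the index
# file is expected to list one test-case file name per line, and each test-case file starts with
# a count line followed by that many content lines.
if __name__ == "__main__":
    cases = load_test_cases("test_cases", "test_cases.txt")
    for case_name, lines in cases.items():
        print(case_name, len(lines))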
|
[
"os.path.join"
] |
[((927, 955), 'os.path.join', 'os.path.join', (['dir', 'file_name'], {}), '(dir, file_name)\n', (939, 955), False, 'import os\n')]
|
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
from os.path import isfile
def test_ok(aggregator, check, instance_ok):
assert isfile(instance_ok['created_at_file'])
check.check(instance_ok)
aggregator.assert_service_check('system.reboot_required', status=check.OK)
def test_not_present_ok(aggregator, check, instance_not_present):
assert not isfile(instance_not_present['created_at_file'])
check.check(instance_not_present)
aggregator.assert_service_check('system.reboot_required', status=check.OK)
def test_warning(aggregator, check, instance_warning):
check.check(instance_warning)
aggregator.assert_service_check('system.reboot_required', status=check.WARNING)
def test_critical(aggregator, check, instance_critical):
check.check(instance_critical)
aggregator.assert_service_check('system.reboot_required', status=check.CRITICAL)
|
[
"os.path.isfile"
] |
[((186, 224), 'os.path.isfile', 'isfile', (["instance_ok['created_at_file']"], {}), "(instance_ok['created_at_file'])\n", (192, 224), False, 'from os.path import isfile\n'), ((418, 465), 'os.path.isfile', 'isfile', (["instance_not_present['created_at_file']"], {}), "(instance_not_present['created_at_file'])\n", (424, 465), False, 'from os.path import isfile\n')]
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# author: <NAME>
# email: <EMAIL>
"""
Swin Transformer
1. A hierarchical feature-map construction similar to a CNN: the feature maps downsample the image
   by 4x, 8x and 16x, so the backbone can be reused for downstream tasks such as object detection
   and instance segmentation.
2. Uses the Windows Multi-Head Self-Attention (W-MSA) idea to cut computation: complexity drops
   from quadratic to linear in the number of patches, because multi-head self-attention is computed
   only inside each window. Compared with ViT, which applies MSA globally, this is much cheaper, but
   it also blocks information flow between windows; Shifted Windows Multi-Head Self-Attention
   (SW-MSA) lets information pass between neighboring windows.
A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows`
- https://arxiv.org/pdf/2103.14030
Code/weights from https://github.com/microsoft/Swin-Transformer
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
import numpy as np
from typing import Optional
from BasicModule import PatchMerging, DropPath, PatchEmbed
from BasicModule import Mlp
from BasicModule import window_partition, window_reverse
"""SwinT
window_size = 7
img_size = 224
Trained ImageNet-1k
depths->2,2,6,2
"""
def swin_tiny_patch4_window7_224(num_classes: int = 1000, **kwargs):
# trained ImageNet-1K
# https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth
model = SwinTransformer(in_chans=3,
patch_size=4,
window_size=7,
embed_dim=96,
depths=(2, 2, 6, 2),
num_heads=(3, 6, 12, 24),
num_classes=num_classes,
**kwargs)
return model
"""Swin-S
depths->2,2,18,2
"""
def swin_small_patch4_window7_224(num_classes: int = 1000, **kwargs):
# trained ImageNet-1K
# https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth
model = SwinTransformer(in_chans=3,
patch_size=4,
window_size=7,
embed_dim=96,
depths=(2, 2, 18, 2),
num_heads=(3, 6, 12, 24),
num_classes=num_classes,
**kwargs)
return model
"""Swin-B"""
def swin_base_patch4_window7_224(num_classes: int = 1000, **kwargs):
# trained ImageNet-1K
# https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224.pth
model = SwinTransformer(in_chans=3,
patch_size=4,
window_size=7,
embed_dim=128,
depths=(2, 2, 18, 2),
num_heads=(4, 8, 16, 32),
num_classes=num_classes,
**kwargs)
return model
def swin_base_patch4_window12_384(num_classes: int = 1000, **kwargs):
# trained ImageNet-1K
# https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384.pth
model = SwinTransformer(in_chans=3,
patch_size=4,
window_size=12,
embed_dim=128,
depths=(2, 2, 18, 2),
num_heads=(4, 8, 16, 32),
num_classes=num_classes,
**kwargs)
return model
def swin_base_patch4_window7_224_in22k(num_classes: int = 21841, **kwargs):
# trained ImageNet-22K
# https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth
model = SwinTransformer(in_chans=3,
patch_size=4,
window_size=7,
embed_dim=128,
depths=(2, 2, 18, 2),
num_heads=(4, 8, 16, 32),
num_classes=num_classes,
**kwargs)
return model
def swin_base_patch4_window12_384_in22k(num_classes: int = 21841, **kwargs):
# trained ImageNet-22K
# https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth
model = SwinTransformer(in_chans=3,
patch_size=4,
window_size=12,
embed_dim=128,
depths=(2, 2, 18, 2),
num_heads=(4, 8, 16, 32),
num_classes=num_classes,
**kwargs)
return model
"""Swin-Large"""
def swin_large_patch4_window7_224_in22k(num_classes: int = 21841, **kwargs):
# trained ImageNet-22K
# https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22k.pth
model = SwinTransformer(in_chans=3,
patch_size=4,
window_size=7,
embed_dim=192,
depths=(2, 2, 18, 2),
num_heads=(6, 12, 24, 48),
num_classes=num_classes,
**kwargs)
return model
def swin_large_patch4_window12_384_in22k(num_classes: int = 21841, **kwargs):
# trained ImageNet-22K
# https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth
model = SwinTransformer(in_chans=3,
patch_size=4,
window_size=12,
embed_dim=192,
depths=(2, 2, 18, 2),
num_heads=(6, 12, 24, 48),
num_classes=num_classes,
**kwargs)
return model
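# A minimal usage sketch (not part of the original file; assumes the BasicModule helpers imported
# above are available and that any pretrained weights are loaded separately):
#
#   model = swin_tiny_patch4_window7_224(num_classes=1000)
#   dummy = torch.randn(1, 3, 224, 224)
#   logits = model(dummy)   # expected shape: [1, 1000]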
"""Swin Transformer"""
class SwinTransformer(nn.Module):
"""Swin Transformer结构
这里有个不同之处,就是每个Stage Layer中,
"""
def __init__(self, patch_size=4, in_chans=3, num_classes=1000,
embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24),
window_size=7, mlp_ratio=4., qkv_bias=True,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
norm_layer=nn.LayerNorm, patch_norm=True,
use_checkpoint=False, **kwargs):
super().__init__()
self.num_classes = num_classes
self.num_layers = len(depths)
self.embed_dim = embed_dim
self.patch_norm = patch_norm
        # Channels (C) of the output feature map
# H/4 x W/4 x 48 -> H/4 x W/4 x C(Stage1) -> H/8 x W/8 x 2C(Stage2) -> H/16 x W/16 x 4C(stage3) ...
self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
self.mlp_ratio = mlp_ratio
        # split the image into non-overlapping patches
        # input: (Bs, 224, 224, 3)
        # output: (e.g. patch_size=4: Bs, 56x56, 4x4x3)
self.patch_embed = PatchEmbed(
patch_size=patch_size, in_c=in_chans, embed_dim=embed_dim,
norm_layer=norm_layer if self.patch_norm else None)
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
# Drop Path
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
        # build layers
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
            # note: the stage built here differs slightly from the figure in the paper:
            # a stage does not contain its own patch_merging layer, but the next stage's
layers = BasicLayer(dim=int(embed_dim * 2 ** i_layer),
depth=depths[i_layer],
num_heads=num_heads[i_layer],
window_size=window_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=qkv_bias,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
use_checkpoint=use_checkpoint)
self.layers.append(layers)
self.norm = norm_layer(self.num_features)
self.avgpool = nn.AdaptiveAvgPool1d(1)
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
nn.init.trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def forward(self,x):
# x:[B, L, C]
x,H,W = self.patch_embed(x)
x = self.pos_drop(x)
        # hierarchical multi-stage feature extraction
for layer in self.layers:
x,H,W = layer(x,H,W)
x = self.norm(x) # [B, L, C]
x = self.avgpool(x.transpose(1, 2)) # [B, C, 1]
x = torch.flatten(x, 1)
        x = self.head(x)  # classification head
return x
"""一个Stage内的基本SwinTransformer模块"""
class BasicLayer(nn.Module):
"""
    One stage of the Swin Transformer: a stack of SwinTransformerBlocks followed by an
    optional patch-merging downsample.
"""
def __init__(self, dim, depth, num_heads, window_size,
mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0.,
drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):
"""
Args:
dim (int): Number of input channels.
            depth (int): Number of blocks.
num_heads (int): Number of attention heads.
window_size (int): Local window size.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
"""
super(BasicLayer, self).__init__()
self.dim = dim
self.depth = depth
self.window_size = window_size
        self.use_checkpoint = use_checkpoint  # gradient checkpointing to save memory
self.shift_size = window_size // 2
        # build the SwinTransformer blocks
self.blocks = nn.ModuleList([
SwinTransformerBlock(
dim=dim,
num_heads=num_heads,
window_size=window_size,
                shift_size=0 if (i % 2 == 0) else self.shift_size,  # even i: W-MSA, odd i: SW-MSA, as in the paper, so neighboring windows can communicate
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
drop=drop,
attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer)
for i in range(depth)])
        # patch merging layer, similar to a pooling downsample
if downsample is not None:
self.downsample = downsample(dim=dim, norm_layer=norm_layer)
else:
self.downsample = None
def create_mask(self,x,H,W):
"""
        After the cyclic shift for SW-MSA, the top-left window (the central window before the shift)
        contains tokens that were originally adjacent, so all of them may attend to each other. The
        remaining windows mix tokens that came from far-apart regions; those tokens should not attend
        to each other, so an attention mask is built to block them. (The original walkthrough uses a
        14x14-patch feature map as the example.)
H: Feature Map Height
W: Feature Map Width
x: Feature Map
"""
        # compute the attention mask for SW-MSA
        # make sure Hp and Wp are integer multiples of window_size
Hp = int(np.ceil(H / self.window_size)) * self.window_size
Wp = int(np.ceil(W / self.window_size)) * self.window_size
        # same layout as the feature map, so window_partition can be applied afterwards
img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device) # [1, Hp, Wp, 1]
        # slice the padded map into regions used to build the mask
h_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
w_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
        # assign a region id to each slice
cnt = 0
for h in h_slices:
for w in w_slices:
img_mask[:, h, w, :] = cnt
cnt += 1
        # partition the region-id map into (shifted) windows
mask_windows = window_partition(img_mask, self.window_size) # [nW, Mh, Mw, 1]
mask_windows = mask_windows.view(-1, self.window_size * self.window_size) # [nW, Mh*Mw]
        # build the mask: non-zero wherever two tokens come from different regions
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) # [nW, 1, Mh*Mw] - [nW, Mh*Mw, 1]
# [nW, Mh*Mw, Mh*Mw]
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
return attn_mask
def forward(self,x,H,W):
        # [nW, Mh*Mw, Mh*Mw]  nW: number of windows
attn_mask = self.create_mask(x,H,W)
for blk in self.blocks:
blk.H, blk.W = H, W # self.H = H, self.W = W
if not torch.jit.is_scripting() and self.use_checkpoint:
x = checkpoint.checkpoint(blk, x, attn_mask)
else:
x = blk(x, attn_mask)
if self.downsample is not None:
x = self.downsample(x, H, W)
            H, W = (H + 1) // 2, (W + 1) // 2  # after downsampling, H and W are halved
return x, H, W
"""一个基本的SwinTransformerBlock的构成Model"""
class SwinTransformerBlock(nn.Module):
"""
    A Swin Transformer block:
    feature map input -> LayerNorm -> W-MSA/SW-MSA -> (+ skip) -> LayerNorm -> MLP -> (+ skip)
"""
def __init__(self, dim, num_heads, window_size=7, shift_size=0,
mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0.,
act_layer=nn.GELU, norm_layer=nn.LayerNorm):
"""
        Args:
dim (int): Number of input channels.
num_heads (int): Number of attention heads.
window_size (int): Window size.
shift_size (int): Shift size for SW-MSA.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
super(SwinTransformerBlock, self).__init__()
self.dim = dim
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.mlp_ratio = mlp_ratio
        # shift_size must be smaller than window_size
assert 0 <= self.shift_size < self.window_size, "shift_size must in 0~window_size"
# LN1
self.norm1 = norm_layer(dim)
# Windows_Multi-head Self Attention
self.attn = WindowsAttention(
dim, window_size=(self.window_size, self.window_size), num_heads=num_heads, qkv_bias=qkv_bias,
attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
# LN2
self.norm2 = norm_layer(dim)
# MLP Layer
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x, attn_mask):
        # feature map height & width
H, W = self.H, self.W
# Batch, length, channel
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
# Skip Connect
shortcut = x
x = self.norm1(x)
# reshape feature map
x = x.view(B, H, W, C)
        # pad the feature map so H and W are integer multiples of window_size
pad_l = 0
pad_t = 0
pad_r = (self.window_size - W % self.window_size) % self.window_size
pad_b = (self.window_size - H % self.window_size) % self.window_size
x = F.pad(x,(0,0,pad_l,pad_r,pad_t,pad_b))
        # Hp, Wp: height and width of the padded feature map
_, Hp, Wp, _ = x.shape
        # W-MSA or SW-MSA?
# cyclic shift
if self.shift_size > 0:
shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
else:
shifted_x = x
attn_mask = None
        # window partition
x_windows = window_partition(shifted_x,self.window_size) #[nW*B, Mh, Mw, C]
x_windows = x_windows.view(-1, self.window_size*self.window_size,C) # [nW*B, Mh*Mw, C]
# W-MSA / SW-MSA
attn_windows = self.attn(x_windows, mask=attn_mask) # [nW*B, Mh*Mw, C]
        # merge the windows back into a feature map
attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) # [nW*B, Mh, Mw, C]
shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # [B, H', W', C]
        # for SW-MSA, reverse the cyclic shift
if self.shift_size > 0:
x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
x = shifted_x
        # remove padding
        if pad_r > 0 or pad_b > 0:
            # strip the rows/columns that were padded before window partition
x = x[:, :H, :W, :].contiguous()
x = x.view(B,H*W,C)
# FFN
        # two skip connections
x = shortcut + self.drop_path(x)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class WindowsAttention(nn.Module):
"""
Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both shifted and non-shifted windows.
    In ViT, attention is global and its cost grows quadratically with image size, which becomes very
    expensive for downstream vision tasks, especially dense-prediction tasks or very large images.
    Swin Transformer uses window-based attention so that the cost scales linearly with image size.
    General model:
        W-MSA / SW-MSA
    Without the shift, each patch can only interact with the other patches inside its own window;
    after the shift, a patch interacts with the patches of a new window, whose members come from
    different windows of the previous layer. This is the cross-window connection described by the
    authors: windows can now exchange information. Combined with the later patch-merging layers,
    each patch already has a large receptive field by the last few Transformer stages.
"""
def __init__(self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0.):
"""
Args:
dim (int): Number of input channels.
window_size (tuple[int]): The height and width of the window.
num_heads (int): Number of attention heads.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
"""
# Mh: Windows Size Height
# Mw: Windows Size Width
# nH: num_heads
super(WindowsAttention, self).__init__()
self.dim = dim
self.window_size = window_size # [Mh, Mw]
self.num_heads = num_heads
        head_dim = dim // num_heads  # dim of each head
self.scale = head_dim ** -0.5 # scale
        # parameter table that stores the relative position bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # [2*Mh-1 * 2*Mw-1, nH]
        # get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w], indexing="ij")) # [2, Mh, Mw]
coords_flatten = torch.flatten(coords, 1) # [2, Mh*Mw]
# [2, Mh*Mw, 1] - [2, 1, Mh*Mw]
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # [2, Mh*Mw, Mh*Mw]
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # [Mh*Mw, Mh*Mw, 2]
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1) # [Mh*Mw, Mh*Mw]
        # register_buffer: saved and loaded with the model state, but not a learnable parameter
self.register_buffer("relative_position_index", relative_position_index)
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
nn.init.trunc_normal_(self.relative_position_bias_table, std=.02)
self.softmax = nn.Softmax(dim=-1)
def forward(self,x,mask=None):
"""
Args:
x: input features with shape of (num_windows*B, Mh*Mw, C)
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
        The batch dimension of x is num_windows * batch_size;
        attention is computed inside each window.
"""
# [batch_size*num_windows, Mh*Mw, total_embed_dim]
B_, N, C = x.shape
# qkv(): -> [batch_size*num_windows, Mh*Mw, 3 * total_embed_dim]
# reshape: -> [batch_size*num_windows, Mh*Mw, 3, num_heads, embed_dim_per_head]
# permute: -> [3, batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head]
qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
# [batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head]
q,k,v = qkv.unbind(0)
# QK^T/sqrt(d)
# transpose: -> [batch_size*num_windows, num_heads, embed_dim_per_head, Mh*Mw]
# @: multiply -> [batch_size*num_windows, num_heads, Mh*Mw, Mh*Mw]
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
# QK^T/sqrt(d) + B
# B:
# relative_position_bias_table.view: [Mh*Mw*Mh*Mw,nH] -> [Mh*Mw,Mh*Mw,nH]
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1)
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # [nH, Mh*Mw, Mh*Mw]
# [Bs*nW, nH, Mh*Mw, Mh*Mw]
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
nW = mask.shape[0]
            # SW-MSA needs the attention mask
# mask: [nW, Mh*Mw, Mh*Mw]
# attn.view: [batch_size, num_windows, num_heads, Mh*Mw, Mh*Mw]
# # mask.unsqueeze: [1, nW, 1, Mh*Mw, Mh*Mw]
attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
# @: multiply -> [batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head]
# transpose: -> [batch_size*num_windows, Mh*Mw, num_heads, embed_dim_per_head]
# reshape: -> [batch_size*num_windows, Mh*Mw, total_embed_dim]
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
if __name__ == "__main__":
pass
|
[
"torch.nn.Dropout",
"BasicModule.Mlp",
"torch.jit.is_scripting",
"torch.roll",
"torch.nn.init.constant_",
"torch.nn.Softmax",
"torch.arange",
"torch.nn.functional.pad",
"torch.flatten",
"torch.nn.Linear",
"BasicModule.window_reverse",
"torch.zeros",
"BasicModule.DropPath",
"numpy.ceil",
"BasicModule.window_partition",
"torch.nn.ModuleList",
"BasicModule.PatchEmbed",
"torch.utils.checkpoint.checkpoint",
"torch.nn.Identity",
"torch.nn.AdaptiveAvgPool1d",
"torch.nn.init.trunc_normal_",
"torch.meshgrid"
] |
[((6893, 7018), 'BasicModule.PatchEmbed', 'PatchEmbed', ([], {'patch_size': 'patch_size', 'in_c': 'in_chans', 'embed_dim': 'embed_dim', 'norm_layer': '(norm_layer if self.patch_norm else None)'}), '(patch_size=patch_size, in_c=in_chans, embed_dim=embed_dim,\n norm_layer=norm_layer if self.patch_norm else None)\n', (6903, 7018), False, 'from BasicModule import PatchMerging, DropPath, PatchEmbed\n'), ((7064, 7087), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'drop_rate'}), '(p=drop_rate)\n', (7074, 7087), True, 'import torch.nn as nn\n'), ((7294, 7309), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (7307, 7309), True, 'import torch.nn as nn\n'), ((8338, 8361), 'torch.nn.AdaptiveAvgPool1d', 'nn.AdaptiveAvgPool1d', (['(1)'], {}), '(1)\n', (8358, 8361), True, 'import torch.nn as nn\n'), ((9178, 9197), 'torch.flatten', 'torch.flatten', (['x', '(1)'], {}), '(x, 1)\n', (9191, 9197), False, 'import torch\n'), ((12159, 12203), 'torch.zeros', 'torch.zeros', (['(1, Hp, Wp, 1)'], {'device': 'x.device'}), '((1, Hp, Wp, 1), device=x.device)\n', (12170, 12203), False, 'import torch\n'), ((12794, 12838), 'BasicModule.window_partition', 'window_partition', (['img_mask', 'self.window_size'], {}), '(img_mask, self.window_size)\n', (12810, 12838), False, 'from BasicModule import window_partition, window_reverse\n'), ((15934, 16022), 'BasicModule.Mlp', 'Mlp', ([], {'in_features': 'dim', 'hidden_features': 'mlp_hidden_dim', 'act_layer': 'act_layer', 'drop': 'drop'}), '(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer,\n drop=drop)\n', (15937, 16022), False, 'from BasicModule import Mlp\n'), ((16625, 16669), 'torch.nn.functional.pad', 'F.pad', (['x', '(0, 0, pad_l, pad_r, pad_t, pad_b)'], {}), '(x, (0, 0, pad_l, pad_r, pad_t, pad_b))\n', (16630, 16669), True, 'import torch.nn.functional as F\n'), ((17057, 17102), 'BasicModule.window_partition', 'window_partition', (['shifted_x', 'self.window_size'], {}), '(shifted_x, self.window_size)\n', (17073, 17102), False, 'from BasicModule import window_partition, window_reverse\n'), ((17474, 17528), 'BasicModule.window_reverse', 'window_reverse', (['attn_windows', 'self.window_size', 'Hp', 'Wp'], {}), '(attn_windows, self.window_size, Hp, Wp)\n', (17488, 17528), False, 'from BasicModule import window_partition, window_reverse\n'), ((20037, 20070), 'torch.arange', 'torch.arange', (['self.window_size[0]'], {}), '(self.window_size[0])\n', (20049, 20070), False, 'import torch\n'), ((20090, 20123), 'torch.arange', 'torch.arange', (['self.window_size[1]'], {}), '(self.window_size[1])\n', (20102, 20123), False, 'import torch\n'), ((20246, 20270), 'torch.flatten', 'torch.flatten', (['coords', '(1)'], {}), '(coords, 1)\n', (20259, 20270), False, 'import torch\n'), ((20996, 21034), 'torch.nn.Linear', 'nn.Linear', (['dim', '(dim * 3)'], {'bias': 'qkv_bias'}), '(dim, dim * 3, bias=qkv_bias)\n', (21005, 21034), True, 'import torch.nn as nn\n'), ((21060, 21081), 'torch.nn.Dropout', 'nn.Dropout', (['attn_drop'], {}), '(attn_drop)\n', (21070, 21081), True, 'import torch.nn as nn\n'), ((21102, 21121), 'torch.nn.Linear', 'nn.Linear', (['dim', 'dim'], {}), '(dim, dim)\n', (21111, 21121), True, 'import torch.nn as nn\n'), ((21147, 21168), 'torch.nn.Dropout', 'nn.Dropout', (['proj_drop'], {}), '(proj_drop)\n', (21157, 21168), True, 'import torch.nn as nn\n'), ((21178, 21244), 'torch.nn.init.trunc_normal_', 'nn.init.trunc_normal_', (['self.relative_position_bias_table'], {'std': '(0.02)'}), '(self.relative_position_bias_table, std=0.02)\n', (21199, 21244), True, 
'import torch.nn as nn\n'), ((21267, 21285), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (21277, 21285), True, 'import torch.nn as nn\n'), ((8382, 8423), 'torch.nn.Linear', 'nn.Linear', (['self.num_features', 'num_classes'], {}), '(self.num_features, num_classes)\n', (8391, 8423), True, 'import torch.nn as nn\n'), ((8448, 8461), 'torch.nn.Identity', 'nn.Identity', ([], {}), '()\n', (8459, 8461), True, 'import torch.nn as nn\n'), ((8584, 8625), 'torch.nn.init.trunc_normal_', 'nn.init.trunc_normal_', (['m.weight'], {'std': '(0.02)'}), '(m.weight, std=0.02)\n', (8605, 8625), True, 'import torch.nn as nn\n'), ((15739, 15758), 'BasicModule.DropPath', 'DropPath', (['drop_path'], {}), '(drop_path)\n', (15747, 15758), False, 'from BasicModule import PatchMerging, DropPath, PatchEmbed\n'), ((15782, 15795), 'torch.nn.Identity', 'nn.Identity', ([], {}), '()\n', (15793, 15795), True, 'import torch.nn as nn\n'), ((16852, 16923), 'torch.roll', 'torch.roll', (['x'], {'shifts': '(-self.shift_size, -self.shift_size)', 'dims': '(1, 2)'}), '(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))\n', (16862, 16923), False, 'import torch\n'), ((17658, 17735), 'torch.roll', 'torch.roll', (['shifted_x'], {'shifts': '(self.shift_size, self.shift_size)', 'dims': '(1, 2)'}), '(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))\n', (17668, 17735), False, 'import torch\n'), ((19813, 19888), 'torch.zeros', 'torch.zeros', (['((2 * window_size[0] - 1) * (2 * window_size[1] - 1))', 'num_heads'], {}), '((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)\n', (19824, 19888), False, 'import torch\n'), ((20153, 20204), 'torch.meshgrid', 'torch.meshgrid', (['[coords_h, coords_w]'], {'indexing': '"""ij"""'}), "([coords_h, coords_w], indexing='ij')\n", (20167, 20204), False, 'import torch\n'), ((8705, 8733), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (8722, 8733), True, 'import torch.nn as nn\n'), ((8788, 8816), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (8805, 8816), True, 'import torch.nn as nn\n'), ((8829, 8861), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.weight', '(1.0)'], {}), '(m.weight, 1.0)\n', (8846, 8861), True, 'import torch.nn as nn\n'), ((11967, 11996), 'numpy.ceil', 'np.ceil', (['(H / self.window_size)'], {}), '(H / self.window_size)\n', (11974, 11996), True, 'import numpy as np\n'), ((12034, 12063), 'numpy.ceil', 'np.ceil', (['(W / self.window_size)'], {}), '(W / self.window_size)\n', (12041, 12063), True, 'import numpy as np\n'), ((13536, 13576), 'torch.utils.checkpoint.checkpoint', 'checkpoint.checkpoint', (['blk', 'x', 'attn_mask'], {}), '(blk, x, attn_mask)\n', (13557, 13576), True, 'import torch.utils.checkpoint as checkpoint\n'), ((13466, 13490), 'torch.jit.is_scripting', 'torch.jit.is_scripting', ([], {}), '()\n', (13488, 13490), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
import pdb, importlib, inspect, time, datetime, json
# from PyFin.api import advanceDateByCalendar
# from data.polymerize import DBPolymerize
from data.storage_engine import StorageEngine
import time
import pandas as pd
import numpy as np
from datetime import timedelta, datetime
from financial import factor_per_share_indicators
from data.model import BalanceMRQ, BalanceTTM, BalanceReport
from data.model import CashFlowTTM, CashFlowReport
from data.model import IndicatorReport
from data.model import IncomeReport, IncomeTTM
from vision.table.valuation import Valuation
from vision.db.signletion_engine import *
from data.sqlengine import sqlEngine
# pd.set_option('display.max_columns', None)
# pd.set_option('display.max_rows', None)
# from ultron.cluster.invoke.cache_data import cache_data
class CalcEngine(object):
def __init__(self, name, url,
methods=[{'packet': 'financial.factor_pre_share_indicators', 'class': 'FactorPerShareIndicators'}, ]):
self._name = name
self._methods = methods
self._url = url
def get_trade_date(self, trade_date, n, days=365):
"""
        Get the time point n years before the given date; it must be a trading day, and if it
        is not, take the most recent earlier trading day.
        :param days:
        :param trade_date: current trading day
        :param n:
:return:
"""
syn_util = SyncUtil()
trade_date_sets = syn_util.get_all_trades('001002', '19900101', trade_date)
trade_date_sets = trade_date_sets['TRADEDATE'].values
time_array = datetime.strptime(str(trade_date), "%Y%m%d")
time_array = time_array - timedelta(days=days) * n
date_time = int(datetime.strftime(time_array, "%Y%m%d"))
if str(date_time) < min(trade_date_sets):
# print('date_time %s is out of trade_date_sets' % date_time)
return str(date_time)
else:
while str(date_time) not in trade_date_sets:
date_time = date_time - 1
# print('trade_date pre %s year %s' % (n, date_time))
return str(date_time)
def _func_sets(self, method):
        # filter out private and protected functions
return list(filter(lambda x: not x.startswith('_') and callable(getattr(method, x)), dir(method)))
def loading_data(self, trade_date):
"""
        Load the base data.
        For the given trading day, fetch the fundamental data of all stocks on that day.
        :param trade_date: trading day
        :return:
        """
        # convert the date format
time_array = datetime.strptime(trade_date, "%Y-%m-%d")
trade_date = datetime.strftime(time_array, '%Y%m%d')
        # read the factors currently involved
engine = sqlEngine()
columns = ['COMPCODE', 'PUBLISHDATE', 'ENDDATE', 'symbol', 'company_id', 'trade_date']
# Report data
        cash_flow_sets = engine.fetch_fundamentals_pit_extend_company_id(
            CashFlowReport,
            [CashFlowReport.FINALCASHBALA,  # cash and cash equivalents at period end
             ], dates=[trade_date])
for col in columns:
if col in list(cash_flow_sets.keys()):
cash_flow_sets = cash_flow_sets.drop(col, axis=1)
        cash_flow_sets = cash_flow_sets.rename(columns={
            'FINALCASHBALA': 'cash_and_equivalents_at_end',
        })
        income_sets = engine.fetch_fundamentals_pit_extend_company_id(
            IncomeReport,
            [IncomeReport.BIZINCO,      # operating revenue
             IncomeReport.BIZTOTINCO,   # total operating revenue
             IncomeReport.PERPROFIT,    # operating profit
             IncomeReport.DILUTEDEPS,   # diluted earnings per share
             ], dates=[trade_date])
for col in columns:
if col in list(income_sets.keys()):
income_sets = income_sets.drop(col, axis=1)
        income_sets = income_sets.rename(columns={
            'BIZINCO': 'operating_revenue',
            'BIZTOTINCO': 'total_operating_revenue',
            'PERPROFIT': 'operating_profit',
            'DILUTEDEPS': 'diluted_eps',
        })
        balance_sets = engine.fetch_fundamentals_pit_extend_company_id(
            BalanceReport,
            [BalanceReport.PARESHARRIGH,  # equity attributable to owners of the parent company
             BalanceReport.CAPISURP,      # capital reserve
             BalanceReport.RESE,          # surplus reserve
             BalanceReport.UNDIPROF,      # retained profit
             ], dates=[trade_date])
for col in columns:
if col in list(balance_sets.keys()):
balance_sets = balance_sets.drop(col, axis=1)
        balance_sets = balance_sets.rename(columns={
            'PARESHARRIGH': 'total_owner_equities',
            'CAPISURP': 'capital_reserve_fund',
            'RESE': 'surplus_reserve_fund',
            'UNDIPROF': 'retained_profit',
        })
        indicator_sets = engine.fetch_fundamentals_pit_extend_company_id(
            IndicatorReport,
            [IndicatorReport.FCFE,      # free cash flow to equity
             IndicatorReport.FCFF,      # free cash flow to the firm
             IndicatorReport.EPSBASIC,  # basic earnings per share
             IndicatorReport.DPS,       # dividend per share (pre-tax)
             ], dates=[trade_date])
for col in columns:
if col in list(indicator_sets.keys()):
indicator_sets = indicator_sets.drop(col, axis=1)
        indicator_sets = indicator_sets.rename(columns={
            'FCFE': 'shareholder_fcfps',
            'FCFF': 'enterprise_fcfps',
            'EPSBASIC': 'basic_eps',
            'DPS': 'dividend_receivable',
        })
# TTM data
        cash_flow_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(
            CashFlowTTM,
            [CashFlowTTM.CASHNETI,  # net increase in cash and cash equivalents
             CashFlowTTM.MANANETR,  # net cash flow from operating activities
             ], dates=[trade_date])
for col in columns:
if col in list(cash_flow_ttm_sets.keys()):
cash_flow_ttm_sets = cash_flow_ttm_sets.drop(col, axis=1)
        cash_flow_ttm_sets = cash_flow_ttm_sets.rename(columns={
            'CASHNETI': 'cash_equivalent_increase_ttm',
            'MANANETR': 'net_operate_cash_flow_ttm',
        })
        income_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(
            IncomeTTM,
            [IncomeTTM.PARENETP,    # net profit attributable to owners of the parent company
             IncomeTTM.PERPROFIT,   # operating profit
             IncomeTTM.BIZINCO,     # operating revenue
             IncomeTTM.BIZTOTINCO,  # total operating revenue
             ], dates=[trade_date])
for col in columns:
if col in list(income_ttm_sets.keys()):
income_ttm_sets = income_ttm_sets.drop(col, axis=1)
        income_ttm_sets = income_ttm_sets.rename(columns={
            'PARENETP': 'np_parent_company_owners_ttm',
            'PERPROFIT': 'operating_profit_ttm',
            'BIZINCO': 'operating_revenue_ttm',
            'BIZTOTINCO': 'total_operating_revenue_ttm',
        })
column = ['trade_date']
valuation_data = get_fundamentals(query(Valuation.security_code,
Valuation.trade_date,
Valuation.capitalization,
).filter(Valuation.trade_date.in_([trade_date])))
for col in column:
if col in list(valuation_data.keys()):
valuation_data = valuation_data.drop(col, axis=1)
valuation_sets = pd.merge(cash_flow_sets, income_sets, on='security_code').reindex()
valuation_sets = pd.merge(balance_sets, valuation_sets, on='security_code').reindex()
valuation_sets = pd.merge(indicator_sets, valuation_sets, on='security_code').reindex()
valuation_sets = pd.merge(cash_flow_ttm_sets, valuation_sets, on='security_code').reindex()
valuation_sets = pd.merge(income_ttm_sets, valuation_sets, on='security_code').reindex()
valuation_sets = pd.merge(valuation_data, valuation_sets, on='security_code').reindex()
return valuation_sets
def process_calc_factor(self, trade_date, valuation_sets):
per_share = factor_per_share_indicators.FactorPerShareIndicators()
factor_share_indicators = pd.DataFrame()
factor_share_indicators['security_code'] = valuation_sets['security_code']
valuation_sets = valuation_sets.set_index('security_code')
factor_share_indicators = factor_share_indicators.set_index('security_code')
factor_share_indicators = per_share.EPS(valuation_sets, factor_share_indicators)
factor_share_indicators = per_share.DilutedEPSTTM(valuation_sets, factor_share_indicators)
factor_share_indicators = per_share.CashEquPS(valuation_sets, factor_share_indicators)
factor_share_indicators = per_share.DivPS(valuation_sets, factor_share_indicators)
factor_share_indicators = per_share.EPSTTM(valuation_sets, factor_share_indicators)
factor_share_indicators = per_share.NetAssetPS(valuation_sets, factor_share_indicators)
factor_share_indicators = per_share.TotalRevPSTTM(valuation_sets, factor_share_indicators)
factor_share_indicators = per_share.TotalRevPS(valuation_sets, factor_share_indicators)
factor_share_indicators = per_share.OptRevPSTTM(valuation_sets, factor_share_indicators)
factor_share_indicators = per_share.OptRevPS(valuation_sets, factor_share_indicators)
factor_share_indicators = per_share.OptProfitPSTTM(valuation_sets, factor_share_indicators)
factor_share_indicators = per_share.OptProfitPS(valuation_sets, factor_share_indicators)
factor_share_indicators = per_share.CapticalSurplusPS(valuation_sets, factor_share_indicators)
factor_share_indicators = per_share.SurplusReservePS(valuation_sets, factor_share_indicators)
factor_share_indicators = per_share.UndividedProfitPS(valuation_sets, factor_share_indicators)
factor_share_indicators = per_share.RetainedEarningsPS(factor_share_indicators, factor_share_indicators)
factor_share_indicators = per_share.OptCFPSTTM(valuation_sets, factor_share_indicators)
factor_share_indicators = per_share.CFPSTTM(valuation_sets, factor_share_indicators)
factor_share_indicators = per_share.EnterpriseFCFPS(valuation_sets, factor_share_indicators)
factor_share_indicators = per_share.ShareholderFCFPS(valuation_sets, factor_share_indicators)
factor_share_indicators = factor_share_indicators.reset_index()
factor_share_indicators['trade_date'] = str(trade_date)
factor_share_indicators.replace([-np.inf, np.inf, None], np.nan, inplace=True)
return factor_share_indicators
def local_run(self, trade_date):
        print('current trading day: %s' % trade_date)
tic = time.time()
valuation_sets = self.loading_data(trade_date)
print('data load time %s' % (time.time() - tic))
storage_engine = StorageEngine(self._url)
result = self.process_calc_factor(trade_date, valuation_sets)
print('cal_time %s' % (time.time() - tic))
storage_engine.update_destdb(str(self._methods[-1]['packet'].split('.')[-1]), trade_date, result)
# storage_engine.update_destdb('factor_pre_share_indicators', trade_date, result)
# def remote_run(self, trade_date):
# total_data = self.loading_data(trade_date)
    #     # store the data
# session = str(int(time.time() * 1000000 + datetime.datetime.now().microsecond))
# cache_data.set_cache(session, 'alphax', total_data.to_json(orient='records'))
# distributed_factor.delay(session, json.dumps(self._methods), self._name)
#
# def distributed_factor(self, total_data):
# mkt_df = self.calc_factor_by_date(total_data,trade_date)
# result = self.calc_factor('alphax.alpha191','Alpha191',mkt_df,trade_date)
# @app.task
# def distributed_factor(session, trade_date, packet_sets, name):
# calc_engines = CalcEngine(name, packet_sets)
# content = cache_data.get_cache(session, factor_name)
# total_data = json_normalize(json.loads(content))
# calc_engines.distributed_factor(total_data)
#
# # @app.task()
# def factor_calculate(**kwargs):
# print("per_share_kwargs: {}".format(kwargs))
# date_index = kwargs['date_index']
# session = kwargs['session']
# content = cache_data.get_cache(session + str(date_index), date_index)
# total_pre_share_data = json_normalize(json.loads(str(content, encoding='utf8')))
# print("len_total_per_share_data {}".format(len(total_pre_share_data)))
# calculate(date_index, total_pre_share_data)
|
[
"datetime.datetime.strftime",
"pandas.DataFrame",
"data.storage_engine.StorageEngine",
"pandas.merge",
"data.sqlengine.sqlEngine",
"time.time",
"datetime.datetime.strptime",
"datetime.timedelta",
"financial.factor_per_share_indicators.FactorPerShareIndicators",
"vision.table.valuation.Valuation.trade_date.in_"
] |
[((2402, 2443), 'datetime.datetime.strptime', 'datetime.strptime', (['trade_date', '"""%Y-%m-%d"""'], {}), "(trade_date, '%Y-%m-%d')\n", (2419, 2443), False, 'from datetime import timedelta, datetime\n'), ((2465, 2504), 'datetime.datetime.strftime', 'datetime.strftime', (['time_array', '"""%Y%m%d"""'], {}), "(time_array, '%Y%m%d')\n", (2482, 2504), False, 'from datetime import timedelta, datetime\n'), ((2543, 2554), 'data.sqlengine.sqlEngine', 'sqlEngine', ([], {}), '()\n', (2552, 2554), False, 'from data.sqlengine import sqlEngine\n'), ((10098, 10152), 'financial.factor_per_share_indicators.FactorPerShareIndicators', 'factor_per_share_indicators.FactorPerShareIndicators', ([], {}), '()\n', (10150, 10152), False, 'from financial import factor_per_share_indicators\n'), ((10187, 10201), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (10199, 10201), True, 'import pandas as pd\n'), ((12751, 12762), 'time.time', 'time.time', ([], {}), '()\n', (12760, 12762), False, 'import time\n'), ((12901, 12925), 'data.storage_engine.StorageEngine', 'StorageEngine', (['self._url'], {}), '(self._url)\n', (12914, 12925), False, 'from data.storage_engine import StorageEngine\n'), ((1633, 1672), 'datetime.datetime.strftime', 'datetime.strftime', (['time_array', '"""%Y%m%d"""'], {}), "(time_array, '%Y%m%d')\n", (1650, 1672), False, 'from datetime import timedelta, datetime\n'), ((1584, 1604), 'datetime.timedelta', 'timedelta', ([], {'days': 'days'}), '(days=days)\n', (1593, 1604), False, 'from datetime import timedelta, datetime\n'), ((9221, 9259), 'vision.table.valuation.Valuation.trade_date.in_', 'Valuation.trade_date.in_', (['[trade_date]'], {}), '([trade_date])\n', (9245, 9259), False, 'from vision.table.valuation import Valuation\n'), ((9432, 9489), 'pandas.merge', 'pd.merge', (['cash_flow_sets', 'income_sets'], {'on': '"""security_code"""'}), "(cash_flow_sets, income_sets, on='security_code')\n", (9440, 9489), True, 'import pandas as pd\n'), ((9525, 9583), 'pandas.merge', 'pd.merge', (['balance_sets', 'valuation_sets'], {'on': '"""security_code"""'}), "(balance_sets, valuation_sets, on='security_code')\n", (9533, 9583), True, 'import pandas as pd\n'), ((9619, 9679), 'pandas.merge', 'pd.merge', (['indicator_sets', 'valuation_sets'], {'on': '"""security_code"""'}), "(indicator_sets, valuation_sets, on='security_code')\n", (9627, 9679), True, 'import pandas as pd\n'), ((9715, 9779), 'pandas.merge', 'pd.merge', (['cash_flow_ttm_sets', 'valuation_sets'], {'on': '"""security_code"""'}), "(cash_flow_ttm_sets, valuation_sets, on='security_code')\n", (9723, 9779), True, 'import pandas as pd\n'), ((9815, 9876), 'pandas.merge', 'pd.merge', (['income_ttm_sets', 'valuation_sets'], {'on': '"""security_code"""'}), "(income_ttm_sets, valuation_sets, on='security_code')\n", (9823, 9876), True, 'import pandas as pd\n'), ((9912, 9972), 'pandas.merge', 'pd.merge', (['valuation_data', 'valuation_sets'], {'on': '"""security_code"""'}), "(valuation_data, valuation_sets, on='security_code')\n", (9920, 9972), True, 'import pandas as pd\n'), ((12855, 12866), 'time.time', 'time.time', ([], {}), '()\n', (12864, 12866), False, 'import time\n'), ((13027, 13038), 'time.time', 'time.time', ([], {}), '()\n', (13036, 13038), False, 'import time\n')]
|
'''
Given n nodes labeled from 0 to n - 1 and a list of undirected edges (each edge is a pair of nodes), write a function to check whether these edges make up a valid tree.
Examples
Example 1:
Input: n = 5 edges = [[0, 1], [0, 2], [0, 3], [1, 4]]
Output: true.
Example 2:
Input: n = 5 edges = [[0, 1], [1, 2], [2, 3], [1, 3], [1, 4]]
Output: false.
Notice
You can assume that no duplicate edges will appear in edges. Since all edges are undirected, [0, 1] is the same as [1, 0] and thus will not appear together in edges.
'''
from collections import defaultdict,deque
class Solution:
"""
@param n: An integer
@param edges: a list of undirected edges
@return: true if it's a valid tree, or false
"""
def validTree(self, n, edges):
# write your code here
if len(edges) != n - 1:
return False
if len(edges) == 0:
return n == 1
neighbor = defaultdict()
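        # Count the degree of every node that appears in at least one edge.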
for edge in edges:
if edge[0] not in neighbor:
neighbor[edge[0]] = 1
else:
neighbor[edge[0]] += 1
if edge[1] not in neighbor:
neighbor[edge[1]] = 1
else:
neighbor[edge[1]] += 1
queue = deque()
for x in range(n):
if x not in neighbor:
return False
elif neighbor[x] == 1:
neighbor[x] -= 1
queue.append(x)
count = 0
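        # Repeatedly strip leaf nodes (degree <= 1); in a valid tree every node is eventually removed.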
while queue:
node = queue.popleft()
count += 1
for edge in edges:
if node in edge:
neighbor[edge[0]] -= 1
neighbor[edge[1]] -= 1
if len(queue) == 0:
for key in neighbor:
if neighbor[key] == 1 or neighbor[key] == 0:
queue.append(key)
if count < n:
return False
return True
|
[
"collections.defaultdict",
"collections.deque"
] |
[((911, 924), 'collections.defaultdict', 'defaultdict', ([], {}), '()\n', (922, 924), False, 'from collections import defaultdict, deque\n'), ((1238, 1245), 'collections.deque', 'deque', ([], {}), '()\n', (1243, 1245), False, 'from collections import defaultdict, deque\n')]
|
# -*- coding:utf-8 -*-
import six
import numpy as np
from pyproj import Proj
import operator
from .exceptions import *
class NullProj(object):
"""
Similar to pyproj.Proj, but NullProj does not do actual conversion.
"""
@property
def srs(self):
return ''
def __call__(self, x, y, **kwargs):
return x, y
class GridderBase(object):
"""Gridder is a helper for i, j <-> x, y conversion, etc."""
def i2x(self, *args):
"""Convert i, j, ... -> x, y, ..."""
raise NotImplementedError
def x2i(self, *args, **kwargs):
"""Convert x, y, ... -> i, j, ..."""
raise NotImplementedError
def copy(self, **kwargs):
kws = self.dump()
kws.update(kwargs)
new_gridder = self.__class__(**kws)
return new_gridder
def calibrate(self, x0, y0, x1=None, y1=None):
return
def dump(self):
return {}
class XYGridderBase(GridderBase):
"""
Requires self.X & self.Y.
"""
@property
def bbox(self):
return (np.min(self.X), np.min(self.Y), np.max(self.X), np.max(self.Y))
def get_bounding_ij(self, x1, y1, x2, y2, **kwargs):
bbox = self.bbox
if x1 is None:
x1 = bbox[0]
if y1 is None:
y1 = bbox[1]
if x2 is None:
x2 = bbox[2]
if y2 is None:
y2 = bbox[3]
bad = ~((self.X >= x1) & (self.X <= x2) & (self.Y >= y1) & (self.Y <= y2))
x_bad = np.alltrue(bad, axis=0)
y_bad = np.alltrue(bad, axis=1)
x_points = np.argwhere(np.diff(np.r_[True, x_bad, True])).reshape(-1, 2)
y_points = np.argwhere(np.diff(np.r_[True, y_bad, True])).reshape(-1, 2)
i1, i2 = (-1, -1) if x_points.shape[0] == 0 else x_points[0]
j1, j2 = (-1, -1) if y_points.shape[0] == 0 else y_points[0]
return i1, j1, i2, j2
def check_bound(self, i, j, int_index=True):
start = -0.5
subtracted = 1
if int_index:
start = 0
if int_index in ('lowerleft', 'll'):
subtracted = 2
if np.isscalar(i):
if (i >= start and i <= self.nx-subtracted) and (j >= start and j <= self.ny-subtracted):
return i, j
else:
raise OutOfGridBound("i: {}, j: {} is out of bound!".format(i, j))
else:
i = np.where((i >= start) & (i <= self.nx - subtracted), i, np.nan)
j = np.where((j >= start) & (j <= self.ny - subtracted), j, np.nan)
return i, j
class XYProjGridder(XYGridderBase):
def __init__(self, proj=None, x=None, y=None, nx=None, ny=None, dx=None, dy=None, x_orig=0.0, y_orig=0.0, **kwargs):
self.proj = proj
self._reset_raw_xy()
if x is not None and y is not None:
self.set_xy(x, y)
else:
self._init_with_para(nx, ny, dx, dy, x_orig, y_orig)
@property
def proj(self):
return self._proj
@proj.setter
def proj(self, p):
if p is None:
self._proj = NullProj()
elif isinstance(p, (Proj, NullProj)):
self._proj = p
elif isinstance(p, dict):
self._proj = Proj(**p)
else: # Treat as proj_string
self._proj = Proj(str(p)) # TODO: check PY3 compatibility.
self._reset_raw_xy()
if all([hasattr(self, attr) for attr in ('_nx', '_ny', '_dx', '_dy', '_x_orig', '_y_orig')]):
self._updateXY()
@property
def X(self):
return self._X
@X.setter
def X(self, x):
if self._raw_y is None:
raise ValueError("Cannot set x alone when no raw y presents.")
ndim_x = np.ndim(x)
if ndim_x == 1 and np.ndim(self._raw_y) == 1:
self.set_xy(x, self._raw_y)
elif ndim_x == 2 and np.shape(x) == np.shape(self.Y):
self.set_xy(x, self.Y)
else:
self._raise_invalid_shape(x, self.Y)
@property
def Y(self):
return self._Y
@Y.setter
def Y(self, y):
if self._raw_x is None:
raise ValueError("Cannot set y alone when no raw x presents.")
ndim_y = np.ndim(y)
if ndim_y == 1 and np.ndim(self._raw_x) == 1:
self.set_xy(self._raw_x, y)
elif ndim_y == 2 and np.shape(y) == np.shape(self.X):
self.set_xy(self.X, y)
else:
self._raise_invalid_shape(self.X, y)
@property
def CX(self):
return self._CX
@property
def CY(self):
return self._CY
@property
def x(self):
return self._raw_x if self._raw_x is not None else self._X
@property
def y(self):
return self._raw_y if self._raw_y is not None else self._Y
@property
def cx(self):
return self._raw_cx if self._raw_cx is not None else self._CX
@property
def cy(self):
return self._raw_cy if self._raw_cy is not None else self._CY
@property
def nx(self):
return self._nx
@nx.setter
def nx(self, value):
self._nx = value
self._reset_raw_xy()
self._updateXY()
@property
def ny(self):
return self._ny
@ny.setter
def ny(self, value):
self._ny = value
self._reset_raw_xy()
self._updateXY()
@property
def dx(self):
return self._dx
@dx.setter
def dx(self, value):
self._dx = value
self._reset_raw_xy()
self._updateXY()
@property
def dy(self):
return self._dy
@dy.setter
def dy(self, value):
self._dy = value
self._reset_raw_xy()
self._updateXY()
@property
def x_orig(self):
return self._x_orig
@x_orig.setter
def x_orig(self, value):
self._x_orig = value
self._reset_raw_xy()
self._updateXY()
@property
def y_orig(self):
return self._y_orig
@y_orig.setter
def y_orig(self, value):
self._y_orig = value
self._reset_raw_xy()
self._updateXY()
@property
def bbox(self):
return self._bbox
@property
def cbox(self):
"""corner box"""
return self._cbox
def _init_with_para(self, nx, ny, dx, dy, x_orig, y_orig):
self._nx = nx
self._ny = ny
self._dx = dx
self._dy = dy
self._x_orig = x_orig
self._y_orig = y_orig
self._updateXY()
@property
def has_null_proj(self):
return isinstance(self.proj, NullProj)
def set_xy(self, x, y):
ndim_x, ndim_y = np.ndim(x), np.ndim(y)
if ndim_x == 1 and ndim_y == 1:
self._nx, self._ny = len(x), len(y)
elif ndim_x == 2 and ndim_y == 2:
self._ny, self._nx = np.shape(x)
else:
self._raise_invalid_shape(x, y)
self._raw_x, self._raw_y = np.asarray(x), np.asarray(y)
self.calibrate(x, y)
def _raise_invalid_shape(self, x, y):
raise ValueError("Invalid x, y shape: {}, {}".format(np.shape(x), np.shape(y)))
def _reset_raw_xy(self):
self._raw_x, self._raw_y = None, None
def _updateXY(self):
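        # Rebuild the cached cell-center (X, Y) and cell-corner (CX, CY) grids and their bounding boxes.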
jj, ii = np.mgrid[0:self.ny, 0:self.nx]
cjj, cii = np.mgrid[-0.5:self.ny, -0.5:self.nx]
xx, yy = self.i2x(ii, jj)
cxx, cyy = self.i2x(cii, cjj)
self._X, self._Y = xx, yy
self._CX, self._CY = cxx, cyy
if self._raw_x is not None and self._raw_x.ndim == 1:
self._raw_cx = self._CX[0]
else:
self._raw_cx = None
if self._raw_y is not None and self._raw_y.ndim == 1:
self._raw_cy = self._CY[:, 0]
else:
self._raw_cy = None
self._bbox = (np.min(self._X), np.min(self._Y), np.max(self._X), np.max(self._Y))
self._cbox = (np.min(self._CX), np.min(self._CY), np.max(self._CX), np.max(self._CY))
return xx, yy
def i2x(self, i, j):
px = i * self.dx + self.x_orig
py = j * self.dy + self.y_orig
return self.proj(px, py, inverse=True)
def x2i(self, x, y, int_index=True, check_bound=None):
px, py = self.proj(x, y)
i = (px - self.x_orig) / self.dx
j = (py - self.y_orig) / self.dy
if int_index:
if int_index in ('lowerleft', 'll'):
i = np.floor(i)
j = np.floor(j)
else:
i = np.round(i)
j = np.round(j)
if np.isscalar(i):
i = int(i)
j = int(j)
else:
i = i.astype('i')
j = j.astype('i')
if check_bound:
return self.check_bound(i, j, int_index=int_index)
else:
return i, j
def calibrate(self, x, y, x1=None, y1=None):
ndim_x, ndim_y = np.ndim(x), np.ndim(y)
if ndim_x == 0 and ndim_y == 0:
x0, y0 = x, y
        elif ndim_x == 1 and ndim_y == 1:
x0, x1 = x[0], x[1]
y0, y1 = y[0], y[1]
elif ndim_x == 2 and ndim_y == 2:
x0, x1 = x[0, 0], x[1, 1]
y0, y1 = y[0, 0], y[1, 1]
else:
self._raise_invalid_shape(x, y)
px0, py0 = self.proj(x0, y0)
self._x_orig = px0
self._y_orig = py0
if x1 is not None and y1 is not None:
px1, py1 = self.proj(x1, y1)
self._dx = px1 - px0
self._dy = py1 - py0
self._updateXY()
def dump(self):
return {
"proj": self.proj.srs,
"nx": self.nx, "ny": self.ny, "dx": self.dx, "dy": self.dy,
"x_orig": self.x_orig, "y_orig": self.y_orig
}
class LonLatSurroundingGridder(XYGridderBase):
def __init__(self, lon0, lat0, rmin, rmax, nr, ntheta, theta0=0.0, r_earth=6371):
self.lon0 = lon0
self.lat0 = lat0
self.rmin = rmin
self.rmax = rmax
self.nr = nr
self.ntheta = ntheta
self.theta0 = theta0
self.r_earth = r_earth
self.dtheta = np.pi * 2 / self.ntheta
self.dr = (self.rmax - self.rmin) / (self.nr - 1)
self._updateXY()
def _updateXY(self):
r = np.linspace(self.rmin, self.rmax, self.nr)
theta = np.arange(self.ntheta) * self.dtheta + self.theta0
THETA, R = np.meshgrid(theta, r)
LON, LAT = self.r_theta_to_lon_lat(R, THETA)
self._X = LON
self._Y = LAT
return self._X, self._Y
def r_theta_to_lon_lat(self, r, theta):
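        # Spherical "destination point" formula: travel distance r (same unit as r_earth) from (lon0, lat0) along bearing theta.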
r_ = r / self.r_earth
sin_r = np.sin(r_)
cos_r = np.cos(r_)
lat0_ = np.deg2rad(self.lat0)
lon0_ = np.deg2rad(self.lon0)
sin_lat0 = np.sin(lat0_)
cos_lat0 = np.cos(lat0_)
sin_lat = sin_lat0 * cos_r + cos_lat0 * sin_r * np.cos(theta)
lat_ = np.arcsin(sin_lat)
lon_ = lon0_ + np.arctan2(np.sin(theta) * sin_r * cos_lat0, cos_r - sin_lat0 * sin_lat)
lon = np.rad2deg(lon_)
lat = np.rad2deg(lat_)
return lon, lat
@property
def nx(self):
return self.ntheta
@property
def ny(self):
return self.nr
@property
def X(self):
return self._X
@property
def Y(self):
return self._Y
@property
def x(self):
return self._X
@property
def y(self):
return self._Y
def i2x(self, i, j):
theta = self.theta0 + i * self.dtheta
r = self.rmin + j * self.dr
lon, lat = self.r_theta_to_lon_lat(r, theta)
return lon, lat
def x2i(self, x, y, int_index=True, check_bound=None):
lon2, lat2 = np.deg2rad(x), np.deg2rad(y)
lon1, lat1 = np.deg2rad(self.lon0), np.deg2rad(self.lat0)
dlon = lon2 - lon1
dlat = lat2 - lat1
sin_dlon = np.sin(dlon)
cos_dlon = np.cos(dlon)
sin_lat1 = np.sin(lat1)
cos_lat1 = np.cos(lat1)
sin_lat2 = np.sin(lat2)
cos_lat2 = np.cos(lat2)
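        # Initial bearing from the grid center (lon0, lat0) to the query point.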
a = cos_lat2 * sin_dlon
b = cos_lat1 * sin_lat2 - sin_lat1 * cos_lat2 * cos_dlon
theta = np.arctan2(a, b)
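        # Haversine formula for the great-circle distance from the grid center.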
c = np.sin(dlat / 2) ** 2 + cos_lat1 * cos_lat2 * np.sin(dlon / 2) ** 2
d = 2 * np.arcsin(np.sqrt(c))
r = d * self.r_earth
i = (theta - self.theta0) / self.dtheta % self.ntheta
j = (r - self.rmin) / self.dr
if int_index:
i = np.round(i)
j = np.round(j)
if np.isscalar(i):
i = int(i)
j = int(j)
else:
i = i.astype('i')
j = j.astype('i')
if check_bound:
return self.check_bound(i, j, int_index=int_index)
else:
return i, j
class XYIrregularGridder(XYGridderBase):
# TODO: use kdtree.
def __init__(self, X, Y):
X = np.array(X)
Y = np.array(Y)
if X.ndim == 1:
self.X, self.Y = np.meshgrid(X, Y)
else:
self.X, self.Y = X, Y
self.ny, self.nx = X.shape
def i2x(self, i, j, *args, **kwargs):
return self.X[j, i], self.Y[j, i]
def x2i(self, x, y, *args, **kwargs):
distances = np.hypot(self.X-x, self.Y-y)
flat_i = np.argmin(distances)
        nx = self.X.shape[1]
        return flat_i // nx, flat_i % nx
def dump(self):
return {
"X": self.X,
"Y": self.Y,
"nx": self.nx,
"ny": self.ny,
}
|
[
"numpy.arctan2",
"numpy.floor",
"numpy.argmin",
"numpy.shape",
"numpy.sin",
"numpy.arange",
"numpy.round",
"numpy.meshgrid",
"numpy.ndim",
"numpy.arcsin",
"numpy.max",
"numpy.linspace",
"numpy.asarray",
"numpy.hypot",
"numpy.min",
"numpy.cos",
"numpy.alltrue",
"numpy.deg2rad",
"numpy.isscalar",
"numpy.rad2deg",
"numpy.where",
"numpy.array",
"numpy.diff",
"pyproj.Proj",
"numpy.sqrt"
] |
[((1494, 1517), 'numpy.alltrue', 'np.alltrue', (['bad'], {'axis': '(0)'}), '(bad, axis=0)\n', (1504, 1517), True, 'import numpy as np\n'), ((1534, 1557), 'numpy.alltrue', 'np.alltrue', (['bad'], {'axis': '(1)'}), '(bad, axis=1)\n', (1544, 1557), True, 'import numpy as np\n'), ((2117, 2131), 'numpy.isscalar', 'np.isscalar', (['i'], {}), '(i)\n', (2128, 2131), True, 'import numpy as np\n'), ((3714, 3724), 'numpy.ndim', 'np.ndim', (['x'], {}), '(x)\n', (3721, 3724), True, 'import numpy as np\n'), ((4193, 4203), 'numpy.ndim', 'np.ndim', (['y'], {}), '(y)\n', (4200, 4203), True, 'import numpy as np\n'), ((10223, 10265), 'numpy.linspace', 'np.linspace', (['self.rmin', 'self.rmax', 'self.nr'], {}), '(self.rmin, self.rmax, self.nr)\n', (10234, 10265), True, 'import numpy as np\n'), ((10353, 10374), 'numpy.meshgrid', 'np.meshgrid', (['theta', 'r'], {}), '(theta, r)\n', (10364, 10374), True, 'import numpy as np\n'), ((10597, 10607), 'numpy.sin', 'np.sin', (['r_'], {}), '(r_)\n', (10603, 10607), True, 'import numpy as np\n'), ((10624, 10634), 'numpy.cos', 'np.cos', (['r_'], {}), '(r_)\n', (10630, 10634), True, 'import numpy as np\n'), ((10651, 10672), 'numpy.deg2rad', 'np.deg2rad', (['self.lat0'], {}), '(self.lat0)\n', (10661, 10672), True, 'import numpy as np\n'), ((10689, 10710), 'numpy.deg2rad', 'np.deg2rad', (['self.lon0'], {}), '(self.lon0)\n', (10699, 10710), True, 'import numpy as np\n'), ((10730, 10743), 'numpy.sin', 'np.sin', (['lat0_'], {}), '(lat0_)\n', (10736, 10743), True, 'import numpy as np\n'), ((10763, 10776), 'numpy.cos', 'np.cos', (['lat0_'], {}), '(lat0_)\n', (10769, 10776), True, 'import numpy as np\n'), ((10863, 10881), 'numpy.arcsin', 'np.arcsin', (['sin_lat'], {}), '(sin_lat)\n', (10872, 10881), True, 'import numpy as np\n'), ((10993, 11009), 'numpy.rad2deg', 'np.rad2deg', (['lon_'], {}), '(lon_)\n', (11003, 11009), True, 'import numpy as np\n'), ((11024, 11040), 'numpy.rad2deg', 'np.rad2deg', (['lat_'], {}), '(lat_)\n', (11034, 11040), True, 'import numpy as np\n'), ((11836, 11848), 'numpy.sin', 'np.sin', (['dlon'], {}), '(dlon)\n', (11842, 11848), True, 'import numpy as np\n'), ((11868, 11880), 'numpy.cos', 'np.cos', (['dlon'], {}), '(dlon)\n', (11874, 11880), True, 'import numpy as np\n'), ((11900, 11912), 'numpy.sin', 'np.sin', (['lat1'], {}), '(lat1)\n', (11906, 11912), True, 'import numpy as np\n'), ((11932, 11944), 'numpy.cos', 'np.cos', (['lat1'], {}), '(lat1)\n', (11938, 11944), True, 'import numpy as np\n'), ((11964, 11976), 'numpy.sin', 'np.sin', (['lat2'], {}), '(lat2)\n', (11970, 11976), True, 'import numpy as np\n'), ((11996, 12008), 'numpy.cos', 'np.cos', (['lat2'], {}), '(lat2)\n', (12002, 12008), True, 'import numpy as np\n'), ((12124, 12140), 'numpy.arctan2', 'np.arctan2', (['a', 'b'], {}), '(a, b)\n', (12134, 12140), True, 'import numpy as np\n'), ((12877, 12888), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (12885, 12888), True, 'import numpy as np\n'), ((12901, 12912), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (12909, 12912), True, 'import numpy as np\n'), ((13216, 13248), 'numpy.hypot', 'np.hypot', (['(self.X - x)', '(self.Y - y)'], {}), '(self.X - x, self.Y - y)\n', (13224, 13248), True, 'import numpy as np\n'), ((13262, 13282), 'numpy.argmin', 'np.argmin', (['distances'], {}), '(distances)\n', (13271, 13282), True, 'import numpy as np\n'), ((1055, 1069), 'numpy.min', 'np.min', (['self.X'], {}), '(self.X)\n', (1061, 1069), True, 'import numpy as np\n'), ((1071, 1085), 'numpy.min', 'np.min', (['self.Y'], {}), '(self.Y)\n', (1077, 1085), True, 
'import numpy as np\n'), ((1087, 1101), 'numpy.max', 'np.max', (['self.X'], {}), '(self.X)\n', (1093, 1101), True, 'import numpy as np\n'), ((1103, 1117), 'numpy.max', 'np.max', (['self.Y'], {}), '(self.Y)\n', (1109, 1117), True, 'import numpy as np\n'), ((2394, 2457), 'numpy.where', 'np.where', (['((i >= start) & (i <= self.nx - subtracted))', 'i', 'np.nan'], {}), '((i >= start) & (i <= self.nx - subtracted), i, np.nan)\n', (2402, 2457), True, 'import numpy as np\n'), ((2474, 2537), 'numpy.where', 'np.where', (['((j >= start) & (j <= self.ny - subtracted))', 'j', 'np.nan'], {}), '((j >= start) & (j <= self.ny - subtracted), j, np.nan)\n', (2482, 2537), True, 'import numpy as np\n'), ((6602, 6612), 'numpy.ndim', 'np.ndim', (['x'], {}), '(x)\n', (6609, 6612), True, 'import numpy as np\n'), ((6614, 6624), 'numpy.ndim', 'np.ndim', (['y'], {}), '(y)\n', (6621, 6624), True, 'import numpy as np\n'), ((6894, 6907), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (6904, 6907), True, 'import numpy as np\n'), ((6909, 6922), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (6919, 6922), True, 'import numpy as np\n'), ((7756, 7771), 'numpy.min', 'np.min', (['self._X'], {}), '(self._X)\n', (7762, 7771), True, 'import numpy as np\n'), ((7773, 7788), 'numpy.min', 'np.min', (['self._Y'], {}), '(self._Y)\n', (7779, 7788), True, 'import numpy as np\n'), ((7790, 7805), 'numpy.max', 'np.max', (['self._X'], {}), '(self._X)\n', (7796, 7805), True, 'import numpy as np\n'), ((7807, 7822), 'numpy.max', 'np.max', (['self._Y'], {}), '(self._Y)\n', (7813, 7822), True, 'import numpy as np\n'), ((7846, 7862), 'numpy.min', 'np.min', (['self._CX'], {}), '(self._CX)\n', (7852, 7862), True, 'import numpy as np\n'), ((7864, 7880), 'numpy.min', 'np.min', (['self._CY'], {}), '(self._CY)\n', (7870, 7880), True, 'import numpy as np\n'), ((7882, 7898), 'numpy.max', 'np.max', (['self._CX'], {}), '(self._CX)\n', (7888, 7898), True, 'import numpy as np\n'), ((7900, 7916), 'numpy.max', 'np.max', (['self._CY'], {}), '(self._CY)\n', (7906, 7916), True, 'import numpy as np\n'), ((8499, 8513), 'numpy.isscalar', 'np.isscalar', (['i'], {}), '(i)\n', (8510, 8513), True, 'import numpy as np\n'), ((8856, 8866), 'numpy.ndim', 'np.ndim', (['x'], {}), '(x)\n', (8863, 8866), True, 'import numpy as np\n'), ((8868, 8878), 'numpy.ndim', 'np.ndim', (['y'], {}), '(y)\n', (8875, 8878), True, 'import numpy as np\n'), ((11667, 11680), 'numpy.deg2rad', 'np.deg2rad', (['x'], {}), '(x)\n', (11677, 11680), True, 'import numpy as np\n'), ((11682, 11695), 'numpy.deg2rad', 'np.deg2rad', (['y'], {}), '(y)\n', (11692, 11695), True, 'import numpy as np\n'), ((11717, 11738), 'numpy.deg2rad', 'np.deg2rad', (['self.lon0'], {}), '(self.lon0)\n', (11727, 11738), True, 'import numpy as np\n'), ((11740, 11761), 'numpy.deg2rad', 'np.deg2rad', (['self.lat0'], {}), '(self.lat0)\n', (11750, 11761), True, 'import numpy as np\n'), ((12430, 12441), 'numpy.round', 'np.round', (['i'], {}), '(i)\n', (12438, 12441), True, 'import numpy as np\n'), ((12458, 12469), 'numpy.round', 'np.round', (['j'], {}), '(j)\n', (12466, 12469), True, 'import numpy as np\n'), ((12486, 12500), 'numpy.isscalar', 'np.isscalar', (['i'], {}), '(i)\n', (12497, 12500), True, 'import numpy as np\n'), ((12966, 12983), 'numpy.meshgrid', 'np.meshgrid', (['X', 'Y'], {}), '(X, Y)\n', (12977, 12983), True, 'import numpy as np\n'), ((3752, 3772), 'numpy.ndim', 'np.ndim', (['self._raw_y'], {}), '(self._raw_y)\n', (3759, 3772), True, 'import numpy as np\n'), ((4231, 4251), 'numpy.ndim', 'np.ndim', 
(['self._raw_x'], {}), '(self._raw_x)\n', (4238, 4251), True, 'import numpy as np\n'), ((6788, 6799), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (6796, 6799), True, 'import numpy as np\n'), ((7056, 7067), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (7064, 7067), True, 'import numpy as np\n'), ((7069, 7080), 'numpy.shape', 'np.shape', (['y'], {}), '(y)\n', (7077, 7080), True, 'import numpy as np\n'), ((8358, 8369), 'numpy.floor', 'np.floor', (['i'], {}), '(i)\n', (8366, 8369), True, 'import numpy as np\n'), ((8390, 8401), 'numpy.floor', 'np.floor', (['j'], {}), '(j)\n', (8398, 8401), True, 'import numpy as np\n'), ((8440, 8451), 'numpy.round', 'np.round', (['i'], {}), '(i)\n', (8448, 8451), True, 'import numpy as np\n'), ((8472, 8483), 'numpy.round', 'np.round', (['j'], {}), '(j)\n', (8480, 8483), True, 'import numpy as np\n'), ((10282, 10304), 'numpy.arange', 'np.arange', (['self.ntheta'], {}), '(self.ntheta)\n', (10291, 10304), True, 'import numpy as np\n'), ((10834, 10847), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (10840, 10847), True, 'import numpy as np\n'), ((12154, 12170), 'numpy.sin', 'np.sin', (['(dlat / 2)'], {}), '(dlat / 2)\n', (12160, 12170), True, 'import numpy as np\n'), ((12248, 12258), 'numpy.sqrt', 'np.sqrt', (['c'], {}), '(c)\n', (12255, 12258), True, 'import numpy as np\n'), ((1589, 1622), 'numpy.diff', 'np.diff', (['np.r_[True, x_bad, True]'], {}), '(np.r_[True, x_bad, True])\n', (1596, 1622), True, 'import numpy as np\n'), ((1670, 1703), 'numpy.diff', 'np.diff', (['np.r_[True, y_bad, True]'], {}), '(np.r_[True, y_bad, True])\n', (1677, 1703), True, 'import numpy as np\n'), ((3219, 3228), 'pyproj.Proj', 'Proj', ([], {}), '(**p)\n', (3223, 3228), False, 'from pyproj import Proj\n'), ((3848, 3859), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (3856, 3859), True, 'import numpy as np\n'), ((3863, 3879), 'numpy.shape', 'np.shape', (['self.Y'], {}), '(self.Y)\n', (3871, 3879), True, 'import numpy as np\n'), ((4327, 4338), 'numpy.shape', 'np.shape', (['y'], {}), '(y)\n', (4335, 4338), True, 'import numpy as np\n'), ((4342, 4358), 'numpy.shape', 'np.shape', (['self.X'], {}), '(self.X)\n', (4350, 4358), True, 'import numpy as np\n'), ((12200, 12216), 'numpy.sin', 'np.sin', (['(dlon / 2)'], {}), '(dlon / 2)\n', (12206, 12216), True, 'import numpy as np\n'), ((10916, 10929), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (10922, 10929), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import json
import unittest
from typing import List
from bidict import bidict
from scan_service.utils.hardware_config import HardwareConfig
class HardwareConfigTests(unittest.TestCase):
def setUp(self) -> None:
with open("tests/hardware_config.json") as f:
hardware_config = json.load(f)
HardwareConfig.set_config(hardware_config)
def test_class_variables(self) -> None:
self.assertDictEqual(
HardwareConfig.BEAM_ORDER,
{
"0": {
"-18": [0, 1, 2, 3, 4, 5, 6, 7],
"18": [8, 9, 10, 11, 12, 13, 14, 15],
},
"1": {
"0": [30, 29, 28, 27, 26, 25, 24, 16, 17, 18, 19, 20, 21, 22, 23]
},
},
)
self.assertDictEqual(
HardwareConfig.TXPOWERIDX_TO_TXPOWER,
{
"2": {
"10": {0: 19, 1: 20, 2: 21, 3: 22, 4: 23, 5: 24, 6: 25, 7: 26},
"6": {0: 10, 1: 11, 2: 12, 3: 13, 4: 14, 5: 15, 6: 16, 7: 17},
},
"3": {"5": {0: 11, 1: 12, 2: 13, 3: 14, 4: 15, 5: 16, 6: 17, 7: 18}},
"default_channel": {
"default_mcs": {
0: 16,
1: 17,
2: 18,
3: 19,
4: 20,
5: 21,
6: 22,
7: 23,
}
},
},
)
self.assertEqual(HardwareConfig.BORESIDE_BW_IDX, 10)
self.assertEqual(HardwareConfig.MINIMUM_SNR_DB, -10)
self.assertEqual(HardwareConfig.SNR_SATURATE_THRESH_DB, 25)
self.assertEqual(HardwareConfig.BEAM_SEPERATE_IDX, 3)
self.assertEqual(HardwareConfig.MAX_SIDELOBE_LEVEL_DB, 12)
self.assertEqual(HardwareConfig.MAX_POWER, 23)
def test_get_adjacent_beam_index(self) -> None:
self.assertEqual(HardwareConfig.get_adjacent_beam_index(0, 1), 1)
self.assertEqual(HardwareConfig.get_adjacent_beam_index(0, -1), 0)
self.assertEqual(HardwareConfig.get_adjacent_beam_index(8, 1), 9)
self.assertEqual(HardwareConfig.get_adjacent_beam_index(8, -1), 8)
self.assertEqual(HardwareConfig.get_adjacent_beam_index(15, 1), 15)
self.assertEqual(HardwareConfig.get_adjacent_beam_index(15, -1), 14)
self.assertEqual(HardwareConfig.get_adjacent_beam_index(16, 1), 17)
self.assertEqual(HardwareConfig.get_adjacent_beam_index(16, -1), 24)
self.assertEqual(HardwareConfig.get_adjacent_beam_index(23, 1), 23)
self.assertEqual(HardwareConfig.get_adjacent_beam_index(23, -1), 22)
self.assertEqual(HardwareConfig.get_adjacent_beam_index(30, 1), 29)
self.assertEqual(HardwareConfig.get_adjacent_beam_index(30, -1), 30)
self.assertEqual(HardwareConfig.get_adjacent_beam_index(60, 1), 60)
self.assertEqual(HardwareConfig.get_adjacent_beam_index(60, -1), 60)
def test_get_pwr_offset(self) -> None:
self.assertEqual(HardwareConfig.get_pwr_offset(channel="2", mcs="6"), 0)
self.assertEqual(
HardwareConfig.get_pwr_offset(target_pwr_idx=4, channel="2", mcs="6"), -9
)
self.assertEqual(
HardwareConfig.get_pwr_offset(ref_pwr_idx=4, channel="2", mcs="6"), 9
)
self.assertEqual(
HardwareConfig.get_pwr_offset(ref_pwr_idx=5, channel="3", mcs="5"), 7
)
self.assertEqual(
HardwareConfig.get_pwr_offset(ref_pwr_idx=7, channel="2", mcs="10"), -3
)
self.assertEqual(HardwareConfig.get_pwr_offset(target_pwr_idx=5), -2)
|
[
"scan_service.utils.hardware_config.HardwareConfig.set_config",
"json.load",
"scan_service.utils.hardware_config.HardwareConfig.get_adjacent_beam_index",
"scan_service.utils.hardware_config.HardwareConfig.get_pwr_offset"
] |
[((382, 394), 'json.load', 'json.load', (['f'], {}), '(f)\n', (391, 394), False, 'import json\n'), ((407, 449), 'scan_service.utils.hardware_config.HardwareConfig.set_config', 'HardwareConfig.set_config', (['hardware_config'], {}), '(hardware_config)\n', (432, 449), False, 'from scan_service.utils.hardware_config import HardwareConfig\n'), ((2113, 2157), 'scan_service.utils.hardware_config.HardwareConfig.get_adjacent_beam_index', 'HardwareConfig.get_adjacent_beam_index', (['(0)', '(1)'], {}), '(0, 1)\n', (2151, 2157), False, 'from scan_service.utils.hardware_config import HardwareConfig\n'), ((2187, 2232), 'scan_service.utils.hardware_config.HardwareConfig.get_adjacent_beam_index', 'HardwareConfig.get_adjacent_beam_index', (['(0)', '(-1)'], {}), '(0, -1)\n', (2225, 2232), False, 'from scan_service.utils.hardware_config import HardwareConfig\n'), ((2263, 2307), 'scan_service.utils.hardware_config.HardwareConfig.get_adjacent_beam_index', 'HardwareConfig.get_adjacent_beam_index', (['(8)', '(1)'], {}), '(8, 1)\n', (2301, 2307), False, 'from scan_service.utils.hardware_config import HardwareConfig\n'), ((2337, 2382), 'scan_service.utils.hardware_config.HardwareConfig.get_adjacent_beam_index', 'HardwareConfig.get_adjacent_beam_index', (['(8)', '(-1)'], {}), '(8, -1)\n', (2375, 2382), False, 'from scan_service.utils.hardware_config import HardwareConfig\n'), ((2413, 2458), 'scan_service.utils.hardware_config.HardwareConfig.get_adjacent_beam_index', 'HardwareConfig.get_adjacent_beam_index', (['(15)', '(1)'], {}), '(15, 1)\n', (2451, 2458), False, 'from scan_service.utils.hardware_config import HardwareConfig\n'), ((2489, 2535), 'scan_service.utils.hardware_config.HardwareConfig.get_adjacent_beam_index', 'HardwareConfig.get_adjacent_beam_index', (['(15)', '(-1)'], {}), '(15, -1)\n', (2527, 2535), False, 'from scan_service.utils.hardware_config import HardwareConfig\n'), ((2567, 2612), 'scan_service.utils.hardware_config.HardwareConfig.get_adjacent_beam_index', 'HardwareConfig.get_adjacent_beam_index', (['(16)', '(1)'], {}), '(16, 1)\n', (2605, 2612), False, 'from scan_service.utils.hardware_config import HardwareConfig\n'), ((2643, 2689), 'scan_service.utils.hardware_config.HardwareConfig.get_adjacent_beam_index', 'HardwareConfig.get_adjacent_beam_index', (['(16)', '(-1)'], {}), '(16, -1)\n', (2681, 2689), False, 'from scan_service.utils.hardware_config import HardwareConfig\n'), ((2721, 2766), 'scan_service.utils.hardware_config.HardwareConfig.get_adjacent_beam_index', 'HardwareConfig.get_adjacent_beam_index', (['(23)', '(1)'], {}), '(23, 1)\n', (2759, 2766), False, 'from scan_service.utils.hardware_config import HardwareConfig\n'), ((2797, 2843), 'scan_service.utils.hardware_config.HardwareConfig.get_adjacent_beam_index', 'HardwareConfig.get_adjacent_beam_index', (['(23)', '(-1)'], {}), '(23, -1)\n', (2835, 2843), False, 'from scan_service.utils.hardware_config import HardwareConfig\n'), ((2875, 2920), 'scan_service.utils.hardware_config.HardwareConfig.get_adjacent_beam_index', 'HardwareConfig.get_adjacent_beam_index', (['(30)', '(1)'], {}), '(30, 1)\n', (2913, 2920), False, 'from scan_service.utils.hardware_config import HardwareConfig\n'), ((2951, 2997), 'scan_service.utils.hardware_config.HardwareConfig.get_adjacent_beam_index', 'HardwareConfig.get_adjacent_beam_index', (['(30)', '(-1)'], {}), '(30, -1)\n', (2989, 2997), False, 'from scan_service.utils.hardware_config import HardwareConfig\n'), ((3029, 3074), 'scan_service.utils.hardware_config.HardwareConfig.get_adjacent_beam_index', 
'HardwareConfig.get_adjacent_beam_index', (['(60)', '(1)'], {}), '(60, 1)\n', (3067, 3074), False, 'from scan_service.utils.hardware_config import HardwareConfig\n'), ((3105, 3151), 'scan_service.utils.hardware_config.HardwareConfig.get_adjacent_beam_index', 'HardwareConfig.get_adjacent_beam_index', (['(60)', '(-1)'], {}), '(60, -1)\n', (3143, 3151), False, 'from scan_service.utils.hardware_config import HardwareConfig\n'), ((3226, 3277), 'scan_service.utils.hardware_config.HardwareConfig.get_pwr_offset', 'HardwareConfig.get_pwr_offset', ([], {'channel': '"""2"""', 'mcs': '"""6"""'}), "(channel='2', mcs='6')\n", (3255, 3277), False, 'from scan_service.utils.hardware_config import HardwareConfig\n'), ((3320, 3389), 'scan_service.utils.hardware_config.HardwareConfig.get_pwr_offset', 'HardwareConfig.get_pwr_offset', ([], {'target_pwr_idx': '(4)', 'channel': '"""2"""', 'mcs': '"""6"""'}), "(target_pwr_idx=4, channel='2', mcs='6')\n", (3349, 3389), False, 'from scan_service.utils.hardware_config import HardwareConfig\n'), ((3442, 3508), 'scan_service.utils.hardware_config.HardwareConfig.get_pwr_offset', 'HardwareConfig.get_pwr_offset', ([], {'ref_pwr_idx': '(4)', 'channel': '"""2"""', 'mcs': '"""6"""'}), "(ref_pwr_idx=4, channel='2', mcs='6')\n", (3471, 3508), False, 'from scan_service.utils.hardware_config import HardwareConfig\n'), ((3560, 3626), 'scan_service.utils.hardware_config.HardwareConfig.get_pwr_offset', 'HardwareConfig.get_pwr_offset', ([], {'ref_pwr_idx': '(5)', 'channel': '"""3"""', 'mcs': '"""5"""'}), "(ref_pwr_idx=5, channel='3', mcs='5')\n", (3589, 3626), False, 'from scan_service.utils.hardware_config import HardwareConfig\n'), ((3678, 3745), 'scan_service.utils.hardware_config.HardwareConfig.get_pwr_offset', 'HardwareConfig.get_pwr_offset', ([], {'ref_pwr_idx': '(7)', 'channel': '"""2"""', 'mcs': '"""10"""'}), "(ref_pwr_idx=7, channel='2', mcs='10')\n", (3707, 3745), False, 'from scan_service.utils.hardware_config import HardwareConfig\n'), ((3785, 3832), 'scan_service.utils.hardware_config.HardwareConfig.get_pwr_offset', 'HardwareConfig.get_pwr_offset', ([], {'target_pwr_idx': '(5)'}), '(target_pwr_idx=5)\n', (3814, 3832), False, 'from scan_service.utils.hardware_config import HardwareConfig\n')]
|
import json
import os
from unittest.mock import patch, mock_open
import pytest
from signal_interpreter_server.json_parser import JsonParser
from signal_interpreter_server.exceptions import SignalError
@pytest.mark.parametrize("identifier, expected_result", [
("11", "ECU Reset"),
("99", "Not existing"),
])
def test_get_signal_title(identifier, expected_result):
jason_parser = JsonParser()
jason_parser.data = {"services": [{"title": "ECU Reset", "id": "11"}]}
if identifier != '99':
assert jason_parser.get_signal_title(identifier) == expected_result
else:
with pytest.raises(SignalError):
jason_parser.get_signal_title(identifier)
@pytest.fixture(scope="session")
def test_load_file_with_fixure(tmpdir):
tmp_db = {"services": [{"title": "ECU Reset", "id": "11"}]}
filepath = os.path.join(tmpdir, "tmp_json.json")
with open(filepath, 'w') as jfile:
json.dump(tmp_db, jfile)
jason_parser = JsonParser()
jason_parser.load_file(filepath)
assert isinstance(jason_parser.data, dict)
assert jason_parser.data == tmp_db
def test_load_file_simple():
with patch("builtins.open",
mock_open(read_data='{"services": [{"title": "ECU Reset", "id": "11"}]}')):
json_parser = JsonParser()
json_parser.load_file("path/to/json/file")
assert json_parser.data == {"services": [{"title": "ECU Reset", "id": "11"}]}
def test_load_file_wrong_type():
with patch("builtins.open", mock_open(read_data="This is wrong data!")):
with pytest.raises(ValueError):
json_parser = JsonParser()
json_parser.load_file("path/to/json/file")
|
[
"json.dump",
"signal_interpreter_server.json_parser.JsonParser",
"pytest.fixture",
"pytest.raises",
"unittest.mock.mock_open",
"pytest.mark.parametrize",
"os.path.join"
] |
[((207, 312), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""identifier, expected_result"""', "[('11', 'ECU Reset'), ('99', 'Not existing')]"], {}), "('identifier, expected_result', [('11', 'ECU Reset'),\n ('99', 'Not existing')])\n", (230, 312), False, 'import pytest\n'), ((694, 725), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (708, 725), False, 'import pytest\n'), ((395, 407), 'signal_interpreter_server.json_parser.JsonParser', 'JsonParser', ([], {}), '()\n', (405, 407), False, 'from signal_interpreter_server.json_parser import JsonParser\n'), ((845, 882), 'os.path.join', 'os.path.join', (['tmpdir', '"""tmp_json.json"""'], {}), "(tmpdir, 'tmp_json.json')\n", (857, 882), False, 'import os\n'), ((975, 987), 'signal_interpreter_server.json_parser.JsonParser', 'JsonParser', ([], {}), '()\n', (985, 987), False, 'from signal_interpreter_server.json_parser import JsonParser\n'), ((931, 955), 'json.dump', 'json.dump', (['tmp_db', 'jfile'], {}), '(tmp_db, jfile)\n', (940, 955), False, 'import json\n'), ((1287, 1299), 'signal_interpreter_server.json_parser.JsonParser', 'JsonParser', ([], {}), '()\n', (1297, 1299), False, 'from signal_interpreter_server.json_parser import JsonParser\n'), ((609, 635), 'pytest.raises', 'pytest.raises', (['SignalError'], {}), '(SignalError)\n', (622, 635), False, 'import pytest\n'), ((1189, 1262), 'unittest.mock.mock_open', 'mock_open', ([], {'read_data': '"""{"services": [{"title": "ECU Reset", "id": "11"}]}"""'}), '(read_data=\'{"services": [{"title": "ECU Reset", "id": "11"}]}\')\n', (1198, 1262), False, 'from unittest.mock import patch, mock_open\n'), ((1504, 1546), 'unittest.mock.mock_open', 'mock_open', ([], {'read_data': '"""This is wrong data!"""'}), "(read_data='This is wrong data!')\n", (1513, 1546), False, 'from unittest.mock import patch, mock_open\n'), ((1562, 1587), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1575, 1587), False, 'import pytest\n'), ((1615, 1627), 'signal_interpreter_server.json_parser.JsonParser', 'JsonParser', ([], {}), '()\n', (1625, 1627), False, 'from signal_interpreter_server.json_parser import JsonParser\n')]
|
# web imports
from flask import Flask
from blinker import Namespace # or from flask.signals import Namespace
from flask_executor import Executor
from flask_executor.futures import Future
from flask_shell2http import Shell2HTTP
# Flask application instance
app = Flask(__name__)
# application factory
executor = Executor(app)
shell2http = Shell2HTTP(app, executor, base_url_prefix="/cmd/")
ENDPOINT = "echo"
CMD = "echo"
# Signal Handling
signal_handler = Namespace()
my_signal = signal_handler.signal(f"on_{CMD}_complete")
# ..or any other name of your choice
@my_signal.connect
def my_callback_fn(extra_callback_context, future: Future):
"""
Will be invoked on every process completion
"""
print("Process completed ?:", future.done())
print("Result: ", future.result())
shell2http.register_command(
endpoint=ENDPOINT, command_name=CMD, callback_fn=my_signal.send
)
# Test Runner
if __name__ == "__main__":
app.testing = True
c = app.test_client()
# request new process
data = {"args": ["Hello", "Friend!"]}
c.post(f"cmd/{ENDPOINT}", json=data)
# request new process
data = {"args": ["Bye", "Friend!"]}
c.post(f"cmd/{ENDPOINT}", json=data)
|
[
"blinker.Namespace",
"flask_executor.Executor",
"flask.Flask",
"flask_shell2http.Shell2HTTP"
] |
[((264, 279), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (269, 279), False, 'from flask import Flask\n'), ((314, 327), 'flask_executor.Executor', 'Executor', (['app'], {}), '(app)\n', (322, 327), False, 'from flask_executor import Executor\n'), ((341, 391), 'flask_shell2http.Shell2HTTP', 'Shell2HTTP', (['app', 'executor'], {'base_url_prefix': '"""/cmd/"""'}), "(app, executor, base_url_prefix='/cmd/')\n", (351, 391), False, 'from flask_shell2http import Shell2HTTP\n'), ((460, 471), 'blinker.Namespace', 'Namespace', ([], {}), '()\n', (469, 471), False, 'from blinker import Namespace\n')]
|
__author__ = '<NAME>'
from threading import RLock
from bson.objectid import ObjectId
from pymongo import ASCENDING
from pymongo.errors import DuplicateKeyError as MongoDuplicateKeyError
from synergy.system import time_helper
from synergy.system.time_qualifier import *
from synergy.system.decorator import thread_safe
from synergy.scheduler.scheduler_constants import COLLECTION_UNIT_OF_WORK, TYPE_MANAGED
from synergy.conf import context
from synergy.db.error import DuplicateKeyError
from synergy.db.model import unit_of_work
from synergy.db.model.unit_of_work import UnitOfWork
from synergy.db.manager import ds_manager
QUERY_GET_FREERUN_SINCE = lambda timeperiod, unprocessed_only: {
unit_of_work.TIMEPERIOD: {'$gte': timeperiod},
unit_of_work.UNIT_OF_WORK_TYPE: unit_of_work.TYPE_FREERUN,
unit_of_work.STATE: {'$ne': unit_of_work.STATE_PROCESSED if unprocessed_only else None}
}
class UnitOfWorkDao(object):
""" Thread-safe Data Access Object from units_of_work table/collection """
def __init__(self, logger):
super(UnitOfWorkDao, self).__init__()
self.logger = logger
self.lock = RLock()
self.ds = ds_manager.ds_factory(logger)
@thread_safe
def get_one(self, key):
""" method finds unit_of_work record and returns it to the caller"""
if not isinstance(key, ObjectId):
# cast key to ObjectId
key = ObjectId(key)
query = {'_id': key}
collection = self.ds.connection(COLLECTION_UNIT_OF_WORK)
document = collection.find_one(query)
if document is None:
msg = 'Unit_of_work with ID=%s was not found' % str(key)
self.logger.warn(msg)
raise LookupError(msg)
return UnitOfWork.from_json(document)
@thread_safe
def get_reprocessing_candidates(self, since=None):
""" method queries Unit Of Work whose <start_timeperiod> is younger than <since>
and who could be candidates for re-processing """
collection = self.ds.connection(COLLECTION_UNIT_OF_WORK)
query = {unit_of_work.STATE: {'$in': [unit_of_work.STATE_IN_PROGRESS,
unit_of_work.STATE_INVALID,
unit_of_work.STATE_REQUESTED]},
unit_of_work.UNIT_OF_WORK_TYPE: TYPE_MANAGED}
if since is None:
cursor = collection.find(query).sort('_id', ASCENDING)
candidates = [UnitOfWork.from_json(document) for document in cursor]
else:
candidates = []
yearly_timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_YEARLY, since)
query[unit_of_work.START_TIMEPERIOD] = {'$gte': yearly_timeperiod}
cursor = collection.find(query).sort('_id', ASCENDING)
for document in cursor:
uow = UnitOfWork.from_json(document)
if uow.process_name not in context.process_context:
# this is a decommissioned process
continue
time_qualifier = context.process_context[uow.process_name].time_qualifier
if time_qualifier == QUALIFIER_REAL_TIME:
time_qualifier = QUALIFIER_HOURLY
process_specific_since = time_helper.cast_to_time_qualifier(time_qualifier, since)
if process_specific_since <= uow.start_timeperiod:
candidates.append(uow)
if len(candidates) == 0:
raise LookupError('MongoDB has no reprocessing candidates units of work')
return candidates
@thread_safe
def get_by_params(self, process_name, timeperiod, start_obj_id, end_obj_id):
""" method finds unit_of_work record and returns it to the caller"""
query = {unit_of_work.PROCESS_NAME: process_name,
unit_of_work.TIMEPERIOD: timeperiod,
unit_of_work.START_OBJ_ID: start_obj_id,
unit_of_work.END_OBJ_ID: end_obj_id}
collection = self.ds.connection(COLLECTION_UNIT_OF_WORK)
document = collection.find_one(query)
if document is None:
raise LookupError('Unit_of_work satisfying query %r was not found' % query)
return UnitOfWork.from_json(document)
@thread_safe
def update(self, instance):
""" method finds unit_of_work record and change its status"""
assert isinstance(instance, UnitOfWork)
collection = self.ds.connection(COLLECTION_UNIT_OF_WORK)
document = instance.document
if instance.db_id:
document['_id'] = ObjectId(instance.db_id)
instance.db_id = collection.save(document, safe=True)
return instance.db_id
@thread_safe
def insert(self, instance):
""" inserts a unit of work into MongoDB.
:raises DuplicateKeyError: if such record already exist """
assert isinstance(instance, UnitOfWork)
collection = self.ds.connection(COLLECTION_UNIT_OF_WORK)
try:
return collection.insert(instance.document, safe=True)
except MongoDuplicateKeyError as e:
exc = DuplicateKeyError(instance.process_name,
instance.start_timeperiod,
instance.start_id,
instance.end_id,
e)
raise exc
@thread_safe
def remove(self, uow_id):
assert isinstance(uow_id, (str, ObjectId))
collection = self.ds.connection(COLLECTION_UNIT_OF_WORK)
return collection.remove(uow_id, safe=True)
@thread_safe
def run_query(self, query):
""" method runs the query and returns a list of filtered UnitOfWork records """
cursor = self.ds.filter(COLLECTION_UNIT_OF_WORK, query)
return [UnitOfWork.from_json(document) for document in cursor]
def recover_from_duplicatekeyerror(self, e):
""" method tries to recover from DuplicateKeyError """
if isinstance(e, DuplicateKeyError):
try:
return self.get_by_params(e.process_name, e.timeperiod, e.start_id, e.end_id)
except LookupError as e:
self.logger.error('Unable to recover from DuplicateKeyError error due to %s' % e.message, exc_info=True)
else:
msg = 'Unable to recover from DuplicateKeyError due to unspecified unit_of_work primary key'
self.logger.error(msg)
|
[
"synergy.system.time_helper.cast_to_time_qualifier",
"bson.objectid.ObjectId",
"threading.RLock",
"synergy.db.manager.ds_manager.ds_factory",
"synergy.db.error.DuplicateKeyError",
"synergy.db.model.unit_of_work.UnitOfWork.from_json"
] |
[((1138, 1145), 'threading.RLock', 'RLock', ([], {}), '()\n', (1143, 1145), False, 'from threading import RLock\n'), ((1164, 1193), 'synergy.db.manager.ds_manager.ds_factory', 'ds_manager.ds_factory', (['logger'], {}), '(logger)\n', (1185, 1193), False, 'from synergy.db.manager import ds_manager\n'), ((1750, 1780), 'synergy.db.model.unit_of_work.UnitOfWork.from_json', 'UnitOfWork.from_json', (['document'], {}), '(document)\n', (1770, 1780), False, 'from synergy.db.model.unit_of_work import UnitOfWork\n'), ((4260, 4290), 'synergy.db.model.unit_of_work.UnitOfWork.from_json', 'UnitOfWork.from_json', (['document'], {}), '(document)\n', (4280, 4290), False, 'from synergy.db.model.unit_of_work import UnitOfWork\n'), ((1412, 1425), 'bson.objectid.ObjectId', 'ObjectId', (['key'], {}), '(key)\n', (1420, 1425), False, 'from bson.objectid import ObjectId\n'), ((2609, 2668), 'synergy.system.time_helper.cast_to_time_qualifier', 'time_helper.cast_to_time_qualifier', (['QUALIFIER_YEARLY', 'since'], {}), '(QUALIFIER_YEARLY, since)\n', (2643, 2668), False, 'from synergy.system import time_helper\n'), ((4618, 4642), 'bson.objectid.ObjectId', 'ObjectId', (['instance.db_id'], {}), '(instance.db_id)\n', (4626, 4642), False, 'from bson.objectid import ObjectId\n'), ((5864, 5894), 'synergy.db.model.unit_of_work.UnitOfWork.from_json', 'UnitOfWork.from_json', (['document'], {}), '(document)\n', (5884, 5894), False, 'from synergy.db.model.unit_of_work import UnitOfWork\n'), ((2480, 2510), 'synergy.db.model.unit_of_work.UnitOfWork.from_json', 'UnitOfWork.from_json', (['document'], {}), '(document)\n', (2500, 2510), False, 'from synergy.db.model.unit_of_work import UnitOfWork\n'), ((2874, 2904), 'synergy.db.model.unit_of_work.UnitOfWork.from_json', 'UnitOfWork.from_json', (['document'], {}), '(document)\n', (2894, 2904), False, 'from synergy.db.model.unit_of_work import UnitOfWork\n'), ((3301, 3358), 'synergy.system.time_helper.cast_to_time_qualifier', 'time_helper.cast_to_time_qualifier', (['time_qualifier', 'since'], {}), '(time_qualifier, since)\n', (3335, 3358), False, 'from synergy.system import time_helper\n'), ((5157, 5267), 'synergy.db.error.DuplicateKeyError', 'DuplicateKeyError', (['instance.process_name', 'instance.start_timeperiod', 'instance.start_id', 'instance.end_id', 'e'], {}), '(instance.process_name, instance.start_timeperiod,\n instance.start_id, instance.end_id, e)\n', (5174, 5267), False, 'from synergy.db.error import DuplicateKeyError\n')]
|
def main():
a=0
b=0
if(1<2):
a = 1
else:
a = 2
if(1>2):
b = 1
else:
b = 2
return a + b
# Boilerplate
if __name__ == "__main__":
import sys
ret=main()
sys.exit(ret)
|
[
"sys.exit"
] |
[((225, 238), 'sys.exit', 'sys.exit', (['ret'], {}), '(ret)\n', (233, 238), False, 'import sys\n')]
|
import os
import time
import torch.optim as optim
import torch
import torch.nn.functional as F
import torch.nn as nn
from evaluation import evalFcn
from utils import myUtils
from .RawPSMNet import stackhourglass as rawPSMNet
from .RawPSMNet_TieCheng import stackhourglass as rawPSMNet_TieCheng
from ..Model import Model
from .. import SR
import collections
import torch.nn.parallel as P
from .PSMNet import *
class RawPSMNetDown(RawPSMNetScale):
def __init__(self, maxdisp, dispScale, multiple):
super(RawPSMNetDown, self).__init__(maxdisp, dispScale, multiple)
self.pool = nn.AvgPool2d((2, 2))
# input: RGB value range 0~1
# outputs: disparity range 0~self.maxdisp * self.dispScale / 2
def forward(self, left, right):
outDispHighs = super(RawPSMNetDown, self).forward(left, right)
outDispLows = myUtils.forNestingList(outDispHighs, lambda disp: self.pool(disp) / 2)
return outDispHighs, outDispLows
class PSMNetDown(PSMNet):
# dataset: only used for suffix of saveFolderName
def __init__(self, maxdisp=192, dispScale=1, cuda=True, half=False, stage='unnamed', dataset=None,
saveFolderSuffix=''):
super(PSMNetDown, self).__init__(maxdisp, dispScale, cuda, half, stage, dataset, saveFolderSuffix)
self.outputMaxDisp = self.outputMaxDisp // 2
self.getModel = RawPSMNetDown
def loss(self, outputs, gts, kitti=False, outputMaxDisp=None):
if outputMaxDisp is not None:
raise Exception('Error: outputMaxDisp of PSMNetDown has no use!')
losses = []
for output, gt, outputMaxDisp in zip(outputs, gts, (self.outputMaxDisp * 2, self.outputMaxDisp)):
losses.append(super(PSMNetDown, self).loss(
output, gt, kitti=kitti, outputMaxDisp=outputMaxDisp
) if gt is not None else None)
return losses
def trainOneSide(self, imgL, imgR, gts, returnOutputs=False, kitti=False, weights=(1, 0)):
self.optimizer.zero_grad()
outDispHighs, outDispLows = self.model.forward(imgL, imgR)
losses = self.loss((outDispHighs, outDispLows), gts, kitti=kitti)
loss = sum([weight * loss for weight, loss in zip(weights, losses) if loss is not None])
with self.amp_handle.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
self.optimizer.step()
dispOuts = []
if returnOutputs:
with torch.no_grad():
dispOuts.append(outDispHighs[2].detach() / (self.outputMaxDisp * 2))
dispOuts.append(outDispLows[2].detach() / self.outputMaxDisp)
losses = [loss] + losses
return [loss.data.item() for loss in losses], dispOuts
def train(self, batch, returnOutputs=False, kitti=False, weights=(1, 0), progress=0):
myUtils.assertBatchLen(batch, 8)
self.trainPrepare()
losses = myUtils.NameValues()
outputs = collections.OrderedDict()
imgL, imgR = batch.highResRGBs()
for inputL, inputR, gts, process, side in zip(
(imgL, imgR), (imgR, imgL),
zip(batch.highResDisps(), batch.lowResDisps()),
(lambda im: im, myUtils.flipLR),
('L', 'R')
):
if not all([gt is None for gt in gts]):
lossesList, outputsList = self.trainOneSide(
*process((inputL, inputR, gts)),
returnOutputs=returnOutputs,
kitti=kitti,
weights=weights
)
for suffix, loss in zip(('', 'DispHigh', 'Disp'), lossesList):
if loss is not None:
losses['loss' + suffix + side] = loss
if returnOutputs:
for suffix, output in zip(('High', 'Low'), outputsList):
outputs['outputDisp' + suffix + side] = process(output)
return losses, outputs
def test(self, batch, evalType='l1', returnOutputs=False, kitti=False):
myUtils.assertBatchLen(batch, 8)
batch = myUtils.Batch(batch.highResRGBs() + batch.lowestResDisps(), cuda=batch.cuda, half=batch.half)
scores, outputs, rawOutputs = super(PSMNetDown, self).test(batch, evalType, returnOutputs, kitti)
for rawOutputsSide, side in zip(rawOutputs, ('L', 'R')):
if rawOutputsSide is not None:
(outDispHigh, outDispLow) = rawOutputsSide
if returnOutputs:
if outDispHigh is not None:
outputs['outputDispHigh' + side] = outDispHigh / (self.outputMaxDisp * 2)
return scores, outputs, rawOutputs
|
[
"utils.myUtils.NameValues",
"utils.myUtils.assertBatchLen",
"collections.OrderedDict",
"torch.nn.AvgPool2d",
"torch.no_grad"
] |
[((596, 616), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', (['(2, 2)'], {}), '((2, 2))\n', (608, 616), True, 'import torch.nn as nn\n'), ((2835, 2867), 'utils.myUtils.assertBatchLen', 'myUtils.assertBatchLen', (['batch', '(8)'], {}), '(batch, 8)\n', (2857, 2867), False, 'from utils import myUtils\n'), ((2914, 2934), 'utils.myUtils.NameValues', 'myUtils.NameValues', ([], {}), '()\n', (2932, 2934), False, 'from utils import myUtils\n'), ((2953, 2978), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (2976, 2978), False, 'import collections\n'), ((4063, 4095), 'utils.myUtils.assertBatchLen', 'myUtils.assertBatchLen', (['batch', '(8)'], {}), '(batch, 8)\n', (4085, 4095), False, 'from utils import myUtils\n'), ((2460, 2475), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2473, 2475), False, 'import torch\n')]
|
# coding=utf-8
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common code for the simple cnn example."""
import functools
import os
from absl import logging
from flax import serialization
import haiku as hk
import jax
import jax.numpy as jnp
from learned_optimization import filesystem
import numpy as onp
import optax
import tensorflow_datasets as tfds
HKTree = hk.data_structures.to_immutable_dict({}).__class__
# We use flax for serialization but haiku's data struct is not registered.
def _ty_to_state_dict(v):
return serialization.to_state_dict(
{k: v for k, v in hk.data_structures.to_mutable_dict(v).items()})
def _ty_from_state_dict(target, d):
return HKTree(
**
{k: serialization.from_state_dict(target[k], v) for (k, v) in d.items()})
serialization.register_serialization_state(
HKTree, _ty_to_state_dict, _ty_from_state_dict, override=True)
def hk_forward_fn(batch):
"""Forward function for haiku."""
x = batch["image"].astype(jnp.float32) / 255.
mlp = hk.Sequential([
hk.Conv2D(64, (3, 3), stride=2),
jax.nn.relu,
hk.Conv2D(64, (3, 3), stride=1),
jax.nn.relu,
hk.Conv2D(64, (3, 3), stride=2),
jax.nn.relu,
hk.Conv2D(64, (3, 3), stride=1),
jax.nn.relu,
functools.partial(jnp.mean, axis=(1, 2)),
hk.Linear(10),
])
return mlp(x)
@jax.jit
def loss(params, key, batch):
net = hk.transform(hk_forward_fn)
logits = net.apply(params, key, batch)
labels = jax.nn.one_hot(batch["label"], 10)
softmax_xent = -jnp.sum(labels * jax.nn.log_softmax(logits))
softmax_xent /= labels.shape[0]
return softmax_xent
@jax.jit
def update(params, key, state, batch, meta_params):
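  # One Adam step; the learning rate is taken from meta_params.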
opt = optax.adam(meta_params["learning_rate"])
l, grad = jax.value_and_grad(loss)(params, key, batch)
updates, new_state = opt.update(grad, state, params)
new_params = optax.apply_updates(params, updates)
return new_params, new_state, l
def save_state(path, state):
filesystem.make_dirs(os.path.dirname(path))
with filesystem.file_open(path, "wb") as fp:
fp.write(serialization.to_bytes(state))
def load_state(path, state):
logging.info("Restoring state %s:", path)
with filesystem.file_open(path, "rb") as fp:
state_new = serialization.from_bytes(state, fp.read())
tree = jax.tree_structure(state)
leaves_new = jax.tree_leaves(state_new)
return jax.tree_unflatten(tree, leaves_new)
def get_data_iterators(fake_data=False):
"""Get training and test data iterators."""
batch_size = 128
if not fake_data:
remap_label = lambda x: {"image": x["image"], "label": x["label"]}
def data(split):
dataset = tfds.load("cifar10", split=split)
iterator = iter(
tfds.as_numpy(
dataset.repeat(-1).shuffle(
batch_size * 10).batch(batch_size).map(remap_label)))
return iterator
return data("train"), data("test")
else:
def data():
while True:
yield {
"image": onp.zeros([batch_size, 32, 32, 3]),
"label": onp.zeros([batch_size], dtype=onp.int32)
}
return data(), data()
|
[
"optax.adam",
"tensorflow_datasets.load",
"jax.nn.log_softmax",
"absl.logging.info",
"flax.serialization.register_serialization_state",
"flax.serialization.from_state_dict",
"jax.nn.one_hot",
"os.path.dirname",
"optax.apply_updates",
"haiku.data_structures.to_immutable_dict",
"haiku.Conv2D",
"haiku.data_structures.to_mutable_dict",
"haiku.Linear",
"functools.partial",
"haiku.transform",
"jax.tree_leaves",
"jax.tree_unflatten",
"jax.tree_structure",
"numpy.zeros",
"jax.value_and_grad",
"flax.serialization.to_bytes",
"learned_optimization.filesystem.file_open"
] |
[((1307, 1416), 'flax.serialization.register_serialization_state', 'serialization.register_serialization_state', (['HKTree', '_ty_to_state_dict', '_ty_from_state_dict'], {'override': '(True)'}), '(HKTree, _ty_to_state_dict,\n _ty_from_state_dict, override=True)\n', (1349, 1416), False, 'from flax import serialization\n'), ((897, 937), 'haiku.data_structures.to_immutable_dict', 'hk.data_structures.to_immutable_dict', (['{}'], {}), '({})\n', (933, 937), True, 'import haiku as hk\n'), ((1925, 1952), 'haiku.transform', 'hk.transform', (['hk_forward_fn'], {}), '(hk_forward_fn)\n', (1937, 1952), True, 'import haiku as hk\n'), ((2005, 2039), 'jax.nn.one_hot', 'jax.nn.one_hot', (["batch['label']", '(10)'], {}), "(batch['label'], 10)\n", (2019, 2039), False, 'import jax\n'), ((2231, 2271), 'optax.adam', 'optax.adam', (["meta_params['learning_rate']"], {}), "(meta_params['learning_rate'])\n", (2241, 2271), False, 'import optax\n'), ((2399, 2435), 'optax.apply_updates', 'optax.apply_updates', (['params', 'updates'], {}), '(params, updates)\n', (2418, 2435), False, 'import optax\n'), ((2672, 2713), 'absl.logging.info', 'logging.info', (['"""Restoring state %s:"""', 'path'], {}), "('Restoring state %s:', path)\n", (2684, 2713), False, 'from absl import logging\n'), ((2829, 2854), 'jax.tree_structure', 'jax.tree_structure', (['state'], {}), '(state)\n', (2847, 2854), False, 'import jax\n'), ((2870, 2896), 'jax.tree_leaves', 'jax.tree_leaves', (['state_new'], {}), '(state_new)\n', (2885, 2896), False, 'import jax\n'), ((2906, 2942), 'jax.tree_unflatten', 'jax.tree_unflatten', (['tree', 'leaves_new'], {}), '(tree, leaves_new)\n', (2924, 2942), False, 'import jax\n'), ((2284, 2308), 'jax.value_and_grad', 'jax.value_and_grad', (['loss'], {}), '(loss)\n', (2302, 2308), False, 'import jax\n'), ((2525, 2546), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (2540, 2546), False, 'import os\n'), ((2555, 2587), 'learned_optimization.filesystem.file_open', 'filesystem.file_open', (['path', '"""wb"""'], {}), "(path, 'wb')\n", (2575, 2587), False, 'from learned_optimization import filesystem\n'), ((2721, 2753), 'learned_optimization.filesystem.file_open', 'filesystem.file_open', (['path', '"""rb"""'], {}), "(path, 'rb')\n", (2741, 2753), False, 'from learned_optimization import filesystem\n'), ((1560, 1591), 'haiku.Conv2D', 'hk.Conv2D', (['(64)', '(3, 3)'], {'stride': '(2)'}), '(64, (3, 3), stride=2)\n', (1569, 1591), True, 'import haiku as hk\n'), ((1618, 1649), 'haiku.Conv2D', 'hk.Conv2D', (['(64)', '(3, 3)'], {'stride': '(1)'}), '(64, (3, 3), stride=1)\n', (1627, 1649), True, 'import haiku as hk\n'), ((1676, 1707), 'haiku.Conv2D', 'hk.Conv2D', (['(64)', '(3, 3)'], {'stride': '(2)'}), '(64, (3, 3), stride=2)\n', (1685, 1707), True, 'import haiku as hk\n'), ((1734, 1765), 'haiku.Conv2D', 'hk.Conv2D', (['(64)', '(3, 3)'], {'stride': '(1)'}), '(64, (3, 3), stride=1)\n', (1743, 1765), True, 'import haiku as hk\n'), ((1792, 1832), 'functools.partial', 'functools.partial', (['jnp.mean'], {'axis': '(1, 2)'}), '(jnp.mean, axis=(1, 2))\n', (1809, 1832), False, 'import functools\n'), ((1840, 1853), 'haiku.Linear', 'hk.Linear', (['(10)'], {}), '(10)\n', (1849, 1853), True, 'import haiku as hk\n'), ((2608, 2637), 'flax.serialization.to_bytes', 'serialization.to_bytes', (['state'], {}), '(state)\n', (2630, 2637), False, 'from flax import serialization\n'), ((3180, 3213), 'tensorflow_datasets.load', 'tfds.load', (['"""cifar10"""'], {'split': 'split'}), "('cifar10', split=split)\n", (3189, 3213), True, 'import 
tensorflow_datasets as tfds\n'), ((1235, 1278), 'flax.serialization.from_state_dict', 'serialization.from_state_dict', (['target[k]', 'v'], {}), '(target[k], v)\n', (1264, 1278), False, 'from flax import serialization\n'), ((2076, 2102), 'jax.nn.log_softmax', 'jax.nn.log_softmax', (['logits'], {}), '(logits)\n', (2094, 2102), False, 'import jax\n'), ((1113, 1150), 'haiku.data_structures.to_mutable_dict', 'hk.data_structures.to_mutable_dict', (['v'], {}), '(v)\n', (1147, 1150), True, 'import haiku as hk\n'), ((3518, 3552), 'numpy.zeros', 'onp.zeros', (['[batch_size, 32, 32, 3]'], {}), '([batch_size, 32, 32, 3])\n', (3527, 3552), True, 'import numpy as onp\n'), ((3575, 3615), 'numpy.zeros', 'onp.zeros', (['[batch_size]'], {'dtype': 'onp.int32'}), '([batch_size], dtype=onp.int32)\n', (3584, 3615), True, 'import numpy as onp\n')]
|
"""
This example is for demonstration purposes only.
No agent learns anything useful here, yet.
Well, maybe they do, but it might take a long time to check.
Ain't nobody got time for that.
"""
from pettingzoo.sisl import waterworld_v3
from ai_traineree.agents.ppo import PPOAgent
from ai_traineree.multi_agents.independent import IndependentAgents
from ai_traineree.runners.multiagent_env_runner import MultiAgentCycleEnvRunner
from ai_traineree.tasks import PettingZooTask
env = waterworld_v3.env()
task = PettingZooTask(env=env)
task.reset() # Needs to be reset to access env.agents()
agents = []
for actor_name in task.agents:
obs_space = task.observation_spaces[actor_name]
action_space = task.action_spaces[actor_name]
agents.append(PPOAgent(obs_space, action_space))
multi_agent = IndependentAgents(agents, agent_names=task.agents)
runner = MultiAgentCycleEnvRunner(task, multi_agent=multi_agent)
runner.run(max_episodes=3)
|
[
"ai_traineree.tasks.PettingZooTask",
"ai_traineree.runners.multiagent_env_runner.MultiAgentCycleEnvRunner",
"pettingzoo.sisl.waterworld_v3.env",
"ai_traineree.agents.ppo.PPOAgent",
"ai_traineree.multi_agents.independent.IndependentAgents"
] |
[((481, 500), 'pettingzoo.sisl.waterworld_v3.env', 'waterworld_v3.env', ([], {}), '()\n', (498, 500), False, 'from pettingzoo.sisl import waterworld_v3\n'), ((508, 531), 'ai_traineree.tasks.PettingZooTask', 'PettingZooTask', ([], {'env': 'env'}), '(env=env)\n', (522, 531), False, 'from ai_traineree.tasks import PettingZooTask\n'), ((805, 855), 'ai_traineree.multi_agents.independent.IndependentAgents', 'IndependentAgents', (['agents'], {'agent_names': 'task.agents'}), '(agents, agent_names=task.agents)\n', (822, 855), False, 'from ai_traineree.multi_agents.independent import IndependentAgents\n'), ((865, 920), 'ai_traineree.runners.multiagent_env_runner.MultiAgentCycleEnvRunner', 'MultiAgentCycleEnvRunner', (['task'], {'multi_agent': 'multi_agent'}), '(task, multi_agent=multi_agent)\n', (889, 920), False, 'from ai_traineree.runners.multiagent_env_runner import MultiAgentCycleEnvRunner\n'), ((754, 787), 'ai_traineree.agents.ppo.PPOAgent', 'PPOAgent', (['obs_space', 'action_space'], {}), '(obs_space, action_space)\n', (762, 787), False, 'from ai_traineree.agents.ppo import PPOAgent\n')]
|
from unittest import TestCase
from src.PyWash import SharedDataFrame
from src.Exceptions import *
import pandas as pd
verbose = False
class TestDecorators(TestCase):
""" TestClass for SharedDataFrame methods """
def test_is_mergeable_column_names(self):
if verbose:
print("Testing: is_mergeable_columns")
df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'],
'group': ['Accounting', 'Engineering', 'Engineering', 'HR']})
df2 = pd.DataFrame({'employee': ['Lisa', 'Bob', 'Jake', 'Sue'],
'hire_date': [2004, 2008, 2012, 2014]})
test1 = SharedDataFrame(df=df1, verbose=verbose)
test2 = SharedDataFrame(df=df2, verbose=verbose)
self.assertTrue(test1.is_mergeable(test2))
def test_is_mergeable_common_values(self):
if verbose:
print("Testing: is_mergeable_values")
df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'],
'group': ['Accounting', 'Engineering', 'Engineering', 'HR']})
df2 = pd.DataFrame({'names': ['Lisa', 'Bob', 'Jake', 'Sue'],
'hire_date': [2004, 2008, 2012, 2014]})
test1 = SharedDataFrame(df=df1, verbose=verbose)
test2 = SharedDataFrame(df=df2, verbose=verbose)
self.assertTrue(test1.is_mergeable(test2))
def test_is_mergeable_false(self):
if verbose:
print("Testing: is_mergeable_false")
df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'],
'group': ['Accounting', 'Engineering', 'Engineering', 'HR']})
df2 = pd.DataFrame({'names': ['Lisa', 'Bob', 'Jake', 'Sue', 'Bobby'],
'hire_date': [2004, 2008, 2012, 2014, 2019]})
test1 = SharedDataFrame(df=df1, verbose=verbose)
test2 = SharedDataFrame(df=df2, verbose=verbose)
self.assertFalse(test1.is_mergeable(test2))
def test_merge_on_column_names(self):
if verbose:
print("Testing: merge_on_columns")
df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'],
'group': ['Accounting', 'Engineering', 'Engineering', 'HR']})
df2 = pd.DataFrame({'employee': ['Lisa', 'Bob', 'Jake', 'Sue'],
'hire_date': [2004, 2008, 2012, 2014]})
target = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'],
'group': ['Accounting', 'Engineering', 'Engineering', 'HR'],
'hire_date': [2008, 2012, 2004, 2014]})
test1 = SharedDataFrame(df=df1, verbose=verbose)
test2 = SharedDataFrame(df=df2, verbose=verbose)
test1.merge_into(test2)
self.assertTrue(test1.get_dataframe().equals(target), "Successfully merged the 2 DataFrames")
def test_merge_on_common_values(self):
if verbose:
print("Testing: merge_on_values")
df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'],
'group': ['Accounting', 'Engineering', 'Engineering', 'HR']})
df2 = pd.DataFrame({'names': ['Lisa', 'Bob', 'Jake', 'Sue'],
'hire_date': [2004, 2008, 2012, 2014]})
target = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'],
'group': ['Accounting', 'Engineering', 'Engineering', 'HR'],
'names': ['Bob', 'Jake', 'Lisa', 'Sue'],
'hire_date': [2008, 2012, 2004, 2014]})
test1 = SharedDataFrame(df=df1, verbose=verbose)
test2 = SharedDataFrame(df=df2, verbose=verbose)
test1.merge_into(test2)
if verbose:
print(test1.get_dataframe())
print(target)
self.assertTrue(test1.get_dataframe().equals(target), "Successfully merged the 2 DataFrames")
def test_merge_on_false(self):
if verbose:
print("Testing: merge_false")
df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'],
'group': ['Accounting', 'Engineering', 'Engineering', 'HR']})
df2 = pd.DataFrame({'names': ['Lisa', 'Bob', 'Jake', 'Sue', 'Bobby'],
'hire_date': [2004, 2008, 2012, 2014, 2019]})
test1 = SharedDataFrame(df=df1, verbose=verbose)
test2 = SharedDataFrame(df=df2, verbose=verbose)
if verbose:
print(test1.get_dataframe())
with self.assertRaises(NotMergableError):
test1.merge_into(test2)
if verbose:
print(test1.get_dataframe())
|
[
"pandas.DataFrame",
"src.PyWash.SharedDataFrame"
] |
[((351, 475), 'pandas.DataFrame', 'pd.DataFrame', (["{'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting',\n 'Engineering', 'Engineering', 'HR']}"], {}), "({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': [\n 'Accounting', 'Engineering', 'Engineering', 'HR']})\n", (363, 475), True, 'import pandas as pd\n'), ((513, 615), 'pandas.DataFrame', 'pd.DataFrame', (["{'employee': ['Lisa', 'Bob', 'Jake', 'Sue'], 'hire_date': [2004, 2008, 2012,\n 2014]}"], {}), "({'employee': ['Lisa', 'Bob', 'Jake', 'Sue'], 'hire_date': [\n 2004, 2008, 2012, 2014]})\n", (525, 615), True, 'import pandas as pd\n'), ((655, 695), 'src.PyWash.SharedDataFrame', 'SharedDataFrame', ([], {'df': 'df1', 'verbose': 'verbose'}), '(df=df1, verbose=verbose)\n', (670, 695), False, 'from src.PyWash import SharedDataFrame\n'), ((712, 752), 'src.PyWash.SharedDataFrame', 'SharedDataFrame', ([], {'df': 'df2', 'verbose': 'verbose'}), '(df=df2, verbose=verbose)\n', (727, 752), False, 'from src.PyWash import SharedDataFrame\n'), ((936, 1060), 'pandas.DataFrame', 'pd.DataFrame', (["{'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting',\n 'Engineering', 'Engineering', 'HR']}"], {}), "({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': [\n 'Accounting', 'Engineering', 'Engineering', 'HR']})\n", (948, 1060), True, 'import pandas as pd\n'), ((1098, 1197), 'pandas.DataFrame', 'pd.DataFrame', (["{'names': ['Lisa', 'Bob', 'Jake', 'Sue'], 'hire_date': [2004, 2008, 2012, 2014]\n }"], {}), "({'names': ['Lisa', 'Bob', 'Jake', 'Sue'], 'hire_date': [2004, \n 2008, 2012, 2014]})\n", (1110, 1197), True, 'import pandas as pd\n'), ((1237, 1277), 'src.PyWash.SharedDataFrame', 'SharedDataFrame', ([], {'df': 'df1', 'verbose': 'verbose'}), '(df=df1, verbose=verbose)\n', (1252, 1277), False, 'from src.PyWash import SharedDataFrame\n'), ((1294, 1334), 'src.PyWash.SharedDataFrame', 'SharedDataFrame', ([], {'df': 'df2', 'verbose': 'verbose'}), '(df=df2, verbose=verbose)\n', (1309, 1334), False, 'from src.PyWash import SharedDataFrame\n'), ((1509, 1633), 'pandas.DataFrame', 'pd.DataFrame', (["{'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting',\n 'Engineering', 'Engineering', 'HR']}"], {}), "({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': [\n 'Accounting', 'Engineering', 'Engineering', 'HR']})\n", (1521, 1633), True, 'import pandas as pd\n'), ((1671, 1784), 'pandas.DataFrame', 'pd.DataFrame', (["{'names': ['Lisa', 'Bob', 'Jake', 'Sue', 'Bobby'], 'hire_date': [2004, 2008,\n 2012, 2014, 2019]}"], {}), "({'names': ['Lisa', 'Bob', 'Jake', 'Sue', 'Bobby'], 'hire_date':\n [2004, 2008, 2012, 2014, 2019]})\n", (1683, 1784), True, 'import pandas as pd\n'), ((1825, 1865), 'src.PyWash.SharedDataFrame', 'SharedDataFrame', ([], {'df': 'df1', 'verbose': 'verbose'}), '(df=df1, verbose=verbose)\n', (1840, 1865), False, 'from src.PyWash import SharedDataFrame\n'), ((1882, 1922), 'src.PyWash.SharedDataFrame', 'SharedDataFrame', ([], {'df': 'df2', 'verbose': 'verbose'}), '(df=df2, verbose=verbose)\n', (1897, 1922), False, 'from src.PyWash import SharedDataFrame\n'), ((2099, 2223), 'pandas.DataFrame', 'pd.DataFrame', (["{'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting',\n 'Engineering', 'Engineering', 'HR']}"], {}), "({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': [\n 'Accounting', 'Engineering', 'Engineering', 'HR']})\n", (2111, 2223), True, 'import pandas as pd\n'), ((2261, 2363), 'pandas.DataFrame', 'pd.DataFrame', (["{'employee': ['Lisa', 'Bob', 'Jake', 'Sue'], 'hire_date': [2004, 2008, 2012,\n 
2014]}"], {}), "({'employee': ['Lisa', 'Bob', 'Jake', 'Sue'], 'hire_date': [\n 2004, 2008, 2012, 2014]})\n", (2273, 2363), True, 'import pandas as pd\n'), ((2404, 2572), 'pandas.DataFrame', 'pd.DataFrame', (["{'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting',\n 'Engineering', 'Engineering', 'HR'], 'hire_date': [2008, 2012, 2004, 2014]}"], {}), "({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': [\n 'Accounting', 'Engineering', 'Engineering', 'HR'], 'hire_date': [2008, \n 2012, 2004, 2014]})\n", (2416, 2572), True, 'import pandas as pd\n'), ((2641, 2681), 'src.PyWash.SharedDataFrame', 'SharedDataFrame', ([], {'df': 'df1', 'verbose': 'verbose'}), '(df=df1, verbose=verbose)\n', (2656, 2681), False, 'from src.PyWash import SharedDataFrame\n'), ((2698, 2738), 'src.PyWash.SharedDataFrame', 'SharedDataFrame', ([], {'df': 'df2', 'verbose': 'verbose'}), '(df=df2, verbose=verbose)\n', (2713, 2738), False, 'from src.PyWash import SharedDataFrame\n'), ((2997, 3121), 'pandas.DataFrame', 'pd.DataFrame', (["{'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting',\n 'Engineering', 'Engineering', 'HR']}"], {}), "({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': [\n 'Accounting', 'Engineering', 'Engineering', 'HR']})\n", (3009, 3121), True, 'import pandas as pd\n'), ((3159, 3258), 'pandas.DataFrame', 'pd.DataFrame', (["{'names': ['Lisa', 'Bob', 'Jake', 'Sue'], 'hire_date': [2004, 2008, 2012, 2014]\n }"], {}), "({'names': ['Lisa', 'Bob', 'Jake', 'Sue'], 'hire_date': [2004, \n 2008, 2012, 2014]})\n", (3171, 3258), True, 'import pandas as pd\n'), ((3299, 3507), 'pandas.DataFrame', 'pd.DataFrame', (["{'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting',\n 'Engineering', 'Engineering', 'HR'], 'names': ['Bob', 'Jake', 'Lisa',\n 'Sue'], 'hire_date': [2008, 2012, 2004, 2014]}"], {}), "({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': [\n 'Accounting', 'Engineering', 'Engineering', 'HR'], 'names': ['Bob',\n 'Jake', 'Lisa', 'Sue'], 'hire_date': [2008, 2012, 2004, 2014]})\n", (3311, 3507), True, 'import pandas as pd\n'), ((3608, 3648), 'src.PyWash.SharedDataFrame', 'SharedDataFrame', ([], {'df': 'df1', 'verbose': 'verbose'}), '(df=df1, verbose=verbose)\n', (3623, 3648), False, 'from src.PyWash import SharedDataFrame\n'), ((3665, 3705), 'src.PyWash.SharedDataFrame', 'SharedDataFrame', ([], {'df': 'df2', 'verbose': 'verbose'}), '(df=df2, verbose=verbose)\n', (3680, 3705), False, 'from src.PyWash import SharedDataFrame\n'), ((4039, 4163), 'pandas.DataFrame', 'pd.DataFrame', (["{'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting',\n 'Engineering', 'Engineering', 'HR']}"], {}), "({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': [\n 'Accounting', 'Engineering', 'Engineering', 'HR']})\n", (4051, 4163), True, 'import pandas as pd\n'), ((4201, 4314), 'pandas.DataFrame', 'pd.DataFrame', (["{'names': ['Lisa', 'Bob', 'Jake', 'Sue', 'Bobby'], 'hire_date': [2004, 2008,\n 2012, 2014, 2019]}"], {}), "({'names': ['Lisa', 'Bob', 'Jake', 'Sue', 'Bobby'], 'hire_date':\n [2004, 2008, 2012, 2014, 2019]})\n", (4213, 4314), True, 'import pandas as pd\n'), ((4355, 4395), 'src.PyWash.SharedDataFrame', 'SharedDataFrame', ([], {'df': 'df1', 'verbose': 'verbose'}), '(df=df1, verbose=verbose)\n', (4370, 4395), False, 'from src.PyWash import SharedDataFrame\n'), ((4412, 4452), 'src.PyWash.SharedDataFrame', 'SharedDataFrame', ([], {'df': 'df2', 'verbose': 'verbose'}), '(df=df2, verbose=verbose)\n', (4427, 4452), False, 'from src.PyWash import SharedDataFrame\n')]
|
import os
import logging
import environ
from pathlib import Path
SUPPORTED_NONLOCALES = ['media', 'admin', 'static']
# Build paths inside the project like this: BASE_DIR / 'subdir'.
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__) + "../../../")
env = environ.Env(
    # set casting, default value
    DEBUG=(bool, True)
)
# Take environment variables from .env file
environ.Env.read_env(os.path.join(PROJECT_ROOT, '.env'))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env("SECRET_KEY", default="unsafe-secret-key")
# SECURITY WARNING: don't run with debug turned on in production!
# Debugging displays nice error messages, but leaks memory. Set this to False
# on all server instances and True only for development.
DEBUG = env('DEBUG')
ALLOWED_HOSTS = ["localhost", "127.0.0.1"]
# Application definition
INSTALLED_APPS = [
#Django default apps
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#Third party apps
'compressor',
'django_nose',
'django_extensions',
'debug_toolbar',
#Local apps
#Application base
'Application',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# third party middleware
'whitenoise.middleware.WhiteNoiseMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
]
DEBUG_TOOLBAR_PANELS = [
'debug_toolbar.panels.history.HistoryPanel',
'debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.sql.SQLPanel',
'debug_toolbar.panels.staticfiles.StaticFilesPanel',
'debug_toolbar.panels.templates.TemplatesPanel',
'debug_toolbar.panels.cache.CachePanel',
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logging.LoggingPanel',
'debug_toolbar.panels.redirects.RedirectsPanel',
'debug_toolbar.panels.profiling.ProfilingPanel',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
# insert your TEMPLATE_DIRS here
os.path.join(PROJECT_ROOT, 'templates'),
os.path.join(PROJECT_ROOT, 'templates/.base'),
os.path.join(PROJECT_ROOT, 'templates/layout'),
],
'APP_DIRS': True,
'OPTIONS': {
'debug': DEBUG,
'context_processors': [
# Default context processors
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
# Custom context processors here
#'config.context_processors.custom_context_processor',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# By default, be at least somewhat secure with our session cookies.
SESSION_COOKIE_HTTPONLY = True
# Set this to true if you are using https
SESSION_COOKIE_SECURE = False
# Absolute filesystem path to the directory that will hold user-uploaded files.
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a trailing slash.
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
STATIC_ROOT = 'static/'
# URL prefix for static files.
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = [
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
]
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# Memorycached
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
}
}
# Argon2 password hashing
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
# Custom hasher
'Application.hashers.PBKDF2WrappedSHA1PasswordHasher',
]
# cacheable files and compression support
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATICFILES_FINDERS = (
# django contrib default finders
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# third party finders
'compressor.finders.CompressorFinder',
)
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
INTERNAL_IPS = ['127.0.0.1']
|
[
"os.path.dirname",
"os.path.join",
"environ.Env.read_env",
"environ.Env"
] |
[((265, 278), 'environ.Env', 'environ.Env', ([], {}), '()\n', (276, 278), False, 'import environ\n'), ((299, 321), 'environ.Env.read_env', 'environ.Env.read_env', ([], {}), '()\n', (319, 321), False, 'import environ\n'), ((329, 360), 'environ.Env', 'environ.Env', ([], {'DEBUG': '(bool, True)'}), '(DEBUG=(bool, True))\n', (340, 360), False, 'import environ\n'), ((4882, 4917), 'os.path.join', 'os.path.join', (['PROJECT_ROOT', '"""media"""'], {}), "(PROJECT_ROOT, 'media')\n", (4894, 4917), False, 'import os\n'), ((605, 639), 'os.path.join', 'os.path.join', (['PROJECT_ROOT', '""".env"""'], {}), "(PROJECT_ROOT, '.env')\n", (617, 639), False, 'import os\n'), ((216, 241), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (231, 241), False, 'import os\n'), ((497, 522), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (512, 522), False, 'import os\n'), ((2933, 2972), 'os.path.join', 'os.path.join', (['PROJECT_ROOT', '"""templates"""'], {}), "(PROJECT_ROOT, 'templates')\n", (2945, 2972), False, 'import os\n'), ((2986, 3031), 'os.path.join', 'os.path.join', (['PROJECT_ROOT', '"""templates/.base"""'], {}), "(PROJECT_ROOT, 'templates/.base')\n", (2998, 3031), False, 'import os\n'), ((3045, 3091), 'os.path.join', 'os.path.join', (['PROJECT_ROOT', '"""templates/layout"""'], {}), "(PROJECT_ROOT, 'templates/layout')\n", (3057, 3091), False, 'import os\n')]
|
# Generated by Django 3.1.2 on 2021-01-25 08:11
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='TwitterUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('screen_name', models.CharField(max_length=15)),
],
options={
'db_table': 'twitter_users',
},
),
migrations.CreateModel(
name='Tweet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tweet_text', models.CharField(max_length=280)),
('tweet_date', models.DateTimeField()),
('tweet_lang', models.CharField(max_length=3, null=True)),
('tweet_id', models.CharField(db_index=True, max_length=20, null=True)),
('tweet_info', models.JSONField()),
('is_retweet', models.BooleanField(default=True)),
('retweet_count', models.IntegerField(null=True)),
('twitter_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='db.twitteruser')),
],
options={
'db_table': 'tweets',
},
),
]
|
[
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.JSONField",
"django.db.models.BooleanField",
"django.db.models.AutoField",
"django.db.models.IntegerField",
"django.db.models.DateTimeField"
] |
[((340, 433), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (356, 433), False, 'from django.db import migrations, models\n'), ((464, 495), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(15)'}), '(max_length=15)\n', (480, 495), False, 'from django.db import migrations, models\n'), ((708, 801), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (724, 801), False, 'from django.db import migrations, models\n'), ((831, 863), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(280)'}), '(max_length=280)\n', (847, 863), False, 'from django.db import migrations, models\n'), ((897, 919), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (917, 919), False, 'from django.db import migrations, models\n'), ((953, 994), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(3)', 'null': '(True)'}), '(max_length=3, null=True)\n', (969, 994), False, 'from django.db import migrations, models\n'), ((1026, 1083), 'django.db.models.CharField', 'models.CharField', ([], {'db_index': '(True)', 'max_length': '(20)', 'null': '(True)'}), '(db_index=True, max_length=20, null=True)\n', (1042, 1083), False, 'from django.db import migrations, models\n'), ((1117, 1135), 'django.db.models.JSONField', 'models.JSONField', ([], {}), '()\n', (1133, 1135), False, 'from django.db import migrations, models\n'), ((1169, 1202), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (1188, 1202), False, 'from django.db import migrations, models\n'), ((1239, 1269), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)'}), '(null=True)\n', (1258, 1269), False, 'from django.db import migrations, models\n'), ((1305, 1393), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""db.twitteruser"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'db.twitteruser')\n", (1322, 1393), False, 'from django.db import migrations, models\n')]
|
from sanic import Blueprint
from sanic.response import json
info = Blueprint('info', url_prefix='/info')
@info.route("/ping")
async def ping(request):
"""
$ curl localhost:1700/info/ping
:param request:
:return:
"""
return json({ "hello": "world" })
@info.route('/env/<tag>')
async def env_handler(request, tag):
"""
$ curl localhost:1700/info/env/PATH
:param request:
:param tag:
:return:
"""
import os
return json({tag: os.environ.get(tag)})
@info.post('/echo/<tag>')
async def echo(request, tag):
"""
$ curl -d '{"key1":"value1", "key2":"value2"}' \
-H "Content-Type: application/json" -X POST \
localhost:1700/info/echo/hi | json
:param request:
:param tag:
:return:
"""
    data = request.json
    print("..", data)
    return json({tag: 'POST request - {}'.format(request.json),
'keys': list(data.keys()),
})
|
[
"os.environ.get",
"sanic.response.json",
"sanic.Blueprint"
] |
[((68, 105), 'sanic.Blueprint', 'Blueprint', (['"""info"""'], {'url_prefix': '"""/info"""'}), "('info', url_prefix='/info')\n", (77, 105), False, 'from sanic import Blueprint\n'), ((249, 273), 'sanic.response.json', 'json', (["{'hello': 'world'}"], {}), "({'hello': 'world'})\n", (253, 273), False, 'from sanic.response import json\n'), ((481, 500), 'os.environ.get', 'os.environ.get', (['tag'], {}), '(tag)\n', (495, 500), False, 'import os\n')]
|
from django.shortcuts import render
# Create your views here.
# -*- coding: utf-8 -*-
import json
import datetime
from DataBase import DBOPs
from django.http import HttpResponse
from chatbot import aimlKernel
def chat(request):
"""
Function:
对话接口
Args:
request: 请求
Returns:
返回Http报文
"""
dic = {}
if request.method == 'GET':
dic['botResponse'] = aimlKernel.k.respond(request.GET.get('ask', '无语'), request.GET.get('sessionid','test')).\
replace(' ', '')
DBOPs.InsertDB(request.GET.get('sessionid', 'test'), request.GET.get('ask', '无语'), dic['botResponse'])
        dic['time'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
dic['sessionid'] = request.GET.get('sessionid','test')
return HttpResponse(json.dumps(dic, ensure_ascii=False))
else:
        dic['message'] = u'Method error'
return HttpResponse(json.dumps(dic, ensure_ascii=False))
|
[
"datetime.datetime.now",
"json.dumps"
] |
[((809, 844), 'json.dumps', 'json.dumps', (['dic'], {'ensure_ascii': '(False)'}), '(dic, ensure_ascii=False)\n', (819, 844), False, 'import json\n'), ((917, 952), 'json.dumps', 'json.dumps', (['dic'], {'ensure_ascii': '(False)'}), '(dic, ensure_ascii=False)\n', (927, 952), False, 'import json\n'), ((662, 685), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (683, 685), False, 'import datetime\n')]
|
############### XML 1 - Find the Score ################
import sys
import xml.etree.ElementTree as etree
def get_attr_number(node):
    # count the attributes on this node and every element beneath it
    count = 0
    for i in node.iter():
count += len(i.attrib)
return count
if __name__ == '__main__':
sys.stdin.readline()
xml = sys.stdin.read()
tree = etree.ElementTree(etree.fromstring(xml))
root = tree.getroot()
print(get_attr_number(root))
############### XML2 - Find the Maximum Depth ################
import xml.etree.ElementTree as etree
maxdepth = 0
def depth(elem, level):
global maxdepth
    # record the deepest level reached so far in the global maxdepth
level += 1
if level >= maxdepth:
maxdepth = level
for i in elem:
depth(i, level)
if __name__ == '__main__':
n = int(input())
xml = ""
for i in range(n):
xml = xml + input() + "\n"
tree = etree.ElementTree(etree.fromstring(xml))
depth(tree.getroot(), -1)
print(maxdepth)
|
[
"sys.stdin.read",
"sys.stdin.readline",
"xml.etree.ElementTree.fromstring"
] |
[((298, 318), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (316, 318), False, 'import sys\n'), ((330, 346), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (344, 346), False, 'import sys\n'), ((377, 398), 'xml.etree.ElementTree.fromstring', 'etree.fromstring', (['xml'], {}), '(xml)\n', (393, 398), True, 'import xml.etree.ElementTree as etree\n'), ((933, 954), 'xml.etree.ElementTree.fromstring', 'etree.fromstring', (['xml'], {}), '(xml)\n', (949, 954), True, 'import xml.etree.ElementTree as etree\n')]
|
#! /usr/bin/env python
from __future__ import unicode_literals
from PIL import Image
from subprocess import check_call
from concurrent import futures
import subprocess
import os
import io
import sys
from os import listdir
from os.path import isfile, join
import psutil
import time
vers_to_run = [ 3, 4, 5, 7, 8, 9,10,11,12,58,59,60,61,62,63,64]
in_vers = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 6, 6, 6, 6, 6, 6]
num_threads = 14
# The directory to convert
datasetpath = '/datasets/voc-2007/'
def convert_img(file_name,in_img_dir,out_img_dir):
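  # Convert one source JPEG to PNG in a scratch directory, run the compiled
  # pipeline binary (../common/pipeline_V<version>.o) on it, then save the
  # resulting output.png back as a JPEG in out_img_dir.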
# Make temp directory
temp_dir = 'temp_'+str(os.getpid())
subprocess.call('mkdir -p '+temp_dir,shell=True)
# Convert to png #
im = Image.open(in_img_dir+file_name)
im.save(temp_dir+'/'+file_name+'_temp.png')
# Run the given pipeline on the png
subprocess.call('../common/pipeline_V'+str(version) + '.o ' +
temp_dir + '/' + file_name + '_temp.png ' +
temp_dir + '/', shell=True)
# Convert back to jpeg and save
im = Image.open(temp_dir+'/'+'output.png')
im.save(out_img_dir+'/'+file_name)
# Delete temp directory
subprocess.call('rm -rf '+temp_dir,shell=True)
for i, version in enumerate(vers_to_run):
in_version = in_vers[i]
subprocess.call('make --directory ../common/ version='+str(version),shell=True)
# Copy all but the JPEG images
subprocess.call('rsync -av '+
datasetpath+'/v'+str(in_version)+'/ '+
datasetpath+'/v'+str(version)+' '+
'--exclude VOC2007/JPEGImages',
shell=True)
in_img_dir = datasetpath+'v'+str(in_version)+'/VOC2007/JPEGImages/'
out_img_dir = datasetpath+'v'+str(version)+'/VOC2007/JPEGImages/'
# Make the directory for this section
subprocess.call('mkdir -p '+out_img_dir,shell=True)
# Get list of files in directory
file_list = [f for f in listdir(in_img_dir) if
isfile(join(in_img_dir, f))]
file_list.sort()
with futures.ProcessPoolExecutor(max_workers=num_threads) as executor:
fs = [executor.submit(convert_img,file_name,in_img_dir,out_img_dir) for file_name in file_list]
for i, f in enumerate(futures.as_completed(fs)):
# Write progress to error so that it can be seen
sys.stderr.write( \
"Converted Image: {} / {} \r".format(i, len(file_list)))
|
[
"os.getpid",
"concurrent.futures.ProcessPoolExecutor",
"PIL.Image.open",
"subprocess.call",
"os.path.join",
"os.listdir",
"concurrent.futures.as_completed"
] |
[((942, 993), 'subprocess.call', 'subprocess.call', (["('mkdir -p ' + temp_dir)"], {'shell': '(True)'}), "('mkdir -p ' + temp_dir, shell=True)\n", (957, 993), False, 'import subprocess\n'), ((1020, 1054), 'PIL.Image.open', 'Image.open', (['(in_img_dir + file_name)'], {}), '(in_img_dir + file_name)\n', (1030, 1054), False, 'from PIL import Image\n'), ((1379, 1420), 'PIL.Image.open', 'Image.open', (["(temp_dir + '/' + 'output.png')"], {}), "(temp_dir + '/' + 'output.png')\n", (1389, 1420), False, 'from PIL import Image\n'), ((1518, 1567), 'subprocess.call', 'subprocess.call', (["('rm -rf ' + temp_dir)"], {'shell': '(True)'}), "('rm -rf ' + temp_dir, shell=True)\n", (1533, 1567), False, 'import subprocess\n'), ((2167, 2221), 'subprocess.call', 'subprocess.call', (["('mkdir -p ' + out_img_dir)"], {'shell': '(True)'}), "('mkdir -p ' + out_img_dir, shell=True)\n", (2182, 2221), False, 'import subprocess\n'), ((2405, 2457), 'concurrent.futures.ProcessPoolExecutor', 'futures.ProcessPoolExecutor', ([], {'max_workers': 'num_threads'}), '(max_workers=num_threads)\n', (2432, 2457), False, 'from concurrent import futures\n'), ((927, 938), 'os.getpid', 'os.getpid', ([], {}), '()\n', (936, 938), False, 'import os\n'), ((2283, 2302), 'os.listdir', 'listdir', (['in_img_dir'], {}), '(in_img_dir)\n', (2290, 2302), False, 'from os import listdir\n'), ((2599, 2623), 'concurrent.futures.as_completed', 'futures.as_completed', (['fs'], {}), '(fs)\n', (2619, 2623), False, 'from concurrent import futures\n'), ((2356, 2375), 'os.path.join', 'join', (['in_img_dir', 'f'], {}), '(in_img_dir, f)\n', (2360, 2375), False, 'from os.path import isfile, join\n')]
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
from scrapy.conf import settings
from scrapy.exceptions import DropItem
from scrapy import log
class CoinnewsPipeline(object):
collection_name = 'coin_articles'
def __init__(self, mongo_uri, mongo_db):
self.mongo_uri = mongo_uri
self.mongo_db = mongo_db
@classmethod
def from_crawler(cls, crawler):
return cls(
mongo_uri=crawler.settings.get('MONGO_URI'),
mongo_db=crawler.settings.get('MONGO_DATABASE', 'items')
)
def open_spider(self, spider):
self.client = pymongo.MongoClient(self.mongo_uri)
self.db = self.client[self.mongo_db]
def close_spider(self, spider):
self.client.close()
def process_item(self, item, spider):
for data in item:
            if not item[data]:
raise DropItem("Missing data!")
self.db[self.collection_name].insert_one(dict(item))
log.msg("Question added to MongoDB database!",
level=log.DEBUG, spider=spider)
return item
|
[
"pymongo.MongoClient",
"scrapy.log.msg",
"scrapy.exceptions.DropItem"
] |
[((762, 797), 'pymongo.MongoClient', 'pymongo.MongoClient', (['self.mongo_uri'], {}), '(self.mongo_uri)\n', (781, 797), False, 'import pymongo\n'), ((1119, 1197), 'scrapy.log.msg', 'log.msg', (['"""Question added to MongoDB database!"""'], {'level': 'log.DEBUG', 'spider': 'spider'}), "('Question added to MongoDB database!', level=log.DEBUG, spider=spider)\n", (1126, 1197), False, 'from scrapy import log\n'), ((1024, 1049), 'scrapy.exceptions.DropItem', 'DropItem', (['"""Missing data!"""'], {}), "('Missing data!')\n", (1032, 1049), False, 'from scrapy.exceptions import DropItem\n')]
|
from __future__ import annotations
import asyncio
import datetime
import logging
from typing import TYPE_CHECKING
from pymongo import UpdateMany, InsertOne
from matchengine.internals.typing.matchengine_types import RunLogUpdateTask, UpdateTask, MongoQuery
from matchengine.internals.utilities.list_utils import chunk_list
from matchengine.internals.utilities.utilities import perform_db_call
logging.basicConfig(level=logging.INFO)
log = logging.getLogger('matchengine')
if TYPE_CHECKING:
from matchengine.internals.engine import MatchEngine
async def async_update_matches_by_protocol_no(matchengine: MatchEngine, protocol_no: str):
"""
Update trial matches by diff'ing the newly created trial matches against existing matches in
the db. Delete matches by adding {is_disabled: true} and insert all new matches.
"""
matches_by_sample_id = matchengine.matches.get(protocol_no, dict())
updated_time = datetime.datetime.now()
for matches in matches_by_sample_id.values():
for match in matches:
match['_updated'] = updated_time
if protocol_no not in matchengine.matches or protocol_no not in matchengine._trials_to_match_on:
log.info(f"{matchengine.match_criteria_transform.trial_collection} {protocol_no} was not matched on, not updating {matchengine.match_criteria_transform.trial_collection} matches")
if not matchengine.skip_run_log_entry:
matchengine.task_q.put_nowait(RunLogUpdateTask(protocol_no))
await matchengine.task_q.join()
return
log.info(f"Updating matches for {protocol_no}")
if not matchengine.drop:
# If no matches are found, disable all match records by sample id
if not matchengine.matches[protocol_no]:
for chunk in chunk_list(list(matchengine.clinical_ids_for_protocol_cache[protocol_no]),
matchengine.chunk_size):
matchengine.task_q.put_nowait(
UpdateTask(
[UpdateMany(filter={matchengine.match_criteria_transform.match_trial_link_id: protocol_no,
'clinical_id': {'$in': chunk}},
update={'$set': {"is_disabled": True,
'_updated': updated_time}})],
protocol_no
)
)
else:
# Get matches to disable and issue queries
matches_to_disable = await get_all_except(matchengine, protocol_no, matches_by_sample_id)
delete_ops = await get_delete_ops(matches_to_disable, matchengine)
matchengine.task_q.put_nowait(UpdateTask(delete_ops, protocol_no))
for sample_id in matches_by_sample_id.keys():
if not matchengine.drop:
new_matches_hashes = [match['hash'] for match in matches_by_sample_id[sample_id]]
# get existing matches in db with identical hashes to newly found matches
existing = await get_existing_matches(matchengine, new_matches_hashes)
existing_hashes = {result['hash'] for result in existing}
disabled = {result['hash'] for result in existing if result['is_disabled']}
# insert new matches if they don't already exist. disable everything else
matches_to_insert = get_matches_to_insert(matches_by_sample_id,
existing_hashes,
sample_id)
matches_to_disable = await get_matches_to_disable(matchengine,
new_matches_hashes,
protocol_no,
sample_id)
# flip is_disabled flag if a new match generated during run matches hash of an existing
matches_to_mark_available = [m for m in matches_by_sample_id[sample_id] if
m['hash'] in disabled]
ops = get_update_operations(matches_to_disable,
matches_to_insert,
matches_to_mark_available,
matchengine)
else:
ops = [InsertOne(document=trial_match) for trial_match in
matches_by_sample_id[sample_id]]
matchengine.task_q.put_nowait(UpdateTask(ops, protocol_no))
if not matchengine.skip_run_log_entry:
matchengine.task_q.put_nowait(RunLogUpdateTask(protocol_no))
await matchengine.task_q.join()
async def get_all_except(matchengine: MatchEngine,
protocol_no: str,
trial_matches_by_sample_id: dict) -> list:
"""Return all matches except ones matching current protocol_no"""
# get clinical ids with matches
clinical_ids = {matchengine.sample_mapping[sample_id] for sample_id in trial_matches_by_sample_id.keys()}
# if protocol has been run previously, subtract clinical ids from current run from
# previously run clinical ids for a specific protocol. The remainder are ids
# which were run previously, but not in the current run.
if protocol_no in matchengine.clinical_run_log_entries:
clinical_ids = matchengine.clinical_run_log_entries[protocol_no] - clinical_ids
query = {
matchengine.match_criteria_transform.match_trial_link_id: protocol_no,
"clinical_id": {
'$in': [clinical_id for clinical_id in clinical_ids]
}
}
projection = {
'_id': 1,
'hash': 1,
'clinical_id': 1
}
results = await perform_db_call(matchengine,
collection=matchengine.trial_match_collection,
query=MongoQuery(query),
projection=projection)
return [result for result in results]
async def get_delete_ops(matches_to_disable: list, matchengine: MatchEngine) -> list:
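    # Chunk the stale match hashes into UpdateMany operations that flag each
    # match document as is_disabled and stamp the update time.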
updated_time = datetime.datetime.now()
hashes = [result['hash'] for result in matches_to_disable]
ops = list()
for chunk in chunk_list(hashes, matchengine.chunk_size):
ops.append(UpdateMany(filter={'hash': {'$in': chunk}},
update={'$set': {"is_disabled": True, '_updated': updated_time}}))
return ops
async def get_existing_matches(matchengine: MatchEngine, new_matches_hashes: list) -> list:
"""
Get matches in db which have the same hashes as newly found matches.
:param matchengine:
:param new_matches_hashes:
:return:
"""
matches_to_not_change_query = MongoQuery({'hash': {'$in': new_matches_hashes}})
projection = {"hash": 1, "is_disabled": 1}
matches = await asyncio.gather(
perform_db_call(matchengine,
matchengine.trial_match_collection,
matches_to_not_change_query,
projection)
)
return matches[0]
async def get_matches_to_disable(matchengine: MatchEngine,
new_matches_hashes: list,
protocol_no: str,
sample_id: str) -> list:
"""
Get matches to disable by looking for existing, enabled matches whose
hashes are not present in newly generated matches during current run.
Done for every sample_id
:param matchengine:
:param new_matches_hashes:
:param protocol_no:
:param sample_id:
:return:
"""
query = {
matchengine.match_criteria_transform.match_trial_link_id: protocol_no,
'sample_id': sample_id,
'is_disabled': False,
'hash': {
'$nin': new_matches_hashes
}
}
matches_to_disable_query = MongoQuery(query)
projection = {"hash": 1, "is_disabled": 1}
matches = await asyncio.gather(
perform_db_call(matchengine,
matchengine.trial_match_collection,
matches_to_disable_query,
projection)
)
return matches[0]
def get_update_operations(matches_to_disable: list,
matches_to_insert: list,
matches_to_mark_available: list,
matchengine: MatchEngine) -> list:
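    # Build the bulk-write operations for one sample: disable stale matches by
    # hash, insert newly generated matches, and re-enable previously disabled
    # matches whose hashes reappeared in this run.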
ops = list()
updated_time = datetime.datetime.now()
disable_hashes = [trial_match['hash'] for trial_match in matches_to_disable]
for chunk in chunk_list(disable_hashes, matchengine.chunk_size):
ops.append(UpdateMany(filter={'hash': {'$in': chunk}},
update={'$set': {'is_disabled': True,
'_updated': updated_time}}))
for to_insert in matches_to_insert:
ops.append(InsertOne(document=to_insert))
available_hashes = [trial_match['hash'] for trial_match in matches_to_mark_available]
for chunk in chunk_list(available_hashes, matchengine.chunk_size):
ops.append(UpdateMany(filter={'hash': {'$in': chunk}},
update={'$set': {'is_disabled': False,
'_updated': updated_time}}))
return ops
def get_matches_to_insert(matches_by_sample_id: list, existing_hashes: set,
sample_id: str) -> list:
return [m for m in matches_by_sample_id[sample_id] if m['hash'] not in existing_hashes]
|
[
"matchengine.internals.typing.matchengine_types.UpdateTask",
"logging.basicConfig",
"pymongo.UpdateMany",
"matchengine.internals.utilities.list_utils.chunk_list",
"matchengine.internals.typing.matchengine_types.RunLogUpdateTask",
"matchengine.internals.utilities.utilities.perform_db_call",
"matchengine.internals.typing.matchengine_types.MongoQuery",
"datetime.datetime.now",
"logging.getLogger",
"pymongo.InsertOne"
] |
[((396, 435), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (415, 435), False, 'import logging\n'), ((442, 474), 'logging.getLogger', 'logging.getLogger', (['"""matchengine"""'], {}), "('matchengine')\n", (459, 474), False, 'import logging\n'), ((932, 955), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (953, 955), False, 'import datetime\n'), ((6146, 6169), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6167, 6169), False, 'import datetime\n'), ((6267, 6309), 'matchengine.internals.utilities.list_utils.chunk_list', 'chunk_list', (['hashes', 'matchengine.chunk_size'], {}), '(hashes, matchengine.chunk_size)\n', (6277, 6309), False, 'from matchengine.internals.utilities.list_utils import chunk_list\n'), ((6771, 6820), 'matchengine.internals.typing.matchengine_types.MongoQuery', 'MongoQuery', (["{'hash': {'$in': new_matches_hashes}}"], {}), "({'hash': {'$in': new_matches_hashes}})\n", (6781, 6820), False, 'from matchengine.internals.typing.matchengine_types import RunLogUpdateTask, UpdateTask, MongoQuery\n'), ((7915, 7932), 'matchengine.internals.typing.matchengine_types.MongoQuery', 'MongoQuery', (['query'], {}), '(query)\n', (7925, 7932), False, 'from matchengine.internals.typing.matchengine_types import RunLogUpdateTask, UpdateTask, MongoQuery\n'), ((8488, 8511), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8509, 8511), False, 'import datetime\n'), ((8610, 8660), 'matchengine.internals.utilities.list_utils.chunk_list', 'chunk_list', (['disable_hashes', 'matchengine.chunk_size'], {}), '(disable_hashes, matchengine.chunk_size)\n', (8620, 8660), False, 'from matchengine.internals.utilities.list_utils import chunk_list\n'), ((9067, 9119), 'matchengine.internals.utilities.list_utils.chunk_list', 'chunk_list', (['available_hashes', 'matchengine.chunk_size'], {}), '(available_hashes, matchengine.chunk_size)\n', (9077, 9119), False, 'from matchengine.internals.utilities.list_utils import chunk_list\n'), ((4518, 4546), 'matchengine.internals.typing.matchengine_types.UpdateTask', 'UpdateTask', (['ops', 'protocol_no'], {}), '(ops, protocol_no)\n', (4528, 4546), False, 'from matchengine.internals.typing.matchengine_types import RunLogUpdateTask, UpdateTask, MongoQuery\n'), ((4630, 4659), 'matchengine.internals.typing.matchengine_types.RunLogUpdateTask', 'RunLogUpdateTask', (['protocol_no'], {}), '(protocol_no)\n', (4646, 4659), False, 'from matchengine.internals.typing.matchengine_types import RunLogUpdateTask, UpdateTask, MongoQuery\n'), ((6330, 6443), 'pymongo.UpdateMany', 'UpdateMany', ([], {'filter': "{'hash': {'$in': chunk}}", 'update': "{'$set': {'is_disabled': True, '_updated': updated_time}}"}), "(filter={'hash': {'$in': chunk}}, update={'$set': {'is_disabled':\n True, '_updated': updated_time}})\n", (6340, 6443), False, 'from pymongo import UpdateMany, InsertOne\n'), ((6912, 7021), 'matchengine.internals.utilities.utilities.perform_db_call', 'perform_db_call', (['matchengine', 'matchengine.trial_match_collection', 'matches_to_not_change_query', 'projection'], {}), '(matchengine, matchengine.trial_match_collection,\n matches_to_not_change_query, projection)\n', (6927, 7021), False, 'from matchengine.internals.utilities.utilities import perform_db_call\n'), ((8024, 8130), 'matchengine.internals.utilities.utilities.perform_db_call', 'perform_db_call', (['matchengine', 'matchengine.trial_match_collection', 'matches_to_disable_query', 'projection'], {}), '(matchengine, 
matchengine.trial_match_collection,\n matches_to_disable_query, projection)\n', (8039, 8130), False, 'from matchengine.internals.utilities.utilities import perform_db_call\n'), ((8681, 8794), 'pymongo.UpdateMany', 'UpdateMany', ([], {'filter': "{'hash': {'$in': chunk}}", 'update': "{'$set': {'is_disabled': True, '_updated': updated_time}}"}), "(filter={'hash': {'$in': chunk}}, update={'$set': {'is_disabled':\n True, '_updated': updated_time}})\n", (8691, 8794), False, 'from pymongo import UpdateMany, InsertOne\n'), ((8928, 8957), 'pymongo.InsertOne', 'InsertOne', ([], {'document': 'to_insert'}), '(document=to_insert)\n', (8937, 8957), False, 'from pymongo import UpdateMany, InsertOne\n'), ((9140, 9254), 'pymongo.UpdateMany', 'UpdateMany', ([], {'filter': "{'hash': {'$in': chunk}}", 'update': "{'$set': {'is_disabled': False, '_updated': updated_time}}"}), "(filter={'hash': {'$in': chunk}}, update={'$set': {'is_disabled':\n False, '_updated': updated_time}})\n", (9150, 9254), False, 'from pymongo import UpdateMany, InsertOne\n'), ((1459, 1488), 'matchengine.internals.typing.matchengine_types.RunLogUpdateTask', 'RunLogUpdateTask', (['protocol_no'], {}), '(protocol_no)\n', (1475, 1488), False, 'from matchengine.internals.typing.matchengine_types import RunLogUpdateTask, UpdateTask, MongoQuery\n'), ((2706, 2741), 'matchengine.internals.typing.matchengine_types.UpdateTask', 'UpdateTask', (['delete_ops', 'protocol_no'], {}), '(delete_ops, protocol_no)\n', (2716, 2741), False, 'from matchengine.internals.typing.matchengine_types import RunLogUpdateTask, UpdateTask, MongoQuery\n'), ((4377, 4408), 'pymongo.InsertOne', 'InsertOne', ([], {'document': 'trial_match'}), '(document=trial_match)\n', (4386, 4408), False, 'from pymongo import UpdateMany, InsertOne\n'), ((5918, 5935), 'matchengine.internals.typing.matchengine_types.MongoQuery', 'MongoQuery', (['query'], {}), '(query)\n', (5928, 5935), False, 'from matchengine.internals.typing.matchengine_types import RunLogUpdateTask, UpdateTask, MongoQuery\n'), ((2015, 2211), 'pymongo.UpdateMany', 'UpdateMany', ([], {'filter': "{matchengine.match_criteria_transform.match_trial_link_id: protocol_no,\n 'clinical_id': {'$in': chunk}}", 'update': "{'$set': {'is_disabled': True, '_updated': updated_time}}"}), "(filter={matchengine.match_criteria_transform.match_trial_link_id:\n protocol_no, 'clinical_id': {'$in': chunk}}, update={'$set': {\n 'is_disabled': True, '_updated': updated_time}})\n", (2025, 2211), False, 'from pymongo import UpdateMany, InsertOne\n')]
|
import aquests
CONCURRENT = 50
MAX_REQ = 1000
_ID = 0
def makeload (response):
global _ID
print (response.meta ['_id'], response.code, response.msg, response.version)
if aquests.countreq () < MAX_REQ:
aquests.get ("http://127.0.0.1:5000/", meta = {'_id': _ID})
_ID += 1
def test_makeload ():
	global _ID
	aquests.configure (CONCURRENT, callback = makeload) # concurrent
for i in range (CONCURRENT):
aquests.get ("http://127.0.0.1:5000/", meta = {'_id': _ID})
_ID += 1
aquests.fetchall ()
|
[
"aquests.fetchall",
"aquests.countreq",
"aquests.get",
"aquests.configure"
] |
[((305, 353), 'aquests.configure', 'aquests.configure', (['CONCURRENT'], {'callback': 'makeload'}), '(CONCURRENT, callback=makeload)\n', (322, 353), False, 'import aquests\n'), ((479, 497), 'aquests.fetchall', 'aquests.fetchall', ([], {}), '()\n', (495, 497), False, 'import aquests\n'), ((175, 193), 'aquests.countreq', 'aquests.countreq', ([], {}), '()\n', (191, 193), False, 'import aquests\n'), ((208, 264), 'aquests.get', 'aquests.get', (['"""http://127.0.0.1:5000/"""'], {'meta': "{'_id': _ID}"}), "('http://127.0.0.1:5000/', meta={'_id': _ID})\n", (219, 264), False, 'import aquests\n'), ((404, 460), 'aquests.get', 'aquests.get', (['"""http://127.0.0.1:5000/"""'], {'meta': "{'_id': _ID}"}), "('http://127.0.0.1:5000/', meta={'_id': _ID})\n", (415, 460), False, 'import aquests\n')]
|
import logging
import random
from . import formulas
from . import temperature
from .bond import Bond
from .bond import possible_group_bonds
from .coderack import coderack
from .correspondence import Correspondence
from .group import Group
from .letter import Letter
from .replacement import Replacement
from .slipnet import slipnet
from .workspace_formulas import choose_bond_facet
from .workspace_formulas import choose_directed_neighbor
from .workspace_formulas import choose_neighbour
from .workspace_formulas import choose_unmodified_object
from .workspace_formulas import workspace
from .workspace_object import WorkspaceObject
# some methods common to the codelets
def __show_which_string_object_is_from(structure):
if not structure:
return "unstructured"
if isinstance(structure, WorkspaceObject):
return "target"
if structure.string == workspace.initial:
return "initial"
return "other"
def __get_scout_source(slipnode, relevance_method, type_name):
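    # Pick the initial or target string probabilistically, weighted by its
    # relevance to the slipnode plus its intra-string unhappiness, then return
    # a salient unmodified object from the chosen string.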
initial_relevance = relevance_method(workspace.initial, slipnode)
target_relevance = relevance_method(workspace.target, slipnode)
initial_unhappiness = workspace.initial.intra_string_unhappiness
target_unhappiness = workspace.target.intra_string_unhappiness
logging.info(
f"initial : relevance = {initial_relevance}, "
f"unhappiness = {int(initial_unhappiness)}"
)
logging.info(
f"target : relevance = {target_relevance}, "
f"unhappiness = {int(target_unhappiness)}"
)
string = workspace.initial
relevances = initial_relevance + target_relevance
unhappinesses = initial_unhappiness + target_unhappiness
randomized = random.random() * (relevances + unhappinesses)
initials = initial_relevance + initial_unhappiness
if randomized > initials:
string = workspace.target
logging.info(f"target string selected: {workspace.target} for {type_name}")
else:
logging.info(f"initial string selected: {workspace.initial} for {type_name}")
source = choose_unmodified_object("intra_string_salience", string.objects)
return source
def __get_bond_facet(source, destination):
bond_facet = choose_bond_facet(source, destination)
assert bond_facet
return bond_facet
def __get_descriptors(bond_facet, source, destination):
source_descriptor = source.get_descriptor(bond_facet)
destination_descriptor = destination.get_descriptor(bond_facet)
assert source_descriptor
assert destination_descriptor
return source_descriptor, destination_descriptor
def __all_opposite_mappings(mappings):
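    # true only when every mapping is labelled slipnet.opposite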
return len([m for m in mappings if m.label != slipnet.opposite]) == 0
def __structure_versus_structure(structure1, weight1, structure2, weight2):
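    # stochastic fight: structure1 wins if its temperature-adjusted, weighted strength
    # exceeds a random fraction of the combined weighted strength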
structure1.update_strength()
structure2.update_strength()
weighted_strength1 = formulas.temperature_adjusted_value(
structure1.total_strength * weight1
)
weighted_strength2 = formulas.temperature_adjusted_value(
structure2.total_strength * weight2
)
rhs = (weighted_strength1 + weighted_strength2) * random.random()
logging.info(f"{weighted_strength1} > {rhs}: {weighted_strength1 > rhs}")
return weighted_strength1 > rhs
def __fight(structure, structure_weight, incompatibles, incompatible_weight):
if not (incompatibles and len(incompatibles)):
return True
for incompatible in incompatibles:
if not __structure_versus_structure(
structure, structure_weight, incompatible, incompatible_weight
):
logging.info(f"lost fight with {incompatible}")
return False
logging.info(f"won fight with {incompatible}")
return True
def __fight_incompatibles(
incompatibles, structure, name, structure_weight, incompatible_weight
):
if len(incompatibles):
if __fight(structure, structure_weight, incompatibles, incompatible_weight):
logging.info(f"broke the {name}")
return True
logging.info(f"failed to break {name}: Fizzle")
return False
logging.info(f"no incompatible {name}")
return True
def __slippability(concept_mappings):
for mapping in concept_mappings:
slippiness = mapping.slippability() / 100.0
probability_of_slippage = formulas.temperature_adjusted_probability(slippiness)
if formulas.coin_flip(probability_of_slippage):
return True
return False
# start the actual codelets
def breaker():
probability_of_fizzle = (100.0 - formulas.Temperature) / 100.0
assert not formulas.coin_flip(probability_of_fizzle)
# choose a structure at random
structures = [
s for s in workspace.structures if isinstance(s, (Group, Bond, Correspondence))
]
assert structures
structure = random.choice(structures)
__show_which_string_object_is_from(structure)
break_objects = [structure]
if isinstance(structure, Bond):
if structure.source.group:
if structure.source.group == structure.destination.group:
break_objects += [structure.source.group]
# try to break all objects
for structure in break_objects:
break_probability = formulas.temperature_adjusted_probability(
structure.total_strength / 100.0
)
if formulas.coin_flip(break_probability):
return
for structure in break_objects:
structure.break_the_structure()
def bottom_up_description_scout(codelet):
chosen_object = choose_unmodified_object("total_salience", workspace.objects)
assert chosen_object
__show_which_string_object_is_from(chosen_object)
description = formulas.choose_relevant_description_by_activation(chosen_object)
assert description
sliplinks = formulas.similar_property_links(description.descriptor)
assert sliplinks
values = [
sliplink.degree_of_association() * sliplink.destination.activation
for sliplink in sliplinks
]
i = formulas.select_list_position(values)
chosen = sliplinks[i]
chosen_property = chosen.destination
coderack.propose_description(
chosen_object, chosen_property.category(), chosen_property, codelet
)
def top_down_description_scout(codelet):
description_type = codelet.arguments[0]
chosen_object = choose_unmodified_object("total_salience", workspace.objects)
assert chosen_object
__show_which_string_object_is_from(chosen_object)
descriptions = chosen_object.get_possible_descriptions(description_type)
assert descriptions
values = [n.activation for n in descriptions]
i = formulas.select_list_position(values)
chosen_property = descriptions[i]
coderack.propose_description(
chosen_object, chosen_property.category(), chosen_property, codelet
)
def description_strength_tester(codelet):
description = codelet.arguments[0]
description.descriptor.buffer = 100.0
description.update_strength()
strength = description.total_strength
probability = formulas.temperature_adjusted_probability(strength / 100.0)
assert formulas.coin_flip(probability)
coderack.new_codelet("description-builder", codelet, strength)
def description_builder(codelet):
description = codelet.arguments[0]
assert description.object in workspace.objects
if description.object.described(description.descriptor):
description.description_type.buffer = 100.0
description.descriptor.buffer = 100.0
else:
description.build()
def bottom_up_bond_scout(codelet):
source = choose_unmodified_object("intra_string_salience", workspace.objects)
__show_which_string_object_is_from(source)
destination = choose_neighbour(source)
assert destination
logging.info(f"destination: {destination}")
bond_facet = __get_bond_facet(source, destination)
logging.info(f"chosen bond facet: {bond_facet.get_name()}")
logging.info(f"Source: {source}, destination: {destination}")
bond_descriptors = __get_descriptors(bond_facet, source, destination)
source_descriptor, destination_descriptor = bond_descriptors
logging.info(f"source descriptor: {source_descriptor.name.upper()}")
logging.info(f"destination descriptor: {destination_descriptor.name.upper()}")
category = source_descriptor.get_bond_category(destination_descriptor)
assert category
if category == slipnet.identity:
category = slipnet.sameness
logging.info(f"proposing {category.name} bond ")
coderack.propose_bond(
source,
destination,
category,
bond_facet,
source_descriptor,
destination_descriptor,
codelet,
)
def rule_scout(codelet):
assert workspace.number_of_unreplaced_objects() == 0
changed_objects = [o for o in workspace.initial.objects if o.changed]
# assert len(changed_objects) < 2
# if there are no changed objects, propose a rule with no changes
if not changed_objects:
return coderack.propose_rule(None, None, None, None, codelet)
changed = changed_objects[-1]
# generate a list of distinguishing descriptions for the first object
    # i.e. string-position (left-, right-most, middle or whole) or letter category
# if it is the only one of its type in the string
object_list = []
position = changed.get_descriptor(slipnet.string_position_category)
if position:
object_list += [position]
letter = changed.get_descriptor(slipnet.letter_category)
other_objects_of_same_letter = [
o
for o in workspace.initial.objects
if not o != changed and o.get_description_type(letter)
]
if not len(other_objects_of_same_letter):
object_list += [letter]
# if this object corresponds to another object in the workspace
    # object_list = the union of this and the distinguishing descriptors
if changed.correspondence:
target_object = changed.correspondence.object_from_target
new_list = []
slippages = workspace.slippages()
for node in object_list:
node = node.apply_slippages(slippages)
if target_object.described(node):
if target_object.distinguishing_descriptor(node):
new_list += [node]
object_list = new_list # should this be += ??
assert object_list
# use conceptual depth to choose a description
value_list = []
for node in object_list:
depth = node.conceptual_depth
value = formulas.temperature_adjusted_value(depth)
value_list += [value]
i = formulas.select_list_position(value_list)
descriptor = object_list[i]
    # choose the relation (e.g. change the leftmost object to "successor" or "d")
object_list = []
if changed.replacement.relation:
object_list += [changed.replacement.relation]
object_list += [
changed.replacement.object_from_modified.get_descriptor(slipnet.letter_category)
]
# use conceptual depth to choose a relation
value_list = []
for node in object_list:
depth = node.conceptual_depth
value = formulas.temperature_adjusted_value(depth)
value_list += [value]
i = formulas.select_list_position(value_list)
relation = object_list[i]
coderack.propose_rule(
slipnet.letter_category, descriptor, slipnet.letter, relation, codelet
)
def rule_strength_tester(codelet):
rule = codelet.arguments[0]
rule.update_strength()
probability = formulas.temperature_adjusted_probability(rule.total_strength / 100.0)
assert random.random() <= probability
coderack.new_codelet("rule-builder", codelet, rule.total_strength, rule)
def replacement_finder():
# choose random letter in initial string
letters = [o for o in workspace.initial.objects if isinstance(o, Letter)]
letter_of_initial_string = random.choice(letters)
logging.info(f"selected letter in initial string = {letter_of_initial_string}")
if letter_of_initial_string.replacement:
logging.info(
f"Replacement already found for {letter_of_initial_string}, so fizzling"
)
return
position = letter_of_initial_string.left_index
more_letters = [
o
for o in workspace.modified.objects
if isinstance(o, Letter) and o.left_index == position
]
letter_of_modified_string = more_letters and more_letters[0] or None
assert letter_of_modified_string
position -= 1
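    # compare character codes at this position to classify the change
    # as sameness, successor or predecessor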
initial_ascii = ord(workspace.initial_string[position])
modified_ascii = ord(workspace.modified_string[position])
diff = initial_ascii - modified_ascii
if abs(diff) < 2:
relations = {0: slipnet.sameness, -1: slipnet.successor, 1: slipnet.predecessor}
relation = relations[diff]
logging.info(f"Relation found: {relation.name}")
else:
relation = None
logging.info("no relation found")
letter_of_initial_string.replacement = Replacement(
letter_of_initial_string, letter_of_modified_string, relation
)
if relation != slipnet.sameness:
letter_of_initial_string.changed = True
workspace.changed_object = letter_of_initial_string
logging.info("building replacement")
def top_down_bond_scout__category(codelet):
logging.info("top_down_bond_scout__category")
category = codelet.arguments[0]
source = __get_scout_source(
category, formulas.local_bond_category_relevance, "bond"
)
destination = choose_neighbour(source)
logging.info(f"source: {source}, destination: {destination}")
assert destination
bond_facet = __get_bond_facet(source, destination)
source_descriptor, destination_descriptor = __get_descriptors(
bond_facet, source, destination
)
forward_bond = source_descriptor.get_bond_category(destination_descriptor)
if forward_bond == slipnet.identity:
forward_bond = slipnet.sameness
backward_bond = slipnet.sameness
else:
backward_bond = destination_descriptor.get_bond_category(source_descriptor)
assert category in [forward_bond, backward_bond]
if category == forward_bond:
coderack.propose_bond(
source,
destination,
category,
bond_facet,
source_descriptor,
destination_descriptor,
codelet,
)
else:
coderack.propose_bond(
destination,
source,
category,
bond_facet,
destination_descriptor,
source_descriptor,
codelet,
)
def top_down_bond_scout__direction(codelet):
direction = codelet.arguments[0]
source = __get_scout_source(
direction, formulas.local_direction_category_relevance, "bond"
)
destination = choose_directed_neighbor(source, direction)
assert destination
logging.info(f"to object: {destination}")
bond_facet = __get_bond_facet(source, destination)
source_descriptor, destination_descriptor = __get_descriptors(
bond_facet, source, destination
)
category = source_descriptor.get_bond_category(destination_descriptor)
assert category
if category == slipnet.identity:
category = slipnet.sameness
coderack.propose_bond(
source,
destination,
category,
bond_facet,
source_descriptor,
destination_descriptor,
codelet,
)
def bond_strength_tester(codelet):
bond = codelet.arguments[0]
__show_which_string_object_is_from(bond)
bond.update_strength()
strength = bond.total_strength
probability = formulas.temperature_adjusted_probability(strength / 100.0)
logging.info(f"bond strength = {strength} for {bond}")
assert formulas.coin_flip(probability)
bond.facet.buffer = 100.0
bond.source_descriptor.buffer = 100.0
bond.destination_descriptor.buffer = 100.0
logging.info("succeeded: posting bond-builder")
coderack.new_codelet("bond-builder", codelet, strength)
def bond_builder(codelet):
bond = codelet.arguments[0]
__show_which_string_object_is_from(bond)
bond.update_strength()
assert bond.source in workspace.objects or bond.destination in workspace.objects
for string_bond in bond.string.bonds:
if bond.same_neighbours(string_bond) and bond.same_categories(string_bond):
if bond.direction_category:
bond.direction_category.buffer = 100.0
bond.category.buffer = 100.0
logging.info("already exists: activate descriptors & Fizzle")
return
incompatible_bonds = bond.get_incompatible_bonds()
logging.info(f"number of incompatible_bonds: {len(incompatible_bonds)}")
if len(incompatible_bonds):
logging.info(str(incompatible_bonds[0]))
assert __fight_incompatibles(incompatible_bonds, bond, "bonds", 1.0, 1.0)
incompatible_groups = bond.source.get_common_groups(bond.destination)
assert __fight_incompatibles(incompatible_groups, bond, "groups", 1.0, 1.0)
# fight all incompatible correspondences
incompatible_correspondences = []
if bond.left_object.leftmost or bond.right_object.rightmost:
if bond.direction_category:
incompatible_correspondences = bond.get_incompatible_correspondences()
if incompatible_correspondences:
logging.info("trying to break incompatible correspondences")
assert __fight(bond, 2.0, incompatible_correspondences, 3.0)
for incompatible in incompatible_bonds:
incompatible.break_the_structure()
for incompatible in incompatible_groups:
incompatible.break_the_structure()
for incompatible in incompatible_correspondences:
incompatible.break_the_structure()
logging.info(f"building bond {bond}")
bond.build_bond()
# pylint: disable=too-many-branches
# pylint: disable=too-many-statements
def top_down_group_scout__category(codelet):
group_category = codelet.arguments[0]
category = group_category.get_related_node(slipnet.bond_category)
assert category
source = __get_scout_source(
category, formulas.local_bond_category_relevance, "group"
)
assert source
assert not source.spans_string()
if source.leftmost:
direction = slipnet.right
elif source.rightmost:
direction = slipnet.left
else:
activations = [slipnet.left.activation]
activations += [slipnet.right.activation]
if not formulas.select_list_position(activations):
direction = slipnet.left
else:
direction = slipnet.right
if direction == slipnet.left:
first_bond = source.left_bond
else:
first_bond = source.right_bond
if not first_bond or first_bond.category != category:
# check the other side of object
if direction == slipnet.right:
first_bond = source.left_bond
else:
first_bond = source.right_bond
if not first_bond or first_bond.category != category:
if category == slipnet.sameness and isinstance(source, Letter):
group = Group(
source.string,
slipnet.sameness_group,
None,
slipnet.letter_category,
[source],
[],
)
probability = group.single_letter_group_probability()
assert random.random() >= probability
coderack.propose_single_letter_group(source, codelet)
return
direction = first_bond.direction_category
search = True
bond_facet = None
# find leftmost object in group with these bonds
while search:
search = False
if not source.left_bond:
continue
if source.left_bond.category != category:
continue
if source.left_bond.direction_category != direction:
if source.left_bond.direction_category:
continue
if not bond_facet or bond_facet == source.left_bond.facet:
bond_facet = source.left_bond.facet
direction = source.left_bond.direction_category
source = source.left_bond.left_object
search = True
# find rightmost object in group with these bonds
search = True
destination = source
while search:
search = False
if not destination.right_bond:
continue
if destination.right_bond.category != category:
continue
if destination.right_bond.direction_category != direction:
if destination.right_bond.direction_category:
continue
if not bond_facet or bond_facet == destination.right_bond.facet:
bond_facet = destination.right_bond.facet
direction = source.right_bond.direction_category
destination = destination.right_bond.right_object
search = True
assert destination != source
objects = [source]
bonds = []
while source != destination:
bonds += [source.right_bond]
objects += [source.right_bond.right_object]
source = source.right_bond.right_object
coderack.propose_group(
objects, bonds, group_category, direction, bond_facet, codelet
)
def top_down_group_scout__direction(codelet):
direction = codelet.arguments[0]
source = __get_scout_source(
direction, formulas.local_direction_category_relevance, "direction"
)
logging.info(f"source chosen = {source}")
assert not source.spans_string()
if source.leftmost:
mydirection = slipnet.right
elif source.rightmost:
mydirection = slipnet.left
else:
activations = [slipnet.left.activation]
activations += [slipnet.right.activation]
if not formulas.select_list_position(activations):
mydirection = slipnet.left
else:
mydirection = slipnet.right
if mydirection == slipnet.left:
first_bond = source.left_bond
else:
first_bond = source.right_bond
if not first_bond:
logging.info("no first_bond")
else:
logging.info(f"first_bond: {first_bond}")
if first_bond and not first_bond.direction_category:
direction = None
if not first_bond or first_bond.direction_category != direction:
if mydirection == slipnet.right:
first_bond = source.left_bond
else:
first_bond = source.right_bond
if not first_bond:
logging.info("no first_bond2")
else:
logging.info(f"first_bond2: {first_bond}")
if first_bond and not first_bond.direction_category:
direction = None
assert first_bond
assert first_bond.direction_category == direction
logging.info(f"possible group: {first_bond}")
category = first_bond.category
assert category
group_category = category.get_related_node(slipnet.group_category)
logging.info(f"trying from {source} to {category.name}")
bond_facet = None
# find leftmost object in group with these bonds
search = True
while search:
search = False
if not source.left_bond:
continue
if source.left_bond.category != category:
continue
if source.left_bond.direction_category != direction:
if source.left_bond.direction_category:
continue
if not bond_facet or bond_facet == source.left_bond.facet:
bond_facet = source.left_bond.facet
direction = source.left_bond.direction_category
source = source.left_bond.left_object
search = True
destination = source
search = True
while search:
search = False
if not destination.right_bond:
continue
if destination.right_bond.category != category:
continue
if destination.right_bond.direction_category != direction:
if destination.right_bond.direction_category:
continue
if not bond_facet or bond_facet == destination.right_bond.facet:
bond_facet = destination.right_bond.facet
direction = source.right_bond.direction_category
destination = destination.right_bond.right_object
search = True
assert destination != source
logging.info(f"proposing group from {source} to {destination}")
objects = [source]
bonds = []
while source != destination:
bonds += [source.right_bond]
objects += [source.right_bond.right_object]
source = source.right_bond.right_object
coderack.propose_group(
objects, bonds, group_category, direction, bond_facet, codelet
)
# noinspection PyStringFormat
def group_scout__whole_string(codelet):
string = workspace.initial
if random.random() > 0.5:
string = workspace.target
logging.info(f"target string selected: {workspace.target}")
else:
logging.info(f"initial string selected: {workspace.initial}")
# find leftmost object & the highest group to which it belongs
leftmost = None
for objekt in string.objects:
if objekt.leftmost:
leftmost = objekt
while leftmost.group and leftmost.group.bond_category == slipnet.sameness:
leftmost = leftmost.group
if leftmost.spans_string():
# the object already spans the string - propose this object
group = leftmost
coderack.propose_group(
group.object_list,
group.bond_list,
group.group_category,
group.direction_category,
group.facet,
codelet,
)
return
bonds = []
objects = [leftmost]
while leftmost.right_bond:
bonds += [leftmost.right_bond]
leftmost = leftmost.right_bond.right_object
objects += [leftmost]
assert leftmost.rightmost
# choose a random bond from list
chosen_bond = random.choice(bonds)
category = chosen_bond.category
direction_category = chosen_bond.direction_category
bond_facet = chosen_bond.facet
bonds = possible_group_bonds(category, direction_category, bond_facet, bonds)
assert bonds
group_category = category.get_related_node(slipnet.group_category)
coderack.propose_group(
objects, bonds, group_category, direction_category, bond_facet, codelet
)
def group_strength_tester(codelet):
# update strength value of the group
group = codelet.arguments[0]
__show_which_string_object_is_from(group)
group.update_strength()
strength = group.total_strength
probability = formulas.temperature_adjusted_probability(strength / 100.0)
assert random.random() <= probability
# it is strong enough - post builder & activate nodes
group.group_category.get_related_node(slipnet.bond_category).buffer = 100.0
if group.direction_category:
group.direction_category.buffer = 100.0
coderack.new_codelet("group-builder", codelet, strength)
def group_builder(codelet):
# update strength value of the group
group = codelet.arguments[0]
__show_which_string_object_is_from(group)
equivalent = group.string.equivalent_group(group)
if equivalent:
logging.info("already exists...activate descriptors & fizzle")
group.activate_descriptions()
equivalent.add_descriptions(group.descriptions)
return
# check to see if all objects are still there
for o in group.object_list:
assert o in workspace.objects
    # check to see that the bonds are still there and run in the group's direction
incompatible_bonds = [] # incompatible bond list
if len(group.object_list) > 1:
previous = group.object_list[0]
for objekt in group.object_list[1:]:
left_bond = objekt.left_bond
if left_bond:
if left_bond.left_object == previous:
continue
if left_bond.direction_category == group.direction_category:
continue
incompatible_bonds += [left_bond]
previous = objekt
next_object = group.object_list[-1]
for objekt in reversed(group.object_list[:-1]):
right_bond = objekt.right_bond
if right_bond:
if right_bond.right_object == next_object:
continue
if right_bond.direction_category == group.direction_category:
continue
incompatible_bonds += [right_bond]
next_object = objekt
# if incompatible bonds exist - fight
group.update_strength()
assert __fight_incompatibles(incompatible_bonds, group, "bonds", 1.0, 1.0)
# fight incompatible groups
# fight all groups containing these objects
incompatible_groups = group.get_incompatible_groups()
assert __fight_incompatibles(incompatible_groups, group, "Groups", 1.0, 1.0)
for incompatible in incompatible_bonds:
incompatible.break_the_structure()
# create new bonds
group.bond_list = []
for i in range(1, len(group.object_list)):
object1 = group.object_list[i - 1]
object2 = group.object_list[i]
if not object1.right_bond:
if group.direction_category == slipnet.right:
source = object1
destination = object2
else:
source = object2
destination = object1
category = group.group_category.get_related_node(slipnet.bond_category)
facet = group.facet
new_bond = Bond(
source,
destination,
category,
facet,
source.get_descriptor(facet),
destination.get_descriptor(facet),
)
new_bond.build_bond()
group.bond_list += [object1.right_bond]
for incompatible in incompatible_groups:
incompatible.break_the_structure()
group.build_group()
group.activate_descriptions()
logging.info("building group")
def rule_builder(codelet):
rule = codelet.arguments[0]
if rule.rule_equal(workspace.rule):
rule.activate_rule_descriptions()
return
rule.update_strength()
assert rule.total_strength
# fight against other rules
if workspace.rule:
assert __structure_versus_structure(rule, 1.0, workspace.rule, 1.0)
workspace.build_rule(rule)
def __get_cut_off(density):
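    # sample a cut-off index from a distribution whose peak shifts with bond density
    # (denser strings favour a smaller cut-off)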
if density > 0.8:
distribution = [5.0, 150.0, 5.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
elif density > 0.6:
distribution = [2.0, 5.0, 150.0, 5.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0]
elif density > 0.4:
distribution = [1.0, 2.0, 5.0, 150.0, 5.0, 2.0, 1.0, 1.0, 1.0, 1.0]
elif density > 0.2:
distribution = [1.0, 1.0, 2.0, 5.0, 150.0, 5.0, 2.0, 1.0, 1.0, 1.0]
else:
distribution = [1.0, 1.0, 1.0, 2.0, 5.0, 150.0, 5.0, 2.0, 1.0, 1.0]
stop = sum(distribution) * random.random()
total = 0.0
for i in range(0, len(distribution)):
total += distribution[i]
if total >= stop:
return i + 1
return len(distribution)
def rule_translator():
assert workspace.rule
if len(workspace.initial) == 1 and len(workspace.target) == 1:
bond_density = 1.0
else:
number_of_bonds = len(workspace.initial.bonds) + len(workspace.target.bonds)
nearly_total_length = len(workspace.initial) + len(workspace.target) - 2
bond_density = number_of_bonds / nearly_total_length
if bond_density > 1.0:
bond_density = 1.0
cutoff = __get_cut_off(bond_density) * 10.0
assert cutoff >= formulas.actual_temperature
if workspace.rule.build_translated_rule():
workspace.found_answer = True
else:
temperature.clamp_time = coderack.codelets_run + 100
temperature.clamped = True
formulas.Temperature = 100.0
def bottom_up_correspondence_scout(codelet):
object_from_initial = choose_unmodified_object(
"inter_string_salience", workspace.initial.objects
)
object_from_target = choose_unmodified_object(
"inter_string_salience", workspace.target.objects
)
assert object_from_initial.spans_string() == object_from_target.spans_string()
    # get the possible concept mappings
concept_mappings = formulas.get_mappings(
object_from_initial,
object_from_target,
object_from_initial.relevant_descriptions(),
object_from_target.relevant_descriptions(),
)
assert concept_mappings
assert __slippability(concept_mappings)
# find out if any are distinguishing
distinguishing_mappings = [m for m in concept_mappings if m.distinguishing()]
assert distinguishing_mappings
# if both objects span the strings, check to see if the
# string description needs to be flipped
opposites = [
m
for m in distinguishing_mappings
if m.initial_description_type == slipnet.string_position_category
and m.initial_description_type != slipnet.bond_facet
]
initial_description_types = [m.initial_description_type for m in opposites]
flip_target_object = False
if (
object_from_initial.spans_string()
and object_from_target.spans_string()
and slipnet.direction_category in initial_description_types
and __all_opposite_mappings(formulas.opposite_mappings)
and slipnet.opposite.activation != 100.0
):
object_from_target = object_from_target.flipped_version()
concept_mappings = formulas.get_mappings(
object_from_initial,
object_from_target,
object_from_initial.relevant_descriptions(),
object_from_target.relevant_descriptions(),
)
flip_target_object = True
coderack.propose_correspondence(
object_from_initial,
object_from_target,
concept_mappings,
flip_target_object,
codelet,
)
def important_object_correspondence_scout(codelet):
object_from_initial = choose_unmodified_object(
"relative_importance", workspace.initial.objects
)
descriptors = object_from_initial.relevant_distinguishing_descriptors()
slipnode = formulas.choose_slipnode_by_conceptual_depth(descriptors)
assert slipnode
initial_descriptor = slipnode
for mapping in workspace.slippages():
if mapping.initial_descriptor == slipnode:
initial_descriptor = mapping.target_descriptor
target_candidates = []
for objekt in workspace.target.objects:
for description in objekt.relevant_descriptions():
if description.descriptor == initial_descriptor:
target_candidates += [objekt]
assert target_candidates
object_from_target = choose_unmodified_object(
"inter_string_salience", target_candidates
)
assert object_from_initial.spans_string() == object_from_target.spans_string()
    # get the possible concept mappings
concept_mappings = formulas.get_mappings(
object_from_initial,
object_from_target,
object_from_initial.relevant_descriptions(),
object_from_target.relevant_descriptions(),
)
assert concept_mappings
assert __slippability(concept_mappings)
# find out if any are distinguishing
distinguishing_mappings = [m for m in concept_mappings if m.distinguishing()]
assert distinguishing_mappings
# if both objects span the strings, check to see if the
# string description needs to be flipped
opposites = [
m
for m in distinguishing_mappings
if m.initial_description_type == slipnet.string_position_category
and m.initial_description_type != slipnet.bond_facet
]
initial_description_types = [m.initial_description_type for m in opposites]
flip_target_object = False
if (
object_from_initial.spans_string()
and object_from_target.spans_string()
and slipnet.direction_category in initial_description_types
and __all_opposite_mappings(formulas.opposite_mappings)
and slipnet.opposite.activation != 100.0
):
object_from_target = object_from_target.flipped_version()
concept_mappings = formulas.get_mappings(
object_from_initial,
object_from_target,
object_from_initial.relevant_descriptions(),
object_from_target.relevant_descriptions(),
)
flip_target_object = True
coderack.propose_correspondence(
object_from_initial,
object_from_target,
concept_mappings,
flip_target_object,
codelet,
)
def correspondence_strength_tester(codelet):
correspondence = codelet.arguments[0]
object_from_initial = correspondence.object_from_initial
object_from_target = correspondence.object_from_target
assert object_from_initial in workspace.objects
assert (
object_from_target in workspace.objects
or correspondence.flip_target_object
and not workspace.target.equivalent_group(object_from_target.flipped_version())
)
correspondence.update_strength()
strength = correspondence.total_strength
probability = formulas.temperature_adjusted_probability(strength / 100.0)
assert random.random() <= probability
# activate some concepts
for mapping in correspondence.concept_mappings:
mapping.initial_description_type.buffer = 100.0
mapping.initial_descriptor.buffer = 100.0
mapping.target_description_type.buffer = 100.0
mapping.target_descriptor.buffer = 100.0
coderack.new_codelet("correspondence-builder", codelet, strength, correspondence)
def correspondence_builder(codelet):
correspondence = codelet.arguments[0]
object_from_initial = correspondence.object_from_initial
object_from_target = correspondence.object_from_target
want_flip = correspondence.flip_target_object
if want_flip:
flipper = object_from_target.flipped_version()
target_not_flipped = not workspace.target.equivalent_group(flipper)
else:
target_not_flipped = False
initial_in_objects = object_from_initial in workspace.objects
target_in_objects = object_from_target in workspace.objects
assert initial_in_objects or (
not target_in_objects and (not (want_flip and target_not_flipped))
)
if correspondence.reflexive():
# if the correspondence exists, activate concept mappings
        # and add new ones to the existing correspondence
existing = correspondence.object_from_initial.correspondence
for mapping in correspondence.concept_mappings:
if mapping.label:
mapping.label.buffer = 100.0
if not mapping.is_contained_by(existing.concept_mappings):
existing.concept_mappings += [mapping]
return
incompatibles = correspondence.get_incompatible_correspondences()
# fight against all correspondences
if incompatibles:
correspondence_spans = (
correspondence.object_from_initial.letter_span()
+ correspondence.object_from_target.letter_span()
)
for incompatible in incompatibles:
incompatible_spans = (
incompatible.object_from_initial.letter_span()
+ incompatible.object_from_target.letter_span()
)
assert __structure_versus_structure(
correspondence, correspondence_spans, incompatible, incompatible_spans
)
incompatible_bond = None
incompatible_group = None
# if there is an incompatible bond then fight against it
initial = correspondence.object_from_initial
target = correspondence.object_from_target
if initial.leftmost or initial.rightmost and target.leftmost or target.rightmost:
# search for the incompatible bond
incompatible_bond = correspondence.get_incompatible_bond()
if incompatible_bond:
# bond found - fight against it
assert __structure_versus_structure(
correspondence, 3.0, incompatible_bond, 2.0
)
# won against incompatible bond
incompatible_group = target.group
if incompatible_group:
assert __structure_versus_structure(
correspondence, 1.0, incompatible_group, 1.0
)
# if there is an incompatible rule, fight against it
incompatible_rule = None
if workspace.rule:
if workspace.rule.incompatible_rule_correspondence(correspondence):
incompatible_rule = workspace.rule
assert __structure_versus_structure(
correspondence, 1.0, incompatible_rule, 1.0
)
for incompatible in incompatibles:
incompatible.break_the_structure()
# break incompatible group and bond if they exist
if incompatible_bond:
incompatible_bond.break_the_structure()
if incompatible_group:
incompatible_group.break_the_structure()
if incompatible_rule:
workspace.break_rule()
correspondence.build_correspondence()
|
[
"logging.info",
"random.random",
"random.choice"
] |
[((3144, 3217), 'logging.info', 'logging.info', (['f"""{weighted_strength1} > {rhs}: {weighted_strength1 > rhs}"""'], {}), "(f'{weighted_strength1} > {rhs}: {weighted_strength1 > rhs}')\n", (3156, 3217), False, 'import logging\n'), ((4100, 4139), 'logging.info', 'logging.info', (['f"""no incompatible {name}"""'], {}), "(f'no incompatible {name}')\n", (4112, 4139), False, 'import logging\n'), ((4825, 4850), 'random.choice', 'random.choice', (['structures'], {}), '(structures)\n', (4838, 4850), False, 'import random\n'), ((7781, 7824), 'logging.info', 'logging.info', (['f"""destination: {destination}"""'], {}), "(f'destination: {destination}')\n", (7793, 7824), False, 'import logging\n'), ((7948, 8009), 'logging.info', 'logging.info', (['f"""Source: {source}, destination: {destination}"""'], {}), "(f'Source: {source}, destination: {destination}')\n", (7960, 8009), False, 'import logging\n'), ((8477, 8525), 'logging.info', 'logging.info', (['f"""proposing {category.name} bond """'], {}), "(f'proposing {category.name} bond ')\n", (8489, 8525), False, 'import logging\n'), ((11887, 11909), 'random.choice', 'random.choice', (['letters'], {}), '(letters)\n', (11900, 11909), False, 'import random\n'), ((11914, 11993), 'logging.info', 'logging.info', (['f"""selected letter in initial string = {letter_of_initial_string}"""'], {}), "(f'selected letter in initial string = {letter_of_initial_string}')\n", (11926, 11993), False, 'import logging\n'), ((13217, 13253), 'logging.info', 'logging.info', (['"""building replacement"""'], {}), "('building replacement')\n", (13229, 13253), False, 'import logging\n'), ((13304, 13349), 'logging.info', 'logging.info', (['"""top_down_bond_scout__category"""'], {}), "('top_down_bond_scout__category')\n", (13316, 13349), False, 'import logging\n'), ((13537, 13598), 'logging.info', 'logging.info', (['f"""source: {source}, destination: {destination}"""'], {}), "(f'source: {source}, destination: {destination}')\n", (13549, 13598), False, 'import logging\n'), ((14904, 14945), 'logging.info', 'logging.info', (['f"""to object: {destination}"""'], {}), "(f'to object: {destination}')\n", (14916, 14945), False, 'import logging\n'), ((15724, 15778), 'logging.info', 'logging.info', (['f"""bond strength = {strength} for {bond}"""'], {}), "(f'bond strength = {strength} for {bond}')\n", (15736, 15778), False, 'import logging\n'), ((15945, 15992), 'logging.info', 'logging.info', (['"""succeeded: posting bond-builder"""'], {}), "('succeeded: posting bond-builder')\n", (15957, 15992), False, 'import logging\n'), ((17813, 17850), 'logging.info', 'logging.info', (['f"""building bond {bond}"""'], {}), "(f'building bond {bond}')\n", (17825, 17850), False, 'import logging\n'), ((21563, 21604), 'logging.info', 'logging.info', (['f"""source chosen = {source}"""'], {}), "(f'source chosen = {source}')\n", (21575, 21604), False, 'import logging\n'), ((22876, 22921), 'logging.info', 'logging.info', (['f"""possible group: {first_bond}"""'], {}), "(f'possible group: {first_bond}')\n", (22888, 22921), False, 'import logging\n'), ((23052, 23108), 'logging.info', 'logging.info', (['f"""trying from {source} to {category.name}"""'], {}), "(f'trying from {source} to {category.name}')\n", (23064, 23108), False, 'import logging\n'), ((24441, 24504), 'logging.info', 'logging.info', (['f"""proposing group from {source} to {destination}"""'], {}), "(f'proposing group from {source} to {destination}')\n", (24453, 24504), False, 'import logging\n'), ((26062, 26082), 'random.choice', 'random.choice', (['bonds'], 
{}), '(bonds)\n', (26075, 26082), False, 'import random\n'), ((30134, 30164), 'logging.info', 'logging.info', (['"""building group"""'], {}), "('building group')\n", (30146, 30164), False, 'import logging\n'), ((1699, 1714), 'random.random', 'random.random', ([], {}), '()\n', (1712, 1714), False, 'import random\n'), ((1873, 1948), 'logging.info', 'logging.info', (['f"""target string selected: {workspace.target} for {type_name}"""'], {}), "(f'target string selected: {workspace.target} for {type_name}')\n", (1885, 1948), False, 'import logging\n'), ((1967, 2044), 'logging.info', 'logging.info', (['f"""initial string selected: {workspace.initial} for {type_name}"""'], {}), "(f'initial string selected: {workspace.initial} for {type_name}')\n", (1979, 2044), False, 'import logging\n'), ((3124, 3139), 'random.random', 'random.random', ([], {}), '()\n', (3137, 3139), False, 'import random\n'), ((3668, 3714), 'logging.info', 'logging.info', (['f"""won fight with {incompatible}"""'], {}), "(f'won fight with {incompatible}')\n", (3680, 3714), False, 'import logging\n'), ((4027, 4074), 'logging.info', 'logging.info', (['f"""failed to break {name}: Fizzle"""'], {}), "(f'failed to break {name}: Fizzle')\n", (4039, 4074), False, 'import logging\n'), ((11597, 11612), 'random.random', 'random.random', ([], {}), '()\n', (11610, 11612), False, 'import random\n'), ((12047, 12138), 'logging.info', 'logging.info', (['f"""Replacement already found for {letter_of_initial_string}, so fizzling"""'], {}), "(\n f'Replacement already found for {letter_of_initial_string}, so fizzling')\n", (12059, 12138), False, 'import logging\n'), ((12811, 12859), 'logging.info', 'logging.info', (['f"""Relation found: {relation.name}"""'], {}), "(f'Relation found: {relation.name}')\n", (12823, 12859), False, 'import logging\n'), ((12902, 12935), 'logging.info', 'logging.info', (['"""no relation found"""'], {}), "('no relation found')\n", (12914, 12935), False, 'import logging\n'), ((22178, 22207), 'logging.info', 'logging.info', (['"""no first_bond"""'], {}), "('no first_bond')\n", (22190, 22207), False, 'import logging\n'), ((22226, 22267), 'logging.info', 'logging.info', (['f"""first_bond: {first_bond}"""'], {}), "(f'first_bond: {first_bond}')\n", (22238, 22267), False, 'import logging\n'), ((24928, 24943), 'random.random', 'random.random', ([], {}), '()\n', (24941, 24943), False, 'import random\n'), ((24993, 25052), 'logging.info', 'logging.info', (['f"""target string selected: {workspace.target}"""'], {}), "(f'target string selected: {workspace.target}')\n", (25005, 25052), False, 'import logging\n'), ((25071, 25132), 'logging.info', 'logging.info', (['f"""initial string selected: {workspace.initial}"""'], {}), "(f'initial string selected: {workspace.initial}')\n", (25083, 25132), False, 'import logging\n'), ((26805, 26820), 'random.random', 'random.random', ([], {}), '()\n', (26818, 26820), False, 'import random\n'), ((27348, 27410), 'logging.info', 'logging.info', (['"""already exists...activate descriptors & fizzle"""'], {}), "('already exists...activate descriptors & fizzle')\n", (27360, 27410), False, 'import logging\n'), ((31088, 31103), 'random.random', 'random.random', ([], {}), '()\n', (31101, 31103), False, 'import random\n'), ((37427, 37442), 'random.random', 'random.random', ([], {}), '()\n', (37440, 37442), False, 'import random\n'), ((3587, 3634), 'logging.info', 'logging.info', (['f"""lost fight with {incompatible}"""'], {}), "(f'lost fight with {incompatible}')\n", (3599, 3634), False, 'import logging\n'), 
((3961, 3994), 'logging.info', 'logging.info', (['f"""broke the {name}"""'], {}), "(f'broke the {name}')\n", (3973, 3994), False, 'import logging\n'), ((16545, 16606), 'logging.info', 'logging.info', (['"""already exists: activate descriptors & Fizzle"""'], {}), "('already exists: activate descriptors & Fizzle')\n", (16557, 16606), False, 'import logging\n'), ((22598, 22628), 'logging.info', 'logging.info', (['"""no first_bond2"""'], {}), "('no first_bond2')\n", (22610, 22628), False, 'import logging\n'), ((22655, 22697), 'logging.info', 'logging.info', (['f"""first_bond2: {first_bond}"""'], {}), "(f'first_bond2: {first_bond}')\n", (22667, 22697), False, 'import logging\n'), ((17399, 17459), 'logging.info', 'logging.info', (['"""trying to break incompatible correspondences"""'], {}), "('trying to break incompatible correspondences')\n", (17411, 17459), False, 'import logging\n'), ((19502, 19517), 'random.random', 'random.random', ([], {}), '()\n', (19515, 19517), False, 'import random\n')]
|
import os
import math
import time
import functools
import random
from tqdm import tqdm
import cv2
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw
from pylab import rcParams
rcParams['figure.figsize'] = 20, 20 # noqa
from consts import FONT_SIZE
from utils import (
make_contours,
get_centers,
get_labels,
vis_pred_bbox,
filter_polygons_points_intersection,
vis_pred_bbox_polygon,
vis_pred_center,
font
)
from grpc_utils import (
KuzuSegment,
KuzuClassify
)
if __name__ == '__main__':
img_dir = "./images"
img_fp = os.path.join(img_dir, random.choice(os.listdir(img_dir)))
print(img_fp)
filter_polygon = True
kuzu_seg = KuzuSegment()
kuzu_cls = KuzuClassify()
img, origin_image, origin_h, origin_w = kuzu_seg.load_image(img_fp)
pred_bbox, pred_center = kuzu_seg.predict(img)
    # get all polygon areas in the image
polygon_contours = make_contours(pred_bbox)
# get all center points by contour method
center_coords = get_centers(pred_center.astype(np.uint8))
no_center_points = len(center_coords)
final_center = vis_pred_center(center_coords, rad=2)
# filter polygon
if filter_polygon:
filtered_contours = filter_polygons_points_intersection(polygon_contours, center_coords) # noqa
pred_bbox = vis_pred_bbox_polygon(pred_bbox, filtered_contours)
final_bbox = vis_pred_bbox(pred_bbox, center_coords, width=2)
y_ratio = origin_h / 512
x_ratio = origin_w / 512
pil_img = Image.fromarray(origin_image).convert('RGBA')
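    # transparent overlay used to draw the predicted label next to each character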
char_canvas = Image.new('RGBA', pil_img.size)
char_draw = ImageDraw.Draw(char_canvas)
print(">>> {}".format(no_center_points))
if no_center_points > 0:
bbox_cluster = get_labels(center_coords, pred_bbox)
# ignore background hex color (=0)
for cluster_index in tqdm(range(len(center_coords))[1:]):
char_pixel = (bbox_cluster == cluster_index).astype(np.float32)
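            # derive this character's bounding box from its binary cluster mask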
try:
horizontal_indicies = np.where(np.any(char_pixel, axis=0))[0]
vertical_indicies = np.where(np.any(char_pixel, axis=1))[0]
x_min, x_max = horizontal_indicies[[0, -1]]
y_min, y_max = vertical_indicies[[0, -1]]
except IndexError:
continue
x = x_min
y = y_min
w = x_max - x_min
h = y_max - y_min
# convert to original coordinates
x = int(x * x_ratio)
w = int(w * x_ratio)
y = int(y * y_ratio)
h = int(h * y_ratio)
# set offset to crop character
offset = 5 # percentage
y_diff = math.ceil(h * offset / 100)
x_diff = math.ceil(w * offset / 100)
# expand area
y_from = y - y_diff
y_to = y + h + y_diff
x_from = x - x_diff
x_to = x + w + x_diff
# tune
y_from, y_to, x_from, x_to = \
list(map(functools.partial(np.maximum, 0),
[y_from, y_to, x_from, x_to]))
try:
char_img = origin_image[y_from:y_to, x_from:x_to]
char_img = kuzu_cls.load_image(char_img)
pred_label = kuzu_cls.predict(char_img)
# print(pred_label)
char_draw.text(
(x + w + FONT_SIZE / 4, y + h / 2 - FONT_SIZE),
pred_label, fill=(0, 0, 255, 255),
font=font
)
except Exception as e:
print(e)
continue
char_img = Image.alpha_composite(pil_img, char_canvas)
char_img = char_img.convert("RGB")
char_img = np.asarray(char_img)
final_bbox = cv2.resize(final_bbox, (origin_w, origin_h))
final_center = cv2.resize(final_center, (origin_w, origin_h))
plt.imshow(char_img)
plt.imshow(final_bbox, cmap="jet", alpha=0.50)
plt.savefig("./assets/{}.jpg".format(time.time()), bbox_inches='tight')
|
[
"PIL.Image.new",
"grpc_utils.KuzuClassify",
"utils.make_contours",
"matplotlib.pyplot.imshow",
"utils.filter_polygons_points_intersection",
"utils.vis_pred_center",
"utils.vis_pred_bbox_polygon",
"PIL.ImageDraw.Draw",
"cv2.resize",
"functools.partial",
"math.ceil",
"numpy.asarray",
"os.listdir",
"time.time",
"numpy.any",
"PIL.Image.alpha_composite",
"utils.get_labels",
"PIL.Image.fromarray",
"grpc_utils.KuzuSegment",
"utils.vis_pred_bbox"
] |
[((720, 733), 'grpc_utils.KuzuSegment', 'KuzuSegment', ([], {}), '()\n', (731, 733), False, 'from grpc_utils import KuzuSegment, KuzuClassify\n'), ((749, 763), 'grpc_utils.KuzuClassify', 'KuzuClassify', ([], {}), '()\n', (761, 763), False, 'from grpc_utils import KuzuSegment, KuzuClassify\n'), ((947, 971), 'utils.make_contours', 'make_contours', (['pred_bbox'], {}), '(pred_bbox)\n', (960, 971), False, 'from utils import make_contours, get_centers, get_labels, vis_pred_bbox, filter_polygons_points_intersection, vis_pred_bbox_polygon, vis_pred_center, font\n'), ((1142, 1179), 'utils.vis_pred_center', 'vis_pred_center', (['center_coords'], {'rad': '(2)'}), '(center_coords, rad=2)\n', (1157, 1179), False, 'from utils import make_contours, get_centers, get_labels, vis_pred_bbox, filter_polygons_points_intersection, vis_pred_bbox_polygon, vis_pred_center, font\n'), ((1419, 1467), 'utils.vis_pred_bbox', 'vis_pred_bbox', (['pred_bbox', 'center_coords'], {'width': '(2)'}), '(pred_bbox, center_coords, width=2)\n', (1432, 1467), False, 'from utils import make_contours, get_centers, get_labels, vis_pred_bbox, filter_polygons_points_intersection, vis_pred_bbox_polygon, vis_pred_center, font\n'), ((1606, 1637), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', 'pil_img.size'], {}), "('RGBA', pil_img.size)\n", (1615, 1637), False, 'from PIL import Image, ImageDraw\n'), ((1654, 1681), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['char_canvas'], {}), '(char_canvas)\n', (1668, 1681), False, 'from PIL import Image, ImageDraw\n'), ((3689, 3732), 'PIL.Image.alpha_composite', 'Image.alpha_composite', (['pil_img', 'char_canvas'], {}), '(pil_img, char_canvas)\n', (3710, 3732), False, 'from PIL import Image, ImageDraw\n'), ((3787, 3807), 'numpy.asarray', 'np.asarray', (['char_img'], {}), '(char_img)\n', (3797, 3807), True, 'import numpy as np\n'), ((3826, 3870), 'cv2.resize', 'cv2.resize', (['final_bbox', '(origin_w, origin_h)'], {}), '(final_bbox, (origin_w, origin_h))\n', (3836, 3870), False, 'import cv2\n'), ((3890, 3936), 'cv2.resize', 'cv2.resize', (['final_center', '(origin_w, origin_h)'], {}), '(final_center, (origin_w, origin_h))\n', (3900, 3936), False, 'import cv2\n'), ((3942, 3962), 'matplotlib.pyplot.imshow', 'plt.imshow', (['char_img'], {}), '(char_img)\n', (3952, 3962), True, 'import matplotlib.pyplot as plt\n'), ((3967, 4012), 'matplotlib.pyplot.imshow', 'plt.imshow', (['final_bbox'], {'cmap': '"""jet"""', 'alpha': '(0.5)'}), "(final_bbox, cmap='jet', alpha=0.5)\n", (3977, 4012), True, 'import matplotlib.pyplot as plt\n'), ((1253, 1321), 'utils.filter_polygons_points_intersection', 'filter_polygons_points_intersection', (['polygon_contours', 'center_coords'], {}), '(polygon_contours, center_coords)\n', (1288, 1321), False, 'from utils import make_contours, get_centers, get_labels, vis_pred_bbox, filter_polygons_points_intersection, vis_pred_bbox_polygon, vis_pred_center, font\n'), ((1350, 1401), 'utils.vis_pred_bbox_polygon', 'vis_pred_bbox_polygon', (['pred_bbox', 'filtered_contours'], {}), '(pred_bbox, filtered_contours)\n', (1371, 1401), False, 'from utils import make_contours, get_centers, get_labels, vis_pred_bbox, filter_polygons_points_intersection, vis_pred_bbox_polygon, vis_pred_center, font\n'), ((1780, 1816), 'utils.get_labels', 'get_labels', (['center_coords', 'pred_bbox'], {}), '(center_coords, pred_bbox)\n', (1790, 1816), False, 'from utils import make_contours, get_centers, get_labels, vis_pred_bbox, filter_polygons_points_intersection, vis_pred_bbox_polygon, vis_pred_center, font\n'), 
((639, 658), 'os.listdir', 'os.listdir', (['img_dir'], {}), '(img_dir)\n', (649, 658), False, 'import os\n'), ((1542, 1571), 'PIL.Image.fromarray', 'Image.fromarray', (['origin_image'], {}), '(origin_image)\n', (1557, 1571), False, 'from PIL import Image, ImageDraw\n'), ((2736, 2763), 'math.ceil', 'math.ceil', (['(h * offset / 100)'], {}), '(h * offset / 100)\n', (2745, 2763), False, 'import math\n'), ((2785, 2812), 'math.ceil', 'math.ceil', (['(w * offset / 100)'], {}), '(w * offset / 100)\n', (2794, 2812), False, 'import math\n'), ((4055, 4066), 'time.time', 'time.time', ([], {}), '()\n', (4064, 4066), False, 'import time\n'), ((3060, 3092), 'functools.partial', 'functools.partial', (['np.maximum', '(0)'], {}), '(np.maximum, 0)\n', (3077, 3092), False, 'import functools\n'), ((2069, 2095), 'numpy.any', 'np.any', (['char_pixel'], {'axis': '(0)'}), '(char_pixel, axis=0)\n', (2075, 2095), True, 'import numpy as np\n'), ((2145, 2171), 'numpy.any', 'np.any', (['char_pixel'], {'axis': '(1)'}), '(char_pixel, axis=1)\n', (2151, 2171), True, 'import numpy as np\n')]
|
from inspect import Traceback
from signal import getsignal, SIG_IGN, SIGINT, signal as signal_, Signals
from types import FrameType
from typing import Type
class DelayedKeyboardInterrupt:
def __init__(self, in_thread: bool = False) -> None:
"""
        :param in_thread: Whether or not we're running inside a thread
"""
self.in_thread = in_thread
self.signal_received = None
def __enter__(self) -> None:
# When we're in a thread we can't use signal handling
if not self.in_thread:
self.signal_received = False
self.old_handler = signal_(SIGINT, self.handler)
def handler(self, sig: Signals, frame: FrameType) -> None:
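        # remember the interrupt so __exit__ can replay it through the original handler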
self.signal_received = (sig, frame)
def __exit__(self, exc_type: Type, exc_val: Exception, exc_tb: Traceback) -> None:
if not self.in_thread:
signal_(SIGINT, self.old_handler)
if self.signal_received:
self.old_handler(*self.signal_received)
class DisableKeyboardInterruptSignal:
def __enter__(self) -> None:
# Prevent signal from propagating to child process
self._handler = getsignal(SIGINT)
ignore_keyboard_interrupt()
def __exit__(self, exc_type: Type, exc_val: Exception, exc_tb: Traceback) -> None:
# Restore signal
signal_(SIGINT, self._handler)
def ignore_keyboard_interrupt():
signal_(SIGINT, SIG_IGN)
|
[
"signal.signal",
"signal.getsignal"
] |
[((1413, 1437), 'signal.signal', 'signal_', (['SIGINT', 'SIG_IGN'], {}), '(SIGINT, SIG_IGN)\n', (1420, 1437), True, 'from signal import getsignal, SIG_IGN, SIGINT, signal as signal_, Signals\n'), ((1168, 1185), 'signal.getsignal', 'getsignal', (['SIGINT'], {}), '(SIGINT)\n', (1177, 1185), False, 'from signal import getsignal, SIG_IGN, SIGINT, signal as signal_, Signals\n'), ((1343, 1373), 'signal.signal', 'signal_', (['SIGINT', 'self._handler'], {}), '(SIGINT, self._handler)\n', (1350, 1373), True, 'from signal import getsignal, SIG_IGN, SIGINT, signal as signal_, Signals\n'), ((615, 644), 'signal.signal', 'signal_', (['SIGINT', 'self.handler'], {}), '(SIGINT, self.handler)\n', (622, 644), True, 'from signal import getsignal, SIG_IGN, SIGINT, signal as signal_, Signals\n'), ((884, 917), 'signal.signal', 'signal_', (['SIGINT', 'self.old_handler'], {}), '(SIGINT, self.old_handler)\n', (891, 917), True, 'from signal import getsignal, SIG_IGN, SIGINT, signal as signal_, Signals\n')]
|
#!/usr/bin/env python
import argparse
import os
import sys
from pathlib import Path
from typing import Any, Optional
try:
import pythoncom
from win32com.propsys import propsys
from win32com.shell import shell
except ImportError:
raise ImportError(
"pywin32 is required to run create_shell_link.py.To install, execute 'pip install pywin32' in a terminal"
)
class IconFileAction(argparse.Action): # pragma: no cover
def __call__(self, parser_container, namespace, values: Any, option_string=None):
if values.suffix != ".ico":
raise ValueError("The supplied icon file is not of type .ico.")
setattr(namespace, self.dest, values)
# noinspection PyUnresolvedReferences
def create_shell_link(
appId: str,
appName: str,
iconPath: Optional[Path] = None,
overwrite: bool = False,
appDataPath: str = os.getenv("APPDATA"),
):
# See https://github.com/mohabouje/WinToast/blob/master/src/wintoastlib.cpp#L594
if appDataPath is None: # pragma: no cover
raise RuntimeError("Couldn't find APPDATA path. Please rerun this script with the --appdata argument")
programsPath = Path(appDataPath) / "Microsoft" / "Windows" / "Start Menu" / "Programs"
shellLinkPath = programsPath / f"{appName}.lnk"
linkExists = shellLinkPath.exists()
if linkExists: # pragma: no cover
if overwrite:
print("Script run with --overwrite, overwriting existing link...")
else:
sys.exit(
f"Link '{shellLinkPath}' already exists. To overwrite, rerun this script with the --overwrite argument"
)
# Adapted from https://github.com/mhammond/pywin32/blob/main/com/win32comext/shell/demos/create_link.py
# noinspection PyTypeChecker
shellLink = pythoncom.CoCreateInstance(
shell.CLSID_ShellLink, None, pythoncom.CLSCTX_INPROC_SERVER, shell.IID_IShellLink
)
# Set shell link arguments
shellLink.SetPath("")
shellLink.SetArguments("")
shellLink.SetWorkingDirectory("")
if iconPath is not None:
shellLink.SetIconLocation(str(iconPath.resolve()), 0)
# Set AUMI to supplied argument
propertyStore = shellLink.QueryInterface(propsys.IID_IPropertyStore)
propertyKey = propsys.PSGetPropertyKeyFromName("System.AppUserModel.ID")
propertyStore.SetValue(propertyKey, propsys.PROPVARIANTType(appId))
propertyStore.Commit()
# Save file
# noinspection PyUnresolvedReferences
propertyStore.QueryInterface(pythoncom.IID_IPersistFile).Save(str(shellLinkPath), True)
print(f"Successfully {'modified' if linkExists else 'created'} shell link with the AUMI '{appId}'")
if __name__ == "__main__": # pragma: no cover
parser = argparse.ArgumentParser(description="Create shell link for use in toast notifications")
parser.add_argument("--appdata", "-ad", type=str, required=False, help="AppData path if script fails to find it")
parser.add_argument("--app_id", "-a", type=str, required=True, help="Application User Model ID for identification")
parser.add_argument("--name", "-n", type=str, required=True, help="Display name on notification")
parser.add_argument(
"--icon", "-i", type=Path, required=False, action=IconFileAction, help="Path to image file for desired icon"
)
if sys.version_info >= (3, 9):
parser.add_argument(
"--overwrite", "-o", action=argparse.BooleanOptionalAction, help="Overwrite if a link already exists"
)
else:
parser.add_argument(
"--overwrite", "-o", default=False, action="store_true", help="Overwrite if a link already exists"
)
args = parser.parse_args()
create_shell_link(
appId=args.app_id, appName=args.name, iconPath=args.icon, overwrite=args.overwrite, appDataPath=args.appdata
)
|
[
"win32com.propsys.propsys.PSGetPropertyKeyFromName",
"argparse.ArgumentParser",
"win32com.propsys.propsys.PROPVARIANTType",
"pathlib.Path",
"pythoncom.CoCreateInstance",
"os.getenv",
"sys.exit"
] |
[((878, 898), 'os.getenv', 'os.getenv', (['"""APPDATA"""'], {}), "('APPDATA')\n", (887, 898), False, 'import os\n'), ((1799, 1913), 'pythoncom.CoCreateInstance', 'pythoncom.CoCreateInstance', (['shell.CLSID_ShellLink', 'None', 'pythoncom.CLSCTX_INPROC_SERVER', 'shell.IID_IShellLink'], {}), '(shell.CLSID_ShellLink, None, pythoncom.\n CLSCTX_INPROC_SERVER, shell.IID_IShellLink)\n', (1825, 1913), False, 'import pythoncom\n'), ((2268, 2326), 'win32com.propsys.propsys.PSGetPropertyKeyFromName', 'propsys.PSGetPropertyKeyFromName', (['"""System.AppUserModel.ID"""'], {}), "('System.AppUserModel.ID')\n", (2300, 2326), False, 'from win32com.propsys import propsys\n'), ((2742, 2834), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create shell link for use in toast notifications"""'}), "(description=\n 'Create shell link for use in toast notifications')\n", (2765, 2834), False, 'import argparse\n'), ((2367, 2397), 'win32com.propsys.propsys.PROPVARIANTType', 'propsys.PROPVARIANTType', (['appId'], {}), '(appId)\n', (2390, 2397), False, 'from win32com.propsys import propsys\n'), ((1497, 1620), 'sys.exit', 'sys.exit', (['f"""Link \'{shellLinkPath}\' already exists. To overwrite, rerun this script with the --overwrite argument"""'], {}), '(\n f"Link \'{shellLinkPath}\' already exists. To overwrite, rerun this script with the --overwrite argument"\n )\n', (1505, 1620), False, 'import sys\n'), ((1167, 1184), 'pathlib.Path', 'Path', (['appDataPath'], {}), '(appDataPath)\n', (1171, 1184), False, 'from pathlib import Path\n')]
|
from api.event import Event
from que import TT_DUMMY, TG_DC_UNBOUND
from que.utils import DEFAULT_DC, task_id_from_string
class NodeSystemRestarted(Event):
"""
Called from node_sysinfo_cb after erigonesd:fast is restarted on a compute node.
"""
_name_ = 'node_system_restarted'
def __init__(self, node, **kwargs):
        # Create such a task_id that info is sent to SuperAdmins and the node owner
task_id = task_id_from_string(node.owner.id, dummy=True, dc_id=DEFAULT_DC, tt=TT_DUMMY, tg=TG_DC_UNBOUND)
kwargs['node_hostname'] = node.hostname
super(NodeSystemRestarted, self).__init__(task_id, **kwargs)
|
[
"que.utils.task_id_from_string"
] |
[((435, 535), 'que.utils.task_id_from_string', 'task_id_from_string', (['node.owner.id'], {'dummy': '(True)', 'dc_id': 'DEFAULT_DC', 'tt': 'TT_DUMMY', 'tg': 'TG_DC_UNBOUND'}), '(node.owner.id, dummy=True, dc_id=DEFAULT_DC, tt=\n TT_DUMMY, tg=TG_DC_UNBOUND)\n', (454, 535), False, 'from que.utils import DEFAULT_DC, task_id_from_string\n')]
|
# TODO: In this module we'll start drawing a simple smiley face
# Yellow circle for the head
# Two black circle eyes
# Red rectangle (rect) mouth
# Red circle nose.
import pygame
import sys
pygame.init()
screen = pygame.display.set_mode((600, 600))
while True:
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONDOWN:
print(event.pos)
if event.type == pygame.QUIT:
sys.exit()
screen.fill((0, 200, 200))
# Draws the yellow head
pygame.draw.circle(screen, (255,255,0), (300,300), 250)
pygame.draw.circle(screen, (0, 0, 0), (300, 300), 250, 5)
# draws the eyes
pygame.draw.circle(screen, (0, 0, 0), (205, 200), 20)
pygame.draw.circle(screen, (0, 0, 0), (400, 200), 20)
# draws the nose
pygame.draw.circle(screen, (255, 0, 0), (300, 300), 35)
pygame.draw.circle(screen, (0, 0, 0), (300, 300), 35, 2)
# draws the mouth
pygame.draw.rect(screen, (127, 0, 0), (200, 400, 200, 25))
# pygame.draw.rect(screen, color, (x, y, width, height), thickness)
# pygame.draw.rect(screen, (100, 0, 0), (240, 350, 160, 30))
pygame.display.update()
|
[
"pygame.draw.circle",
"pygame.event.get",
"pygame.display.set_mode",
"pygame.draw.rect",
"pygame.init",
"pygame.display.update",
"sys.exit"
] |
[((197, 210), 'pygame.init', 'pygame.init', ([], {}), '()\n', (208, 210), False, 'import pygame\n'), ((220, 255), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(600, 600)'], {}), '((600, 600))\n', (243, 255), False, 'import pygame\n'), ((285, 303), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (301, 303), False, 'import pygame\n'), ((509, 567), 'pygame.draw.circle', 'pygame.draw.circle', (['screen', '(255, 255, 0)', '(300, 300)', '(250)'], {}), '(screen, (255, 255, 0), (300, 300), 250)\n', (527, 567), False, 'import pygame\n'), ((569, 626), 'pygame.draw.circle', 'pygame.draw.circle', (['screen', '(0, 0, 0)', '(300, 300)', '(250)', '(5)'], {}), '(screen, (0, 0, 0), (300, 300), 250, 5)\n', (587, 626), False, 'import pygame\n'), ((653, 706), 'pygame.draw.circle', 'pygame.draw.circle', (['screen', '(0, 0, 0)', '(205, 200)', '(20)'], {}), '(screen, (0, 0, 0), (205, 200), 20)\n', (671, 706), False, 'import pygame\n'), ((711, 764), 'pygame.draw.circle', 'pygame.draw.circle', (['screen', '(0, 0, 0)', '(400, 200)', '(20)'], {}), '(screen, (0, 0, 0), (400, 200), 20)\n', (729, 764), False, 'import pygame\n'), ((791, 846), 'pygame.draw.circle', 'pygame.draw.circle', (['screen', '(255, 0, 0)', '(300, 300)', '(35)'], {}), '(screen, (255, 0, 0), (300, 300), 35)\n', (809, 846), False, 'import pygame\n'), ((851, 907), 'pygame.draw.circle', 'pygame.draw.circle', (['screen', '(0, 0, 0)', '(300, 300)', '(35)', '(2)'], {}), '(screen, (0, 0, 0), (300, 300), 35, 2)\n', (869, 907), False, 'import pygame\n'), ((935, 993), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', '(127, 0, 0)', '(200, 400, 200, 25)'], {}), '(screen, (127, 0, 0), (200, 400, 200, 25))\n', (951, 993), False, 'import pygame\n'), ((1138, 1161), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (1159, 1161), False, 'import pygame\n'), ((433, 443), 'sys.exit', 'sys.exit', ([], {}), '()\n', (441, 443), False, 'import sys\n')]
|
import os
from .cache import Cache
from .models import CacheEntry
from .utils import get_file_hash, save_file, load_file
import shutil
import io
from .compiler import ExpressionProcessor
from .expressions import stylesheets, scripts, html
import subprocess
import tempfile
class AssetCollection(object):
def __init__(self, file_list, settings):
self._assets = []
self._settings = settings
for path in file_list:
res = get_asset_objects(path, settings)
if type(res) is list:
for asset in res:
self._assets.append(asset)
self._assets[-1]._collection = self
self._assets[-1]._settings = settings
else:
if res is None:
continue
self._assets.append(res)
self._assets[-1]._collection = self
self._assets[-1]._settings = settings
def find_asset(self, path, lang):
for asset in self._assets:
if asset._path == path and asset._lang == lang:
return asset
return None
def pick_dependencies(self):
print('Found {count:d} assets'.format(count=len(self._assets)))
if self._settings.verbose:
print("Picking dependencies...")
for asset in self._assets:
asset.parse()
if self._settings.verbose:
print(asset)
print('Dependencies {dependencies}\n'.format(
dependencies=asset._dependencies))
self._assets = DependencyResolver.topological_sort(self._assets)
if self._settings.verbose:
print('Build order:\n{collection}\n'.format(
collection=self._assets))
def build(self):
print('Building assets...')
for asset in self._assets:
asset.compile(force=self._settings.force)
print('Build done.')
class DependencyResolver(object):
@staticmethod
def topological_sort(assets_unsorted):
assets_sorted = []
while len(assets_unsorted) > 0:
acyclic = False
for asset in assets_unsorted:
for dependency in asset._dependencies:
if dependency in assets_unsorted:
break
else:
acyclic = True
assets_unsorted.remove(asset)
assets_sorted.append(asset)
if not acyclic:
raise RuntimeError('A cyclic dependency occurred')
return assets_sorted
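# Minimal sketch of how the sort above behaves, using stand-in objects that only
# provide the `_dependencies` attribute the algorithm relies on (names here are
# illustrative, not part of the real Asset API):
#
#   class _Stub:
#       def __init__(self, deps=None):
#           self._dependencies = deps or []
#
#   a = _Stub(); b = _Stub([a]); c = _Stub([b])
#   DependencyResolver.topological_sort([c, b, a])  # returns [a, b, c]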
class Asset(object):
FILE = 0
STRING = 1
def __init__(self, resource_type, path, lang):
self._resource_type = resource_type
self._path = path
self._lang = lang
self._collection = None
self._settings = None
self._dependencies = []
self._tool_cache = Cache()
self._flag_modified = False
def is_partial(self, path):
return os.path.basename(path).startswith("_")
def get_target_path(self, **opts):
common_prefix = os.path.commonprefix([
self._path,
self._get_source_dir()])
path_part = self._path[len(common_prefix)+1:]
if 'hash' in opts:
parts = os.path.splitext(path_part)
new_filename = '%s-%s' % (parts[0], opts['hash'])
path_part = '%s%s' % (new_filename, parts[1])
if 'change_extension' in opts:
new_ext = opts['change_extension']
parts = os.path.splitext(path_part)
path_part = '%s%s' % (parts[0], new_ext)
if 'lang' in opts and not(opts['lang'] is None):
lang = opts['lang']
parts = os.path.splitext(path_part)
path_part = '%s-%s%s' % (parts[0], lang, parts[1])
if self.is_partial(path_part):
target_path = os.path.join(self._get_partials_dir(), path_part)
else:
target_path = os.path.join(self._get_target_dir(), path_part)
return target_path
def __repr__(self):
if self._lang is None:
t = '{path}'
else:
t = '{path} ({lang})'
common_prefix = os.path.commonprefix([
self._path,
self._get_source_dir()])
return t.format(path=self._path[len(common_prefix) + 1:],
lang=self._lang)
def add_dependency(self, path, lang=None):
dependency = self._collection.find_asset(path, lang)
if dependency:
if dependency not in self._dependencies:
self._dependencies.append(dependency)
else:
print("Couldn't find dependency with path %s" % path)
def __eq__(self, other):
return self._path == other._path and self._lang == other._lang
def __ne__(self, other):
        return self._path != other._path or self._lang != other._lang
def parse(self):
self._parse()
def dependencies_modified(self):
for dep_asset in self._dependencies:
if dep_asset._flag_modified:
return True
return False
def compile(self, force=False):
if self._resource_type == Asset.FILE:
cache_entry = self._tool_cache.find_entry(self._path, self._lang)
file_modified = True if cache_entry is None\
else cache_entry.file_modified() or self.dependencies_modified()
if file_modified or force:
if cache_entry:
if os.path.exists(cache_entry.target):
os.remove(cache_entry.target)
target_path = self._get_target_path()
self._compile(target_path)
if cache_entry:
cache_entry.target = target_path
self._tool_cache.update(cache_entry)
print('Updated {asset}'.format(asset=self))
else:
cache_entry = CacheEntry(self._path, target_path, self._lang)
self._tool_cache.add(cache_entry)
print('Created {asset}'.format(asset=self))
self._flag_modified = True
else:
if self._settings.verbose:
print('Cached {asset}'.format(asset=self))
else:
print("String asset")
class TextAsset(Asset):
def __init__(self, path, lang=None):
super(TextAsset, self).__init__(Asset.FILE, path, lang)
self._data = None
split = os.path.splitext(path)
self._basename = split[0]
self._extension = split[1]
def load(self):
with io.open(self._path, 'r', encoding='utf-8') as f:
self._data = f.read()
def save(self, path):
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
save_file(path, self._data)
class StylesheetAsset(TextAsset):
@staticmethod
def supported_extensions():
return ['.css', '.scss']
@staticmethod
def get_languages(settings):
return settings.stylesheets.languages
def _get_partials_dir(self):
return os.path.join(self._settings.partials, 'stylesheets')
def _get_source_dir(self):
return self._settings.stylesheets.source
def _get_target_dir(self):
return self._settings.stylesheets.target
def _get_target_path(self):
return self.get_target_path(hash=get_file_hash(self._path, unique=True))
def _parse(self):
self.load()
self._processor = ExpressionProcessor(self, [
stylesheets.ImageUrlExpression,
stylesheets.IncludeExpression,
stylesheets.FontUrlExpression
])
self._processor.parse()
def minify(self):
temp_path = tempfile.mkdtemp()
source_file = os.path.join(temp_path, "source.css")
save_file(source_file, self._data)
target_file = os.path.join(temp_path, "target.css")
proc = subprocess.Popen(
[
"java",
"-Xss100m",
"-jar",
self._settings.yuicompressor_file,
"--type",
"css",
"-o",
target_file,
source_file
],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
)
out, err = proc.communicate()
self._data = load_file(target_file)
shutil.rmtree(temp_path)
def _compile(self, target_path):
self._processor.compile(self._settings, target_path)
if self._settings.minify and not self.is_partial(target_path):
if self._settings.verbose:
print('Minifying {asset}'.format(asset=self))
self.minify()
self.save(target_path)
class ScriptAsset(TextAsset):
@staticmethod
def supported_extensions():
return ['.js', '.coffee']
@staticmethod
def get_languages(settings):
return settings.scripts.languages
def _get_partials_dir(self):
return os.path.join(self._settings.partials, 'scripts')
def _get_source_dir(self):
return self._settings.scripts.source
def _get_target_dir(self):
return self._settings.scripts.target
def _get_target_path(self):
return self.get_target_path(
hash=get_file_hash(self._path, unique=True),
change_extension='.js'
)
def _parse(self):
self.load()
self._processor = ExpressionProcessor(self, [
scripts.IncludeExpression,
scripts.ScriptUrlExpression,
scripts.AppConfExpression,
scripts.ResourceUrlExpression
])
self._processor.parse()
def minify(self):
temp_path = tempfile.mkdtemp()
source_file = os.path.join(temp_path, "source.js")
save_file(source_file, self._data)
target_file = os.path.join(temp_path, "target.js")
proc = subprocess.Popen(
[
"java",
"-jar",
self._settings.yuicompressor_file,
"--type",
"js",
"-o",
target_file,
source_file
],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
)
out, err = proc.communicate()
self._data = load_file(target_file)
shutil.rmtree(temp_path)
def compile_coffee(self):
temp_path = tempfile.mkdtemp()
source_file = os.path.join(temp_path, "source.coffee")
save_file(source_file, self._data)
target_file = os.path.join(temp_path, "source.js")
proc = subprocess.Popen(
[
self._settings.coffee_bin,
"-c",
source_file
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate()
self._data = load_file(target_file)
shutil.rmtree(temp_path)
def _compile(self, target_path):
self._processor.compile(self._settings, target_path)
if self._extension == '.coffee':
if self._settings.verbose:
print('Using CoffeeScript Compiler for {asset}'.format(asset=self))
self.compile_coffee()
if self._settings.minify and not self.is_partial(target_path):
if self._settings.verbose:
print('Minifying {asset}'.format(asset=self))
self.minify()
self.save(target_path)
class HtmlAsset(TextAsset):
@staticmethod
def supported_extensions():
return ['.html']
@staticmethod
def get_languages(settings):
return settings.html.languages
def _get_partials_dir(self):
return os.path.join(self._settings.partials, 'html')
def _get_source_dir(self):
return self._settings.html.source
def _get_target_dir(self):
return self._settings.html.target
def _get_target_path(self):
return self.get_target_path(lang=self._lang)
def _parse(self):
self.load()
self._processor = ExpressionProcessor(self, [
html.IncludeExpression,
html.StylesheetUrlExpression,
html.ScriptUrlExpression,
html.ImageUrlExpression,
html.AppConfExpression,
html.I18nExpression,
html.I18nTemplateExpression,
html.ResourceUrlExpression
])
self._processor.parse()
def minify(self):
temp_path = tempfile.mkdtemp()
source_file = os.path.join(temp_path, "source.html")
save_file(source_file, self._data)
target_file = os.path.join(temp_path, "target.html")
proc = subprocess.Popen(
[
"java",
"-jar",
self._settings.htmlcompressor_file,
"--type",
"html",
"--mask",
"*.html",
"-o",
target_file,
source_file,
"--remove-intertag-spaces"
],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE
)
out, err = proc.communicate()
self._data = load_file(target_file)
shutil.rmtree(temp_path)
def _compile(self, target_path):
self._processor.compile(self._settings, target_path)
if self._settings.minify and not self.is_partial(target_path):
if self._settings.verbose:
print('Minifying {asset}'.format(asset=self))
self.minify()
self.save(target_path)
class BinaryAsset(Asset):
def __init__(self, path, lang=None):
super(BinaryAsset, self).__init__(Asset.FILE, path, lang)
def _get_target_path(self):
return self.get_target_path(hash=get_file_hash(self._path, unique=True))
def _parse(self):
pass
def _compile(self, target_path):
if not os.path.exists(os.path.dirname(target_path)):
os.makedirs(os.path.dirname(target_path))
shutil.copy(self._path, target_path)
class ImageAsset(BinaryAsset):
def __init__(self, path, lang=None):
super(ImageAsset, self).__init__(path, lang)
@staticmethod
def supported_extensions():
return ['.png', '.jpg', '.gif']
@staticmethod
def get_languages(settings):
return settings.images.languages
def _get_partials_dir(self):
return os.path.join(self._settings.partials, 'images')
def _get_source_dir(self):
return self._settings.images.source
def _get_target_dir(self):
return self._settings.images.target
class FontAsset(BinaryAsset):
def __init__(self, path, lang=None):
super(FontAsset, self).__init__(path, lang)
@staticmethod
def supported_extensions():
return ['.eot', '.svg', '.ttf', '.woff']
@staticmethod
def get_languages(settings):
return settings.fonts.languages
def _get_partials_dir(self):
return os.path.join(self._settings.partials, 'fonts')
def _get_source_dir(self):
return self._settings.fonts.source
def _get_target_dir(self):
return self._settings.fonts.target
def get_asset_objects(path, settings):
asset_classes = [
ImageAsset,
FontAsset,
StylesheetAsset,
HtmlAsset,
ScriptAsset
]
file_ext = os.path.splitext(path)[1]
for asset_class in asset_classes:
if file_ext in asset_class.supported_extensions():
langs = asset_class.get_languages(settings)
if langs is None:
return asset_class(path, None)
else:
return [asset_class(path, lang) for lang in langs]
return None
|
[
"subprocess.Popen",
"os.remove",
"os.path.basename",
"os.path.dirname",
"os.path.exists",
"tempfile.mkdtemp",
"os.path.splitext",
"io.open",
"shutil.rmtree",
"os.path.join",
"shutil.copy"
] |
[((6556, 6578), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (6572, 6578), False, 'import os\n'), ((7195, 7247), 'os.path.join', 'os.path.join', (['self._settings.partials', '"""stylesheets"""'], {}), "(self._settings.partials, 'stylesheets')\n", (7207, 7247), False, 'import os\n'), ((7836, 7854), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (7852, 7854), False, 'import tempfile\n'), ((7878, 7915), 'os.path.join', 'os.path.join', (['temp_path', '"""source.css"""'], {}), "(temp_path, 'source.css')\n", (7890, 7915), False, 'import os\n'), ((7981, 8018), 'os.path.join', 'os.path.join', (['temp_path', '"""target.css"""'], {}), "(temp_path, 'target.css')\n", (7993, 8018), False, 'import os\n'), ((8035, 8221), 'subprocess.Popen', 'subprocess.Popen', (["['java', '-Xss100m', '-jar', self._settings.yuicompressor_file, '--type',\n 'css', '-o', target_file, source_file]"], {'stdout': 'subprocess.PIPE', 'stdin': 'subprocess.PIPE'}), "(['java', '-Xss100m', '-jar', self._settings.\n yuicompressor_file, '--type', 'css', '-o', target_file, source_file],\n stdout=subprocess.PIPE, stdin=subprocess.PIPE)\n", (8051, 8221), False, 'import subprocess\n'), ((8509, 8533), 'shutil.rmtree', 'shutil.rmtree', (['temp_path'], {}), '(temp_path)\n', (8522, 8533), False, 'import shutil\n'), ((9121, 9169), 'os.path.join', 'os.path.join', (['self._settings.partials', '"""scripts"""'], {}), "(self._settings.partials, 'scripts')\n", (9133, 9169), False, 'import os\n'), ((9840, 9858), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (9856, 9858), False, 'import tempfile\n'), ((9882, 9918), 'os.path.join', 'os.path.join', (['temp_path', '"""source.js"""'], {}), "(temp_path, 'source.js')\n", (9894, 9918), False, 'import os\n'), ((9984, 10020), 'os.path.join', 'os.path.join', (['temp_path', '"""target.js"""'], {}), "(temp_path, 'target.js')\n", (9996, 10020), False, 'import os\n'), ((10037, 10209), 'subprocess.Popen', 'subprocess.Popen', (["['java', '-jar', self._settings.yuicompressor_file, '--type', 'js', '-o',\n target_file, source_file]"], {'stdout': 'subprocess.PIPE', 'stdin': 'subprocess.PIPE'}), "(['java', '-jar', self._settings.yuicompressor_file,\n '--type', 'js', '-o', target_file, source_file], stdout=subprocess.PIPE,\n stdin=subprocess.PIPE)\n", (10053, 10209), False, 'import subprocess\n'), ((10482, 10506), 'shutil.rmtree', 'shutil.rmtree', (['temp_path'], {}), '(temp_path)\n', (10495, 10506), False, 'import shutil\n'), ((10558, 10576), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (10574, 10576), False, 'import tempfile\n'), ((10600, 10640), 'os.path.join', 'os.path.join', (['temp_path', '"""source.coffee"""'], {}), "(temp_path, 'source.coffee')\n", (10612, 10640), False, 'import os\n'), ((10706, 10742), 'os.path.join', 'os.path.join', (['temp_path', '"""source.js"""'], {}), "(temp_path, 'source.js')\n", (10718, 10742), False, 'import os\n'), ((10759, 10876), 'subprocess.Popen', 'subprocess.Popen', (["[self._settings.coffee_bin, '-c', source_file]"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), "([self._settings.coffee_bin, '-c', source_file], stdout=\n subprocess.PIPE, stderr=subprocess.PIPE)\n", (10775, 10876), False, 'import subprocess\n'), ((11062, 11086), 'shutil.rmtree', 'shutil.rmtree', (['temp_path'], {}), '(temp_path)\n', (11075, 11086), False, 'import shutil\n'), ((11858, 11903), 'os.path.join', 'os.path.join', (['self._settings.partials', '"""html"""'], {}), "(self._settings.partials, 'html')\n", (11870, 11903), False, 'import 
os\n'), ((12623, 12641), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (12639, 12641), False, 'import tempfile\n'), ((12665, 12703), 'os.path.join', 'os.path.join', (['temp_path', '"""source.html"""'], {}), "(temp_path, 'source.html')\n", (12677, 12703), False, 'import os\n'), ((12769, 12807), 'os.path.join', 'os.path.join', (['temp_path', '"""target.html"""'], {}), "(temp_path, 'target.html')\n", (12781, 12807), False, 'import os\n'), ((12824, 13047), 'subprocess.Popen', 'subprocess.Popen', (["['java', '-jar', self._settings.htmlcompressor_file, '--type', 'html',\n '--mask', '*.html', '-o', target_file, source_file,\n '--remove-intertag-spaces']"], {'stdout': 'subprocess.PIPE', 'stdin': 'subprocess.PIPE'}), "(['java', '-jar', self._settings.htmlcompressor_file,\n '--type', 'html', '--mask', '*.html', '-o', target_file, source_file,\n '--remove-intertag-spaces'], stdout=subprocess.PIPE, stdin=subprocess.PIPE)\n", (12840, 13047), False, 'import subprocess\n'), ((13367, 13391), 'shutil.rmtree', 'shutil.rmtree', (['temp_path'], {}), '(temp_path)\n', (13380, 13391), False, 'import shutil\n'), ((14166, 14202), 'shutil.copy', 'shutil.copy', (['self._path', 'target_path'], {}), '(self._path, target_path)\n', (14177, 14202), False, 'import shutil\n'), ((14563, 14610), 'os.path.join', 'os.path.join', (['self._settings.partials', '"""images"""'], {}), "(self._settings.partials, 'images')\n", (14575, 14610), False, 'import os\n'), ((15129, 15175), 'os.path.join', 'os.path.join', (['self._settings.partials', '"""fonts"""'], {}), "(self._settings.partials, 'fonts')\n", (15141, 15175), False, 'import os\n'), ((15514, 15536), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (15530, 15536), False, 'import os\n'), ((3308, 3335), 'os.path.splitext', 'os.path.splitext', (['path_part'], {}), '(path_part)\n', (3324, 3335), False, 'import os\n'), ((3562, 3589), 'os.path.splitext', 'os.path.splitext', (['path_part'], {}), '(path_part)\n', (3578, 3589), False, 'import os\n'), ((3752, 3779), 'os.path.splitext', 'os.path.splitext', (['path_part'], {}), '(path_part)\n', (3768, 3779), False, 'import os\n'), ((6682, 6724), 'io.open', 'io.open', (['self._path', '"""r"""'], {'encoding': '"""utf-8"""'}), "(self._path, 'r', encoding='utf-8')\n", (6689, 6724), False, 'import io\n'), ((3020, 3042), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (3036, 3042), False, 'import os\n'), ((6822, 6843), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (6837, 6843), False, 'import os\n'), ((6870, 6891), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (6885, 6891), False, 'import os\n'), ((14073, 14101), 'os.path.dirname', 'os.path.dirname', (['target_path'], {}), '(target_path)\n', (14088, 14101), False, 'import os\n'), ((14128, 14156), 'os.path.dirname', 'os.path.dirname', (['target_path'], {}), '(target_path)\n', (14143, 14156), False, 'import os\n'), ((5550, 5584), 'os.path.exists', 'os.path.exists', (['cache_entry.target'], {}), '(cache_entry.target)\n', (5564, 5584), False, 'import os\n'), ((5610, 5639), 'os.remove', 'os.remove', (['cache_entry.target'], {}), '(cache_entry.target)\n', (5619, 5639), False, 'import os\n')]
|
from itertools import product
alpha = ['A', 'T', 'C', 'G']
motifs = [a+b+c+d+e+f+g for a,b,c,d,e,f,g in product(alpha, repeat=7)]
with open('motifs7.txt', 'w') as f:
for item in motifs:
f.write("%s\n" % item)
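# product(alpha, repeat=7) enumerates all 4**7 = 16384 length-7 DNA motifs,
# so motifs7.txt ends up with 16384 lines.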
|
[
"itertools.product"
] |
[((105, 129), 'itertools.product', 'product', (['alpha'], {'repeat': '(7)'}), '(alpha, repeat=7)\n', (112, 129), False, 'from itertools import product\n')]
|
import networkx as nx
graph = nx.DiGraph()
nodes = [f"{i}" for i in range(1, 13)]
nodes.extend([chr(i) for i in range(1, 13)])  # chr(1)..chr(12) are non-printable control characters
graph.add_nodes_from(nodes)
class Node:
def __init__(self, name, direct, sig):
self.name = name
self.direct = direct
self.sig = sig
class Edge:
def __init__(self, prev, to, tp, direct):
self.prev = prev
self.to = to
self.tp = tp
self.direct = direct
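# Illustrative follow-up (assumed intent, since the snippet stops after the class
# definitions): connect two of the generated node labels and inspect the graph.
#   graph.add_edge(nodes[0], nodes[1])
#   print(graph.number_of_nodes(), graph.number_of_edges())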
|
[
"networkx.DiGraph"
] |
[((31, 43), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (41, 43), True, 'import networkx as nx\n')]
|
import unittest
from datastructure.links.Node import Node
def from_second(head):
if head is None:
raise ValueError("Linked list is empty")
return head._next
class MyTestCase(unittest.TestCase):
def test_something(self):
head = Node(0, None)
current = head
for i in range(1, 6):
new_node = Node(i, None)
current._next = new_node
current = new_node
second = from_second(head)
result = []
node = second
while node is not None:
result.append(node._element)
node = node._next
self.assertEqual([1, 2, 3, 4, 5], result)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"datastructure.links.Node.Node"
] |
[((697, 712), 'unittest.main', 'unittest.main', ([], {}), '()\n', (710, 712), False, 'import unittest\n'), ((259, 272), 'datastructure.links.Node.Node', 'Node', (['(0)', 'None'], {}), '(0, None)\n', (263, 272), False, 'from datastructure.links.Node import Node\n'), ((349, 362), 'datastructure.links.Node.Node', 'Node', (['i', 'None'], {}), '(i, None)\n', (353, 362), False, 'from datastructure.links.Node import Node\n')]
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from numpy.testing import assert_allclose, assert_equal
import astropy.units as u
from astropy.table import Table
from gammapy.astro.population import (
add_observed_parameters,
add_pulsar_parameters,
add_pwn_parameters,
add_snr_parameters,
make_base_catalog_galactic,
make_catalog_random_positions_cube,
make_catalog_random_positions_sphere,
)
def test_make_catalog_random_positions_cube():
table = make_catalog_random_positions_cube(random_state=0)
d = table[0]
assert len(table) == 100
assert len(table.colnames) == 3
assert table["x"].unit == "pc"
assert_allclose(d["x"], 0.0976270078546495)
assert table["y"].unit == "pc"
assert_allclose(d["y"], 0.3556330735924602)
assert table["z"].unit == "pc"
assert_allclose(d["z"], -0.37640823601179485)
table = make_catalog_random_positions_cube(dimension=2, random_state=0)
assert_equal(table["z"], 0)
table = make_catalog_random_positions_cube(dimension=1, random_state=0)
assert_equal(table["y"], 0)
assert_equal(table["z"], 0)
def test_make_catalog_random_positions_sphere():
table = make_catalog_random_positions_sphere(random_state=0)
d = table[0]
assert len(table) == 100
assert len(table.colnames) == 3
assert table["lon"].unit == "rad"
assert_allclose(d["lon"], 3.4482969442579128)
assert table["lat"].unit == "rad"
assert_allclose(d["lat"], 0.36359133530192267)
assert table["distance"].unit == "pc"
assert_allclose(d["distance"], 0.6780943487897606)
def test_make_base_catalog_galactic():
table = make_base_catalog_galactic(n_sources=10, random_state=0)
d = table[0]
assert len(table) == 10
assert len(table.colnames) == 13
assert table["age"].unit == "yr"
assert_allclose(d["age"], 548813.50392732478)
assert table["n_ISM"].unit == "cm-3"
assert_allclose(d["n_ISM"], 1.0)
assert table["spiralarm"].unit is None
assert d["spiralarm"] == "Crux Scutum"
assert table["x_birth"].unit == "kpc"
assert_allclose(d["x_birth"], -5.856461, atol=1e-5)
assert table["y_birth"].unit == "kpc"
assert_allclose(d["y_birth"], 3.017292, atol=1e-5)
assert table["z_birth"].unit == "kpc"
assert_allclose(d["z_birth"], 0.049088, atol=1e-5)
assert table["x"].unit == "kpc"
assert_allclose(d["x"], -5.941061, atol=1e-5)
assert table["y"].unit == "kpc"
assert_allclose(d["y"], 3.081642, atol=1e-5)
assert table["z"].unit == "kpc"
assert_allclose(d["z"], 0.023161, atol=1e-5)
assert table["vx"].unit == "km/s"
assert_allclose(d["vx"], -150.727104, atol=1e-5)
assert table["vy"].unit == "km/s"
assert_allclose(d["vy"], 114.648494, atol=1e-5)
assert table["vz"].unit == "km/s"
assert_allclose(d["vz"], -46.193814, atol=1e-5)
assert table["v_abs"].unit == "km/s"
assert_allclose(d["v_abs"], 194.927693, atol=1e-5)
def test_add_snr_parameters():
table = Table()
table["age"] = [100, 1000] * u.yr
table["n_ISM"] = u.Quantity(1, "cm-3")
table = add_snr_parameters(table)
assert len(table) == 2
assert table.colnames == ["age", "n_ISM", "E_SN", "r_out", "r_in", "L_SNR"]
assert table["E_SN"].unit == "erg"
assert_allclose(table["E_SN"], 1e51)
assert table["r_out"].unit == "pc"
assert_allclose(table["r_out"], [1, 3.80730787743])
assert table["r_in"].unit == "pc"
assert_allclose(table["r_in"], [0.9086, 3.45931993743])
assert table["L_SNR"].unit == "1 / s"
assert_allclose(table["L_SNR"], [0, 1.0768e33])
def test_add_pulsar_parameters():
table = Table()
table["age"] = [100, 1000] * u.yr
table = add_pulsar_parameters(table, random_state=0)
assert len(table) == 2
assert len(table.colnames) == 10
assert table["age"].unit == "yr"
assert_allclose(table["age"], [100, 1000])
assert table["P0"].unit == "s"
assert_allclose(table["P0"], [0.214478, 0.246349], atol=1e-5)
assert table["P1"].unit == ""
assert_allclose(table["P1"], [6.310423e-13, 4.198294e-16], atol=1e-5)
assert table["P0_birth"].unit == "s"
assert_allclose(table["P0_birth"], [0.212418, 0.246336], atol=1e-5)
assert table["P1_birth"].unit == ""
assert_allclose(table["P1_birth"], [6.558773e-13, 4.199198e-16], atol=1e-5)
assert table["CharAge"].unit == "yr"
assert_allclose(table["CharAge"], [2.207394e-21, 1.638930e-24], atol=1e-5)
assert table["Tau0"].unit == "yr"
assert_allclose(table["Tau0"], [5.131385e03, 9.294538e06], atol=1e-5)
assert table["L_PSR"].unit == "erg / s"
assert_allclose(table["L_PSR"], [2.599229e36, 1.108788e33], rtol=1e-5)
assert table["L0_PSR"].unit == "erg / s"
assert_allclose(table["L0_PSR"], [2.701524e36, 1.109026e33], rtol=1e-5)
assert table["B_PSR"].unit == "G"
assert_allclose(table["B_PSR"], [1.194420e13, 3.254597e11], rtol=1e-5)
def test_add_pwn_parameters():
table = make_base_catalog_galactic(n_sources=10, random_state=0)
# To compute PWN parameters we need PSR and SNR parameters first
table = add_snr_parameters(table)
table = add_pulsar_parameters(table, random_state=0)
table = add_pwn_parameters(table)
d = table[0]
assert len(table) == 10
assert len(table.colnames) == 27
assert table["r_out_PWN"].unit == "pc"
assert_allclose(d["r_out_PWN"], 1.378224, atol=1e-4)
def test_add_observed_parameters():
table = make_base_catalog_galactic(n_sources=10, random_state=0)
table = add_observed_parameters(table)
d = table[0]
assert len(table) == 10
assert len(table.colnames) == 20
assert table["distance"].unit == "pc"
assert_allclose(d["distance"], 13016.572756, atol=1e-5)
assert table["GLON"].unit == "deg"
assert_allclose(d["GLON"], -27.156565, atol=1e-5)
assert table["GLAT"].unit == "deg"
assert_allclose(d["GLAT"], 0.101948, atol=1e-5)
assert table["VGLON"].unit == "deg / Myr"
assert_allclose(d["VGLON"], 0.368166, atol=1e-5)
assert table["VGLAT"].unit == "deg / Myr"
assert_allclose(d["VGLAT"], -0.209514, atol=1e-5)
assert table["RA"].unit == "deg"
assert_allclose(d["RA"], 244.347149, atol=1e-5)
assert table["DEC"].unit == "deg"
assert_allclose(d["DEC"], -50.410142, atol=1e-5)
def test_chain_all():
# Test that running the simulation functions in chain works
table = make_base_catalog_galactic(n_sources=10, random_state=0)
table = add_snr_parameters(table)
table = add_pulsar_parameters(table, random_state=0)
table = add_pwn_parameters(table)
table = add_observed_parameters(table)
d = table[0]
# Note: the individual functions are tested above.
# Here we just run them in a chain and do very basic asserts
# on the output so that we make sure we notice changes.
assert len(table) == 10
assert len(table.colnames) == 34
assert table["r_out_PWN"].unit == "pc"
assert_allclose(d["r_out_PWN"], 1.378224, atol=1e-4)
assert table["RA"].unit == "deg"
assert_allclose(d["RA"], 244.347149, atol=1e-5)
|
[
"gammapy.astro.population.make_catalog_random_positions_cube",
"astropy.table.Table",
"astropy.units.Quantity",
"gammapy.astro.population.add_observed_parameters",
"gammapy.astro.population.make_base_catalog_galactic",
"gammapy.astro.population.add_pulsar_parameters",
"gammapy.astro.population.add_pwn_parameters",
"gammapy.astro.population.make_catalog_random_positions_sphere",
"numpy.testing.assert_equal",
"numpy.testing.assert_allclose",
"gammapy.astro.population.add_snr_parameters"
] |
[((498, 548), 'gammapy.astro.population.make_catalog_random_positions_cube', 'make_catalog_random_positions_cube', ([], {'random_state': '(0)'}), '(random_state=0)\n', (532, 548), False, 'from gammapy.astro.population import add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere\n'), ((672, 715), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['x']", '(0.0976270078546495)'], {}), "(d['x'], 0.0976270078546495)\n", (687, 715), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((755, 798), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['y']", '(0.3556330735924602)'], {}), "(d['y'], 0.3556330735924602)\n", (770, 798), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((838, 883), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['z']", '(-0.37640823601179485)'], {}), "(d['z'], -0.37640823601179485)\n", (853, 883), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((897, 960), 'gammapy.astro.population.make_catalog_random_positions_cube', 'make_catalog_random_positions_cube', ([], {'dimension': '(2)', 'random_state': '(0)'}), '(dimension=2, random_state=0)\n', (931, 960), False, 'from gammapy.astro.population import add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere\n'), ((965, 992), 'numpy.testing.assert_equal', 'assert_equal', (["table['z']", '(0)'], {}), "(table['z'], 0)\n", (977, 992), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((1006, 1069), 'gammapy.astro.population.make_catalog_random_positions_cube', 'make_catalog_random_positions_cube', ([], {'dimension': '(1)', 'random_state': '(0)'}), '(dimension=1, random_state=0)\n', (1040, 1069), False, 'from gammapy.astro.population import add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere\n'), ((1074, 1101), 'numpy.testing.assert_equal', 'assert_equal', (["table['y']", '(0)'], {}), "(table['y'], 0)\n", (1086, 1101), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((1106, 1133), 'numpy.testing.assert_equal', 'assert_equal', (["table['z']", '(0)'], {}), "(table['z'], 0)\n", (1118, 1133), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((1197, 1249), 'gammapy.astro.population.make_catalog_random_positions_sphere', 'make_catalog_random_positions_sphere', ([], {'random_state': '(0)'}), '(random_state=0)\n', (1233, 1249), False, 'from gammapy.astro.population import add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere\n'), ((1376, 1421), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['lon']", '(3.4482969442579128)'], {}), "(d['lon'], 3.4482969442579128)\n", (1391, 1421), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((1464, 1510), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['lat']", '(0.36359133530192267)'], {}), "(d['lat'], 0.36359133530192267)\n", (1479, 1510), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((1557, 1607), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['distance']", 
'(0.6780943487897606)'], {}), "(d['distance'], 0.6780943487897606)\n", (1572, 1607), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((1661, 1717), 'gammapy.astro.population.make_base_catalog_galactic', 'make_base_catalog_galactic', ([], {'n_sources': '(10)', 'random_state': '(0)'}), '(n_sources=10, random_state=0)\n', (1687, 1717), False, 'from gammapy.astro.population import add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere\n'), ((1843, 1887), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['age']", '(548813.5039273248)'], {}), "(d['age'], 548813.5039273248)\n", (1858, 1887), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((1934, 1966), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['n_ISM']", '(1.0)'], {}), "(d['n_ISM'], 1.0)\n", (1949, 1966), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((2099, 2151), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['x_birth']", '(-5.856461)'], {'atol': '(1e-05)'}), "(d['x_birth'], -5.856461, atol=1e-05)\n", (2114, 2151), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((2197, 2248), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['y_birth']", '(3.017292)'], {'atol': '(1e-05)'}), "(d['y_birth'], 3.017292, atol=1e-05)\n", (2212, 2248), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((2294, 2345), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['z_birth']", '(0.049088)'], {'atol': '(1e-05)'}), "(d['z_birth'], 0.049088, atol=1e-05)\n", (2309, 2345), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((2385, 2431), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['x']", '(-5.941061)'], {'atol': '(1e-05)'}), "(d['x'], -5.941061, atol=1e-05)\n", (2400, 2431), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((2471, 2516), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['y']", '(3.081642)'], {'atol': '(1e-05)'}), "(d['y'], 3.081642, atol=1e-05)\n", (2486, 2516), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((2556, 2601), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['z']", '(0.023161)'], {'atol': '(1e-05)'}), "(d['z'], 0.023161, atol=1e-05)\n", (2571, 2601), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((2643, 2692), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['vx']", '(-150.727104)'], {'atol': '(1e-05)'}), "(d['vx'], -150.727104, atol=1e-05)\n", (2658, 2692), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((2734, 2782), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['vy']", '(114.648494)'], {'atol': '(1e-05)'}), "(d['vy'], 114.648494, atol=1e-05)\n", (2749, 2782), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((2824, 2872), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['vz']", '(-46.193814)'], {'atol': '(1e-05)'}), "(d['vz'], -46.193814, atol=1e-05)\n", (2839, 2872), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((2917, 2968), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['v_abs']", '(194.927693)'], {'atol': '(1e-05)'}), "(d['v_abs'], 194.927693, atol=1e-05)\n", (2932, 2968), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((3013, 3020), 'astropy.table.Table', 'Table', ([], {}), '()\n', (3018, 3020), False, 'from astropy.table 
import Table\n'), ((3080, 3101), 'astropy.units.Quantity', 'u.Quantity', (['(1)', '"""cm-3"""'], {}), "(1, 'cm-3')\n", (3090, 3101), True, 'import astropy.units as u\n'), ((3115, 3140), 'gammapy.astro.population.add_snr_parameters', 'add_snr_parameters', (['table'], {}), '(table)\n', (3133, 3140), False, 'from gammapy.astro.population import add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere\n'), ((3293, 3330), 'numpy.testing.assert_allclose', 'assert_allclose', (["table['E_SN']", '(1e+51)'], {}), "(table['E_SN'], 1e+51)\n", (3308, 3330), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((3373, 3424), 'numpy.testing.assert_allclose', 'assert_allclose', (["table['r_out']", '[1, 3.80730787743]'], {}), "(table['r_out'], [1, 3.80730787743])\n", (3388, 3424), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((3467, 3522), 'numpy.testing.assert_allclose', 'assert_allclose', (["table['r_in']", '[0.9086, 3.45931993743]'], {}), "(table['r_in'], [0.9086, 3.45931993743])\n", (3482, 3522), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((3569, 3617), 'numpy.testing.assert_allclose', 'assert_allclose', (["table['L_SNR']", '[0, 1.0768e+33]'], {}), "(table['L_SNR'], [0, 1.0768e+33])\n", (3584, 3617), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((3665, 3672), 'astropy.table.Table', 'Table', ([], {}), '()\n', (3670, 3672), False, 'from astropy.table import Table\n'), ((3724, 3768), 'gammapy.astro.population.add_pulsar_parameters', 'add_pulsar_parameters', (['table'], {'random_state': '(0)'}), '(table, random_state=0)\n', (3745, 3768), False, 'from gammapy.astro.population import add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere\n'), ((3876, 3918), 'numpy.testing.assert_allclose', 'assert_allclose', (["table['age']", '[100, 1000]'], {}), "(table['age'], [100, 1000])\n", (3891, 3918), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((3958, 4020), 'numpy.testing.assert_allclose', 'assert_allclose', (["table['P0']", '[0.214478, 0.246349]'], {'atol': '(1e-05)'}), "(table['P0'], [0.214478, 0.246349], atol=1e-05)\n", (3973, 4020), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((4058, 4128), 'numpy.testing.assert_allclose', 'assert_allclose', (["table['P1']", '[6.310423e-13, 4.198294e-16]'], {'atol': '(1e-05)'}), "(table['P1'], [6.310423e-13, 4.198294e-16], atol=1e-05)\n", (4073, 4128), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((4173, 4241), 'numpy.testing.assert_allclose', 'assert_allclose', (["table['P0_birth']", '[0.212418, 0.246336]'], {'atol': '(1e-05)'}), "(table['P0_birth'], [0.212418, 0.246336], atol=1e-05)\n", (4188, 4241), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((4285, 4361), 'numpy.testing.assert_allclose', 'assert_allclose', (["table['P1_birth']", '[6.558773e-13, 4.199198e-16]'], {'atol': '(1e-05)'}), "(table['P1_birth'], [6.558773e-13, 4.199198e-16], atol=1e-05)\n", (4300, 4361), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((4406, 4480), 'numpy.testing.assert_allclose', 'assert_allclose', (["table['CharAge']", '[2.207394e-21, 1.63893e-24]'], {'atol': '(1e-05)'}), "(table['CharAge'], [2.207394e-21, 1.63893e-24], 
atol=1e-05)\n", (4421, 4480), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((4523, 4588), 'numpy.testing.assert_allclose', 'assert_allclose', (["table['Tau0']", '[5131.385, 9294538.0]'], {'atol': '(1e-05)'}), "(table['Tau0'], [5131.385, 9294538.0], atol=1e-05)\n", (4538, 4588), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((4641, 4714), 'numpy.testing.assert_allclose', 'assert_allclose', (["table['L_PSR']", '[2.599229e+36, 1.108788e+33]'], {'rtol': '(1e-05)'}), "(table['L_PSR'], [2.599229e+36, 1.108788e+33], rtol=1e-05)\n", (4656, 4714), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((4761, 4835), 'numpy.testing.assert_allclose', 'assert_allclose', (["table['L0_PSR']", '[2.701524e+36, 1.109026e+33]'], {'rtol': '(1e-05)'}), "(table['L0_PSR'], [2.701524e+36, 1.109026e+33], rtol=1e-05)\n", (4776, 4835), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((4875, 4954), 'numpy.testing.assert_allclose', 'assert_allclose', (["table['B_PSR']", '[11944200000000.0, 325459700000.0]'], {'rtol': '(1e-05)'}), "(table['B_PSR'], [11944200000000.0, 325459700000.0], rtol=1e-05)\n", (4890, 4954), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((4991, 5047), 'gammapy.astro.population.make_base_catalog_galactic', 'make_base_catalog_galactic', ([], {'n_sources': '(10)', 'random_state': '(0)'}), '(n_sources=10, random_state=0)\n', (5017, 5047), False, 'from gammapy.astro.population import add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere\n'), ((5129, 5154), 'gammapy.astro.population.add_snr_parameters', 'add_snr_parameters', (['table'], {}), '(table)\n', (5147, 5154), False, 'from gammapy.astro.population import add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere\n'), ((5167, 5211), 'gammapy.astro.population.add_pulsar_parameters', 'add_pulsar_parameters', (['table'], {'random_state': '(0)'}), '(table, random_state=0)\n', (5188, 5211), False, 'from gammapy.astro.population import add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere\n'), ((5224, 5249), 'gammapy.astro.population.add_pwn_parameters', 'add_pwn_parameters', (['table'], {}), '(table)\n', (5242, 5249), False, 'from gammapy.astro.population import add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere\n'), ((5381, 5435), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['r_out_PWN']", '(1.378224)'], {'atol': '(0.0001)'}), "(d['r_out_PWN'], 1.378224, atol=0.0001)\n", (5396, 5435), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((5484, 5540), 'gammapy.astro.population.make_base_catalog_galactic', 'make_base_catalog_galactic', ([], {'n_sources': '(10)', 'random_state': '(0)'}), '(n_sources=10, random_state=0)\n', (5510, 5540), False, 'from gammapy.astro.population import add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere\n'), ((5553, 5583), 
'gammapy.astro.population.add_observed_parameters', 'add_observed_parameters', (['table'], {}), '(table)\n', (5576, 5583), False, 'from gammapy.astro.population import add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere\n'), ((5714, 5770), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['distance']", '(13016.572756)'], {'atol': '(1e-05)'}), "(d['distance'], 13016.572756, atol=1e-05)\n", (5729, 5770), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((5813, 5863), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['GLON']", '(-27.156565)'], {'atol': '(1e-05)'}), "(d['GLON'], -27.156565, atol=1e-05)\n", (5828, 5863), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((5906, 5954), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['GLAT']", '(0.101948)'], {'atol': '(1e-05)'}), "(d['GLAT'], 0.101948, atol=1e-05)\n", (5921, 5954), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((6004, 6053), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['VGLON']", '(0.368166)'], {'atol': '(1e-05)'}), "(d['VGLON'], 0.368166, atol=1e-05)\n", (6019, 6053), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((6103, 6153), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['VGLAT']", '(-0.209514)'], {'atol': '(1e-05)'}), "(d['VGLAT'], -0.209514, atol=1e-05)\n", (6118, 6153), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((6194, 6242), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['RA']", '(244.347149)'], {'atol': '(1e-05)'}), "(d['RA'], 244.347149, atol=1e-05)\n", (6209, 6242), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((6284, 6333), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['DEC']", '(-50.410142)'], {'atol': '(1e-05)'}), "(d['DEC'], -50.410142, atol=1e-05)\n", (6299, 6333), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((6433, 6489), 'gammapy.astro.population.make_base_catalog_galactic', 'make_base_catalog_galactic', ([], {'n_sources': '(10)', 'random_state': '(0)'}), '(n_sources=10, random_state=0)\n', (6459, 6489), False, 'from gammapy.astro.population import add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere\n'), ((6502, 6527), 'gammapy.astro.population.add_snr_parameters', 'add_snr_parameters', (['table'], {}), '(table)\n', (6520, 6527), False, 'from gammapy.astro.population import add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere\n'), ((6540, 6584), 'gammapy.astro.population.add_pulsar_parameters', 'add_pulsar_parameters', (['table'], {'random_state': '(0)'}), '(table, random_state=0)\n', (6561, 6584), False, 'from gammapy.astro.population import add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere\n'), ((6597, 6622), 'gammapy.astro.population.add_pwn_parameters', 'add_pwn_parameters', (['table'], {}), '(table)\n', (6615, 6622), False, 'from gammapy.astro.population import add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, 
add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere\n'), ((6635, 6665), 'gammapy.astro.population.add_observed_parameters', 'add_observed_parameters', (['table'], {}), '(table)\n', (6658, 6665), False, 'from gammapy.astro.population import add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere\n'), ((6977, 7031), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['r_out_PWN']", '(1.378224)'], {'atol': '(0.0001)'}), "(d['r_out_PWN'], 1.378224, atol=0.0001)\n", (6992, 7031), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((7071, 7119), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['RA']", '(244.347149)'], {'atol': '(1e-05)'}), "(d['RA'], 244.347149, atol=1e-05)\n", (7086, 7119), False, 'from numpy.testing import assert_allclose, assert_equal\n')]
|
"""Class definition for the DataSetParser ABC and FeaturizerMixin."""
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Callable, Generator, List, Tuple, Type
import numpy as np
import pandas as pd
from sklearn.preprocessing import RobustScaler
class FeaturizerMixin:
"""Mixin to provide secondary featurization functionality."""
def featurize_secondary(self):
"""
Perform secondary featurization.
Sequentially trigger each featurizer to extract secondary features.
The extracted secondary metafeatures are stored in each featurizer's
`sec_metafeatures` and `sec_test_metafeatures` attributes.
These extracted metafeatures will then be collected and appended column-wise
        to the `metafeatures` and `test_metafeatures` attributes of the DataSetParser
subclass instance.
"""
for featurizer in self.featurizers:
if type(featurizer).__name__ == "RawDataSetFeaturizerViaLambda":
featurizer.featurize(
self._create_raw_generator(),
keys=self.metafeatures,
test_keys=self.test_metafeatures,
multiprocess=self._multiprocess_raw_secondary,
)
else:
featurizer.featurize(
meta_df=self.metafeatures,
test_meta_df=self.test_metafeatures,
)
self.__add_secondary_metafeatures()
def __add_secondary_metafeatures(self):
"""Add secondary features to the training and test metafeature attributes."""
# Get secondary feature names
if self.metafeatures is not None:
sec_feature_names = list(self.metafeatures) + [
name
for featurizer in self.featurizers
for name in featurizer.sec_feature_names
]
elif self.test_metafeatures is not None:
sec_feature_names = list(self.test_metafeatures) + [
name
for featurizer in self.featurizers
for name in featurizer.sec_feature_names
]
if self.metafeatures is not None:
sec_metafeatures = [x.sec_metafeatures for x in self.featurizers]
self.metafeatures = pd.concat(
[self.metafeatures, *sec_metafeatures],
axis=1,
ignore_index=True,
)
self.metafeatures.columns = sec_feature_names
if self.test_metafeatures is not None:
sec_test_metafeatures = [
x.sec_test_metafeatures for x in self.featurizers
]
self.test_metafeatures = pd.concat(
[self.test_metafeatures, *sec_test_metafeatures],
axis=1,
ignore_index=True,
)
self.test_metafeatures.columns = sec_feature_names
class DataSetParser(ABC, FeaturizerMixin):
"""
Abstract base class to load and extract metafeatures from raw data sets.
FeaturizerMixin provides the `.featurize` method.
Instance attributes:
src {Path}
-- Path to data set file on disk.
metafeatures {pd.DataFrame}
-- Metafeatures extracted from the raw data set. Each metafeature
row corresponds to a feature column in the raw data set.
labels {pd.Series}
-- Label corresponding to each metafeature.
test_src {Path}
-- Optional path to test raw data set file on disk. This attribute
applies more to the subclasses of MetaDataSetParser.
test_metafeatures {pd.DataFrame}
-- Optional metafeatures extracted from the test raw data set.
test_labels {pd.Series}
-- Optional labels corresponding to each test metafeature row.
scaler {RobustScaler}
            -- A scaler used to normalize metafeatures before serving them
for training.
featurizers: {List}
            -- A list of featurizers that perform secondary metafeaturization.
Class attributes:
NUM_BASE_METAFEATURES {int}
-- Number of base metafeatures.
Used to separate base and secondary metafeatures.
Abstract methods:
load_data_set
-- Load the data set and perform necessarily cleaning and parsing.
featurize_base
-- Featurize base metafeatures.
normalize_features
-- Performs normalization on the metafeatures and test metafeatures
(if provided).
_create_raw_generator
-- Returns a generator of raw data sets. This supports the
MetaDataSetFeaturizerViaLambda class functionality.
"""
NUM_BASE_METAFEATURES = (
7
) # Includes (total_val, min, max, mean, std, num_nans, num_distincts)
def __init__(self):
"""Init function."""
self.src: Path = None
self.labels: pd.Series = None
self.metafeatures: pd.DataFrame = None
self.test_src: Path = None
self.test_labels: pd.Series = None
self.test_metafeatures: pd.DataFrame = None
self.scaler: Type[RobustScaler] = None
self.featurizers: List = []
self._multiprocess_raw_secondary: bool = False # Multiprocessing of raw dataframe(s)
@abstractmethod
def load_data_set(self):
"""Load data set from source."""
raise NotImplementedError
@abstractmethod
def featurize_base(self):
"""Featurize base metafeatures."""
raise NotImplementedError
@abstractmethod
def normalize_features(self):
"""Normalize metafeatures for training."""
raise NotImplementedError
@abstractmethod
def _create_raw_generator(
self
) -> Generator[Tuple[str, Callable[[], pd.DataFrame]], None, None]:
raise NotImplementedError
def _select_metafeatures(
self, df: pd.DataFrame, mark: str = "*"
) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""
Select metafeatures to normalize and to retain for training.
The following criteria is used.
Metafeatures to:
- normalize: Numerical columns
- not normalize but retain for training: Features whose title ends with `mark`.
Remainder metafeatures are dropped.
Note:
Columns are tracked by indices instead of names to avoid problems when
            there are duplicated column names.
Arguments:
df {pd.DataFrame}
-- Metafeatures dataframe.
mark {str}
-- Character to append to names of columns that should not be
                normalized but retained for training.
Returns:
Tuple[pd.DataFrame, pd.DataFrame]
-- (metafeatures_to_normalize, metafeatures_to_retain)
"""
idx_to_normalize: List[int] = []
idx_to_retain: List[int] = []
IGNORE_COLS = (
"attribute_name", # Already represented as ngrams
"sample", # Ignore sample columns which may be of type int
"total_val", # Intent prediction should not be based on # data points
"num_distincts", # Use `normalized_distinct_rate` instead
"num_nans", # Captured in `nan_rate`
)
for i, col in enumerate(df.columns):
if col in IGNORE_COLS:
continue
# Save columns that are either numeric or that have been marked
# into appropriate groups
            if col[-1] == mark:
idx_to_retain.append(i)
elif self._is_numeric(df.iloc[:, i]):
idx_to_normalize.append(i)
features_to_normalize = df.iloc[:, idx_to_normalize]
features_to_retain = df.iloc[:, idx_to_retain]
return features_to_normalize, features_to_retain
def _is_numeric(self, series: pd.Series) -> bool:
return pd.api.types.is_numeric_dtype(series)
@staticmethod
def _split_features_and_labels(
mds: pd.DataFrame, label_col: str
) -> Tuple[pd.DataFrame, pd.Series]:
"""
Split features and labels.
Arguments:
mds {pd.DataFrame} -- MetaDataSet.
label_col {str} -- Column containing labels in the MetaDataSet.
Returns:
Tuple[pd.DataFrame, pd.Series] -- (features, labels) tuple.
"""
return mds.drop(label_col, axis=1), mds[label_col]
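# Minimal sketch of a concrete subclass (hypothetical; the real subclasses are not
# shown in this module). It only illustrates which abstract hooks must be overridden
# and how `scaler` and `_select_metafeatures` are meant to be used:
#
#   class CsvDataSetParser(DataSetParser):
#       def load_data_set(self):
#           self.raw = pd.read_csv(self.src)
#       def featurize_base(self):
#           self.metafeatures = pd.DataFrame({"total_val": self.raw.count().values})
#       def normalize_features(self):
#           to_norm, to_keep = self._select_metafeatures(self.metafeatures)
#           self.scaler = RobustScaler().fit(to_norm)
#           normalized = pd.DataFrame(self.scaler.transform(to_norm))
#           self.metafeatures = pd.concat([normalized, to_keep], axis=1)
#       def _create_raw_generator(self):
#           yield (str(self.src), lambda: self.raw)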
|
[
"pandas.concat",
"pandas.api.types.is_numeric_dtype"
] |
[((8026, 8063), 'pandas.api.types.is_numeric_dtype', 'pd.api.types.is_numeric_dtype', (['series'], {}), '(series)\n', (8055, 8063), True, 'import pandas as pd\n'), ((2326, 2402), 'pandas.concat', 'pd.concat', (['[self.metafeatures, *sec_metafeatures]'], {'axis': '(1)', 'ignore_index': '(True)'}), '([self.metafeatures, *sec_metafeatures], axis=1, ignore_index=True)\n', (2335, 2402), True, 'import pandas as pd\n'), ((2727, 2817), 'pandas.concat', 'pd.concat', (['[self.test_metafeatures, *sec_test_metafeatures]'], {'axis': '(1)', 'ignore_index': '(True)'}), '([self.test_metafeatures, *sec_test_metafeatures], axis=1,\n ignore_index=True)\n', (2736, 2817), True, 'import pandas as pd\n')]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class MLP(nn.Module):
def __init__(self, MLPInfo, activation='PReLU', PReLuInit=0.25, isUseBN=True, dropoutRate=0.0, initStd=0.0001):
super(MLP, self).__init__()
self.multiLayerPerceptron = nn.ModuleList() # MLP
for i in range(len(MLPInfo)-1):
self.multiLayerPerceptron.append(nn.Linear(MLPInfo[i], MLPInfo[i + 1]))
if isUseBN:
self.multiLayerPerceptron.append(nn.BatchNorm1d(MLPInfo[i + 1]))
actiFun = nn.PReLU(1, init=PReLuInit) if activation == 'PReLU' else Dice()
self.multiLayerPerceptron.append(actiFun)
self.multiLayerPerceptron.append(nn.Dropout(dropoutRate))
def forward(self, x):
for layer in self.multiLayerPerceptron:
x = layer(x)
return x
class Bottom(nn.Module):
def __init__(self, embeddingGroupInfo, MLPInfo, attMLPInfo, activation='PReLU',
PReLuInit=0.25, isUseBN=True, l2RegEmbedding=1e-6,
dropoutRate=0.0, initStd=0.0001, device=torch.device('cpu')):
super(Bottom, self).__init__()
self.dev = device
self.embeddingGroups = nn.ModuleDict() # embedding group
for key, value in embeddingGroupInfo.items():
if key == 'MovieId' or key == 'Genre':
self.embeddingGroups[key] = nn.Embedding(value[0], value[1], padding_idx=0)
else:
self.embeddingGroups[key] = nn.Embedding(value[0], value[1])
self.sequenceMeanPooling = SequencePoolingLayer(mod='mean', device=self.dev) # sequence pooling layer
self.attentionActivationUnit = AttentionActivationUnit(attMLPInfo, activation,
PReLuInit, initStd) # attention activation unit
self.sequenceAttentionPooling = SequencePoolingLayer(mod='attention', device=self.dev) # sequence pooling layer
self.to(self.dev)
def forward(self, movieIdSequence,ads, movieFeature):
movieFeatSequence = movieFeature[movieIdSequence]
adsFeat = movieFeature[ads]
movieIdFeat = self.embeddingGroups['MovieId'](movieFeatSequence[:, :, 0]) # (B, SeqLen, 16)
movieGenreFeat = self.embeddingGroups['Genre'](movieFeatSequence[:, :, 1:]) # (B, SeqLen, 6, 8)
movieGenreFeat = self.sequenceMeanPooling(movieGenreFeat, movieFeatSequence[:, :, 1:] > 0) # (B, SeqLen, 8)
#print(movieGenreFeat)
#input()
adsIdFeat = self.embeddingGroups['MovieId'](adsFeat[:, 0]) # (B, 16)
adsGenreFeat = self.embeddingGroups['Genre'](adsFeat[:, 1:]) # (B, 6, 8)
adsGenreFeat = self.sequenceMeanPooling(adsGenreFeat, adsFeat[:, 1:] > 0) # (B, 8)
adsEmbedding = torch.cat((adsIdFeat, adsGenreFeat), dim=-1) # (B, 24)
movieEmbedding = torch.cat((movieIdFeat, movieGenreFeat), dim=-1) # (B, SeqLen, 24)
attentionWeights = self.attentionActivationUnit(movieEmbedding, adsEmbedding) # (B, SeqLen, 1)
movieSequenceEmbedding = self.sequenceAttentionPooling(movieEmbedding, attentionWeights) # (B, 24)
return movieSequenceEmbedding,adsEmbedding
def forward_FR(self, movieIdSequence,ads, movieFeature):
movieSequenceEmbedding,adsEmbedding=self.forward(movieIdSequence,ads, movieFeature)
out=torch.cat((movieSequenceEmbedding,adsEmbedding),dim=0)
return out
class DIN(nn.Module):
def __init__(self, embeddingGroupInfo, MLPInfo, attMLPInfo, activation='PReLU',
PReLuInit=0.25, isUseBN=True, l2RegEmbedding=1e-6,
dropoutRate=0.0, initStd=0.0001, device=torch.device('cpu')):
super(DIN, self).__init__()
self.l2RegEmbeddding = l2RegEmbedding
self.dev = device
self.MLP = MLP(MLPInfo, activation, PReLuInit, isUseBN, dropoutRate) # MLP
self.output = nn.Linear(MLPInfo[-1], 2) # output layer
self.to(self.dev)
def forward(self, m1,m2,a1,a2):
# interaction: combine the two partial embeddings from the bottom model(s)
movieSequenceEmbedding=m1+m2
adsEmbedding=a1+a2
# MLP inputs
x = torch.cat((movieSequenceEmbedding, adsEmbedding), dim=-1)
x = self.MLP(x)
x = F.softmax(self.output(x), dim=1)
return x # (B, 2)
def regLoss(self):
totalRegLoss = torch.zeros(size=(1,), device=self.dev)
for name, param in self.named_parameters():
if 'embedding' in name and 'MovieId' in name and 'weight' in name:
totalRegLoss += torch.sum(self.l2RegEmbeddding * param*param)
return totalRegLoss
def loss(self, m1,m2,a1,a2,label, lossFunc):
preds = self.forward(m1,m2,a1,a2)
loss = lossFunc(preds[:, 1], label.float(), reduction='mean') + self.regLoss()
return loss
def predict(self, m1,m2,a1,a2):
preds = self.forward(m1,m2,a1,a2)[:, 1]
return preds.cpu().detach().numpy()
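# Illustrative wiring sketch (hypothetical shapes and constructor arguments, not taken
# from the original training script):
#   bottom = Bottom(embeddingGroupInfo={'MovieId': (4000, 16), 'Genre': (20, 8)},
#                   MLPInfo=[48, 200, 80], attMLPInfo=[72, 36])
#   seqEmb, adsEmb = bottom(movieIdSequence, ads, movieFeature)  # (B, 24), (B, 24)
#   din = DIN(embeddingGroupInfo={}, MLPInfo=[48, 200, 80], attMLPInfo=[72, 36])
#   probs = din.predict(seqEmb, seqEmb, adsEmb, adsEmb)          # (B,) click probabilities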
class SequencePoolingLayer(nn.Module):
def __init__(self, mod='mean', device=torch.device('cpu')):
super(SequencePoolingLayer, self).__init__()
self.mod = mod
self.dev = device
self.eps = torch.FloatTensor([1e-8]).to(self.dev)
def forward(self, x, mask):
if self.mod == 'mean':
length = torch.sum(mask.type(torch.float32), dim=-1, keepdim=True) # (..., dim, 6) -> (...,dim, 1)
x = torch.sum(x, dim=-2, keepdim=False) # (..., dim, 6, 8) -> (..., dim, 8)
x = torch.div(x, length.type(torch.float32) + self.eps) # (..., dim, 8)
elif self.mod == 'attention':
attentionWeights = torch.repeat_interleave(mask, x.shape[-1], dim=-1) # (..., dim, 1) -> (.... dim, E)
x = torch.mul(x, attentionWeights) # (..., dim, E)
x = torch.sum(x, dim=-2, keepdim=False) # (..., dim, E) -> (..., E)
else:
pass
return x
class AttentionActivationUnit(nn.Module):
def __init__(self, attMLPInfo, activation='PReLu', PReLuInit=0.25, initStd=0.0001):
super(AttentionActivationUnit, self).__init__()
self.MLP = MLP(attMLPInfo, activation, PReLuInit, isUseBN=False, dropoutRate=0.0, initStd=initStd)
self.output = nn.Linear(attMLPInfo[-1], 1)
def forward(self, x, target):
target = torch.unsqueeze(target, dim=1) # (B, 1, 24)
target = torch.repeat_interleave(target, x.shape[-2], dim=1) # (B, SeqLen, 24)
product = torch.mul(x, target) # (B, SeqLen, 24)
# product = torch.sum(product, dim=-1, keepdim=True) # (B, SeqLen, 1)
x = torch.cat((x, target, product), dim=2) # (B, SeqLen, 72)
x = self.MLP(x)
x = self.output(x)
# product = torch.sum(product, dim=-1, keepdim=True)
# product = F.softmax(product, dim=1)
return x # (B, SeqLen, 1)
class Dice(nn.Module):
def __init__(self):
super(Dice, self).__init__()
pass
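# Note: Dice above is an unimplemented stub; with the default activation='PReLU' it is never
# instantiated. A minimal data-adaptive activation in the spirit of the DIN paper (an
# assumption, not the authors' implementation) could look like:
#   class Dice(nn.Module):
#       def __init__(self):
#           super(Dice, self).__init__()
#           self.alpha = nn.Parameter(torch.zeros(1))
#       def forward(self, x):
#           p = torch.sigmoid((x - x.mean(0, keepdim=True)) / (x.std(0, keepdim=True) + 1e-8))
#           return p * x + (1.0 - p) * self.alpha * x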
|
[
"torch.nn.Dropout",
"torch.repeat_interleave",
"torch.nn.PReLU",
"torch.nn.ModuleList",
"torch.nn.Embedding",
"torch.unsqueeze",
"torch.FloatTensor",
"torch.cat",
"torch.nn.BatchNorm1d",
"torch.mul",
"torch.nn.Linear",
"torch.nn.ModuleDict",
"torch.device",
"torch.zeros",
"torch.sum"
] |
[((324, 339), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (337, 339), True, 'import torch.nn as nn\n'), ((1162, 1181), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1174, 1181), False, 'import torch\n'), ((1283, 1298), 'torch.nn.ModuleDict', 'nn.ModuleDict', ([], {}), '()\n', (1296, 1298), True, 'import torch.nn as nn\n'), ((2908, 2952), 'torch.cat', 'torch.cat', (['(adsIdFeat, adsGenreFeat)'], {'dim': '(-1)'}), '((adsIdFeat, adsGenreFeat), dim=-1)\n', (2917, 2952), False, 'import torch\n'), ((3000, 3048), 'torch.cat', 'torch.cat', (['(movieIdFeat, movieGenreFeat)'], {'dim': '(-1)'}), '((movieIdFeat, movieGenreFeat), dim=-1)\n', (3009, 3048), False, 'import torch\n'), ((3506, 3562), 'torch.cat', 'torch.cat', (['(movieSequenceEmbedding, adsEmbedding)'], {'dim': '(0)'}), '((movieSequenceEmbedding, adsEmbedding), dim=0)\n', (3515, 3562), False, 'import torch\n'), ((3816, 3835), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (3828, 3835), False, 'import torch\n'), ((4059, 4084), 'torch.nn.Linear', 'nn.Linear', (['MLPInfo[-1]', '(2)'], {}), '(MLPInfo[-1], 2)\n', (4068, 4084), True, 'import torch.nn as nn\n'), ((4290, 4347), 'torch.cat', 'torch.cat', (['(movieSequenceEmbedding, adsEmbedding)'], {'dim': '(-1)'}), '((movieSequenceEmbedding, adsEmbedding), dim=-1)\n', (4299, 4347), False, 'import torch\n'), ((4497, 4536), 'torch.zeros', 'torch.zeros', ([], {'size': '(1,)', 'device': 'self.dev'}), '(size=(1,), device=self.dev)\n', (4508, 4536), False, 'import torch\n'), ((5202, 5221), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (5214, 5221), False, 'import torch\n'), ((6426, 6454), 'torch.nn.Linear', 'nn.Linear', (['attMLPInfo[-1]', '(1)'], {}), '(attMLPInfo[-1], 1)\n', (6435, 6454), True, 'import torch.nn as nn\n'), ((6510, 6540), 'torch.unsqueeze', 'torch.unsqueeze', (['target'], {'dim': '(1)'}), '(target, dim=1)\n', (6525, 6540), False, 'import torch\n'), ((6573, 6624), 'torch.repeat_interleave', 'torch.repeat_interleave', (['target', 'x.shape[-2]'], {'dim': '(1)'}), '(target, x.shape[-2], dim=1)\n', (6596, 6624), False, 'import torch\n'), ((6663, 6683), 'torch.mul', 'torch.mul', (['x', 'target'], {}), '(x, target)\n', (6672, 6683), False, 'import torch\n'), ((6800, 6838), 'torch.cat', 'torch.cat', (['(x, target, product)'], {'dim': '(2)'}), '((x, target, product), dim=2)\n', (6809, 6838), False, 'import torch\n'), ((5585, 5620), 'torch.sum', 'torch.sum', (['x'], {'dim': '(-2)', 'keepdim': '(False)'}), '(x, dim=-2, keepdim=False)\n', (5594, 5620), False, 'import torch\n'), ((434, 471), 'torch.nn.Linear', 'nn.Linear', (['MLPInfo[i]', 'MLPInfo[i + 1]'], {}), '(MLPInfo[i], MLPInfo[i + 1])\n', (443, 471), True, 'import torch.nn as nn\n'), ((607, 634), 'torch.nn.PReLU', 'nn.PReLU', (['(1)'], {'init': 'PReLuInit'}), '(1, init=PReLuInit)\n', (615, 634), True, 'import torch.nn as nn\n'), ((775, 798), 'torch.nn.Dropout', 'nn.Dropout', (['dropoutRate'], {}), '(dropoutRate)\n', (785, 798), True, 'import torch.nn as nn\n'), ((1470, 1517), 'torch.nn.Embedding', 'nn.Embedding', (['value[0]', 'value[1]'], {'padding_idx': '(0)'}), '(value[0], value[1], padding_idx=0)\n', (1482, 1517), True, 'import torch.nn as nn\n'), ((1582, 1614), 'torch.nn.Embedding', 'nn.Embedding', (['value[0]', 'value[1]'], {}), '(value[0], value[1])\n', (1594, 1614), True, 'import torch.nn as nn\n'), ((4703, 4750), 'torch.sum', 'torch.sum', (['(self.l2RegEmbeddding * param * param)'], {}), '(self.l2RegEmbeddding * param * param)\n', (4712, 4750), False, 'import 
torch\n'), ((5349, 5375), 'torch.FloatTensor', 'torch.FloatTensor', (['[1e-08]'], {}), '([1e-08])\n', (5366, 5375), False, 'import torch\n'), ((5815, 5865), 'torch.repeat_interleave', 'torch.repeat_interleave', (['mask', 'x.shape[-1]'], {'dim': '(-1)'}), '(mask, x.shape[-1], dim=-1)\n', (5838, 5865), False, 'import torch\n'), ((5917, 5947), 'torch.mul', 'torch.mul', (['x', 'attentionWeights'], {}), '(x, attentionWeights)\n', (5926, 5947), False, 'import torch\n'), ((5982, 6017), 'torch.sum', 'torch.sum', (['x'], {'dim': '(-2)', 'keepdim': '(False)'}), '(x, dim=-2, keepdim=False)\n', (5991, 6017), False, 'import torch\n'), ((550, 580), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['MLPInfo[i + 1]'], {}), '(MLPInfo[i + 1])\n', (564, 580), True, 'import torch.nn as nn\n')]
|
#!/usr/bin/env python3
# Copyright 2020 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import unittest
from cros.factory.probe.runtime_probe import probe_config_types
from cros.factory.utils import json_utils
class ProbeStatementDefinitionBuilderTest(unittest.TestCase):
def testBuildProbeStatementDefinition(self):
builder = probe_config_types.ProbeStatementDefinitionBuilder('category_x')
builder.AddProbeFunction('func_1', 'This is func 1.')
builder.AddProbeFunction('func2', 'This is func 2.')
builder.AddIntOutputField('field1', 'This is field1')
builder.AddStrOutputField('field2', 'This is field2')
builder.AddHexOutputField('field3', 'This is field3')
builder.AddIntOutputField('field_only_func1',
'This is field ?',
probe_function_names=['func_1'])
d = builder.Build()
self.assertEqual(d.category_name, 'category_x')
self.assertCountEqual(list(d.expected_fields.keys()),
['field1', 'field2', 'field3', 'field_only_func1'])
self.assertCountEqual(list(d.probe_functions.keys()), ['func_1', 'func2'])
self.assertCountEqual(
[f.name for f in d.probe_functions['func_1'].output_fields],
['field1', 'field2', 'field3', 'field_only_func1'])
self.assertCountEqual(
[f.name for f in d.probe_functions['func2'].output_fields],
['field1', 'field2', 'field3'])
class ConcreteProbeStatementDefinitionTestBase(unittest.TestCase):
def setUp(self):
builder = probe_config_types.ProbeStatementDefinitionBuilder('category_x')
builder.AddProbeFunction('func_1', 'This is func 1.')
builder.AddProbeFunction('func2', 'This is func 2.')
builder.AddIntOutputField('int_field', '')
builder.AddStrOutputField('str_field', '')
builder.AddStrOutputField('str_field_started_with_a',
'',
value_pattern=re.compile('a.*'))
builder.AddHexOutputField('hex_field', '')
builder.AddHexOutputField('hex_field_three_digits', '', num_value_digits=3)
self.probe_statement_definition = builder.Build()
class ProbeStatementDefinitionTest(ConcreteProbeStatementDefinitionTestBase):
def _GenerateExpectResult(self, comp_name, func_name, expect_field,
func_arg=None, information=None):
statement = {
'eval': {
func_name: func_arg or {}
},
'expect': expect_field
}
if information is not None:
statement['information'] = information
return probe_config_types.ComponentProbeStatement('category_x', comp_name,
statement)
def testGenerateProbeStatementNoField(self):
result = self.probe_statement_definition.GenerateProbeStatement(
'comp_1', 'func_1', {})
self.assertEqual(result,
self._GenerateExpectResult('comp_1', 'func_1', {}))
def testGenerateProbeStatementIntField(self):
result = self.probe_statement_definition.GenerateProbeStatement(
'comp_1', 'func_1', {'int_field': None})
self.assertEqual(
result,
self._GenerateExpectResult('comp_1', 'func_1',
{'int_field': [False, 'int']}))
result = self.probe_statement_definition.GenerateProbeStatement(
'comp_1', 'func_1', {'int_field': 3})
self.assertEqual(
result,
self._GenerateExpectResult('comp_1', 'func_1',
{'int_field': [True, 'int', '!eq 3']}))
def testGenerateProbeStatementStrField(self):
result = self.probe_statement_definition.GenerateProbeStatement(
'comp_1', 'func_1', {'str_field': None})
self.assertEqual(
result,
self._GenerateExpectResult('comp_1', 'func_1',
{'str_field': [False, 'str']}))
result = self.probe_statement_definition.GenerateProbeStatement(
'comp_1', 'func_1', {'str_field': 'sss'})
self.assertEqual(
result,
self._GenerateExpectResult('comp_1', 'func_1',
{'str_field': [True, 'str', '!eq sss']}))
result = self.probe_statement_definition.GenerateProbeStatement(
'comp_1', 'func_1', {'str_field_started_with_a': 'a_value'})
self.assertEqual(
result,
self._GenerateExpectResult(
'comp_1', 'func_1',
{'str_field_started_with_a': [True, 'str', '!eq a_value']}))
with self.assertRaises(ValueError): # format error
self.probe_statement_definition.GenerateProbeStatement(
'comp_1', 'func_1', {'str_field_started_with_a': 'b_value'})
# Ignore the regular expression check if the given expected value is also
# an regular expression pattern.
result = self.probe_statement_definition.GenerateProbeStatement(
'comp_1', 'func_1',
{'str_field_started_with_a': re.compile('x.*')})
self.assertEqual(
result,
self._GenerateExpectResult('comp_1', 'func_1', {
'str_field_started_with_a': [True, 'str', '!re x.*']
}))
def testGenerateProbeStatementHexField(self):
result = self.probe_statement_definition.GenerateProbeStatement(
'comp_1', 'func_1', {'hex_field': '0AAAA'})
self.assertEqual(
result,
self._GenerateExpectResult(
'comp_1', 'func_1', {'hex_field': [True, 'hex', '!eq 0x0AAAA']}))
with self.assertRaises(ValueError):
self.probe_statement_definition.GenerateProbeStatement(
'comp_1', 'func_1', {'hex_field': 'xyz'})
result = self.probe_statement_definition.GenerateProbeStatement(
'comp_1', 'func_1', {'hex_field_three_digits': None})
self.assertEqual(
result,
self._GenerateExpectResult('comp_1', 'func_1',
{'hex_field_three_digits': [False, 'hex']}))
result = self.probe_statement_definition.GenerateProbeStatement(
'comp_1', 'func_1', {'hex_field_three_digits': 'B3F'})
self.assertEqual(
result,
self._GenerateExpectResult(
'comp_1', 'func_1',
{'hex_field_three_digits': [True, 'hex', '!eq 0xB3F']}))
with self.assertRaises(ValueError):
self.probe_statement_definition.GenerateProbeStatement(
'comp_1', 'func_1', {'hex_field_three_digits': 'B3FF'})
def testGenerateProbeStatementList(self):
result = self.probe_statement_definition.GenerateProbeStatement(
'comp_1', 'func_1', [{
'hex_field': '0AAAA'
}])
self.assertEqual(
result,
self._GenerateExpectResult('comp_1', 'func_1',
{'hex_field': [True, 'hex', '!eq 0x0AAAA']}))
result = self.probe_statement_definition.GenerateProbeStatement(
'comp_1', 'func_1', [{
'hex_field': '0AAAA'
}, {
'str_field': 'sss'
}])
self.assertEqual(
result,
self._GenerateExpectResult('comp_1', 'func_1', [{
'hex_field': [True, 'hex', '!eq 0x0AAAA']
}, {
'str_field': [True, 'str', '!eq sss']
}]))
def testGenerateProbeStatementExtraInformation(self):
result = self.probe_statement_definition.GenerateProbeStatement(
'comp_1', 'func_1', {
'str_field': 'sss',
'int_field': 3,
'hex_field': '0BAD'}, information={'comp_group': 'other_name'})
self.assertEqual(
result,
self._GenerateExpectResult(
'comp_1', 'func_1', {
'str_field': [True, 'str', '!eq sss'],
'int_field': [True, 'int', '!eq 3'],
'hex_field': [True, 'hex', '!eq 0x0BAD']}, information={
'comp_group': 'other_name'}))
def testGenerateProbeStatementWithArgument(self):
result = self.probe_statement_definition.GenerateProbeStatement(
'comp_1', 'func_1', {}, probe_function_argument={'arg_1': 'aaa'})
self.assertEqual(result,
self._GenerateExpectResult('comp_1', 'func_1', {},
func_arg={'arg_1': 'aaa'}))
class ProbeConfigPayloadTest(ConcreteProbeStatementDefinitionTestBase):
def testAll(self):
p = probe_config_types.ProbeConfigPayload()
p.AddComponentProbeStatement(
self.probe_statement_definition.GenerateProbeStatement(
'comp_1', 'func_1', {'int_field': 1}))
p.AddComponentProbeStatement(
self.probe_statement_definition.GenerateProbeStatement(
'comp_2', 'func_1', {'int_field': 2}))
p.AddComponentProbeStatement(
self.probe_statement_definition.GenerateProbeStatement(
'comp_3', 'func_2', {'int_field': 3}))
with self.assertRaises(ValueError):  # component name conflict
p.AddComponentProbeStatement(
self.probe_statement_definition.GenerateProbeStatement(
'comp_2', 'func_1', {'int_field': 4}))
with self.assertRaises(ValueError):  # probe statement conflict.
p.AddComponentProbeStatement(
self.probe_statement_definition.GenerateProbeStatement(
'comp_4', 'func_1', {'int_field': 2}))
result = p.DumpToString()
self.assertEqual(
json_utils.LoadStr(result),
{
'category_x': {
'comp_1': {
'eval': {'func_1': {}},
'expect': {'int_field': [True, 'int', '!eq 1']}
},
'comp_2': {
'eval': {'func_1': {}},
'expect': {'int_field': [True, 'int', '!eq 2']}
},
'comp_3': {
'eval': {'func_2': {}},
'expect': {'int_field': [True, 'int', '!eq 3']}
},
}
})
class ComponentProbeStatementTest(unittest.TestCase):
def testIdenticalStatements(self):
cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1', {
'eval': {
'func_1': {}
},
'expect': {
'int_field': [True, 'int', '!eq 1']
}
})
cps2 = probe_config_types.ComponentProbeStatement('category1', 'comp1', {
'eval': {
'func_1': {}
},
'expect': {
'int_field': [True, 'int', '!eq 1']
}
})
self.assertEqual(cps1.statement_hash, cps2.statement_hash)
self.assertEqual(cps1, cps2)
def testHashCompNamesDiffer(self):
cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1', {
'eval': {
'func_1': {}
},
'expect': {
'int_field': [True, 'int', '!eq 1']
}
})
cps2 = probe_config_types.ComponentProbeStatement('category1', 'comp2', {
'eval': {
'func_1': {}
},
'expect': {
'int_field': [True, 'int', '!eq 1']
}
})
self.assertEqual(cps1.statement_hash, cps2.statement_hash)
self.assertNotEqual(cps1, cps2)
def testHashCategoryNamesDiffer(self):
cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1', {
'eval': {
'func_1': {}
},
'expect': {
'int_field': [True, 'int', '!eq 1']
}
})
cps2 = probe_config_types.ComponentProbeStatement('category2', 'comp1', {
'eval': {
'func_1': {}
},
'expect': {
'int_field': [True, 'int', '!eq 1']
}
})
self.assertNotEqual(cps1.statement_hash, cps2.statement_hash)
self.assertNotEqual(cps1, cps2)
def testHashFunctionNamesDiffer(self):
cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1', {
'eval': {
'func_1': {}
},
'expect': {
'int_field': [True, 'int', '!eq 1']
}
})
cps2 = probe_config_types.ComponentProbeStatement('category1', 'comp1', {
'eval': {
'func_2': {}
},
'expect': {
'int_field': [True, 'int', '!eq 1']
}
})
self.assertNotEqual(cps1.statement_hash, cps2.statement_hash)
self.assertNotEqual(cps1, cps2)
def testFromDictSucceed(self):
self.assertEqual(
probe_config_types.ComponentProbeStatement('category1', 'comp1', {
'eval': {
'func_1': {}
},
'expect': {
'int_field': [True, 'int', '!eq 1']
}
}),
probe_config_types.ComponentProbeStatement.FromDict({
'category1': {
'comp1': {
'eval': {
'func_1': {}
},
'expect': {
'int_field': [True, 'int', '!eq 1']
}
}
}
}))
def testFromDictValueHashMultipleCategories(self):
self.assertRaises(
ValueError, probe_config_types.ComponentProbeStatement.FromDict, {
'category1': {
'comp_name1': {
'eval': {
'func_1': {}
},
'expect': {
'int_field': [True, 'int', '!eq 1']
}
}
},
'category2': {
'comp_name1': {
'eval': {
'func_1': {}
},
'expect': {
'int_field': [True, 'int', '!eq 1']
}
}
},
})
def testFromDictCategoryNotString(self):
self.assertRaises(
ValueError, probe_config_types.ComponentProbeStatement.FromDict, {
123: {
'comp_name1': {
'eval': {
'func_1': {}
},
'expect': {
'int_field': [True, 'int', '!eq 1']
}
}
}
})
def testFromDictMultipleComponents(self):
self.assertRaises(
ValueError, probe_config_types.ComponentProbeStatement.FromDict, {
'category1': {
'comp_name1': {
'eval': {
'func_1': {}
},
'expect': {
'int_field': [True, 'int', '!eq 1']
}
},
'comp_name2': {
'eval': {
'func_1': {}
},
'expect': {
'int_field': [True, 'int', '!eq 1']
}
}
}
})
def testFromDictComponentNameNotString(self):
self.assertRaises(
ValueError, probe_config_types.ComponentProbeStatement.FromDict, {
'category1': {
3.1415926: {
'eval': {
'func_1': {}
},
'expect': {
'int_field': [True, 'int', '!eq 1']
}
}
}
})
def testFromDictMiscErrors(self):
self.assertRaises(ValueError,
probe_config_types.ComponentProbeStatement.FromDict,
{'category1': 100})
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"cros.factory.probe.runtime_probe.probe_config_types.ProbeStatementDefinitionBuilder",
"cros.factory.probe.runtime_probe.probe_config_types.ComponentProbeStatement",
"cros.factory.probe.runtime_probe.probe_config_types.ComponentProbeStatement.FromDict",
"cros.factory.probe.runtime_probe.probe_config_types.ProbeConfigPayload",
"cros.factory.utils.json_utils.LoadStr",
"re.compile"
] |
[((15538, 15553), 'unittest.main', 'unittest.main', ([], {}), '()\n', (15551, 15553), False, 'import unittest\n'), ((447, 511), 'cros.factory.probe.runtime_probe.probe_config_types.ProbeStatementDefinitionBuilder', 'probe_config_types.ProbeStatementDefinitionBuilder', (['"""category_x"""'], {}), "('category_x')\n", (497, 511), False, 'from cros.factory.probe.runtime_probe import probe_config_types\n'), ((1647, 1711), 'cros.factory.probe.runtime_probe.probe_config_types.ProbeStatementDefinitionBuilder', 'probe_config_types.ProbeStatementDefinitionBuilder', (['"""category_x"""'], {}), "('category_x')\n", (1697, 1711), False, 'from cros.factory.probe.runtime_probe import probe_config_types\n'), ((2679, 2757), 'cros.factory.probe.runtime_probe.probe_config_types.ComponentProbeStatement', 'probe_config_types.ComponentProbeStatement', (['"""category_x"""', 'comp_name', 'statement'], {}), "('category_x', comp_name, statement)\n", (2721, 2757), False, 'from cros.factory.probe.runtime_probe import probe_config_types\n'), ((8386, 8425), 'cros.factory.probe.runtime_probe.probe_config_types.ProbeConfigPayload', 'probe_config_types.ProbeConfigPayload', ([], {}), '()\n', (8423, 8425), False, 'from cros.factory.probe.runtime_probe import probe_config_types\n'), ((10061, 10205), 'cros.factory.probe.runtime_probe.probe_config_types.ComponentProbeStatement', 'probe_config_types.ComponentProbeStatement', (['"""category1"""', '"""comp1"""', "{'eval': {'func_1': {}}, 'expect': {'int_field': [True, 'int', '!eq 1']}}"], {}), "('category1', 'comp1', {'eval': {\n 'func_1': {}}, 'expect': {'int_field': [True, 'int', '!eq 1']}})\n", (10103, 10205), False, 'from cros.factory.probe.runtime_probe import probe_config_types\n'), ((10278, 10422), 'cros.factory.probe.runtime_probe.probe_config_types.ComponentProbeStatement', 'probe_config_types.ComponentProbeStatement', (['"""category1"""', '"""comp1"""', "{'eval': {'func_1': {}}, 'expect': {'int_field': [True, 'int', '!eq 1']}}"], {}), "('category1', 'comp1', {'eval': {\n 'func_1': {}}, 'expect': {'int_field': [True, 'int', '!eq 1']}})\n", (10320, 10422), False, 'from cros.factory.probe.runtime_probe import probe_config_types\n'), ((10629, 10773), 'cros.factory.probe.runtime_probe.probe_config_types.ComponentProbeStatement', 'probe_config_types.ComponentProbeStatement', (['"""category1"""', '"""comp1"""', "{'eval': {'func_1': {}}, 'expect': {'int_field': [True, 'int', '!eq 1']}}"], {}), "('category1', 'comp1', {'eval': {\n 'func_1': {}}, 'expect': {'int_field': [True, 'int', '!eq 1']}})\n", (10671, 10773), False, 'from cros.factory.probe.runtime_probe import probe_config_types\n'), ((10846, 10990), 'cros.factory.probe.runtime_probe.probe_config_types.ComponentProbeStatement', 'probe_config_types.ComponentProbeStatement', (['"""category1"""', '"""comp2"""', "{'eval': {'func_1': {}}, 'expect': {'int_field': [True, 'int', '!eq 1']}}"], {}), "('category1', 'comp2', {'eval': {\n 'func_1': {}}, 'expect': {'int_field': [True, 'int', '!eq 1']}})\n", (10888, 10990), False, 'from cros.factory.probe.runtime_probe import probe_config_types\n'), ((11204, 11348), 'cros.factory.probe.runtime_probe.probe_config_types.ComponentProbeStatement', 'probe_config_types.ComponentProbeStatement', (['"""category1"""', '"""comp1"""', "{'eval': {'func_1': {}}, 'expect': {'int_field': [True, 'int', '!eq 1']}}"], {}), "('category1', 'comp1', {'eval': {\n 'func_1': {}}, 'expect': {'int_field': [True, 'int', '!eq 1']}})\n", (11246, 11348), False, 'from cros.factory.probe.runtime_probe import 
probe_config_types\n'), ((11421, 11565), 'cros.factory.probe.runtime_probe.probe_config_types.ComponentProbeStatement', 'probe_config_types.ComponentProbeStatement', (['"""category2"""', '"""comp1"""', "{'eval': {'func_1': {}}, 'expect': {'int_field': [True, 'int', '!eq 1']}}"], {}), "('category2', 'comp1', {'eval': {\n 'func_1': {}}, 'expect': {'int_field': [True, 'int', '!eq 1']}})\n", (11463, 11565), False, 'from cros.factory.probe.runtime_probe import probe_config_types\n'), ((11782, 11926), 'cros.factory.probe.runtime_probe.probe_config_types.ComponentProbeStatement', 'probe_config_types.ComponentProbeStatement', (['"""category1"""', '"""comp1"""', "{'eval': {'func_1': {}}, 'expect': {'int_field': [True, 'int', '!eq 1']}}"], {}), "('category1', 'comp1', {'eval': {\n 'func_1': {}}, 'expect': {'int_field': [True, 'int', '!eq 1']}})\n", (11824, 11926), False, 'from cros.factory.probe.runtime_probe import probe_config_types\n'), ((11999, 12143), 'cros.factory.probe.runtime_probe.probe_config_types.ComponentProbeStatement', 'probe_config_types.ComponentProbeStatement', (['"""category1"""', '"""comp1"""', "{'eval': {'func_2': {}}, 'expect': {'int_field': [True, 'int', '!eq 1']}}"], {}), "('category1', 'comp1', {'eval': {\n 'func_2': {}}, 'expect': {'int_field': [True, 'int', '!eq 1']}})\n", (12041, 12143), False, 'from cros.factory.probe.runtime_probe import probe_config_types\n'), ((9388, 9414), 'cros.factory.utils.json_utils.LoadStr', 'json_utils.LoadStr', (['result'], {}), '(result)\n', (9406, 9414), False, 'from cros.factory.utils import json_utils\n'), ((12371, 12515), 'cros.factory.probe.runtime_probe.probe_config_types.ComponentProbeStatement', 'probe_config_types.ComponentProbeStatement', (['"""category1"""', '"""comp1"""', "{'eval': {'func_1': {}}, 'expect': {'int_field': [True, 'int', '!eq 1']}}"], {}), "('category1', 'comp1', {'eval': {\n 'func_1': {}}, 'expect': {'int_field': [True, 'int', '!eq 1']}})\n", (12413, 12515), False, 'from cros.factory.probe.runtime_probe import probe_config_types\n'), ((12614, 12775), 'cros.factory.probe.runtime_probe.probe_config_types.ComponentProbeStatement.FromDict', 'probe_config_types.ComponentProbeStatement.FromDict', (["{'category1': {'comp1': {'eval': {'func_1': {}}, 'expect': {'int_field': [\n True, 'int', '!eq 1']}}}}"], {}), "({'category1': {'comp1':\n {'eval': {'func_1': {}}, 'expect': {'int_field': [True, 'int', '!eq 1']}}}}\n )\n", (12665, 12775), False, 'from cros.factory.probe.runtime_probe import probe_config_types\n'), ((2057, 2074), 're.compile', 're.compile', (['"""a.*"""'], {}), "('a.*')\n", (2067, 2074), False, 'import re\n'), ((5048, 5065), 're.compile', 're.compile', (['"""x.*"""'], {}), "('x.*')\n", (5058, 5065), False, 'import re\n')]
|
# Generated by Django 2.2.7 on 2019-11-19 21:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('submissions', '0006_merge_20191113_0542'),
]
operations = [
migrations.CreateModel(
name='SubmissionTag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, unique=True)),
],
),
migrations.AddField(
model_name='submission',
name='tags',
field=models.ManyToManyField(to='submissions.SubmissionTag', verbose_name='tags'),
),
]
|
[
"django.db.models.CharField",
"django.db.models.ManyToManyField",
"django.db.models.AutoField"
] |
[((642, 717), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'to': '"""submissions.SubmissionTag"""', 'verbose_name': '"""tags"""'}), "(to='submissions.SubmissionTag', verbose_name='tags')\n", (664, 717), False, 'from django.db import migrations, models\n'), ((342, 435), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (358, 435), False, 'from django.db import migrations, models\n'), ((459, 504), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'unique': '(True)'}), '(max_length=100, unique=True)\n', (475, 504), False, 'from django.db import migrations, models\n')]
|
import numpy as np
import matplotlib.pyplot as plt
PI = np.pi
# =========================define sinc
# ---------------normalized
def sinc1(x):
PI = np.pi
x = np.array(x)
y = np.where(np.abs(PI * x) < 1e-38, 1.0, np.sin(PI * x) / (PI * x))
return y
def sinc_interpolation(x, t, T):
ns = np.arange(x.size)
print(ns, "============")
y = []
for tt in t:
y.append(np.sum(x * sinc1((tt - ns * T) / T)))
return np.array(y)
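# The loop above evaluates the Whittaker-Shannon interpolation formula
#   x(t) = sum_n x[n] * sinc((t - n*T) / T)
# at every requested instant in `t`, at a cost of O(len(t) * x.size).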
# =========================test sinc definition
f0 = 100
Ns = 2000
Tp = 20.0 / Ns
t = np.linspace(-10, 10, Ns)
t2 = np.linspace(-10, 10, Ns * 2)
y1 = sinc1(t / Tp)
x = np.sin(2 * PI * f0 * t)
print(x.shape)
y = sinc_interpolation(x, t2, Tp)
print(y.shape, "===")
yfft = np.fft.fftshift(np.fft.fft(y))
plt.figure()
plt.subplot(131)
plt.plot(t, x, '^b')
plt.plot(t2, y, '+r')
plt.legend(['original', 'sinc interpolated'])
plt.title('sinc(t/Tp), ' + "Tp=" + str(Tp))
plt.xlabel('Time/s')
plt.ylabel('Amplitude')
plt.grid()
plt.show()
|
[
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"numpy.abs",
"matplotlib.pyplot.plot",
"numpy.fft.fft",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.array",
"numpy.arange",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid"
] |
[((556, 580), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', 'Ns'], {}), '(-10, 10, Ns)\n', (567, 580), True, 'import numpy as np\n'), ((586, 614), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', '(Ns * 2)'], {}), '(-10, 10, Ns * 2)\n', (597, 614), True, 'import numpy as np\n'), ((640, 663), 'numpy.sin', 'np.sin', (['(2 * PI * f0 * t)'], {}), '(2 * PI * f0 * t)\n', (646, 663), True, 'import numpy as np\n'), ((777, 789), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (787, 789), True, 'import matplotlib.pyplot as plt\n'), ((790, 806), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(131)'], {}), '(131)\n', (801, 806), True, 'import matplotlib.pyplot as plt\n'), ((807, 827), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'x', '"""^b"""'], {}), "(t, x, '^b')\n", (815, 827), True, 'import matplotlib.pyplot as plt\n'), ((828, 849), 'matplotlib.pyplot.plot', 'plt.plot', (['t2', 'y', '"""+r"""'], {}), "(t2, y, '+r')\n", (836, 849), True, 'import matplotlib.pyplot as plt\n'), ((850, 895), 'matplotlib.pyplot.legend', 'plt.legend', (["['original', 'sinc interpolated']"], {}), "(['original', 'sinc interpolated'])\n", (860, 895), True, 'import matplotlib.pyplot as plt\n'), ((940, 960), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time/s"""'], {}), "('Time/s')\n", (950, 960), True, 'import matplotlib.pyplot as plt\n'), ((961, 984), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amplitude"""'], {}), "('Amplitude')\n", (971, 984), True, 'import matplotlib.pyplot as plt\n'), ((985, 995), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (993, 995), True, 'import matplotlib.pyplot as plt\n'), ((998, 1008), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1006, 1008), True, 'import matplotlib.pyplot as plt\n'), ((169, 180), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (177, 180), True, 'import numpy as np\n'), ((311, 328), 'numpy.arange', 'np.arange', (['x.size'], {}), '(x.size)\n', (320, 328), True, 'import numpy as np\n'), ((455, 466), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (463, 466), True, 'import numpy as np\n'), ((761, 774), 'numpy.fft.fft', 'np.fft.fft', (['y'], {}), '(y)\n', (771, 774), True, 'import numpy as np\n'), ((198, 212), 'numpy.abs', 'np.abs', (['(PI * x)'], {}), '(PI * x)\n', (204, 212), True, 'import numpy as np\n'), ((227, 241), 'numpy.sin', 'np.sin', (['(PI * x)'], {}), '(PI * x)\n', (233, 241), True, 'import numpy as np\n')]
|
from string import ascii_lowercase
import functools
from itertools import combinations
def generate_binary(n):
"""
Function returns generator with binary sequences of a set length
:param n: length of a binary sequence
:return: generator with binary sequence
"""
if n == 0:
yield ""
else:
for c in generate_binary(n - 1):
yield "0" + c
yield "1" + c
def find_value(zipped_list, x):
for a, b in zipped_list:
if a == x:
return b
return -1
def replace_mapping(zipped_list, x):
if x == 'T':
return 1
elif x == 'F':
return 0
elif x in (ascii_lowercase + 'TF'):
return find_value(zipped_list, x)
else:
return x
def get_variables(expression):
"""
Functions filters the expression for variables and returns them
As a variable we mean any lower case character
:param expression: expression to search in
:return: list with variables from expression
"""
variables = []
for variable in expression:
if variable in ascii_lowercase and variable not in variables:
variables.append(variable)
return variables
def calculate_onp(expression, values):
"""
Function calculates a value of an expression in reverse polish notation
:param expression: Expression in RPN given as a string.
:param values: binary sequence with values to be substituted at the corresponding positions. Also a string
:return: Bool value of an expression
Warning: function will only work on a correct RPN expression and will not return any warnings in case of errors
"""
zipped_list = list(zip(get_variables(expression), list(values)))
expression = list(map(lambda x: replace_mapping(zipped_list, x), expression))
operators = {'^': lambda x, y: bool(x) ^ bool(y), '&': lambda x, y: bool(x) and bool(y),
'|': lambda x, y: bool(x) or bool(y), '/': lambda x, y: not (bool(x) and bool(y)),
'>': lambda x, y: not bool(x) or bool(y)}
stack = []
while len(expression) > 0:
if expression[0] in ['0', '1']:
stack.append(int(expression[0]))
else:
if expression[0] == '~':
top = not bool(stack.pop())
stack.append(top)
else:
e1 = int(stack.pop())
e2 = int(stack.pop())
stack.append(operators[expression[0]](e2, e1))
del expression[0]
return stack[0]
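# Example (sketch): "a&b" in RPN is "ab&"; values are bound to variables in order of first
# appearance, so:
#   calculate_onp('ab&', '11')  ->  True
#   calculate_onp('ab>', '10')  ->  False   (implication with a=1, b=0)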
def is_associative(tkn, associativity_type):
if tkn == '>' and associativity_type == 'r': # because only in case of > it matters.
return False
return True
def concat(s1, s2):
"""
Helper function to reduce expressions
:param s1: Iterable over a binary sequence that may contain '_'
:param s2: Iterable over a binary sequence that may contain '_'
:return: Merged string if the inputs differ in exactly one position (that position becomes '_'), otherwise False
"""
w = ""
lz = 0
for z1, z2 in zip(s1, s2):
if z1 == z2:
w += z1
else:
lz += 1
w += "_"
if lz == 1:
return w
return False
def reduce_(s):
"""
Main reduce function
:param s: Set with values
:return: reduced set
"""
result = set()
b2 = False
for e1 in s:
b1 = False
for e2 in s:
v = concat(e1, e2)
if v:
result.add(v)
b1 = b2 = True
if not b1:
result.add(e1)
if b2:
return reduce_(result)
return result
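# Example (sketch): over variables (a, b) the minterms of "a" are {'10', '11'};
#   reduce_({'10', '11'})  ->  {'1_'}   # b is eliminated, leaving just "a"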
def expression_to_string(s):
"""
Helper function to change a reduced set to human-readable form
:param s: Set with values
:return: String made from input in pattern: (expression)|(expression)|(expression) or T (if expression is tautology)
"""
result2 = ""
for e1 in s:
result = ""
for i in range(0, len(e1)):
if e1[i] == '_':
continue
if e1[i] == '0':
result += '~'
result += ascii_lowercase[i] + "&"
result2 += '(' + result[:-1] + ')|'
if result2 == '()|':
return 'T'
return result2[:-1]
def trim_expression(expression):
"""
Basic expression trimming
:param expression: takes an expression which in most cases matches a pattern: (expression) and trims brackets
:return: expression with trimmed brackets
"""
e = Expression('')
while len(expression) > 2 and expression[0] == '(' and expression[-1] == ')' and e.check_expression(expression):
expression = expression[1:-1]
return expression
def reduce_tuple(expression):
"""
Function reduces a tuple of string expressions
:param expression: tuple containing expressions. We assume that they do not contain '|'
since in this case they are a product of QuineMcCluskey algorithm
:return: String containing reduced expression or the input one if further reduction was not successful
"""
expression_list = list(expression)
variables = get_variables(str.join('|', expression_list))
binary_generator = generate_binary(len(variables))
incorrect_binaries = []
some_expression = Expression('')
onp_expression = some_expression.convert_to_onp(str.join('|', expression_list))
onp_xor = some_expression.convert_to_onp(functools.reduce(lambda x, y: x + '^' + y, variables))
while True:
try:
x = binary_generator.__next__()
if calculate_onp(onp_expression, x) != calculate_onp(onp_xor, x):
incorrect_binaries.append(x)
except:
break
if len(incorrect_binaries) > 0:
return str.join('|', expression_list)
return '(' + functools.reduce(lambda x, y: x + '^' + y, variables) + ')'
def reduce_xor(expression):
"""
Specific function to reduce xor expressions. It generates combinations of k sub-expressions,
for k from 2 up to the number of sub-expressions, and checks whether each combination is equivalent to var1 ^ var2 ^ var3 ...
:param expression: String expression to be reduced. We assume that it matches a pattern: (expr1)|(expr2)|(expr3) ...
:return: reduced expression in string form or input one if further reduction was not possible
"""
expressions_list = expression.split('|')
n = len(expressions_list)
for a in range(2, n + 1):
for expr in combinations(expressions_list, a): # i feel really bad for this
reduced_sub_expression = reduce_tuple(expr)
prev_expression = str.join('|', expr)
if len(reduced_sub_expression) < len(prev_expression):
for var in list(expr):
del expressions_list[expressions_list.index(var)]
expressions_list.append(reduced_sub_expression)
return reduce_xor(functools.reduce(lambda x, y: '|' + x + y + '|', expressions_list))
return expression
def reduce_brackets(expression):
"""
Function that reduces unessesary brackets. It eliminates situations where between two | there is a expression that doesnt need them
example:
(expr1)|(a)|(expr2) will be evaluated to: (expr1)|a|(expr2)
:param expression: string expression in form (expr1)|(expr2)|(expr3)
:return: reduced expression
"""
expression_list = expression.split('|')
if len(expression_list) == 1:
return trim_expression(expression_list[0])
reduced_expressions = []
for some in expression_list:
if len(some) <= 4:
# we are sure that there will be 2 brackets + we want 1 variable (or variable + negation)
reduced_expressions.append(trim_expression(some))
else:
reduced_expressions.append(some)
return str.join('|', reduced_expressions)
def reduce_logical_expression(expression):
"""
Main function that is responsible for driving program.
It calls functions to check if expression is correct and then reduces expression
:param expression: String expression to be reduced
:return: reduced expression or ERROR if it is not correct
"""
expression_object = Expression(expression)
if not expression_object.check_expression():
return 'ERROR'
expression_in_general_form = expression_object.generate_general_form()
expression_with_xor = reduce_brackets(reduce_xor(expression_in_general_form))
if len(expression_with_xor) < len(expression):
return expression_with_xor
e = reduce_brackets(expression_in_general_form)
if len(e) < len(expression):
return e
return reduce_brackets(expression)
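# Examples (sketch):
#   reduce_logical_expression('a&b|a&~b')  ->  'a'
#   reduce_logical_expression('a|~a')      ->  'T'   # tautology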
class Expression:
"""
Class designed to handle most of expression operations.
It contains map with bindings:
<operator> -> (priority,arguments_number)
Also string with correct signs and expression itself
"""
def __init__(self, expression):
self.general_form = ''
self.correctSigns = '~^&|/>()TF' + ascii_lowercase
self.expression = expression.replace(' ', '')
self.operators = {'~': (4, 1), '^': (3, 2), '&': (2, 2), '|': (2, 2), '/': (2, 2),
'>': (1, 2)} # <operator> -> (priority,arguments_number)
def check_if_brackets_are_correct(self, expression=''):
"""
Helper function to determine whether brackets are placed correctly
:param expression: expression in String form
:return: Bool result of brackets checking
"""
if not expression:
expression = self.expression
brackets = 0
for a in expression:
if a == '(':
brackets += 1
elif a == ')':
brackets -= 1
if brackets < 0:
return False
if brackets == 0:
return True
return False
def check_if_signs_are_correct(self, expression=''):
"""
Simple filter function that checks if expression contains correct signs and is semantically correct
:param expression: String expression to be checked
:return: Bool result
"""
if not expression:
expression = self.expression
if not expression:
return True
if [x for x in expression if x not in self.correctSigns]:
return False
state = True
for single in expression:
if state:
if single in self.operators and self.operators[single][1] == 1 or single in ['(', ')']: # we want ~
# we ignore brackets since they are already checked
continue
elif single in (ascii_lowercase + 'TF'):
state = False
else:
return False
else:
if single in self.operators and self.operators[single][1] == 2: # everything else than ~
state = True
elif single in ['(', ')']:
continue
else:
return False
return not state
def check_expression(self, expression=''):
"""
Higher level interface for checking expression
It calls methods to determine whether expression is correct semantically, in terms of brackets and signs
:param expression: String expression to check
:return: Bool result
"""
if not expression:
expression = self.expression
return self.check_if_signs_are_correct(expression) and self.check_if_brackets_are_correct(expression)
def convert_to_onp(self, expression=''):
"""
Function converts an infix expression to RPN
Warning: it doesnt check whether this expression is correct
:param expression: Infix expression
:return: RPN expression
"""
if not expression:
expression = self.expression
stack = []
onp = []
for tkn in expression:
if tkn in self.operators:
while len(stack) > 0 and stack[-1] in self.operators:
if (is_associative(tkn, 'l') and (self.operators[tkn][0] - self.operators[stack[-1]][0]) <= 0) \
or (
is_associative(tkn, 'r') and (self.operators[tkn][0] - self.operators[stack[-1]][0]) < 0):
onp.append(stack.pop())
continue
break
stack.append(tkn)
elif tkn == '(':
stack.append(tkn)
elif tkn == ')':
while len(stack) > 0 and stack[-1] != '(':
onp.append(stack.pop())
stack.pop()
else:
onp.append(tkn)
while len(stack) > 0:
onp.append(stack.pop())
return functools.reduce(lambda x, y: x + y, onp)
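# Example (sketch): convert_to_onp('a>b&c') -> 'abc&>' ('&' binds tighter than '>'),
# and convert_to_onp('~a|b') -> 'a~b|'.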
def generate_general_form(self, expression=''):
"""
Function generates general form from infix expression
It uses QuineMcCluskey algorithm
Result matches a pattern: (expression1)|(expression2)|(expression3)...
:param expression: Infix expression as a String
:return: String infix expression evaluated using QuineMcCluskey
"""
if not expression:
expression = self.expression
n = len(get_variables(expression))
correct_binaries = []
generator = generate_binary(n)
current_expression = self.convert_to_onp(expression)
while True:
try:
x = generator.__next__()
if calculate_onp(current_expression, x):
correct_binaries.append(x)
except:
break
set2 = reduce_(correct_binaries)
self.general_form = expression_to_string(set2)
return self.general_form
if __name__ == '__main__':
x = None
while not x:
x = input('')
if x:
print(reduce_logical_expression(x))
else:
break
|
[
"functools.reduce",
"itertools.combinations"
] |
[((5435, 5488), 'functools.reduce', 'functools.reduce', (["(lambda x, y: x + '^' + y)", 'variables'], {}), "(lambda x, y: x + '^' + y, variables)\n", (5451, 5488), False, 'import functools\n'), ((6496, 6529), 'itertools.combinations', 'combinations', (['expressions_list', 'a'], {}), '(expressions_list, a)\n', (6508, 6529), False, 'from itertools import combinations\n'), ((12964, 13005), 'functools.reduce', 'functools.reduce', (['(lambda x, y: x + y)', 'onp'], {}), '(lambda x, y: x + y, onp)\n', (12980, 13005), False, 'import functools\n'), ((5822, 5875), 'functools.reduce', 'functools.reduce', (["(lambda x, y: x + '^' + y)", 'variables'], {}), "(lambda x, y: x + '^' + y, variables)\n", (5838, 5875), False, 'import functools\n'), ((6941, 7007), 'functools.reduce', 'functools.reduce', (["(lambda x, y: '|' + x + y + '|')", 'expressions_list'], {}), "(lambda x, y: '|' + x + y + '|', expressions_list)\n", (6957, 7007), False, 'import functools\n')]
|
# Generated by Django 3.2.6 on 2021-09-08 14:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bjorn', '0009_auto_20210908_1427'),
]
operations = [
migrations.AlterField(
model_name='certificate',
name='revocation_reason',
field=models.PositiveSmallIntegerField(blank=True, choices=[(1, 'Unspecified'), (2, 'Key compromise'), (3, 'CA compromise'), (4, 'Affiliation changed'), (5, 'Superseded'), (6, 'Cessation of operation'), (7, 'Certificate hold'), (8, 'Remove from CRL'), (9, 'Privilege withdrawn'), (10, 'AA compromise')], null=True),
),
]
|
[
"django.db.models.PositiveSmallIntegerField"
] |
[((350, 675), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'blank': '(True)', 'choices': "[(1, 'Unspecified'), (2, 'Key compromise'), (3, 'CA compromise'), (4,\n 'Affiliation changed'), (5, 'Superseded'), (6, 'Cessation of operation'\n ), (7, 'Certificate hold'), (8, 'Remove from CRL'), (9,\n 'Privilege withdrawn'), (10, 'AA compromise')]", 'null': '(True)'}), "(blank=True, choices=[(1, 'Unspecified'), (\n 2, 'Key compromise'), (3, 'CA compromise'), (4, 'Affiliation changed'),\n (5, 'Superseded'), (6, 'Cessation of operation'), (7,\n 'Certificate hold'), (8, 'Remove from CRL'), (9, 'Privilege withdrawn'),\n (10, 'AA compromise')], null=True)\n", (382, 675), False, 'from django.db import migrations, models\n')]
|
########################## THE TOON LAND PROJECT ##########################
# Filename: HackerCrypt.py
# Created by: Cody/Fd Green Cat Fd (January 31st, 2013)
####
# Description:
#
# Encryption method written by Team FD in 2011 for their personal releases.
# The script has been modified to meet Toon Land's coding standards.
####
from base64 import b64encode, b64decode
from binascii import hexlify, unhexlify
from random import randrange
from __main__ import __dict__ as __main__
from bz2 import compress as c_bz2
from bz2 import decompress as d_bz2
from zlib import compress as c_zlib
from zlib import decompress as d_zlib
from sha import sha as sha1
class HackerCrypt:
__version__ = 'v1.2.0.2'
def __init__(self):
self.MAGIC = sha1('[TL]').digest()
self.KEY = sha1('TL-Cookies').digest()
def makeIV(self):
iv = ''
for i in range(4):
iv += chr(randrange(256))
return iv
def rc4(self, data, key):
j = 0
s = range(256)
for i in range(256):
j = (j + s[i] + ord(key[i % len(key)])) % 256
s[i], s[j] = s[j], s[i]
j = i = 0
results = []
for c in data:
j = (j + 1) % 256
i = (i + s[j]) % 256
s[j], s[i] = s[i], s[j]
results.append(chr(ord(c) ^ s[(s[j] + s[i]) % 256]))
return ''.join(results)
def encode(self, data):
b64 = b64encode(data)
hex = hexlify(b64)
encoded = list(hexlify(hex))
for x in range(len(encoded)):
alpha = int(encoded[x]) + 2
encoded[x] = chr(alpha)
return ''.join(encoded)
def decode(self, encoded):
encoded = list(encoded)
for x in range(len(encoded)):
alpha = str(encoded[x])
encoded[x] = str(ord(alpha) - 2)
encoded = unhexlify(''.join(encoded))
unhexed = unhexlify(encoded)
return b64decode(unhexed)
def compress(self, data):
bz2 = b64encode(c_bz2(data))
return c_zlib(hexlify(bz2))
def decompress(self, compressed):
unhexed = unhexlify(d_zlib(compressed))
return d_bz2(b64decode(unhexed))
def encrypt(self, data):
compressed = self.compress(data)
encoded = self.encode(compressed)
data = self.MAGIC + encoded
iv = self.makeIV()
key = self.KEY + iv
return iv + self.rc4(data, key)
def decrypt(self, encrypted):
if len(encrypted) < 4:
return None
iv = encrypted[:4]
data = encrypted[4:]
key = self.KEY + iv
data = self.rc4(data, key)
if not data.startswith(self.MAGIC):
return None
decoded = self.decode(data[len(self.MAGIC):])
return self.decompress(decoded)
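# Illustrative round-trip (a sketch; note this is Python 2 era code relying on the removed
# `sha` module and on range() returning a list):
#   crypt = HackerCrypt()
#   blob = crypt.encrypt('hello world')
#   assert crypt.decrypt(blob) == 'hello world'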
|
[
"binascii.hexlify",
"base64.b64decode",
"bz2.compress",
"binascii.unhexlify",
"base64.b64encode",
"zlib.decompress",
"random.randrange",
"sha.sha"
] |
[((1437, 1452), 'base64.b64encode', 'b64encode', (['data'], {}), '(data)\n', (1446, 1452), False, 'from base64 import b64encode, b64decode\n'), ((1467, 1479), 'binascii.hexlify', 'hexlify', (['b64'], {}), '(b64)\n', (1474, 1479), False, 'from binascii import hexlify, unhexlify\n'), ((1910, 1928), 'binascii.unhexlify', 'unhexlify', (['encoded'], {}), '(encoded)\n', (1919, 1928), False, 'from binascii import hexlify, unhexlify\n'), ((1944, 1962), 'base64.b64decode', 'b64decode', (['unhexed'], {}), '(unhexed)\n', (1953, 1962), False, 'from base64 import b64encode, b64decode\n'), ((1503, 1515), 'binascii.hexlify', 'hexlify', (['hex'], {}), '(hex)\n', (1510, 1515), False, 'from binascii import hexlify, unhexlify\n'), ((2018, 2029), 'bz2.compress', 'c_bz2', (['data'], {}), '(data)\n', (2023, 2029), True, 'from bz2 import compress as c_bz2\n'), ((2053, 2065), 'binascii.hexlify', 'hexlify', (['bz2'], {}), '(bz2)\n', (2060, 2065), False, 'from binascii import hexlify, unhexlify\n'), ((2134, 2152), 'zlib.decompress', 'd_zlib', (['compressed'], {}), '(compressed)\n', (2140, 2152), True, 'from zlib import decompress as d_zlib\n'), ((2175, 2193), 'base64.b64decode', 'b64decode', (['unhexed'], {}), '(unhexed)\n', (2184, 2193), False, 'from base64 import b64encode, b64decode\n'), ((752, 764), 'sha.sha', 'sha1', (['"""[TL]"""'], {}), "('[TL]')\n", (756, 764), True, 'from sha import sha as sha1\n'), ((795, 813), 'sha.sha', 'sha1', (['"""TL-Cookies"""'], {}), "('TL-Cookies')\n", (799, 813), True, 'from sha import sha as sha1\n'), ((911, 925), 'random.randrange', 'randrange', (['(256)'], {}), '(256)\n', (920, 925), False, 'from random import randrange\n')]
|
"""scrapli_cfg.platform.core.arista_eos.base"""
import json
import re
from datetime import datetime
from logging import LoggerAdapter
from typing import Iterable, List, Tuple, Union
from scrapli.driver import AsyncNetworkDriver, NetworkDriver
from scrapli.response import Response
from scrapli_cfg.exceptions import ScrapliCfgException
from scrapli_cfg.platform.core.arista_eos.patterns import (
BANNER_PATTERN,
END_PATTERN,
GLOBAL_COMMENT_LINE_PATTERN,
VERSION_PATTERN,
)
from scrapli_cfg.response import ScrapliCfgResponse
CONFIG_SOURCES = [
"running",
"startup",
]
class ScrapliCfgEOSBase:
conn: Union[NetworkDriver, AsyncNetworkDriver]
logger: LoggerAdapter
config_sources: List[str]
config_session_name: str
candidate_config: str
@staticmethod
def _parse_version(device_output: str) -> str:
"""
Parse version string out of device output
Args:
device_output: output from show version command
Returns:
str: device version string
Raises:
N/A
"""
version_string_search = re.search(pattern=VERSION_PATTERN, string=device_output)
if not version_string_search:
return ""
version_string = version_string_search.group(0) or ""
return version_string
@staticmethod
def _parse_config_sessions(device_output: str) -> List[str]:
"""
Parse config session names out of device output
Args:
device_output: JSON output listing the device's configuration sessions
Returns:
list[str]: config session names
Raises:
N/A
"""
try:
config_session_dict = json.loads(device_output)
except json.JSONDecodeError:
return []
sessions = list(config_session_dict.get("sessions", {}))
return sessions
@staticmethod
def _get_config_command(source: str) -> str:
"""
Return command to use to get config based on the provided source
Args:
source: name of the config source, generally running|startup
Returns:
str: command to use to fetch the requested config
Raises:
N/A
"""
if source == "running":
return "show running-config"
return "show startup-config"
@staticmethod
def _prepare_config_payloads(config: str) -> Tuple[str, str]:
"""
Prepare a configuration so it can be nicely sent to the device via scrapli
Args:
config: configuration to prep
Returns:
tuple: tuple of "normal" config lines and "eager" config lines
Raises:
N/A
"""
# remove comment lines
config = re.sub(pattern=GLOBAL_COMMENT_LINE_PATTERN, repl="!", string=config)
# remove "end" at the end of config if present - if its present it will drop scrapli out
# of the config session which we do not want
config = re.sub(pattern=END_PATTERN, repl="!", string=config)
# find all sections that need to be "eagerly" sent
eager_config = re.findall(pattern=BANNER_PATTERN, string=config)
for eager_section in eager_config:
config = config.replace(eager_section, "!")
joined_eager_config = "\n".join(captured_section for captured_section in eager_config)
return config, joined_eager_config
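# Illustrative behaviour (a sketch inferred from the pattern names above, not a verified
# transcript): given a candidate config containing a global "! ..." comment line, a trailing
# "end", and a "banner motd ... EOF" block, the banner block is moved into the eager payload
# (sent with scrapli eager=True) and each removed piece is replaced with "!" in the normal
# payload.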
def _prepare_load_config_session_and_payload(self, config: str) -> Tuple[str, str, bool]:
"""
Prepare the normal and eager payloads and decide if we need to register a config session
Args:
config: candidate config to load
Returns:
tuple: tuple containing "normal" config elements to send to the device and "eager" mode
config elements to send to the device (things like banners/macro that require
scrapli "eager=True"), and lastly a bool indicating if the config session needs to
be registered on the device
Raises:
N/A
"""
config, eager_config = self._prepare_config_payloads(config=config)
register_config_session = False
if not self.config_session_name:
self.config_session_name = f"scrapli_cfg_{round(datetime.now().timestamp())}"
self.logger.debug(f"configuration session name will be '{self.config_session_name}'")
register_config_session = True
return config, eager_config, register_config_session
def _reset_config_session(self) -> None:
"""
Reset config session info
Resets the candidate config and config session name attributes -- when these are "empty" we
know there is no current config session
Args:
N/A
Returns:
None
Raises:
N/A
"""
self.logger.debug("resetting candidate config and config session name")
self.candidate_config = ""
self.config_session_name = ""
def _normalize_source_candidate_configs(self, source_config: str) -> Tuple[str, str]:
"""
Normalize candidate config and source config so that we can easily diff them
Args:
source_config: current config of the source config store
Returns:
Tuple[str, str]: normalized source config and candidate config strings
Raises:
N/A
"""
self.logger.debug("normalizing source and candidate configs for diff object")
# Remove all comment lines from both the source and candidate configs -- this is only done
# here pre-diff, so we don't modify the user-provided candidate config, which may well contain
# comment lines - we only remove "global" (top level) comments though; user comments
# attached to interfaces and the like will remain
source_config = re.sub(pattern=GLOBAL_COMMENT_LINE_PATTERN, string=source_config, repl="")
source_config = "\n".join(line for line in source_config.splitlines() if line)
candidate_config = re.sub(
pattern=GLOBAL_COMMENT_LINE_PATTERN, string=self.candidate_config, repl=""
)
candidate_config = "\n".join(line for line in candidate_config.splitlines() if line)
return source_config, candidate_config
def _pre_clear_config_sessions(self) -> ScrapliCfgResponse:
"""
Handle pre "clear_config_sessions" operations for parity between sync and async
Args:
N/A
Returns:
ScrapliCfgResponse: new response object to update w/ get results
Raises:
N/A
"""
self.logger.info("clear_config_sessions requested")
response = ScrapliCfgResponse(
host=self.conn.host, raise_for_status_exception=ScrapliCfgException
)
return response
def _post_clear_config_sessions(
self,
response: ScrapliCfgResponse,
scrapli_responses: Iterable[Response],
) -> ScrapliCfgResponse:
"""
Handle post "clear_config_sessions" operations for parity between sync and async
Args:
response: response object to update
            scrapli_responses: list of scrapli response objects from clearing the config session(s)
        Returns:
            ScrapliCfgResponse: response object whose `result` attribute describes the outcome of
            clearing the configuration session(s)
Raises:
N/A
"""
response.record_response(scrapli_responses=scrapli_responses)
if response.failed:
msg = "failed to clear device configuration session(s)"
self.logger.critical(msg)
response.result = msg
else:
response.result = "configuration session(s) cleared"
return response
|
[
"scrapli_cfg.response.ScrapliCfgResponse",
"json.loads",
"datetime.datetime.now",
"re.findall",
"re.search",
"re.sub"
] |
[((1125, 1181), 're.search', 're.search', ([], {'pattern': 'VERSION_PATTERN', 'string': 'device_output'}), '(pattern=VERSION_PATTERN, string=device_output)\n', (1134, 1181), False, 'import re\n'), ((2794, 2862), 're.sub', 're.sub', ([], {'pattern': 'GLOBAL_COMMENT_LINE_PATTERN', 'repl': '"""!"""', 'string': 'config'}), "(pattern=GLOBAL_COMMENT_LINE_PATTERN, repl='!', string=config)\n", (2800, 2862), False, 'import re\n'), ((3031, 3083), 're.sub', 're.sub', ([], {'pattern': 'END_PATTERN', 'repl': '"""!"""', 'string': 'config'}), "(pattern=END_PATTERN, repl='!', string=config)\n", (3037, 3083), False, 'import re\n'), ((3167, 3216), 're.findall', 're.findall', ([], {'pattern': 'BANNER_PATTERN', 'string': 'config'}), '(pattern=BANNER_PATTERN, string=config)\n', (3177, 3216), False, 'import re\n'), ((5937, 6011), 're.sub', 're.sub', ([], {'pattern': 'GLOBAL_COMMENT_LINE_PATTERN', 'string': 'source_config', 'repl': '""""""'}), "(pattern=GLOBAL_COMMENT_LINE_PATTERN, string=source_config, repl='')\n", (5943, 6011), False, 'import re\n'), ((6126, 6212), 're.sub', 're.sub', ([], {'pattern': 'GLOBAL_COMMENT_LINE_PATTERN', 'string': 'self.candidate_config', 'repl': '""""""'}), "(pattern=GLOBAL_COMMENT_LINE_PATTERN, string=self.candidate_config,\n repl='')\n", (6132, 6212), False, 'import re\n'), ((6789, 6881), 'scrapli_cfg.response.ScrapliCfgResponse', 'ScrapliCfgResponse', ([], {'host': 'self.conn.host', 'raise_for_status_exception': 'ScrapliCfgException'}), '(host=self.conn.host, raise_for_status_exception=\n ScrapliCfgException)\n', (6807, 6881), False, 'from scrapli_cfg.response import ScrapliCfgResponse\n'), ((1718, 1743), 'json.loads', 'json.loads', (['device_output'], {}), '(device_output)\n', (1728, 1743), False, 'import json\n'), ((4339, 4353), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4351, 4353), False, 'from datetime import datetime\n')]
|
# -*- coding: utf-8 -*-
import time
# !! This is the configuration of Nikola. !! #
# !! You should edit it to your liking. !! #
# Data about this site
BLOG_AUTHOR = "<NAME>" # (translatable)
BLOG_TITLE = "My Nikola Site" # (translatable)
# This is the main URL for your site. It will be used
# in a prominent link. Don't forget the protocol (http/https)!
SITE_URL = "https://example.com/"
# This is the URL where Nikola's output will be deployed.
# If not set, defaults to SITE_URL
# BASE_URL = "https://example.com/"
BLOG_EMAIL = "<EMAIL>"
BLOG_DESCRIPTION = "This is a demo site for Nikola." # (translatable)
# What is the default language?
DEFAULT_LANG = "en"
# What other languages do you have?
# The format is {"translationcode" : "path/to/translation" }
# the path will be used as a prefix for the generated pages location
TRANSLATIONS = {
DEFAULT_LANG: "",
# Example for another language:
# "es": "./es",
}
# What will translated input files be named like?
TRANSLATIONS_PATTERN = '{path}.{lang}.{ext}'
# Links for the sidebar / navigation bar. (translatable)
# This is a dict. The keys are languages, and values are tuples.
NAVIGATION_LINKS = {
DEFAULT_LANG: (
("/archive.html", "Archive"),
("/categories/", "Tags"),
("/rss.xml", "RSS feed"),
),
}
# Alternative navigation links. Works the same way NAVIGATION_LINKS does,
# although themes may not always support them. (translatable)
# (Bootstrap 4: right-side of navbar, Bootblog 4: right side of title)
NAVIGATION_ALT_LINKS = {
DEFAULT_LANG: ()
}
# Name of the theme to use.
#THEME = "bootblog4"
THEME = "disimplex"
# A theme color. In default themes, it might be displayed by some browsers as
# the browser UI color (eg. Chrome on Android). Other themes might also use it
# as an accent color (the default ones don’t). Must be a HEX value.
THEME_COLOR = '#5670d4'
# Theme configuration. Fully theme-dependent. (translatable)
# Samples for bootblog4 (enabled) and bootstrap4 (commented) follow.
# bootblog4 supports: featured_large featured_small featured_on_mobile
# featured_large_image_on_mobile featured_strip_html sidebar
# bootstrap4 supports: navbar_light (defaults to False)
# navbar_custom_bg (defaults to '')
# Config for bootblog4:
THEME_CONFIG = {
DEFAULT_LANG: {
# Show the latest featured post in a large box, with the previewimage as its background.
'featured_large': False,
# Show the first (remaining) two featured posts in small boxes.
'featured_small': False,
# Show featured posts on mobile.
'featured_on_mobile': True,
# Show image in `featured_large` on mobile.
# `featured_small` displays them only on desktop.
'featured_large_image_on_mobile': True,
# Strip HTML from featured post text.
'featured_strip_html': False,
# Contents of the sidebar, If empty, the sidebar is not displayed.
'sidebar': ''
}
}
# POSTS and PAGES contains (wildcard, destination, template) tuples.
# (translatable)
#
POSTS = (
("posts/*.rst", "posts", "post.tmpl"),
("posts/*.md", "posts", "post.tmpl"),
("posts/*.txt", "posts", "post.tmpl"),
("posts/*.html", "posts", "post.tmpl"),
)
PAGES = (
("pages/*.rst", "", "page.tmpl"),
("pages/*.md", "", "page.tmpl"),
("pages/*.txt", "", "page.tmpl"),
("pages/*.html", "", "page.tmpl"),
)
# Below this point, everything is optional
# Post's dates are considered in UTC by default, if you want to use
# another time zone, please set TIMEZONE to match. Check the available
# list from Wikipedia:
TIMEZONE = "Europe/London"
# Date format used to display post dates. (translatable)
# Used by babel.dates, CLDR style: http://cldr.unicode.org/translation/date-time-1/date-time
# You can also use 'full', 'long', 'medium', or 'short'
# DATE_FORMAT = 'yyyy-MM-dd HH:mm'
# Date format used to display post dates, if local dates are used. (translatable)
# Used by Luxon: https://moment.github.io/luxon/docs/manual/formatting
# Example for presets: {'preset': True, 'format': 'DATE_FULL'}
# LUXON_DATE_FORMAT = {
# DEFAULT_LANG: {'preset': False, 'format': 'yyyy-MM-dd HH:mm'},
# }
# Date fanciness.
#
# 0 = using DATE_FORMAT and TIMEZONE (without JS)
# 1 = using LUXON_DATE_FORMAT and local user time (JS, using Luxon)
# 2 = using a string like “2 days ago” (JS, using Luxon)
#
# Your theme must support it, Bootstrap already does.
# DATE_FANCINESS = 0
# Customize the locale/region used for a language.
# For example, to use British instead of US English: LOCALES = {'en': 'en_GB'}
# LOCALES = {}
# One or more folders containing files to be copied as-is into the output.
# The format is a dictionary of {source: relative destination}.
# Default is:
# FILES_FOLDERS = {'files': ''}
# Which means copy 'files' into 'output'
# One or more folders containing code listings to be processed and published on
# the site. The format is a dictionary of {source: relative destination}.
# Default is:
# LISTINGS_FOLDERS = {'listings': 'listings'}
# Which means process listings from 'listings' into 'output/listings'
# A mapping of languages to file-extensions that represent that language.
# Feel free to add or delete extensions to any list, but don't add any new
# compilers unless you write the interface for it yourself.
#
# The default compiler for `new_post` is the first entry in the POSTS tuple.
#
# 'rest' is reStructuredText
# 'markdown' is Markdown
# 'html' assumes the file is HTML and just copies it
COMPILERS = {
"rest": ['.rst', '.txt'],
"markdown": ['.md', '.mdown', '.markdown'],
"textile": ['.textile'],
"txt2tags": ['.t2t'],
"bbcode": ['.bb'],
"wiki": ['.wiki'],
"ipynb": ['.ipynb'],
"html": ['.html', '.htm'],
# PHP files are rendered the usual way (i.e. with the full templates).
# The resulting files have .php extensions, making it possible to run
# them without reconfiguring your server to recognize them.
"php": ['.php'],
# Pandoc detects the input from the source filename
# but is disabled by default as it would conflict
# with many of the others.
# "pandoc": ['.rst', '.md', '.txt'],
}
# Preferred metadata format for new posts
# "YAML": YAML wrapped in "---"
METADATA_FORMAT = "YAML"
# If you do not want to display a tag publicly, you can mark it as hidden.
# The tag will not be displayed on the tag list page and posts.
# Tag pages will still be generated.
HIDDEN_TAGS = ['mathjax']
# If CATEGORY_ALLOW_HIERARCHIES is set to True, categories can be organized in
# hierarchies. For a post, the whole path in the hierarchy must be specified,
# using a forward slash ('/') to separate paths. Use a backslash ('\') to escape
# a forward slash or a backslash (i.e. '\//\\' is a path specifying the
# subcategory called '\' of the top-level category called '/').
CATEGORY_ALLOW_HIERARCHIES = False
# If CATEGORY_OUTPUT_FLAT_HIERARCHY is set to True, the output written to output
# contains only the name of the leaf category and not the whole path.
CATEGORY_OUTPUT_FLAT_HIERARCHY = False
# If you do not want to display a category publicly, you can mark it as hidden.
# The category will not be displayed on the category list page.
# Category pages will still be generated.
HIDDEN_CATEGORIES = []
# If ENABLE_AUTHOR_PAGES is set to True and there is more than one
# author, author pages are generated.
ENABLE_AUTHOR_PAGES = False
# If you do not want to display an author publicly, you can mark it as hidden.
# The author will not be displayed on the author list page and posts.
# Tag pages will still be generated.
HIDDEN_AUTHORS = ['Guest']
# Optional HTML that displayed on “main” blog index.html files.
# May be used for a greeting. (translatable)
FRONT_INDEX_HEADER = {
DEFAULT_LANG: ''
}
# URLs to other posts/pages can take 3 forms:
# rel_path: a relative URL to the current page/post (default)
# full_path: a URL with the full path from the root
# absolute: a complete URL (that includes the SITE_URL)
# URL_TYPE = 'rel_path'
#
# Note that our use of "server side includes" / partials
# REQUIRES the use of 'full_path'
#
URL_TYPE = 'full_path'
# Extension for RSS feed files
# RSS_EXTENSION = ".xml"
# RSS filename base (without extension); used for indexes and galleries.
# (translatable)
# RSS_FILENAME_BASE = "rss"
# Atom filename base (without extension); used for indexes.
# (translatable)
ATOM_FILENAME_BASE = "feed"
# Extension for Atom feed files
# ATOM_EXTENSION = ".atom"
# A list of redirection tuples, [("foo/from.html", "/bar/to.html")].
#
# A HTML file will be created in output/foo/from.html that redirects
# to the "/bar/to.html" URL. notice that the "from" side MUST be a
# relative URL.
#
# If you don't need any of these, just set to []
REDIRECTIONS = []
# Presets of commands to execute to deploy. Can be anything, for
# example, you may use rsync:
# "rsync -rav --delete output/ [email protected]:/srv/www/site"
# And then do a backup, or run `nikola ping` from the `ping`
# plugin (`nikola plugin -i ping`). Or run `nikola check -l`.
# You may also want to use github_deploy (see below).
# You can define multiple presets and specify them as arguments
# to `nikola deploy`. If no arguments are specified, a preset
# named `default` will be executed. You can use as many presets
# in a `nikola deploy` command as you like.
# DEPLOY_COMMANDS = {
# 'default': [
# "rsync -rav --delete output/ [email protected]:/srv/www/site",
# ]
# }
# github_deploy configuration
# For more details, read the manual:
# https://getnikola.com/handbook.html#deploying-to-github
# You will need to configure the deployment branch on GitHub.
GITHUB_SOURCE_BRANCH = 'src'
GITHUB_DEPLOY_BRANCH = 'master'
# The name of the remote where you wish to push to, using github_deploy.
GITHUB_REMOTE_NAME = 'origin'
# Whether or not github_deploy should commit to the source branch automatically
# before deploying.
GITHUB_COMMIT_SOURCE = True
# Where the output site should be located
# If you don't use an absolute path, it will be considered as relative
# to the location of conf.py
# OUTPUT_FOLDER = 'output'
# where the "cache" of partial generated content should be located
# default: 'cache'
# CACHE_FOLDER = 'cache'
# #############################################################################
# Image Gallery Options
# #############################################################################
# Use a thumbnail (defined by ".. previewimage:" in the gallery's index) in
# list of galleries for each gallery
GALLERIES_USE_THUMBNAIL = False
# Image to use as thumbnail for those galleries that don't have one
# None: show a grey square
# '/url/to/file': show the image in that url
GALLERIES_DEFAULT_THUMBNAIL = None
# Images will be scaled down according to IMAGE_THUMBNAIL_SIZE and MAX_IMAGE_SIZE
# options, but will have to be referenced manually to be visible on the site
# (the thumbnail has ``.thumbnail`` added before the file extension by default,
# but a different naming template can be configured with IMAGE_THUMBNAIL_FORMAT).
IMAGE_FOLDERS = {'images': 'images'}
# IMAGE_THUMBNAIL_SIZE = 400
# IMAGE_THUMBNAIL_FORMAT = '{name}.thumbnail{ext}'
# #############################################################################
# HTML fragments and diverse things that are used by the templates
# #############################################################################
# 'Read more...' for the index page, if INDEX_TEASERS is True (translatable)
INDEX_READ_MORE_LINK = '<p class="more"><a href="{link}">{read_more}…</a></p>'
# 'Read more...' for the feeds, if FEED_TEASERS is True (translatable)
FEED_READ_MORE_LINK = '<p><a href="{link}">{read_more}…</a> ({min_remaining_read})</p>'
# Append a URL query to the FEED_READ_MORE_LINK in Atom and RSS feeds. Advanced
# option used for traffic source tracking.
FEED_LINKS_APPEND_QUERY = False
# A HTML fragment describing the license, for the sidebar.
# (translatable)
LICENSE = ""
# I recommend using the Creative Commons' wizard:
# https://creativecommons.org/choose/
# LICENSE = """
# <a rel="license" href="https://creativecommons.org/licenses/by-nc-sa/4.0/">
# <img alt="Creative Commons License BY-NC-SA"
# style="border-width:0; margin-bottom:12px;"
# src="https://i.creativecommons.org/l/by-nc-sa/4.0/88x31.png"></a>"""
# A small copyright notice for the page footer (in HTML).
# (translatable)
CONTENT_FOOTER = 'Contents © {date} <a href="mailto:{email}">{author}</a> - Powered by <a href="https://getnikola.com" rel="nofollow">Nikola</a> {license}'
# Things that will be passed to CONTENT_FOOTER.format(). This is done
CONTENT_FOOTER_FORMATS = {
DEFAULT_LANG: (
(),
{
"email": BLOG_EMAIL,
"author": BLOG_AUTHOR,
"date": time.gmtime().tm_year,
"license": LICENSE
}
)
}
# A simple copyright tag for inclusion in RSS feeds that works just
# like CONTENT_FOOTER and CONTENT_FOOTER_FORMATS
RSS_COPYRIGHT = 'Contents © {date} <a href="mailto:{email}">{author}</a> {license}'
RSS_COPYRIGHT_PLAIN = 'Contents © {date} {author} {license}'
RSS_COPYRIGHT_FORMATS = CONTENT_FOOTER_FORMATS
# To use comments, you can choose between different third party comment
# systems. The following comment systems are supported by Nikola:
# disqus, facebook, intensedebate, isso, muut, commento, utterances
# You can leave this option blank to disable comments.
COMMENT_SYSTEM = ""
# And you also need to add your COMMENT_SYSTEM_ID which
# depends on what comment system you use. The default is
# "nikolademo" which is a test account for Disqus. More information
# is in the manual.
COMMENT_SYSTEM_ID = ""
# Create index.html for page folders?
# WARNING: if a page would conflict with the index file (usually
# caused by setting slug to `index`), the PAGE_INDEX
# will not be generated for that directory.
# PAGE_INDEX = False
# Enable comments on pages (i.e. not posts)?
# COMMENTS_IN_PAGES = False
# Enable comments on picture gallery pages?
# COMMENTS_IN_GALLERIES = False
# What file should be used for directory indexes?
# Defaults to index.html
# Common other alternatives: default.html for IIS, index.php
# INDEX_FILE = "index.html"
# If a link ends in /index.html, drop the index.html part.
# http://mysite/foo/bar/index.html => http://mysite/foo/bar/
# (Uses the INDEX_FILE setting, so if that is, say, default.html,
# it will instead /foo/default.html => /foo)
STRIP_INDEXES = False
# List of files relative to the server root (!) that will be asked to be excluded
# from indexing and other robotic spidering. * is supported. Will only be effective
# if SITE_URL points to server root. The list is used to exclude resources from
# /robots.txt and /sitemap.xml, and to inform search engines about /sitemapindex.xml.
# ROBOTS_EXCLUSIONS = ["/archive.html", "/category/*.html"]
# Instead of putting files in <slug>.html, put them in <slug>/index.html.
# No web server configuration is required. Also enables STRIP_INDEXES.
# This can be disabled on a per-page/post basis by adding
# .. pretty_url: False
# to the metadata.
PRETTY_URLS = False
# If True, publish future dated posts right away instead of scheduling them.
# Defaults to False.
# FUTURE_IS_NOW = False
# If True, future dated posts are allowed in deployed output
# Only the individual posts are published/deployed; not in indexes/sitemap
# Generally, you want FUTURE_IS_NOW and DEPLOY_FUTURE to be the same value.
# DEPLOY_FUTURE = False
# If False, draft posts will not be deployed
# DEPLOY_DRAFTS = True
# Allows scheduling of posts using the rule specified here (new_post -s)
# Specify an iCal Recurrence Rule: https://www.kanzaki.com/docs/ical/rrule.html
# SCHEDULE_RULE = ''
# If True, use the scheduling rule to all posts (not pages!) by default
# SCHEDULE_ALL = False
# Do you want to add a Mathjax config file?
# MATHJAX_CONFIG = ""
# If you want support for the $.$ syntax (which may conflict with running
# text!), just use this config:
# MATHJAX_CONFIG = """
# <script type="text/x-mathjax-config">
# MathJax.Hub.Config({
# tex2jax: {
# inlineMath: [ ['$','$'], ["\\\(","\\\)"] ],
# displayMath: [ ['$$','$$'], ["\\\[","\\\]"] ],
# processEscapes: true
# },
# displayAlign: 'center', // Change this to 'left' if you want left-aligned equations.
# "HTML-CSS": {
# styles: {'.MathJax_Display': {"margin": 0}}
# }
# });
# </script>
# """
# Want to use KaTeX instead of MathJax? While KaTeX may not support every
# feature yet, it's faster and the output looks better.
# USE_KATEX = False
# KaTeX auto-render settings. If you want support for the $.$ syntax (which may
# conflict with running text!), just use this config:
# KATEX_AUTO_RENDER = """
# delimiters: [
# {left: "$$", right: "$$", display: true},
# {left: "\\\\[", right: "\\\\]", display: true},
# {left: "\\\\begin{equation*}", right: "\\\\end{equation*}", display: true},
# {left: "$", right: "$", display: false},
# {left: "\\\\(", right: "\\\\)", display: false}
# ]
# """
# What Markdown extensions to enable?
# You will also get gist, nikola and podcast because those are
# done in the code, hope you don't mind ;-)
# Note: most Nikola-specific extensions are done via the Nikola plugin system,
# with the MarkdownExtension class and should not be added here.
# Defaults are markdown.extensions.(fenced_code|codehilite|extra)
# markdown.extensions.meta is required for Markdown metadata.
MARKDOWN_EXTENSIONS = ['markdown.extensions.fenced_code', 'markdown.extensions.codehilite', 'markdown.extensions.extra', 'markdown.extensions.toc']
# Options to be passed to markdown extensions (See https://python-markdown.github.io/reference/)
# Default is {} (no config at all)
# MARKDOWN_EXTENSION_CONFIGS = {}
# Social buttons. This is sample code for AddThis (which was the default for a
# long time). Insert anything you want here, or even make it empty (which is
# the default right now)
# (translatable)
# SOCIAL_BUTTONS_CODE = """
# <!-- Social buttons -->
# <div id="addthisbox" class="addthis_toolbox addthis_peekaboo_style addthis_default_style addthis_label_style addthis_32x32_style">
# <a class="addthis_button_more">Share</a>
# <ul><li><a class="addthis_button_facebook"></a>
# <li><a class="addthis_button_google_plusone_share"></a>
# <li><a class="addthis_button_linkedin"></a>
# <li><a class="addthis_button_twitter"></a>
# </ul>
# </div>
# <script src="https://s7.addthis.com/js/300/addthis_widget.js#pubid=ra-4f7088a56bb93798"></script>
# <!-- End of social buttons -->
# """
# Show link to source for the posts?
SHOW_SOURCELINK = False
# Copy the source files for your pages?
# Setting it to False implies SHOW_SOURCELINK = False
COPY_SOURCES = False
# Modify the number of Post per Index Page
# Defaults to 10
# INDEX_DISPLAY_POST_COUNT = 10
# Extra things you want in the pages HEAD tag. This will be added right
# before </head>
# (translatable)
# EXTRA_HEAD_DATA = ""
# Google Analytics or whatever else you use. Added to the bottom of <body>
# in the default template (base.tmpl).
# (translatable)
# BODY_END = ""
# Bundle JS and CSS into single files to make site loading faster in a HTTP/1.1
# environment but is not recommended for HTTP/2.0 when caching is used.
# Defaults to True.
# USE_BUNDLES = True
USE_BUNDLES = False
# Plugins you don't want to use. Be careful :-)
# DISABLED_PLUGINS = ["render_galleries"]
# Special settings to disable only parts of the indexes plugin.
# Use with care.
# DISABLE_INDEXES = False
# DISABLE_MAIN_ATOM_FEED = False
# DISABLE_MAIN_RSS_FEED = False
# Add the absolute paths to directories containing plugins to use them.
# For example, the `plugins` directory of your clone of the Nikola plugins
# repository.
# EXTRA_PLUGINS_DIRS = []
# Add the absolute paths to directories containing themes to use them.
# For example, the `v7` directory of your clone of the Nikola themes
# repository.
# EXTRA_THEMES_DIRS = []
# List of regular expressions, links matching them will always be considered
# valid by "nikola check -l"
# LINK_CHECK_WHITELIST = []
# The <hN> tags in HTML generated by certain compilers (reST/Markdown)
# will be demoted by that much (1 → h1 will become h2 and so on)
# This was a hidden feature of the Markdown and reST compilers in the
# past. Useful especially if your post titles are in <h1> tags too, for
# example.
# (defaults to 1.)
# DEMOTE_HEADERS = 1
# If set to True, the tags 'draft', 'mathjax' and 'private' have special
# meaning. If set to False, these tags are handled like regular tags.
USE_TAG_METADATA = False
# If set to True, a warning is issued if one of the 'draft', 'mathjax'
# and 'private' tags are found in a post. Useful for checking that
# migration was successful.
WARN_ABOUT_TAG_METADATA = False
# Templates will use those filters, along with the defaults.
# Consult your engine's documentation on filters if you need help defining
# those.
# TEMPLATE_FILTERS = {}
# Put in global_context things you want available on all your templates.
# It can be anything, data, functions, modules, etc.
GLOBAL_CONTEXT = {}
# Add functions here and they will be called with template
# GLOBAL_CONTEXT as parameter when the template is about to be
# rendered
GLOBAL_CONTEXT_FILLER = []
# Settings for the (boot)Reveal theme must be added to the global context.
# subtheme selection: beige/serif/simple/sky/night/default
# transition selection: cube/page/concave/linear/none/default
GLOBAL_CONTEXT.update({
'subtheme': 'simple',
'transition': 'none'
})
|
[
"time.gmtime"
] |
[((12867, 12880), 'time.gmtime', 'time.gmtime', ([], {}), '()\n', (12878, 12880), False, 'import time\n')]
|
""" Tests for GSPDataSource """
import os
from datetime import datetime
import pandas as pd
import nowcasting_dataset
from nowcasting_dataset.data_sources.gsp.gsp_data_source import (
GSPDataSource,
drop_gsp_north_of_boundary,
)
from nowcasting_dataset.geospatial import osgb_to_lat_lon
def test_gsp_pv_data_source_init():
"""Test GSP init"""
local_path = os.path.dirname(nowcasting_dataset.__file__) + "/.."
_ = GSPDataSource(
zarr_path=f"{local_path}/tests/data/gsp/test.zarr",
start_datetime=datetime(2020, 4, 1),
end_datetime=datetime(2020, 4, 2),
history_minutes=30,
forecast_minutes=60,
image_size_pixels=64,
meters_per_pixel=2000,
)
def test_gsp_pv_data_source_get_locations():
"""Test GSP locations"""
local_path = os.path.dirname(nowcasting_dataset.__file__) + "/.."
gsp = GSPDataSource(
zarr_path=f"{local_path}/tests/data/gsp/test.zarr",
start_datetime=datetime(2020, 4, 1),
end_datetime=datetime(2020, 4, 2),
history_minutes=30,
forecast_minutes=60,
image_size_pixels=64,
meters_per_pixel=2000,
)
locations_x, locations_y = gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:10])
assert len(locations_x) == len(locations_y)
# This makes sure it is not in lat/lon.
    # Note that OSGB coordinates could be <= 90, but that would mean a location in the middle of the sea,
# which is impossible for GSP data
assert locations_x[0] > 90
assert locations_y[0] > 90
lat, lon = osgb_to_lat_lon(locations_x, locations_y)
assert 0 < lat[0] < 90 # this makes sure it is in lat/lon
assert -90 < lon[0] < 90 # this makes sure it is in lat/lon
def test_gsp_pv_data_source_get_all_locations():
"""Test GSP example"""
local_path = os.path.dirname(nowcasting_dataset.__file__) + "/.."
gsp = GSPDataSource(
zarr_path=f"{local_path}/tests/data/gsp/test.zarr",
start_datetime=datetime(2020, 4, 1),
end_datetime=datetime(2020, 4, 2),
history_minutes=30,
forecast_minutes=60,
image_size_pixels=64,
meters_per_pixel=2000,
)
N_gsps = len(gsp.metadata)
t0_datetimes_utc = gsp.gsp_power.index[0:10]
x_locations = gsp.metadata.location_x
(
t0_datetimes_utc_all_gsps,
x_centers_osgb_all_gsps,
y_centers_osgb_all_gsps,
) = gsp.get_all_locations(t0_datetimes_utc=t0_datetimes_utc)
assert len(t0_datetimes_utc_all_gsps) == len(x_centers_osgb_all_gsps)
assert len(t0_datetimes_utc_all_gsps) == len(y_centers_osgb_all_gsps)
assert len(t0_datetimes_utc_all_gsps) == len(x_locations) * len(t0_datetimes_utc)
# check first few are the same datetime
assert (x_centers_osgb_all_gsps[0:N_gsps] == x_locations.values).all()
assert (t0_datetimes_utc_all_gsps[0:N_gsps] == t0_datetimes_utc[0]).all()
# check second set of datetimes
assert (x_centers_osgb_all_gsps[N_gsps : 2 * N_gsps] == x_locations.values).all()
assert (t0_datetimes_utc_all_gsps[N_gsps : 2 * N_gsps] == t0_datetimes_utc[1]).all()
# check all datetimes
t0_datetimes_utc_all_gsps_overlap = t0_datetimes_utc_all_gsps.union(t0_datetimes_utc)
assert len(t0_datetimes_utc_all_gsps_overlap) == len(t0_datetimes_utc_all_gsps)
def test_gsp_pv_data_source_get_example():
"""Test GSP example"""
local_path = os.path.dirname(nowcasting_dataset.__file__) + "/.."
start_dt = datetime(2020, 4, 1)
end_dt = datetime(2020, 4, 1)
gsp = GSPDataSource(
zarr_path=f"{local_path}/tests/data/gsp/test.zarr",
start_datetime=datetime(2020, 4, 1),
end_datetime=datetime(2020, 4, 2),
history_minutes=30,
forecast_minutes=60,
image_size_pixels=64,
meters_per_pixel=2000,
)
x_locations, y_locations = gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:10])
example = gsp.get_example(
t0_datetime_utc=gsp.gsp_power.index[0],
x_center_osgb=x_locations[0],
y_center_osgb=y_locations[0],
)
assert len(example.id) == len(example.power_mw[0])
assert len(example.x_osgb) == len(example.y_osgb)
assert len(example.x_osgb) > 0
assert pd.Timestamp(example.time[0].values) <= end_dt
assert pd.Timestamp(example.time[0].values) >= start_dt
def test_gsp_pv_data_source_get_batch():
"""Test GSP batch"""
local_path = os.path.dirname(nowcasting_dataset.__file__) + "/.."
gsp = GSPDataSource(
zarr_path=f"{local_path}/tests/data/gsp/test.zarr",
start_datetime=datetime(2020, 4, 1),
end_datetime=datetime(2020, 4, 2),
history_minutes=30,
forecast_minutes=60,
image_size_pixels=64,
meters_per_pixel=2000,
)
batch_size = 10
x_locations, y_locations = gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:batch_size])
batch = gsp.get_batch(
t0_datetimes_utc=gsp.gsp_power.index[batch_size : 2 * batch_size],
x_centers_osgb=x_locations[0:batch_size],
y_centers_osgb=y_locations[0:batch_size],
)
assert len(batch.power_mw[0]) == 4
assert len(batch.id[0]) == len(batch.x_osgb[0])
assert len(batch.x_osgb[1]) == len(batch.y_osgb[1])
assert len(batch.x_osgb[2]) > 0
# assert T0_DT in batch[3].keys()
def test_drop_gsp_north_of_boundary(test_data_folder):
"""Test that dropping GSP north of a boundary works"""
gsp = GSPDataSource(
zarr_path=f"{test_data_folder}/gsp/test.zarr",
start_datetime=datetime(2020, 4, 1),
end_datetime=datetime(2020, 4, 2),
history_minutes=30,
forecast_minutes=60,
image_size_pixels=64,
meters_per_pixel=2000,
northern_boundary_osgb=None,
)
# remove all gsp systems
gsp_power, metadata = drop_gsp_north_of_boundary(
gsp.gsp_power, gsp.metadata, northern_boundary_osgb=0
)
assert len(gsp_power.columns) == 0
assert len(metadata) == 0
# remove half the systems
north_osgb_median = int(gsp.metadata.location_y.median())
gsp_power, metadata = drop_gsp_north_of_boundary(
gsp.gsp_power, gsp.metadata, northern_boundary_osgb=north_osgb_median
)
assert len(gsp_power.columns) == len(gsp.gsp_power.columns) / 2
assert len(metadata) == len(gsp.metadata) / 2
|
[
"nowcasting_dataset.data_sources.gsp.gsp_data_source.drop_gsp_north_of_boundary",
"pandas.Timestamp",
"nowcasting_dataset.geospatial.osgb_to_lat_lon",
"os.path.dirname",
"datetime.datetime"
] |
[((1572, 1613), 'nowcasting_dataset.geospatial.osgb_to_lat_lon', 'osgb_to_lat_lon', (['locations_x', 'locations_y'], {}), '(locations_x, locations_y)\n', (1587, 1613), False, 'from nowcasting_dataset.geospatial import osgb_to_lat_lon\n'), ((3490, 3510), 'datetime.datetime', 'datetime', (['(2020)', '(4)', '(1)'], {}), '(2020, 4, 1)\n', (3498, 3510), False, 'from datetime import datetime\n'), ((3524, 3544), 'datetime.datetime', 'datetime', (['(2020)', '(4)', '(1)'], {}), '(2020, 4, 1)\n', (3532, 3544), False, 'from datetime import datetime\n'), ((5853, 5938), 'nowcasting_dataset.data_sources.gsp.gsp_data_source.drop_gsp_north_of_boundary', 'drop_gsp_north_of_boundary', (['gsp.gsp_power', 'gsp.metadata'], {'northern_boundary_osgb': '(0)'}), '(gsp.gsp_power, gsp.metadata,\n northern_boundary_osgb=0)\n', (5879, 5938), False, 'from nowcasting_dataset.data_sources.gsp.gsp_data_source import GSPDataSource, drop_gsp_north_of_boundary\n'), ((6137, 6238), 'nowcasting_dataset.data_sources.gsp.gsp_data_source.drop_gsp_north_of_boundary', 'drop_gsp_north_of_boundary', (['gsp.gsp_power', 'gsp.metadata'], {'northern_boundary_osgb': 'north_osgb_median'}), '(gsp.gsp_power, gsp.metadata,\n northern_boundary_osgb=north_osgb_median)\n', (6163, 6238), False, 'from nowcasting_dataset.data_sources.gsp.gsp_data_source import GSPDataSource, drop_gsp_north_of_boundary\n'), ((376, 420), 'os.path.dirname', 'os.path.dirname', (['nowcasting_dataset.__file__'], {}), '(nowcasting_dataset.__file__)\n', (391, 420), False, 'import os\n'), ((818, 862), 'os.path.dirname', 'os.path.dirname', (['nowcasting_dataset.__file__'], {}), '(nowcasting_dataset.__file__)\n', (833, 862), False, 'import os\n'), ((1838, 1882), 'os.path.dirname', 'os.path.dirname', (['nowcasting_dataset.__file__'], {}), '(nowcasting_dataset.__file__)\n', (1853, 1882), False, 'import os\n'), ((3421, 3465), 'os.path.dirname', 'os.path.dirname', (['nowcasting_dataset.__file__'], {}), '(nowcasting_dataset.__file__)\n', (3436, 3465), False, 'import os\n'), ((4254, 4290), 'pandas.Timestamp', 'pd.Timestamp', (['example.time[0].values'], {}), '(example.time[0].values)\n', (4266, 4290), True, 'import pandas as pd\n'), ((4312, 4348), 'pandas.Timestamp', 'pd.Timestamp', (['example.time[0].values'], {}), '(example.time[0].values)\n', (4324, 4348), True, 'import pandas as pd\n'), ((4446, 4490), 'os.path.dirname', 'os.path.dirname', (['nowcasting_dataset.__file__'], {}), '(nowcasting_dataset.__file__)\n', (4461, 4490), False, 'import os\n'), ((536, 556), 'datetime.datetime', 'datetime', (['(2020)', '(4)', '(1)'], {}), '(2020, 4, 1)\n', (544, 556), False, 'from datetime import datetime\n'), ((579, 599), 'datetime.datetime', 'datetime', (['(2020)', '(4)', '(2)'], {}), '(2020, 4, 2)\n', (587, 599), False, 'from datetime import datetime\n'), ((980, 1000), 'datetime.datetime', 'datetime', (['(2020)', '(4)', '(1)'], {}), '(2020, 4, 1)\n', (988, 1000), False, 'from datetime import datetime\n'), ((1023, 1043), 'datetime.datetime', 'datetime', (['(2020)', '(4)', '(2)'], {}), '(2020, 4, 2)\n', (1031, 1043), False, 'from datetime import datetime\n'), ((2000, 2020), 'datetime.datetime', 'datetime', (['(2020)', '(4)', '(1)'], {}), '(2020, 4, 1)\n', (2008, 2020), False, 'from datetime import datetime\n'), ((2043, 2063), 'datetime.datetime', 'datetime', (['(2020)', '(4)', '(2)'], {}), '(2020, 4, 2)\n', (2051, 2063), False, 'from datetime import datetime\n'), ((3654, 3674), 'datetime.datetime', 'datetime', (['(2020)', '(4)', '(1)'], {}), '(2020, 4, 1)\n', (3662, 3674), False, 'from datetime import datetime\n'), ((3697, 3717), 'datetime.datetime', 'datetime', (['(2020)', '(4)', '(2)'], {}), '(2020, 4, 2)\n', (3705, 3717), False, 'from datetime import datetime\n'), ((4608, 4628), 'datetime.datetime', 'datetime', (['(2020)', '(4)', '(1)'], {}), '(2020, 4, 1)\n', (4616, 4628), False, 'from datetime import datetime\n'), ((4651, 4671), 'datetime.datetime', 'datetime', (['(2020)', '(4)', '(2)'], {}), '(2020, 4, 2)\n', (4659, 4671), False, 'from datetime import datetime\n'), ((5571, 5591), 'datetime.datetime', 'datetime', (['(2020)', '(4)', '(1)'], {}), '(2020, 4, 1)\n', (5579, 5591), False, 'from datetime import datetime\n'), ((5614, 5634), 'datetime.datetime', 'datetime', (['(2020)', '(4)', '(2)'], {}), '(2020, 4, 2)\n', (5622, 5634), False, 'from datetime import datetime\n')]
|
# coding=utf-8
# Copyright (C) 2019 Alibaba Group Holding Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# add BiLSTM as encoder
import torch.nn as nn
import torch.nn.functional as f
import torch
from . import Conv1d
class Encoder(nn.Module):
def __init__(self, args, input_size):
super().__init__()
self.dropout = args.dropout
self.encoders = nn.ModuleList([Conv1d(
in_channels=input_size if i == 0 else args.hidden_size,
out_channels=args.hidden_size,
kernel_sizes=args.kernel_sizes) for i in range(args.enc_layers)])
def forward(self, x, mask):
x = x.transpose(1, 2) # B x C x L
mask = mask.transpose(1, 2)
for i, encoder in enumerate(self.encoders):
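            # zero out padded positions so the convolution does not mix padding into real tokens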
x.masked_fill_(~mask, 0.)
if i > 0:
x = f.dropout(x, self.dropout, self.training)
x = encoder(x)
x = f.dropout(x, self.dropout, self.training)
return x.transpose(1, 2) # B x L x C
def sort_by_seq_lens(batch, sequences_lengths, descending=True):
sorted_seq_lens, sorting_index =\
sequences_lengths.sort(0, descending=descending)
sorted_batch = batch.index_select(0, sorting_index)
idx_range = torch.arange(0, len(sequences_lengths)).to(sequences_lengths.device)
_, reverse_mapping = sorting_index.sort(0, descending=False)
restoration_index = idx_range.index_select(0, reverse_mapping)
return sorted_batch, sorted_seq_lens, sorting_index, restoration_index
class Seq2SeqEncoder(nn.Module):
def __init__(self,
rnn_type,
input_size,
hidden_size,
num_layers=1,
bias=True,
dropout=0.2,
bidirectional=False):
assert issubclass(rnn_type, nn.RNNBase),\
"rnn_type must be a class inheriting from torch.nn.RNNBase"
super(Seq2SeqEncoder, self).__init__()
self.rnn_type = rnn_type
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.dropout = dropout
self.bidirectional = bidirectional
self._encoder = rnn_type(input_size,
hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=True,
dropout=dropout,
bidirectional=bidirectional)
def forward(self, sequences_batch, sequences_lengths):
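        # NOTE: sequences_lengths is accepted for interface compatibility but is not used here;
        # the RNN simply runs over the padded batch with a zero initial state.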
outputs, _ = self._encoder(sequences_batch, None)
return outputs
|
[
"torch.nn.functional.dropout"
] |
[((1432, 1473), 'torch.nn.functional.dropout', 'f.dropout', (['x', 'self.dropout', 'self.training'], {}), '(x, self.dropout, self.training)\n', (1441, 1473), True, 'import torch.nn.functional as f\n'), ((1351, 1392), 'torch.nn.functional.dropout', 'f.dropout', (['x', 'self.dropout', 'self.training'], {}), '(x, self.dropout, self.training)\n', (1360, 1392), True, 'import torch.nn.functional as f\n')]
|
import argparse
import os
import draw_his
import train
import test
from get_data import import_data
from model_zoo import googLeNet, resnet, load_model
import utils
import ensembel_model
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', action='store', type=str, default="0")
parser.add_argument('--lr', action='store', type=float, default=0.001)
parser.add_argument('--epochs', action='store', type=int, default=10)
parser.add_argument('--train_v', action='store', type=str, default="1.0")
parser.add_argument('--load_v', action='store', type=str, default="1.0")
default_load_data_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "get_data/data")
# default_load_data_dir = "/media/Data/datasets/cifar/cifar-10-python/data"
parser.add_argument('--load_data_dir', action='store', type=str, default=default_load_data_dir)
parser.add_argument('--retrain', type=lambda x: bool(str2bool(x)), default=False)
parser.add_argument('--regularize', type=lambda x: bool(str2bool(x)), default=False)
parser.add_argument('--batch_size', action='store', type=int, default=32)
parser.add_argument('--T', action='store', type=float, default=10)
parser.add_argument('--alpha', action='store', type=float, default=0.1)
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = "{}".format(args.gpu)
print("[info]: use gpu: {}".format(os.environ["CUDA_VISIBLE_DEVICES"]))
print("[info]: set learning rate: {}".format(args.lr))
print("[info]: epochs: {}".format(args.epochs))
print("[info]: train_version: {}".format(args.train_v))
print("[info]: load_version: {}".format(args.load_v))
print("[info]: retrain: {}".format(args.retrain))
print("[info]: regularize: {}".format(args.regularize))
print("[info]: batch_size: {}".format(args.batch_size))
print("[info]: T: {}".format(args.T))
print("[info]: alpha: {}".format(args.alpha))
# my_util = utils.GoogLeNetUtils()
my_util = utils.ResNetUtils()
# my_util = utils.DistillModelUtils(
# cumbersome_model=ensembel_model.my_ensembel_model(),
# T=args.T,
# alpha=args.alpha
# )
# new_model = googLeNet.my_googLeNet
new_model = resnet.my_resnet
model, create_new = load_model.load_model(
version=args.load_v,
new_model=new_model,
just_weights=False,
retrain=args.retrain,
to_cuda=True
)
train_set, valid_set, test_set = import_data.import_dataset(
load_dir=args.load_data_dir,
train_to_cuda=False,
test_to_cuda=True
)
train.train(
model=model,
train_set=train_set,
valid_set=valid_set,
lr=args.lr,
epoch=args.epochs,
batch_size=args.batch_size,
regularize=args.regularize,
train_version=args.train_v,
train_loss_function=my_util.loss_for_train,
get_true_pred=my_util.get_true_pred,
eval_loss_function=my_util.loss_for_eval,
detach_pred=my_util.detach_pred,
learn_rate_schedule=my_util.learn_rate_schedule
)
draw_his.draw_his(version=args.train_v, show=False)
model = model.cpu()
load_model.save_model(args.train_v, model)
test.test(
test_version=args.train_v,
test_set=test_set,
new_model=new_model,
batch_size=args.batch_size,
get_true_pred=my_util.get_true_pred,
eval_loss_function=my_util.loss_for_eval,
detach_pred=my_util.detach_pred,
just_weights=False
)
|
[
"draw_his.draw_his",
"model_zoo.load_model.load_model",
"argparse.ArgumentParser",
"os.path.realpath",
"test.test",
"get_data.import_data.import_dataset",
"utils.ResNetUtils",
"model_zoo.load_model.save_model",
"train.train",
"argparse.ArgumentTypeError"
] |
[((442, 467), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (465, 467), False, 'import argparse\n'), ((2142, 2161), 'utils.ResNetUtils', 'utils.ResNetUtils', ([], {}), '()\n', (2159, 2161), False, 'import utils\n'), ((2391, 2514), 'model_zoo.load_model.load_model', 'load_model.load_model', ([], {'version': 'args.load_v', 'new_model': 'new_model', 'just_weights': '(False)', 'retrain': 'args.retrain', 'to_cuda': '(True)'}), '(version=args.load_v, new_model=new_model,\n just_weights=False, retrain=args.retrain, to_cuda=True)\n', (2412, 2514), False, 'from model_zoo import googLeNet, resnet, load_model\n'), ((2567, 2666), 'get_data.import_data.import_dataset', 'import_data.import_dataset', ([], {'load_dir': 'args.load_data_dir', 'train_to_cuda': '(False)', 'test_to_cuda': '(True)'}), '(load_dir=args.load_data_dir, train_to_cuda=False,\n test_to_cuda=True)\n', (2593, 2666), False, 'from get_data import import_data\n'), ((2678, 3088), 'train.train', 'train.train', ([], {'model': 'model', 'train_set': 'train_set', 'valid_set': 'valid_set', 'lr': 'args.lr', 'epoch': 'args.epochs', 'batch_size': 'args.batch_size', 'regularize': 'args.regularize', 'train_version': 'args.train_v', 'train_loss_function': 'my_util.loss_for_train', 'get_true_pred': 'my_util.get_true_pred', 'eval_loss_function': 'my_util.loss_for_eval', 'detach_pred': 'my_util.detach_pred', 'learn_rate_schedule': 'my_util.learn_rate_schedule'}), '(model=model, train_set=train_set, valid_set=valid_set, lr=args.\n lr, epoch=args.epochs, batch_size=args.batch_size, regularize=args.\n regularize, train_version=args.train_v, train_loss_function=my_util.\n loss_for_train, get_true_pred=my_util.get_true_pred, eval_loss_function\n =my_util.loss_for_eval, detach_pred=my_util.detach_pred,\n learn_rate_schedule=my_util.learn_rate_schedule)\n', (2689, 3088), False, 'import train\n'), ((3120, 3171), 'draw_his.draw_his', 'draw_his.draw_his', ([], {'version': 'args.train_v', 'show': '(False)'}), '(version=args.train_v, show=False)\n', (3137, 3171), False, 'import draw_his\n'), ((3192, 3234), 'model_zoo.load_model.save_model', 'load_model.save_model', (['args.train_v', 'model'], {}), '(args.train_v, model)\n', (3213, 3234), False, 'from model_zoo import googLeNet, resnet, load_model\n'), ((3236, 3485), 'test.test', 'test.test', ([], {'test_version': 'args.train_v', 'test_set': 'test_set', 'new_model': 'new_model', 'batch_size': 'args.batch_size', 'get_true_pred': 'my_util.get_true_pred', 'eval_loss_function': 'my_util.loss_for_eval', 'detach_pred': 'my_util.detach_pred', 'just_weights': '(False)'}), '(test_version=args.train_v, test_set=test_set, new_model=new_model,\n batch_size=args.batch_size, get_true_pred=my_util.get_true_pred,\n eval_loss_function=my_util.loss_for_eval, detach_pred=my_util.\n detach_pred, just_weights=False)\n', (3245, 3485), False, 'import test\n'), ((878, 904), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (894, 904), False, 'import os\n'), ((377, 430), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['"""Boolean value expected."""'], {}), "('Boolean value expected.')\n", (403, 430), False, 'import argparse\n')]
|
from subprocess import CalledProcessError, check_call, DEVNULL
from typing import Any, List, Sequence
import dotbot
class Apt(dotbot.Plugin):
def can_handle(self, directive: str) -> bool:
return directive == "apt"
def handle(self, directive: str, packages: List[str]) -> bool:
success = self._run(["sudo", "apt", "update"], "Updating APT") \
and self._run(["sudo", "apt", "install", "-y"] + packages,
"Installing the APT packages: {}".format(", ".join(packages)))
if success:
self._log.info("APT packages installed successfully")
return success
def _run(self, command: Sequence[Any], low_info: str) -> bool:
self._log.lowinfo(low_info)
try:
check_call(command, stdout=DEVNULL, stderr=DEVNULL)
return True
except CalledProcessError as e:
self._log.error(e)
return False
|
[
"subprocess.check_call"
] |
[((782, 833), 'subprocess.check_call', 'check_call', (['command'], {'stdout': 'DEVNULL', 'stderr': 'DEVNULL'}), '(command, stdout=DEVNULL, stderr=DEVNULL)\n', (792, 833), False, 'from subprocess import CalledProcessError, check_call, DEVNULL\n')]
|
import matplotlib.pyplot as plt
import numpy as np
x = np.linspace(0, 2*np.pi, 10)
y = np.sin(x)  # original function
xvals = np.linspace(0, 2*np.pi, 50)
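# linearly interpolate the coarse sine samples onto the denser grid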
yinterp = np.interp(xvals, x, y)
plt.plot(x, y, 'o')
plt.plot(xvals, yinterp, '-x')
plt.show()
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.sin",
"numpy.linspace",
"numpy.interp"
] |
[((56, 85), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(10)'], {}), '(0, 2 * np.pi, 10)\n', (67, 85), True, 'import numpy as np\n'), ((88, 97), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (94, 97), True, 'import numpy as np\n'), ((124, 153), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(50)'], {}), '(0, 2 * np.pi, 50)\n', (135, 153), True, 'import numpy as np\n'), ((162, 184), 'numpy.interp', 'np.interp', (['xvals', 'x', 'y'], {}), '(xvals, x, y)\n', (171, 184), True, 'import numpy as np\n'), ((185, 204), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""o"""'], {}), "(x, y, 'o')\n", (193, 204), True, 'import matplotlib.pyplot as plt\n'), ((205, 235), 'matplotlib.pyplot.plot', 'plt.plot', (['xvals', 'yinterp', '"""-x"""'], {}), "(xvals, yinterp, '-x')\n", (213, 235), True, 'import matplotlib.pyplot as plt\n'), ((236, 246), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (244, 246), True, 'import matplotlib.pyplot as plt\n')]
|
from django.conf.urls import patterns, include, url
from django.views.generic.simple import direct_to_template
from .views import SubmissionView, SubmissionListView, SubmissionSuccess
urlpatterns = patterns('',
url(r'^$',
SubmissionView.as_view(),
name='submission',
),
url(r'^success/$',
SubmissionSuccess.as_view(),
name='success_submission',
),
url(r'^end/$',
direct_to_template, {'template': 'submission/end.html'},
name='end',
),
url(r'^votar/$',
SubmissionListView.as_view(),
name='vote',
),
url(r'^votar/erro/$',
direct_to_template, {'template': 'submission/error.html'},
name='error',
),
url(r'^votar/success/$',
direct_to_template, {'template': 'submission/success.html'},
name='success',
),
)
|
[
"django.conf.urls.url"
] |
[((403, 490), 'django.conf.urls.url', 'url', (['"""^end/$"""', 'direct_to_template', "{'template': 'submission/end.html'}"], {'name': '"""end"""'}), "('^end/$', direct_to_template, {'template': 'submission/end.html'}, name\n ='end')\n", (406, 490), False, 'from django.conf.urls import patterns, include, url\n'), ((602, 699), 'django.conf.urls.url', 'url', (['"""^votar/erro/$"""', 'direct_to_template', "{'template': 'submission/error.html'}"], {'name': '"""error"""'}), "('^votar/erro/$', direct_to_template, {'template':\n 'submission/error.html'}, name='error')\n", (605, 699), False, 'from django.conf.urls import patterns, include, url\n'), ((724, 828), 'django.conf.urls.url', 'url', (['"""^votar/success/$"""', 'direct_to_template', "{'template': 'submission/success.html'}"], {'name': '"""success"""'}), "('^votar/success/$', direct_to_template, {'template':\n 'submission/success.html'}, name='success')\n", (727, 828), False, 'from django.conf.urls import patterns, include, url\n')]
|
import pytest
from fixtures import get_title, get_url
from craigslist_meta import Site
selector = "area"
# use a site key with areas
site_key = "sfbay"
@pytest.fixture
def area():
"""Get an instance of Area."""
area = next(iter(Site(site_key)))
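    # remember the first area's key so the other tests can compare attributes against it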
global area_key
area_key = area.key
yield area
def test_key(area):
"""Test `key` attribute of area instance."""
expected_key = area._key
assert area_key == expected_key
def test_title(area, get_title):
"""Test `title` attribute of area instance."""
area_title = area.title
expected_title = get_title(selector, area_key)
assert area_title == expected_title
def test_url(area, get_url):
"""Test `url` attribute of area instance."""
area_url = area.url
expected_url = get_url(selector, area_key)
assert area_url == expected_url
def test_all_raises(area):
"""`all` class method should raise an exception for Area."""
with pytest.raises(AttributeError, match="'Area' object has no attribute 'all'"):
area.all()
def test_keys_raises(area):
"""`keys` class method should raise an exception for Area."""
with pytest.raises(AttributeError, match="'Area' object has no attribute 'keys'"):
area.keys
def test_children_raises(area):
"""`children` attribute should raise an exception for Area."""
with pytest.raises(AttributeError, match="'Area' object has no attribute 'children'"):
area.children
|
[
"fixtures.get_title",
"pytest.raises",
"fixtures.get_url",
"craigslist_meta.Site"
] |
[((586, 615), 'fixtures.get_title', 'get_title', (['selector', 'area_key'], {}), '(selector, area_key)\n', (595, 615), False, 'from fixtures import get_title, get_url\n'), ((779, 806), 'fixtures.get_url', 'get_url', (['selector', 'area_key'], {}), '(selector, area_key)\n', (786, 806), False, 'from fixtures import get_title, get_url\n'), ((946, 1021), 'pytest.raises', 'pytest.raises', (['AttributeError'], {'match': '"""\'Area\' object has no attribute \'all\'"""'}), '(AttributeError, match="\'Area\' object has no attribute \'all\'")\n', (959, 1021), False, 'import pytest\n'), ((1147, 1223), 'pytest.raises', 'pytest.raises', (['AttributeError'], {'match': '"""\'Area\' object has no attribute \'keys\'"""'}), '(AttributeError, match="\'Area\' object has no attribute \'keys\'")\n', (1160, 1223), False, 'import pytest\n'), ((1353, 1438), 'pytest.raises', 'pytest.raises', (['AttributeError'], {'match': '"""\'Area\' object has no attribute \'children\'"""'}), '(AttributeError, match="\'Area\' object has no attribute \'children\'"\n )\n', (1366, 1438), False, 'import pytest\n'), ((239, 253), 'craigslist_meta.Site', 'Site', (['site_key'], {}), '(site_key)\n', (243, 253), False, 'from craigslist_meta import Site\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import argparse
import numpy as np
import mxnet as mx
ctx = mx.cpu(0)
image_size = (112, 112)
prefix = "../models/resnet-50"
epoch = 0
sym, arg_params, aux_params = mx.model.load_checkpoint(prefix,
epoch)
all_layers = sym.get_internals()
sym = all_layers['relu1_output']
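# drop the fc1 classifier weights so the re-saved checkpoint only contains the feature extractor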
dellist = []
for k, v in arg_params.items():
if k.startswith('fc1'):
dellist.append(k)
for d in dellist:
del arg_params[d]
mx.model.save_checkpoint(prefix+"s", 0, sym, arg_params, aux_params)
digraph = mx.viz.plot_network(sym, shape={'data':(1,3,256,256)},
node_attrs={"fixedsize":"false"})
digraph.view()
|
[
"mxnet.model.save_checkpoint",
"mxnet.model.load_checkpoint",
"mxnet.cpu",
"mxnet.viz.plot_network"
] |
[((193, 202), 'mxnet.cpu', 'mx.cpu', (['(0)'], {}), '(0)\n', (199, 202), True, 'import mxnet as mx\n'), ((298, 337), 'mxnet.model.load_checkpoint', 'mx.model.load_checkpoint', (['prefix', 'epoch'], {}), '(prefix, epoch)\n', (322, 337), True, 'import mxnet as mx\n'), ((594, 664), 'mxnet.model.save_checkpoint', 'mx.model.save_checkpoint', (["(prefix + 's')", '(0)', 'sym', 'arg_params', 'aux_params'], {}), "(prefix + 's', 0, sym, arg_params, aux_params)\n", (618, 664), True, 'import mxnet as mx\n'), ((674, 772), 'mxnet.viz.plot_network', 'mx.viz.plot_network', (['sym'], {'shape': "{'data': (1, 3, 256, 256)}", 'node_attrs': "{'fixedsize': 'false'}"}), "(sym, shape={'data': (1, 3, 256, 256)}, node_attrs={\n 'fixedsize': 'false'})\n", (693, 772), True, 'import mxnet as mx\n')]
|
from flying_ioc import IocManager, IocFactory
class TSingleton1:
def __init__(self):
pass
class TSingleton2:
def __init__(self):
pass
class TSingleton3(TSingleton1):
def __init__(self, ts: TSingleton2):
super().__init__()
self.ts = ts
class TSingleton3dot2(TSingleton3):
def __init__(self, **kwargs):
super().__init__(**kwargs)
class TSingleton3dot1(TSingleton3):
def __init__(self, **kwargs):
super().__init__(**kwargs)
class TSingleton3dot3:
def __init__(self, ts: TSingleton2):
self.ts = ts
class MyIocFactory(IocFactory):
@staticmethod
def get_instance(ioc_manager, name, frame_info):
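        # choose the concrete implementation based on the calling function's name or the requested service name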
if frame_info.function == 'test_factory_container':
return ioc_manager.TSingleton3dot1
if name == 'TSingleton3':
return ioc_manager.TSingleton3dot2
return ioc_manager.TSingleton3dot3
def test_factory_container():
ioc = IocManager(stats=True)
ioc.set_class(name='TSingleton1', cls=TSingleton1, singleton=True)
ioc.set_class(name='TSingleton2', cls=TSingleton2, singleton=False)
ioc.set_factory(name='TSingleton3', cls=MyIocFactory)
ioc.set_class(name='TSingleton3dot1', cls=TSingleton3dot1, singleton=False)
ioc.set_class(name='TSingleton3dot2', cls=TSingleton3dot2, singleton=False)
ioc.set_class(name='TSingleton3dot3', cls=TSingleton3dot3, singleton=False)
assert ioc.TSingleton1 is ioc.TSingleton1
ts3 = ioc.TSingleton3
assert isinstance(ts3, TSingleton3dot1)
ioc.print_stats()
|
[
"flying_ioc.IocManager"
] |
[((966, 988), 'flying_ioc.IocManager', 'IocManager', ([], {'stats': '(True)'}), '(stats=True)\n', (976, 988), False, 'from flying_ioc import IocManager, IocFactory\n')]
|
from time import time
from hashlib import md5
from datetime import datetime
## Hashing functions:
# Slower, 64 bytes
#sha256 = sha256('content').hexdigest()
# Faster, 32 bytes
#md5 = md5('content').hexdigest()
class Block:
timestamp = ''
prev_hash = ''
content = ''
nonce = 0
hash = ''
def __init__(self, timestamp, prev_hash, content, nonce, hash):
self.timestamp = timestamp
self.prev_hash = prev_hash
self.content = content
self.nonce = nonce
self.hash = hash
def serialize(self):
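        # the block hash covers prev_hash + content + nonce (the timestamp is not part of the hash)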
return self.prev_hash+self.content+str(self.nonce)
class Blockchain:
MAX_NONCE = 999999 # To prevent infinite mining
prefix = '00000' # Mining difficulty
blocks = []
# Genesis block:
def __init__(self):
nonce = 622722 # For 00000
self.blocks.append(Block(datetime.now(), ''.zfill(32), 'Genesis', nonce,
md5((''.zfill(32)+'Genesis'+str(nonce)).encode('utf-8')).hexdigest()))
def add_block(self, content = ''):
nonce = 0
prev_hash = self.blocks[-1].hash
hash = md5((prev_hash+content+str(nonce)).encode('utf-8')).hexdigest()
# Mining:
while hash[0:len(self.prefix)] != self.prefix and nonce < self.MAX_NONCE:
nonce += 1
hash = md5((prev_hash+content+str(nonce)).encode('utf-8')).hexdigest()
if nonce < self.MAX_NONCE:
self.blocks.append(Block(datetime.now(), prev_hash, content, nonce,
hash))
else:
print('Unable to mine block #'+str(len(self.blocks)+1))
def print_chain(self):
i = 1
for block in self.blocks:
print('BLOCK #%d =======================' % i); i += 1
print(block.prev_hash)
print(block.timestamp)
print(block.content)
print(block.hash)
print('================================\n\t\t|\n\t\tV')
def check_block(self, block_num):
if block_num > 0:
block = self.blocks[block_num-1]
if md5((block.serialize()).encode('utf-8')).hexdigest() == block.hash:
print('Block #%d is valid' % block_num)
else:
print('Block #%d is invalid' % block_num)
else:
print('Invalid block number')
def check_chain(self):
for i in range(1, len(self.blocks)+1):
self.check_block(i)
b = Blockchain()
t1 = time()
b.add_block('Johnny')
b.add_block('Noelle')
t2 = time()
b.print_chain()
print('Elapsed time: %.2fs' % (t2-t1))
b.check_chain()
|
[
"datetime.datetime.now",
"time.time"
] |
[((2495, 2501), 'time.time', 'time', ([], {}), '()\n', (2499, 2501), False, 'from time import time\n'), ((2551, 2557), 'time.time', 'time', ([], {}), '()\n', (2555, 2557), False, 'from time import time\n'), ((862, 876), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (874, 876), False, 'from datetime import datetime\n'), ((1475, 1489), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1487, 1489), False, 'from datetime import datetime\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class Distance:
def __init__(self):
self.graph = tf.Graph()
self.sess = tf.Session(graph=self.graph)
with self.graph.as_default():
self.b_tf = tf.placeholder(shape=[None, 512], dtype=tf.float32)
self.A_tf = tf.placeholder(shape=[None, 512], dtype=tf.float32)
self.distance_tf = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(self.A_tf, tf.expand_dims(self.b_tf, 1))), axis=2))
def __del__(self):
self.sess.close()
def fit(self, A, b):
return self.sess.run(self.distance_tf, feed_dict={self.A_tf: A, self.b_tf: b})
|
[
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.Graph",
"tensorflow.expand_dims"
] |
[((197, 207), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (205, 207), True, 'import tensorflow as tf\n'), ((228, 256), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'self.graph'}), '(graph=self.graph)\n', (238, 256), True, 'import tensorflow as tf\n'), ((319, 370), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[None, 512]', 'dtype': 'tf.float32'}), '(shape=[None, 512], dtype=tf.float32)\n', (333, 370), True, 'import tensorflow as tf\n'), ((395, 446), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[None, 512]', 'dtype': 'tf.float32'}), '(shape=[None, 512], dtype=tf.float32)\n', (409, 446), True, 'import tensorflow as tf\n'), ((533, 561), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.b_tf', '(1)'], {}), '(self.b_tf, 1)\n', (547, 561), True, 'import tensorflow as tf\n')]
|
#!/usr/bin/env python
# Copyright (c) 2015 - 2017, Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""``test_networkd.py``
`NetworkD Unittests`
"""
from unittest.mock import MagicMock
from testlib.linux.networkd import NetworkD
class TestNetworkD(object):
def test_single_mgmt_port(self):
run_command = MagicMock()
n = NetworkD(run_command, ["test"])
n.clear_settings()
assert run_command.call_args_list[0][0][
0] == "find /etc/systemd/network/ -mindepth 1 -not \\( -name 'test.network' -or " \
"-name 'test.netdev' -or -name 'test.link' -or -name 'test.swport' \\) -delete"
def test_multiple_mgmt_port(self):
run_command = MagicMock()
n = NetworkD(run_command, ["test1", "test2"])
n.clear_settings()
assert run_command.call_args_list[0][0][
0] == "find /etc/systemd/network/ -mindepth 1 -not \\( -name 'test1.network' -or " \
"-name 'test1.netdev' -or -name 'test1.link' -or -name 'test1.swport' -or " \
"-name 'test2.network' -or -name 'test2.netdev' -or -name 'test2.link' -or " \
"-name 'test2.swport' \\) -delete"
def test_empty_list(self):
run_command = MagicMock()
n = NetworkD(run_command, [])
n.clear_settings()
assert run_command.call_args_list[0][0][
0] == "find /etc/systemd/network/ -mindepth 1 -not \\( \\) -delete"
def test_extra_excludes_are_appended(self):
run_command = MagicMock()
n = NetworkD(run_command, ["test1", "test2"])
n.clear_settings(exclude_ports=["extra1", "extra2"])
assert run_command.call_args_list[0][0][
0] == "find /etc/systemd/network/ -mindepth 1 -not \\( -name 'test1.network' -or " \
"-name 'test1.netdev' -or -name 'test1.link' -or -name 'test1.swport' -or " \
"-name 'test2.network' -or -name 'test2.netdev' -or -name 'test2.link' -or " \
"-name 'test2.swport' -or -name 'extra1.network' -or -name 'extra1.netdev' -or " \
"-name 'extra1.link' -or -name 'extra1.swport' -or -name 'extra2.network' -or " \
"-name 'extra2.netdev' -or -name 'extra2.link' -or -name 'extra2.swport' \\) -delete"
def test_just_extra_excludes(self):
run_command = MagicMock()
n = NetworkD(run_command, [])
n.clear_settings(exclude_ports=["extra1", "extra2"])
assert run_command.call_args_list[0][0][
0] == "find /etc/systemd/network/ -mindepth 1 -not \\( -name 'extra1.network' -or " \
"-name 'extra1.netdev' -or -name 'extra1.link' -or -name 'extra1.swport' -or " \
"-name 'extra2.network' -or -name 'extra2.netdev' -or -name 'extra2.link' -or " \
"-name 'extra2.swport' \\) -delete"
|
[
"unittest.mock.MagicMock",
"testlib.linux.networkd.NetworkD"
] |
[((838, 849), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (847, 849), False, 'from unittest.mock import MagicMock\n'), ((862, 893), 'testlib.linux.networkd.NetworkD', 'NetworkD', (['run_command', "['test']"], {}), "(run_command, ['test'])\n", (870, 893), False, 'from testlib.linux.networkd import NetworkD\n'), ((1226, 1237), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (1235, 1237), False, 'from unittest.mock import MagicMock\n'), ((1250, 1291), 'testlib.linux.networkd.NetworkD', 'NetworkD', (['run_command', "['test1', 'test2']"], {}), "(run_command, ['test1', 'test2'])\n", (1258, 1291), False, 'from testlib.linux.networkd import NetworkD\n'), ((1765, 1776), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (1774, 1776), False, 'from unittest.mock import MagicMock\n'), ((1789, 1814), 'testlib.linux.networkd.NetworkD', 'NetworkD', (['run_command', '[]'], {}), '(run_command, [])\n', (1797, 1814), False, 'from testlib.linux.networkd import NetworkD\n'), ((2043, 2054), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (2052, 2054), False, 'from unittest.mock import MagicMock\n'), ((2067, 2108), 'testlib.linux.networkd.NetworkD', 'NetworkD', (['run_command', "['test1', 'test2']"], {}), "(run_command, ['test1', 'test2'])\n", (2075, 2108), False, 'from testlib.linux.networkd import NetworkD\n'), ((2877, 2888), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (2886, 2888), False, 'from unittest.mock import MagicMock\n'), ((2901, 2926), 'testlib.linux.networkd.NetworkD', 'NetworkD', (['run_command', '[]'], {}), '(run_command, [])\n', (2909, 2926), False, 'from testlib.linux.networkd import NetworkD\n')]
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: batch_get_tool_detail.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tool_sdk.model.tool import tool_pb2 as tool__sdk_dot_model_dot_tool_dot_tool__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='batch_get_tool_detail.proto',
package='basic',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x1b\x62\x61tch_get_tool_detail.proto\x12\x05\x62\x61sic\x1a\x1etool_sdk/model/tool/tool.proto\",\n\x19\x42\x61tchGetToolDetailRequest\x12\x0f\n\x07toolIds\x18\x01 \x01(\t\"d\n\x1a\x42\x61tchGetToolDetailResponse\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\r\n\x05\x65rror\x18\x02 \x01(\t\x12\x0f\n\x07message\x18\x03 \x01(\t\x12\x18\n\x04\x64\x61ta\x18\x04 \x03(\x0b\x32\n.tool.Tool\"\x86\x01\n!BatchGetToolDetailResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12/\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32!.basic.BatchGetToolDetailResponseb\x06proto3')
,
dependencies=[tool__sdk_dot_model_dot_tool_dot_tool__pb2.DESCRIPTOR,])
_BATCHGETTOOLDETAILREQUEST = _descriptor.Descriptor(
name='BatchGetToolDetailRequest',
full_name='basic.BatchGetToolDetailRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='toolIds', full_name='basic.BatchGetToolDetailRequest.toolIds', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=70,
serialized_end=114,
)
_BATCHGETTOOLDETAILRESPONSE = _descriptor.Descriptor(
name='BatchGetToolDetailResponse',
full_name='basic.BatchGetToolDetailResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='basic.BatchGetToolDetailResponse.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='basic.BatchGetToolDetailResponse.error', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='message', full_name='basic.BatchGetToolDetailResponse.message', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='basic.BatchGetToolDetailResponse.data', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=116,
serialized_end=216,
)
_BATCHGETTOOLDETAILRESPONSEWRAPPER = _descriptor.Descriptor(
name='BatchGetToolDetailResponseWrapper',
full_name='basic.BatchGetToolDetailResponseWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='basic.BatchGetToolDetailResponseWrapper.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='codeExplain', full_name='basic.BatchGetToolDetailResponseWrapper.codeExplain', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='basic.BatchGetToolDetailResponseWrapper.error', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='basic.BatchGetToolDetailResponseWrapper.data', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=219,
serialized_end=353,
)
_BATCHGETTOOLDETAILRESPONSE.fields_by_name['data'].message_type = tool__sdk_dot_model_dot_tool_dot_tool__pb2._TOOL
_BATCHGETTOOLDETAILRESPONSEWRAPPER.fields_by_name['data'].message_type = _BATCHGETTOOLDETAILRESPONSE
DESCRIPTOR.message_types_by_name['BatchGetToolDetailRequest'] = _BATCHGETTOOLDETAILREQUEST
DESCRIPTOR.message_types_by_name['BatchGetToolDetailResponse'] = _BATCHGETTOOLDETAILRESPONSE
DESCRIPTOR.message_types_by_name['BatchGetToolDetailResponseWrapper'] = _BATCHGETTOOLDETAILRESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
BatchGetToolDetailRequest = _reflection.GeneratedProtocolMessageType('BatchGetToolDetailRequest', (_message.Message,), {
'DESCRIPTOR' : _BATCHGETTOOLDETAILREQUEST,
'__module__' : 'batch_get_tool_detail_pb2'
# @@protoc_insertion_point(class_scope:basic.BatchGetToolDetailRequest)
})
_sym_db.RegisterMessage(BatchGetToolDetailRequest)
BatchGetToolDetailResponse = _reflection.GeneratedProtocolMessageType('BatchGetToolDetailResponse', (_message.Message,), {
'DESCRIPTOR' : _BATCHGETTOOLDETAILRESPONSE,
'__module__' : 'batch_get_tool_detail_pb2'
# @@protoc_insertion_point(class_scope:basic.BatchGetToolDetailResponse)
})
_sym_db.RegisterMessage(BatchGetToolDetailResponse)
BatchGetToolDetailResponseWrapper = _reflection.GeneratedProtocolMessageType('BatchGetToolDetailResponseWrapper', (_message.Message,), {
'DESCRIPTOR' : _BATCHGETTOOLDETAILRESPONSEWRAPPER,
'__module__' : 'batch_get_tool_detail_pb2'
# @@protoc_insertion_point(class_scope:basic.BatchGetToolDetailResponseWrapper)
})
_sym_db.RegisterMessage(BatchGetToolDetailResponseWrapper)
# @@protoc_insertion_point(module_scope)
|
[
"google.protobuf.symbol_database.Default",
"google.protobuf.descriptor.FieldDescriptor",
"google.protobuf.reflection.GeneratedProtocolMessageType"
] |
[((475, 501), 'google.protobuf.symbol_database.Default', '_symbol_database.Default', ([], {}), '()\n', (499, 501), True, 'from google.protobuf import symbol_database as _symbol_database\n'), ((6824, 7010), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', (['"""BatchGetToolDetailRequest"""', '(_message.Message,)', "{'DESCRIPTOR': _BATCHGETTOOLDETAILREQUEST, '__module__':\n 'batch_get_tool_detail_pb2'}"], {}), "('BatchGetToolDetailRequest', (\n _message.Message,), {'DESCRIPTOR': _BATCHGETTOOLDETAILREQUEST,\n '__module__': 'batch_get_tool_detail_pb2'})\n", (6864, 7010), True, 'from google.protobuf import reflection as _reflection\n'), ((7167, 7355), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', (['"""BatchGetToolDetailResponse"""', '(_message.Message,)', "{'DESCRIPTOR': _BATCHGETTOOLDETAILRESPONSE, '__module__':\n 'batch_get_tool_detail_pb2'}"], {}), "('BatchGetToolDetailResponse', (\n _message.Message,), {'DESCRIPTOR': _BATCHGETTOOLDETAILRESPONSE,\n '__module__': 'batch_get_tool_detail_pb2'})\n", (7207, 7355), True, 'from google.protobuf import reflection as _reflection\n'), ((7521, 7722), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', (['"""BatchGetToolDetailResponseWrapper"""', '(_message.Message,)', "{'DESCRIPTOR': _BATCHGETTOOLDETAILRESPONSEWRAPPER, '__module__':\n 'batch_get_tool_detail_pb2'}"], {}), "('BatchGetToolDetailResponseWrapper',\n (_message.Message,), {'DESCRIPTOR': _BATCHGETTOOLDETAILRESPONSEWRAPPER,\n '__module__': 'batch_get_tool_detail_pb2'})\n", (7561, 7722), True, 'from google.protobuf import reflection as _reflection\n'), ((2511, 2851), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""code"""', 'full_name': '"""basic.BatchGetToolDetailResponse.code"""', 'index': '(0)', 'number': '(1)', 'type': '(5)', 'cpp_type': '(1)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': '(0)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR'}), "(name='code', full_name=\n 'basic.BatchGetToolDetailResponse.code', index=0, number=1, type=5,\n cpp_type=1, label=1, has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=None, file=DESCRIPTOR)\n", (2538, 2851), True, 'from google.protobuf import descriptor as _descriptor\n'), ((3656, 3999), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""data"""', 'full_name': '"""basic.BatchGetToolDetailResponse.data"""', 'index': '(3)', 'number': '(4)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(3)', 'has_default_value': '(False)', 'default_value': '[]', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR'}), "(name='data', full_name=\n 'basic.BatchGetToolDetailResponse.data', index=3, number=4, type=11,\n cpp_type=10, label=3, has_default_value=False, default_value=[],\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=None, file=DESCRIPTOR)\n", (3683, 3999), True, 'from google.protobuf import descriptor as _descriptor\n'), ((4477, 4825), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""code"""', 'full_name': '"""basic.BatchGetToolDetailResponseWrapper.code"""', 'index': '(0)', 'number': '(1)', 'type': '(5)', 'cpp_type': '(1)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': '(0)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR'}), "(name='code', full_name=\n 'basic.BatchGetToolDetailResponseWrapper.code', index=0, number=1, type\n =5, cpp_type=1, label=1, has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=None, file=DESCRIPTOR)\n", (4504, 4825), True, 'from google.protobuf import descriptor as _descriptor\n'), ((5651, 6004), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""data"""', 'full_name': '"""basic.BatchGetToolDetailResponseWrapper.data"""', 'index': '(3)', 'number': '(4)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': 'None', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR'}), "(name='data', full_name=\n 'basic.BatchGetToolDetailResponseWrapper.data', index=3, number=4, type\n =11, cpp_type=10, label=1, has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=None, file=DESCRIPTOR)\n", (5678, 6004), True, 'from google.protobuf import descriptor as _descriptor\n')]
|
"""
TODO
"""
import io
import logging
import re
import string
import sys
import textwrap
import time
from datetime import timedelta
from pathlib import Path
from typing import List
import sublib
from deep_translator import GoogleTranslator
# https://pypi.org/project/sublib/
# https://pypi.org/project/deep-translator
sample_file = Path(__file__).parent.parent.parent.absolute() / Path('input/1_short.srt')
sample_str = io.StringIO(textwrap.dedent('''\
1
00:00:00,123 --> 00:00:03,456
Hi there
2
00:01:04,843 --> 00:01:05,428
This is an example of a
subtitle file in SRT format
'''))
def translate_array(texts: List[str], source_language='auto', target_language='hu'):
"""
It takes a list of texts and translates them from source language to target language
:param texts: The list of texts to be translated
:type texts: List[str]
:param source_language: The language you want to translate from, defaults to auto (optional)
:param target_language: The language to translate the text into, defaults to hu (optional)
:return: A list of translated texts.
"""
for i, text in enumerate(texts):
if not text or not isinstance(text, str) or not text.strip():
texts[i] = " zzz "
if text.isdigit() or all(i in string.punctuation for i in text):
texts[i] += " zzz "
result = GoogleTranslator(source=source_language, target=target_language).translate_batch(texts)
return result
def split_up(text: str, pieces_count: int = 2) -> List[str]:
"""
Given a text and a number of pieces, split the text into pieces
:param text: The text to split up
:type text: str
:param pieces_count: The number of pieces to split the text into, defaults to 2
:type pieces_count: int (optional)
:return: A list of strings.
"""
pieces = []
if pieces_count < 1:
logging.error("pieces error.")
sys.exit(1)
elif pieces_count == 1:
return [text]
def get_optimal_split(where: float, p_split_points: List[int]):
"""
Get the optimal split point from a list of split points
:param where: the point where you want to split the data
:type where: float
:param p_split_points: The list of split points
:type p_split_points: List[int]
:return: The optimal split point and the list of split points with the optimal split point removed.
"""
distance_min = 9999.0
min_point = None
for a_split_point in p_split_points:
distance = abs(where - a_split_point)
if distance < distance_min:
distance_min = distance
min_point = a_split_point
if min_point:
p_split_points.remove(min_point)
return min_point, p_split_points
len_of_a_piece = len(text) / pieces_count
optimal_split_positions = [len_of_a_piece * x for x in range(1, pieces_count)]
indices_object = re.finditer(pattern=r'\w+', string=text)
possible_split_points = [index.start() for index in indices_object]
if 0 in possible_split_points:
possible_split_points.remove(0)
if len(possible_split_points) + 1 < pieces_count:
logging.info("[{}]".format(" | ".join(re.split(r'\W+', text).remove(''))))
logging.error(
f"There are {len(possible_split_points)} split points and we want "
f"to split the text '{text}' in {pieces_count} pieces... Giving up.")
sys.exit(42)
def get_split_points(optimal_split_positions: List[float],
p_possible_split_points: List[int] = possible_split_points):
"""
Given a list of optimal split positions, return a list of the corresponding split points
:param optimal_split_positions: The list of optimal split positions
:type optimal_split_positions: List[float]
:param p_possible_split_points: List[int] = possible_split_points
:type p_possible_split_points: List[int]
:return: The list of optimal split points.
"""
split_points = []
for an_optimal_position in optimal_split_positions:
a_split_point, p_possible_split_points = get_optimal_split(where=an_optimal_position,
p_split_points=p_possible_split_points)
split_points.append(a_split_point)
return split_points
start_ind = 0
for split_point in get_split_points(optimal_split_positions=optimal_split_positions,
p_possible_split_points=possible_split_points):
pieces.append(text[start_ind:split_point].strip())
start_ind = split_point
pieces.append(text[start_ind:].strip())
logging.debug(f"Splitting up '{text}' in {pieces_count} pieces: {pieces}")
return pieces
def translate_subtitle_file(input_file=sample_file, target_language='hu'):
"""
It takes a subtitle file, splits it up into sentences, translates them, and then puts them back together
:param input_file: The subtitle file to be translated
:param target_language: The language you want the text to be translated to, defaults to hu (optional)
:return: The lines of the translated file.
"""
translation_char_limit = 4000 # 4000
subtitle = sublib.SubRip(input_file, "utf-8")
# s2 = copy.deepcopy(subtitle)
general = subtitle.get_general_format()
def is_end_of_sentence(text: str):
return text.endswith('.') or text.endswith('?') or text.endswith('!')
def starts_with_lowercase(text: str):
first_char = text[0]
return first_char.isalpha() and first_char.islower()
translated_all = []
entries_to_be_translated = []
entry = {'index_start': 0, 'index_end': 0, 'text': ''}
logging.info("# Phase 1: Prepare translation: Join entries to sentences.")
for i, a_general in enumerate(general):
start, end, text = a_general
text = text.replace('|', ' ').replace(' ', '')
if len(entry['text']) > 0:
entry['text'] += ' '
entry['text'] += text
if len(general) > i + 1:
start_next = general[i + 1][0]
else:
start_next = end + timedelta(100)
silence_to_next = start_next - end
if is_end_of_sentence(text) or silence_to_next.seconds > 1:
entry['index_end'] = i
entries_to_be_translated.append(entry)
entry = {'index_start': i + 1, 'index_end': i + 1, 'text': ''}
logging.info("# Phase 2: Translate (5000 char limitation)")
start = 0
last_i = len(entries_to_be_translated)
translated_all = []
for i in range(last_i):
an_entry = entries_to_be_translated[start:i + 1]
chars_sum = sum([len(t['text']) for t in an_entry])
if chars_sum > translation_char_limit - 10 or i == last_i - 1:
texts = [t['text'] for t in entries_to_be_translated[start:i + 1]]
time_start = general[entries_to_be_translated[start]['index_end']][1]
time_end = general[entries_to_be_translated[i]['index_end']][1]
# strfdelta(time_start, "{hours}:{minutes}:{seconds}")
logging.info("Translating {} - {}".format(str(time_start)[:-3], str(time_end)[:-3]))
start = time.time()
translated = translate_array(texts=texts, target_language=target_language)
end = time.time()
logging.info(
"{} requests in {:.2f} seconds,{:.0f} ch/s, "
"{:.2f} req/s".format(len(texts),
end - start,
float(chars_sum) / (
end - start),
float(len(texts)) / (
end - start)))
for res in zip(texts, translated):
logging.debug(f" [{res[0]}] -> [{res[1]}]")
translated_all.extend(translated)
# print(translated)
start = i + 1
logging.info("# Phase 3: Split up sentences (undo #1)")
for i, entry in enumerate(entries_to_be_translated):
text_long = translated_all[i]
split_pieces = entry['index_end'] - entry['index_start'] + 1
texts = split_up(text=text_long, pieces_count=split_pieces)
if len(texts) != split_pieces:
logging.error("bahh")
insert_start = entry['index_start']
insert_end = entry['index_end']
for i2 in range(insert_end - insert_start + 1):
iii = insert_start + i2 - 1
if iii < len(general) - 1:
general[iii][2] = texts[i2]
else:
logging.error("Index overrun.")
sys.exit(1)
logging.info("# Phase 4: Split up lines")
for i, entry in enumerate(general):
pieces = int(len(entry[2]) / 40) + 1
if pieces > 1:
new_text = "\n".join(split_up(entry[2], pieces_count=pieces))
entry[2] = new_text
logging.info("# Phase 5: Saving file")
empty_subtitle = sublib.SubRip()
empty_subtitle.set_from_general_format(general)
lines = empty_subtitle.content
output_name = str(input_file).replace('.srt', '.out.srt')
logging.info(f" Writing output to {output_name}")
with open(output_name, 'w', encoding='utf-8') as out:
out.writelines(lines)
return lines
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
# ss = split_up("Ööö, mit csináltál?", 3)
# ss = split_up("Ööö, mit sdfg sfhg wert sxghsfhgdfhg dfhg g ghdfhg csináltál?", 15)
# result = translate_array(texts=["hallo welt", "guten morgen",
# 'Weltfrieden für Manuela'], target_language='hu')
translate_subtitle_file(input_file=sample_file)
|
[
"textwrap.dedent",
"logging.error",
"logging.debug",
"re.split",
"logging.basicConfig",
"re.finditer",
"deep_translator.GoogleTranslator",
"time.time",
"logging.info",
"pathlib.Path",
"datetime.timedelta",
"sublib.SubRip",
"sys.exit"
] |
[((384, 409), 'pathlib.Path', 'Path', (['"""input/1_short.srt"""'], {}), "('input/1_short.srt')\n", (388, 409), False, 'from pathlib import Path\n'), ((435, 626), 'textwrap.dedent', 'textwrap.dedent', (['"""    1\n    00:00:00,123 --> 00:00:03,456\n    Hi there\n    \n    2\n    00:01:04,843 --> 00:01:05,428\n    This is an example of a\n    subtitle file in SRT format\n"""'], {}), '(\n    """    1\n    00:00:00,123 --> 00:00:03,456\n    Hi there\n    \n    2\n    00:01:04,843 --> 00:01:05,428\n    This is an example of a\n    subtitle file in SRT format\n"""\n    )\n', (450, 626), False, 'import textwrap\n'), ((2988, 3028), 're.finditer', 're.finditer', ([], {'pattern': '"""\\\\w+"""', 'string': 'text'}), "(pattern='\\\\w+', string=text)\n", (2999, 3028), False, 'import re\n'), ((4800, 4874), 'logging.debug', 'logging.debug', (['f"""Splitting up \'{text}\' in {pieces_count} pieces: {pieces}"""'], {}), '(f"Splitting up \'{text}\' in {pieces_count} pieces: {pieces}")\n', (4813, 4874), False, 'import logging\n'), ((5365, 5399), 'sublib.SubRip', 'sublib.SubRip', (['input_file', '"""utf-8"""'], {}), "(input_file, 'utf-8')\n", (5378, 5399), False, 'import sublib\n'), ((5854, 5928), 'logging.info', 'logging.info', (['"""# Phase 1: Prepare translation: Join entries to sentences."""'], {}), "('# Phase 1: Prepare translation: Join entries to sentences.')\n", (5866, 5928), False, 'import logging\n'), ((6579, 6638), 'logging.info', 'logging.info', (['"""# Phase 2: Translate (5000 char limitation)"""'], {}), "('# Phase 2: Translate (5000 char limitation)')\n", (6591, 6638), False, 'import logging\n'), ((8137, 8192), 'logging.info', 'logging.info', (['"""# Phase 3: Split up sentences (undo #1)"""'], {}), "('# Phase 3: Split up sentences (undo #1)')\n", (8149, 8192), False, 'import logging\n'), ((8841, 8882), 'logging.info', 'logging.info', (['"""# Phase 4: Split up lines"""'], {}), "('# Phase 4: Split up lines')\n", (8853, 8882), False, 'import logging\n'), ((9102, 9140), 'logging.info', 'logging.info', (['"""# Phase 5: Saving file"""'], {}), "('# Phase 5: Saving file')\n", (9114, 9140), False, 'import logging\n'), ((9162, 9177), 'sublib.SubRip', 'sublib.SubRip', ([], {}), '()\n', (9175, 9177), False, 'import sublib\n'), ((9331, 9382), 'logging.info', 'logging.info', (['f"""    Writing output to {output_name}"""'], {}), "(f'    Writing output to {output_name}')\n", (9343, 9382), False, 'import logging\n'), ((9522, 9561), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (9541, 9561), False, 'import logging\n'), ((1901, 1931), 'logging.error', 'logging.error', (['"""pieces error."""'], {}), "('pieces error.')\n", (1914, 1931), False, 'import logging\n'), ((1940, 1951), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1948, 1951), False, 'import sys\n'), ((3508, 3520), 'sys.exit', 'sys.exit', (['(42)'], {}), '(42)\n', (3516, 3520), False, 'import sys\n'), ((1383, 1447), 'deep_translator.GoogleTranslator', 'GoogleTranslator', ([], {'source': 'source_language', 'target': 'target_language'}), '(source=source_language, target=target_language)\n', (1399, 1447), False, 'from deep_translator import GoogleTranslator\n'), ((7361, 7372), 'time.time', 'time.time', ([], {}), '()\n', (7370, 7372), False, 'import time\n'), ((7478, 7489), 'time.time', 'time.time', ([], {}), '()\n', (7487, 7489), False, 'import time\n'), ((8476, 8497), 'logging.error', 'logging.error', (['"""bahh"""'], {}), "('bahh')\n", (8489, 8497), False, 'import logging\n'), ((8780, 8811), 'logging.error', 'logging.error', (['"""Index overrun."""'], {}), "('Index overrun.')\n", (8793, 8811), False, 'import logging\n'), ((8824, 8835), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (8832, 8835), False, 'import sys\n'), ((6286, 6300), 'datetime.timedelta', 'timedelta', (['(100)'], {}), '(100)\n', (6295, 6300), False, 'from datetime import timedelta\n'), ((7983, 8026), 'logging.debug', 'logging.debug', (['f"""     [{res[0]}] -> [{res[1]}]"""'], {}), "(f'     [{res[0]}] -> [{res[1]}]')\n", (7996, 8026), False, 'import logging\n'), ((335, 349), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (339, 349), False, 'from pathlib import Path\n'), ((3278, 3300), 're.split', 're.split', (['"""\\\\W+"""', 'text'], {}), "('\\\\W+', text)\n", (3286, 3300), False, 'import re\n')]
|
import os
from src.ezcord import *
APP = 874663148374880287
TEST = 877399405056102431
bot = Bot(prefix='-', app_id=APP, guild_id=TEST, intents=Intents.members)
@bot.command(name='ping')
async def ping(ctx: Context):
emd = Embed(description=f'**Pong: {bot.latency}ms**')
await ctx.reply(embed=emd)
@bot.command(name='foo')
async def _foo(ctx: Context):
await ctx.send(f'{bot.user()}')
@bot.event
async def on_ready():
print(f'Logged in as {bot.user} (ID: {bot.user.id})')
print(f'------')
bot.run(os.getenv('DISCORD_TOKEN'))
|
[
"os.getenv"
] |
[((527, 553), 'os.getenv', 'os.getenv', (['"""DISCORD_TOKEN"""'], {}), "('DISCORD_TOKEN')\n", (536, 553), False, 'import os\n')]
|
from kaiba.models.kaiba_object import KaibaObject
def test_create_jsonschema_from_model():
"""Test that we can create jsonschema."""
assert KaibaObject.schema_json(indent=2)
|
[
"kaiba.models.kaiba_object.KaibaObject.schema_json"
] |
[((150, 183), 'kaiba.models.kaiba_object.KaibaObject.schema_json', 'KaibaObject.schema_json', ([], {'indent': '(2)'}), '(indent=2)\n', (173, 183), False, 'from kaiba.models.kaiba_object import KaibaObject\n')]
|
import csv
import statistics
from matplotlib import pyplot as plt
from scipy.interpolate import interp1d
import numpy as np
with open('pknorlen_akcje.csv') as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
next(readCSV)
counter = 0
kursy_lista = []
for row in readCSV:
kursy_lista.append({"data": row[0], "kurs_max": float(row[5]), "kurs_min": float(row[6])})
counter += 1
if counter > 5000:
break
print(counter)
print("Srednia: ", statistics.mean([k["kurs_max"] for k in kursy_lista]))
print("Odch.Std:", statistics.stdev([k["kurs_max"] for k in kursy_lista]))
print("Max: ", max([k["kurs_max"] for k in kursy_lista]))
print("Min:", min([k["kurs_max"] for k in kursy_lista]))
y_es = [k["kurs_max"] for k in kursy_lista]
x_es = range(0,len(y_es))
f_linear = interp1d(x_es, y_es, kind='linear')
# xnew = np.arange(1, len(y_es), 0.1)
# plt.plot(x_es, y_es, 'o', x_es, f_linear(x_es), '-')
plt.plot(x_es, f_linear(x_es), '-')
plt.show()
|
[
"csv.reader",
"matplotlib.pyplot.show",
"statistics.stdev",
"statistics.mean",
"scipy.interpolate.interp1d"
] |
[((824, 859), 'scipy.interpolate.interp1d', 'interp1d', (['x_es', 'y_es'], {'kind': '"""linear"""'}), "(x_es, y_es, kind='linear')\n", (832, 859), False, 'from scipy.interpolate import interp1d\n'), ((990, 1000), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (998, 1000), True, 'from matplotlib import pyplot as plt\n'), ((183, 217), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (193, 217), False, 'import csv\n'), ((497, 550), 'statistics.mean', 'statistics.mean', (["[k['kurs_max'] for k in kursy_lista]"], {}), "([k['kurs_max'] for k in kursy_lista])\n", (512, 550), False, 'import statistics\n'), ((571, 625), 'statistics.stdev', 'statistics.stdev', (["[k['kurs_max'] for k in kursy_lista]"], {}), "([k['kurs_max'] for k in kursy_lista])\n", (587, 625), False, 'import statistics\n')]
|
import os
import csv
from csv import reader
def read_data():
data = []
with open ("data.csv", "r") as f:
csv_reader = reader(f)
header = next(csv_reader)
if header != None:
for rows in csv_reader:
data.append(rows[0].replace(";",","))
print(data)
with open ("data1.csv", "w") as f:
for eachitem in data:
f.write(eachitem + "\n")
read_data()
|
[
"csv.reader"
] |
[((135, 144), 'csv.reader', 'reader', (['f'], {}), '(f)\n', (141, 144), False, 'from csv import reader\n')]
|
from bs4 import BeautifulSoup as bs
import re
import mysql.connector
class Products_Infos():
def __init__(self, products):
self.products = products
self.productInfos = []
def insertSpaces(self):
for i in self.products:
self.productInfos.append([])
def get_product_link(self):
cont = 0
for link in self.products:
product_link = link.find('a', class_='item-link item__js-link').get('href')
self.productInfos[cont].append(product_link)
cont += 1
def get_product_name(self):
cont = 0
for name in self.products:
product_name = name.find('span', class_='main-title').string
self.productInfos[cont].append(product_name)
cont += 1
def get_product_price(self):
cont = 0
for price in self.products:
try:
product_price = price.find('span', class_='price__fraction').string
except:
try:
product_price_label = price.find('div', class_=re.compile('pdp_options__text'))
product_price = product_price_label.find('span').string
except:
print('HOUVE UM ERRO AO LER O PREÇO DO PRODUTO')
cont += 1
else:
self.productInfos[cont].append(product_price)
cont += 1
print(product_price)
else:
self.productInfos[cont].append(product_price)
cont += 1
def get_shipping_info(self):
cont = 0
for ship in self.products:
try:
product_shipping_info = ship.find('span', class_='text-shipping').string
except:
self.productInfos[cont].append(0)
cont += 1
else:
self.productInfos[cont].append(1)
cont += 1
def get_product_image(self):
cont = 0
for image in self.products:
try:
product_image = image.find('img', src=re.compile('https://http2.mlstatic.com')).get('src')
except:
print('ERRO AO LER A IMAGEM')
self.productInfos[cont].append("")
cont += 1
else:
self.productInfos[cont].append(product_image)
cont += 1
|
[
"re.compile"
] |
[((1082, 1113), 're.compile', 're.compile', (['"""pdp_options__text"""'], {}), "('pdp_options__text')\n", (1092, 1113), False, 'import re\n'), ((2126, 2166), 're.compile', 're.compile', (['"""https://http2.mlstatic.com"""'], {}), "('https://http2.mlstatic.com')\n", (2136, 2166), False, 'import re\n')]
|
import re
from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileAllowed, FileRequired
from wtforms import StringField, TextAreaField, SubmitField
from wtforms.validators import DataRequired, regexp
class ArticleForm(FlaskForm):
title = StringField('Название статьи', validators=[DataRequired()])
text = TextAreaField('Текст статьи', validators=[DataRequired()])
picture = FileField('Картинка', validators=[FileRequired(),
FileAllowed(['jpg', 'png'],
'Допустимы только изображения форматов jpg и png')])
submit = SubmitField('Подтвердить')
|
[
"wtforms.SubmitField",
"flask_wtf.file.FileAllowed",
"wtforms.validators.DataRequired",
"flask_wtf.file.FileRequired"
] |
[((659, 685), 'wtforms.SubmitField', 'SubmitField', (['"""Подтвердить"""'], {}), "('Подтвердить')\n", (670, 685), False, 'from wtforms import StringField, TextAreaField, SubmitField\n'), ((320, 334), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (332, 334), False, 'from wtforms.validators import DataRequired, regexp\n'), ((387, 401), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (399, 401), False, 'from wtforms.validators import DataRequired, regexp\n'), ((449, 463), 'flask_wtf.file.FileRequired', 'FileRequired', ([], {}), '()\n', (461, 463), False, 'from flask_wtf.file import FileField, FileAllowed, FileRequired\n'), ((505, 583), 'flask_wtf.file.FileAllowed', 'FileAllowed', (["['jpg', 'png']", '"""Допустимы только изображения форматов jpg и png"""'], {}), "(['jpg', 'png'], 'Допустимы только изображения форматов jpg и png')\n", (516, 583), False, 'from flask_wtf.file import FileField, FileAllowed, FileRequired\n')]
|
"""Wrapper around ZMQStream
"""
import sys
import time
import zmq
from zmq.eventloop import IOLoop
from zmq.eventloop.zmqstream import ZMQStream
import skytools
from cc.message import CCMessage, zmsg_size
from cc.util import stat_inc
__all__ = ['CCStream', 'CCReqStream']
#
# simple wrapper around ZMQStream
#
class CCStream (ZMQStream):
"""
Adds CCMessage methods to ZMQStream as well as protection (on by default)
against unlimited memory (send queue) growth.
"""
def __init__ (self, *args, **kwargs):
self.qmaxsize = kwargs.pop ('qmaxsize', None)
if self.qmaxsize is None:
self.qmaxsize = 1000
elif self.qmaxsize <= 0:
self.qmaxsize = sys.maxsize
super(CCStream, self).__init__(*args, **kwargs)
def send_multipart (self, msg, *args, **kwargs):
if self._send_queue.qsize() < self.qmaxsize:
super(CCStream, self).send_multipart (msg, *args, **kwargs)
else:
stat_inc ('count.dropped', 1)
stat_inc ('bytes.dropped', zmsg_size (msg))
def send_cmsg(self, cmsg):
"""Send CCMessage to socket"""
self.send_multipart(cmsg.zmsg)
def on_recv_cmsg(self, cbfunc):
"""Set callback that receives CCMessage."""
def convert_cmsg(zmsg):
cmsg = CCMessage(zmsg)
cbfunc(cmsg)
self.on_recv(convert_cmsg)
#
# request multiplexer on single stream
#
class QueryInfo:
"""Store callback details for query."""
log = skytools.getLogger('QueryInfo')
def __init__(self, qid, cmsg, cbfunc, rqs):
self.qid = qid
self.orig_cmsg = cmsg
self.cbfunc = cbfunc
self.timeout_ref = None
self.ioloop = rqs.ioloop
self.remove_query = rqs.remove_query
def on_timeout(self):
"""Called by ioloop on timeout, needs to handle exceptions"""
try:
self.timeout_ref = None
self.launch_cb(None)
except:
self.log.exception('timeout callback crashed')
def launch_cb(self, arg):
"""Run callback, re-wire timeout and query if needed."""
keep, timeout = self.cbfunc(arg)
self.log.trace('keep=%r', keep)
if keep:
self.set_timeout(timeout)
else:
self.remove_query(self.qid)
def set_timeout(self, timeout):
"""Set new timeout for task, None means drop it"""
if self.timeout_ref:
self.ioloop.remove_timeout(self.timeout_ref)
self.timeout_ref = None
if timeout:
deadline = time.time() + timeout
self.timeout_ref = self.ioloop.add_timeout(deadline, self.on_timeout)
def send_to(self, cc):
self.orig_cmsg.send_to(cc)
class CCReqStream:
"""Request-based API for CC socket.
Add request-id into route, later map replies to original request
based on that.
"""
log = skytools.getLogger('CCReqStream')
zmq_hwm = 100
zmq_linger = 500
def __init__(self, cc_url, xtx, ioloop=None, zctx=None):
"""Initialize stream."""
zctx = zctx or zmq.Context.instance()
ioloop = ioloop or IOLoop.instance()
s = zctx.socket (zmq.XREQ)
s.setsockopt (zmq.HWM, self.zmq_hwm)
s.setsockopt (zmq.LINGER, self.zmq_linger)
s.connect (cc_url)
self.ccs = CCStream(s, ioloop, qmaxsize = self.zmq_hwm)
self.ioloop = ioloop
self.xtx = xtx
self.query_id_seq = 1
self.query_cache = {}
self.ccs.on_recv(self.handle_recv)
def remove_query(self, qid):
"""Drop query state. Further replies are ignored."""
qi = self.query_cache.get(qid)
if qi:
del self.query_cache[qid]
qi.set_timeout(None)
def ccquery_sync(self, msg, timeout=0):
"""Synchronous query.
Returns first reply.
"""
res = [None]
def sync_cb(_rep):
res[0] = _rep
self.ioloop.stop()
return (False, 0)
self.ccquery_async(msg, sync_cb, timeout)
self.ioloop.start()
return res[0]
def ccquery_async(self, msg, cbfunc, timeout=0):
"""Asynchronous query.
Maps replies to callback function based on request id.
"""
# create query id prefix
qid = "Q%06d" % self.query_id_seq
self.query_id_seq += 1
# create message, add query id
cmsg = self.xtx.create_cmsg(msg)
cmsg.set_route([qid])
qi = QueryInfo(qid, cmsg, cbfunc, self)
self.query_cache[qid] = qi
qi.set_timeout(timeout)
qi.send_to(self.ccs)
return qid
def ccpublish(self, msg):
"""Broadcast API."""
cmsg = self.xtx.create_cmsg(msg)
cmsg.send_to(self.ccs)
def handle_recv(self, zmsg):
"""Internal callback on ZMQStream.
It must not throw exceptions.
"""
try:
self.handle_recv_real(zmsg)
except Exception:
self.log.exception('handle_recv_real crashed, dropping msg: %r', zmsg)
def handle_recv_real(self, zmsg):
"""Actual callback that can throw exceptions."""
cmsg = CCMessage(zmsg)
route = cmsg.get_route()
if len(route) != 1:
self.log.error('Invalid reply route: %r', route)
return
qid = route[0]
if qid not in self.query_cache:
self.log.error('reply for unknown query: %r', qid)
return
msg = cmsg.get_payload(self.xtx)
qi = self.query_cache[qid]
qi.launch_cb(msg)
def resend(self, qid, timeout=0):
if qid in self.query_cache:
qi = self.query_cache[qid]
qi.send_to(self.ccs)
qi.set_timeout(timeout)
else:
pass # ?
|
[
"time.time",
"skytools.getLogger",
"zmq.Context.instance",
"cc.util.stat_inc",
"cc.message.CCMessage",
"zmq.eventloop.IOLoop.instance",
"cc.message.zmsg_size"
] |
[((1516, 1547), 'skytools.getLogger', 'skytools.getLogger', (['"""QueryInfo"""'], {}), "('QueryInfo')\n", (1534, 1547), False, 'import skytools\n'), ((2925, 2958), 'skytools.getLogger', 'skytools.getLogger', (['"""CCReqStream"""'], {}), "('CCReqStream')\n", (2943, 2958), False, 'import skytools\n'), ((5219, 5234), 'cc.message.CCMessage', 'CCMessage', (['zmsg'], {}), '(zmsg)\n', (5228, 5234), False, 'from cc.message import CCMessage, zmsg_size\n'), ((987, 1015), 'cc.util.stat_inc', 'stat_inc', (['"""count.dropped"""', '(1)'], {}), "('count.dropped', 1)\n", (995, 1015), False, 'from cc.util import stat_inc\n'), ((1323, 1338), 'cc.message.CCMessage', 'CCMessage', (['zmsg'], {}), '(zmsg)\n', (1332, 1338), False, 'from cc.message import CCMessage, zmsg_size\n'), ((3118, 3140), 'zmq.Context.instance', 'zmq.Context.instance', ([], {}), '()\n', (3138, 3140), False, 'import zmq\n'), ((3168, 3185), 'zmq.eventloop.IOLoop.instance', 'IOLoop.instance', ([], {}), '()\n', (3183, 3185), False, 'from zmq.eventloop import IOLoop\n'), ((1056, 1070), 'cc.message.zmsg_size', 'zmsg_size', (['msg'], {}), '(msg)\n', (1065, 1070), False, 'from cc.message import CCMessage, zmsg_size\n'), ((2590, 2601), 'time.time', 'time.time', ([], {}), '()\n', (2599, 2601), False, 'import time\n')]
|