code (string, length 22–1.05M) | apis (list, length 1–3.31k) | extract_api (string, length 75–3.25M) |
---|---|---|
import asyncio
from aiocouch import CouchDB
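# connect to a local CouchDB server, open the "config" database,
# and print the document with id "db-hta"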
async def main_with():
async with CouchDB(
"http://localhost:5984", user="admin", password="<PASSWORD>"
) as couchdb:
database = await couchdb["config"]
async for doc in database.docs(["db-hta"]):
print(doc)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(main_with())
|
[
"asyncio.get_event_loop",
"aiocouch.CouchDB"
] |
[((341, 365), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (363, 365), False, 'import asyncio\n'), ((85, 154), 'aiocouch.CouchDB', 'CouchDB', (['"""http://localhost:5984"""'], {'user': '"""admin"""', 'password': '"""<PASSWORD>"""'}), "('http://localhost:5984', user='admin', password='<PASSWORD>')\n", (92, 154), False, 'from aiocouch import CouchDB\n')]
|
import pandas as pd
import numpy as np
dataset_name = "Caltech"
relative = "../../../"
df = pd.read_csv(relative + "datasets/" + dataset_name + '/'+ dataset_name + '.csv', sep=";", header=None)
df = df.drop(0, axis=1)  # drop the first column
print(df.describe())
print(df.nunique())
print(df.head())
print(df.shape)
df[11] = pd.Categorical(df[11])
df[11] = df[11].cat.codes
num_cols = df.shape[1]-1
np.savetxt(relative + "datasets/" + dataset_name + '/' + dataset_name + "_prep_encoding2.csv", df.values[:,:num_cols], delimiter=",")
np.savetxt(relative + "datasets/" + dataset_name + '/' + dataset_name + "_labels.csv", df.values[:,num_cols], delimiter=",")
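# project the encoded feature columns to 2-D with UMAP and colour the points by the label column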
import umap
X_embedded = umap.UMAP().fit_transform(df.values[:,:num_cols])
import matplotlib.pyplot as plt
plt.scatter(X_embedded[:,0], X_embedded[:,1], c = df.values[:,num_cols])
plt.show()
|
[
"matplotlib.pyplot.show",
"pandas.read_csv",
"matplotlib.pyplot.scatter",
"numpy.savetxt",
"umap.UMAP",
"pandas.Categorical"
] |
[((96, 202), 'pandas.read_csv', 'pd.read_csv', (["(relative + 'datasets/' + dataset_name + '/' + dataset_name + '.csv')"], {'sep': '""";"""', 'header': 'None'}), "(relative + 'datasets/' + dataset_name + '/' + dataset_name +\n '.csv', sep=';', header=None)\n", (107, 202), True, 'import pandas as pd\n'), ((306, 328), 'pandas.Categorical', 'pd.Categorical', (['df[11]'], {}), '(df[11])\n', (320, 328), True, 'import pandas as pd\n'), ((383, 521), 'numpy.savetxt', 'np.savetxt', (["(relative + 'datasets/' + dataset_name + '/' + dataset_name +\n '_prep_encoding2.csv')", 'df.values[:, :num_cols]'], {'delimiter': '""","""'}), "(relative + 'datasets/' + dataset_name + '/' + dataset_name +\n '_prep_encoding2.csv', df.values[:, :num_cols], delimiter=',')\n", (393, 521), True, 'import numpy as np\n'), ((517, 646), 'numpy.savetxt', 'np.savetxt', (["(relative + 'datasets/' + dataset_name + '/' + dataset_name + '_labels.csv')", 'df.values[:, num_cols]'], {'delimiter': '""","""'}), "(relative + 'datasets/' + dataset_name + '/' + dataset_name +\n '_labels.csv', df.values[:, num_cols], delimiter=',')\n", (527, 646), True, 'import numpy as np\n'), ((758, 831), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X_embedded[:, 0]', 'X_embedded[:, 1]'], {'c': 'df.values[:, num_cols]'}), '(X_embedded[:, 0], X_embedded[:, 1], c=df.values[:, num_cols])\n', (769, 831), True, 'import matplotlib.pyplot as plt\n'), ((831, 841), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (839, 841), True, 'import matplotlib.pyplot as plt\n'), ((672, 683), 'umap.UMAP', 'umap.UMAP', ([], {}), '()\n', (681, 683), False, 'import umap\n')]
|
# Generated by Django 2.2.13 on 2020-07-07 17:18
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
(
"samlidp",
"0003_samlapplication_allow_access_by_email_suffix_squashed_0004_auto_20200420_1246",
),
("user", "0032_user_last_modified"),
]
operations = [
migrations.CreateModel(
name="ServiceEmailAddress",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
(
"email",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="service_emails",
to="user.EmailAddress",
),
),
(
"saml_application",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="samlidp.SamlApplication"
),
),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="service_emails",
to="user.User",
),
),
],
options={
"unique_together": {("user", "saml_application", "email")},
},
),
]
|
[
"django.db.models.ForeignKey",
"django.db.models.AutoField"
] |
[((557, 650), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (573, 650), False, 'from django.db import migrations, models\n'), ((780, 902), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""service_emails"""', 'to': '"""user.EmailAddress"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='service_emails', to='user.EmailAddress')\n", (797, 902), False, 'from django.db import migrations, models\n'), ((1091, 1188), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""samlidp.SamlApplication"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'samlidp.SamlApplication')\n", (1108, 1188), False, 'from django.db import migrations, models\n'), ((1316, 1430), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""service_emails"""', 'to': '"""user.User"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='service_emails', to='user.User')\n", (1333, 1430), False, 'from django.db import migrations, models\n')]
|
"""Tree Practice
=== Module description ===
- Task 1, which contains one Tree method to implement.
- Task 2, which asks you to implement two operations that allow you
to convert between trees and nested lists.
- Task 3, which asks you to learn about and use a more restricted form of
trees known as *binary trees*.
"""
from typing import Optional, List, Union
class Tree:
"""A recursive tree data structure.
Note the relationship between this class and LinkedListRec
from Lab 7; the only major difference is that _rest
has been replaced by _subtrees to handle multiple
recursive sub-parts.
"""
# === Private Attributes ===
# The item stored at this tree's root, or None if the tree is empty.
_root: Optional[object]
# The list of all subtrees of this tree.
_subtrees: List['Tree']
# === Representation Invariants ===
# - If self._root is None then self._subtrees is an empty list.
# This setting of attributes represents an empty Tree.
# - self._subtrees may be empty when self._root is not None.
# This setting of attributes represents a tree consisting of just one
# node.
# === Methods ===
def __init__(self, root: object, subtrees: List['Tree']) -> None:
"""Initialize a new Tree with the given root value and subtrees.
If <root> is None, the tree is empty.
Precondition: if <root> is None, then <subtrees> is empty.
"""
self._root = root
self._subtrees = subtrees
def is_empty(self) -> bool:
"""Return True if this tree is empty.
>>> t1 = Tree(None, [])
>>> t1.is_empty()
True
>>> t2 = Tree(3, [])
>>> t2.is_empty()
False
"""
return self._root is None
##############################################################################
# Task 1: Another tree method
##############################################################################
def __eq__(self, other: 'Tree') -> bool:
"""Return whether <self> and <other> are equal.
Hint: you can use the standard structure for recursive functions on
trees, except that you'll want to loop using an index:
        `for i in range(len(self._subtrees))`.
This way, you can access the corresponding subtree in `other`.
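
        For example:

        >>> Tree(1, [Tree(2, [])]) == Tree(1, [Tree(2, [])])
        True
        >>> Tree(1, [Tree(2, [])]) == Tree(1, [Tree(3, [])])
        False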
"""
if self.is_empty():
return other.is_empty()
elif len(self._subtrees) == 0:
return self._root == other._root and len(other._subtrees) == 0
elif len(self._subtrees) == len(other._subtrees):
for subtree_index in range(len(self._subtrees)):
if self._subtrees[subtree_index] != \
other._subtrees[subtree_index]:
return False
            return True
        else:
            # a differing number of subtrees means the trees cannot be equal
            return False
##############################################################################
# Task 2: Trees and nested lists
##############################################################################
def to_nested_list(self) -> list:
"""Return the nested list representation of this tree.
"""
nested_list = []
if self.is_empty():
return nested_list
elif len(self._subtrees) == 0:
nested_list.append(self._root)
return nested_list
else:
nested_list.append(self._root)
sub_list = []
for subtree_index in range(len(self._subtrees)):
sub_list.append(self._subtrees[subtree_index].to_nested_list())
nested_list.extend(sub_list)
return nested_list
def to_tree(obj: Union[int, List]) -> 'Tree':
"""Return the Tree which <obj> represents.
You may not access Tree attributes directly. This function can be
    implemented only using the Tree initializer.

    >>> tree3 = Tree(3, [])
    >>> tree2 = Tree(2, [tree3])
    >>> tree1 = Tree(1, [tree2])
    >>> nested_tree = tree1.to_nested_list()  # [1, [2, [3]]]
    >>> type(to_tree(nested_tree)) is Tree
    True
    >>> to_tree(nested_tree)._root
    1
    >>> len(to_tree(nested_tree)._subtrees)
    1
>>> tree3 = Tree(3, [])
>>> tree2 = Tree(2, [tree3])
>>> tree1 = Tree(1, [tree2])
>>> tree1.to_nested_list()
[1, [2, [3]]]
"""
subtree = []
if obj == []:
return Tree(None, subtree)
elif len(obj) == 1:
root = obj[0]
return Tree(root, subtree)
else:
root = obj[0]
# tree = Tree(obj[0], subtree) # obj is a List of int and list
for item in range(1, len(obj)):
subtree.append(to_tree(obj[item]))
return Tree(root, subtree)
##############################################################################
# Task 3: Binary trees
##############################################################################
class BinaryTree:
"""A class representing a binary tree.
A binary tree is either empty, or a root connected to
a *left* binary tree and a *right* binary tree (which could be empty).
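
    For example, a three-node tree, using BinaryTree(None, None, None) for the
    empty children:

    >>> empty = BinaryTree(None, None, None)
    >>> bt = BinaryTree(1, BinaryTree(2, empty, empty), BinaryTree(3, empty, empty))
    >>> bt.preorder()
    [1, 2, 3]
    >>> bt.inorder()
    [2, 1, 3]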
"""
# === Private Attributes ===
_root: Optional[object]
_left: Optional['BinaryTree']
_right: Optional['BinaryTree']
# === Representation Invariants ===
# _root, _left, _right are either ALL None, or none of them are None.
# If they are all None, this represents an empty BinaryTree.
def __init__(self, root: Optional[object],
left: Optional['BinaryTree'],
right: Optional['BinaryTree']) -> None:
"""Initialise a new binary tree with the given values.
If <root> is None, this represents an empty BinaryTree
(<left> and <right> are ignored in this case).
Precondition: if <root> is not None, then neither <left> nor <right>
are None.
"""
if root is None:
# store an empty BinaryTree
self._root = None
self._left = None
self._right = None
else:
self._root = root
self._left = left
self._right = right
def is_empty(self) -> bool:
"""Return True if this binary tree is empty.
Note that only empty binary trees can have left and right
attributes set to None.
"""
return self._root is None
def preorder(self) -> list:
"""Return a list of this tree's items using a *preorder* traversal.
"""
result = []
if self.is_empty():
return result
else:
result.append(self._root)
result += self._left.preorder()
result += self._right.preorder()
return result
def inorder(self) -> list:
"""Return a list of this tree's items using an *inorder* traversal.
"""
result = []
if self.is_empty():
return result
result += self._left.inorder()
result.append(self._root)
result += self._right.inorder()
return result
def postorder(self) -> list:
"""Return a list of this tree's items using a *postorder* traversal.
"""
result = []
if self.is_empty():
return result
result += self._left.postorder()
result += self._right.postorder()
result.append(self._root)
return result
if __name__ == '__main__':
import python_ta
python_ta.check_all()
|
[
"python_ta.check_all"
] |
[((7577, 7598), 'python_ta.check_all', 'python_ta.check_all', ([], {}), '()\n', (7596, 7598), False, 'import python_ta\n')]
|
import string
import itertools
from operator import add
import argparse
import json
parser = argparse.ArgumentParser()
parser.add_argument("--l1", default="en", help="name of language 1")
parser.add_argument("--l2", default="sp", help="name of language 2")
parser.add_argument("--probs_l1", default="../../data/probs/probs_en.txt", help="location of probs file for language 1")
parser.add_argument("--probs_l2", default="../../data/probs/probs_sp.txt", help="location of probs file for language 2")
parser.add_argument("--dict_l1", default="../../data/dictionaries/dict_en", help="location of dictionary file for language 1")
parser.add_argument("--dict_l2", default="../../data/dictionaries/dict_sp", help="location of dictionary file for language 2")
parser.add_argument("--single_char_l1", default="AI", help="a string with characters that are OK in L1 on their own")
parser.add_argument("--single_char_l2", default="AEOUY", help="a string with characters that are OK in L2 on their own")
parser.add_argument("--FST", default="../../data/FSTs/", help="location of created FSTs")
parser.add_argument("--mappings", default="../../data/mappings/mappings.json", help="location of mappings file")
args = parser.parse_args()
# extract unigram probabilities
def extract_unigrams(filename):
d_probs = {}
tot = 0
for l in open(filename):
w, p = l.split("\t")
d_probs[w] = float(p)
tot += float(p)
for w in d_probs:
d_probs[w] = d_probs[w]/tot
return d_probs
# make the additional changes to make both sets as similar as possible
def replace_phones(v, mapping):
v_all = []
v = v.split()
for p in v:
if p in mapping:
v_all.append(mapping[p])
else:
v_all.append(p)
return " ".join(v_all)
# create a python dictionary from the phonemes dictionary
def create_dict(map_phones, dict):
d = {}
for l in open(dict):
k, v = l.strip().split("\t", 1)
k = k.upper()
v = v.split("\t")
d[k] = []
for item in v:
d[k].append(replace_phones(item, map_phones))
return d
def write_to_files(i, diff, v, k , d_probs, f, f_inv, f_both, f_both_inv, lang):
if len(v) == 1:
f.write('(0 (0 {}__{} {} {}))\n'.format(k, lang, v[0], d_probs[k]))
f_both.write('(0 (0 {}__{} {} {}))\n'.format(k, lang, v[0], d_probs[k]))
f_inv.write('(0 (0 {} {}__{} {}))\n'.format(v[0], k, lang, d_probs[k]))
f_both_inv.write('(0 (0 {} {}__{} {}))\n'.format(v[0], k, lang, d_probs[k]))
if len(v) > 1:
l = len(v)
f.write('(0 ({} *e* {} {}))\n'.format(i+1, v[0], d_probs[k]))
f_both.write('(0 ({} *e* {} {}))\n'.format(i+diff+1, v[0], d_probs[k]))
f_inv.write('(0 ({} {} *e* {}))\n'.format(i+1, v[0], d_probs[k]))
f_both_inv.write('(0 ({} {} *e* {}))\n'.format(i+diff+1, v[0], d_probs[k]))
f.write('({} (0 {}__{} {}))\n'.format(i+l-1, k, lang, v[l-1]))
f_both.write('({} (0 {}__{} {}))\n'.format(i+diff+l-1, k, lang, v[l-1]))
f_inv.write('({} (0 {} {}__{}))\n'.format(i+l-1, v[l-1], k, lang))
f_both_inv.write('({} (0 {} {}__{}))\n'.format(i+diff+l-1, v[l-1], k, lang))
for j,syl in enumerate(v[1:-1]):
f.write('({} ({} *e* {}))\n'.format(i+j+1, i+j+2, syl))
f_both.write('({} ({} *e* {}))\n'.format(i+diff+j+1, i+diff+j+2, syl))
f_inv.write('({} ({} {} *e*))\n'.format(i+j+1, i+j+2, syl))
f_both_inv.write('({} ({} {} *e*))\n'.format(i+diff+j+1, i+diff+j+2, syl))
i = i + l - 1
return i
def write_lang_to_file(i, diff, d, d_probs, f, f_inv, f_l1_l2, f_l1_l2_inv, lang):
for k in d:
if d_probs[k] == 0:
continue
for v in d[k]:
v = v.split()
i = write_to_files(i, diff, v, k , d_probs, f, f_inv, f_l1_l2, f_l1_l2_inv, lang)
return i
# creates a file for FST in carmel
# This creates the FSTs from the dictionaries: l1, l2, l1+l2, and the inverted ones
# Each has edges with words, and it outputs the matching sequences of phones when a word is read (each phone on a separate edge)
# The inverted ones are opposite
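# For illustration only (hypothetical word/phone entries, following the formats
# used in write_to_files above): a one-phone word "A" (phone AH, probability 0.1)
# becomes a single arc on state 0, e.g.
#   (0 (0 A__en AH 0.1))
# while a multi-phone word "CAT" (K AE T) is spread over intermediate states:
#   (0 (1 *e* K 0.1))
#   (1 (2 *e* AE))
#   (2 (0 CAT__en T))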
def create_fsts(d_l1, d_l2, d_probs_l1, d_probs_l2):
with open(args.FST+args.l1, "w") as f_l1, open(args.FST+args.l1+"_inv", "w") as f_l1_inv, \
open(args.FST+args.l2, "w") as f_l2, open(args.FST+args.l2+"_inv", "w") as f_l2_inv, \
open(args.FST+args.l1+args.l2, "w") as f_l1_l2, open(args.FST+args.l1+args.l2+"_inv", "w") as f_l1_l2_inv:
f_l1.write("%%%% fst with separate phones from L1 dictionary %%%%\n0\n")
f_l1_inv.write("%%%% fst with separate phones from L1 dictionary - inverted %%%%\n0\n")
f_l2.write("%%%% fst with separate phones from L2 dictionary %%%%\n0\n")
f_l2_inv.write("%%%% fst with separate phones from L2 dictionary - inverted %%%%\n0\n")
f_l1_l2.write("%%%% fst with separate phones from L1+L2 dictionaries %%%%\n0\n")
f_l1_l2_inv.write("%%%% fst with separate phones from L1+L2 dictionaries - inverted %%%%\n0\n")
diff = write_lang_to_file(0, 0, d_l1, d_probs_l1, f_l1, f_l1_inv, f_l1_l2, f_l1_l2_inv, args.l1)
diff = write_lang_to_file(0, diff, d_l2, d_probs_l2, f_l2, f_l2_inv, f_l1_l2, f_l1_l2_inv, args.l2)
if __name__ == '__main__':
# extract unigram probabilities
d_probs_l1 = extract_unigrams(args.probs_l1)
d_probs_l2 = extract_unigrams(args.probs_l2)
    # discard words that end with "." or with ")"
# discard words with one letter, except for a predefined list
for w in d_probs_l1:
if w.endswith(")") or w.endswith("."):
d_probs_l1[w] = 0
if len(w) == 1 and w not in args.single_char_l1:
d_probs_l1[w] = 0
for w in d_probs_l2:
if w.endswith(")") or w.endswith("."):
d_probs_l2[w] = 0
if len(w) == 1 and w not in args.single_char_l2:
d_probs_l2[w] = 0
if args.l1 == "en" and args.l2 == "sp":
with open(args.mappings, "r") as f:
mappings = json.load(f)
l2_l1_map, map_phones_l1, map_phones_l2 = mappings["l2_l1_map"], mappings["map_phones_l1"], mappings["map_phones_l2"]
else:
map_phones_l1 = map_phones_l2 = None
d_l1 = create_dict(map_phones_l1, args.dict_l1)
d_l2 = create_dict(map_phones_l2, args.dict_l2)
create_fsts(d_l1, d_l2, d_probs_l1, d_probs_l2)
|
[
"json.load",
"argparse.ArgumentParser"
] |
[((102, 127), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (125, 127), False, 'import argparse\n'), ((6251, 6263), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6260, 6263), False, 'import json\n')]
|
# -- Path setup --------------------------------------------------------------
import os
import sys
sys.path.insert(0, os.path.abspath("../../src"))
import sphinx_gallery
# -- Project information -----------------------------------------------------
project = "SPFlow"
copyright = "2020, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>"
author = "<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>"
# Get __version__ from _meta
from spn._meta import __version__
version = __version__
release = __version__
extensions = [
"sphinx.ext.linkcode",
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.githubpages",
"sphinx.ext.napoleon",
"sphinx_gallery.gen_gallery",
]
templates_path = ["_templates"]
source_suffix = ".rst"
master_doc = "index"
exclude_patterns = ["build", "Thumbs.db", ".DS_Store", "env"]
pygments_style = "sphinx"
html_theme = "sphinx_rtd_theme"
html_static_path = ["_static"]
html_logo = "../../Documentation/logo/spflow_logoSquare.png"
# -- Extension configuration -------------------------------------------------
autosummary_generate = True
autodoc_default_options = {"undoc-members": None}
# -- Options for intersphinx extension ---------------------------------------
intersphinx_mapping = {
"python": ("https://docs.python.org/3/", None),
"numpy": ("https://numpy.org/doc/stable/", None),
"sklearn": ("https://scikit-learn.org/stable", None),
}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# Linkcode extension
def linkcode_resolve(domain, info):
if domain != "py":
return None
if not info["module"]:
return None
filename = info["module"].replace(".", "/")
return "https://github.com/SPFlow/SPFlow/blob/master/src/%s.py" % filename
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# Napoleon settings
napoleon_google_docstring = False
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
# sphinx_gallery.gen_gallery settings
sphinx_gallery_conf = {
"doc_module": "spn",
"backreferences_dir": os.path.join("generated"),
"reference_url": {"spn": None},
"remove_config_comments": True,
}
|
[
"os.path.abspath",
"os.path.join"
] |
[((120, 148), 'os.path.abspath', 'os.path.abspath', (['"""../../src"""'], {}), "('../../src')\n", (135, 148), False, 'import os\n'), ((2681, 2706), 'os.path.join', 'os.path.join', (['"""generated"""'], {}), "('generated')\n", (2693, 2706), False, 'import os\n')]
|
"""Tests for views of applications."""
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth.models import AnonymousUser
from django.http import HttpResponseRedirect
from nose.tools import raises
from oauth2_provider.models import AccessToken
from rest_framework.test import APIRequestFactory
from geokey.projects.tests.model_factories import UserFactory
from ..views import (
ApplicationOverview, ApplicationCreate, ApplicationSettings,
ApplicationDelete, ApplicationConnected, ApplicationDisconnect
)
from ..models import Application
from .model_factories import ApplicationFactory
class ApplicationOverviewTest(TestCase):
def test_get_with_user(self):
view = ApplicationOverview.as_view()
url = reverse('admin:app_overview')
request = APIRequestFactory().get(url)
request.user = UserFactory.create()
response = view(request).render()
self.assertEqual(response.status_code, 200)
def test_get_with_anonymous(self):
view = ApplicationOverview.as_view()
url = reverse('admin:app_overview')
request = APIRequestFactory().get(url)
request.user = AnonymousUser()
response = view(request)
self.assertTrue(isinstance(response, HttpResponseRedirect))
class ApplicationConnectedTest(TestCase):
def test_get_with_user(self):
view = ApplicationConnected.as_view()
url = reverse('admin:app_connected')
request = APIRequestFactory().get(url)
request.user = UserFactory.create()
response = view(request).render()
self.assertEqual(response.status_code, 200)
def test_get_with_anonymous(self):
view = ApplicationConnected.as_view()
url = reverse('admin:app_connected')
request = APIRequestFactory().get(url)
request.user = AnonymousUser()
response = view(request)
self.assertTrue(isinstance(response, HttpResponseRedirect))
class ApplicationDisconnectTest(TestCase):
def setUp(self):
self.user = UserFactory.create()
self.app = ApplicationFactory.create()
self.token = AccessToken.objects.create(
user=self.user,
application=self.app,
token='df0af6a395b4cd072445b3832e9379bfee257da0',
scope=1,
expires='2030-12-31T23:59:01+00:00'
)
@raises(AccessToken.DoesNotExist)
def test_get_with_user(self):
view = ApplicationDisconnect.as_view()
url = reverse('admin:app_disconnect', kwargs={'app_id': self.app.id})
request = APIRequestFactory().get(url)
from django.contrib.messages.storage.fallback import FallbackStorage
setattr(request, 'session', 'session')
messages = FallbackStorage(request)
setattr(request, '_messages', messages)
request.user = self.user
response = view(request, app_id=self.app.id)
self.assertTrue(isinstance(response, HttpResponseRedirect))
AccessToken.objects.get(pk=self.token.id)
def test_get_with_anonymous(self):
view = ApplicationDisconnect.as_view()
url = reverse('admin:app_disconnect', kwargs={'app_id': self.app.id})
request = APIRequestFactory().get(url)
request.user = AnonymousUser()
response = view(request, app_id=self.app.id)
self.assertTrue(isinstance(response, HttpResponseRedirect))
self.assertIsNotNone(AccessToken.objects.get(pk=self.token.id))
def test_get_with_unconnected_user(self):
view = ApplicationDisconnect.as_view()
url = reverse('admin:app_disconnect', kwargs={'app_id': self.app.id})
request = APIRequestFactory().get(url)
request.user = UserFactory.create()
response = view(request, app_id=self.app.id)
self.assertTrue(isinstance(response, HttpResponseRedirect))
class ApplicationCreateTest(TestCase):
def test_get_with_user(self):
view = ApplicationCreate.as_view()
url = reverse('admin:app_register')
request = APIRequestFactory().get(url)
request.user = UserFactory.create()
response = view(request).render()
self.assertEqual(response.status_code, 200)
def test_get_with_anonymous(self):
view = ApplicationCreate.as_view()
url = reverse('admin:app_register')
request = APIRequestFactory().get(url)
request.user = AnonymousUser()
response = view(request)
self.assertTrue(isinstance(response, HttpResponseRedirect))
def test_post_with_user(self):
data = {
'name': '<NAME>',
'description:': '',
'download_url': 'http://example.com',
'redirect_uris': 'http://example.com',
'authorization_grant_type': 'password',
'skip_authorization': False,
}
view = ApplicationCreate.as_view()
url = reverse('admin:app_register')
request = APIRequestFactory().post(url, data)
request.user = UserFactory.create()
from django.contrib.messages.storage.fallback import FallbackStorage
setattr(request, 'session', 'session')
messages = FallbackStorage(request)
setattr(request, '_messages', messages)
response = view(request)
self.assertEqual(response.status_code, 302)
self.assertEqual(Application.objects.count(), 1)
def test_post_with_anonymous(self):
data = {
'name': '<NAME>',
'description': '',
'download_url': 'http://example.com',
'redirect_uris': 'http://example.com',
'authorization_grant_type': 'password',
'skip_authorization': False,
}
view = ApplicationCreate.as_view()
url = reverse('admin:app_register')
request = APIRequestFactory().post(url, data)
request.user = AnonymousUser()
response = view(request)
self.assertEqual(response.status_code, 302)
self.assertEqual(Application.objects.count(), 0)
class ApplicationSettingsTest(TestCase):
def setUp(self):
self.creator = UserFactory.create()
self.app = ApplicationFactory.create(**{'user': self.creator})
def test_get_with_creator(self):
view = ApplicationSettings.as_view()
url = reverse('admin:app_settings', kwargs={'app_id': self.app.id})
request = APIRequestFactory().get(url)
request.user = self.creator
response = view(request, app_id=self.app.id).render()
self.assertEqual(response.status_code, 200)
self.assertNotContains(
response,
            'You are not the owner of this application and therefore not '
'allowed to access this app.'
)
def test_get_with_user(self):
view = ApplicationSettings.as_view()
url = reverse('admin:app_settings', kwargs={'app_id': self.app.id})
request = APIRequestFactory().get(url)
request.user = UserFactory.create()
response = view(request, app_id=self.app.id).render()
self.assertEqual(response.status_code, 200)
self.assertContains(
response,
'You are not the owner of this application and therefore not '
'allowed to access this app.'
)
def test_get_with_anonymous(self):
view = ApplicationSettings.as_view()
url = reverse('admin:app_settings', kwargs={'app_id': self.app.id})
request = APIRequestFactory().get(url)
request.user = AnonymousUser()
response = view(request, app_id=self.app.id)
self.assertTrue(isinstance(response, HttpResponseRedirect))
def test_post_with_creator(self):
data = {
'name': '<NAME>',
'description': '',
'download_url': 'http://example.com',
'redirect_uris': 'http://example.com',
'authorization_grant_type': 'password',
'skip_authorization': True,
}
view = ApplicationSettings.as_view()
url = reverse('admin:app_settings', kwargs={'app_id': self.app.id})
request = APIRequestFactory().post(url, data)
request.user = self.creator
from django.contrib.messages.storage.fallback import FallbackStorage
setattr(request, 'session', 'session')
messages = FallbackStorage(request)
setattr(request, '_messages', messages)
response = view(request, app_id=self.app.id).render()
self.assertEqual(response.status_code, 200)
self.assertNotContains(
response,
            'You are not the owner of this application and therefore not '
'allowed to access this app.'
)
ref = Application.objects.get(pk=self.app.id)
self.assertEqual(ref.name, data.get('name'))
self.assertEqual(ref.description, data.get('description'))
self.assertEqual(ref.download_url, data.get('download_url'))
self.assertEqual(ref.redirect_uris, data.get('redirect_uris'))
self.assertEqual(
ref.authorization_grant_type,
data.get('authorization_grant_type')
)
def test_post_with_user(self):
data = {
'name': '<NAME>',
'description': '',
'download_url': 'http://example.com/download',
'redirect_uris': 'http://example.com/redirect',
'authorization_grant_type': 'password',
'skip_authorization': True,
}
view = ApplicationSettings.as_view()
url = reverse('admin:app_settings', kwargs={'app_id': self.app.id})
request = APIRequestFactory().post(url, data)
request.user = UserFactory.create()
response = view(request, app_id=self.app.id).render()
self.assertEqual(response.status_code, 200)
self.assertContains(
response,
'You are not the owner of this application and therefore not '
'allowed to access this app.'
)
ref = Application.objects.get(pk=self.app.id)
self.assertNotEqual(ref.name, data.get('name'))
self.assertNotEqual(ref.description, data.get('description'))
self.assertNotEqual(ref.download_url, data.get('download_url'))
self.assertNotEqual(ref.redirect_uris, data.get('redirect_uris'))
self.assertNotEqual(
ref.authorization_grant_type,
data.get('authorization_grant_type')
)
def test_post_with_anonymous(self):
data = {
'name': '<NAME>',
'description': '',
'download_url': 'http://example.com/download',
'redirect_uris': 'http://example.com/redirect',
'authorization_grant_type': 'password',
'skip_authorization': True,
}
view = ApplicationSettings.as_view()
url = reverse('admin:app_settings', kwargs={'app_id': self.app.id})
request = APIRequestFactory().post(url, data)
request.user = AnonymousUser()
response = view(request, app_id=self.app.id)
self.assertTrue(isinstance(response, HttpResponseRedirect))
ref = Application.objects.get(pk=self.app.id)
self.assertNotEqual(ref.name, data.get('name'))
self.assertNotEqual(ref.description, data.get('description'))
self.assertNotEqual(ref.download_url, data.get('download_url'))
self.assertNotEqual(ref.redirect_uris, data.get('redirect_uris'))
self.assertNotEqual(
ref.authorization_grant_type,
data.get('authorization_grant_type')
)
class ApplicationDeleteTest(TestCase):
def setUp(self):
self.creator = UserFactory.create()
self.app = ApplicationFactory.create(**{'user': self.creator})
def test_get_with_creator(self):
view = ApplicationDelete.as_view()
url = reverse('admin:app_delete', kwargs={'app_id': self.app.id})
request = APIRequestFactory().get(url)
from django.contrib.messages.storage.fallback import FallbackStorage
setattr(request, 'session', 'session')
messages = FallbackStorage(request)
setattr(request, '_messages', messages)
request.user = self.creator
response = view(request, app_id=self.app.id)
self.assertTrue(isinstance(response, HttpResponseRedirect))
def test_get_with_user(self):
view = ApplicationDelete.as_view()
url = reverse('admin:app_delete', kwargs={'app_id': self.app.id})
request = APIRequestFactory().get(url)
request.user = UserFactory.create()
response = view(request, app_id=self.app.id).render()
self.assertEqual(response.status_code, 200)
self.assertContains(
response,
'You are not the owner of this application and therefore not '
'allowed to access this app.'
)
def test_get_with_anonymous(self):
view = ApplicationDelete.as_view()
url = reverse('admin:app_delete', kwargs={'app_id': self.app.id})
request = APIRequestFactory().get(url)
request.user = AnonymousUser()
response = view(request, app_id=self.app.id)
self.assertTrue(isinstance(response, HttpResponseRedirect))
|
[
"django.contrib.auth.models.AnonymousUser",
"django.core.urlresolvers.reverse",
"oauth2_provider.models.AccessToken.objects.get",
"rest_framework.test.APIRequestFactory",
"geokey.projects.tests.model_factories.UserFactory.create",
"oauth2_provider.models.AccessToken.objects.create",
"django.contrib.messages.storage.fallback.FallbackStorage",
"nose.tools.raises"
] |
[((2399, 2431), 'nose.tools.raises', 'raises', (['AccessToken.DoesNotExist'], {}), '(AccessToken.DoesNotExist)\n', (2405, 2431), False, 'from nose.tools import raises\n'), ((783, 812), 'django.core.urlresolvers.reverse', 'reverse', (['"""admin:app_overview"""'], {}), "('admin:app_overview')\n", (790, 812), False, 'from django.core.urlresolvers import reverse\n'), ((883, 903), 'geokey.projects.tests.model_factories.UserFactory.create', 'UserFactory.create', ([], {}), '()\n', (901, 903), False, 'from geokey.projects.tests.model_factories import UserFactory\n'), ((1097, 1126), 'django.core.urlresolvers.reverse', 'reverse', (['"""admin:app_overview"""'], {}), "('admin:app_overview')\n", (1104, 1126), False, 'from django.core.urlresolvers import reverse\n'), ((1197, 1212), 'django.contrib.auth.models.AnonymousUser', 'AnonymousUser', ([], {}), '()\n', (1210, 1212), False, 'from django.contrib.auth.models import AnonymousUser\n'), ((1452, 1482), 'django.core.urlresolvers.reverse', 'reverse', (['"""admin:app_connected"""'], {}), "('admin:app_connected')\n", (1459, 1482), False, 'from django.core.urlresolvers import reverse\n'), ((1554, 1574), 'geokey.projects.tests.model_factories.UserFactory.create', 'UserFactory.create', ([], {}), '()\n', (1572, 1574), False, 'from geokey.projects.tests.model_factories import UserFactory\n'), ((1769, 1799), 'django.core.urlresolvers.reverse', 'reverse', (['"""admin:app_connected"""'], {}), "('admin:app_connected')\n", (1776, 1799), False, 'from django.core.urlresolvers import reverse\n'), ((1870, 1885), 'django.contrib.auth.models.AnonymousUser', 'AnonymousUser', ([], {}), '()\n', (1883, 1885), False, 'from django.contrib.auth.models import AnonymousUser\n'), ((2073, 2093), 'geokey.projects.tests.model_factories.UserFactory.create', 'UserFactory.create', ([], {}), '()\n', (2091, 2093), False, 'from geokey.projects.tests.model_factories import UserFactory\n'), ((2162, 2332), 'oauth2_provider.models.AccessToken.objects.create', 'AccessToken.objects.create', ([], {'user': 'self.user', 'application': 'self.app', 'token': '"""df0af6a395b4cd072445b3832e9379bfee257da0"""', 'scope': '(1)', 'expires': '"""2030-12-31T23:59:01+00:00"""'}), "(user=self.user, application=self.app, token=\n 'df0af6a395b4cd072445b3832e9379bfee257da0', scope=1, expires=\n '2030-12-31T23:59:01+00:00')\n", (2188, 2332), False, 'from oauth2_provider.models import AccessToken\n'), ((2527, 2590), 'django.core.urlresolvers.reverse', 'reverse', (['"""admin:app_disconnect"""'], {'kwargs': "{'app_id': self.app.id}"}), "('admin:app_disconnect', kwargs={'app_id': self.app.id})\n", (2534, 2590), False, 'from django.core.urlresolvers import reverse\n'), ((2782, 2806), 'django.contrib.messages.storage.fallback.FallbackStorage', 'FallbackStorage', (['request'], {}), '(request)\n', (2797, 2806), False, 'from django.contrib.messages.storage.fallback import FallbackStorage\n'), ((3018, 3059), 'oauth2_provider.models.AccessToken.objects.get', 'AccessToken.objects.get', ([], {'pk': 'self.token.id'}), '(pk=self.token.id)\n', (3041, 3059), False, 'from oauth2_provider.models import AccessToken\n'), ((3161, 3224), 'django.core.urlresolvers.reverse', 'reverse', (['"""admin:app_disconnect"""'], {'kwargs': "{'app_id': self.app.id}"}), "('admin:app_disconnect', kwargs={'app_id': self.app.id})\n", (3168, 3224), False, 'from django.core.urlresolvers import reverse\n'), ((3295, 3310), 'django.contrib.auth.models.AnonymousUser', 'AnonymousUser', ([], {}), '()\n', (3308, 3310), False, 'from django.contrib.auth.models import 
AnonymousUser\n'), ((3612, 3675), 'django.core.urlresolvers.reverse', 'reverse', (['"""admin:app_disconnect"""'], {'kwargs': "{'app_id': self.app.id}"}), "('admin:app_disconnect', kwargs={'app_id': self.app.id})\n", (3619, 3675), False, 'from django.core.urlresolvers import reverse\n'), ((3746, 3766), 'geokey.projects.tests.model_factories.UserFactory.create', 'UserFactory.create', ([], {}), '()\n', (3764, 3766), False, 'from geokey.projects.tests.model_factories import UserFactory\n'), ((4020, 4049), 'django.core.urlresolvers.reverse', 'reverse', (['"""admin:app_register"""'], {}), "('admin:app_register')\n", (4027, 4049), False, 'from django.core.urlresolvers import reverse\n'), ((4120, 4140), 'geokey.projects.tests.model_factories.UserFactory.create', 'UserFactory.create', ([], {}), '()\n', (4138, 4140), False, 'from geokey.projects.tests.model_factories import UserFactory\n'), ((4332, 4361), 'django.core.urlresolvers.reverse', 'reverse', (['"""admin:app_register"""'], {}), "('admin:app_register')\n", (4339, 4361), False, 'from django.core.urlresolvers import reverse\n'), ((4432, 4447), 'django.contrib.auth.models.AnonymousUser', 'AnonymousUser', ([], {}), '()\n', (4445, 4447), False, 'from django.contrib.auth.models import AnonymousUser\n'), ((4925, 4954), 'django.core.urlresolvers.reverse', 'reverse', (['"""admin:app_register"""'], {}), "('admin:app_register')\n", (4932, 4954), False, 'from django.core.urlresolvers import reverse\n'), ((5032, 5052), 'geokey.projects.tests.model_factories.UserFactory.create', 'UserFactory.create', ([], {}), '()\n', (5050, 5052), False, 'from geokey.projects.tests.model_factories import UserFactory\n'), ((5197, 5221), 'django.contrib.messages.storage.fallback.FallbackStorage', 'FallbackStorage', (['request'], {}), '(request)\n', (5212, 5221), False, 'from django.contrib.messages.storage.fallback import FallbackStorage\n'), ((5793, 5822), 'django.core.urlresolvers.reverse', 'reverse', (['"""admin:app_register"""'], {}), "('admin:app_register')\n", (5800, 5822), False, 'from django.core.urlresolvers import reverse\n'), ((5900, 5915), 'django.contrib.auth.models.AnonymousUser', 'AnonymousUser', ([], {}), '()\n', (5913, 5915), False, 'from django.contrib.auth.models import AnonymousUser\n'), ((6146, 6166), 'geokey.projects.tests.model_factories.UserFactory.create', 'UserFactory.create', ([], {}), '()\n', (6164, 6166), False, 'from geokey.projects.tests.model_factories import UserFactory\n'), ((6335, 6396), 'django.core.urlresolvers.reverse', 'reverse', (['"""admin:app_settings"""'], {'kwargs': "{'app_id': self.app.id}"}), "('admin:app_settings', kwargs={'app_id': self.app.id})\n", (6342, 6396), False, 'from django.core.urlresolvers import reverse\n'), ((6868, 6929), 'django.core.urlresolvers.reverse', 'reverse', (['"""admin:app_settings"""'], {'kwargs': "{'app_id': self.app.id}"}), "('admin:app_settings', kwargs={'app_id': self.app.id})\n", (6875, 6929), False, 'from django.core.urlresolvers import reverse\n'), ((7000, 7020), 'geokey.projects.tests.model_factories.UserFactory.create', 'UserFactory.create', ([], {}), '()\n', (7018, 7020), False, 'from geokey.projects.tests.model_factories import UserFactory\n'), ((7412, 7473), 'django.core.urlresolvers.reverse', 'reverse', (['"""admin:app_settings"""'], {'kwargs': "{'app_id': self.app.id}"}), "('admin:app_settings', kwargs={'app_id': self.app.id})\n", (7419, 7473), False, 'from django.core.urlresolvers import reverse\n'), ((7544, 7559), 'django.contrib.auth.models.AnonymousUser', 'AnonymousUser', ([], {}), 
'()\n', (7557, 7559), False, 'from django.contrib.auth.models import AnonymousUser\n'), ((8060, 8121), 'django.core.urlresolvers.reverse', 'reverse', (['"""admin:app_settings"""'], {'kwargs': "{'app_id': self.app.id}"}), "('admin:app_settings', kwargs={'app_id': self.app.id})\n", (8067, 8121), False, 'from django.core.urlresolvers import reverse\n'), ((8356, 8380), 'django.contrib.messages.storage.fallback.FallbackStorage', 'FallbackStorage', (['request'], {}), '(request)\n', (8371, 8380), False, 'from django.contrib.messages.storage.fallback import FallbackStorage\n'), ((9561, 9622), 'django.core.urlresolvers.reverse', 'reverse', (['"""admin:app_settings"""'], {'kwargs': "{'app_id': self.app.id}"}), "('admin:app_settings', kwargs={'app_id': self.app.id})\n", (9568, 9622), False, 'from django.core.urlresolvers import reverse\n'), ((9700, 9720), 'geokey.projects.tests.model_factories.UserFactory.create', 'UserFactory.create', ([], {}), '()\n', (9718, 9720), False, 'from geokey.projects.tests.model_factories import UserFactory\n'), ((10870, 10931), 'django.core.urlresolvers.reverse', 'reverse', (['"""admin:app_settings"""'], {'kwargs': "{'app_id': self.app.id}"}), "('admin:app_settings', kwargs={'app_id': self.app.id})\n", (10877, 10931), False, 'from django.core.urlresolvers import reverse\n'), ((11009, 11024), 'django.contrib.auth.models.AnonymousUser', 'AnonymousUser', ([], {}), '()\n', (11022, 11024), False, 'from django.contrib.auth.models import AnonymousUser\n'), ((11688, 11708), 'geokey.projects.tests.model_factories.UserFactory.create', 'UserFactory.create', ([], {}), '()\n', (11706, 11708), False, 'from geokey.projects.tests.model_factories import UserFactory\n'), ((11875, 11934), 'django.core.urlresolvers.reverse', 'reverse', (['"""admin:app_delete"""'], {'kwargs': "{'app_id': self.app.id}"}), "('admin:app_delete', kwargs={'app_id': self.app.id})\n", (11882, 11934), False, 'from django.core.urlresolvers import reverse\n'), ((12126, 12150), 'django.contrib.messages.storage.fallback.FallbackStorage', 'FallbackStorage', (['request'], {}), '(request)\n', (12141, 12150), False, 'from django.contrib.messages.storage.fallback import FallbackStorage\n'), ((12449, 12508), 'django.core.urlresolvers.reverse', 'reverse', (['"""admin:app_delete"""'], {'kwargs': "{'app_id': self.app.id}"}), "('admin:app_delete', kwargs={'app_id': self.app.id})\n", (12456, 12508), False, 'from django.core.urlresolvers import reverse\n'), ((12579, 12599), 'geokey.projects.tests.model_factories.UserFactory.create', 'UserFactory.create', ([], {}), '()\n', (12597, 12599), False, 'from geokey.projects.tests.model_factories import UserFactory\n'), ((12990, 13049), 'django.core.urlresolvers.reverse', 'reverse', (['"""admin:app_delete"""'], {'kwargs': "{'app_id': self.app.id}"}), "('admin:app_delete', kwargs={'app_id': self.app.id})\n", (12997, 13049), False, 'from django.core.urlresolvers import reverse\n'), ((13120, 13135), 'django.contrib.auth.models.AnonymousUser', 'AnonymousUser', ([], {}), '()\n', (13133, 13135), False, 'from django.contrib.auth.models import AnonymousUser\n'), ((3461, 3502), 'oauth2_provider.models.AccessToken.objects.get', 'AccessToken.objects.get', ([], {'pk': 'self.token.id'}), '(pk=self.token.id)\n', (3484, 3502), False, 'from oauth2_provider.models import AccessToken\n'), ((831, 850), 'rest_framework.test.APIRequestFactory', 'APIRequestFactory', ([], {}), '()\n', (848, 850), False, 'from rest_framework.test import APIRequestFactory\n'), ((1145, 1164), 'rest_framework.test.APIRequestFactory', 
'APIRequestFactory', ([], {}), '()\n', (1162, 1164), False, 'from rest_framework.test import APIRequestFactory\n'), ((1501, 1520), 'rest_framework.test.APIRequestFactory', 'APIRequestFactory', ([], {}), '()\n', (1518, 1520), False, 'from rest_framework.test import APIRequestFactory\n'), ((1818, 1837), 'rest_framework.test.APIRequestFactory', 'APIRequestFactory', ([], {}), '()\n', (1835, 1837), False, 'from rest_framework.test import APIRequestFactory\n'), ((2609, 2628), 'rest_framework.test.APIRequestFactory', 'APIRequestFactory', ([], {}), '()\n', (2626, 2628), False, 'from rest_framework.test import APIRequestFactory\n'), ((3243, 3262), 'rest_framework.test.APIRequestFactory', 'APIRequestFactory', ([], {}), '()\n', (3260, 3262), False, 'from rest_framework.test import APIRequestFactory\n'), ((3694, 3713), 'rest_framework.test.APIRequestFactory', 'APIRequestFactory', ([], {}), '()\n', (3711, 3713), False, 'from rest_framework.test import APIRequestFactory\n'), ((4068, 4087), 'rest_framework.test.APIRequestFactory', 'APIRequestFactory', ([], {}), '()\n', (4085, 4087), False, 'from rest_framework.test import APIRequestFactory\n'), ((4380, 4399), 'rest_framework.test.APIRequestFactory', 'APIRequestFactory', ([], {}), '()\n', (4397, 4399), False, 'from rest_framework.test import APIRequestFactory\n'), ((4973, 4992), 'rest_framework.test.APIRequestFactory', 'APIRequestFactory', ([], {}), '()\n', (4990, 4992), False, 'from rest_framework.test import APIRequestFactory\n'), ((5841, 5860), 'rest_framework.test.APIRequestFactory', 'APIRequestFactory', ([], {}), '()\n', (5858, 5860), False, 'from rest_framework.test import APIRequestFactory\n'), ((6415, 6434), 'rest_framework.test.APIRequestFactory', 'APIRequestFactory', ([], {}), '()\n', (6432, 6434), False, 'from rest_framework.test import APIRequestFactory\n'), ((6948, 6967), 'rest_framework.test.APIRequestFactory', 'APIRequestFactory', ([], {}), '()\n', (6965, 6967), False, 'from rest_framework.test import APIRequestFactory\n'), ((7492, 7511), 'rest_framework.test.APIRequestFactory', 'APIRequestFactory', ([], {}), '()\n', (7509, 7511), False, 'from rest_framework.test import APIRequestFactory\n'), ((8140, 8159), 'rest_framework.test.APIRequestFactory', 'APIRequestFactory', ([], {}), '()\n', (8157, 8159), False, 'from rest_framework.test import APIRequestFactory\n'), ((9641, 9660), 'rest_framework.test.APIRequestFactory', 'APIRequestFactory', ([], {}), '()\n', (9658, 9660), False, 'from rest_framework.test import APIRequestFactory\n'), ((10950, 10969), 'rest_framework.test.APIRequestFactory', 'APIRequestFactory', ([], {}), '()\n', (10967, 10969), False, 'from rest_framework.test import APIRequestFactory\n'), ((11953, 11972), 'rest_framework.test.APIRequestFactory', 'APIRequestFactory', ([], {}), '()\n', (11970, 11972), False, 'from rest_framework.test import APIRequestFactory\n'), ((12527, 12546), 'rest_framework.test.APIRequestFactory', 'APIRequestFactory', ([], {}), '()\n', (12544, 12546), False, 'from rest_framework.test import APIRequestFactory\n'), ((13068, 13087), 'rest_framework.test.APIRequestFactory', 'APIRequestFactory', ([], {}), '()\n', (13085, 13087), False, 'from rest_framework.test import APIRequestFactory\n')]
|
#!/usr/bin/env python3
from ev3dev2.motor import MoveSteering, MediumMotor, OUTPUT_A, OUTPUT_B, OUTPUT_C
from ev3dev2.sensor.lego import TouchSensor
from time import sleep
ts = TouchSensor()
steer_pair = MoveSteering(OUTPUT_A, OUTPUT_B)
mm = MediumMotor(OUTPUT_C)
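# run the medium motor at full speed while steering the drive base through 1440
# motor degrees, then stop both motors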
mm.on(speed=100)
# steer_pair.on_for_rotations(steering=-20, speed=75, rotations=10)
steer_pair.on_for_degrees(steering=-100, speed=100, degrees=1440)
#while not ts.is_pressed: # while touch sensor is not pressed
# sleep(0.01)
mm.off()
steer_pair.off()
sleep(5)
|
[
"ev3dev2.motor.MediumMotor",
"ev3dev2.sensor.lego.TouchSensor",
"time.sleep",
"ev3dev2.motor.MoveSteering"
] |
[((178, 191), 'ev3dev2.sensor.lego.TouchSensor', 'TouchSensor', ([], {}), '()\n', (189, 191), False, 'from ev3dev2.sensor.lego import TouchSensor\n'), ((205, 237), 'ev3dev2.motor.MoveSteering', 'MoveSteering', (['OUTPUT_A', 'OUTPUT_B'], {}), '(OUTPUT_A, OUTPUT_B)\n', (217, 237), False, 'from ev3dev2.motor import MoveSteering, MediumMotor, OUTPUT_A, OUTPUT_B, OUTPUT_C\n'), ((243, 264), 'ev3dev2.motor.MediumMotor', 'MediumMotor', (['OUTPUT_C'], {}), '(OUTPUT_C)\n', (254, 264), False, 'from ev3dev2.motor import MoveSteering, MediumMotor, OUTPUT_A, OUTPUT_B, OUTPUT_C\n'), ((521, 529), 'time.sleep', 'sleep', (['(5)'], {}), '(5)\n', (526, 529), False, 'from time import sleep\n')]
|
import re
import math
from collections import Counter
import numpy as np
text1 = '<NAME> mangé du singe'
text2 = 'Nicole a mangé du rat'
class Similarity():
def compute_cosine_similarity(self, string1, string2):
        # find the words that are common
        # to both frequency vectors
        intersection = set(string1.keys()) & set(string2.keys())
        # dot product of vec1 and vec2
numerator = sum([string1[x] * string2[x] for x in intersection])
# sum of the squares of each vector
# sum1 is the sum of text1 and same for sum2 for text2
sum1 = sum([string1[x]**2 for x in string1.keys()])
sum2 = sum([string2[x]**2 for x in string2.keys()])
# product of the square root of both sum(s)
denominator = math.sqrt(sum1) * math.sqrt(sum2)
if not denominator:
return 0.0
else:
return round(numerator / float(denominator), 4)
def text_to_vector(self, text):
WORD = re.compile(r'\w+')
words = WORD.findall(text)
return Counter(words)
# Jaccard Similarity
def tokenize(self, string):
return string.lower().split(" ")
def jaccard_similarity(self, string1, string2):
intersection = set(string1).intersection(set(string2))
union = set(string1).union(set(string2))
return len(intersection) / float(len(union))
similarity = Similarity()
# vector space
vector1 = similarity.text_to_vector(text1)
vector2 = similarity.text_to_vector(text2)
# split words into tokens
token1 = similarity.tokenize(text1)
token2 = similarity.tokenize(text2)
cosine = similarity.compute_cosine_similarity(vector1, vector2)
print('Cosine Similarity:', cosine)
jaccard = similarity.jaccard_similarity(token1, token2)
print('Jaccard Similarity:', jaccard)
|
[
"collections.Counter",
"math.sqrt",
"re.compile"
] |
[((994, 1012), 're.compile', 're.compile', (['"""\\\\w+"""'], {}), "('\\\\w+')\n", (1004, 1012), False, 'import re\n'), ((1063, 1077), 'collections.Counter', 'Counter', (['words'], {}), '(words)\n', (1070, 1077), False, 'from collections import Counter\n'), ((783, 798), 'math.sqrt', 'math.sqrt', (['sum1'], {}), '(sum1)\n', (792, 798), False, 'import math\n'), ((801, 816), 'math.sqrt', 'math.sqrt', (['sum2'], {}), '(sum2)\n', (810, 816), False, 'import math\n')]
|
import data
import numpy as np
# TODO: split tests 1 test per assert statement
# TODO: move repeating constants out of functions
class TestHAPT:
def test_get_train_data(self):
d = data.HAPT()
assert d._train_attrs is None
d.get_train_data()
assert len(d._train_attrs) > 0
assert len(d.get_train_data()) > 0
def test_get_train_labels(self):
d = data.HAPT()
assert d._train_labels is None
d.get_train_labels()
assert len(d._train_labels) > 0
assert len(d.get_train_labels()) > 0
def test_get_test_data(self):
d = data.HAPT()
assert d._test_attrs is None
d.get_test_data()
assert len(d._test_attrs) > 0
assert len(d.get_test_data()) > 0
def test_get_test_labels(self):
d = data.HAPT()
assert d._test_labels is None
d.get_test_labels()
assert len(d._test_labels) > 0
assert len(d.get_test_labels()) > 0
def test_load_train_data(self):
d = data.HAPT()
assert d._train_attrs is None
assert d._train_labels is None
d.load_train_data()
assert len(d._train_attrs) > 0
assert len(d._train_labels) > 0
assert len(d._train_attrs) == len(d._train_labels)
assert len(d.get_train_data()) == len(d.get_train_labels())
def test_load_test_data(self):
d = data.HAPT()
assert d._test_attrs is None
assert d._test_labels is None
d.load_test_data()
assert len(d._test_attrs) > 0
assert len(d._test_labels) > 0
assert len(d._test_attrs) == len(d._test_labels)
assert len(d.get_test_data()) == len(d.get_test_labels())
def test_load_all_data(self):
d = data.HAPT()
assert d._train_attrs is None
assert d._train_labels is None
assert d._test_attrs is None
assert d._test_labels is None
d.load_all_data()
assert len(d._train_attrs) > 0
assert len(d._train_labels) > 0
assert len(d._test_attrs) > 0
assert len(d._test_labels) > 0
assert len(d._train_attrs) == len(d._train_labels)
assert len(d._test_attrs) == len(d._test_labels)
assert len(d.get_train_data()) == len(d.get_train_labels())
assert len(d.get_test_data()) == len(d.get_test_labels())
def test_get_labels_map(self):
orig_labels = {
1: "WALKING",
2: "WALKING_UPSTAIRS",
3: "WALKING_DOWNSTAIRS",
4: "SITTING",
5: "STANDING",
6: "LAYING",
7: "STAND_TO_SIT",
8: "SIT_TO_STAND",
9: "SIT_TO_LIE",
10: "LIE_TO_SIT",
11: "STAND_TO_LIE",
12: "LIE_TO_STAND"
}
d = data.HAPT()
assert d._labels == {}
d.get_labels_map()
assert d._labels == orig_labels
assert d.get_labels_map() == orig_labels
def test_aggregate_groups(self):
orig_labels = {
1: "WALKING",
2: "WALKING_UPSTAIRS",
3: "WALKING_DOWNSTAIRS",
4: "SITTING",
5: "STANDING",
6: "LAYING",
7: "STAND_TO_SIT",
8: "SIT_TO_STAND",
9: "SIT_TO_LIE",
10: "LIE_TO_SIT",
11: "STAND_TO_LIE",
12: "LIE_TO_STAND"
}
d = data.HAPT()
d._labels = orig_labels
d._test_labels = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
d._train_labels = [12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
d.aggregate_groups()
assert np.array_equal(d._aggregated_test_labels, np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2]))
assert np.array_equal(d._aggregated_train_labels, np.array([2, 2, 2, 2, 2, 2, 1, 1, 1, 0, 0, 0]))
assert d._aggregated2initial_labels == {0: [1, 2, 3], 1: [4, 5, 6], 2: [7, 8, 9, 10, 11, 12]}
def test_get_aggr2initial_labs_map(self):
d = data.HAPT()
d.load_all_data()
d.aggregate_groups()
assert d.get_aggr2initial_labs_map() == {
'WALKING': ['WALKING', 'WALKING_UPSTAIRS', 'WALKING_DOWNSTAIRS'],
'STATIC': ['SITTING', 'STANDING', 'LAYING'],
'TRANSITION': ['STAND_TO_SIT', 'SIT_TO_STAND', 'SIT_TO_LIE', 'LIE_TO_SIT', 'STAND_TO_LIE', 'LIE_TO_STAND']
}
def test_get_aggregated_test_labels(self):
orig_labels = {
1: "WALKING",
2: "WALKING_UPSTAIRS",
3: "WALKING_DOWNSTAIRS",
4: "SITTING",
5: "STANDING",
6: "LAYING",
7: "STAND_TO_SIT",
8: "SIT_TO_STAND",
9: "SIT_TO_LIE",
10: "LIE_TO_SIT",
11: "STAND_TO_LIE",
12: "LIE_TO_STAND"
}
d = data.HAPT()
d._labels = orig_labels
d._test_labels = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
d._train_labels = [12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
assert d.get_aggregated_test_labels() == d._test_labels
d.aggregate_groups()
print(d._aggregated_test_labels)
assert np.array_equal(d.get_aggregated_test_labels(), np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2]))
def test_get_aggregated_train_labels(self):
orig_labels = {
1: "WALKING",
2: "WALKING_UPSTAIRS",
3: "WALKING_DOWNSTAIRS",
4: "SITTING",
5: "STANDING",
6: "LAYING",
7: "STAND_TO_SIT",
8: "SIT_TO_STAND",
9: "SIT_TO_LIE",
10: "LIE_TO_SIT",
11: "STAND_TO_LIE",
12: "LIE_TO_STAND"
}
d = data.HAPT()
d._labels = orig_labels
d._test_labels = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
d._train_labels = [12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
assert d.get_aggregated_train_labels() == d._train_labels
d.aggregate_groups()
assert np.array_equal(d.get_aggregated_train_labels(), np.array([2, 2, 2, 2, 2, 2, 1, 1, 1, 0, 0, 0]))
def test_get_aggregated_labels_map(self):
d = data.HAPT()
assert d.get_aggregated_labels_map() == {0: "WALKING", 1: "STATIC", 2: "TRANSITION"}
|
[
"data.HAPT",
"numpy.array"
] |
[((195, 206), 'data.HAPT', 'data.HAPT', ([], {}), '()\n', (204, 206), False, 'import data\n'), ((404, 415), 'data.HAPT', 'data.HAPT', ([], {}), '()\n', (413, 415), False, 'import data\n'), ((616, 627), 'data.HAPT', 'data.HAPT', ([], {}), '()\n', (625, 627), False, 'import data\n'), ((820, 831), 'data.HAPT', 'data.HAPT', ([], {}), '()\n', (829, 831), False, 'import data\n'), ((1030, 1041), 'data.HAPT', 'data.HAPT', ([], {}), '()\n', (1039, 1041), False, 'import data\n'), ((1401, 1412), 'data.HAPT', 'data.HAPT', ([], {}), '()\n', (1410, 1412), False, 'import data\n'), ((1762, 1773), 'data.HAPT', 'data.HAPT', ([], {}), '()\n', (1771, 1773), False, 'import data\n'), ((2800, 2811), 'data.HAPT', 'data.HAPT', ([], {}), '()\n', (2809, 2811), False, 'import data\n'), ((3403, 3414), 'data.HAPT', 'data.HAPT', ([], {}), '()\n', (3412, 3414), False, 'import data\n'), ((3979, 3990), 'data.HAPT', 'data.HAPT', ([], {}), '()\n', (3988, 3990), False, 'import data\n'), ((4814, 4825), 'data.HAPT', 'data.HAPT', ([], {}), '()\n', (4823, 4825), False, 'import data\n'), ((5688, 5699), 'data.HAPT', 'data.HAPT', ([], {}), '()\n', (5697, 5699), False, 'import data\n'), ((6128, 6139), 'data.HAPT', 'data.HAPT', ([], {}), '()\n', (6137, 6139), False, 'import data\n'), ((3664, 3710), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2])\n', (3672, 3710), True, 'import numpy as np\n'), ((3770, 3816), 'numpy.array', 'np.array', (['[2, 2, 2, 2, 2, 2, 1, 1, 1, 0, 0, 0]'], {}), '([2, 2, 2, 2, 2, 2, 1, 1, 1, 0, 0, 0])\n', (3778, 3816), True, 'import numpy as np\n'), ((5185, 5231), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2])\n', (5193, 5231), True, 'import numpy as np\n'), ((6021, 6067), 'numpy.array', 'np.array', (['[2, 2, 2, 2, 2, 2, 1, 1, 1, 0, 0, 0]'], {}), '([2, 2, 2, 2, 2, 2, 1, 1, 1, 0, 0, 0])\n', (6029, 6067), True, 'import numpy as np\n')]
|
import pytest
from auto_deprecator import deprecate
__version__ = "2.0.0"
@deprecate(
expiry="2.1.0",
version_module="tests.function.test_deprecate_version_module",
)
def simple_deprecate():
pass
@deprecate(
expiry="2.1.0", version_module="tests.function.conftest",
)
def failed_to_locate_version():
pass
@deprecate(
expiry="2.1.0", version_module="tests.function.not_existing_module",
)
def not_existing_module():
pass
def test_no_error_simple_deprecate():
with pytest.warns(DeprecationWarning) as warning:
simple_deprecate()
assert (
'Function "simple_deprecate" will be deprecated on version 2.1.0'
) in warning[0].message.args[0]
def test_failed_to_locate_version():
with pytest.raises(RuntimeError) as error:
failed_to_locate_version()
assert (
"Cannot find version (__version__) from the version module "
'"tests.function.conftest"'
) in str(error.value)
def test_not_existing_module():
with pytest.raises(RuntimeError) as error:
not_existing_module()
assert (
'Cannot locate version module "tests.function.not_existing_module"'
) in str(error.value)
|
[
"pytest.warns",
"auto_deprecator.deprecate",
"pytest.raises"
] |
[((80, 173), 'auto_deprecator.deprecate', 'deprecate', ([], {'expiry': '"""2.1.0"""', 'version_module': '"""tests.function.test_deprecate_version_module"""'}), "(expiry='2.1.0', version_module=\n 'tests.function.test_deprecate_version_module')\n", (89, 173), False, 'from auto_deprecator import deprecate\n'), ((216, 283), 'auto_deprecator.deprecate', 'deprecate', ([], {'expiry': '"""2.1.0"""', 'version_module': '"""tests.function.conftest"""'}), "(expiry='2.1.0', version_module='tests.function.conftest')\n", (225, 283), False, 'from auto_deprecator import deprecate\n'), ((335, 413), 'auto_deprecator.deprecate', 'deprecate', ([], {'expiry': '"""2.1.0"""', 'version_module': '"""tests.function.not_existing_module"""'}), "(expiry='2.1.0', version_module='tests.function.not_existing_module')\n", (344, 413), False, 'from auto_deprecator import deprecate\n'), ((506, 538), 'pytest.warns', 'pytest.warns', (['DeprecationWarning'], {}), '(DeprecationWarning)\n', (518, 538), False, 'import pytest\n'), ((750, 777), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (763, 777), False, 'import pytest\n'), ((1011, 1038), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (1024, 1038), False, 'import pytest\n')]
|
from urllib import request
url="http://www.renren.com/970973463"
headers={
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0',
'Cookie':'anonymid=jw6ali52-qw6ldx; depovince=GUZ; _r01_=1; JSESSIONID=abcv45u4hL5Z0cQdde5Rw; ick_login=99f8241c-bfc0-4cda-9ed9-a1126aa9021e; t=dd1e75d66334a9699f53bc6ddb8c20ea3; societyguester=dd1e75d66334a9699f53bc6ddb8c20ea3; id=970973463; xnsid=6eedc27; jebe_key=5ac606a2-3b4f-4863-80e9-1f0a22bfec2e%7C5f5e2728ff534657c04151fc12f87207%7C1558956815778%7C1%7C1558956814761; XNESSESSIONID=cdf65a586a5f; jebecookies=3b4a8a1d-30fc-44c3-8fe6-fd6adc9781b7|||||; ver=7.0; loginfrom=null; wp_fold=0'
}
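# Build the request with the browser-style headers above, fetch the profile page,
# and save the decoded HTML to a local file named "renren".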
req=request.Request(url,headers=headers)
req1=request.urlopen(req)
with open("renren","w",encoding="utf-8") as fp:
fp.write(req1.read().decode("utf-8"))
|
[
"urllib.request.Request",
"urllib.request.urlopen"
] |
[((698, 735), 'urllib.request.Request', 'request.Request', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (713, 735), False, 'from urllib import request\n'), ((741, 761), 'urllib.request.urlopen', 'request.urlopen', (['req'], {}), '(req)\n', (756, 761), False, 'from urllib import request\n')]
|
import math
from math import sqrt
import cmath
print("Module math imported")
print(math.floor(32.9))
print(int(32.9))
print(math.ceil(32.3))
print(math.ceil(32))
print(sqrt(9))
print(sqrt(2))
#cmath and Complex Numbers
##print(sqrt(-1)) This will trigger a ValueError: math domain error
print(cmath.sqrt(-1))
print((1+3j)*(9+4j))
|
[
"cmath.sqrt",
"math.floor",
"math.sqrt",
"math.ceil"
] |
[((84, 100), 'math.floor', 'math.floor', (['(32.9)'], {}), '(32.9)\n', (94, 100), False, 'import math\n'), ((127, 142), 'math.ceil', 'math.ceil', (['(32.3)'], {}), '(32.3)\n', (136, 142), False, 'import math\n'), ((150, 163), 'math.ceil', 'math.ceil', (['(32)'], {}), '(32)\n', (159, 163), False, 'import math\n'), ((173, 180), 'math.sqrt', 'sqrt', (['(9)'], {}), '(9)\n', (177, 180), False, 'from math import sqrt\n'), ((188, 195), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (192, 195), False, 'from math import sqrt\n'), ((300, 314), 'cmath.sqrt', 'cmath.sqrt', (['(-1)'], {}), '(-1)\n', (310, 314), False, 'import cmath\n')]
|
from flask import Flask, jsonify, request
app = Flask(__name__) # Gives a unique name
stores = [
{
'name': 'MyStore',
'items': [
{
'name': 'My Item',
'price': 15.99
}
]
}
]
"""
@app.route('/') # Route of the endpoint 'http://www.google.com/'
def home():
return "Hello, world!"
"""
# POST /store. data: {name: }
@app.route('/store', methods=['POST'])
def create_store():
request_data = request.get_json()
new_store = {'name': request_data['name'], 'items': []}
stores.append(new_store)
return jsonify(new_store)
# GET /store/<string:name>
@app.route('/store/<string:name>')
def get_store(name):
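    # Filter the in-memory store list by name; an empty result means no match.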
store = list(filter(lambda store: store['name'] == name, stores))
if store == []:
return jsonify({'message': 'store not found'})
else:
return jsonify(store)
# GET /store
@app.route('/store')
def get_stores():
return jsonify({'stores': stores})
# POST /store/<string:name>/item
@app.route('/store/<string:name>/item', methods=['POST'])
def create_item_in_store(name):
request_data = request.get_json()
store = list(filter(lambda store: store['name'] == name, stores))
new_item = {'name': request_data['name'], 'price': request_data['price']}
if store == []:
return jsonify({'message': 'store not found'})
store[0]['items'].append(new_item)
return jsonify(new_item)
# GET /store/<string:name>/item
@app.route('/store/<string:name>/item')
def get_items_in_store(name):
store = list(filter(lambda store: store['name'] == name, stores))
if store == []:
return jsonify({'message': 'store not found'})
else:
return jsonify({'items': store[0]['items']})
app.run(host='0.0.0.0', port=5000)
|
[
"flask.jsonify",
"flask.Flask",
"flask.request.get_json"
] |
[((49, 64), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (54, 64), False, 'from flask import Flask, jsonify, request\n'), ((435, 453), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (451, 453), False, 'from flask import Flask, jsonify, request\n'), ((554, 572), 'flask.jsonify', 'jsonify', (['new_store'], {}), '(new_store)\n', (561, 572), False, 'from flask import Flask, jsonify, request\n'), ((903, 930), 'flask.jsonify', 'jsonify', (["{'stores': stores}"], {}), "({'stores': stores})\n", (910, 930), False, 'from flask import Flask, jsonify, request\n'), ((1074, 1092), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (1090, 1092), False, 'from flask import Flask, jsonify, request\n'), ((1367, 1384), 'flask.jsonify', 'jsonify', (['new_item'], {}), '(new_item)\n', (1374, 1384), False, 'from flask import Flask, jsonify, request\n'), ((762, 801), 'flask.jsonify', 'jsonify', (["{'message': 'store not found'}"], {}), "({'message': 'store not found'})\n", (769, 801), False, 'from flask import Flask, jsonify, request\n'), ((827, 841), 'flask.jsonify', 'jsonify', (['store'], {}), '(store)\n', (834, 841), False, 'from flask import Flask, jsonify, request\n'), ((1276, 1315), 'flask.jsonify', 'jsonify', (["{'message': 'store not found'}"], {}), "({'message': 'store not found'})\n", (1283, 1315), False, 'from flask import Flask, jsonify, request\n'), ((1599, 1638), 'flask.jsonify', 'jsonify', (["{'message': 'store not found'}"], {}), "({'message': 'store not found'})\n", (1606, 1638), False, 'from flask import Flask, jsonify, request\n'), ((1664, 1701), 'flask.jsonify', 'jsonify', (["{'items': store[0]['items']}"], {}), "({'items': store[0]['items']})\n", (1671, 1701), False, 'from flask import Flask, jsonify, request\n')]
|
#!/usr/bin/env python
"""fq2vcf
"""
from __future__ import division, print_function
import os
import glob
from setuptools import setup, find_packages
VERSION = '0.0.0'
scripts = ['scripts/fq2vcf']
scripts.extend(glob.glob('scripts/*.sh'))
scripts.extend(glob.glob('scripts/*.py'))
print(scripts)
def read(fname):
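    # Read a file located next to this setup.py (used for the long description).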
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="fq2vcf",
packages=find_packages(),
version=VERSION,
description="fq2vcf",
maintainer='yingnn',
author='yingnn',
long_description=read('README.md'),
keywords=['SNP InDel calling workflow', 'SNP InDel calling pipeline'],
    license='MIT license',
include_package_data=True,
platforms=["Linux", "Mac OS-X", "Unix"],
install_requires=['argparse',
'configparser'],
scripts=scripts,
)
|
[
"os.path.dirname",
"setuptools.find_packages",
"glob.glob"
] |
[((217, 242), 'glob.glob', 'glob.glob', (['"""scripts/*.sh"""'], {}), "('scripts/*.sh')\n", (226, 242), False, 'import glob\n'), ((259, 284), 'glob.glob', 'glob.glob', (['"""scripts/*.py"""'], {}), "('scripts/*.py')\n", (268, 284), False, 'import glob\n'), ((432, 447), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (445, 447), False, 'from setuptools import setup, find_packages\n'), ((349, 374), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (364, 374), False, 'import os\n')]
|
import numpy as np
from collections import namedtuple
from util import (
vec3d_to_array,
quat_to_array,
array_to_vec3d_pb,
array_to_quat_pb,
)
from radar_data_streamer import RadarData
from data_pb2 import Image
Extrinsic = namedtuple('Extrinsic', ['position', 'attitude'])
class RadarImage(RadarData):
"""
This class is a Python representation of the protobuf Image object for
convinent downstream operations
"""
def __init__(self, timestamp, frame_id, extrinsic, image_model, image):
self.timestamp = timestamp
self.frame_id = frame_id
self.extrinsic = extrinsic
self.image_model = image_model
self.image = image
@classmethod
def from_proto(cls, image_pb):
timestamp = image_pb.meta.timestamp
frame_id = image_pb.meta.frame_id
extrinsic = Extrinsic(
position=vec3d_to_array(image_pb.meta.position),
attitude=quat_to_array(image_pb.meta.attitude))
image_model = ImageModel(
origin=vec3d_to_array(image_pb.cartesian.model.origin),
di=vec3d_to_array(image_pb.cartesian.model.di),
dj=vec3d_to_array(image_pb.cartesian.model.dj))
# create the image array
image_shape = (image_pb.cartesian.data.cols,
image_pb.cartesian.data.rows)
image_data = np.frombuffer(image_pb.cartesian.data.data,
dtype=np.uint32)
# copy image_data because we do not own the memory
image = np.reshape(image_data.copy(), image_shape)
radar_image = cls(timestamp, frame_id, extrinsic, image_model, image)
return radar_image
def to_proto(self, timestamp, frame_id):
image_pb = Image()
image_pb.meta.timestamp = timestamp
image_pb.meta.frame_id = frame_id
# Setting the type to REAL_32U
image_pb.cartesian.data.type = 5
array_to_vec3d_pb(image_pb.meta.position,
self.extrinsic.position)
array_to_quat_pb(image_pb.meta.attitude,
self.extrinsic.attitude)
array_to_vec3d_pb(image_pb.cartesian.model.origin,
self.image_model.origin)
array_to_vec3d_pb(image_pb.cartesian.model.di,
self.image_model.di)
array_to_vec3d_pb(image_pb.cartesian.model.dj,
self.image_model.dj)
image_pb.cartesian.data.cols, image_pb.cartesian.data.rows = \
self.image.shape
return image_pb
class ImageModel(object):
"""
ImageModel describing mapping from world coordinate to image model
"""
def __init__(self, origin, di, dj):
self.di = di
self.dj = dj
self.origin = origin
def global_to_image(self, ecef_point):
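        # Project the offset from the image origin onto the (di, dj) axes,
        # then round to the nearest integer pixel indices.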
radar_to_image = ecef_point - self.origin
i_res = np.linalg.norm(self.di)
j_res = np.linalg.norm(self.dj)
i_dir = self.di/i_res
j_dir = self.dj/j_res
i_proj = int(round(radar_to_image.dot(i_dir)/i_res))
j_proj = int(round(radar_to_image.dot(j_dir)/j_res))
pixel_point = (i_proj, j_proj)
return pixel_point
def image_to_global(self, pixel_point):
i_idx = pixel_point[0]
j_idx = pixel_point[1]
ecef_point = self.origin + (i_idx*self.di) + (j_idx*self.dj)
return ecef_point
|
[
"util.array_to_quat_pb",
"util.quat_to_array",
"data_pb2.Image",
"numpy.frombuffer",
"util.vec3d_to_array",
"numpy.linalg.norm",
"collections.namedtuple",
"util.array_to_vec3d_pb"
] |
[((241, 290), 'collections.namedtuple', 'namedtuple', (['"""Extrinsic"""', "['position', 'attitude']"], {}), "('Extrinsic', ['position', 'attitude'])\n", (251, 290), False, 'from collections import namedtuple\n'), ((1371, 1431), 'numpy.frombuffer', 'np.frombuffer', (['image_pb.cartesian.data.data'], {'dtype': 'np.uint32'}), '(image_pb.cartesian.data.data, dtype=np.uint32)\n', (1384, 1431), True, 'import numpy as np\n'), ((1758, 1765), 'data_pb2.Image', 'Image', ([], {}), '()\n', (1763, 1765), False, 'from data_pb2 import Image\n'), ((1941, 2007), 'util.array_to_vec3d_pb', 'array_to_vec3d_pb', (['image_pb.meta.position', 'self.extrinsic.position'], {}), '(image_pb.meta.position, self.extrinsic.position)\n', (1958, 2007), False, 'from util import vec3d_to_array, quat_to_array, array_to_vec3d_pb, array_to_quat_pb\n'), ((2043, 2108), 'util.array_to_quat_pb', 'array_to_quat_pb', (['image_pb.meta.attitude', 'self.extrinsic.attitude'], {}), '(image_pb.meta.attitude, self.extrinsic.attitude)\n', (2059, 2108), False, 'from util import vec3d_to_array, quat_to_array, array_to_vec3d_pb, array_to_quat_pb\n'), ((2143, 2218), 'util.array_to_vec3d_pb', 'array_to_vec3d_pb', (['image_pb.cartesian.model.origin', 'self.image_model.origin'], {}), '(image_pb.cartesian.model.origin, self.image_model.origin)\n', (2160, 2218), False, 'from util import vec3d_to_array, quat_to_array, array_to_vec3d_pb, array_to_quat_pb\n'), ((2254, 2321), 'util.array_to_vec3d_pb', 'array_to_vec3d_pb', (['image_pb.cartesian.model.di', 'self.image_model.di'], {}), '(image_pb.cartesian.model.di, self.image_model.di)\n', (2271, 2321), False, 'from util import vec3d_to_array, quat_to_array, array_to_vec3d_pb, array_to_quat_pb\n'), ((2357, 2424), 'util.array_to_vec3d_pb', 'array_to_vec3d_pb', (['image_pb.cartesian.model.dj', 'self.image_model.dj'], {}), '(image_pb.cartesian.model.dj, self.image_model.dj)\n', (2374, 2424), False, 'from util import vec3d_to_array, quat_to_array, array_to_vec3d_pb, array_to_quat_pb\n'), ((2913, 2936), 'numpy.linalg.norm', 'np.linalg.norm', (['self.di'], {}), '(self.di)\n', (2927, 2936), True, 'import numpy as np\n'), ((2953, 2976), 'numpy.linalg.norm', 'np.linalg.norm', (['self.dj'], {}), '(self.dj)\n', (2967, 2976), True, 'import numpy as np\n'), ((886, 924), 'util.vec3d_to_array', 'vec3d_to_array', (['image_pb.meta.position'], {}), '(image_pb.meta.position)\n', (900, 924), False, 'from util import vec3d_to_array, quat_to_array, array_to_vec3d_pb, array_to_quat_pb\n'), ((947, 984), 'util.quat_to_array', 'quat_to_array', (['image_pb.meta.attitude'], {}), '(image_pb.meta.attitude)\n', (960, 984), False, 'from util import vec3d_to_array, quat_to_array, array_to_vec3d_pb, array_to_quat_pb\n'), ((1040, 1087), 'util.vec3d_to_array', 'vec3d_to_array', (['image_pb.cartesian.model.origin'], {}), '(image_pb.cartesian.model.origin)\n', (1054, 1087), False, 'from util import vec3d_to_array, quat_to_array, array_to_vec3d_pb, array_to_quat_pb\n'), ((1104, 1147), 'util.vec3d_to_array', 'vec3d_to_array', (['image_pb.cartesian.model.di'], {}), '(image_pb.cartesian.model.di)\n', (1118, 1147), False, 'from util import vec3d_to_array, quat_to_array, array_to_vec3d_pb, array_to_quat_pb\n'), ((1164, 1207), 'util.vec3d_to_array', 'vec3d_to_array', (['image_pb.cartesian.model.dj'], {}), '(image_pb.cartesian.model.dj)\n', (1178, 1207), False, 'from util import vec3d_to_array, quat_to_array, array_to_vec3d_pb, array_to_quat_pb\n')]
|
import pygame
import Levels
from Sprites import *
is_fever = False
class Fever():
global fever_score
def __init__(self):
self.is_fever = False
def feverTime(self,hero_sprites,ghost_sprites):
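        # groupcollide with (False, False) only detects hero/ghost overlaps;
        # no sprites are removed and the returned collision dict is discarded.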
pygame.sprite.groupcollide(hero_sprites, ghost_sprites, False, False)
return True
|
[
"pygame.sprite.groupcollide"
] |
[((223, 292), 'pygame.sprite.groupcollide', 'pygame.sprite.groupcollide', (['hero_sprites', 'ghost_sprites', '(False)', '(False)'], {}), '(hero_sprites, ghost_sprites, False, False)\n', (249, 292), False, 'import pygame\n')]
|
"""Get top N rated movies from MovieLens
This script allows user to get information about films.
This file can also be imported as a module and contains the following
functions:
* display_movies - Print data in csv format
* get_arguments - Construct the argument parser and get the arguments
* main - the main function of the script
"""
# import the necessary packages
import time
import argparse
import logging as log
from config import *
from mysql.connector import (connection)
def fetch_movies_data(cnx, n=None, regexp=None, year_from=None, year_to=None, genres=None):
""" Generator function to fetch data rows from stored procedure with arguments
Parameters
----------
cnx :
MySqlConnection to database
n : int, optional
The number of top rated movies for each genre, by default None
regexp : str, optional
Filter on name of the film, by default None
year_from : int, optional
The lower boundary of year filter, by default None
year_to : int, optional
The lower boundary of year filter, by default None
genres : str, optional
User-defined genre filter. can be multiple, by default None
Yields
-------
tuple
row of MySqlConnector data from stored procedure
"""
log.info('fetching movies')
cursor = cnx.cursor()
# NULL if None
if not n:
n = 'NULL'
if not regexp:
regexp = 'NULL'
else:
regexp = f"'{regexp}'"
if not year_from:
year_from = 'NULL'
if not year_to:
year_to = 'NULL'
if not genres:
genres = 'NULL'
else:
genres = f"'{genres}'"
try:
query_string = f"CALL spr_find_top_rated_movies({n}, {regexp}, {year_from}, {year_to}, {genres});"
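        # A stored-procedure CALL can return multiple result sets, hence multi=True.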
for result in cursor.execute(query_string, multi=True):
if result.with_rows:
log.debug(f'Rows produced by statement "{result.statement}":')
for row in result.fetchall():
log.debug(row)
yield row
except Exception as e:
log.exception(e)
log.debug(query_string)
cursor.close()
def display_movies(cnx, n=None, regexp=None, year_from=None, year_to=None, genres=None, delimiter=',') -> None:
""" Display movies from called stored procedure in csv format
Parameters
----------
cnx :
MySqlConnection to database
n : int, optional
The number of top rated movies for each genre, by default None
regexp : str, optional
Filter on name of the film, by default None
year_from : int, optional
The lower boundary of year filter, by default None
year_to : int, optional
The lower boundary of year filter, by default None
genres : str, optional
User-defined genre filter. can be multiple, by default None
delimiter : str, optional
Separator of csv format, by default ','
"""
try:
column_names = ['movieId', 'title', 'genres', 'year', 'rating']
header = ', '.join(column_names)
print(header)
for row in fetch_movies_data(cnx, n, regexp, year_from, year_to, genres):
csv_row = ''
for attr in row:
if delimiter in str(attr):
attr = f'"{attr}"'
csv_row += delimiter + str(attr)
csv_row = csv_row[1:]
print(csv_row)
except Exception as e:
log.exception(e)
def get_arguments() -> dict:
"""Construct the argument parser and get the arguments
Returns
-------
dict
Dictionary of arguments and paramenters
"""
ap = argparse.ArgumentParser(description=__doc__)
ap.add_argument("-n", "--topN", type=int,
help="the number of top rated movies for each genre. (example: 3)")
ap.add_argument("-g", "--genres", type=str,
help="user-defined genre filter. can be multiple. (example: Comedy|Adventure)")
ap.add_argument("-f", "--year_from", type=int,
help="the lower boundary of year filter (example: 1980)")
ap.add_argument("-t", "--year_to", type=int,
help="the lower boundary of year filter (example: 2010)")
ap.add_argument("-r", "--regexp", type=str,
help="filter on name of the film (example: love)")
return vars(ap.parse_args())
def main():
log.basicConfig(level=log.getLevelName(CONFIG['logging']['level']),
filename=CONFIG['logging']['filename'],
filemode=CONFIG['logging']['filemode'],
format=CONFIG['logging']['format'],
datefmt=CONFIG['logging']['datefmt'])
log.info('Start')
# save start time for calculating
time_start = time.perf_counter()
# construct args
log.info('constructing argument parser')
args = get_arguments()
log.debug(f'arguments: {args}')
log.info('Done!')
try:
# DB connect
log.info('Opening connection to DB')
cnx = connection.MySQLConnection(**CONFIG['db_connect'])
log.info('Done!')
log.info('fetching and printing movies')
display_movies(cnx, args['topN'], args['regexp'],
args['year_from'], args['year_to'], args['genres'])
log.info('Done!')
except Exception as e:
log.error(e)
cnx.close()
log.info('Connection to DB closed')
time_elapsed = time.perf_counter() - time_start
log.info(f'Finish in {time_elapsed:.4f} secs')
if __name__ == "__main__":
main()
|
[
"logging.exception",
"logging.error",
"logging.debug",
"argparse.ArgumentParser",
"time.perf_counter",
"mysql.connector.connection.MySQLConnection",
"logging.info",
"logging.getLevelName"
] |
[((1296, 1323), 'logging.info', 'log.info', (['"""fetching movies"""'], {}), "('fetching movies')\n", (1304, 1323), True, 'import logging as log\n'), ((3682, 3726), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (3705, 3726), False, 'import argparse\n'), ((4742, 4759), 'logging.info', 'log.info', (['"""Start"""'], {}), "('Start')\n", (4750, 4759), True, 'import logging as log\n'), ((4815, 4834), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (4832, 4834), False, 'import time\n'), ((4861, 4901), 'logging.info', 'log.info', (['"""constructing argument parser"""'], {}), "('constructing argument parser')\n", (4869, 4901), True, 'import logging as log\n'), ((4933, 4964), 'logging.debug', 'log.debug', (['f"""arguments: {args}"""'], {}), "(f'arguments: {args}')\n", (4942, 4964), True, 'import logging as log\n'), ((4969, 4986), 'logging.info', 'log.info', (['"""Done!"""'], {}), "('Done!')\n", (4977, 4986), True, 'import logging as log\n'), ((5431, 5466), 'logging.info', 'log.info', (['"""Connection to DB closed"""'], {}), "('Connection to DB closed')\n", (5439, 5466), True, 'import logging as log\n'), ((5524, 5570), 'logging.info', 'log.info', (['f"""Finish in {time_elapsed:.4f} secs"""'], {}), "(f'Finish in {time_elapsed:.4f} secs')\n", (5532, 5570), True, 'import logging as log\n'), ((5026, 5062), 'logging.info', 'log.info', (['"""Opening connection to DB"""'], {}), "('Opening connection to DB')\n", (5034, 5062), True, 'import logging as log\n'), ((5077, 5127), 'mysql.connector.connection.MySQLConnection', 'connection.MySQLConnection', ([], {}), "(**CONFIG['db_connect'])\n", (5103, 5127), False, 'from mysql.connector import connection\n'), ((5136, 5153), 'logging.info', 'log.info', (['"""Done!"""'], {}), "('Done!')\n", (5144, 5153), True, 'import logging as log\n'), ((5163, 5203), 'logging.info', 'log.info', (['"""fetching and printing movies"""'], {}), "('fetching and printing movies')\n", (5171, 5203), True, 'import logging as log\n'), ((5343, 5360), 'logging.info', 'log.info', (['"""Done!"""'], {}), "('Done!')\n", (5351, 5360), True, 'import logging as log\n'), ((5487, 5506), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (5504, 5506), False, 'import time\n'), ((2105, 2121), 'logging.exception', 'log.exception', (['e'], {}), '(e)\n', (2118, 2121), True, 'import logging as log\n'), ((2130, 2153), 'logging.debug', 'log.debug', (['query_string'], {}), '(query_string)\n', (2139, 2153), True, 'import logging as log\n'), ((3476, 3492), 'logging.exception', 'log.exception', (['e'], {}), '(e)\n', (3489, 3492), True, 'import logging as log\n'), ((4458, 4502), 'logging.getLevelName', 'log.getLevelName', (["CONFIG['logging']['level']"], {}), "(CONFIG['logging']['level'])\n", (4474, 4502), True, 'import logging as log\n'), ((5397, 5409), 'logging.error', 'log.error', (['e'], {}), '(e)\n', (5406, 5409), True, 'import logging as log\n'), ((1895, 1957), 'logging.debug', 'log.debug', (['f"""Rows produced by statement "{result.statement}":"""'], {}), '(f\'Rows produced by statement "{result.statement}":\')\n', (1904, 1957), True, 'import logging as log\n'), ((2025, 2039), 'logging.debug', 'log.debug', (['row'], {}), '(row)\n', (2034, 2039), True, 'import logging as log\n')]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
import os
import sys
import settings
from datetime import datetime
class Logger(object):
def __init__(self):
log = logging.getLogger('')
log.setLevel(logging.INFO)
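        # Name the log file with the current UTC timestamp.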
filename = datetime.utcnow().strftime('%Y.%m.%d_%H.%M_UTC.log')
log_dir = getattr(settings, 'LOG_DIR', 'logs')
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
fh = logging.FileHandler(os.path.join(log_dir, filename), mode='w')
fh.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
log.addHandler(fh)
        # Also send log output to the console
console = sys.stderr
if console is not None:
            # Log output goes to both the console and the log file at the same time
console = logging.StreamHandler(console)
console.setLevel(logging.INFO)
console.setFormatter(formatter)
log.addHandler(console)
Logger()
log = logging.getLogger('')
|
[
"os.makedirs",
"os.path.isdir",
"logging.StreamHandler",
"logging.Formatter",
"datetime.datetime.utcnow",
"os.path.join",
"logging.getLogger"
] |
[((1114, 1135), 'logging.getLogger', 'logging.getLogger', (['""""""'], {}), "('')\n", (1131, 1135), False, 'import logging\n'), ((209, 230), 'logging.getLogger', 'logging.getLogger', (['""""""'], {}), "('')\n", (226, 230), False, 'import logging\n'), ((599, 672), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (616, 672), False, 'import logging\n'), ((410, 432), 'os.path.isdir', 'os.path.isdir', (['log_dir'], {}), '(log_dir)\n', (423, 432), False, 'import os\n'), ((446, 466), 'os.makedirs', 'os.makedirs', (['log_dir'], {}), '(log_dir)\n', (457, 466), False, 'import os\n'), ((501, 532), 'os.path.join', 'os.path.join', (['log_dir', 'filename'], {}), '(log_dir, filename)\n', (513, 532), False, 'import os\n'), ((943, 973), 'logging.StreamHandler', 'logging.StreamHandler', (['console'], {}), '(console)\n', (964, 973), False, 'import logging\n'), ((286, 303), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (301, 303), False, 'from datetime import datetime\n')]
|
import csv
import cv2
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
import sklearn
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Convolution2D,Flatten,Dense,Lambda
from keras import optimizers
from keras import regularizers
BATCH_SIZE=128
BINS=25
BIN_RANGE=[-1.0,1.0]
EPOCHS=5
LEARNING_RATE = 0.001
LEARNING_RATE_DECAY = 0.0001
L2_REGULARIZATION = 0.001
ANGLE_CORRECTION_FACTOR = 0.20
def load_driving_log(csv_path):
'''
Loads the driving data log(csv).
Returns the line data as a string array.
'''
samples = []
with open(csv_path) as csvfile:
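        # Sniff the first 1KB to detect whether the CSV starts with a header row.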
header_present = csv.Sniffer().has_header(csvfile.read(1024))
csvfile.seek(0) # back to first line
reader = csv.reader(csvfile)
if header_present:
next(reader) # skip the header
for line in reader:
samples.append(line)
return samples
def cleanup_data(samples):
'''
Removes any data with speed = 0.
Returns cleansed data array.
'''
cleansed_samples = []
for sample in samples:
if (float(sample[6]) != 0.0):# don't add zero speed frames
cleansed_samples.append(sample)
return cleansed_samples
def draw_angles_distribution(samples,bins,angle_range):
'''
Draws a bar chart showing the histogram of the passed in data.
Returns the left edge for each bin (apart form the last one for which right edge is returned)
and the bin value. The no. of bin edges is 'bin' + 1.
'''
angles = []
for sample in samples:
angle = float(sample[3])
angles.append(angle)
plt.figure(figsize=(14,7))
plt.ylabel('Count');
plt.xlabel('Angle');
bar_height_if_uniform_dist = len(samples)/bins
plt.plot(angle_range,[bar_height_if_uniform_dist,bar_height_if_uniform_dist])
plt.text(angle_range[0],bar_height_if_uniform_dist+50,'Uniform Distribution')
plt.title('Angle Histogram')
bin_values,bin_edges,_=plt.hist(angles,bins=bins,range=angle_range)
plt.show()
return bin_edges,bin_values
def balance_dataset(samples,bin_edges,bin_values,bins):
'''
Removes data where:
(i) angle is = +- 1.0
(ii) the bin size is greater than the average bin size
Returns the balanced array of sample data.
'''
balanced_samples = []
for sample in samples:
angle = float(sample[3])
if (angle == 1.0 or angle == -1.0): # Remove extreme angles
continue
# Total bin edges are = no. of bins + 1
# Bin edges are the left most value of the bin range aprt from the last one which is the right most,
# hence check if less than
potential_bins = np.where(bin_edges < angle)
# if no bin found
if (len(potential_bins[0]) == 0):
# For catching cases where the angle is exactly -1 or +1
potential_bins = np.where(bin_edges == angle)
if (len(potential_bins[0]) == 0):
raise Exception('No bin match found for angle:{}'.format(angle))
matched_bin_index = np.max(potential_bins)
matched_bin_value = bin_values[matched_bin_index]
avg_bin_size = len(samples)/bins
# Higher the %, the more that bin gets penalized
keep_probability = 1 - ((matched_bin_value + 10*avg_bin_size)/len(samples))
if (matched_bin_value > avg_bin_size):
if (np.random.rand() < keep_probability):
balanced_samples.append(sample)
else:
balanced_samples.append(sample)
return balanced_samples
def generator(samples,data_dir,batch_size=32):
'''
Generates a batch of images and angles.
Reads-in the sample data and for each record, adds center,left & right images + corresponding angles
Keep in mind that the returned batch is 3 X the passed in batch_size because for each record, 3 images are added.
The benefit of using a generator is that the entire dataset doesn't need to be processed at the same time,
rather only a subset is processed and fed to the model, which greatly helps when working with constrained memory.
'''
num_samples = len(samples)
while 1: # Loop forever so the generator never terminates
shuffle(samples)
for offset in range(0,num_samples,batch_size):
batch_samples = samples[offset:offset+batch_size]
images = []
angles = []
for line in batch_samples:
center_angle = float(line[3])
angles.append(center_angle)
left_angle = center_angle + ANGLE_CORRECTION_FACTOR
angles.append(left_angle)
right_angle = center_angle - ANGLE_CORRECTION_FACTOR
angles.append(right_angle)
center_img_path = data_dir + line[0]
center_img = cv2.cvtColor(cv2.imread(center_img_path),cv2.COLOR_BGR2RGB)
# Crop 70 pixels from top and 24 pixels from bottom, output = 66 x 320
center_img = center_img[70:136,:]
# Resize to 66 x 200 as required by nVidia architecture
center_img = cv2.resize(center_img,(200,66),interpolation = cv2.INTER_AREA)
images.append(center_img)
left_img_path = data_dir + line[1]
left_img = cv2.cvtColor(cv2.imread(left_img_path),cv2.COLOR_BGR2RGB)
# Crop 70 pixels from top and 24 pixels from bottom, output = 66 x 320
left_img = left_img[70:136,:]
# Resize to 66 x 200 as required by nVidia architecture
left_img = cv2.resize(left_img,(200,66),interpolation = cv2.INTER_AREA)
images.append(left_img)
right_img_path = data_dir + line[2]
right_img = cv2.cvtColor(cv2.imread(right_img_path),cv2.COLOR_BGR2RGB)
# Crop 70 pixels from top and 24 pixels from bottom, output = 66 x 320
right_img = right_img[70:136,:]
# Resize to 66 x 200 as required by nVidia architecture
right_img = cv2.resize(right_img,(200,66),interpolation = cv2.INTER_AREA)
images.append(right_img)
X_train = np.array(images)
y_train = np.array(angles)
# Return processed images for this batch but remember the value of local variables for next iteration
yield sklearn.utils.shuffle(X_train, y_train)
def nVidiaNet(train_generator,validation_generator,steps_per_epoch,validation_steps,save_model_dir):
'''
Impelments the nVidia CNN architecture (https://devblogs.nvidia.com/parallelforall/deep-learning-self-driving-cars/).
Returns the model history object + also saves the model as 'model.h5' in the current working directory.
'''
nVidiaModel = Sequential()
nVidiaModel.add(Lambda(lambda x:(x/255.0)-0.5,input_shape=(66,200,3)))
print('Input shape:{}'.format(nVidiaModel.input_shape))
print('Output shape - after normalization:{}'.format(nVidiaModel.output_shape))
nVidiaModel.add(Convolution2D(24,(5,5),strides=(2,2),kernel_initializer='normal',kernel_regularizer=regularizers.l2(L2_REGULARIZATION),activation='elu'))
print('Output shape - after first convolution:{}'.format(nVidiaModel.output_shape))
nVidiaModel.add(Convolution2D(36,(5,5),strides=(2,2),kernel_initializer='normal',kernel_regularizer=regularizers.l2(L2_REGULARIZATION),activation='elu'))
print('Output shape - after second convolution:{}'.format(nVidiaModel.output_shape))
nVidiaModel.add(Convolution2D(48,(5,5),strides=(2,2),kernel_initializer='normal',kernel_regularizer=regularizers.l2(L2_REGULARIZATION),activation='elu'))
print('Output shape - after third convolution:{}'.format(nVidiaModel.output_shape))
nVidiaModel.add(Convolution2D(64,(3,3),strides=(1,1),kernel_initializer='normal',kernel_regularizer=regularizers.l2(L2_REGULARIZATION),activation='elu'))
print('Output shape - after fourth convolution:{}'.format(nVidiaModel.output_shape))
nVidiaModel.add(Convolution2D(64,(3,3),strides=(1,1),kernel_initializer='normal',kernel_regularizer=regularizers.l2(L2_REGULARIZATION),activation='elu'))
print('Output shape - after fifth convolution:{}'.format(nVidiaModel.output_shape))
nVidiaModel.add(Flatten())
print('Output shape - after flattening:{}'.format(nVidiaModel.output_shape))
nVidiaModel.add(Dense(100,kernel_initializer='normal',kernel_regularizer=regularizers.l2(L2_REGULARIZATION),activation='elu'))
print('Output shape - after first dense:{}'.format(nVidiaModel.output_shape))
nVidiaModel.add(Dense(50,kernel_initializer='normal',kernel_regularizer=regularizers.l2(L2_REGULARIZATION),activation='elu'))
print('Output shape - after second dense:{}'.format(nVidiaModel.output_shape))
nVidiaModel.add(Dense(10,kernel_initializer='normal',kernel_regularizer=regularizers.l2(L2_REGULARIZATION),activation='elu'))
print('Output shape - after third dense:{}'.format(nVidiaModel.output_shape))
nVidiaModel.add(Dense(1))
print('Output shape - after fourth dense:{}'.format(nVidiaModel.output_shape))
adam_optzr = optimizers.Adam(lr=LEARNING_RATE,decay=LEARNING_RATE_DECAY)
nVidiaModel.compile(optimizer=adam_optzr,loss='mse',metrics = ['accuracy'])
nVidiaModel_history = nVidiaModel.fit_generator(train_generator,
validation_data=validation_generator,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps,
epochs=EPOCHS)
dt = datetime.now()
model_name_prefix = dt.strftime("%y-%m-%d-%H-%M")
nVidiaModel.save(save_model_dir + model_name_prefix + '-model.h5')
# Write out the model params
model_params_file = open(save_model_dir + model_name_prefix + '-model-params.txt', 'w')
model_params_file.write('EPOCHS >>> {}\n'.format(EPOCHS))
model_params_file.write('BATCH SIZE >>> {}\n'.format(BATCH_SIZE))
model_params_file.write('LEARNING RATE >>> {}\n'.format(LEARNING_RATE))
model_params_file.write('LEARNING RATE DECAY >>> {}\n'.format(LEARNING_RATE_DECAY))
model_params_file.write('ANGLE CORRECTION FACTOR >>> {}\n'.format(ANGLE_CORRECTION_FACTOR))
model_params_file.write('BINS >>> {}\n'.format(BINS))
model_params_file.write('BIN RANGE >>> {}\n'.format(BIN_RANGE))
model_params_file.close()
return nVidiaModel_history
def main():
data_dir = 'C:/Users/Admin/Desktop/Behavioral Cloning/driving-data/'
driving_log_filename = 'driving_log.csv'
save_model_dir = './saved-models/'
samples = load_driving_log(data_dir + driving_log_filename)
print('Total samples:{}'.format(len(samples)))
samples = cleanup_data(samples)
print('Total samples after removing zero angles:{}'.format(len(samples)))
bin_edges,bin_values = draw_angles_distribution(samples,BINS,BIN_RANGE)
samples = balance_dataset(samples,bin_edges,bin_values,BINS)
_,_ = draw_angles_distribution(samples,BINS,BIN_RANGE)
train_samples,validation_samples = train_test_split(samples,test_size=0.2)
# Set up the data generators
train_generator = generator(train_samples,data_dir,batch_size=BATCH_SIZE)
validation_generator = generator(validation_samples,data_dir,batch_size=BATCH_SIZE)
# As we are adding the left & right images as well, so need x 3 times
total_samples = len(samples) * 3
actual_batch_size = BATCH_SIZE * 3
len_train = len(train_samples) * 3
len_valid = len(validation_samples) * 3
steps_per_epoch = len_train/actual_batch_size
validation_steps = len_valid/actual_batch_size
print('Total number of images used for training & validation:{}'.format(total_samples))
nVidiaModel_history = nVidiaNet(train_generator,validation_generator,steps_per_epoch,validation_steps,save_model_dir)
plt.plot(nVidiaModel_history.history['loss'])
plt.plot(nVidiaModel_history.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.show()
if __name__ == '__main__':
main()
|
[
"matplotlib.pyplot.title",
"keras.regularizers.l2",
"csv.reader",
"sklearn.model_selection.train_test_split",
"csv.Sniffer",
"matplotlib.pyplot.figure",
"keras.layers.Flatten",
"numpy.max",
"datetime.datetime.now",
"cv2.resize",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"keras.optimizers.Adam",
"matplotlib.pyplot.text",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.hist",
"cv2.imread",
"numpy.where",
"keras.layers.Lambda",
"keras.layers.Dense",
"numpy.array",
"numpy.random.rand",
"keras.models.Sequential",
"sklearn.utils.shuffle",
"matplotlib.pyplot.xlabel"
] |
[((1728, 1755), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 7)'}), '(figsize=(14, 7))\n', (1738, 1755), True, 'import matplotlib.pyplot as plt\n'), ((1759, 1778), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count"""'], {}), "('Count')\n", (1769, 1778), True, 'import matplotlib.pyplot as plt\n'), ((1784, 1803), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Angle"""'], {}), "('Angle')\n", (1794, 1803), True, 'import matplotlib.pyplot as plt\n'), ((1860, 1939), 'matplotlib.pyplot.plot', 'plt.plot', (['angle_range', '[bar_height_if_uniform_dist, bar_height_if_uniform_dist]'], {}), '(angle_range, [bar_height_if_uniform_dist, bar_height_if_uniform_dist])\n', (1868, 1939), True, 'import matplotlib.pyplot as plt\n'), ((1942, 2027), 'matplotlib.pyplot.text', 'plt.text', (['angle_range[0]', '(bar_height_if_uniform_dist + 50)', '"""Uniform Distribution"""'], {}), "(angle_range[0], bar_height_if_uniform_dist + 50,\n 'Uniform Distribution')\n", (1950, 2027), True, 'import matplotlib.pyplot as plt\n'), ((2024, 2052), 'matplotlib.pyplot.title', 'plt.title', (['"""Angle Histogram"""'], {}), "('Angle Histogram')\n", (2033, 2052), True, 'import matplotlib.pyplot as plt\n'), ((2080, 2126), 'matplotlib.pyplot.hist', 'plt.hist', (['angles'], {'bins': 'bins', 'range': 'angle_range'}), '(angles, bins=bins, range=angle_range)\n', (2088, 2126), True, 'import matplotlib.pyplot as plt\n'), ((2129, 2139), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2137, 2139), True, 'import matplotlib.pyplot as plt\n'), ((7079, 7091), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (7089, 7091), False, 'from keras.models import Sequential\n'), ((9482, 9542), 'keras.optimizers.Adam', 'optimizers.Adam', ([], {'lr': 'LEARNING_RATE', 'decay': 'LEARNING_RATE_DECAY'}), '(lr=LEARNING_RATE, decay=LEARNING_RATE_DECAY)\n', (9497, 9542), False, 'from keras import optimizers\n'), ((10026, 10040), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (10038, 10040), False, 'from datetime import datetime\n'), ((11560, 11600), 'sklearn.model_selection.train_test_split', 'train_test_split', (['samples'], {'test_size': '(0.2)'}), '(samples, test_size=0.2)\n', (11576, 11600), False, 'from sklearn.model_selection import train_test_split\n'), ((12366, 12411), 'matplotlib.pyplot.plot', 'plt.plot', (["nVidiaModel_history.history['loss']"], {}), "(nVidiaModel_history.history['loss'])\n", (12374, 12411), True, 'import matplotlib.pyplot as plt\n'), ((12416, 12465), 'matplotlib.pyplot.plot', 'plt.plot', (["nVidiaModel_history.history['val_loss']"], {}), "(nVidiaModel_history.history['val_loss'])\n", (12424, 12465), True, 'import matplotlib.pyplot as plt\n'), ((12470, 12512), 'matplotlib.pyplot.title', 'plt.title', (['"""model mean squared error loss"""'], {}), "('model mean squared error loss')\n", (12479, 12512), True, 'import matplotlib.pyplot as plt\n'), ((12517, 12554), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""mean squared error loss"""'], {}), "('mean squared error loss')\n", (12527, 12554), True, 'import matplotlib.pyplot as plt\n'), ((12559, 12578), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (12569, 12578), True, 'import matplotlib.pyplot as plt\n'), ((12583, 12648), 'matplotlib.pyplot.legend', 'plt.legend', (["['training set', 'validation set']"], {'loc': '"""upper right"""'}), "(['training set', 'validation set'], loc='upper right')\n", (12593, 12648), True, 'import matplotlib.pyplot as plt\n'), ((12653, 12663), 'matplotlib.pyplot.show', 'plt.show', 
([], {}), '()\n', (12661, 12663), True, 'import matplotlib.pyplot as plt\n'), ((840, 859), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (850, 859), False, 'import csv\n'), ((2812, 2839), 'numpy.where', 'np.where', (['(bin_edges < angle)'], {}), '(bin_edges < angle)\n', (2820, 2839), True, 'import numpy as np\n'), ((3190, 3212), 'numpy.max', 'np.max', (['potential_bins'], {}), '(potential_bins)\n', (3196, 3212), True, 'import numpy as np\n'), ((4385, 4401), 'sklearn.utils.shuffle', 'shuffle', (['samples'], {}), '(samples)\n', (4392, 4401), False, 'from sklearn.utils import shuffle\n'), ((7117, 7176), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 255.0 - 0.5)'], {'input_shape': '(66, 200, 3)'}), '(lambda x: x / 255.0 - 0.5, input_shape=(66, 200, 3))\n', (7123, 7176), False, 'from keras.layers import Convolution2D, Flatten, Dense, Lambda\n'), ((8617, 8626), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (8624, 8626), False, 'from keras.layers import Convolution2D, Flatten, Dense, Lambda\n'), ((9367, 9375), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (9372, 9375), False, 'from keras.layers import Convolution2D, Flatten, Dense, Lambda\n'), ((3006, 3034), 'numpy.where', 'np.where', (['(bin_edges == angle)'], {}), '(bin_edges == angle)\n', (3014, 3034), True, 'import numpy as np\n'), ((6475, 6491), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (6483, 6491), True, 'import numpy as np\n'), ((6514, 6530), 'numpy.array', 'np.array', (['angles'], {}), '(angles)\n', (6522, 6530), True, 'import numpy as np\n'), ((732, 745), 'csv.Sniffer', 'csv.Sniffer', ([], {}), '()\n', (743, 745), False, 'import csv\n'), ((3517, 3533), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3531, 3533), True, 'import numpy as np\n'), ((5367, 5430), 'cv2.resize', 'cv2.resize', (['center_img', '(200, 66)'], {'interpolation': 'cv2.INTER_AREA'}), '(center_img, (200, 66), interpolation=cv2.INTER_AREA)\n', (5377, 5430), False, 'import cv2\n'), ((5857, 5918), 'cv2.resize', 'cv2.resize', (['left_img', '(200, 66)'], {'interpolation': 'cv2.INTER_AREA'}), '(left_img, (200, 66), interpolation=cv2.INTER_AREA)\n', (5867, 5918), False, 'import cv2\n'), ((6349, 6411), 'cv2.resize', 'cv2.resize', (['right_img', '(200, 66)'], {'interpolation': 'cv2.INTER_AREA'}), '(right_img, (200, 66), interpolation=cv2.INTER_AREA)\n', (6359, 6411), False, 'import cv2\n'), ((6663, 6702), 'sklearn.utils.shuffle', 'sklearn.utils.shuffle', (['X_train', 'y_train'], {}), '(X_train, y_train)\n', (6684, 6702), False, 'import sklearn\n'), ((7425, 7459), 'keras.regularizers.l2', 'regularizers.l2', (['L2_REGULARIZATION'], {}), '(L2_REGULARIZATION)\n', (7440, 7459), False, 'from keras import regularizers\n'), ((7679, 7713), 'keras.regularizers.l2', 'regularizers.l2', (['L2_REGULARIZATION'], {}), '(L2_REGULARIZATION)\n', (7694, 7713), False, 'from keras import regularizers\n'), ((7935, 7969), 'keras.regularizers.l2', 'regularizers.l2', (['L2_REGULARIZATION'], {}), '(L2_REGULARIZATION)\n', (7950, 7969), False, 'from keras import regularizers\n'), ((8190, 8224), 'keras.regularizers.l2', 'regularizers.l2', (['L2_REGULARIZATION'], {}), '(L2_REGULARIZATION)\n', (8205, 8224), False, 'from keras import regularizers\n'), ((8446, 8480), 'keras.regularizers.l2', 'regularizers.l2', (['L2_REGULARIZATION'], {}), '(L2_REGULARIZATION)\n', (8461, 8480), False, 'from keras import regularizers\n'), ((8786, 8820), 'keras.regularizers.l2', 'regularizers.l2', (['L2_REGULARIZATION'], {}), '(L2_REGULARIZATION)\n', (8801, 8820), 
False, 'from keras import regularizers\n'), ((8998, 9032), 'keras.regularizers.l2', 'regularizers.l2', (['L2_REGULARIZATION'], {}), '(L2_REGULARIZATION)\n', (9013, 9032), False, 'from keras import regularizers\n'), ((9211, 9245), 'keras.regularizers.l2', 'regularizers.l2', (['L2_REGULARIZATION'], {}), '(L2_REGULARIZATION)\n', (9226, 9245), False, 'from keras import regularizers\n'), ((5082, 5109), 'cv2.imread', 'cv2.imread', (['center_img_path'], {}), '(center_img_path)\n', (5092, 5109), False, 'import cv2\n'), ((5580, 5605), 'cv2.imread', 'cv2.imread', (['left_img_path'], {}), '(left_img_path)\n', (5590, 5605), False, 'import cv2\n'), ((6068, 6094), 'cv2.imread', 'cv2.imread', (['right_img_path'], {}), '(right_img_path)\n', (6078, 6094), False, 'import cv2\n')]
|
import functools
import pathlib
import queue
import re
import sys
import time
import typing
from .line_timestamper import LineTimestamper
from .non_blocking_read_thread import stdin_read_thread
class LogLine:
def __init__(self, raw_text=None, raw_text_lines=None,
log_file=None, read_from_stdin=False, previous_line:typing.Optional[typing.TypeVar('LogLine')]=None,
line_timestamper:typing.Optional[LineTimestamper]=None, max_seconds_till_line_split:float=1,
next_line_index:int=0, allow_timestamp_format_changes:bool=False):
'''
If a - is given as the log_file, will read from stdin, (and ignore read_from_stdin)
'''
if (raw_text and log_file and raw_text_lines and read_from_stdin) or \
(raw_text is None and log_file is None and raw_text_lines is None and read_from_stdin is False):
raise ValueError("Please provide either raw_text or log_file or raw_text_lines... \
not more or less than one. Or we can use read_from_stdin without one of the others.")
# splitlines() is slow on big inputs... try to minimize how often we do it
self.raw_text_lines = []
self.read_from_stdin = read_from_stdin
self.next_line_index = next_line_index
if raw_text_lines:
self.raw_text_lines = raw_text_lines
elif raw_text:
self.raw_text_lines = raw_text.splitlines()
elif log_file:
if log_file == '-':
self.read_from_stdin = True
else:
self.raw_text_lines = pathlib.Path(log_file).read_text().splitlines()
# We can read_from_stdin AFTER raw_text_lines
if self.read_from_stdin:
stdin_read_thread.start_if_not_started_yet()
# when reading from stdin, we wait at most this much time before assuming a log line split
self.max_seconds_till_line_split = max_seconds_till_line_split
self.timestamp = None
self.log_line_lines = []
self.log_message = ''
self.previous_line = previous_line
self.line_timestamper = line_timestamper or LineTimestamper(allow_timestamp_format_changes=allow_timestamp_format_changes)
self._parse()
def _iter_lines(self):
''' yields a line from the given place... if it yields a None, assume that a line break happened '''
if self.raw_text_lines:
for idx in range(self.next_line_index, len(self.raw_text_lines), 1):
yield self.raw_text_lines[idx]
if self.read_from_stdin:
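            # Poll the background stdin reader; if no new line arrives before
            # break_force_time, yield None to force a log-line split.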
break_force_time = time.time() + self.max_seconds_till_line_split
while stdin_read_thread.is_alive():
try:
line = stdin_read_thread.lines_queue.get_nowait()
self.raw_text_lines.append(line)
break_force_time = time.time() + self.max_seconds_till_line_split
yield line
except queue.Empty:
if time.time() > break_force_time:
break_force_time = time.time() + self.max_seconds_till_line_split
yield None
time.sleep(.0001)
def _parse(self):
self.log_line_lines = []
# Key Assumption:
# All lines without timestamp are part of this log statement
for line in self._iter_lines():
if line is None:
# force a line break right now... timestamp should be set from earlier on
break
timestamp = self.line_timestamper.coerce_datetime_from_line(line)
if timestamp:
if len(self.log_line_lines) == 0:
self.timestamp = timestamp
self.log_line_lines.append(line)
else:
# new timestamp means we're done
break
else:
self.log_line_lines.append(line)
self.log_message = '\n'.join(self.log_line_lines)
@functools.lru_cache(maxsize=100)
def get_next_log_line(self) -> typing.Optional[typing.TypeVar('LogLine')]:
'''
Returns the next LogLine in the log.
Returns None if there is no more available
'''
new_next_line_index = self.next_line_index + len(self.log_line_lines)
if (new_next_line_index < len(self.raw_text_lines)) or (self.read_from_stdin and stdin_read_thread.is_alive()):
return LogLine(raw_text_lines=self.raw_text_lines,
previous_line=self,
read_from_stdin=self.read_from_stdin,
line_timestamper=self.line_timestamper,
next_line_index=new_next_line_index)
def iter_log_lines_with_regex(self, regex, ignore_case=True):
'''
Goes through all LogLines checking if the message matches the regex. For each that,
matches, yields the matching LogLine.
'''
current_line = self
regex_c = re.compile(regex, flags=re.IGNORECASE if ignore_case else 0)
# walk through all lines
while current_line is not None:
if re.findall(regex_c, current_line.log_message):
yield current_line
current_line = current_line.get_next_log_line()
|
[
"time.time",
"time.sleep",
"pathlib.Path",
"re.findall",
"typing.TypeVar",
"functools.lru_cache",
"re.compile"
] |
[((4138, 4170), 'functools.lru_cache', 'functools.lru_cache', ([], {'maxsize': '(100)'}), '(maxsize=100)\n', (4157, 4170), False, 'import functools\n'), ((4223, 4248), 'typing.TypeVar', 'typing.TypeVar', (['"""LogLine"""'], {}), "('LogLine')\n", (4237, 4248), False, 'import typing\n'), ((5181, 5241), 're.compile', 're.compile', (['regex'], {'flags': '(re.IGNORECASE if ignore_case else 0)'}), '(regex, flags=re.IGNORECASE if ignore_case else 0)\n', (5191, 5241), False, 'import re\n'), ((5333, 5378), 're.findall', 're.findall', (['regex_c', 'current_line.log_message'], {}), '(regex_c, current_line.log_message)\n', (5343, 5378), False, 'import re\n'), ((368, 393), 'typing.TypeVar', 'typing.TypeVar', (['"""LogLine"""'], {}), "('LogLine')\n", (382, 393), False, 'import typing\n'), ((2673, 2684), 'time.time', 'time.time', ([], {}), '()\n', (2682, 2684), False, 'import time\n'), ((3274, 3292), 'time.sleep', 'time.sleep', (['(0.0001)'], {}), '(0.0001)\n', (3284, 3292), False, 'import time\n'), ((2956, 2967), 'time.time', 'time.time', ([], {}), '()\n', (2965, 2967), False, 'import time\n'), ((3096, 3107), 'time.time', 'time.time', ([], {}), '()\n', (3105, 3107), False, 'import time\n'), ((3172, 3183), 'time.time', 'time.time', ([], {}), '()\n', (3181, 3183), False, 'import time\n'), ((1630, 1652), 'pathlib.Path', 'pathlib.Path', (['log_file'], {}), '(log_file)\n', (1642, 1652), False, 'import pathlib\n')]
|
# Copyright 2021 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kernel.components.binning.vertfeaturebinning.param import FeatureBinningParam, TransformParam
from kernel.utils import consts
class HorzFeatureBinningParam(FeatureBinningParam):
def __init__(self, method=consts.VIRTUAL_SUMMARY,
compress_thres=consts.DEFAULT_COMPRESS_THRESHOLD,
head_size=consts.DEFAULT_HEAD_SIZE,
error=consts.DEFAULT_RELATIVE_ERROR,
sample_bins=100,
bin_num=consts.G_BIN_NUM, bin_indexes=-1, bin_names=None, adjustment_factor=0.5,
transform_param=TransformParam(),
category_indexes=None, category_names=None,
need_run=True, max_iter=100):
super(HorzFeatureBinningParam, self).__init__(method=method, compress_thres=compress_thres,
head_size=head_size, error=error,
bin_num=bin_num, bin_indexes=bin_indexes,
bin_names=bin_names, adjustment_factor=adjustment_factor,
transform_param=transform_param,
category_indexes=category_indexes, category_names=category_names,
need_run=need_run)
self.sample_bins = sample_bins
self.max_iter = max_iter
def check(self):
descr = "horz binning param's"
super(HorzFeatureBinningParam, self).check()
self.check_string(self.method, descr)
self.method = self.method.lower()
self.check_valid_value(self.method, descr, [consts.VIRTUAL_SUMMARY, consts.RECURSIVE_QUERY])
self.check_positive_integer(self.max_iter, descr)
if self.max_iter > 100:
raise ValueError("Max iter is not allowed exceed 100")
def set_bin_index(self, header):
self.bin_indexes = [header.index(name) for name in self.bin_names]
|
[
"kernel.components.binning.vertfeaturebinning.param.TransformParam"
] |
[((1780, 1796), 'kernel.components.binning.vertfeaturebinning.param.TransformParam', 'TransformParam', ([], {}), '()\n', (1794, 1796), False, 'from kernel.components.binning.vertfeaturebinning.param import FeatureBinningParam, TransformParam\n')]
|
import os
import sys
import torch
try:
import torchchimera
except:
# attempts to import local module
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
import torchchimera
from torchchimera.datasets import FolderTuple
from torchchimera.metrics import eval_snr
from torchchimera.metrics import eval_si_sdr
from _model_io import load_model
from _training_common import AdaptedChimeraMagPhasebook
from _training_common import exclude_silence
def add_evaluation_io_argument(parser):
parser.add_argument('--data-dir', nargs='+', required=True, help="directory of validation dataset")
parser.add_argument('--input-checkpoint', help='input checkpoint file')
parser.add_argument('--batch-size', type=int, default=1, help='batch size')
parser.add_argument('--output-file', help='output file')
parser.add_argument('--log-file', help='log file')
parser.add_argument('--permutation-free', action='store_true', help='enable permutation-free evaluation function')
return parser
def validate_evaluation_io_argument(args, parser):
for d in args.data_dir:
if not os.path.isdir(d):
parser.error(f'"{d}" is not a directory')
if args.input_checkpoint and not os.path.isfile(args.input_checkpoint):
parser.error(f'input checkpoint "{args.input_checkpoint}" is not a file')
if args.batch_size <= 0:
parser.error('batch size must be positive')
return args
def evaluate(args):
# build dataset
dataset = FolderTuple(args.data_dir, args.sr, args.segment_duration)
loader = torch.utils.data.DataLoader(
dataset, batch_size=args.batch_size, shuffle=False
)
# load a model
model, update_args = load_model(
args.input_checkpoint, 'ChimeraMagPhasebook',
stft_setting=args.stft_setting
)
if args.bin_num != update_args['bin_num']:
        bin_num = update_args['bin_num']
raise RuntimeError(
'the number of fft bin of input model and parameter are different '
f'--n-fft {(bin_num-1)*2} would work'
)
if len(args.data_dir) != update_args['n_channel']:
raise RuntimeError(
'the number of channels of the input model '
'and the output files are different'
)
model.to(args.device)
model.eval()
if args.permutation_free:
eval_snr = torchchimera.metrics.permutation_free(
            torchchimera.metrics.eval_snr, aggregate_function=max
)
eval_si_sdr = torchchimera.metrics.permutation_free(
torchchimera.metrics.eval_si_sdr, aggregate_function=max
)
else:
eval_snr = torchchimera.metrics.eval_snr
eval_si_sdr = torchchimera.metrics.eval_si_sdr
# evaluation loop
if args.output_file is None:
of = sys.stdout
else:
of = open(args.output_file, 'w')
print('segment,channel,snr,si-sdr', file=of)
with torch.no_grad():
for batch_i, s in enumerate(loader, 0):
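            # Normalize each source to roughly unit power, then rescale so the
            # mixture's peak amplitude stays just below 1.0 (0.98 headroom).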
scale = torch.sqrt(
s.shape[-1] / torch.sum(s**2, dim=-1).clamp(min=1e-32)
)
scale_mix = 1. / torch.max(
torch.sum(scale.unsqueeze(-1) * s, dim=1).abs(), dim=-1
)[0]
scale_mix = torch.min(scale_mix, torch.ones_like(scale_mix))
scale *= scale_mix.unsqueeze(-1)
s *= scale.unsqueeze(-1) * 0.98
s = s.to(args.device)
_, _, shat, _ = model(s.sum(dim=1))
waveform_length = min(s.shape[-1], shat.shape[-1])
s = s[:, :, :waveform_length]
shat = shat[:, :, :waveform_length]
snr = eval_snr(shat, s)
si_sdr = eval_si_sdr(shat, s)
for i, (_snr, _si_sdr) in enumerate(zip(snr, si_sdr), 1):
sample_i = batch_i * args.batch_size + i
for channel_i, (__snr, __si_sdr) in \
enumerate(zip(_snr, _si_sdr), 1):
print(f'{sample_i},{channel_i},{__snr},{__si_sdr}', file=of)
of.close()
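# Hedged wiring sketch (not part of the original module): `evaluate` also reads
# attributes such as args.sr, args.stft_setting, args.bin_num and args.device,
# which other argument helpers (not shown here) are expected to add, so this only
# illustrates how the two helpers above compose:
#   parser = add_evaluation_io_argument(argparse.ArgumentParser())
#   args = validate_evaluation_io_argument(parser.parse_args(), parser)
#   evaluate(args)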
|
[
"torch.ones_like",
"torchchimera.datasets.FolderTuple",
"os.path.abspath",
"torch.utils.data.DataLoader",
"os.path.isdir",
"torchchimera.metrics.eval_si_sdr",
"torchchimera.metrics.eval_snr",
"os.path.isfile",
"_model_io.load_model",
"torchchimera.metrics.permutation_free",
"torch.no_grad",
"torch.sum"
] |
[((1518, 1576), 'torchchimera.datasets.FolderTuple', 'FolderTuple', (['args.data_dir', 'args.sr', 'args.segment_duration'], {}), '(args.data_dir, args.sr, args.segment_duration)\n', (1529, 1576), False, 'from torchchimera.datasets import FolderTuple\n'), ((1590, 1669), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(False)'}), '(dataset, batch_size=args.batch_size, shuffle=False)\n', (1617, 1669), False, 'import torch\n'), ((1729, 1822), '_model_io.load_model', 'load_model', (['args.input_checkpoint', '"""ChimeraMagPhasebook"""'], {'stft_setting': 'args.stft_setting'}), "(args.input_checkpoint, 'ChimeraMagPhasebook', stft_setting=args.\n stft_setting)\n", (1739, 1822), False, 'from _model_io import load_model\n'), ((2409, 2506), 'torchchimera.metrics.permutation_free', 'torchchimera.metrics.permutation_free', (['torchchimera.metrics.eval_snr'], {'aggregate_functionn': 'max'}), '(torchchimera.metrics.eval_snr,\n aggregate_functionn=max)\n', (2446, 2506), False, 'import torchchimera\n'), ((2547, 2646), 'torchchimera.metrics.permutation_free', 'torchchimera.metrics.permutation_free', (['torchchimera.metrics.eval_si_sdr'], {'aggregate_function': 'max'}), '(torchchimera.metrics.eval_si_sdr,\n aggregate_function=max)\n', (2584, 2646), False, 'import torchchimera\n'), ((2968, 2983), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2981, 2983), False, 'import torch\n'), ((1136, 1152), 'os.path.isdir', 'os.path.isdir', (['d'], {}), '(d)\n', (1149, 1152), False, 'import os\n'), ((1245, 1282), 'os.path.isfile', 'os.path.isfile', (['args.input_checkpoint'], {}), '(args.input_checkpoint)\n', (1259, 1282), False, 'import os\n'), ((3696, 3713), 'torchchimera.metrics.eval_snr', 'eval_snr', (['shat', 's'], {}), '(shat, s)\n', (3704, 3713), False, 'from torchchimera.metrics import eval_snr\n'), ((3735, 3755), 'torchchimera.metrics.eval_si_sdr', 'eval_si_sdr', (['shat', 's'], {}), '(shat, s)\n', (3746, 3755), False, 'from torchchimera.metrics import eval_si_sdr\n'), ((3324, 3350), 'torch.ones_like', 'torch.ones_like', (['scale_mix'], {}), '(scale_mix)\n', (3339, 3350), False, 'import torch\n'), ((160, 185), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (175, 185), False, 'import os\n'), ((3095, 3120), 'torch.sum', 'torch.sum', (['(s ** 2)'], {'dim': '(-1)'}), '(s ** 2, dim=-1)\n', (3104, 3120), False, 'import torch\n')]
|
import glob
import os
# files = glob.glob("./finall/*")
# files = sorted(files)
# print(files)
import codecs
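# Merge the first 400 admission-record "present illness history" files
# (./finall/入院记录现病史-<n>.txt) into result.txt: each output line starts with the
# record number, followed by that file's lines joined with ';'.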
with open("result.txt",'w') as wf:
for num in range(1,401):
file = "./finall/入院记录现病史-"+str(num)+".txt"
with codecs.open(file,'r',encoding='utf-8') as rf:
for i,line in enumerate(rf):
result = []
name = os.path.basename(file)
name1 = name.split('.')[0]
name2 = name1.split('-')[-1]
line = line.strip()
if i == 0:
wf.write("{},{};".format(int(name2),line))
else:
wf.write("{};".format(line))
wf.write('\n')
|
[
"codecs.open",
"os.path.basename"
] |
[((246, 286), 'codecs.open', 'codecs.open', (['file', '"""r"""'], {'encoding': '"""utf-8"""'}), "(file, 'r', encoding='utf-8')\n", (257, 286), False, 'import codecs\n'), ((387, 409), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (403, 409), False, 'import os\n')]
|
from keras.models import Sequential, load_model
from keras.callbacks import History, EarlyStopping, Callback
from keras.layers.recurrent import LSTM
from keras.layers import Bidirectional
from keras.losses import mse, binary_crossentropy,cosine
from keras.layers.core import Dense, Activation, Dropout
import numpy as np
import os
from matplotlib import pyplot as plt
from tensorflow import keras
import tensorflow as tf
class LSTM_NETWORK(object):
def __init__(self, input_dim,layers,batch_size=32,l_s=5,l_p=1):
"""input_dim_list must include the original data dimension"""
assert len(layers) >= 2
self.l_s = l_s
self.l_p = l_p
self.batch_size = batch_size
self.loss = 0#zero for mse, 1 for cosine similarity
self.cbs = [History(),EarlyStopping(monitor='val_loss', patience=5, min_delta=0.0003, verbose=0)]
model = Sequential()
model.add((LSTM(layers[0], input_shape=(l_s, input_dim),
return_sequences=True)))
#return_sequences=True)))
model.add(Dropout(0.3))
model.add(LSTM(layers[1], return_sequences=True))#return_sequences=True))
model.add(Dropout(0.3))
model.add(Dense(self.l_p*input_dim))
model.add(Activation("linear"))
# model.add(Dense(activation='linear', units=y_train.shape[2]))
if self.loss == 0:
model.compile(loss='mse', optimizer='adam')
else:
loss_fn = keras.losses.CosineSimilarity()
model.compile(loss=loss_fn, optimizer='adam')
# print("here is model summary")
#print(model.summary())
self.model = model
return
def create_one_layer_model(self,input_dim,layers,batch_size=32,l_s=5,l_p=1):
assert len(layers) >= 2
self.l_s = l_s
self.l_p = l_p
self.batch_size = batch_size
self.cbs = [History(),EarlyStopping(monitor='val_loss', patience=15, min_delta=0.0003, verbose=0)]
model = Sequential()
model.add((LSTM(layers[0], input_shape=(None, input_dim))))
model.add(Dropout(0.3))
model.add(Dense(self.l_p*input_dim))
model.add(Activation("linear"))
# model.add(Dense(activation='linear', units=y_train.shape[2]))
if self.loss == 0:
model.compile(loss='mse', optimizer='adam')
else:
loss_fn = keras.losses.CosineSimilarity()
model.compile(loss=loss_fn, optimizer='adam')
#import tensorflow as tf
#model.compile(loss=tf.keras.losses.CosineSimilarity(), optimizer='adam')
# print("here is model summary")
#print(model.summary())
#print("this is neww model")
self.model = model
return
def fit(self, X,y, epochs=100,validation_split=0.15, verbose=False,model_num=-1):
history = self.model.fit(X, y, batch_size=self.batch_size, epochs=epochs,
validation_split=validation_split, verbose=verbose, callbacks=self.cbs)
#print(history.history.keys())
# "Accuracy"
'''
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
'''
# "Loss"
'''
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
'''
if model_num!=-1:
self.model.save("LSTM_v"+str(model_num)+".h5")
return
def load_model(self,num):
self.model=load_model(os.path.join("", "LSTM_v"+ str(num)+ ".h5"))
return self.model
def predict(self, X_test):
'''
Used trained LSTM model to predict test data arriving in batches
Args:
X_test (np array): numpy array of test inputs with dimensions [timesteps, l_s, input dimensions)
Returns:
y_hat (np array): predicted test values for each timestep in y_test
'''
print("Predicting by Patch")
y_hat = []#np.array([[[]]])
# print("y_hat intially",y_hat.shape)
num_batches = int((X_test.shape[0] - self.l_s) / self.batch_size)
print("number of batches",num_batches)
if num_batches < 0:
raise ValueError("l_s (%s) too large for stream with length %s." % (self.l_s, y_test.shape[0]))
# simulate data arriving in batches
for i in range(1, num_batches + 2):
#print("Inside the loop")
prior_idx = (i - 1) * self.batch_size
idx = i * self.batch_size
if i == num_batches + 1:
idx = X_test.shape[0] # remaining values won't necessarily equal batch size
X_test_period = X_test[prior_idx:idx]
#print("Predict for batch")
#print("X_test_period",type(X_test_period),len(X_test_period))
y_hat_period = self.model.predict(X_test_period)
#print("y_hat_period out",y_hat_period.shape)
#y_hat_period=np.array(y_hat_period)
#print("y_hat_period after reshape",y_hat_period.shape)
#print("y_hat now",y_hat_period.shape)
if i ==1:
y_hat =y_hat_period
#y_hat_period=np.array(y_hat_period)
#print("y_hat now",y_hat_period.shape)
else:
y_hat = np.append(y_hat, y_hat_period)
#print("y_hat", y_hat.shape)
print("Out of loop, final transformation")
y_hat = y_hat.reshape(X_test.shape[0], X_test.shape[2])
print("y_hat final", y_hat.shape)
# np.save(os.path.join("data", anom['run_id'], "y_hat", anom["chan_id"] + ".npy"), np.array(y_hat))
return y_hat
def predict_all(self, X_test):
'''
        Uses the trained LSTM model to predict all test data in a single call (no batching).
        Args:
X_test (np array): numpy array of test inputs with dimensions [timesteps, l_s, input dimensions)
Returns:
y_hat (np array): predicted test values for each timestep in y_test
'''
#print("Predicting All")
y_hat = self.model.predict(X_test)
#print("y_hat other",y_hat.shape)
return y_hat
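# Hedged usage sketch (not part of the original module): synthetic shapes only.
# With the default two-LSTM model above (return_sequences=True), targets are
# expected to be shaped (windows, l_s, l_p * input_dim).
if __name__ == "__main__":
    input_dim, l_s, l_p = 8, 5, 1
    X_demo = np.random.rand(256, l_s, input_dim)
    y_demo = np.random.rand(256, l_s, l_p * input_dim)
    net = LSTM_NETWORK(input_dim, layers=[64, 64], batch_size=32, l_s=l_s, l_p=l_p)
    net.fit(X_demo, y_demo, epochs=2, verbose=True)
    print(net.predict_all(X_demo).shape)  # -> (256, l_s, l_p * input_dim)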
|
[
"keras.layers.core.Dense",
"keras.callbacks.History",
"keras.layers.core.Activation",
"tensorflow.keras.losses.CosineSimilarity",
"numpy.append",
"keras.callbacks.EarlyStopping",
"keras.layers.core.Dropout",
"keras.layers.recurrent.LSTM",
"keras.models.Sequential"
] |
[((887, 899), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (897, 899), False, 'from keras.models import Sequential, load_model\n'), ((2012, 2024), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2022, 2024), False, 'from keras.models import Sequential, load_model\n'), ((784, 793), 'keras.callbacks.History', 'History', ([], {}), '()\n', (791, 793), False, 'from keras.callbacks import History, EarlyStopping, Callback\n'), ((794, 868), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(5)', 'min_delta': '(0.0003)', 'verbose': '(0)'}), "(monitor='val_loss', patience=5, min_delta=0.0003, verbose=0)\n", (807, 868), False, 'from keras.callbacks import History, EarlyStopping, Callback\n'), ((919, 987), 'keras.layers.recurrent.LSTM', 'LSTM', (['layers[0]'], {'input_shape': '(l_s, input_dim)', 'return_sequences': '(True)'}), '(layers[0], input_shape=(l_s, input_dim), return_sequences=True)\n', (923, 987), False, 'from keras.layers.recurrent import LSTM\n'), ((1082, 1094), 'keras.layers.core.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (1089, 1094), False, 'from keras.layers.core import Dense, Activation, Dropout\n'), ((1114, 1152), 'keras.layers.recurrent.LSTM', 'LSTM', (['layers[1]'], {'return_sequences': '(True)'}), '(layers[1], return_sequences=True)\n', (1118, 1152), False, 'from keras.layers.recurrent import LSTM\n'), ((1196, 1208), 'keras.layers.core.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (1203, 1208), False, 'from keras.layers.core import Dense, Activation, Dropout\n'), ((1228, 1255), 'keras.layers.core.Dense', 'Dense', (['(self.l_p * input_dim)'], {}), '(self.l_p * input_dim)\n', (1233, 1255), False, 'from keras.layers.core import Dense, Activation, Dropout\n'), ((1273, 1293), 'keras.layers.core.Activation', 'Activation', (['"""linear"""'], {}), "('linear')\n", (1283, 1293), False, 'from keras.layers.core import Dense, Activation, Dropout\n'), ((1487, 1518), 'tensorflow.keras.losses.CosineSimilarity', 'keras.losses.CosineSimilarity', ([], {}), '()\n', (1516, 1518), False, 'from tensorflow import keras\n'), ((1908, 1917), 'keras.callbacks.History', 'History', ([], {}), '()\n', (1915, 1917), False, 'from keras.callbacks import History, EarlyStopping, Callback\n'), ((1918, 1993), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(15)', 'min_delta': '(0.0003)', 'verbose': '(0)'}), "(monitor='val_loss', patience=15, min_delta=0.0003, verbose=0)\n", (1931, 1993), False, 'from keras.callbacks import History, EarlyStopping, Callback\n'), ((2044, 2090), 'keras.layers.recurrent.LSTM', 'LSTM', (['layers[0]'], {'input_shape': '(None, input_dim)'}), '(layers[0], input_shape=(None, input_dim))\n', (2048, 2090), False, 'from keras.layers.recurrent import LSTM\n'), ((2111, 2123), 'keras.layers.core.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (2118, 2123), False, 'from keras.layers.core import Dense, Activation, Dropout\n'), ((2143, 2170), 'keras.layers.core.Dense', 'Dense', (['(self.l_p * input_dim)'], {}), '(self.l_p * input_dim)\n', (2148, 2170), False, 'from keras.layers.core import Dense, Activation, Dropout\n'), ((2188, 2208), 'keras.layers.core.Activation', 'Activation', (['"""linear"""'], {}), "('linear')\n", (2198, 2208), False, 'from keras.layers.core import Dense, Activation, Dropout\n'), ((2402, 2433), 'tensorflow.keras.losses.CosineSimilarity', 'keras.losses.CosineSimilarity', ([], {}), '()\n', (2431, 2433), False, 'from tensorflow import keras\n'), ((5620, 
5650), 'numpy.append', 'np.append', (['y_hat', 'y_hat_period'], {}), '(y_hat, y_hat_period)\n', (5629, 5650), True, 'import numpy as np\n')]
|
import asyncio
import json
from typing import Any, Dict, Generator, List, Optional, cast
import pytest
from robotcode.jsonrpc2.protocol import (
JsonRPCError,
JsonRPCErrorObject,
JsonRPCErrors,
JsonRPCMessage,
JsonRPCProtocol,
JsonRPCRequest,
JsonRPCResponse,
)
from robotcode.jsonrpc2.server import JsonRPCServer
from robotcode.language_server.common.types import MessageActionItem
class DummyJsonRPCProtocol(JsonRPCProtocol):
def __init__(self, server: Optional[JsonRPCServer["DummyJsonRPCProtocol"]]):
super().__init__()
self.handled_messages: List[JsonRPCMessage] = []
self.sended_message: Optional[JsonRPCMessage] = None
async def handle_message(self, message: JsonRPCMessage) -> None:
self.handled_messages.append(message)
return await super().handle_message(message)
def send_message(self, message: JsonRPCMessage) -> None:
self.sended_message = message
async def data_received_async(self, data: bytes) -> None:
self.data_received(data)
return await asyncio.sleep(0)
@pytest.fixture(scope="module")
def event_loop() -> Generator[asyncio.AbstractEventLoop, None, None]:
loop = asyncio.new_event_loop()
yield loop
loop.close()
@pytest.mark.asyncio
async def test_receive_a_request_message_should_work() -> None:
protocol = DummyJsonRPCProtocol(None)
message = JsonRPCRequest(id=1, method="doSomething", params={})
json_message = message.json().encode("utf-8")
header = f"Content-Length: {len(json_message)}\r\n\r\n".encode("ascii")
data = header + json_message
await protocol.data_received_async(data)
assert protocol.handled_messages == [message]
@pytest.mark.asyncio
async def test_receive_a_request_message_should_work_with_string_id() -> None:
protocol = DummyJsonRPCProtocol(None)
message = JsonRPCRequest(id="this is an id", method="doSomething", params={})
json_message = message.json().encode("utf-8")
header = f"Content-Length: {len(json_message)}\r\n\r\n".encode("ascii")
data = header + json_message
await protocol.data_received_async(data)
assert protocol.handled_messages == [message]
@pytest.mark.asyncio
async def test_receive_a_batch_request_should_work() -> None:
protocol = DummyJsonRPCProtocol(None)
message = [
JsonRPCRequest(id=1, method="doSomething", params={}),
JsonRPCRequest(id=2, method="doSomething", params={}),
JsonRPCRequest(id=3, method="doSomething", params={}),
]
json_message = json.dumps([e.dict() for e in message]).encode("utf-8")
header = f"Content-Length: {len(json_message)}\r\n\r\n".encode("ascii")
data = header + json_message
await protocol.data_received_async(data)
assert protocol.handled_messages == message
@pytest.mark.asyncio
async def test_receive_invalid_jsonmessage_should_throw_send_an_error() -> None:
protocol = DummyJsonRPCProtocol(None)
json_message = b"{"
header = f"Content-Length: {len(json_message)}\r\n\r\n".encode("ascii")
data = header + json_message
await protocol.data_received_async(data)
assert (
isinstance(protocol.sended_message, JsonRPCError)
and protocol.sended_message.error.code == JsonRPCErrors.PARSE_ERROR
)
@pytest.mark.asyncio
async def test_receive_a_request_with_invalid_protocol_version_should_send_an_error() -> None:
protocol = DummyJsonRPCProtocol(None)
message = JsonRPCRequest(id=1, method="doSomething", params={})
message.jsonrpc = "1.0"
json_message = message.json().encode("utf-8")
header = f"Content-Length: {len(json_message)}\r\n\r\n".encode("ascii")
data = header + json_message
await protocol.data_received_async(data)
assert (
isinstance(protocol.sended_message, JsonRPCError)
and protocol.sended_message.error.code == JsonRPCErrors.PARSE_ERROR
)
@pytest.mark.asyncio
async def test_receive_an_error_should_work() -> None:
protocol = DummyJsonRPCProtocol(None)
message = JsonRPCError(id=1, result=None, error=JsonRPCErrorObject(code=1, message="test", data="this is the data"))
json_message = message.json().encode("utf-8")
header = f"Content-Length: {len(json_message)}\r\n\r\n".encode("ascii")
data = header + json_message
await protocol.data_received_async(data)
assert protocol.handled_messages == [message]
@pytest.mark.asyncio
async def test_receive_response_should_work() -> None:
protocol = DummyJsonRPCProtocol(None)
r = protocol.send_request("dummy/method", ["dummy", "data"], list)
msg = JsonRPCResponse(id=cast(JsonRPCRequest, protocol.sended_message).id, result=["dummy", "data"])
json_message = msg.json().encode("utf-8")
header = f"Content-Length: {len(json_message)}\r\n\r\n".encode("ascii")
data = header + json_message
await protocol.data_received_async(data)
a = await asyncio.wait_for(r, 10)
assert a == ["dummy", "data"]
@pytest.mark.asyncio
async def test_receive_invalid_id_in_response_should_send_an_error() -> None:
protocol = DummyJsonRPCProtocol(None)
message = JsonRPCResponse(id=1, result=["dummy", "data"])
json_message = message.json().encode("utf-8")
header = f"Content-Length: {len(json_message)}\r\n\r\n".encode("ascii")
data = header + json_message
await protocol.data_received_async(data)
assert protocol.handled_messages == [message]
assert isinstance(protocol.sended_message, JsonRPCError)
@pytest.mark.asyncio
async def test_send_request_receive_response_should_work_without_param_type_work() -> None:
protocol = DummyJsonRPCProtocol(None)
r: Any = protocol.send_request("dummy/method", ["dummy", "data"])
msg = JsonRPCResponse(
id=cast(JsonRPCRequest, protocol.sended_message).id, result=MessageActionItem(title="hi there")
)
json_message = msg.json().encode("utf-8")
header = f"Content-Length: {len(json_message)}\r\n\r\n".encode("ascii")
data = header + json_message
await protocol.data_received_async(data)
a = await asyncio.wait_for(r, 10)
assert isinstance(a, dict)
assert a == {"title": "hi there"}
@pytest.mark.asyncio
async def test_receive_response_should_work_with_pydantic_model() -> None:
protocol = DummyJsonRPCProtocol(None)
r = protocol.send_request("dummy/method", ["dummy", "data"], MessageActionItem)
msg = JsonRPCResponse(
id=cast(JsonRPCRequest, protocol.sended_message).id, result=MessageActionItem(title="hi there")
)
json_message = msg.json().encode("utf-8")
header = f"Content-Length: {len(json_message)}\r\n\r\n".encode("ascii")
data = header + json_message
await protocol.data_received_async(data)
a = await asyncio.wait_for(r, 10)
assert a == MessageActionItem(title="hi there")
@pytest.mark.asyncio
async def test_receive_response_should_work_with_converter() -> None:
protocol = DummyJsonRPCProtocol(None)
r = protocol.send_request("dummy/method", ["dummy", "data"], lambda v: [MessageActionItem.parse_obj(e) for e in v])
msg = JsonRPCResponse(
id=cast(JsonRPCRequest, protocol.sended_message).id, result=[MessageActionItem(title="hi there")]
)
json_message = msg.json().encode("utf-8")
header = f"Content-Length: {len(json_message)}\r\n\r\n".encode("ascii")
data = header + json_message
await protocol.data_received_async(data)
a = await asyncio.wait_for(r, 10)
assert a == [MessageActionItem(title="hi there")]
@pytest.mark.asyncio
async def test_receive_response_should_work_with_generic_list() -> None:
protocol = DummyJsonRPCProtocol(None)
r = protocol.send_request("dummy/method", ["dummy", "data"], List[MessageActionItem])
msg = JsonRPCResponse(
id=cast(JsonRPCRequest, protocol.sended_message).id, result=[MessageActionItem(title="hi there")]
)
json_message = msg.json().encode("utf-8")
header = f"Content-Length: {len(json_message)}\r\n\r\n".encode("ascii")
data = header + json_message
await protocol.data_received_async(data)
a = await asyncio.wait_for(r, 10)
assert a == [MessageActionItem(title="hi there")]
@pytest.mark.asyncio
async def test_receive_response_with_generic_dict_should_return_unchanged() -> None:
protocol = DummyJsonRPCProtocol(None)
r = protocol.send_request("dummy/method", ["dummy", "data"], List[Dict[str, Any]])
msg = JsonRPCResponse(
id=cast(JsonRPCRequest, protocol.sended_message).id, result=[MessageActionItem(title="hi there")]
)
json_message = msg.json().encode("utf-8")
header = f"Content-Length: {len(json_message)}\r\n\r\n".encode("ascii")
data = header + json_message
await protocol.data_received_async(data)
a = await asyncio.wait_for(r, 10)
assert a == [MessageActionItem(title="hi there").dict()]
|
[
"robotcode.language_server.common.types.MessageActionItem.parse_obj",
"robotcode.language_server.common.types.MessageActionItem",
"asyncio.sleep",
"typing.cast",
"robotcode.jsonrpc2.protocol.JsonRPCResponse",
"pytest.fixture",
"robotcode.jsonrpc2.protocol.JsonRPCRequest",
"robotcode.jsonrpc2.protocol.JsonRPCErrorObject",
"asyncio.wait_for",
"asyncio.new_event_loop"
] |
[((1092, 1122), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (1106, 1122), False, 'import pytest\n'), ((1204, 1228), 'asyncio.new_event_loop', 'asyncio.new_event_loop', ([], {}), '()\n', (1226, 1228), False, 'import asyncio\n'), ((1405, 1458), 'robotcode.jsonrpc2.protocol.JsonRPCRequest', 'JsonRPCRequest', ([], {'id': '(1)', 'method': '"""doSomething"""', 'params': '{}'}), "(id=1, method='doSomething', params={})\n", (1419, 1458), False, 'from robotcode.jsonrpc2.protocol import JsonRPCError, JsonRPCErrorObject, JsonRPCErrors, JsonRPCMessage, JsonRPCProtocol, JsonRPCRequest, JsonRPCResponse\n'), ((1875, 1942), 'robotcode.jsonrpc2.protocol.JsonRPCRequest', 'JsonRPCRequest', ([], {'id': '"""this is an id"""', 'method': '"""doSomething"""', 'params': '{}'}), "(id='this is an id', method='doSomething', params={})\n", (1889, 1942), False, 'from robotcode.jsonrpc2.protocol import JsonRPCError, JsonRPCErrorObject, JsonRPCErrors, JsonRPCMessage, JsonRPCProtocol, JsonRPCRequest, JsonRPCResponse\n'), ((3473, 3526), 'robotcode.jsonrpc2.protocol.JsonRPCRequest', 'JsonRPCRequest', ([], {'id': '(1)', 'method': '"""doSomething"""', 'params': '{}'}), "(id=1, method='doSomething', params={})\n", (3487, 3526), False, 'from robotcode.jsonrpc2.protocol import JsonRPCError, JsonRPCErrorObject, JsonRPCErrors, JsonRPCMessage, JsonRPCProtocol, JsonRPCRequest, JsonRPCResponse\n'), ((5140, 5187), 'robotcode.jsonrpc2.protocol.JsonRPCResponse', 'JsonRPCResponse', ([], {'id': '(1)', 'result': "['dummy', 'data']"}), "(id=1, result=['dummy', 'data'])\n", (5155, 5187), False, 'from robotcode.jsonrpc2.protocol import JsonRPCError, JsonRPCErrorObject, JsonRPCErrors, JsonRPCMessage, JsonRPCProtocol, JsonRPCRequest, JsonRPCResponse\n'), ((2352, 2405), 'robotcode.jsonrpc2.protocol.JsonRPCRequest', 'JsonRPCRequest', ([], {'id': '(1)', 'method': '"""doSomething"""', 'params': '{}'}), "(id=1, method='doSomething', params={})\n", (2366, 2405), False, 'from robotcode.jsonrpc2.protocol import JsonRPCError, JsonRPCErrorObject, JsonRPCErrors, JsonRPCMessage, JsonRPCProtocol, JsonRPCRequest, JsonRPCResponse\n'), ((2415, 2468), 'robotcode.jsonrpc2.protocol.JsonRPCRequest', 'JsonRPCRequest', ([], {'id': '(2)', 'method': '"""doSomething"""', 'params': '{}'}), "(id=2, method='doSomething', params={})\n", (2429, 2468), False, 'from robotcode.jsonrpc2.protocol import JsonRPCError, JsonRPCErrorObject, JsonRPCErrors, JsonRPCMessage, JsonRPCProtocol, JsonRPCRequest, JsonRPCResponse\n'), ((2478, 2531), 'robotcode.jsonrpc2.protocol.JsonRPCRequest', 'JsonRPCRequest', ([], {'id': '(3)', 'method': '"""doSomething"""', 'params': '{}'}), "(id=3, method='doSomething', params={})\n", (2492, 2531), False, 'from robotcode.jsonrpc2.protocol import JsonRPCError, JsonRPCErrorObject, JsonRPCErrors, JsonRPCMessage, JsonRPCProtocol, JsonRPCRequest, JsonRPCResponse\n'), ((4923, 4946), 'asyncio.wait_for', 'asyncio.wait_for', (['r', '(10)'], {}), '(r, 10)\n', (4939, 4946), False, 'import asyncio\n'), ((6085, 6108), 'asyncio.wait_for', 'asyncio.wait_for', (['r', '(10)'], {}), '(r, 10)\n', (6101, 6108), False, 'import asyncio\n'), ((6757, 6780), 'asyncio.wait_for', 'asyncio.wait_for', (['r', '(10)'], {}), '(r, 10)\n', (6773, 6780), False, 'import asyncio\n'), ((6798, 6833), 'robotcode.language_server.common.types.MessageActionItem', 'MessageActionItem', ([], {'title': '"""hi there"""'}), "(title='hi there')\n", (6815, 6833), False, 'from robotcode.language_server.common.types import MessageActionItem\n'), ((7445, 7468), 
'asyncio.wait_for', 'asyncio.wait_for', (['r', '(10)'], {}), '(r, 10)\n', (7461, 7468), False, 'import asyncio\n'), ((8108, 8131), 'asyncio.wait_for', 'asyncio.wait_for', (['r', '(10)'], {}), '(r, 10)\n', (8124, 8131), False, 'import asyncio\n'), ((8780, 8803), 'asyncio.wait_for', 'asyncio.wait_for', (['r', '(10)'], {}), '(r, 10)\n', (8796, 8803), False, 'import asyncio\n'), ((1072, 1088), 'asyncio.sleep', 'asyncio.sleep', (['(0)'], {}), '(0)\n', (1085, 1088), False, 'import asyncio\n'), ((4086, 4153), 'robotcode.jsonrpc2.protocol.JsonRPCErrorObject', 'JsonRPCErrorObject', ([], {'code': '(1)', 'message': '"""test"""', 'data': '"""this is the data"""'}), "(code=1, message='test', data='this is the data')\n", (4104, 4153), False, 'from robotcode.jsonrpc2.protocol import JsonRPCError, JsonRPCErrorObject, JsonRPCErrors, JsonRPCMessage, JsonRPCProtocol, JsonRPCRequest, JsonRPCResponse\n'), ((5828, 5863), 'robotcode.language_server.common.types.MessageActionItem', 'MessageActionItem', ([], {'title': '"""hi there"""'}), "(title='hi there')\n", (5845, 5863), False, 'from robotcode.language_server.common.types import MessageActionItem\n'), ((6500, 6535), 'robotcode.language_server.common.types.MessageActionItem', 'MessageActionItem', ([], {'title': '"""hi there"""'}), "(title='hi there')\n", (6517, 6535), False, 'from robotcode.language_server.common.types import MessageActionItem\n'), ((7487, 7522), 'robotcode.language_server.common.types.MessageActionItem', 'MessageActionItem', ([], {'title': '"""hi there"""'}), "(title='hi there')\n", (7504, 7522), False, 'from robotcode.language_server.common.types import MessageActionItem\n'), ((8150, 8185), 'robotcode.language_server.common.types.MessageActionItem', 'MessageActionItem', ([], {'title': '"""hi there"""'}), "(title='hi there')\n", (8167, 8185), False, 'from robotcode.language_server.common.types import MessageActionItem\n'), ((4632, 4677), 'typing.cast', 'cast', (['JsonRPCRequest', 'protocol.sended_message'], {}), '(JsonRPCRequest, protocol.sended_message)\n', (4636, 4677), False, 'from typing import Any, Dict, Generator, List, Optional, cast\n'), ((5771, 5816), 'typing.cast', 'cast', (['JsonRPCRequest', 'protocol.sended_message'], {}), '(JsonRPCRequest, protocol.sended_message)\n', (5775, 5816), False, 'from typing import Any, Dict, Generator, List, Optional, cast\n'), ((6443, 6488), 'typing.cast', 'cast', (['JsonRPCRequest', 'protocol.sended_message'], {}), '(JsonRPCRequest, protocol.sended_message)\n', (6447, 6488), False, 'from typing import Any, Dict, Generator, List, Optional, cast\n'), ((7046, 7076), 'robotcode.language_server.common.types.MessageActionItem.parse_obj', 'MessageActionItem.parse_obj', (['e'], {}), '(e)\n', (7073, 7076), False, 'from robotcode.language_server.common.types import MessageActionItem\n'), ((7129, 7174), 'typing.cast', 'cast', (['JsonRPCRequest', 'protocol.sended_message'], {}), '(JsonRPCRequest, protocol.sended_message)\n', (7133, 7174), False, 'from typing import Any, Dict, Generator, List, Optional, cast\n'), ((7187, 7222), 'robotcode.language_server.common.types.MessageActionItem', 'MessageActionItem', ([], {'title': '"""hi there"""'}), "(title='hi there')\n", (7204, 7222), False, 'from robotcode.language_server.common.types import MessageActionItem\n'), ((7792, 7837), 'typing.cast', 'cast', (['JsonRPCRequest', 'protocol.sended_message'], {}), '(JsonRPCRequest, protocol.sended_message)\n', (7796, 7837), False, 'from typing import Any, Dict, Generator, List, Optional, cast\n'), ((7850, 7885), 
'robotcode.language_server.common.types.MessageActionItem', 'MessageActionItem', ([], {'title': '"""hi there"""'}), "(title='hi there')\n", (7867, 7885), False, 'from robotcode.language_server.common.types import MessageActionItem\n'), ((8464, 8509), 'typing.cast', 'cast', (['JsonRPCRequest', 'protocol.sended_message'], {}), '(JsonRPCRequest, protocol.sended_message)\n', (8468, 8509), False, 'from typing import Any, Dict, Generator, List, Optional, cast\n'), ((8522, 8557), 'robotcode.language_server.common.types.MessageActionItem', 'MessageActionItem', ([], {'title': '"""hi there"""'}), "(title='hi there')\n", (8539, 8557), False, 'from robotcode.language_server.common.types import MessageActionItem\n'), ((8822, 8857), 'robotcode.language_server.common.types.MessageActionItem', 'MessageActionItem', ([], {'title': '"""hi there"""'}), "(title='hi there')\n", (8839, 8857), False, 'from robotcode.language_server.common.types import MessageActionItem\n')]
|
from __future__ import annotations
import ast
import inspect
import os
import sys
import traceback
from typing import Optional, Any, Callable
from collections.abc import Iterable
from subtypes import Str
from pathmagic import Dir
def is_running_in_ipython() -> bool:
"""Returns True if run from within a jupyter ipython interactive session, else False."""
try:
assert __IPYTHON__
return True
except (NameError, AttributeError):
return False
def executed_within_user_tree() -> bool:
"""Returns True if the '__main__' module is within the branches of the current user's filesystem tree, else False."""
main_dir = sys.modules["__main__"]._dh[0] if is_running_in_ipython() else sys.modules["__main__"].__file__
return Dir.from_home() > os.path.abspath(main_dir)
def issubclass_safe(candidate: Any, ancestor: Any) -> bool:
"""Returns True the candidate is a subclass of the ancestor, else False. Will return false instead of raising TypeError if the candidate is not a class."""
try:
return issubclass(candidate, ancestor)
except TypeError:
return False
def is_non_string_iterable(candidate: Any) -> bool:
return False if isinstance(candidate, (str, bytes)) else isinstance(candidate, Iterable)
def class_name(candidate: Any) -> str:
cls = candidate if isinstance(candidate, type) or issubclass_safe(candidate, type) else type(candidate)
try:
return cls.__name__
except AttributeError:
return Str(cls).slice.after_last("'").slice.before_first("'")
def traceback_from_exception(ex: Exception) -> str:
return "".join(traceback.format_exception(etype=type(ex), value=ex, tb=ex.__traceback__))
def beep() -> None:
"""Cross-platform implementation for producing a beeping sound. Only works on windows when used in an interactive IPython session (jupyter notebook)."""
if is_running_in_ipython():
import winsound
winsound.Beep(frequency=440, duration=2*1000)
else:
print("\a")
def get_short_lambda_source(lambda_func: Callable) -> Optional[str]:
"""Return the source of a (short) lambda function. If it's impossible to obtain, return None."""
try:
source_lines, _ = inspect.getsourcelines(lambda_func)
except (IOError, TypeError):
return None
if len(source_lines) != 1:
return None
source_text = os.linesep.join(source_lines).strip()
source_ast = ast.parse(source_text)
lambda_node = next((node for node in ast.walk(source_ast) if isinstance(node, ast.Lambda)), None)
if lambda_node is None:
return None
lambda_text = source_text[lambda_node.col_offset:]
lambda_body_text = source_text[lambda_node.body.col_offset:]
min_length = len('lambda:_')
while len(lambda_text) > min_length:
try:
code = compile(lambda_body_text, '<unused filename>', 'eval')
# noinspection PyUnresolvedReferences
if len(code.co_code) == len(lambda_func.__code__.co_code):
return lambda_text
except SyntaxError:
pass
lambda_text = lambda_text[:-1]
lambda_body_text = lambda_body_text[:-1]
return None
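# Hedged usage sketch (not part of the original module): exercises the pure helpers.
if __name__ == "__main__":
    double = lambda value: value * 2
    print(get_short_lambda_source(double))  # -> "lambda value: value * 2"
    print(class_name(42), issubclass_safe(bool, int), is_non_string_iterable("abc"))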
|
[
"os.path.abspath",
"inspect.getsourcelines",
"pathmagic.Dir.from_home",
"winsound.Beep",
"subtypes.Str",
"os.linesep.join",
"ast.parse",
"ast.walk"
] |
[((2453, 2475), 'ast.parse', 'ast.parse', (['source_text'], {}), '(source_text)\n', (2462, 2475), False, 'import ast\n'), ((768, 783), 'pathmagic.Dir.from_home', 'Dir.from_home', ([], {}), '()\n', (781, 783), False, 'from pathmagic import Dir\n'), ((786, 811), 'os.path.abspath', 'os.path.abspath', (['main_dir'], {}), '(main_dir)\n', (801, 811), False, 'import os\n'), ((1954, 2001), 'winsound.Beep', 'winsound.Beep', ([], {'frequency': '(440)', 'duration': '(2 * 1000)'}), '(frequency=440, duration=2 * 1000)\n', (1967, 2001), False, 'import winsound\n'), ((2237, 2272), 'inspect.getsourcelines', 'inspect.getsourcelines', (['lambda_func'], {}), '(lambda_func)\n', (2259, 2272), False, 'import inspect\n'), ((2397, 2426), 'os.linesep.join', 'os.linesep.join', (['source_lines'], {}), '(source_lines)\n', (2412, 2426), False, 'import os\n'), ((2517, 2537), 'ast.walk', 'ast.walk', (['source_ast'], {}), '(source_ast)\n', (2525, 2537), False, 'import ast\n'), ((1508, 1516), 'subtypes.Str', 'Str', (['cls'], {}), '(cls)\n', (1511, 1516), False, 'from subtypes import Str\n')]
|
import argparse
import logging
import sys
from typing import Dict
import pandas as pd
from analysis.src.python.data_analysis.model.column_name import IssuesColumns, SubmissionColumns
from analysis.src.python.data_analysis.utils.df_utils import read_df, write_df
from analysis.src.python.data_analysis.utils.parsing_utils import str_to_dict
def get_issues(issues: str, issue_class_column: str, issue_type_column: str, issues_types: Dict[str, str]):
""" Extracts issues classes and types from list with issue reports. """
for issue in str_to_dict(issues):
issues_types[issue[issue_class_column]] = issue.get(issue_type_column, 'Issues type undefined')
def get_issues_classes(issue_column_name: str,
issue_class_column: str,
issue_type_column: str,
submissions_with_issues_path: str,
issues_path: str):
""" Extracts all issues classes and types from lists with issue reports in submissions with issues dataset. """
logging.info(f'Reading submissions with issues from: {submissions_with_issues_path}')
df_submissions_with_issues = read_df(submissions_with_issues_path)
issues_types = {}
logging.info('Getting issues class and type from submissions with issues dataset')
df_submissions_with_issues[issue_column_name].apply(
lambda d: get_issues(d, issue_class_column, issue_type_column, issues_types))
logging.info(f'Saving issues classes and types to: {issues_path}')
write_df(pd.DataFrame.from_dict({
IssuesColumns.CLASS: issues_types.keys(),
IssuesColumns.TYPE: issues_types.values(),
}), issues_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('issues_type', type=str, help='Type of issues to analyse (can be raw or qodana).',
choices=[SubmissionColumns.RAW_ISSUES, SubmissionColumns.QODANA_ISSUES])
parser.add_argument('submissions_path', type=str, help='Path to .csv file with submissions with issues.')
parser.add_argument('issues_path', type=str, help='Path to .csv file where issues info will be saved')
args = parser.parse_args(sys.argv[1:])
issues_type = SubmissionColumns(args.issues_type)
issue_type_column_name = SubmissionColumns.ISSUE_TYPE
if issues_type == SubmissionColumns.QODANA_ISSUES:
issue_class_column_name = SubmissionColumns.QODANA_ISSUE_CLASS
else:
issue_class_column_name = SubmissionColumns.RAW_ISSUE_CLASS
get_issues_classes(issues_type,
issue_class_column_name,
issue_type_column_name,
args.submissions_path,
args.issues_path)
|
[
"analysis.src.python.data_analysis.utils.df_utils.read_df",
"argparse.ArgumentParser",
"analysis.src.python.data_analysis.model.column_name.SubmissionColumns",
"logging.info",
"analysis.src.python.data_analysis.utils.parsing_utils.str_to_dict"
] |
[((546, 565), 'analysis.src.python.data_analysis.utils.parsing_utils.str_to_dict', 'str_to_dict', (['issues'], {}), '(issues)\n', (557, 565), False, 'from analysis.src.python.data_analysis.utils.parsing_utils import str_to_dict\n'), ((1036, 1126), 'logging.info', 'logging.info', (['f"""Reading submissions with issues from: {submissions_with_issues_path}"""'], {}), "(\n f'Reading submissions with issues from: {submissions_with_issues_path}')\n", (1048, 1126), False, 'import logging\n'), ((1155, 1192), 'analysis.src.python.data_analysis.utils.df_utils.read_df', 'read_df', (['submissions_with_issues_path'], {}), '(submissions_with_issues_path)\n', (1162, 1192), False, 'from analysis.src.python.data_analysis.utils.df_utils import read_df, write_df\n'), ((1220, 1307), 'logging.info', 'logging.info', (['"""Getting issues class and type from submissions with issues dataset"""'], {}), "(\n 'Getting issues class and type from submissions with issues dataset')\n", (1232, 1307), False, 'import logging\n'), ((1451, 1517), 'logging.info', 'logging.info', (['f"""Saving issues classes and types to: {issues_path}"""'], {}), "(f'Saving issues classes and types to: {issues_path}')\n", (1463, 1517), False, 'import logging\n'), ((1720, 1745), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1743, 1745), False, 'import argparse\n'), ((2231, 2266), 'analysis.src.python.data_analysis.model.column_name.SubmissionColumns', 'SubmissionColumns', (['args.issues_type'], {}), '(args.issues_type)\n', (2248, 2266), False, 'from analysis.src.python.data_analysis.model.column_name import IssuesColumns, SubmissionColumns\n')]
|
"""Module containing the CLI programs for histoprint."""
import numpy as np
import click
from histoprint import *
import histoprint.formatter as formatter
@click.command()
@click.argument("infile", type=click.Path(exists=True, dir_okay=False, allow_dash=True))
@click.option(
"-b",
"--bins",
type=str,
default="10",
help="Number of bins or space-separated bin edges.",
)
@click.option("-t", "--title", type=str, default="", help="Title of the histogram.")
@click.option(
"--stack/--nostack", type=bool, default=False, help="Stack the histograms."
)
@click.option(
"-s/-S",
"--summary/--nosummary",
type=bool,
default=False,
help="Print summary statistics.",
)
@click.option(
"-l",
"--label",
"labels",
type=str,
multiple=True,
default=("",),
help="Labels for the data, one for each column.",
)
@click.option(
"--symbols",
type=str,
default=formatter.DEFAULT_SYMBOLS,
help="Symbol cycle for multiple histograms. Choices & default: '%s'"
% (formatter.DEFAULT_SYMBOLS,),
)
@click.option(
"--fg-colors",
type=str,
default=formatter.DEFAULT_FG_COLORS,
help="Colour cycle for foreground colours. Default: '%s', Choices: '0rgbcmykwRGBCMYKW'"
% (formatter.DEFAULT_FG_COLORS,),
)
@click.option(
"--bg-colors",
type=str,
default=formatter.DEFAULT_BG_COLORS,
help="Colour cycle for background colours. Default: '%s', Choices: '0rgbcmykwRGBCMYKW'"
% (formatter.DEFAULT_BG_COLORS,),
)
@click.option(
"-f",
"--field",
"fields",
type=str,
multiple=True,
help="Which fields to histogram. Interpretation of the fields depends on "
"the file format. TXT files only support integers for column numbers "
"starting at 0. For CSV files, the fields must be the names of the columns "
"as specified in the first line of the file. When plotting from ROOT files, "
"at least one field must be specified. This can either be the path to a "
"single TH1, or one or more paths to TTree branches.",
)
@click.version_option()
def histoprint(infile, **kwargs):
"""Read INFILE and print a histogram of the contained columns.
INFILE can be '-', in which case the data is read from STDIN.
"""
# Try to interpret file as textfile
try:
_histoprint_txt(infile, **kwargs)
exit(0)
except ValueError:
pass
# Try to interpret file as CSV file
try:
_histoprint_csv(infile, **kwargs)
exit(0)
except ImportError:
click.echo("Cannot try CSV file format. Pandas module not found.", err=True)
except UnicodeDecodeError:
pass
# Try to interpret file as ROOT file
try:
_histoprint_root(infile, **kwargs)
exit(0)
except ImportError:
click.echo("Cannot try ROOT file format. Uproot module not found.", err=True)
click.echo("Could not interpret file format.", err=True)
exit(1)
def _bin_edges(kwargs, data):
"""Get the desired bin edges."""
bins = kwargs.pop("bins", "10")
bins = np.fromiter(bins.split(), dtype=float)
if len(bins) == 1:
bins = int(bins[0])
if isinstance(bins, int):
minval = np.inf
maxval = -np.inf
for d in data:
minval = min(minval, np.nanmin(d))
maxval = max(maxval, np.nanmax(d))
bins = np.linspace(minval, maxval, bins + 1)
return bins
def _histoprint_txt(infile, **kwargs):
"""Interpret file as as simple whitespace separated table."""
# Read the data
data = np.loadtxt(click.open_file(infile), ndmin=2)
data = data.T
# Interpret field numbers
fields = kwargs.pop("fields", [])
if len(fields) > 0:
try:
fields = [int(f) for f in fields]
except ValueError:
click.echo("Fields for a TXT file must be integers.", err=True)
exit(1)
try:
data = data[fields]
except KeyError:
click.echo("Field out of bounds.", err=True)
exit(1)
# Interpret bins
bins = _bin_edges(kwargs, data)
# Create the histogram(s)
hist = [[], bins]
for d in data:
hist[0].append(np.histogram(d, bins=bins)[0])
# Print the histogram
print_hist(hist, **kwargs)
def _histoprint_csv(infile, **kwargs):
"""Interpret file as as CSV file."""
import pandas as pd
# Read the data
data = pd.read_csv(click.open_file(infile))
# Interpret field numbers/names
fields = list(kwargs.pop("fields", []))
if len(fields) > 0:
try:
data = data[fields]
except KeyError:
click.echo("Unknown column name.", err=True)
exit(1)
# Get default columns labels
if kwargs.get("labels", ("",)) == ("",):
kwargs["labels"] = data.columns
# Convert to array
data = data.to_numpy().T
# Interpret bins
bins = _bin_edges(kwargs, data)
# Create the histogram(s)
hist = [[], bins]
for d in data:
hist[0].append(np.histogram(d, bins=bins)[0])
# Print the histogram
print_hist(hist, **kwargs)
def _histoprint_root(infile, **kwargs):
"""Interpret file as as ROOT file."""
import uproot as up
# Open root file
F = up.open(infile)
# Interpret field names
fields = list(kwargs.pop("fields", []))
if len(fields) == 0:
click.echo("Must specify at least on field for ROOT files.", err=True)
click.echo(F.keys())
exit(1)
# Get default columns labels
if kwargs.get("labels", ("",)) == ("",):
kwargs["labels"] = [field.split("/")[-1] for field in fields]
# Read the data
if len(fields) == 1:
# Possible a single histogram
try:
hist = F[fields[0]].numpy()
except (AttributeError, KeyError):
pass
else:
kwargs.pop("bins", None) # Get rid of useless parameter
print_hist(hist, **kwargs)
return
data = []
for field in fields:
branch = F
for key in field.split("/"):
try:
branch = branch[key]
except KeyError:
click.echo(
"Could not find key '%s'. Possible values: %s"
% (key, branch.keys())
)
exit(1)
try:
d = np.array(branch.array().flatten())
except ValueError:
click.echo(
"Could not interpret root object '%s'. Possible child branches: %s"
% (key, branch.keys())
)
exit(1)
data.append(d)
# Interpret bins
bins = _bin_edges(kwargs, data)
# Create the histogram(s)
hist = [[], bins]
for d in data:
hist[0].append(np.histogram(d, bins=bins)[0])
# Print the histogram
print_hist(hist, **kwargs)
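# Hedged invocation sketch (not part of the original module; assumes the click command
# above is exposed as a `histoprint` console script and that data.txt is a placeholder
# whitespace-separated table):
#   histoprint data.txt -b 20 -t "column histogram" -s --stack -l col0 -l col1
# This reads the columns of data.txt and prints stacked 20-bin histograms with
# summary statistics and per-column labels.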
|
[
"click.version_option",
"click.option",
"click.echo",
"numpy.nanmin",
"click.command",
"click.open_file",
"numpy.histogram",
"numpy.linspace",
"click.Path",
"uproot.open",
"numpy.nanmax"
] |
[((159, 174), 'click.command', 'click.command', ([], {}), '()\n', (172, 174), False, 'import click\n'), ((265, 375), 'click.option', 'click.option', (['"""-b"""', '"""--bins"""'], {'type': 'str', 'default': '"""10"""', 'help': '"""Number of bins or space-separated bin edges."""'}), "('-b', '--bins', type=str, default='10', help=\n 'Number of bins or space-separated bin edges.')\n", (277, 375), False, 'import click\n'), ((395, 483), 'click.option', 'click.option', (['"""-t"""', '"""--title"""'], {'type': 'str', 'default': '""""""', 'help': '"""Title of the histogram."""'}), "('-t', '--title', type=str, default='', help=\n 'Title of the histogram.')\n", (407, 483), False, 'import click\n'), ((480, 574), 'click.option', 'click.option', (['"""--stack/--nostack"""'], {'type': 'bool', 'default': '(False)', 'help': '"""Stack the histograms."""'}), "('--stack/--nostack', type=bool, default=False, help=\n 'Stack the histograms.')\n", (492, 574), False, 'import click\n'), ((577, 687), 'click.option', 'click.option', (['"""-s/-S"""', '"""--summary/--nosummary"""'], {'type': 'bool', 'default': '(False)', 'help': '"""Print summary statistics."""'}), "('-s/-S', '--summary/--nosummary', type=bool, default=False,\n help='Print summary statistics.')\n", (589, 687), False, 'import click\n'), ((708, 842), 'click.option', 'click.option', (['"""-l"""', '"""--label"""', '"""labels"""'], {'type': 'str', 'multiple': '(True)', 'default': "('',)", 'help': '"""Labels for the data, one for each column."""'}), "('-l', '--label', 'labels', type=str, multiple=True, default=(\n '',), help='Labels for the data, one for each column.')\n", (720, 842), False, 'import click\n'), ((870, 1051), 'click.option', 'click.option', (['"""--symbols"""'], {'type': 'str', 'default': 'formatter.DEFAULT_SYMBOLS', 'help': '("Symbol cycle for multiple histograms. Choices & default: \'%s\'" % (\n formatter.DEFAULT_SYMBOLS,))'}), '(\'--symbols\', type=str, default=formatter.DEFAULT_SYMBOLS, help\n ="Symbol cycle for multiple histograms. Choices & default: \'%s\'" % (\n formatter.DEFAULT_SYMBOLS,))\n', (882, 1051), False, 'import click\n'), ((1066, 1276), 'click.option', 'click.option', (['"""--fg-colors"""'], {'type': 'str', 'default': 'formatter.DEFAULT_FG_COLORS', 'help': '("Colour cycle for foreground colours. Default: \'%s\', Choices: \'0rgbcmykwRGBCMYKW\'"\n % (formatter.DEFAULT_FG_COLORS,))'}), '(\'--fg-colors\', type=str, default=formatter.DEFAULT_FG_COLORS,\n help=\n "Colour cycle for foreground colours. Default: \'%s\', Choices: \'0rgbcmykwRGBCMYKW\'"\n % (formatter.DEFAULT_FG_COLORS,))\n', (1078, 1276), False, 'import click\n'), ((1287, 1497), 'click.option', 'click.option', (['"""--bg-colors"""'], {'type': 'str', 'default': 'formatter.DEFAULT_BG_COLORS', 'help': '("Colour cycle for background colours. Default: \'%s\', Choices: \'0rgbcmykwRGBCMYKW\'"\n % (formatter.DEFAULT_BG_COLORS,))'}), '(\'--bg-colors\', type=str, default=formatter.DEFAULT_BG_COLORS,\n help=\n "Colour cycle for background colours. Default: \'%s\', Choices: \'0rgbcmykwRGBCMYKW\'"\n % (formatter.DEFAULT_BG_COLORS,))\n', (1299, 1497), False, 'import click\n'), ((1508, 1997), 'click.option', 'click.option', (['"""-f"""', '"""--field"""', '"""fields"""'], {'type': 'str', 'multiple': '(True)', 'help': '"""Which fields to histogram. Interpretation of the fields depends on the file format. TXT files only support integers for column numbers starting at 0. For CSV files, the fields must be the names of the columns as specified in the first line of the file. 
When plotting from ROOT files, at least one field must be specified. This can either be the path to a single TH1, or one or more paths to TTree branches."""'}), "('-f', '--field', 'fields', type=str, multiple=True, help=\n 'Which fields to histogram. Interpretation of the fields depends on the file format. TXT files only support integers for column numbers starting at 0. For CSV files, the fields must be the names of the columns as specified in the first line of the file. When plotting from ROOT files, at least one field must be specified. This can either be the path to a single TH1, or one or more paths to TTree branches.'\n )\n", (1520, 1997), False, 'import click\n'), ((2051, 2073), 'click.version_option', 'click.version_option', ([], {}), '()\n', (2071, 2073), False, 'import click\n'), ((2880, 2936), 'click.echo', 'click.echo', (['"""Could not interpret file format."""'], {'err': '(True)'}), "('Could not interpret file format.', err=True)\n", (2890, 2936), False, 'import click\n'), ((5267, 5282), 'uproot.open', 'up.open', (['infile'], {}), '(infile)\n', (5274, 5282), True, 'import uproot as up\n'), ((206, 262), 'click.Path', 'click.Path', ([], {'exists': '(True)', 'dir_okay': '(False)', 'allow_dash': '(True)'}), '(exists=True, dir_okay=False, allow_dash=True)\n', (216, 262), False, 'import click\n'), ((3366, 3403), 'numpy.linspace', 'np.linspace', (['minval', 'maxval', '(bins + 1)'], {}), '(minval, maxval, bins + 1)\n', (3377, 3403), True, 'import numpy as np\n'), ((3570, 3593), 'click.open_file', 'click.open_file', (['infile'], {}), '(infile)\n', (3585, 3593), False, 'import click\n'), ((4437, 4460), 'click.open_file', 'click.open_file', (['infile'], {}), '(infile)\n', (4452, 4460), False, 'import click\n'), ((5389, 5459), 'click.echo', 'click.echo', (['"""Must specify at least on field for ROOT files."""'], {'err': '(True)'}), "('Must specify at least on field for ROOT files.', err=True)\n", (5399, 5459), False, 'import click\n'), ((2534, 2610), 'click.echo', 'click.echo', (['"""Cannot try CSV file format. Pandas module not found."""'], {'err': '(True)'}), "('Cannot try CSV file format. Pandas module not found.', err=True)\n", (2544, 2610), False, 'import click\n'), ((2797, 2874), 'click.echo', 'click.echo', (['"""Cannot try ROOT file format. Uproot module not found."""'], {'err': '(True)'}), "('Cannot try ROOT file format. 
Uproot module not found.', err=True)\n", (2807, 2874), False, 'import click\n'), ((3290, 3302), 'numpy.nanmin', 'np.nanmin', (['d'], {}), '(d)\n', (3299, 3302), True, 'import numpy as np\n'), ((3337, 3349), 'numpy.nanmax', 'np.nanmax', (['d'], {}), '(d)\n', (3346, 3349), True, 'import numpy as np\n'), ((3813, 3876), 'click.echo', 'click.echo', (['"""Fields for a TXT file must be integers."""'], {'err': '(True)'}), "('Fields for a TXT file must be integers.', err=True)\n", (3823, 3876), False, 'import click\n'), ((3979, 4023), 'click.echo', 'click.echo', (['"""Field out of bounds."""'], {'err': '(True)'}), "('Field out of bounds.', err=True)\n", (3989, 4023), False, 'import click\n'), ((4197, 4223), 'numpy.histogram', 'np.histogram', (['d'], {'bins': 'bins'}), '(d, bins=bins)\n', (4209, 4223), True, 'import numpy as np\n'), ((4649, 4693), 'click.echo', 'click.echo', (['"""Unknown column name."""'], {'err': '(True)'}), "('Unknown column name.', err=True)\n", (4659, 4693), False, 'import click\n'), ((5039, 5065), 'numpy.histogram', 'np.histogram', (['d'], {'bins': 'bins'}), '(d, bins=bins)\n', (5051, 5065), True, 'import numpy as np\n'), ((6799, 6825), 'numpy.histogram', 'np.histogram', (['d'], {'bins': 'bins'}), '(d, bins=bins)\n', (6811, 6825), True, 'import numpy as np\n')]
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import proto_pb2 as proto__pb2
class EdgeDeviceStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Sync = channel.stream_stream(
'/aranya.EdgeDevice/Sync',
request_serializer=proto__pb2.Msg.SerializeToString,
response_deserializer=proto__pb2.Cmd.FromString,
)
class EdgeDeviceServicer(object):
"""Missing associated documentation comment in .proto file."""
def Sync(self, request_iterator, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_EdgeDeviceServicer_to_server(servicer, server):
rpc_method_handlers = {
'Sync': grpc.stream_stream_rpc_method_handler(
servicer.Sync,
request_deserializer=proto__pb2.Msg.FromString,
response_serializer=proto__pb2.Cmd.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'aranya.EdgeDevice', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class EdgeDevice(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def Sync(request_iterator,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.stream_stream(request_iterator, target, '/aranya.EdgeDevice/Sync',
proto__pb2.Msg.SerializeToString,
proto__pb2.Cmd.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
[
"grpc.method_handlers_generic_handler",
"grpc.stream_stream_rpc_method_handler",
"grpc.experimental.stream_stream"
] |
[((1408, 1486), 'grpc.method_handlers_generic_handler', 'grpc.method_handlers_generic_handler', (['"""aranya.EdgeDevice"""', 'rpc_method_handlers'], {}), "('aranya.EdgeDevice', rpc_method_handlers)\n", (1444, 1486), False, 'import grpc\n'), ((1149, 1313), 'grpc.stream_stream_rpc_method_handler', 'grpc.stream_stream_rpc_method_handler', (['servicer.Sync'], {'request_deserializer': 'proto__pb2.Msg.FromString', 'response_serializer': 'proto__pb2.Cmd.SerializeToString'}), '(servicer.Sync, request_deserializer=\n proto__pb2.Msg.FromString, response_serializer=proto__pb2.Cmd.\n SerializeToString)\n', (1186, 1313), False, 'import grpc\n'), ((2024, 2288), 'grpc.experimental.stream_stream', 'grpc.experimental.stream_stream', (['request_iterator', 'target', '"""/aranya.EdgeDevice/Sync"""', 'proto__pb2.Msg.SerializeToString', 'proto__pb2.Cmd.FromString', 'options', 'channel_credentials', 'insecure', 'call_credentials', 'compression', 'wait_for_ready', 'timeout', 'metadata'], {}), "(request_iterator, target,\n '/aranya.EdgeDevice/Sync', proto__pb2.Msg.SerializeToString, proto__pb2\n .Cmd.FromString, options, channel_credentials, insecure,\n call_credentials, compression, wait_for_ready, timeout, metadata)\n", (2055, 2288), False, 'import grpc\n')]
|
from django.conf.urls import url
from django.urls import path, include
from . import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
url('^$', views.home, name='home'),
path('account/', include('django.contrib.auth.urls')),
path('profile/<id>/', views.profile, name='profile'),
path('profile/<id>/update/', views.update_profile, name='update_profile'),
path('project/new/', views.new_project, name='new_project'),
path('project/<title>/reviews/', views.single_project, name='single_project'),
path('project/<title>/', views.single_project, name='project'),
path('project/<id>/review/', views.add_review, name='review'),
url('search/',views.search_projects,name="search"),
url(r'^api/profiles/$', views.ProfileView.as_view(), name='api_profiles'),
url(r'^api/projects/$', views.ProjectView.as_view(), name='api_projects')
]
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
|
[
"django.urls.path",
"django.conf.urls.static.static",
"django.conf.urls.url",
"django.urls.include"
] |
[((188, 222), 'django.conf.urls.url', 'url', (['"""^$"""', 'views.home'], {'name': '"""home"""'}), "('^$', views.home, name='home')\n", (191, 222), False, 'from django.conf.urls import url\n'), ((287, 339), 'django.urls.path', 'path', (['"""profile/<id>/"""', 'views.profile'], {'name': '"""profile"""'}), "('profile/<id>/', views.profile, name='profile')\n", (291, 339), False, 'from django.urls import path, include\n'), ((345, 418), 'django.urls.path', 'path', (['"""profile/<id>/update/"""', 'views.update_profile'], {'name': '"""update_profile"""'}), "('profile/<id>/update/', views.update_profile, name='update_profile')\n", (349, 418), False, 'from django.urls import path, include\n'), ((424, 483), 'django.urls.path', 'path', (['"""project/new/"""', 'views.new_project'], {'name': '"""new_project"""'}), "('project/new/', views.new_project, name='new_project')\n", (428, 483), False, 'from django.urls import path, include\n'), ((489, 566), 'django.urls.path', 'path', (['"""project/<title>/reviews/"""', 'views.single_project'], {'name': '"""single_project"""'}), "('project/<title>/reviews/', views.single_project, name='single_project')\n", (493, 566), False, 'from django.urls import path, include\n'), ((572, 634), 'django.urls.path', 'path', (['"""project/<title>/"""', 'views.single_project'], {'name': '"""project"""'}), "('project/<title>/', views.single_project, name='project')\n", (576, 634), False, 'from django.urls import path, include\n'), ((640, 701), 'django.urls.path', 'path', (['"""project/<id>/review/"""', 'views.add_review'], {'name': '"""review"""'}), "('project/<id>/review/', views.add_review, name='review')\n", (644, 701), False, 'from django.urls import path, include\n'), ((707, 759), 'django.conf.urls.url', 'url', (['"""search/"""', 'views.search_projects'], {'name': '"""search"""'}), "('search/', views.search_projects, name='search')\n", (710, 759), False, 'from django.conf.urls import url\n'), ((956, 1017), 'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n', (962, 1017), False, 'from django.conf.urls.static import static\n'), ((245, 280), 'django.urls.include', 'include', (['"""django.contrib.auth.urls"""'], {}), "('django.contrib.auth.urls')\n", (252, 280), False, 'from django.urls import path, include\n')]
|
from extutils.logger import LoggerSkeleton
logger = LoggerSkeleton("sys.handle", logger_name_env="EVT_HANDLER")
|
[
"extutils.logger.LoggerSkeleton"
] |
[((54, 113), 'extutils.logger.LoggerSkeleton', 'LoggerSkeleton', (['"""sys.handle"""'], {'logger_name_env': '"""EVT_HANDLER"""'}), "('sys.handle', logger_name_env='EVT_HANDLER')\n", (68, 113), False, 'from extutils.logger import LoggerSkeleton\n')]
|
from typing import List, Dict
from injecta.service.argument.ArgumentInterface import ArgumentInterface
from injecta.service.class_.InspectedArgument import InspectedArgument
from injecta.service.resolved.ResolvedArgument import ResolvedArgument
from injecta.service.argument.validator.ArgumentsValidator import ArgumentsValidator
from injecta.service.class_.InspectedArgumentsResolver import InspectedArgumentsResolver
class NamedArgumentsResolver:
def __init__(self):
self.__inspected_arguments_resolver = InspectedArgumentsResolver()
self.__arguments_validator = ArgumentsValidator()
def resolve(self, arguments: List[ArgumentInterface], inspected_arguments: List[InspectedArgument], service_name: str):
inspected_arguments = [inspected_argument for inspected_argument in inspected_arguments if inspected_argument.name != "args"]
inspected_arguments_indexed = {inspected_argument.name: inspected_argument for inspected_argument in inspected_arguments}
arguments_indexed = {argument.name: argument for argument in arguments}
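        # if the constructor accepts **kwargs, skip the unknown-argument check and pass extra named arguments through unmatched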
if self.__contains_kwargs(inspected_arguments):
return self.__resolve_arguments_kwargs(arguments_indexed, inspected_arguments_indexed)
for argument_name, argument in arguments_indexed.items():
if argument_name not in inspected_arguments_indexed:
raise Exception(f'Unknown argument "{argument_name}" in service "{service_name}"')
return self.__resolve_arguments(arguments_indexed, inspected_arguments_indexed)
def __resolve_arguments_kwargs(
self, arguments_indexed: Dict[str, ArgumentInterface], inspected_arguments_indexed: Dict[str, InspectedArgument]
):
del inspected_arguments_indexed["kwargs"]
resolved_arguments = self.__resolve_arguments(arguments_indexed, inspected_arguments_indexed)
for resolved_argument in resolved_arguments:
del arguments_indexed[resolved_argument.name]
for _, argument in arguments_indexed.items():
resolved_arguments.append(ResolvedArgument(argument.name, argument, None))
return resolved_arguments
def __resolve_arguments(
self, arguments_indexed: Dict[str, ArgumentInterface], inspected_arguments_indexed: Dict[str, InspectedArgument]
):
resolved_arguments = []
for argument_name, inspected_argument in inspected_arguments_indexed.items():
argument = arguments_indexed[argument_name] if argument_name in arguments_indexed else None
# argument with default value, no value defined in service configuration
if inspected_argument.has_default_value() and argument is None:
continue
resolved_argument = ResolvedArgument(inspected_argument.name, argument, inspected_argument)
resolved_arguments.append(resolved_argument)
return resolved_arguments
def __contains_kwargs(self, inspected_arguments: List[InspectedArgument]):
return inspected_arguments and inspected_arguments[-1].name == "kwargs"
|
[
"injecta.service.resolved.ResolvedArgument.ResolvedArgument",
"injecta.service.argument.validator.ArgumentsValidator.ArgumentsValidator",
"injecta.service.class_.InspectedArgumentsResolver.InspectedArgumentsResolver"
] |
[((521, 549), 'injecta.service.class_.InspectedArgumentsResolver.InspectedArgumentsResolver', 'InspectedArgumentsResolver', ([], {}), '()\n', (547, 549), False, 'from injecta.service.class_.InspectedArgumentsResolver import InspectedArgumentsResolver\n'), ((587, 607), 'injecta.service.argument.validator.ArgumentsValidator.ArgumentsValidator', 'ArgumentsValidator', ([], {}), '()\n', (605, 607), False, 'from injecta.service.argument.validator.ArgumentsValidator import ArgumentsValidator\n'), ((2760, 2831), 'injecta.service.resolved.ResolvedArgument.ResolvedArgument', 'ResolvedArgument', (['inspected_argument.name', 'argument', 'inspected_argument'], {}), '(inspected_argument.name, argument, inspected_argument)\n', (2776, 2831), False, 'from injecta.service.resolved.ResolvedArgument import ResolvedArgument\n'), ((2075, 2122), 'injecta.service.resolved.ResolvedArgument.ResolvedArgument', 'ResolvedArgument', (['argument.name', 'argument', 'None'], {}), '(argument.name, argument, None)\n', (2091, 2122), False, 'from injecta.service.resolved.ResolvedArgument import ResolvedArgument\n')]
|
import unittest
import libpysal
from libpysal.common import pandas, RTOL, ATOL
from esda.geary_local_mv import Geary_Local_MV
import numpy as np
PANDAS_EXTINCT = pandas is None
class Geary_Local_MV_Tester(unittest.TestCase):
def setUp(self):
np.random.seed(100)
self.w = libpysal.io.open(libpysal.examples.get_path("stl.gal")).read()
f = libpysal.io.open(libpysal.examples.get_path("stl_hom.txt"))
self.y1 = np.array(f.by_col['HR8893'])
self.y2 = np.array(f.by_col['HC8488'])
def test_local_geary_mv(self):
lG_mv = Geary_Local_MV(connectivity=self.w).fit([self.y1, self.y2])
print(lG_mv.p_sim[0])
self.assertAlmostEqual(lG_mv.localG[0], 0.4096931479581422)
self.assertAlmostEqual(lG_mv.p_sim[0], 0.211)
suite = unittest.TestSuite()
test_classes = [
Geary_Local_MV_Tester
]
for i in test_classes:
a = unittest.TestLoader().loadTestsFromTestCase(i)
suite.addTest(a)
if __name__ == "__main__":
runner = unittest.TextTestRunner()
runner.run(suite)
|
[
"numpy.random.seed",
"unittest.TextTestRunner",
"unittest.TestSuite",
"esda.geary_local_mv.Geary_Local_MV",
"numpy.array",
"unittest.TestLoader",
"libpysal.examples.get_path"
] |
[((803, 823), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (821, 823), False, 'import unittest\n'), ((1009, 1034), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {}), '()\n', (1032, 1034), False, 'import unittest\n'), ((256, 275), 'numpy.random.seed', 'np.random.seed', (['(100)'], {}), '(100)\n', (270, 275), True, 'import numpy as np\n'), ((446, 474), 'numpy.array', 'np.array', (["f.by_col['HR8893']"], {}), "(f.by_col['HR8893'])\n", (454, 474), True, 'import numpy as np\n'), ((493, 521), 'numpy.array', 'np.array', (["f.by_col['HC8488']"], {}), "(f.by_col['HC8488'])\n", (501, 521), True, 'import numpy as np\n'), ((385, 426), 'libpysal.examples.get_path', 'libpysal.examples.get_path', (['"""stl_hom.txt"""'], {}), "('stl_hom.txt')\n", (411, 426), False, 'import libpysal\n'), ((900, 921), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (919, 921), False, 'import unittest\n'), ((574, 609), 'esda.geary_local_mv.Geary_Local_MV', 'Geary_Local_MV', ([], {'connectivity': 'self.w'}), '(connectivity=self.w)\n', (588, 609), False, 'from esda.geary_local_mv import Geary_Local_MV\n'), ((310, 347), 'libpysal.examples.get_path', 'libpysal.examples.get_path', (['"""stl.gal"""'], {}), "('stl.gal')\n", (336, 347), False, 'import libpysal\n')]
|
'''
Module that makes timeline graphs from csv data.
'''
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def plot_timeline(file_name):
'''
Makes timeline graphs from csv data.
'''
# data frame from rounded data file
df = pd.read_csv(file_name)
# find all par for graphs
time = df['computer_time']
# plotting
fig, (x_acc_1, y_acc_1, x_gyro_1, y_gyro_1, x_acc_2,
y_acc_2, x_gyro_2, y_gyro_2) = plt.subplots(8, 1)
x_acc_1.plot(time, df['x_acc_1'].tolist())
x_acc_1.set_title('x_acc_1')
y_acc_1.plot(time, df['y_acc_1'].tolist())
y_acc_1.set_title('y_acc_1')
x_gyro_1.plot(time, df['x_gyro_1'].tolist())
x_gyro_1.set_title('x_gyro_1')
y_gyro_1.plot(time, df['y_gyro_1'].tolist())
y_gyro_1.set_title('y_gyro_1')
x_acc_2.plot(time, df['x_acc_2'].tolist())
x_acc_2.set_title('x_acc_2')
y_acc_2.plot(time, df['y_acc_2'].tolist())
y_acc_2.set_title('y_acc_2')
x_gyro_2.plot(time, df['x_gyro_2'].tolist())
x_gyro_2.set_title('x_gyro_2')
y_gyro_2.plot(time, df['y_gyro_2'].tolist())
y_gyro_2.set_title('y_gyro_2')
fig.subplots_adjust(hspace=0.5)
plt.show()
# plt.savefig(new)
# if __name__ == "__main__":
# plot_timeline('walking.csv')
|
[
"pandas.read_csv",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] |
[((267, 289), 'pandas.read_csv', 'pd.read_csv', (['file_name'], {}), '(file_name)\n', (278, 289), True, 'import pandas as pd\n'), ((466, 484), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(8)', '(1)'], {}), '(8, 1)\n', (478, 484), True, 'import matplotlib.pyplot as plt\n'), ((1191, 1201), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1199, 1201), True, 'import matplotlib.pyplot as plt\n')]
|
import pytorch_lightning as pl
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
from typing import List, Tuple, Dict, Any, Union, Iterable
try:
import genomics_gans
except ImportError:
exec(open('__init__.py').read())
import genomics_gans
from genomics_gans.prepare_data.data_modules import TabularDataset
class LitFFNN(pl.LightningModule):
# ----------------------------------
# Initialize constants and NN architecture
# ----------------------------------
def __init__(self, network: nn.Module, train_set: TabularDataset,
val_set: TabularDataset, test_set: TabularDataset):
""" Feed-Forward Neural Network System
Args:
X (np.ndarray): Feature matrix
"""
super().__init__()
# TODO: train-val-test splits
self.network = network
# Hard-coded constants
self.loss_fn = nn.NLLLoss()
self.lr = 1e-2
self.N_CLASSES = 3
self.epoch = 0
self.epoch_train_losses = []
self.epoch_val_losses = []
self.best_val_epoch = 0
def forward(self, x):
logits = self.network(x)
return logits
def configure_optimizers(self):
optimizer = torch.optim.Adam(
params = self.parameters(), lr = self.lr)
return optimizer
# ----------------------------------
# Training, validation, and test steps
# ----------------------------------
def training_step(self, batch, batch_idx):
x, y = batch
y = y.flatten().long()
logits = self(x)
loss = self.loss_fn(logits, y)
self.log('train_loss', loss, on_step=True, on_epoch=True,
prog_bar=True)
return loss
def validation_step(self, batch, batch_idx, val=True):
x, y = batch
y = y.flatten().long()
# compute loss
logits = self(x)
loss = self.loss_fn(logits, y)
self.log('val_loss', loss, on_step=True, on_epoch=True,
prog_bar=True) # self.log interacts with TensorBoard
return loss
def test_step(self, batch, batch_idx):
x, y = batch
y = y.flatten().long()
# compute loss
logits = self(x)
loss = self.loss_fn(logits, y)
self.log('test_loss', loss, on_step=True, on_epoch=True,
prog_bar=False)
return loss
def training_epoch_end(self, outputs: List[Any]):
outputs: List[torch.Tensor] = [list(d.values())[0] for d in outputs]
        # average the batch losses over the epoch (avoid shadowing the built-in sum)
        total = torch.zeros(1, dtype=float).to(self.device)
        for batch_loss in outputs:
            total += batch_loss.to(self.device)
        avg_batch_loss = total / len(outputs)
        self.epoch_train_losses.append({avg_batch_loss[0].item()})
def validation_epoch_end(self, outputs: List[Any]):
        total = torch.zeros(1, dtype=float).to(self.device)
        for batch_loss in outputs:
            total += batch_loss.to(self.device)
        avg_batch_loss = total / len(outputs)
        self.epoch_val_losses.append({avg_batch_loss[0].item()})
# ---------------------------------------------------------------
# Custom training for evolutionary algorithm
# --------------------------------------------------------------
def custom_training_step(self, verbose=False):
self.network.train()
train_loader = self.train_dl
train_loss: float = 0
for idx, batch in enumerate(train_loader):
self.optimizer.zero_grad() # clears paramter gradient buffers
inputs, targets = batch
# transfer batch data to computation device
inputs, targets = [
tensor.to(self.device) for tensor in [inputs, targets]]
targets = targets.long() # converts dtype to Long
output = self.network(inputs)
loss = self.loss_fn(output, targets.flatten())
loss.backward() # back propagation
self.optimizer.step() # update model weights
train_loss += loss.data.item()
if (idx % 10 == 0) and verbose:
print(f"epoch {self.epoch+1}/{self.n_epochs}, "
+ f"batch {idx}.")
train_loss = train_loss / len(train_loader)
return train_loss
def custom_validation_step(self):
val_loader = self.test_dl
val_loss = 0.0
self.network.eval()
for batch in val_loader:
inputs, targets = batch
inputs, targets = [tensor.to(self.device) for tensor in batch]
targets = targets.long() # converts dtype to Long
output = self.network(inputs)
loss = self.loss_fn(output, targets.flatten())
val_loss += loss.data.item()
val_loss = val_loss / len(val_loader)
return val_loss
def custom_train(self, n_epochs, plot=True, verbose=False, plot_train=False):
train_loader = self.train_dl
val_loader = self.test_dl
device=self.device
self.network.to(self.device)
train_losses, val_losses = [], []
best_val_loss = np.infty
best_val_epoch = 0
early_stopping_buffer = 10
epoch = 0
best_params = None
for epoch in range(n_epochs):
# Training
train_loss = self.custom_training_step()
train_losses.append(train_loss)
# Validation
val_loss = self.custom_validation_step()
val_losses.append(val_loss)
if val_loss < best_val_loss:
best_params = self.network.parameters()
best_val_loss = val_loss
best_val_epoch = epoch
# If validation loss fails to decrease for some number of epochs
# end training
if np.abs(epoch - best_val_epoch) > early_stopping_buffer:
break
print(f"Epoch: {epoch}, Training Loss: {train_loss:.3f}, "
+f"Validation loss: {val_loss:.3f}")
#self.network.parameters = best_params
self.best_val_loss = best_val_loss
self.best_val_epoch = best_val_epoch
if plot:
skip_frames = 3
fig, ax = plt.subplots()
fig.tight_layout()
if plot_train:
ax.plot(np.arange(epoch + 1)[skip_frames:],
train_losses[skip_frames:], '-', label="training set")
ax.plot(np.arange(epoch + 1)[skip_frames:],
val_losses[skip_frames:], '-', label="test set")
ax.set(xlabel="Epoch", ylabel="Loss")
ax.legend()
plt.show()
# ----------------------------------
# Helper functions - Use post-training
# ----------------------------------
def predict(self, x: torch.Tensor) -> torch.Tensor:
self.eval()
        x = x.to(self.device)
logits = self.network(x)
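        # predicted class = index of the largest logit for each sample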
preds = torch.argmax(input = logits, dim=1)
return preds
def accuracy(self, pred: torch.Tensor, target: torch.Tensor):
self.eval()
if isinstance(pred, torch.Tensor) and isinstance(target, torch.Tensor):
pred, target = [t.to(self.device) for t in [pred, target]]
elif isinstance(pred, np.ndarray) and isinstance(target, np.ndarray):
tensors = [torch.Tensor(t).to(self.device) for t in [pred, target]]
pred, target = tensors
else:
raise ValueError("The types of `pred` and `target` must match. "
+ "These can be np.ndarrays or torch.Tensors.")
accuracy = pl.metrics.functional.accuracy(pred, target)
return accuracy
def f1(self, pred: torch.Tensor, target: torch.Tensor):
self.eval()
pred, target = [t.flatten() for t in [pred, target]]
if isinstance(pred, torch.Tensor) and isinstance(target, torch.Tensor):
pred, target = [t.to(self.device) for t in [pred, target]]
elif isinstance(pred, np.ndarray) and isinstance(target, np.ndarray):
tensors = [torch.Tensor(t).to(self.device) for t in [pred, target]]
pred, target = tensors
else:
raise ValueError("The types of `pred` and `target` must match. "
+ "These can be np.ndarrays or torch.Tensors.")
f1 = pl.metrics.functional.f1(
preds = pred, target = target, num_classes = 3, multilabel = True)
return f1
def multiclass_aucroc(self, pred: torch.Tensor, target: torch.Tensor):
self.eval()
if isinstance(pred, torch.Tensor) and isinstance(target, torch.Tensor):
pred, target = [t.to(self.device) for t in [pred, target]]
elif isinstance(pred, np.ndarray) and isinstance(target, np.ndarray):
tensors = [torch.Tensor(t).to(self.device) for t in [pred, target]]
pred, target = tensors
else:
raise ValueError("The types of `pred` and `target` must match. "
+ "These can be np.ndarrays or torch.Tensors.")
auc_roc = pl.metrics.functional.classification.multiclass_auroc(
pred = pred, target = target)
return auc_roc
def plot_losses(self, plot_train=True):
skip_frames = 1
fig, ax = plt.subplots()
fig.tight_layout()
n_epochs = len(self.epoch_val_losses)
self.epoch_train_losses = [s.pop() for s in self.epoch_train_losses]
self.epoch_val_losses = [s.pop() for s in self.epoch_val_losses]
if plot_train:
n_epochs = len(self.epoch_train_losses)
ax.plot(np.arange(n_epochs)[skip_frames:],
self.epoch_train_losses[skip_frames:], label="train")
ax.plot(np.arange(n_epochs)[skip_frames:],
self.epoch_val_losses[1:][skip_frames:], label="val")
ax.set(xlabel="Epoch", ylabel="Loss")
ax.legend()
plt.show()
|
[
"pytorch_lightning.metrics.functional.classification.multiclass_auroc",
"matplotlib.pyplot.show",
"numpy.abs",
"torch.argmax",
"pytorch_lightning.metrics.functional.f1",
"pytorch_lightning.metrics.functional.accuracy",
"torch.nn.NLLLoss",
"torch.Tensor",
"numpy.arange",
"torch.zeros",
"matplotlib.pyplot.subplots"
] |
[((917, 929), 'torch.nn.NLLLoss', 'nn.NLLLoss', ([], {}), '()\n', (927, 929), True, 'import torch.nn as nn\n'), ((7044, 7077), 'torch.argmax', 'torch.argmax', ([], {'input': 'logits', 'dim': '(1)'}), '(input=logits, dim=1)\n', (7056, 7077), False, 'import torch\n'), ((7707, 7751), 'pytorch_lightning.metrics.functional.accuracy', 'pl.metrics.functional.accuracy', (['pred', 'target'], {}), '(pred, target)\n', (7737, 7751), True, 'import pytorch_lightning as pl\n'), ((8430, 8517), 'pytorch_lightning.metrics.functional.f1', 'pl.metrics.functional.f1', ([], {'preds': 'pred', 'target': 'target', 'num_classes': '(3)', 'multilabel': '(True)'}), '(preds=pred, target=target, num_classes=3,\n multilabel=True)\n', (8454, 8517), True, 'import pytorch_lightning as pl\n'), ((9174, 9253), 'pytorch_lightning.metrics.functional.classification.multiclass_auroc', 'pl.metrics.functional.classification.multiclass_auroc', ([], {'pred': 'pred', 'target': 'target'}), '(pred=pred, target=target)\n', (9227, 9253), True, 'import pytorch_lightning as pl\n'), ((9381, 9395), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (9393, 9395), True, 'import matplotlib.pyplot as plt\n'), ((10021, 10031), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10029, 10031), True, 'import matplotlib.pyplot as plt\n'), ((6325, 6339), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6337, 6339), True, 'import matplotlib.pyplot as plt\n'), ((6746, 6756), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6754, 6756), True, 'import matplotlib.pyplot as plt\n'), ((2565, 2592), 'torch.zeros', 'torch.zeros', (['(1)'], {'dtype': 'float'}), '(1, dtype=float)\n', (2576, 2592), False, 'import torch\n'), ((2893, 2920), 'torch.zeros', 'torch.zeros', (['(1)'], {'dtype': 'float'}), '(1, dtype=float)\n', (2904, 2920), False, 'import torch\n'), ((5902, 5932), 'numpy.abs', 'np.abs', (['(epoch - best_val_epoch)'], {}), '(epoch - best_val_epoch)\n', (5908, 5932), True, 'import numpy as np\n'), ((9841, 9860), 'numpy.arange', 'np.arange', (['n_epochs'], {}), '(n_epochs)\n', (9850, 9860), True, 'import numpy as np\n'), ((6554, 6574), 'numpy.arange', 'np.arange', (['(epoch + 1)'], {}), '(epoch + 1)\n', (6563, 6574), True, 'import numpy as np\n'), ((9715, 9734), 'numpy.arange', 'np.arange', (['n_epochs'], {}), '(n_epochs)\n', (9724, 9734), True, 'import numpy as np\n'), ((6422, 6442), 'numpy.arange', 'np.arange', (['(epoch + 1)'], {}), '(epoch + 1)\n', (6431, 6442), True, 'import numpy as np\n'), ((7440, 7455), 'torch.Tensor', 'torch.Tensor', (['t'], {}), '(t)\n', (7452, 7455), False, 'import torch\n'), ((8170, 8185), 'torch.Tensor', 'torch.Tensor', (['t'], {}), '(t)\n', (8182, 8185), False, 'import torch\n'), ((8909, 8924), 'torch.Tensor', 'torch.Tensor', (['t'], {}), '(t)\n', (8921, 8924), False, 'import torch\n')]
|
import json
import os
import pathlib
from typing import Any, Dict, List, Union, cast
JSONData = Union[List[Any], Dict[str, Any]]
# Splitting this out for testing with no side effects
def mkdir(directory: str) -> None:
return pathlib.Path(directory).mkdir(parents=True, exist_ok=True)
# Splitting this out for testing with no side effects
def remove(filename: str) -> None:
return os.remove(filename)
def safe_jsonify(directory: str, filename: str, data: JSONData) -> None:
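    # ensure the target directory exists before opening the file for writing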
mkdir(directory)
fname = os.path.join(directory, filename)
with open(fname, 'w') as json_file:
json.dump(data, json_file)
def load_json(filename: str) -> JSONData:
with open(filename) as json_file:
return cast(JSONData, json.load(json_file))
|
[
"json.dump",
"os.remove",
"json.load",
"pathlib.Path",
"os.path.join"
] |
[((393, 412), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (402, 412), False, 'import os\n'), ((521, 554), 'os.path.join', 'os.path.join', (['directory', 'filename'], {}), '(directory, filename)\n', (533, 554), False, 'import os\n'), ((603, 629), 'json.dump', 'json.dump', (['data', 'json_file'], {}), '(data, json_file)\n', (612, 629), False, 'import json\n'), ((232, 255), 'pathlib.Path', 'pathlib.Path', (['directory'], {}), '(directory)\n', (244, 255), False, 'import pathlib\n'), ((742, 762), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (751, 762), False, 'import json\n')]
|
#!/usr/bin/python3
"""
Script for generating the data set (128b, 256b, 1kB, 1MB, 100MB, 1GB).
Context : Projet BCS - Master 2 SSI - Istic (Univ. Rennes1)
Authors : <NAME> and <NAME>
This script also measures execution time in 4 contexts
=> Sequential encryption
=> Sequential decryption
=> Parallel encryption
=> Parallel decryption
"""
import os
import time
import subprocess
from collections import OrderedDict
import pygal
B_NUMBER = 1024
B_SIZE = 16
SIXTEEN_B = None
DATASET_DIR = './dataset/'
REPORT_DIR = './report/'
DATASET_EXTENSION = '.i'
CIPHERED_EXTENSION = '.encrypted'
UNCIPHERED_EXTENSION = '.decrypted'
EXEC_NAME = './myAE.exe'
FILESIZES = OrderedDict([
('128b', 16),
('256b', 32),
('1kB', 1000),
('1MB', 1000000),
('100MB', 100000000),
('1GB', 1000000000)
])
def generate_file(name, size):
"""Generate an input file containing random bits."""
print('=> Generating %s file' % name)
with open(DATASET_DIR+name+DATASET_EXTENSION, 'wb+') as fout:
fout.write(os.urandom(size))
def generate_dataset():
"""Generate dataset files."""
print(" ### Generating the dataset files ### ")
print('/!\\ This function can take a lot of time /!\\')
# For every filesize, generate the file
for key, value in FILESIZES.items():
generate_file(key, value)
def cipher_execution(op, input, output, password):
"""Launch the encryption and measure the time it took."""
command = [
EXEC_NAME,
op,
input,
'-o',
output,
'-k',
password
]
start_time = time.time()
subprocess.call(command, 1)
end_time = time.time() - start_time
print("%s took %f seconds" % (input, end_time))
return end_time
def generate_encryption_statistics():
"""Generate the figure of encryption time given the input size."""
print("\nGeneration of the encryption statistics:")
# Password
password = 'password'
# The table of the results
results = []
    # For every filesize, encrypt the corresponding file and record the time
for key in FILESIZES:
results.append(
cipher_execution(
'-c',
DATASET_DIR+key+DATASET_EXTENSION,
DATASET_DIR+key+CIPHERED_EXTENSION,
password
)
)
line_chart = pygal.Line()
line_chart.title = 'Execution time of encryption in sequential mode'
line_chart.x_title = 'Size of input file'
line_chart.x_labels = FILESIZES
line_chart.y_title = 'Execution time in seconds'
line_chart.add('Time', results)
line_chart.render_to_png(REPORT_DIR+'encryption_sequential.png')
def generate_decryption_statistics():
"""Generate the figure of decryption time given the input size."""
print("\nGeneration of the decryption statistics:")
# Password
password = 'password'
# The table of the results
results = []
    # For every filesize, decrypt the corresponding file and record the time
for key in FILESIZES:
results.append(
cipher_execution(
'-d',
DATASET_DIR+key+CIPHERED_EXTENSION,
DATASET_DIR+key+UNCIPHERED_EXTENSION,
password
)
)
line_chart = pygal.Line()
line_chart.title = 'Execution time of decryption in sequential mode'
line_chart.x_title = 'Size of input file'
line_chart.x_labels = FILESIZES
line_chart.y_title = 'Execution time in seconds'
line_chart.add('Time', results)
line_chart.render_to_png(REPORT_DIR+'decryption_sequential.png')
# Main function to be launched when this script is called
if __name__ == '__main__':
# Generation of the dataset
gen = input("Do you want to generate dataset? [y/n] ")
if gen == 'y':
generate_dataset()
# Process statistics on it
generate_encryption_statistics()
generate_decryption_statistics()
|
[
"pygal.Line",
"time.time",
"subprocess.call",
"collections.OrderedDict",
"os.urandom"
] |
[((679, 801), 'collections.OrderedDict', 'OrderedDict', (["[('128b', 16), ('256b', 32), ('1kB', 1000), ('1MB', 1000000), ('100MB', \n 100000000), ('1GB', 1000000000)]"], {}), "([('128b', 16), ('256b', 32), ('1kB', 1000), ('1MB', 1000000), (\n '100MB', 100000000), ('1GB', 1000000000)])\n", (690, 801), False, 'from collections import OrderedDict\n'), ((1611, 1622), 'time.time', 'time.time', ([], {}), '()\n', (1620, 1622), False, 'import time\n'), ((1627, 1654), 'subprocess.call', 'subprocess.call', (['command', '(1)'], {}), '(command, 1)\n', (1642, 1654), False, 'import subprocess\n'), ((2342, 2354), 'pygal.Line', 'pygal.Line', ([], {}), '()\n', (2352, 2354), False, 'import pygal\n'), ((3246, 3258), 'pygal.Line', 'pygal.Line', ([], {}), '()\n', (3256, 3258), False, 'import pygal\n'), ((1670, 1681), 'time.time', 'time.time', ([], {}), '()\n', (1679, 1681), False, 'import time\n'), ((1040, 1056), 'os.urandom', 'os.urandom', (['size'], {}), '(size)\n', (1050, 1056), False, 'import os\n')]
|
#!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
"""Tests for finding sensitive strings."""
__author__ = '<EMAIL> (<NAME>)'
from google.apputils import resources
from google.apputils import basetest
from moe import config_utils
from moe.scrubber import sensitive_string_scrubber
import test_util
STRINGS_JSON = config_utils.ReadConfigResource(
test_util.TestResourceName('sensitive_strings.json'))
class SensitiveWordsTest(basetest.TestCase):
"""Unittests for the sensitive word search."""
def setUp(self):
self.word_scrubber = sensitive_string_scrubber.SensitiveWordScrubber(
STRINGS_JSON[u'sensitive_words'])
def assertMatch(self, expected_word, line):
self.assertEquals([expected_word],
self.word_scrubber.FindSensitiveStrings(line))
def assertNoMatch(self, line):
self.assertEquals([], self.word_scrubber.FindSensitiveStrings(line))
def testObviousWords(self):
self.assertMatch(u'testy', u'testy.delegate()')
self.assertMatch(u'secrety', u'void fixForSecrety')
self.assertMatch(u'testy', u'http://foo.com/testy/1234')
self.assertMatch(u'http://secret.wiki/', u'http://secret.wiki/secret-url')
self.assertMatch(u'internal.website.com', u'foo.internal.website.com')
self.assertMatch(u'http://secret.wiki/',
u'here is one line\nhttp://secret.wiki/secret-url')
def testCapitalization(self):
self.assertMatch(u'abc', u'void fixForABC')
self.assertMatch(u'testy', u'check out the Testy')
self.assertMatch(u'secrety', u'notSECRETY')
self.assertNoMatch(u'NOTSECRETY')
self.assertNoMatch(u'latEsty') # does not match testy
self.assertNoMatch(u'notsecretY') # does not match secrety
def testNonMatches(self):
self.assertNoMatch(u'go to the next line')
def testWordExtraction(self):
self.assertMatch(u'testy', u'testy')
self.assertMatch(u'testy', u' testy ')
self.assertMatch(u'testy', u'ThisIsATestyString')
self.assertMatch(u'testy', u' public void buildTesty(')
self.assertMatch(u'testy', u'THIS_IS_TESTY_A_SECRET_PROJECT')
self.assertNoMatch(u'kittens attesty')
class SensitiveResTest(basetest.TestCase):
"""Unittests for the sensitive word search."""
def setUp(self):
self.re_scrubber = sensitive_string_scrubber.SensitiveReScrubber(
STRINGS_JSON[u'sensitive_res'])
def assertMatch(self, expected_string, line):
self.assertEquals([expected_string],
self.re_scrubber.FindSensitiveStrings(line))
def assertNoMatch(self, line):
self.assertEquals([], self.re_scrubber.FindSensitiveStrings(line))
def testSensitiveRes(self):
self.assertMatch(u'supersecret',
u'thisissosupersecretweneedtoscrubitevenwithinaword')
self.assertMatch(u'SUPERSECRET',
u'THISISSOSUPERSECRETWENEEDTOSCRUBITEVENWITHINAWORD')
self.assertMatch(u'SuPeRsEcReT',
u'ThIsIsSoSuPeRsEcReTwEnEeDtOsCrUbItEvEnWiThInAwOrD')
self.assertNoMatch(u'notasecret')
self.assertMatch(u'.secretcode1.', u'.secretcode1.')
self.assertMatch(u' secret_code123 ', u'the secret_code123 is secret')
self.assertNoMatch(u'SECRET_CODE_123')
self.assertNoMatch(u'THESECRETCODE123')
if __name__ == '__main__':
basetest.main()
|
[
"test_util.TestResourceName",
"google.apputils.basetest.main",
"moe.scrubber.sensitive_string_scrubber.SensitiveReScrubber",
"moe.scrubber.sensitive_string_scrubber.SensitiveWordScrubber"
] |
[((377, 429), 'test_util.TestResourceName', 'test_util.TestResourceName', (['"""sensitive_strings.json"""'], {}), "('sensitive_strings.json')\n", (403, 429), False, 'import test_util\n'), ((3304, 3319), 'google.apputils.basetest.main', 'basetest.main', ([], {}), '()\n', (3317, 3319), False, 'from google.apputils import basetest\n'), ((572, 658), 'moe.scrubber.sensitive_string_scrubber.SensitiveWordScrubber', 'sensitive_string_scrubber.SensitiveWordScrubber', (["STRINGS_JSON[u'sensitive_words']"], {}), "(STRINGS_JSON[\n u'sensitive_words'])\n", (619, 658), False, 'from moe.scrubber import sensitive_string_scrubber\n'), ((2299, 2376), 'moe.scrubber.sensitive_string_scrubber.SensitiveReScrubber', 'sensitive_string_scrubber.SensitiveReScrubber', (["STRINGS_JSON[u'sensitive_res']"], {}), "(STRINGS_JSON[u'sensitive_res'])\n", (2344, 2376), False, 'from moe.scrubber import sensitive_string_scrubber\n')]
|
from torch.utils.data import Dataset
from torchvision import transforms
import torch
class HypertrophyDataset(Dataset):
def __init__(self, images, targets, device):
self.images = images
self.targets = targets
self.device = device
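        # optional augmentation pipeline (currently unused; see the commented-out call in __getitem__)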
self.augmenter = transforms.Compose([
transforms.ToPILImage(),
transforms.RandomAffine([-90, 90]),
transforms.ToTensor()
])
self.preprocess = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor()
])
def __len__(self):
return len(self.images)
def __getitem__(self, index):
image_ = self.images[index]
# image_ = self.augmenter(image_)
sample = {
'image': torch.tensor(image_, dtype=torch.float, device=self.device),
'target': torch.tensor(self.targets[index], dtype=torch.long, device=self.device)
}
return sample
|
[
"torchvision.transforms.RandomAffine",
"torchvision.transforms.ToPILImage",
"torchvision.transforms.ToTensor",
"torchvision.transforms.CenterCrop",
"torch.tensor",
"torchvision.transforms.Resize"
] |
[((812, 871), 'torch.tensor', 'torch.tensor', (['image_'], {'dtype': 'torch.float', 'device': 'self.device'}), '(image_, dtype=torch.float, device=self.device)\n', (824, 871), False, 'import torch\n'), ((895, 966), 'torch.tensor', 'torch.tensor', (['self.targets[index]'], {'dtype': 'torch.long', 'device': 'self.device'}), '(self.targets[index], dtype=torch.long, device=self.device)\n', (907, 966), False, 'import torch\n'), ((318, 341), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (339, 341), False, 'from torchvision import transforms\n'), ((355, 389), 'torchvision.transforms.RandomAffine', 'transforms.RandomAffine', (['[-90, 90]'], {}), '([-90, 90])\n', (378, 389), False, 'from torchvision import transforms\n'), ((403, 424), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (422, 424), False, 'from torchvision import transforms\n'), ((495, 517), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256)'], {}), '(256)\n', (512, 517), False, 'from torchvision import transforms\n'), ((531, 557), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (552, 557), False, 'from torchvision import transforms\n'), ((571, 592), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (590, 592), False, 'from torchvision import transforms\n')]
|
# -*- encoding=utf-8 -*-
"""
# **********************************************************************************
# Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
# [oecp] is licensed under the Mulan PSL v1.
# You can use this software according to the terms and conditions of the Mulan PSL v1.
# You may obtain a copy of Mulan PSL v1 at:
# http://license.coscl.org.cn/MulanPSL
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v1 for more details.
# Author:
# Create: 2021-09-06
# Description: package category
# **********************************************************************************
"""
import json
import logging
from enum import Enum, unique
from oecp.proxy.rpm_proxy import RPMProxy
logger = logging.getLogger("oecp")
@unique
class CategoryLevel(Enum):
    CATEGORY_LEVEL_ZERO = 0  # core package level
CATEGORY_LEVEL_ONE = 1
CATEGORY_LEVEL_TWO = 2
CATEGORY_LEVEL_THREE = 3
    CATEGORY_LEVEL_NOT_SPECIFIED = 4  # not specified
@classmethod
def level_name_2_enum(cls, name):
return {"level1": cls.CATEGORY_LEVEL_ONE, "level2": cls.CATEGORY_LEVEL_TWO,
"level3": cls.CATEGORY_LEVEL_THREE}.get(name, cls.CATEGORY_LEVEL_NOT_SPECIFIED)
class Category(object):
def __init__(self, path):
"""
        :param path: category file, in JSON format
"""
self._src_categories = {}
self._bin_categories = {}
self.CORE_PKG = {'gcc', 'glibc', 'qemu', 'libvirt', 'docker-engine', 'java-11-openjdk', 'java-1.8.0-openjdk',
'systemd', 'openssh', 'lvm2', 'busybox', 'initscripts'}
self._load(path)
def _load(self, path):
"""
:param path:
:return:
"""
try:
with open(path, "r") as f:
categories = json.load(f)
for category in categories:
level = CategoryLevel.level_name_2_enum(category["level"])
try:
if category["src"]:
name = RPMProxy.rpm_n_v_r_d_a(category["src"], dist="category")[0]
self._src_categories[name] = level
if category["bin"]:
name = RPMProxy.rpm_n_v_r_d_a(category["bin"], dist="category")[0]
self._bin_categories[name] = level
except AttributeError as e:
                        logger.exception(f"\"{category['src']}\" or \"{category['bin']}\" is not a legal rpm name")
raise
except FileNotFoundError:
logger.exception(f"{path} not exist")
raise
def category_of_src_package(self, name):
"""
:param name:
:return:
"""
return self._src_categories.get(name, CategoryLevel.CATEGORY_LEVEL_NOT_SPECIFIED)
def category_of_bin_package(self, name):
"""
:param name:
:return:
"""
if name in self.CORE_PKG:
return CategoryLevel.CATEGORY_LEVEL_ZERO
return self._bin_categories.get(name, CategoryLevel.CATEGORY_LEVEL_NOT_SPECIFIED)
|
[
"oecp.proxy.rpm_proxy.RPMProxy.rpm_n_v_r_d_a",
"json.load",
"logging.getLogger"
] |
[((922, 947), 'logging.getLogger', 'logging.getLogger', (['"""oecp"""'], {}), "('oecp')\n", (939, 947), False, 'import logging\n'), ((1965, 1977), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1974, 1977), False, 'import json\n'), ((2206, 2262), 'oecp.proxy.rpm_proxy.RPMProxy.rpm_n_v_r_d_a', 'RPMProxy.rpm_n_v_r_d_a', (["category['src']"], {'dist': '"""category"""'}), "(category['src'], dist='category')\n", (2228, 2262), False, 'from oecp.proxy.rpm_proxy import RPMProxy\n'), ((2408, 2464), 'oecp.proxy.rpm_proxy.RPMProxy.rpm_n_v_r_d_a', 'RPMProxy.rpm_n_v_r_d_a', (["category['bin']"], {'dist': '"""category"""'}), "(category['bin'], dist='category')\n", (2430, 2464), False, 'from oecp.proxy.rpm_proxy import RPMProxy\n')]
|
#!/usr/bin/env python3
from pathlib import Path
from sqlite3 import Connection
from bleanser.core import logger
from bleanser.core.utils import get_tables
from bleanser.core.sqlite import SqliteNormaliser, Tool
class Normaliser(SqliteNormaliser):
DELETE_DOMINATED = True
MULTIWAY = True
def __init__(self, db: Path) -> None:
# todo not sure about this?.. also makes sense to run checked for cleanup/extract?
with self.checked(db) as conn:
self.tables = get_tables(conn)
def check_table(name: str) -> None:
assert name in self.tables, (name, self.tables)
check_table('moz_bookmarks')
check_table('moz_historyvisits')
# moz_annos -- apparently, downloads?
def cleanup(self, c: Connection) -> None:
tool = Tool(c)
tool.drop_index('moz_places_guid_uniqueindex')
tool.drop_index('guid_uniqueindex') # on mobile only
[(visits_before,)] = c.execute('SELECT count(*) FROM moz_historyvisits')
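        # remember the visit count so the sanity check below can confirm nothing was dropped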
tool.drop_cols(
table='moz_places',
cols=[
# aggregates, changing all the time
'frecency',
'last_visit_date',
'visit_count',
# ugh... sometimes changes because of notifications, e.g. twitter/youtube?, or during page load
'hidden',
'typed',
'title',
'description',
'preview_image_url',
'foreign_count', # jus some internal refcount thing... https://bugzilla.mozilla.org/show_bug.cgi?id=1017502
## mobile only
'visit_count_local',
'last_visit_date_local',
'last_visit_date_remote',
'sync_status',
'sync_change_counter',
##
]
)
# ugh. sometimes changes for no reason...
# and anyway, for history the historyvisits table refers place_id (this table's actual id)
# also use update instead delete because phone db used to have UNIQUE constraint...
c.execute('UPDATE moz_places SET guid=id')
tool.drop_cols(
table='moz_bookmarks',
cols=['lastModified'], # changing all the time for no reason?
# todo hmm dateAdded might change when e.g. firefox reinstalls and it adds default bookmarks
# probably not worth the trouble
)
tool.drop('moz_meta')
tool.drop('moz_origins') # prefix/host/frequency -- not interesting
# todo not sure...
tool.drop('moz_inputhistory')
# sanity check just in case... can remove after we get rid of triggers properly...
[(visits_after,)] = c.execute('SELECT count(*) FROM moz_historyvisits')
assert visits_before == visits_after, (visits_before, visits_after)
if __name__ == '__main__':
from bleanser.core import main
main(Normaliser=Normaliser)
|
[
"bleanser.core.utils.get_tables",
"bleanser.core.main",
"bleanser.core.sqlite.Tool"
] |
[((2951, 2978), 'bleanser.core.main', 'main', ([], {'Normaliser': 'Normaliser'}), '(Normaliser=Normaliser)\n', (2955, 2978), False, 'from bleanser.core import main\n'), ((805, 812), 'bleanser.core.sqlite.Tool', 'Tool', (['c'], {}), '(c)\n', (809, 812), False, 'from bleanser.core.sqlite import SqliteNormaliser, Tool\n'), ((498, 514), 'bleanser.core.utils.get_tables', 'get_tables', (['conn'], {}), '(conn)\n', (508, 514), False, 'from bleanser.core.utils import get_tables\n')]
|
import requests
import json
class CovidData:
__data = [{}]
__province = ''
__population = -1
def __init__(self, province):
self.__province = province.upper()
reports = json.loads(
requests.get(
'https://api.covid19tracker.ca/reports/province/' +
self.__province
).text
)
self.__data = reports['data']
provinces = json.loads(
requests.get('https://api.covid19tracker.ca/provinces').text
)
for prov in provinces:
if prov['code'].upper() == self.__province:
self.__population = prov['population']
break
@property
def data(self):
return self.__data
@property
def __latest_data(self):
return self.__data[-1]
@property
def latest_data(self):
        return self.__latest_data
@property
def __last_week_data(self):
return self.__data[-7:]
@property
def province(self):
return self.__province
@property
def date(self):
return self.__latest_data['date']
@property
def new_cases(self):
return self.__latest_data['change_cases']
@property
def total_active(self):
return (
self.__latest_data['total_cases'] -
self.__latest_data['total_recoveries'] -
self.__latest_data['total_fatalities']
)
@property
def new_deaths(self):
return self.__latest_data['change_fatalities']
@property
def total_deaths(self):
return self.__latest_data['total_fatalities']
@property
def test_positivity(self):
cases = 0
tests = 0
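        # aggregate cases and tests over the 5 most recent reporting days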
for data in self.__data[-5:]:
cases += data['change_cases']
tests += data['change_tests']
return cases / tests
@property
def new_vaccinations(self):
return self.__latest_data['change_vaccinations']
@property
def total_vaccinations(self):
return self.__latest_data['total_vaccinations']
@property
    def total_vaccines_received(self):
return self.__latest_data['total_vaccines_distributed']
@property
def population(self):
return self.__population
@property
def percent_vaccinated(self):
return self.total_vaccinations / self.population
@property
    def percent_vaccines_received(self):
        return self.total_vaccines_received / self.population
@property
def days_until_one_dose_per_person(self):
vaccines = 0
for data in self.__last_week_data:
vaccines += data['change_vaccinations']
vaccine_rate = vaccines / len(self.__last_week_data)
return (
(self.population - self.total_vaccinations) /
vaccine_rate
)
if __name__ == '__main__':
data = CovidData('MB')
print(f'{data.province} {data.date}')
print(f'-----------------------------------------')
print(f'New Cases: {data.new_cases}')
print(f'Total Active: {data.total_active}')
print(f"Test Positivity: {data.test_positivity:.2%}")
print('')
print(f'New Deaths: {data.new_deaths}')
print(f'Total Deaths: {data.total_deaths}')
print('')
print(f'New Vaccinations: {data.new_vaccinations}')
print(f'Total Vaccinations: {data.total_vaccinations}')
print(f"Percent Vaccinated: {data.percent_vaccinated:.2%}")
    print(f'Percent Vaccine Received: {data.percent_vaccines_received:.2%}')
print(f"Days Until One Dose Per Person: {data.days_until_one_dose_per_person:.0f}")
|
[
"requests.get"
] |
[((226, 312), 'requests.get', 'requests.get', (["('https://api.covid19tracker.ca/reports/province/' + self.__province)"], {}), "('https://api.covid19tracker.ca/reports/province/' + self.\n __province)\n", (238, 312), False, 'import requests\n'), ((458, 513), 'requests.get', 'requests.get', (['"""https://api.covid19tracker.ca/provinces"""'], {}), "('https://api.covid19tracker.ca/provinces')\n", (470, 513), False, 'import requests\n')]
|
from typing import Dict
import requests
import helpscout.exceptions as exc
class Endpoint:
"""Base endpoint class."""
def __init__(self, client, base_url: str):
"""
Params:
client: helpscout client with credentials
base_url: url for endpoint
"""
self.client = client
self.base_url = base_url
def process_get_result(self, response: requests.Response) -> Dict:
"""Process response with coresponding status code."""
if response.status_code == 400:
raise exc.BadRequestException(response.json())
elif response.status_code == 401:
raise exc.NotAuthorizedException
elif response.status_code == 404:
return {}
return response.json()
def process_result_with_status_code(self, response: requests.Response, status_code):
"""Process result with given status code.
Raise exception if response status code does't match provided one
"""
if response.status_code != status_code:
print(status_code)
raise exc.BadRequestException(response.json())
return response.status_code
def base_get_request(self, base_url: str, **kwargs) -> requests.Response:
"""Base get request."""
return requests.get(
base_url,
headers={"Authorization": f"Bearer {self.client.access_token}"},
params={**kwargs},
)
def base_put_request(self, base_url: str, **kwargs) -> requests.Response:
"""Base put request."""
return requests.put(
base_url,
headers={
"Authorization": f"Bearer {self.client.access_token}",
"Content-Type": "application/json; charset=UTF-8",
},
json={**kwargs},
)
def base_patch_request(self, base_url: str, **kwargs) -> requests.Response:
"""Base patch request."""
return requests.patch(
base_url,
headers={
"Authorization": f"Bearer {self.client.access_token}",
"Content-Type": "application/json; charset=UTF-8",
},
json={**kwargs},
)
def base_post_request(self, base_url: str, **kwargs) -> requests.Response:
"""Base post request."""
return requests.post(
base_url,
headers={
"Authorization": f"Bearer {self.client.access_token}",
"Content-Type": "application/json; charset=UTF-8",
},
json={**kwargs},
)
def base_delete_request(self, base_url: str, **kwargs) -> requests.Response:
"""Base delete request."""
return requests.delete(
base_url, headers={"Authorization": f"Bearer {self.client.access_token}"}
)
|
[
"requests.patch",
"requests.delete",
"requests.get",
"requests.put",
"requests.post"
] |
[((1313, 1423), 'requests.get', 'requests.get', (['base_url'], {'headers': "{'Authorization': f'Bearer {self.client.access_token}'}", 'params': '{**kwargs}'}), "(base_url, headers={'Authorization':\n f'Bearer {self.client.access_token}'}, params={**kwargs})\n", (1325, 1423), False, 'import requests\n'), ((1593, 1756), 'requests.put', 'requests.put', (['base_url'], {'headers': "{'Authorization': f'Bearer {self.client.access_token}', 'Content-Type':\n 'application/json; charset=UTF-8'}", 'json': '{**kwargs}'}), "(base_url, headers={'Authorization':\n f'Bearer {self.client.access_token}', 'Content-Type':\n 'application/json; charset=UTF-8'}, json={**kwargs})\n", (1605, 1756), False, 'import requests\n'), ((1973, 2138), 'requests.patch', 'requests.patch', (['base_url'], {'headers': "{'Authorization': f'Bearer {self.client.access_token}', 'Content-Type':\n 'application/json; charset=UTF-8'}", 'json': '{**kwargs}'}), "(base_url, headers={'Authorization':\n f'Bearer {self.client.access_token}', 'Content-Type':\n 'application/json; charset=UTF-8'}, json={**kwargs})\n", (1987, 2138), False, 'import requests\n'), ((2353, 2517), 'requests.post', 'requests.post', (['base_url'], {'headers': "{'Authorization': f'Bearer {self.client.access_token}', 'Content-Type':\n 'application/json; charset=UTF-8'}", 'json': '{**kwargs}'}), "(base_url, headers={'Authorization':\n f'Bearer {self.client.access_token}', 'Content-Type':\n 'application/json; charset=UTF-8'}, json={**kwargs})\n", (2366, 2517), False, 'import requests\n'), ((2736, 2830), 'requests.delete', 'requests.delete', (['base_url'], {'headers': "{'Authorization': f'Bearer {self.client.access_token}'}"}), "(base_url, headers={'Authorization':\n f'Bearer {self.client.access_token}'})\n", (2751, 2830), False, 'import requests\n')]
|
#!/usr/bin/env python
# __BEGIN_LICENSE__
#Copyright (c) 2015, United States Government, as represented by the
#Administrator of the National Aeronautics and Space Administration.
#All rights reserved.
# __END_LICENSE__
import os
import logging
import stat
from glob import glob
import shutil
import itertools
from geocamUtil.Builder import Builder
from django.conf import settings
class Installer(object):
def __init__(self, builder=None, logger=None):
if builder is None:
builder = Builder()
if logger is None:
logger = logging
self.builder = builder
self.logger = logger
@staticmethod
def joinNoTrailingSlash(a, b):
if b == '':
return a
else:
return a + os.path.sep + b
def dosys(self, cmd):
        self.logger.info('running: %s', cmd)
ret = os.system(cmd)
if ret != 0:
self.logger.warning('[command exited with non-zero return value %d]' % ret)
def getFiles(self, src, suffix=''):
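        # return paths relative to src for the given suffix and, for directories, everything below it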
path = self.joinNoTrailingSlash(src, suffix)
try:
pathMode = os.stat(path)[stat.ST_MODE]
except OSError:
# couldn't stat file, e.g. broken symlink, ignore it
return []
if stat.S_ISREG(pathMode):
return [suffix]
elif stat.S_ISDIR(pathMode):
return itertools.chain([suffix],
*[self.getFiles(src, os.path.join(suffix, f))
for f in os.listdir(path)])
else:
return [] # not a dir or regular file, ignore
def installFile(self, src, dst):
if os.path.isdir(src):
if os.path.exists(dst):
if not os.path.isdir(dst):
# replace plain file with directory
os.unlink(dst)
os.makedirs(dst)
else:
# make directory
os.makedirs(dst)
else:
# install plain file
if not os.path.exists(os.path.dirname(dst)):
os.makedirs(os.path.dirname(dst))
if settings.GEOCAM_UTIL_INSTALLER_USE_SYMLINKS:
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
if os.path.lexists(dst):
os.unlink(dst)
os.symlink(os.path.realpath(src), dst)
else:
shutil.copy(src, dst)
def installRecurse0(self, src, dst):
for f in self.getFiles(src):
dst1 = self.joinNoTrailingSlash(dst, f)
src1 = self.joinNoTrailingSlash(src, f)
self.builder.applyRule(dst1, [src1],
lambda: self.installFile(src1, dst1))
def installRecurse(self, src, dst):
logging.info('installRecurse %s %s', src, dst)
self.installRecurse0(src, dst)
def installRecurseGlob0(self, srcs, dst):
logging.debug('installRecurseGlob0 srcs=%s dst=%s', srcs, dst)
for src in srcs:
self.installRecurse0(src, os.path.join(dst, os.path.basename(src)))
def installRecurseGlob(self, pat, dst):
logging.info('installRecurseGlob %s %s', pat, dst)
self.installRecurseGlob0(glob(pat), dst)
|
[
"geocamUtil.Builder.Builder",
"os.unlink",
"glob.glob",
"os.path.join",
"shutil.copy",
"os.path.lexists",
"os.path.dirname",
"os.path.exists",
"stat.S_ISDIR",
"stat.S_ISREG",
"os.stat",
"os.path.basename",
"os.path.realpath",
"os.system",
"os.listdir",
"logging.debug",
"os.makedirs",
"os.path.isdir",
"logging.info"
] |
[((871, 885), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (880, 885), False, 'import os\n'), ((1275, 1297), 'stat.S_ISREG', 'stat.S_ISREG', (['pathMode'], {}), '(pathMode)\n', (1287, 1297), False, 'import stat\n'), ((1677, 1695), 'os.path.isdir', 'os.path.isdir', (['src'], {}), '(src)\n', (1690, 1695), False, 'import os\n'), ((2849, 2895), 'logging.info', 'logging.info', (['"""installRecurse %s %s"""', 'src', 'dst'], {}), "('installRecurse %s %s', src, dst)\n", (2861, 2895), False, 'import logging\n'), ((2990, 3052), 'logging.debug', 'logging.debug', (['"""installRecurseGlob0 srcs=%s dst=%s"""', 'srcs', 'dst'], {}), "('installRecurseGlob0 srcs=%s dst=%s', srcs, dst)\n", (3003, 3052), False, 'import logging\n'), ((3211, 3261), 'logging.info', 'logging.info', (['"""installRecurseGlob %s %s"""', 'pat', 'dst'], {}), "('installRecurseGlob %s %s', pat, dst)\n", (3223, 3261), False, 'import logging\n'), ((514, 523), 'geocamUtil.Builder.Builder', 'Builder', ([], {}), '()\n', (521, 523), False, 'from geocamUtil.Builder import Builder\n'), ((1340, 1362), 'stat.S_ISDIR', 'stat.S_ISDIR', (['pathMode'], {}), '(pathMode)\n', (1352, 1362), False, 'import stat\n'), ((1712, 1731), 'os.path.exists', 'os.path.exists', (['dst'], {}), '(dst)\n', (1726, 1731), False, 'import os\n'), ((3295, 3304), 'glob.glob', 'glob', (['pat'], {}), '(pat)\n', (3299, 3304), False, 'from glob import glob\n'), ((1125, 1138), 'os.stat', 'os.stat', (['path'], {}), '(path)\n', (1132, 1138), False, 'import os\n'), ((1971, 1987), 'os.makedirs', 'os.makedirs', (['dst'], {}), '(dst)\n', (1982, 1987), False, 'import os\n'), ((2221, 2239), 'os.path.isdir', 'os.path.isdir', (['dst'], {}), '(dst)\n', (2234, 2239), False, 'import os\n'), ((2327, 2347), 'os.path.lexists', 'os.path.lexists', (['dst'], {}), '(dst)\n', (2342, 2347), False, 'import os\n'), ((2473, 2494), 'shutil.copy', 'shutil.copy', (['src', 'dst'], {}), '(src, dst)\n', (2484, 2494), False, 'import shutil\n'), ((1756, 1774), 'os.path.isdir', 'os.path.isdir', (['dst'], {}), '(dst)\n', (1769, 1774), False, 'import os\n'), ((1852, 1866), 'os.unlink', 'os.unlink', (['dst'], {}), '(dst)\n', (1861, 1866), False, 'import os\n'), ((1887, 1903), 'os.makedirs', 'os.makedirs', (['dst'], {}), '(dst)\n', (1898, 1903), False, 'import os\n'), ((2069, 2089), 'os.path.dirname', 'os.path.dirname', (['dst'], {}), '(dst)\n', (2084, 2089), False, 'import os\n'), ((2120, 2140), 'os.path.dirname', 'os.path.dirname', (['dst'], {}), '(dst)\n', (2135, 2140), False, 'import os\n'), ((2369, 2383), 'os.unlink', 'os.unlink', (['dst'], {}), '(dst)\n', (2378, 2383), False, 'import os\n'), ((2411, 2432), 'os.path.realpath', 'os.path.realpath', (['src'], {}), '(src)\n', (2427, 2432), False, 'import os\n'), ((3134, 3155), 'os.path.basename', 'os.path.basename', (['src'], {}), '(src)\n', (3150, 3155), False, 'import os\n'), ((2285, 2306), 'os.path.basename', 'os.path.basename', (['src'], {}), '(src)\n', (2301, 2306), False, 'import os\n'), ((1465, 1488), 'os.path.join', 'os.path.join', (['suffix', 'f'], {}), '(suffix, f)\n', (1477, 1488), False, 'import os\n'), ((1536, 1552), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1546, 1552), False, 'import os\n')]
|
#from models.baseline_net import BaseNet
import torch
from data_loaders import *
from image_dataloaders import get_dataloaders
from loss import compute_ADD_L1_loss, compute_disentangled_ADD_L1_loss, compute_scaled_disentl_ADD_L1_loss
from rotation_representation import calculate_T_CO_pred
#from models.efficient_net import
from models import fetch_network
import os
from parser_config import get_dict_from_cli
import pickle
import matplotlib.pyplot as plt
from visualization import visualize_examples
from test_model import evaluate_model, validate_model
from torch.utils.tensorboard import SummaryWriter
import time
import datetime
torch.autograd.set_detect_anomaly(True)
def pickle_log_dict(log_dict, logdir):
save_path = os.path.join(logdir, "log_dict.pkl")
with open(save_path, 'wb') as handle:
pickle.dump(log_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
def calculate_eta(start_time, perc_complete):
curr_time = time.time()
sec_since_start = curr_time - start_time
est_total_time = sec_since_start/perc_complete
est_remaining = est_total_time-sec_since_start
return str(datetime.timedelta(seconds=est_remaining))
def save_loss_plot(losses, training_examples, loss_name, logdir):
assert len(losses) == len(training_examples)
fig,ax = plt.subplots()
fig.set_size_inches(9.5, 5.5)
ax.set_title(loss_name)
ax.set_xlabel("Training examples")
ax.set_ylabel(loss_name)
ax.set_yscale('log')
plt.plot(training_examples, losses)
save_path = os.path.join(logdir, loss_name.replace(" ", "-")+".png")
plt.savefig(save_path)
plt.close()
def save_plot_validation_loss(val_data_struct,logdir, loss_name):
fig,ax = plt.subplots()
fig.set_size_inches(9.5, 5.5)
ax.set_title("Validation " + loss_name)
ax.set_xlabel("Training examples")
ax.set_ylabel(loss_name)
ax.set_yscale('log')
train_exs = []
val_losses_arr = []
for (train_ex, val_losses) in val_data_struct:
train_exs.append(train_ex)
val_losses_arr.append(val_losses)
val_losses = np.array(val_losses_arr)
train_exs = np.array(train_exs)
legends = []
for pred_iter in range(val_losses.shape[1]):
legends.append("Pred.iter"+str(pred_iter+1))
iter_val_losses = val_losses[:,pred_iter]
plt.plot(train_exs,iter_val_losses, label="Iter. "+str(pred_iter))
ax.legend(legends)
save_path = os.path.join(logdir, "validation-"+loss_name.replace(" ", "-")+".png")
plt.savefig(save_path)
plt.close()
def logging(model, config, writer, log_dict, logdir, batch_num, train_examples):
log_interval = config["logging"]["log_save_interval"]
if(batch_num%log_interval == 0):
current_loss = log_dict["loss"]["add_l1"][:batch_num]
current_train_ex =log_dict["loss"]["train_ex"][:batch_num]
save_loss_plot(current_loss, current_train_ex, "ADD L1 Loss", logdir)
pickle_log_dict(log_dict, logdir)
save_viz_batches = config["logging"]["save_visualization_at_batches"]
save_viz_every_n_batch = config["logging"]["save_viz_every_n_batch"]
if((batch_num in save_viz_batches) or (batch_num%save_viz_every_n_batch==0 and batch_num!=0)):
save_dir = os.path.join(logdir, "visualizations")
os.makedirs(save_dir, exist_ok=True)
visualize_examples(model, config, "train", show_fig=False, save_dir=save_dir, n_train_examples=train_examples)
visualize_examples(model, config, "val", show_fig=False, save_dir=save_dir, n_train_examples=train_examples)
validation_interval = config["logging"]["validation_interval"]
if(batch_num%validation_interval == 0 and batch_num != 0):
val_ex = config["logging"]["val_examples_from_each_class"]
#loss_dict, mean_losses = evaluate_model(model, config, "train", use_all_examples=False, max_examples_from_each_class=val_ex)
mean_losses = validate_model(model, config, "val")
#log_dict["val_loss_dicts"].append((train_examples, loss_dict))
log_dict["val_loss"].append((train_examples, mean_losses))
pickle_log_dict(log_dict, logdir)
save_plot_validation_loss(log_dict["val_loss"], logdir, "ADD L1 loss")
#tensorboard
iter_dict = {}
for i in range(len(mean_losses)):
writer.add_scalar(f'Validation_ADD_L1_loss/Iter{i}', mean_losses[i], train_examples)
iter_dict[f'Iter{i}'] = mean_losses[i]
writer.add_scalars('Validation_ADD_L1_loss_iters', iter_dict, train_examples)
model.train()
def train(config):
scene_config = config["scene_config"]
# dataset config
model3d_dataset = config["dataset_config"]["model3d_dataset"]
train_classes = config["dataset_config"]["train_classes"]
train_from_imgs = config["dataset_config"]["train_from_images"]
ds_conf = config["dataset_config"]
batch_size = config["train_params"]["batch_size"]
img_ds_name = ds_conf["img_dataset"]
if train_from_imgs:
train_loader, val_loader, test_loader = get_dataloaders(ds_conf, batch_size)
# model load parameters
model_name = config["network"]["backend_network"]
rotation_repr = config["network"]["rotation_representation"]
device = config["train_params"]["device"]
use_pretrained = config["model_io"]["use_pretrained_model"]
model_save_dir = config["model_io"]["model_save_dir"]
os.makedirs(model_save_dir, exist_ok=True)
pretrained_name = config["model_io"]["pretrained_model_name"]
pretrained_path = os.path.join(model_save_dir, pretrained_name)
use_norm_depth = config["advanced"]["use_normalized_depth"]
# model saving
save_every_n_batch = config["model_io"]["batch_model_save_interval"]
model_save_name = config["model_io"]["model_save_name"]
model_save_path = os.path.join(model_save_dir, model_save_name)
cam_intrinsics = config["camera_intrinsics"]
img_size = cam_intrinsics["image_resolution"]
model = fetch_network(model_name, rotation_repr, use_norm_depth, use_pretrained, pretrained_path)
model = model.to(device)
#train params
learning_rate = config["train_params"]["learning_rate"]
opt_name = config["train_params"]["optimizer"]
num_train_batches = config["train_params"]["num_batches_to_train"]
num_sample_verts = config["train_params"]["num_sample_vertices"]
device = config["train_params"]["device"]
loss_fn_name = config["train_params"]["loss"]
# train iteration policy, i.e. determine how many iterations per batch
train_iter_policy_name = config["advanced"]["train_iter_policy"]
policy_argument = config["advanced"]["train_iter_policy_argument"]
if train_iter_policy_name == 'constant':
train_iter_policy = train_iter_policy_constant
elif train_iter_policy_name == 'incremental':
train_iter_policy = train_iter_policy_incremental
else:
assert False
# parallel rendering
use_par_render = config["scene_config"]["use_parallel_rendering"]
if(opt_name == "adam"):
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
elif(opt_name == 'sgd'):
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
else:
assert False
# print training info
print("")
print(" ### TRAINING IS STARTING ### ")
print("Loading backend network", model_name.upper(), "with rotation representation", rotation_repr)
print("Batch size", batch_size, "Learning rate", learning_rate, "Optimizer", opt_name.upper())
print("Training on device", device)
if use_pretrained:
print("Pretrained model is loaded from", pretrained_path)
else:
print("No pretrained model used, training from scratch")
print("The model will be saved to", model_save_path)
if use_norm_depth:
print("The model is trained with the normalized depth from the CAD model (advanced)")
print("")
# logging
log_dict = {}
log_dict["loss"] = {}
log_dict["loss"]["add_l1"] = np.zeros((num_train_batches+1))
log_dict["loss"]["train_ex"] = np.zeros((num_train_batches+1))
log_dict["val_loss_dicts"] = []
log_dict["val_loss"] = []
logdir = config["logging"]["logdir"]
os.makedirs(logdir, exist_ok=True)
writer = SummaryWriter(log_dir=os.path.join("tensorboard", img_ds_name, config["config_name"]))
start_time = time.time()
"""
TRAINING LOOP
"""
train_examples=0
new_batch_num=0
batch_num=0
while(True):
start_time = time.time()
init_imgs, gt_imgs, T_CO_init, T_CO_gt, mesh_verts, mesh_paths, depths, cam_mats = next(iter(train_loader))
init_imgs = init_imgs.numpy()
gt_imgs = gt_imgs.numpy()
depths = depths.numpy()
T_CO_gt = T_CO_gt.to(device)
mesh_verts = mesh_verts.to(device)
#cam_mats = get_camera_mat_tensor(cam_intrinsics, batch_size).to(device)
T_CO_pred = T_CO_init # current pred is initial
train_iterations = train_iter_policy(batch_num, policy_argument)
for j in range(train_iterations):
optimizer.zero_grad()
if(j==0 and train_from_imgs):
pred_imgs = init_imgs
T_CO_pred = T_CO_pred.to(device)
else:
pred_imgs, depths = render_batch(T_CO_pred, mesh_paths, cam_mats, img_size, use_par_render)
T_CO_pred = torch.tensor(T_CO_pred).to(device)
model_input = prepare_model_input(pred_imgs, gt_imgs, depths, use_norm_depth).to(device)
model_output = model(model_input)
T_CO_pred_new = calculate_T_CO_pred(model_output, T_CO_pred, rotation_repr, cam_mats)
addl1_loss = compute_ADD_L1_loss(T_CO_gt, T_CO_pred_new, mesh_verts)
loss_handler(loss_fn_name, addl1_loss, T_CO_pred_new, T_CO_pred, T_CO_gt, mesh_verts)
optimizer.step()
T_CO_pred = T_CO_pred_new.detach().cpu().numpy()
# Printing and logging
elapsed = time.time() - start_time
print(f'ADD L1 loss for train batch {batch_num}, with {new_batch_num} new batches, train iter {j}: {addl1_loss.item():.4f}, batch time: {elapsed:.3f}')
log_dict["loss"]["add_l1"][batch_num] = addl1_loss.item()
log_dict["loss"]["train_ex"][batch_num] = train_examples
logging(model, config, writer, log_dict, logdir, batch_num, train_examples)
if batch_num != 0 and batch_num%save_every_n_batch == 0:
writer.add_scalar("ADD_L1_loss", addl1_loss.item(), train_examples)
perc_complete = (batch_num*1.0)/num_train_batches
print("Saving model to", model_save_path)
print(f'Trained {batch_num} of {num_train_batches}. Training {(perc_complete*100.0):.3f} % complete.')
print(f'Estimated remaining training time (hour,min,sec): {calculate_eta(start_time, perc_complete)}')
torch.save(model.state_dict(), model_save_path)
if batch_num >= num_train_batches:
break
train_examples=train_examples+batch_size
batch_num += 1
new_batch_num += 1
if batch_num >= num_train_batches:
break
"""
END TRAIN LOOP
"""
def loss_handler(loss_fn_name, addl1_loss, T_CO_pred_new, T_CO_pred, T_CO_gt, mesh_verts):
if loss_fn_name == "add_l1":
addl1_loss.backward()
elif loss_fn_name == "add_l1_disentangled":
disentl_loss = compute_disentangled_ADD_L1_loss(T_CO_gt, T_CO_pred_new, mesh_verts)
disentl_loss.backward()
elif loss_fn_name == "add_l1_disentl_scaled":
sc_disentl_loss = compute_scaled_disentl_ADD_L1_loss(T_CO_pred, T_CO_pred_new, T_CO_gt, mesh_verts)
sc_disentl_loss.backward()
def train_iter_policy_constant(current_batch, num):
return num
def train_iter_policy_incremental(current_batch, increments_tuple_list):
# input must have form [(300, 2), (1000,3), (3000,4)]
new_train_iters = 1
for (batch_num, train_iters) in increments_tuple_list:
if (current_batch>batch_num):
new_train_iters = train_iters
return new_train_iters
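# Example: with [(300, 2), (1000, 3), (3000, 4)], batches 0-300 train 1 iteration per batch,
# batches 301-1000 train 2, batches 1001-3000 train 3, and later batches train 4.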
if __name__ == '__main__':
config = get_dict_from_cli()
train(config)
|
[
"loss.compute_ADD_L1_loss",
"pickle.dump",
"loss.compute_disentangled_ADD_L1_loss",
"loss.compute_scaled_disentl_ADD_L1_loss",
"torch.autograd.set_detect_anomaly",
"os.path.join",
"matplotlib.pyplot.close",
"image_dataloaders.get_dataloaders",
"datetime.timedelta",
"matplotlib.pyplot.subplots",
"parser_config.get_dict_from_cli",
"test_model.validate_model",
"models.fetch_network",
"visualization.visualize_examples",
"os.makedirs",
"matplotlib.pyplot.plot",
"rotation_representation.calculate_T_CO_pred",
"time.time",
"torch.tensor",
"matplotlib.pyplot.savefig"
] |
[((636, 675), 'torch.autograd.set_detect_anomaly', 'torch.autograd.set_detect_anomaly', (['(True)'], {}), '(True)\n', (669, 675), False, 'import torch\n'), ((732, 768), 'os.path.join', 'os.path.join', (['logdir', '"""log_dict.pkl"""'], {}), "(logdir, 'log_dict.pkl')\n", (744, 768), False, 'import os\n'), ((946, 957), 'time.time', 'time.time', ([], {}), '()\n', (955, 957), False, 'import time\n'), ((1297, 1311), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1309, 1311), True, 'import matplotlib.pyplot as plt\n'), ((1471, 1506), 'matplotlib.pyplot.plot', 'plt.plot', (['training_examples', 'losses'], {}), '(training_examples, losses)\n', (1479, 1506), True, 'import matplotlib.pyplot as plt\n'), ((1584, 1606), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_path'], {}), '(save_path)\n', (1595, 1606), True, 'import matplotlib.pyplot as plt\n'), ((1611, 1622), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1620, 1622), True, 'import matplotlib.pyplot as plt\n'), ((1703, 1717), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1715, 1717), True, 'import matplotlib.pyplot as plt\n'), ((2496, 2518), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_path'], {}), '(save_path)\n', (2507, 2518), True, 'import matplotlib.pyplot as plt\n'), ((2523, 2534), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2532, 2534), True, 'import matplotlib.pyplot as plt\n'), ((5400, 5442), 'os.makedirs', 'os.makedirs', (['model_save_dir'], {'exist_ok': '(True)'}), '(model_save_dir, exist_ok=True)\n', (5411, 5442), False, 'import os\n'), ((5531, 5576), 'os.path.join', 'os.path.join', (['model_save_dir', 'pretrained_name'], {}), '(model_save_dir, pretrained_name)\n', (5543, 5576), False, 'import os\n'), ((5815, 5860), 'os.path.join', 'os.path.join', (['model_save_dir', 'model_save_name'], {}), '(model_save_dir, model_save_name)\n', (5827, 5860), False, 'import os\n'), ((5979, 6072), 'models.fetch_network', 'fetch_network', (['model_name', 'rotation_repr', 'use_norm_depth', 'use_pretrained', 'pretrained_path'], {}), '(model_name, rotation_repr, use_norm_depth, use_pretrained,\n pretrained_path)\n', (5992, 6072), False, 'from models import fetch_network\n'), ((8244, 8278), 'os.makedirs', 'os.makedirs', (['logdir'], {'exist_ok': '(True)'}), '(logdir, exist_ok=True)\n', (8255, 8278), False, 'import os\n'), ((8406, 8417), 'time.time', 'time.time', ([], {}), '()\n', (8415, 8417), False, 'import time\n'), ((12265, 12284), 'parser_config.get_dict_from_cli', 'get_dict_from_cli', ([], {}), '()\n', (12282, 12284), False, 'from parser_config import get_dict_from_cli\n'), ((819, 882), 'pickle.dump', 'pickle.dump', (['log_dict', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(log_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (830, 882), False, 'import pickle\n'), ((1120, 1161), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'est_remaining'}), '(seconds=est_remaining)\n', (1138, 1161), False, 'import datetime\n'), ((3242, 3280), 'os.path.join', 'os.path.join', (['logdir', '"""visualizations"""'], {}), "(logdir, 'visualizations')\n", (3254, 3280), False, 'import os\n'), ((3289, 3325), 'os.makedirs', 'os.makedirs', (['save_dir'], {'exist_ok': '(True)'}), '(save_dir, exist_ok=True)\n', (3300, 3325), False, 'import os\n'), ((3334, 3449), 'visualization.visualize_examples', 'visualize_examples', (['model', 'config', '"""train"""'], {'show_fig': '(False)', 'save_dir': 'save_dir', 'n_train_examples': 'train_examples'}), "(model, config, 'train', 
show_fig=False, save_dir=\n save_dir, n_train_examples=train_examples)\n", (3352, 3449), False, 'from visualization import visualize_examples\n'), ((3453, 3565), 'visualization.visualize_examples', 'visualize_examples', (['model', 'config', '"""val"""'], {'show_fig': '(False)', 'save_dir': 'save_dir', 'n_train_examples': 'train_examples'}), "(model, config, 'val', show_fig=False, save_dir=save_dir,\n n_train_examples=train_examples)\n", (3471, 3565), False, 'from visualization import visualize_examples\n'), ((3916, 3952), 'test_model.validate_model', 'validate_model', (['model', 'config', '"""val"""'], {}), "(model, config, 'val')\n", (3930, 3952), False, 'from test_model import evaluate_model, validate_model\n'), ((5042, 5078), 'image_dataloaders.get_dataloaders', 'get_dataloaders', (['ds_conf', 'batch_size'], {}), '(ds_conf, batch_size)\n', (5057, 5078), False, 'from image_dataloaders import get_dataloaders\n'), ((8550, 8561), 'time.time', 'time.time', ([], {}), '()\n', (8559, 8561), False, 'import time\n'), ((8319, 8382), 'os.path.join', 'os.path.join', (['"""tensorboard"""', 'img_ds_name', "config['config_name']"], {}), "('tensorboard', img_ds_name, config['config_name'])\n", (8331, 8382), False, 'import os\n'), ((9643, 9712), 'rotation_representation.calculate_T_CO_pred', 'calculate_T_CO_pred', (['model_output', 'T_CO_pred', 'rotation_repr', 'cam_mats'], {}), '(model_output, T_CO_pred, rotation_repr, cam_mats)\n', (9662, 9712), False, 'from rotation_representation import calculate_T_CO_pred\n'), ((9738, 9793), 'loss.compute_ADD_L1_loss', 'compute_ADD_L1_loss', (['T_CO_gt', 'T_CO_pred_new', 'mesh_verts'], {}), '(T_CO_gt, T_CO_pred_new, mesh_verts)\n', (9757, 9793), False, 'from loss import compute_ADD_L1_loss, compute_disentangled_ADD_L1_loss, compute_scaled_disentl_ADD_L1_loss\n'), ((11535, 11603), 'loss.compute_disentangled_ADD_L1_loss', 'compute_disentangled_ADD_L1_loss', (['T_CO_gt', 'T_CO_pred_new', 'mesh_verts'], {}), '(T_CO_gt, T_CO_pred_new, mesh_verts)\n', (11567, 11603), False, 'from loss import compute_ADD_L1_loss, compute_disentangled_ADD_L1_loss, compute_scaled_disentl_ADD_L1_loss\n'), ((10041, 10052), 'time.time', 'time.time', ([], {}), '()\n', (10050, 10052), False, 'import time\n'), ((11712, 11797), 'loss.compute_scaled_disentl_ADD_L1_loss', 'compute_scaled_disentl_ADD_L1_loss', (['T_CO_pred', 'T_CO_pred_new', 'T_CO_gt', 'mesh_verts'], {}), '(T_CO_pred, T_CO_pred_new, T_CO_gt,\n mesh_verts)\n', (11746, 11797), False, 'from loss import compute_ADD_L1_loss, compute_disentangled_ADD_L1_loss, compute_scaled_disentl_ADD_L1_loss\n'), ((9432, 9455), 'torch.tensor', 'torch.tensor', (['T_CO_pred'], {}), '(T_CO_pred)\n', (9444, 9455), False, 'import torch\n')]
|
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import seaborn as sns
import numpy as np
from misc import set_size
from scipy import stats
from scipy.interpolate import interp1d
from pandas.plotting import table
import statsmodels.api as sm
df_knolls_grund = pd.read_csv("data-set\knolls_grund.csv", sep=";", parse_dates=["Datum Tid (UTC)"], index_col="Datum Tid (UTC)", usecols = ['Datum Tid (UTC)','Havstemperatur'])
df_huvudskar = pd.read_csv("data-set\huvudskar.csv", sep=";", parse_dates=["Datum Tid (UTC)"], index_col="Datum Tid (UTC)")
df_huvudskar = df_huvudskar.loc[df_huvudskar["Matdjup"]==1]
df_huvudskar = df_huvudskar.drop(columns=["Kvalitet", "Matdjup"])
df_finngrundet = pd.read_csv("data-set/finngrundet.csv", sep=";", parse_dates=["Datum Tid (UTC)"], index_col="Datum Tid (UTC)", usecols = ['Datum Tid (UTC)','Havstemperatur'])
start, end = '2020-09-28', '2020-11-29'
df_finngrundet = df_finngrundet.loc[start:end]
df_huvudskar = df_huvudskar.loc[start:end]
df_knolls_grund = df_knolls_grund.loc[start:end]
smhi_mean = pd.concat([df_knolls_grund, df_huvudskar, df_finngrundet]).groupby(level=0).mean()
smhi_mean = smhi_mean["Havstemperatur"].rolling(3, center=True).mean()
df1 = pd.read_csv("data-set/sst.csv", sep=",", parse_dates=["Datum Tid (UTC)"], index_col="Datum Tid (UTC)")
df1.sort_values(by=['Datum Tid (UTC)'], inplace=True)
df1 = df1.loc[start:end]
df1['month'] = [d.strftime('%b') for d in df1.index]
df1['week'] = [d.strftime('%U') for d in df1.index]
#print(smhi_mean)
#temp_bias = 3.35
#df1["Havstemperatur"] = df1["Havstemperatur"] + temp_bias
def bias(df):
df_1d = df["Havstemperatur"].resample('D').mean()
smhi_1d = smhi_mean.resample('D').mean()  # smhi_mean is already a Series after the rolling-mean step above
concatTemp = pd.concat([df_1d, smhi_1d]).groupby(level=0)
print(concatTemp.head(20))
print(concatTemp)
def data_comp(df):
pd.set_option("display.max_rows", None, "display.max_columns", None)
df_1d = df["Havstemperatur"].resample('D').mean()
smhi_1d = smhi_mean.resample('D').mean()
df_1d, smhi_1d = df_1d.align(smhi_1d)
print(df_1d)
#df_1d = df_1d.interpolate(method='time')
#diff = smhi_1d - df_1d
#slope = pd.Series(np.gradient(df_1d.values), df_1d.index, name='slope')
#print(slope.mean())
def smhi():
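# Plot the 0.5 m sea-temperature series from the three SMHI buoys together with their combined mean (the reference series).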
df_finngrundet.reset_index(inplace=True)
df_huvudskar.reset_index(inplace=True)
df_knolls_grund.reset_index(inplace=True)
#smhi_7d.reset_index(inplace=True)
fig, ax = plt.subplots()
ax.plot(df_finngrundet["Datum Tid (UTC)"], df_finngrundet["Havstemperatur"],linestyle='--', label='Finngrundet')
ax.plot(df_huvudskar["Datum Tid (UTC)"], df_huvudskar["Havstemperatur"],linestyle='--', label='Huvudskär')
ax.plot(df_knolls_grund["Datum Tid (UTC)"], df_knolls_grund["Havstemperatur"],linestyle='--', label='Knolls grund')
ax.plot(smhi_mean.loc[start:end], label='Medelvärde (Referensdata)')
ax.legend()
ax.set_ylabel('Temperatur [°C]', fontweight='demi')
ax.yaxis.set_label_position("right")
ax.set_xlabel("Vecka", fontweight='demi')
ax.set_title("Temperaturutveckling på 0,5 m - SMHIs bojar", fontweight='demi')
ax.xaxis.set_major_locator(mdates.WeekdayLocator(byweekday=0))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%U'))
ax.set_ylim(ymin=4)
def seasonality(df):
end = "2020-11-28"
df = df.loc[:end]
sns.boxplot(data=df, x='week', y="Havstemperatur").set(ylabel= 'Temperatur [°C]', xlabel="Vecka")
plt.ylim(4)
def histogram(df):
df["Havstemperatur"].hist(bins=11, range=(0,11))
plt.xlabel("Temperatur [°C]")
def observations(df):
obs = df.groupby(df.index.date).count()
#print(obs["Havstemperatur"].std())
obs["Havstemperatur"].hist(bins=24, range=(0,12))
#df.groupby([df.index.date,]).count().plot(kind='bar')
plt.ylabel("Frekvens")
plt.xlabel("Observation/dag")
def average(df):
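# Compare the raw satellite SST observations with their 5-day rolling and weekly means against the SMHI reference series.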
df_weekly_mean = df["Havstemperatur"].resample('W', label='left', loffset=pd.DateOffset(days=4.5)).mean()
smhi_weekly_mean = smhi_mean.resample('W', label='left', loffset=pd.DateOffset(days=4.5)).mean()
df_1d = df["Havstemperatur"].resample('D').mean()
df_5d = df["Havstemperatur"].rolling("5d").mean()
df_std = smhi_mean.resample("D").std().mean()
print(df_weekly_mean)
# Plot daily and weekly resampled time series together
fig, ax = plt.subplots()
ax.plot(df.loc[start:end, 'Havstemperatur'], marker='.', linestyle='None', alpha=0.5, label='Observation: $SST_{skin}$')
ax.plot(df_5d.loc[start:end], marker='.', linestyle='-', label='5-d rullande medelvärde')
#ax.plot(intdf.loc[start:end], marker='.', linestyle='-', label='Dagligt medelvärde')
ax.plot(df_weekly_mean.loc[start:end], marker='D', linestyle='--', markersize=7, label='Veckovis medelvärde')
ax.plot(smhi_mean.loc[start:end], label="Referensdata: 0,5 m (SMHI)")
#ax.fill_between(df_std.index, df_7d - 2 * df_std, df_7d + 2 * df_std, color='b', alpha=0.2)
ax.set_ylabel('Temperatur [°C]', fontweight='demi')
ax.yaxis.set_label_position("right")
ax.set_xlabel("Vecka", fontweight='demi')
ax.xaxis.set_major_locator(mdates.WeekdayLocator(byweekday=0))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%U'))
ax.set_title('Havstemperaturutveckling i Östersjöområdet', fontweight='demi')
ax.set_ylim(ymin=4)
ax.legend()
def pixel_average(df):
px_std = df.std(axis=0)["Pixlar"]
px_mean = df.mean(axis=0)["Pixlar"]
df_px_std = df[df["Pixlar"] < (px_mean-px_std)]
df.reset_index(inplace=True)
df_px_std.reset_index(inplace=True)
# Plot daily and weekly resampled time series together
#fig, ax = plt.subplots()
df.plot.scatter("Datum Tid (UTC)", "Havstemperatur", c="Pixlar", colormap="inferno", label='Observation')
ax = df.plot.scatter("Datum Tid (UTC)", "Havstemperatur", color='Red', label='Observation')
df_px_std.plot.scatter("Datum Tid (UTC)", "Havstemperatur", label='Observation', ax=ax)
def satellites(df):
N15 = df.loc[df['Satellit'] == "NOAA 15"]
N18 = df.loc[df['Satellit'] == "NOAA 18"]
N19 = df.loc[df['Satellit'] == "NOAA 19"]
print(N15["Havstemperatur"].mean())
print(N18["Havstemperatur"].mean())
print(N19["Havstemperatur"].mean())
fig, ax = plt.subplots()
ax.plot(N15.loc[start:end, "Havstemperatur"].rolling("5d").mean(), marker=".", label=("NOAA 15"), linestyle="-")
ax.plot(N18.loc[start:end, "Havstemperatur"].rolling("5d").mean(), marker=".", label=("NOAA 18"), linestyle="-")
ax.plot(N19.loc[start:end, "Havstemperatur"].rolling("5d").mean(), marker=".", label=("NOAA 19"), linestyle="-")
#ax.plot(df.loc[start:end, "Havstemperatur"].rolling("5d").mean(), label=("Kombinerade observationer"), linestyle="-")
ax.set_ylabel('Temperatur [°C]')
ax.set_xlabel("Vecka")
ax.xaxis.set_major_locator(mdates.WeekdayLocator(byweekday=0))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%U'))
ax.set_ylim(ymin=4)
ax.legend()
def dist(df):
#sns.displot(df, x="Pixlar", binwidth=1000, kde=True) #, kind="kde"
#sns.distplot(df["Pixlar"], rug=True, kde=True)
#sns.displot(df, x="Pixlar", y="Havstemperatur")
# Note the difference in argument order
model = sm.OLS(df["Pixlar"], df["Elevation"]).fit()
predictions = model.predict(df["Elevation"]) # predict with the exogenous variable the model was fitted on
# Print out the statistics
print(model.summary())
""" ax=sns.jointplot(x="Elevation", y='Pixlar', data=df, kind="reg")
ax.ax_joint.set_ylabel("Pixlar")
ax.ax_joint.set_xlabel("Elevation [°]")
ax.ax_marg_x.set_xlim(0, 90) """
tex_fonts = {
# Use LaTeX to write all text
#"text.usetex": False,
"font.family": "sans-serif",
"font.sans-serif": "Avenir Next LT Pro",
"font.weight": "demi",
# Use 10pt font in plots, to match 10pt font in document
"axes.labelsize": 12,
"font.size": 12,
# Make the legend/label fonts a little smaller
"legend.fontsize": 10,
"xtick.labelsize": 10,
"ytick.labelsize": 10
}
sns.set(rc={'figure.figsize':(set_size(600))})
sns.set_theme(style="whitegrid")
#plt.rcParams.update(tex_fonts)
plt.rcParams['font.family'] = 'sans-serif'
plt.rcParams['font.sans-serif'] = 'Avenir Next LT Pro'
#plt.rcParams['font.weight'] = 'demi'
#plt.rcParams["figure.figsize"] = set_size(390)
#seasonality(df1)
#histogram(df1)
average(df1)
#satellites(df1)
#regression(df1)
#dist(df1)
#pixel_average(df1)
#smhi()
#observations(df1)
#calendar(df1)
#bias(df1)
#data_comp(df1)
#plt.tight_layout(pad=0.0,h_pad=0.0,w_pad=0.0)
plt.tight_layout()
#plt.show()
#plt.savefig("exported/bias.svg", format="svg")
plt.savefig("exported/6.png", dpi=300)
|
[
"seaborn.set_theme",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.ylim",
"pandas.read_csv",
"statsmodels.api.OLS",
"misc.set_size",
"matplotlib.pyplot.subplots",
"matplotlib.dates.WeekdayLocator",
"matplotlib.dates.DateFormatter",
"seaborn.boxplot",
"matplotlib.pyplot.ylabel",
"pandas.DateOffset",
"matplotlib.pyplot.xlabel",
"pandas.set_option",
"pandas.concat",
"matplotlib.pyplot.savefig"
] |
[((299, 468), 'pandas.read_csv', 'pd.read_csv', (['"""data-set\\\\knolls_grund.csv"""'], {'sep': '""";"""', 'parse_dates': "['Datum Tid (UTC)']", 'index_col': '"""Datum Tid (UTC)"""', 'usecols': "['Datum Tid (UTC)', 'Havstemperatur']"}), "('data-set\\\\knolls_grund.csv', sep=';', parse_dates=[\n 'Datum Tid (UTC)'], index_col='Datum Tid (UTC)', usecols=[\n 'Datum Tid (UTC)', 'Havstemperatur'])\n", (310, 468), True, 'import pandas as pd\n'), ((475, 589), 'pandas.read_csv', 'pd.read_csv', (['"""data-set\\\\huvudskar.csv"""'], {'sep': '""";"""', 'parse_dates': "['Datum Tid (UTC)']", 'index_col': '"""Datum Tid (UTC)"""'}), "('data-set\\\\huvudskar.csv', sep=';', parse_dates=[\n 'Datum Tid (UTC)'], index_col='Datum Tid (UTC)')\n", (486, 589), True, 'import pandas as pd\n'), ((729, 896), 'pandas.read_csv', 'pd.read_csv', (['"""data-set/finngrundet.csv"""'], {'sep': '""";"""', 'parse_dates': "['Datum Tid (UTC)']", 'index_col': '"""Datum Tid (UTC)"""', 'usecols': "['Datum Tid (UTC)', 'Havstemperatur']"}), "('data-set/finngrundet.csv', sep=';', parse_dates=[\n 'Datum Tid (UTC)'], index_col='Datum Tid (UTC)', usecols=[\n 'Datum Tid (UTC)', 'Havstemperatur'])\n", (740, 896), True, 'import pandas as pd\n'), ((1242, 1348), 'pandas.read_csv', 'pd.read_csv', (['"""data-set/sst.csv"""'], {'sep': '""","""', 'parse_dates': "['Datum Tid (UTC)']", 'index_col': '"""Datum Tid (UTC)"""'}), "('data-set/sst.csv', sep=',', parse_dates=['Datum Tid (UTC)'],\n index_col='Datum Tid (UTC)')\n", (1253, 1348), True, 'import pandas as pd\n'), ((8180, 8212), 'seaborn.set_theme', 'sns.set_theme', ([], {'style': '"""whitegrid"""'}), "(style='whitegrid')\n", (8193, 8212), True, 'import seaborn as sns\n'), ((8659, 8677), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8675, 8677), True, 'import matplotlib.pyplot as plt\n'), ((8738, 8776), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""exported/6.png"""'], {'dpi': '(300)'}), "('exported/6.png', dpi=300)\n", (8749, 8776), True, 'import matplotlib.pyplot as plt\n'), ((1899, 1967), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', 'None', '"""display.max_columns"""', 'None'], {}), "('display.max_rows', None, 'display.max_columns', None)\n", (1912, 1967), True, 'import pandas as pd\n'), ((2519, 2533), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2531, 2533), True, 'import matplotlib.pyplot as plt\n'), ((3529, 3540), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(4)'], {}), '(4)\n', (3537, 3540), True, 'import matplotlib.pyplot as plt\n'), ((3627, 3656), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Temperatur [°C]"""'], {}), "('Temperatur [°C]')\n", (3637, 3656), True, 'import matplotlib.pyplot as plt\n'), ((3881, 3903), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frekvens"""'], {}), "('Frekvens')\n", (3891, 3903), True, 'import matplotlib.pyplot as plt\n'), ((3908, 3937), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Observation/dag"""'], {}), "('Observation/dag')\n", (3918, 3937), True, 'import matplotlib.pyplot as plt\n'), ((4429, 4443), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4441, 4443), True, 'import matplotlib.pyplot as plt\n'), ((6358, 6372), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6370, 6372), True, 'import matplotlib.pyplot as plt\n'), ((3235, 3269), 'matplotlib.dates.WeekdayLocator', 'mdates.WeekdayLocator', ([], {'byweekday': '(0)'}), '(byweekday=0)\n', (3256, 3269), True, 'import matplotlib.dates as mdates\n'), ((3304, 3330), 
'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%U"""'], {}), "('%U')\n", (3324, 3330), True, 'import matplotlib.dates as mdates\n'), ((5219, 5253), 'matplotlib.dates.WeekdayLocator', 'mdates.WeekdayLocator', ([], {'byweekday': '(0)'}), '(byweekday=0)\n', (5240, 5253), True, 'import matplotlib.dates as mdates\n'), ((5288, 5314), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%U"""'], {}), "('%U')\n", (5308, 5314), True, 'import matplotlib.dates as mdates\n'), ((6948, 6982), 'matplotlib.dates.WeekdayLocator', 'mdates.WeekdayLocator', ([], {'byweekday': '(0)'}), '(byweekday=0)\n', (6969, 6982), True, 'import matplotlib.dates as mdates\n'), ((7017, 7043), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%U"""'], {}), "('%U')\n", (7037, 7043), True, 'import matplotlib.dates as mdates\n'), ((1773, 1800), 'pandas.concat', 'pd.concat', (['[df_1d, smhi_1d]'], {}), '([df_1d, smhi_1d])\n', (1782, 1800), True, 'import pandas as pd\n'), ((3427, 3477), 'seaborn.boxplot', 'sns.boxplot', ([], {'data': 'df', 'x': '"""week"""', 'y': '"""Havstemperatur"""'}), "(data=df, x='week', y='Havstemperatur')\n", (3438, 3477), True, 'import seaborn as sns\n'), ((7334, 7371), 'statsmodels.api.OLS', 'sm.OLS', (["df['Pixlar']", "df['Elevation']"], {}), "(df['Pixlar'], df['Elevation'])\n", (7340, 7371), True, 'import statsmodels.api as sm\n'), ((8163, 8176), 'misc.set_size', 'set_size', (['(600)'], {}), '(600)\n', (8171, 8176), False, 'from misc import set_size\n'), ((1081, 1139), 'pandas.concat', 'pd.concat', (['[df_knolls_grund, df_huvudskar, df_finngrundet]'], {}), '([df_knolls_grund, df_huvudskar, df_finngrundet])\n', (1090, 1139), True, 'import pandas as pd\n'), ((4034, 4057), 'pandas.DateOffset', 'pd.DateOffset', ([], {'days': '(4.5)'}), '(days=4.5)\n', (4047, 4057), True, 'import pandas as pd\n'), ((4135, 4158), 'pandas.DateOffset', 'pd.DateOffset', ([], {'days': '(4.5)'}), '(days=4.5)\n', (4148, 4158), True, 'import pandas as pd\n')]
|
mat = [
'сука', "блять", "пиздец", "нахуй", "<NAME>", "епта"]
import random
import re
# strong_emotions = re.sub('[^а-я]', ' ', open('strong_emotions').read().lower()).split()
def process(txt, ch):
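# Keep the first and last word of txt unchanged; each remaining word is replaced by a random expletive
# with probability ch, unless the preceding word ends a sentence.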
words = txt.split(" ")
nxt = words[0] + ' '
i = 1
while i < len(words) - 1:
if words[i - 1][-1] != '.' and random.random() < ch:
nxt += random.choice(mat) + " "
else:
nxt += words[i] + " "
i += 1
nxt += words[-1]
return nxt
|
[
"random.random",
"random.choice"
] |
[((337, 352), 'random.random', 'random.random', ([], {}), '()\n', (350, 352), False, 'import random\n'), ((378, 396), 'random.choice', 'random.choice', (['mat'], {}), '(mat)\n', (391, 396), False, 'import random\n')]
|
import fnmatch
import executePythonResources
import writeEndPointsFile
import executeResources
import changeLogGenerator
import sys
import os
import shutil
from datetime import datetime
import json
import git # if git module is not found, use 'pip install gitpython'
resource_dict = {
'FC Networks': 'fc_networks',
'FCoE Networks': 'fcoe_networks',
'Ethernet Networks': 'ethernet_networks',
'Network Sets': 'network_sets',
'Connection Templates': 'connection_templates',
'Certificates Server': 'certificates_server',
'Enclosures': 'enclosures',
'Enclosure Groups': 'enclosure_groups',
'Firmware Drivers': 'firmware_drivers',
'Hypervisor Cluster Profiles': 'hypervisor_cluster_profiles',
'Hypervisor Managers': 'hypervisor_managers',
'Interconnects': 'interconnects',
'Interconnect Types': 'interconnect_types',
'Logical Enclosures': 'logical_enclosures',
'Logical Interconnects': 'logical_interconnects',
'Logical Interconnect Groups': 'logical_interconnect_groups',
'Scopes': 'scopes',
'Server Hardware': 'server_hardware',
'Server Hardware Types': 'server_hardware_types',
'Server Profiles': 'server_profiles',
'Server Profile Templates': 'server_profile_templates',
'Storage Pools': 'storage_pools',
'Storage Systems': 'storage_systems',
'Storage Volume Templates': 'storage_volume_templates',
'Storage Volume Attachments': 'storage_volume_attachments',
'Volumes': 'volumes',
'Tasks': 'tasks',
'Uplink Sets': 'uplink_sets'
}
class LogWriter(object):
"""
Show logs on the console while also flushing them to a log file.
"""
def __init__(self, filename):
self.stdout = sys.stdout
self.file = filename
def write(self, obj):
self.file.write(obj)
self.stdout.write(obj)
self.file.flush()
def flush(self):
self.stdout.flush()
self.file.flush()
def clean_up_files():
print("---------Removing all log files---------------")
for rootDir, subdirs, filenames in os.walk(os.getcwd()):
for filename in fnmatch.filter(filenames, 'logfile*.log'):
try:
os.remove(os.path.join(rootDir, filename))
except OSError:
print("Error while deleting file")
print("---------Completed removing log files--------------")
try:
folder_names = ['oneview-python', 'oneview-ansible-collections','oneview-golang','oneview-terraform-provider']
for i in range(len(folder_names)):
shutil.rmtree(os.getcwd() + '/' + folder_names[i])  # directories need rmtree rather than os.remove
except Exception as e:
print("Error {} occurred while deleting folder {}".format(str(e), folder_names[i]))
def createGitRepositories(selected_sdk):
git_url = 'https://github.com/HewlettPackard/oneview' + str(selected_sdk)
repo = git.Repo.clone_from(git_url,
os.getcwd() + '/' + str(selected_sdk) + '/')
return repo
def createFeatureBranch(repo, branchName):
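# Create and check out a new local branch; if the name already exists on origin, recurse with a numeric suffix until a free name is found.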
remote_branches = []
num = 0
for ref in repo.git.branch('-r').split('\n'):
remote_branches.append(ref.replace(" ", ""))
branch_present = True if 'origin/' + branchName in remote_branches else False
if branch_present:
branchName = branchName + '_' + str(num)
num = num + 1
createFeatureBranch(repo, branchName)
else:
new_branch = repo.create_head(branchName)
new_branch.checkout()
return
def updateJsonFile():
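# Rewrite auto_config.json so that its "oneview_ip" field holds the address read from ipaddress.txt.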
jsonFile = open("./auto_config.json", "r") # Open the JSON file for reading
data = json.load(jsonFile) # Read the JSON into the buffer
jsonFile.close() # Close the JSON file
ipAddressFile = open('ipaddress.txt', 'r')
oneview_ip = ipAddressFile.read()
## Working with buffered content
tmp = data["oneview_ip"]
data["oneview_ip"] = oneview_ip
## Save our changes to JSON file
jsonFile = open("auto_config.json", "w+")
jsonFile.write(json.dumps(data))
jsonFile.close()
if __name__ == '__main__':
updateJsonFile()
selected_sdk = sys.argv[1]
api_version = sys.argv[2]
#repo = createGitRepositories(selected_sdk)
#branchName = createFeatureBranch(repo, 'feature')
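# NOTE: the repo.git.add/commit/push calls further below rely on `repo` and `branchName` from the two commented-out lines above.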
print("---------Started executing files---------")
# LOG_FILENAME = datetime.now().strftime('logfile_%H_%M_%d_%m_%Y.log')
# f = open(LOG_FILENAME, 'w')
# original = sys.stdout
# sys.stdout = LogWriter(f)
resources_executor = executeResources.executeResources(selected_sdk, api_version)
executed_files = resources_executor.execute(resource_dict)
# sys.stdout = original
if executed_files:
print("---------Started writing to CHANGELOG.md---------")
changelog_generator = changeLogGenerator.changeLogGenerator(resource_dict, api_version)
changelog_generator.write_data()
print("---------Completed writing to CHANGELOG.md---------")
endpointsfile_writer = writeEndPointsFile.writeEndpointsFile('## HPE OneView', resource_dict, api_version)
endpointsfile_writer.main()
repo.git.add(A=True)
repo.git.commit('-m', 'PR for release changes #pr',
author='<EMAIL>') # to commit changes
repo.git.push('--set-upstream', 'origin', branchName)
repo.close()
os.chdir(path) # Navigate to parent directory
# Delete git cloned directory as cleanup
if os.path.exists(os.getcwd() + '/' + str(selected_sdk)):
shutil.rmtree(os.getcwd() + '/' + str(selected_sdk) + '/', ignore_errors=True)
# clean_up_files()
|
[
"fnmatch.filter",
"json.load",
"executeResources.executeResources",
"writeEndPointsFile.writeEndpointsFile",
"os.getcwd",
"changeLogGenerator.changeLogGenerator",
"json.dumps",
"os.path.join",
"os.chdir"
] |
[((3830, 3849), 'json.load', 'json.load', (['jsonFile'], {}), '(jsonFile)\n', (3839, 3849), False, 'import json\n'), ((4719, 4779), 'executeResources.executeResources', 'executeResources.executeResources', (['selected_sdk', 'api_version'], {}), '(selected_sdk, api_version)\n', (4752, 4779), False, 'import executeResources\n'), ((5537, 5551), 'os.chdir', 'os.chdir', (['path'], {}), '(path)\n', (5545, 5551), False, 'import os\n'), ((2303, 2314), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2312, 2314), False, 'import os\n'), ((2341, 2382), 'fnmatch.filter', 'fnmatch.filter', (['filenames', '"""logfile*.log"""'], {}), "(filenames, 'logfile*.log')\n", (2355, 2382), False, 'import fnmatch\n'), ((4218, 4234), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (4228, 4234), False, 'import json\n'), ((4991, 5056), 'changeLogGenerator.changeLogGenerator', 'changeLogGenerator.changeLogGenerator', (['resource_dict', 'api_version'], {}), '(resource_dict, api_version)\n', (5028, 5056), False, 'import changeLogGenerator\n'), ((5198, 5285), 'writeEndPointsFile.writeEndpointsFile', 'writeEndPointsFile.writeEndpointsFile', (['"""## HPE OneView"""', 'resource_dict', 'api_version'], {}), "('## HPE OneView', resource_dict,\n api_version)\n", (5235, 5285), False, 'import writeEndPointsFile\n'), ((5650, 5661), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5659, 5661), False, 'import os\n'), ((2427, 2458), 'os.path.join', 'os.path.join', (['rootDir', 'filename'], {}), '(rootDir, filename)\n', (2439, 2458), False, 'import os\n'), ((3134, 3145), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3143, 3145), False, 'import os\n'), ((2809, 2820), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2818, 2820), False, 'import os\n'), ((5712, 5723), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5721, 5723), False, 'import os\n')]
|
from django.conf import settings
from openpersonen.features.country_code_and_omschrijving.models import (
CountryCodeAndOmschrijving,
)
from openpersonen.features.gemeente_code_and_omschrijving.models import (
GemeenteCodeAndOmschrijving,
)
from openpersonen.utils.helpers import is_valid_date_format
def convert_ouder_instance_to_dict(ouder):
ouder_dict = {
"burgerservicenummer": ouder.burgerservicenummer_ouder,
"geslachtsaanduiding": ouder.geslachtsaanduiding_ouder,
"ouderAanduiding": ouder.geslachtsaanduiding_ouder,
"datumIngangFamilierechtelijkeBetrekking": {
"dag": int(
ouder.datum_ingang_familierechtelijke_betrekking_ouder[
settings.OPENPERSONEN_DAY_START : settings.OPENPERSONEN_DAY_END
]
if is_valid_date_format(
ouder.datum_ingang_familierechtelijke_betrekking_ouder
)
else 0
),
"datum": ouder.datum_ingang_familierechtelijke_betrekking_ouder,
"jaar": int(
ouder.datum_ingang_familierechtelijke_betrekking_ouder[
settings.OPENPERSONEN_YEAR_START : settings.OPENPERSONEN_YEAR_END
]
if is_valid_date_format(
ouder.datum_ingang_familierechtelijke_betrekking_ouder
)
else 0
),
"maand": int(
ouder.datum_ingang_familierechtelijke_betrekking_ouder[
settings.OPENPERSONEN_MONTH_START : settings.OPENPERSONEN_MONTH_END
]
if is_valid_date_format(
ouder.datum_ingang_familierechtelijke_betrekking_ouder
)
else 0
),
},
"naam": {
"geslachtsnaam": ouder.geslachtsnaam_ouder,
"voorletters": "string",
"voornamen": ouder.voornamen_ouder,
"voorvoegsel": ouder.voorvoegsel_geslachtsnaam_ouder,
"inOnderzoek": {
"geslachtsnaam": bool(ouder.geslachtsnaam_ouder),
"voornamen": bool(ouder.voornamen_ouder),
"voorvoegsel": bool(ouder.voorvoegsel_geslachtsnaam_ouder),
"datumIngangOnderzoek": {
"dag": 0,
"datum": "string",
"jaar": 0,
"maand": 0,
},
},
},
"inOnderzoek": {
"burgerservicenummer": bool(ouder.burgerservicenummer_ouder),
"datumIngangFamilierechtelijkeBetrekking": bool(
ouder.datum_ingang_familierechtelijke_betrekking_ouder
),
"geslachtsaanduiding": bool(ouder.geslachtsaanduiding_ouder),
"datumIngangOnderzoek": {
"dag": int(
ouder.datum_ingang_onderzoek[
settings.OPENPERSONEN_DAY_START : settings.OPENPERSONEN_DAY_END
]
if is_valid_date_format(ouder.datum_ingang_onderzoek)
else 0
),
"datum": ouder.datum_ingang_onderzoek,
"jaar": int(
ouder.datum_ingang_onderzoek[
settings.OPENPERSONEN_YEAR_START : settings.OPENPERSONEN_YEAR_END
]
if is_valid_date_format(ouder.datum_ingang_onderzoek)
else 0
),
"maand": int(
ouder.datum_ingang_onderzoek[
settings.OPENPERSONEN_MONTH_START : settings.OPENPERSONEN_MONTH_END
]
if is_valid_date_format(ouder.datum_ingang_onderzoek)
else 0
),
},
},
"geboorte": {
"datum": {
"dag": int(
ouder.geboortedatum_ouder[
settings.OPENPERSONEN_DAY_START : settings.OPENPERSONEN_DAY_END
]
)
if is_valid_date_format(ouder.geboortedatum_ouder)
else 0,
"datum": ouder.geboortedatum_ouder,
"jaar": int(
ouder.geboortedatum_ouder[
settings.OPENPERSONEN_YEAR_START : settings.OPENPERSONEN_YEAR_END
]
)
if is_valid_date_format(ouder.geboortedatum_ouder)
else 0,
"maand": int(
ouder.geboortedatum_ouder[
settings.OPENPERSONEN_MONTH_START : settings.OPENPERSONEN_MONTH_END
]
)
if is_valid_date_format(ouder.geboortedatum_ouder)
else 0,
},
"land": {
"code": ouder.geboorteland_ouder,
"omschrijving": CountryCodeAndOmschrijving.get_omschrijving_from_code(
ouder.geboorteland_ouder
),
},
"plaats": {
"code": ouder.geboorteplaats_ouder,
"omschrijving": GemeenteCodeAndOmschrijving.get_omschrijving_from_code(
ouder.geboorteplaats_ouder
),
},
"inOnderzoek": {
"datum": bool(ouder.geboortedatum_ouder),
"land": bool(ouder.geboorteland_ouder),
"plaats": bool(ouder.geboorteplaats_ouder),
"datumIngangOnderzoek": {
"dag": 0,
"datum": "string",
"jaar": 0,
"maand": 0,
},
},
},
"geheimhoudingPersoonsgegevens": True,
}
return ouder_dict
|
[
"openpersonen.utils.helpers.is_valid_date_format",
"openpersonen.features.country_code_and_omschrijving.models.CountryCodeAndOmschrijving.get_omschrijving_from_code",
"openpersonen.features.gemeente_code_and_omschrijving.models.GemeenteCodeAndOmschrijving.get_omschrijving_from_code"
] |
[((4974, 5053), 'openpersonen.features.country_code_and_omschrijving.models.CountryCodeAndOmschrijving.get_omschrijving_from_code', 'CountryCodeAndOmschrijving.get_omschrijving_from_code', (['ouder.geboorteland_ouder'], {}), '(ouder.geboorteland_ouder)\n', (5027, 5053), False, 'from openpersonen.features.country_code_and_omschrijving.models import CountryCodeAndOmschrijving\n'), ((5216, 5303), 'openpersonen.features.gemeente_code_and_omschrijving.models.GemeenteCodeAndOmschrijving.get_omschrijving_from_code', 'GemeenteCodeAndOmschrijving.get_omschrijving_from_code', (['ouder.geboorteplaats_ouder'], {}), '(ouder.\n geboorteplaats_ouder)\n', (5270, 5303), False, 'from openpersonen.features.gemeente_code_and_omschrijving.models import GemeenteCodeAndOmschrijving\n'), ((832, 908), 'openpersonen.utils.helpers.is_valid_date_format', 'is_valid_date_format', (['ouder.datum_ingang_familierechtelijke_betrekking_ouder'], {}), '(ouder.datum_ingang_familierechtelijke_betrekking_ouder)\n', (852, 908), False, 'from openpersonen.utils.helpers import is_valid_date_format\n'), ((1282, 1358), 'openpersonen.utils.helpers.is_valid_date_format', 'is_valid_date_format', (['ouder.datum_ingang_familierechtelijke_betrekking_ouder'], {}), '(ouder.datum_ingang_familierechtelijke_betrekking_ouder)\n', (1302, 1358), False, 'from openpersonen.utils.helpers import is_valid_date_format\n'), ((1658, 1734), 'openpersonen.utils.helpers.is_valid_date_format', 'is_valid_date_format', (['ouder.datum_ingang_familierechtelijke_betrekking_ouder'], {}), '(ouder.datum_ingang_familierechtelijke_betrekking_ouder)\n', (1678, 1734), False, 'from openpersonen.utils.helpers import is_valid_date_format\n'), ((4134, 4181), 'openpersonen.utils.helpers.is_valid_date_format', 'is_valid_date_format', (['ouder.geboortedatum_ouder'], {}), '(ouder.geboortedatum_ouder)\n', (4154, 4181), False, 'from openpersonen.utils.helpers import is_valid_date_format\n'), ((4483, 4530), 'openpersonen.utils.helpers.is_valid_date_format', 'is_valid_date_format', (['ouder.geboortedatum_ouder'], {}), '(ouder.geboortedatum_ouder)\n', (4503, 4530), False, 'from openpersonen.utils.helpers import is_valid_date_format\n'), ((4783, 4830), 'openpersonen.utils.helpers.is_valid_date_format', 'is_valid_date_format', (['ouder.geboortedatum_ouder'], {}), '(ouder.geboortedatum_ouder)\n', (4803, 4830), False, 'from openpersonen.utils.helpers import is_valid_date_format\n'), ((3064, 3114), 'openpersonen.utils.helpers.is_valid_date_format', 'is_valid_date_format', (['ouder.datum_ingang_onderzoek'], {}), '(ouder.datum_ingang_onderzoek)\n', (3084, 3114), False, 'from openpersonen.utils.helpers import is_valid_date_format\n'), ((3430, 3480), 'openpersonen.utils.helpers.is_valid_date_format', 'is_valid_date_format', (['ouder.datum_ingang_onderzoek'], {}), '(ouder.datum_ingang_onderzoek)\n', (3450, 3480), False, 'from openpersonen.utils.helpers import is_valid_date_format\n'), ((3744, 3794), 'openpersonen.utils.helpers.is_valid_date_format', 'is_valid_date_format', (['ouder.datum_ingang_onderzoek'], {}), '(ouder.datum_ingang_onderzoek)\n', (3764, 3794), False, 'from openpersonen.utils.helpers import is_valid_date_format\n')]
|
from flask.ext.wtf import Form
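# Note: the flask.ext.* namespace was removed in Flask 1.0; modern flask_wtf exposes FlaskForm (Form is the legacy alias).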
from wtforms import BooleanField, TextField, PasswordField, validators
from wtforms.validators import DataRequired
from flask_wtf.file import FileField
class LoginForm(Form):
first_name = TextField('first_name', validators=[DataRequired()])
last_name = TextField('last_name', validators=[DataRequired()])
email = TextField('Email Address', [validators.required(),validators.Length(min=6, max=35)])
resume = FileField()
remember_me = BooleanField('remember_me', default=False)
|
[
"wtforms.validators.Length",
"wtforms.BooleanField",
"wtforms.validators.required",
"flask_wtf.file.FileField",
"wtforms.validators.DataRequired"
] |
[((456, 467), 'flask_wtf.file.FileField', 'FileField', ([], {}), '()\n', (465, 467), False, 'from flask_wtf.file import FileField\n'), ((486, 528), 'wtforms.BooleanField', 'BooleanField', (['"""remember_me"""'], {'default': '(False)'}), "('remember_me', default=False)\n", (498, 528), False, 'from wtforms import BooleanField, TextField, PasswordField, validators\n'), ((386, 407), 'wtforms.validators.required', 'validators.required', ([], {}), '()\n', (405, 407), False, 'from wtforms import BooleanField, TextField, PasswordField, validators\n'), ((408, 440), 'wtforms.validators.Length', 'validators.Length', ([], {'min': '(6)', 'max': '(35)'}), '(min=6, max=35)\n', (425, 440), False, 'from wtforms import BooleanField, TextField, PasswordField, validators\n'), ((260, 274), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (272, 274), False, 'from wtforms.validators import DataRequired\n'), ((329, 343), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (341, 343), False, 'from wtforms.validators import DataRequired\n')]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from flask import Blueprint, render_template
from flask_login import login_required
from vhoops.modules.teams.api.controllers import get_all_teams_func
from vhoops.modules.on_call.forms.new_on_call import NewOnCallSchedule
on_call_router = Blueprint("on_call_router", __name__)
@on_call_router.route("/on-call", methods=["GET"])
@login_required
def on_call_page():
# Form config
teams = get_all_teams_func(as_object=True)
form = NewOnCallSchedule()
form.user.choices = [
(member.id, member.username)
for team in teams["data"]
for member in team.members
]
return render_template(
"on-call/on-call.html",
teams=teams["data"],
form=form
)
|
[
"vhoops.modules.teams.api.controllers.get_all_teams_func",
"vhoops.modules.on_call.forms.new_on_call.NewOnCallSchedule",
"flask.Blueprint",
"flask.render_template"
] |
[((286, 323), 'flask.Blueprint', 'Blueprint', (['"""on_call_router"""', '__name__'], {}), "('on_call_router', __name__)\n", (295, 323), False, 'from flask import Blueprint, render_template\n'), ((443, 477), 'vhoops.modules.teams.api.controllers.get_all_teams_func', 'get_all_teams_func', ([], {'as_object': '(True)'}), '(as_object=True)\n', (461, 477), False, 'from vhoops.modules.teams.api.controllers import get_all_teams_func\n'), ((489, 508), 'vhoops.modules.on_call.forms.new_on_call.NewOnCallSchedule', 'NewOnCallSchedule', ([], {}), '()\n', (506, 508), False, 'from vhoops.modules.on_call.forms.new_on_call import NewOnCallSchedule\n'), ((658, 729), 'flask.render_template', 'render_template', (['"""on-call/on-call.html"""'], {'teams': "teams['data']", 'form': 'form'}), "('on-call/on-call.html', teams=teams['data'], form=form)\n", (673, 729), False, 'from flask import Blueprint, render_template\n')]
|
import os
import pickle
from collections import defaultdict
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats
def add_capitals(dico):
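# Return a copy of the dict that also contains each key with its first character capitalized (values unchanged).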
return {**dico, **{key[0].capitalize() + key[1:]: item for key, item in dico.items()}}
COLORS = {
'causal': 'blue',
'anti': 'red',
'joint': 'green',
'causal_average': 'darkblue',
'anti_average': 'darkred',
'joint_average': 'darkgreen',
'MAP_uniform': 'yellow',
'MAP_source': 'gold',
# guess
'CausalGuessX': 'skyblue',
'CausalGuessY': 'darkcyan',
'AntiGuessX': 'salmon',
'AntiGuessY': 'chocolate',
}
MARKERS = {key: 'o' for key in COLORS}
MARKERS['causal'] = '^'
MARKERS['anti'] = 'v'
COLORS = add_capitals(COLORS)
MARKERS = add_capitals(MARKERS)
def value_at_step(trajectory, nsteps=1000):
"""Return the KL and the integral KL up to nsteps."""
steps = trajectory['steps']
index = np.searchsorted(steps, nsteps) - 1
ans = {}
# ans['end_step'] = steps[index]
for key, item in trajectory.items():
if key.startswith('kl_'):
ans[key[3:]] = item[index].mean()
# ans['endkl_' + key[3:]] = item[index].mean()
# ans['intkl_' + key[3:]] = item[:index].mean()
return ans
def get_best(results, nsteps):
"""Store per model each parameter and kl values
then for each model return the argmax parameters and curves
for kl and integral kl
"""
by_model = {}
# dictionary where each key is a model,
# and each value is a list of this model's hyperparameter
# and outcome at step nsteps
for exp in results:
trajectory = exp['trajectory']
for model, metric in value_at_step(trajectory, nsteps).items():
if model not in by_model:
by_model[model] = []
toadd = {
'hyperparameters': exp['hyperparameters'],
**exp['hyperparameters'],
'value': metric,
'kl': trajectory['kl_' + model],
'steps': trajectory['steps']
}
if 'scoredist_' + model in trajectory:
toadd['scoredist'] = trajectory['scoredist_' + model]
by_model[model] += [toadd]
# select only the best hyperparameters for this model.
for model, metrics in by_model.items():
dalist = sorted(metrics, key=lambda x: x['value'])
# Ensure that the optimal configuration does not diverge as optimization goes on.
for duh in dalist:
if duh['kl'][0].mean() * 2 > duh['kl'][-1].mean():
break
by_model[model] = duh
# print the outcome
for model, item in by_model.items():
if 'MAP' in model:
print(model, ('\t n0={n0:.0f},'
'\t kl={value:.3f}').format(**item))
else:
print(model, ('\t alpha={scheduler_exponent},'
'\t lr={lr:.1e},'
'\t kl={value:.3f}').format(**item))
return by_model
def curve_plot(bestof, nsteps, figsize, logscale=False, endstep=400, confidence=(5, 95)):
"""Draw mean trajectory plot with percentiles"""
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=figsize)
for model, item in sorted(bestof.items()):
xx = item['steps']
values = item['kl']
# truncate plot for k-invariance
end_id = np.searchsorted(xx, endstep) + 1
xx = xx[:end_id]
values = values[:end_id]
# plot mean and percentile statistics
ax.plot(xx, values.mean(axis=1), label=model,
marker=MARKERS[model], markevery=len(xx) // 6, markeredgewidth=0,
color=COLORS[model], alpha=.9)
ax.fill_between(
xx,
np.percentile(values, confidence[0], axis=1),
np.percentile(values, confidence[1], axis=1),
alpha=.4,
color=COLORS[model]
)
ax.axvline(nsteps, linestyle='--', color='black')
ax.grid(True)
if logscale:
ax.set_yscale('log')
ax.set_ylabel(r'$\mathrm{KL}(\mathbf{p}^*, \mathbf{p}^{(t)})$')
ax.set_xlabel('number of samples t')
ax.legend()
return fig, ax
def scatter_plot(bestof, nsteps, figsize, logscale=False):
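# Scatter the KL at step nsteps against each run's initial squared parameter distance, then overlay a pooled linear fit.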
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=figsize)
alldist = []
allkl = []
for model, item in sorted(bestof.items()):
if 'scoredist' not in item:
continue
index = min(np.searchsorted(item['steps'], nsteps), len(item['steps']) - 1)
initial_distances = item['scoredist'][0]
end_kl = item['kl'][index]
ax.scatter(
initial_distances,
end_kl,
alpha=.3,
color=COLORS[model],
marker=MARKERS[model],
linewidth=0,
label=model if False else None
)
alldist += list(initial_distances)
allkl += list(end_kl)
# linear regression
slope, intercept, rval, pval, _ = scipy.stats.linregress(alldist, allkl)
x_vals = np.array(ax.get_xlim())
y_vals = intercept + slope * x_vals
ax.plot(
x_vals, y_vals, '--', color='black', alpha=.8,
label=f'y=ax+b, r2={rval ** 2:.2f}'
f',\na={slope:.1e}, b={intercept:.2f}'
)
# look
ax.legend()
ax.grid(True)
if logscale:
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_xlim(min(alldist), max(alldist))
else:
ax.ticklabel_format(axis='both', style='sci', scilimits=(0, 0), useMathText=True)
ax.set_ylabel(r'$\mathrm{KL}(\mathbf{p}^*, \mathbf{p}^{(t)}); T=$' + str(nsteps))
ax.set_xlabel(r'$||\theta^{(0)} - \theta^* ||^2$')
return fig, ax
def two_plots(results, nsteps, plotname, dirname, verbose=False, figsize=(6, 3)):
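# Pick the best hyperparameters per model at nsteps, then draw and save the KL curve plot and the scatter plot.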
print(dirname, plotname)
bestof = get_best(results, nsteps)
# remove the models I don't want to compare
# eg remove SGD, MAP. Keep ASGD and rename them to remove average.
selected = {
key[0].capitalize() + key[1:-len('_average')].replace('A', 'X').replace('B', 'Y'): item
for key, item in bestof.items()
if key.endswith('_average')}
for key in ['MAP_uniform', 'MAP_source']:
# selected[key] = bestof[key]
pass
if dirname.startswith('guess'):
selected.pop('Joint', None)
curves, ax1 = curve_plot(selected, nsteps, figsize, logscale=False)
# initstring = 'denseinit' if results[0]["is_init_dense"] else 'sparseinit'
# curves.suptitle(f'Average KL tuned for {nsteps} samples with {confidence} percentiles, '
# f'{initstring}, k={results[0]["k"]}')
scatter, ax2 = scatter_plot(selected, nsteps, figsize,
logscale=(dirname == 'guess_sparseinit'))
if verbose:
for ax in [ax1, ax2]:
info = str(next(iter(selected.values()))['hyperparameters'])
txt = ax.text(0.5, 1, info, ha='center', va='top',
wrap=True, transform=ax.transAxes,
# bbox=dict(boxstyle='square')
)
txt._get_wrap_line_width = lambda: 400.  # wrap to 400 screen pixels
# small adjustments for intervention guessing
if dirname.startswith('guess'):
curves.axes[0].set_ylim(0, 1.5)
for fig in [curves, scatter]:
fig.axes[0].set_xlabel('')
fig.axes[0].set_ylabel('')
for style, fig in {'curves': curves, 'scatter': scatter}.items():
for figpath in [
os.path.join('plots', dirname, f'{style}_{plotname}.pdf')]:
print("Saving ", figpath)
os.makedirs(os.path.dirname(figpath), exist_ok=True)
# os.path.join('plots/sweep/png', f'{style}_{plotname}.png')]:
fig.savefig(figpath, bbox_inches='tight')
plt.close(curves)
plt.close(scatter)
print()
def plot_marginal_likelihoods(results, intervention, k, dirname):
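# Plot the running-average log-likelihood gap between the wrong and the correct variable for the first experiment; values below zero favour the correct one.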
exp = results[0]
values = {}
for whom in ['A', 'B']:
values[whom] = exp['loglikelihood' + whom][:100].cumsum(0)
xx = np.arange(1, values[whom].shape[0] + 1)
values[whom] /= xx[:, np.newaxis]
if intervention == 'cause':
right, wrong = 'A', 'B'
else:
right, wrong = 'B', 'A'
plt.plot(values[wrong] - values[right], alpha=.2)
plt.hlines(0, 0, values['B'].shape[0])
plt.grid()
plt.ylim(-1, 1)
figpath = os.path.join('plots', dirname, 'guessing', f'guess_{intervention}_k={k}.pdf')
os.makedirs(os.path.dirname(figpath), exist_ok=True)
plt.savefig(figpath, bbox_inches='tight')
plt.close()
def merge_results(results1, results2, bs=5):
"""Combine results from intervention on cause and effect.
Also report statistics about pooled results.
Pooled records the average over 10 cause and 10 effect interventions;
the goal is to have tighter percentile curves
that are representative of the algorithm's performance.
"""
combined = []
pooled = []
for e1, e2 in zip(results1, results2):
h1, h2 = e1['hyperparameters'], e2['hyperparameters']
assert h1['lr'] == h2['lr']
t1, t2 = e1['trajectory'], e2['trajectory']
combined_trajs = {'steps': t1['steps']}
pooled_trajs = combined_trajs.copy()
for key in t1.keys():
if key.startswith(('scoredist', 'kl')):
combined_trajs[key] = np.concatenate((t1[key], t2[key]), axis=1)
meantraj = (t1[key] + t2[key]) / 2
pooled_trajs[key] = np.array([
meantraj[:, bs * i:bs * (i + 1)].mean(axis=1)
for i in range(meantraj.shape[1] // bs)
]).T
combined += [{'hyperparameters': h1, 'trajectory': combined_trajs}]
pooled += [{'hyperparameters': h2, 'trajectory': pooled_trajs}]
return combined, pooled
def all_plot(guess=False, dense=True,
input_dir='categorical_results', output_dir='camera_ready',
figsize=(3.6, 2.2)):
basefile = '_'.join(['guess' if guess else 'sweep2',
'denseinit' if dense else 'sparseinit'])
print(basefile, '\n---------------------')
prior_string = 'dense' if dense else 'sparse'
for k in [20]: # [10, 20, 50]:
# Optimize hyperparameters for nsteps such that curves are k-invariant
nsteps = k ** 2 // 4
allresults = defaultdict(list)
for intervention in ['cause', 'effect']:
# 'singlecond', 'gmechanism', 'independent', 'geometric', 'weightedgeo']:
plotname = f'{prior_string}_{intervention}_k={k}'
file = f'{basefile}_{intervention}_k={k}.pkl'
filepath = os.path.join(input_dir, file)
print(os.path.abspath(filepath))
if os.path.isfile(filepath):
with open(filepath, 'rb') as fin:
results = pickle.load(fin)
print(1)
two_plots(results, nsteps,
plotname=plotname,
dirname=output_dir,
figsize=figsize)
allresults[intervention] = results
# if guess:
# plot_marginal_likelihoods(results, intervention, k, basefile)
# if not guess and 'cause' in allresults and 'effect' in allresults:
# combined, pooled = merge_results(allresults['cause'], allresults['effect'])
# if len(combined) > 0:
# for key, item in {'combined': combined, 'pooled': pooled}.items():
# two_plots(item, nsteps,
# plotname=f'{prior_string}_{key}_k={k}',
# dirname=output_dir,
# figsize=figsize)
if __name__ == '__main__':
np.set_printoptions(precision=2)
matplotlib.use('pgf')
matplotlib.rcParams['mathtext.fontset'] = 'cm'
matplotlib.rcParams['pdf.fonttype'] = 42
# all_plot(guess=True, dense=True)
# all_plot(guess=True, dense=False)
all_plot(guess=False, dense=True)
all_plot(guess=False, dense=False)
|
[
"collections.defaultdict",
"os.path.isfile",
"pickle.load",
"numpy.arange",
"os.path.join",
"matplotlib.pyplot.hlines",
"os.path.abspath",
"numpy.set_printoptions",
"matplotlib.pyplot.close",
"os.path.dirname",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.ylim",
"numpy.percentile",
"matplotlib.use",
"matplotlib.pyplot.grid",
"numpy.concatenate",
"matplotlib.pyplot.plot",
"numpy.searchsorted",
"matplotlib.pyplot.savefig"
] |
[((3201, 3248), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'figsize': 'figsize'}), '(nrows=1, ncols=1, figsize=figsize)\n', (3213, 3248), True, 'import matplotlib.pyplot as plt\n'), ((4291, 4338), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'figsize': 'figsize'}), '(nrows=1, ncols=1, figsize=figsize)\n', (4303, 4338), True, 'import matplotlib.pyplot as plt\n'), ((7865, 7882), 'matplotlib.pyplot.close', 'plt.close', (['curves'], {}), '(curves)\n', (7874, 7882), True, 'import matplotlib.pyplot as plt\n'), ((7887, 7905), 'matplotlib.pyplot.close', 'plt.close', (['scatter'], {}), '(scatter)\n', (7896, 7905), True, 'import matplotlib.pyplot as plt\n'), ((8325, 8375), 'matplotlib.pyplot.plot', 'plt.plot', (['(values[wrong] - values[right])'], {'alpha': '(0.2)'}), '(values[wrong] - values[right], alpha=0.2)\n', (8333, 8375), True, 'import matplotlib.pyplot as plt\n'), ((8379, 8417), 'matplotlib.pyplot.hlines', 'plt.hlines', (['(0)', '(0)', "values['B'].shape[0]"], {}), "(0, 0, values['B'].shape[0])\n", (8389, 8417), True, 'import matplotlib.pyplot as plt\n'), ((8422, 8432), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (8430, 8432), True, 'import matplotlib.pyplot as plt\n'), ((8437, 8452), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1)', '(1)'], {}), '(-1, 1)\n', (8445, 8452), True, 'import matplotlib.pyplot as plt\n'), ((8467, 8544), 'os.path.join', 'os.path.join', (['"""plots"""', 'dirname', '"""guessing"""', 'f"""guess_{intervention}_k={k}.pdf"""'], {}), "('plots', dirname, 'guessing', f'guess_{intervention}_k={k}.pdf')\n", (8479, 8544), False, 'import os\n'), ((8606, 8647), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figpath'], {'bbox_inches': '"""tight"""'}), "(figpath, bbox_inches='tight')\n", (8617, 8647), True, 'import matplotlib.pyplot as plt\n'), ((8652, 8663), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8661, 8663), True, 'import matplotlib.pyplot as plt\n'), ((11899, 11931), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)'}), '(precision=2)\n', (11918, 11931), True, 'import numpy as np\n'), ((11936, 11957), 'matplotlib.use', 'matplotlib.use', (['"""pgf"""'], {}), "('pgf')\n", (11950, 11957), False, 'import matplotlib\n'), ((928, 958), 'numpy.searchsorted', 'np.searchsorted', (['steps', 'nsteps'], {}), '(steps, nsteps)\n', (943, 958), True, 'import numpy as np\n'), ((8131, 8170), 'numpy.arange', 'np.arange', (['(1)', '(values[whom].shape[0] + 1)'], {}), '(1, values[whom].shape[0] + 1)\n', (8140, 8170), True, 'import numpy as np\n'), ((8561, 8585), 'os.path.dirname', 'os.path.dirname', (['figpath'], {}), '(figpath)\n', (8576, 8585), False, 'import os\n'), ((10452, 10469), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (10463, 10469), False, 'from collections import defaultdict\n'), ((3410, 3438), 'numpy.searchsorted', 'np.searchsorted', (['xx', 'endstep'], {}), '(xx, endstep)\n', (3425, 3438), True, 'import numpy as np\n'), ((3784, 3828), 'numpy.percentile', 'np.percentile', (['values', 'confidence[0]'], {'axis': '(1)'}), '(values, confidence[0], axis=1)\n', (3797, 3828), True, 'import numpy as np\n'), ((3842, 3886), 'numpy.percentile', 'np.percentile', (['values', 'confidence[1]'], {'axis': '(1)'}), '(values, confidence[1], axis=1)\n', (3855, 3886), True, 'import numpy as np\n'), ((4495, 4533), 'numpy.searchsorted', 'np.searchsorted', (["item['steps']", 'nsteps'], {}), "(item['steps'], nsteps)\n", (4510, 4533), True, 'import numpy as np\n'), ((7569, 7626), 'os.path.join', 'os.path.join', (['"""plots"""', 'dirname', 'f"""{style}_{plotname}.pdf"""'], {}), "('plots', dirname, f'{style}_{plotname}.pdf')\n", (7581, 7626), False, 'import os\n'), ((10748, 10777), 'os.path.join', 'os.path.join', (['input_dir', 'file'], {}), '(input_dir, file)\n', (10760, 10777), False, 'import os\n'), ((10838, 10862), 'os.path.isfile', 'os.path.isfile', (['filepath'], {}), '(filepath)\n', (10852, 10862), False, 'import os\n'), ((7691, 7715), 'os.path.dirname', 'os.path.dirname', (['figpath'], {}), '(figpath)\n', (7706, 7715), False, 'import os\n'), ((9454, 9496), 'numpy.concatenate', 'np.concatenate', (['(t1[key], t2[key])'], {'axis': '(1)'}), '((t1[key], t2[key]), axis=1)\n', (9468, 9496), True, 'import numpy as np\n'), ((10796, 10821), 'os.path.abspath', 'os.path.abspath', (['filepath'], {}), '(filepath)\n', (10811, 10821), False, 'import os\n'), ((10944, 10960), 'pickle.load', 'pickle.load', (['fin'], {}), '(fin)\n', (10955, 10960), False, 'import pickle\n')]
|
from tests.v1tests import BaseTestCase
import json
class OfficeEndpointsTestCase(BaseTestCase):
def test_create_office(self):
"""Tests valid data POST Http method request on /offices endpoint"""
# Post, uses office specification model
response = self.client.post('api/v1/offices', data=json.dumps(self.office))
# Data section returned as per response specification
expected_response_json = {
'data': [{
'id': 1,
'type': 'Senior',
'name': 'Permanent Secretary'
}],
'status': 201
}
self.assertEqual(response.status_code, 201, "Should Return a 201 HTTP Status Code Response:Created")
self.assertEqual(expected_response_json, response.json)
def test_create_office_invalid_forbidden(self):
"""Tests invalid data on POST method request on /offices endpoint"""
response = self.client.post('api/v1/offices',
json={
'type': 'n',
'name': 'p'
})
self.assertEqual(response.status_code, 400, "Should Return a 400 HTTP Status Code Response:Bad Request")
# Should return error message
self.assertIn("Check Input Values", response.json['error'])
def test_create_office_bad_request(self):
"""Tests malformed POST Http method request on /offices endpoint"""
response = self.client.post('api/v1/offices',
json={
# Missing type key
'name': 'Permanent Secretary'
})
self.assertEqual(response.status_code, 400, "Should Return a 400 HTTP Status Code Response:Bad Request")
# Should return error message
self.assertIn("Missing Key value", response.json['error'])
def test_view_all_offices(self):
"""Tests GET Http method request on /offices endpoint"""
# Post, create an office first
self.client.post('api/v1/offices', data=json.dumps(self.office))
# Retrieve the office
response = self.client.get('api/v1/offices')
self.assertEqual(response.status_code, 200, "Should Return a 200 HTTP Status Code Response:Success")
expected_response_json = {
"data": [{
"id": 1,
'type': 'Senior',
'name': '<NAME>'
}],
"status": 200
}
# Converts to string
self.assertEqual(response.json, expected_response_json)
def test_view_all_offices_bad_request(self):
"""Tests malformed GET Http method request on /office endpoint"""
response = self.client.get('api/v1/ofices')
self.assertEqual(response.status_code, 404, "Should Return a 404 HTTP Status Code Response:Resource Not Found")
# Should return error message
self.assertEqual(response.json, self.error_default_not_found)
def test_view_specific_office(self):
"""Tests GET Http method request on /office/{:id} endpoint"""
# Post, add an office
self.client.post('api/v1/offices', data=json.dumps(self.office))
# Get data for specific office
response = self.client.get('api/v1/offices/1')
expected_response = {
"id": 1,
"name": "<NAME>",
"type": "Senior"
}
self.assertEqual(response.status_code, 200, "Should Return a 200 HTTP Status Code:Success")
# Returns Dict as string and compares if its in response
self.assertEqual(response.json['data'][0], expected_response)
def test_view_specific_office_invalid_id(self):
"""Tests malformed GET Http method request on /office/{:id} endpoint"""
response = self.client.get('api/v1/offices/{}'.format(4578))
self.assertEqual(response.status_code, 404, "Should Return a 404 HTTP Status Code Response:Bad Request")
# Should return error message
self.assertEqual(response.json['error'], "Invalid Id Not Found", "Should return resource not found response")
def test_view_specific_office_not_found(self):
"""Tests malformed GET Http method request on /office/{:id} endpoint"""
response = self.client.get('api/v1/offies/{}'.format(0))
self.assertEqual(response.status_code, 404, "Should Return a 404 HTTP Status Code Response:Not Found")
# Should return error message
self.assertEqual(response.json, self.error_default_not_found, "Should return resource not found response")
def test_view_specific_office_invalid_id_value_error(self):
"""Tests valid request but invalid data on DELETE request on /parties/{:id}/name endpoint"""
self.client.post('api/v1/offices', data=json.dumps(self.office))
response = self.client.get('api/v1/offices/e')
self.assertEqual(response.status_code, 400, "Should Return a 404 HTTP Status Code Response:Not Found")
# Should return error message
self.assertEqual(response.json['error'], 'Invalid Id',
'Should return not found response')
def test_edit_government_office(self):
"""Tests PATCH Http method request on /offices/{:id}/name endpoint"""
# Save Post First
self.client.post('api/v1/offices', data=json.dumps(self.office))
edit_request_json = {
"name": "<NAME>"
}
# Update Name
response = self.client.patch('api/v1/offices/{}/name'.format(1),
data=json.dumps(edit_request_json))
self.assertEqual(response.status_code, 200, "Should Return a 200 HTTP Status Code Response:Updated")
self.assertEqual(edit_request_json.get('name'), response.json[0]['data'][0]['name'])
def test_edit_office_invalid_id(self):
"""Tests invalid id on PATCH request on /offices/{:id}/name endpoint"""
edit_request_json = {
"name": "<NAME>"
}
response = self.client.patch('api/v1/offices/0/name', data=json.dumps(edit_request_json))
self.assertEqual(response.status_code, 404, "Should Return a 404 HTTP Status Code Response:Not Found")
# Should return error message
self.assertEqual(response.json['error'], 'Invalid Id Not Found',
'Should return invalid id response')
def test_edit_offices_not_found(self):
"""Tests valid but non existent id on PATCH request on /parties/{:id}/name endpoint"""
edit_request_json = {
"name": "Secretary"
}
response = self.client.patch('api/v1/offices/3/name', data=json.dumps(edit_request_json))
self.assertEqual(response.status_code, 404, "Should Return a 404 HTTP Status Code Response:Not Found")
# Should return error message
self.assertEqual(response.json['error'], 'Invalid Id Not Found',
'Should return not found response')
def test_edit_office_invalid_data(self):
"""Tests valid request but invalid data on PATCH request on /offices/{:id}/name endpoint"""
self.client.post('api/v1/offices', data=json.dumps(self.office))
edit_request_json = {
"name": "D"
}
response = self.client.patch('api/v1/offices/1/name', data=json.dumps(edit_request_json))
self.assertEqual(response.status_code, 400, "Should Return a 404 HTTP Status Code Response:Not Found")
# Should return error message
self.assertEqual(response.json['error'], 'Incorrect Data Received,Bad request',
'Should return not found response')
def test_edit_office_invalid_id_value_error(self):
"""Tests valid request but invalid data on PATCH request on /offices/{:id}/name endpoint"""
self.client.post('api/v1/offices', data=json.dumps(self.office))
edit_request_json = {
"name": "<NAME>"
}
response = self.client.patch('api/v1/offices/e/name', data=json.dumps(edit_request_json))
self.assertEqual(response.status_code, 400, "Should Return a 404 HTTP Status Code Response:Not Found")
# Should return error message
self.assertEqual(response.json['error'], 'Invalid Id',
'Should return not found response')
def test_delete_office(self):
"""Tests DELETE Http method request on /offices/{:id} endpoint"""
# Save Post First
self.client.post('api/v1/offices', data=json.dumps(self.office))
# Delete Party
response = self.client.delete('api/v1/offices/{0}'.format(1))
self.assertEqual(response.status_code, 200, "Should Return a 200 HTTP Status Code Response:Deleted")
self.assertEqual("Deleted Successfully", response.json['message'])
def test_delete_office_not_found(self):
""""Tests malformed DELETE Http method request on /offices/{:id} endpoint"""
# Save Post First
response = self.client.delete('api/v1/offices/{0}'.format(-1))
self.assertEqual(response.status_code, 404, "Should Return a 404 HTTP Status Code Response:Not Found")
# Should return error message
self.assertEqual(response.json['error'], 'Invalid Id Not Found', "Should return resource not found response")
def test_delete_office_invalid_id_value_error(self):
"""Tests valid request but invalid data on DELETE request on /offices/{:id}/name endpoint"""
self.client.post('api/v1/offices', data=json.dumps(self.office))
response = self.client.delete('api/v1/offices/e')
self.assertEqual(response.status_code, 400, "Should Return a 404 HTTP Status Code Response:Not Found")
# Should return error message
self.assertEqual(response.json['error'], 'Invalid Id')
def test_no_duplication(self):
# Create
self.client.post('api/v1/offices', data=json.dumps(self.office))
response = self.client.post('api/v1/offices', data=json.dumps(self.office))
self.assertEqual(response.status_code, 409, "Should Create Party")
self.assertEqual(response.json['error'], "Office Already Exists", "Should Create Non Duplicate Ids")
|
[
"json.dumps"
] |
[((317, 340), 'json.dumps', 'json.dumps', (['self.office'], {}), '(self.office)\n', (327, 340), False, 'import json\n'), ((2178, 2201), 'json.dumps', 'json.dumps', (['self.office'], {}), '(self.office)\n', (2188, 2201), False, 'import json\n'), ((3284, 3307), 'json.dumps', 'json.dumps', (['self.office'], {}), '(self.office)\n', (3294, 3307), False, 'import json\n'), ((4904, 4927), 'json.dumps', 'json.dumps', (['self.office'], {}), '(self.office)\n', (4914, 4927), False, 'import json\n'), ((5453, 5476), 'json.dumps', 'json.dumps', (['self.office'], {}), '(self.office)\n', (5463, 5476), False, 'import json\n'), ((5684, 5713), 'json.dumps', 'json.dumps', (['edit_request_json'], {}), '(edit_request_json)\n', (5694, 5713), False, 'import json\n'), ((6177, 6206), 'json.dumps', 'json.dumps', (['edit_request_json'], {}), '(edit_request_json)\n', (6187, 6206), False, 'import json\n'), ((6770, 6799), 'json.dumps', 'json.dumps', (['edit_request_json'], {}), '(edit_request_json)\n', (6780, 6799), False, 'import json\n'), ((7278, 7301), 'json.dumps', 'json.dumps', (['self.office'], {}), '(self.office)\n', (7288, 7301), False, 'import json\n'), ((7434, 7463), 'json.dumps', 'json.dumps', (['edit_request_json'], {}), '(edit_request_json)\n', (7444, 7463), False, 'import json\n'), ((7967, 7990), 'json.dumps', 'json.dumps', (['self.office'], {}), '(self.office)\n', (7977, 7990), False, 'import json\n'), ((8128, 8157), 'json.dumps', 'json.dumps', (['edit_request_json'], {}), '(edit_request_json)\n', (8138, 8157), False, 'import json\n'), ((8615, 8638), 'json.dumps', 'json.dumps', (['self.office'], {}), '(self.office)\n', (8625, 8638), False, 'import json\n'), ((9618, 9641), 'json.dumps', 'json.dumps', (['self.office'], {}), '(self.office)\n', (9628, 9641), False, 'import json\n'), ((10014, 10037), 'json.dumps', 'json.dumps', (['self.office'], {}), '(self.office)\n', (10024, 10037), False, 'import json\n'), ((10099, 10122), 'json.dumps', 'json.dumps', (['self.office'], {}), '(self.office)\n', (10109, 10122), False, 'import json\n')]
|
from rest_framework import serializers
class ValidateSerializer(serializers.Serializer):
class_label = serializers.IntegerField()
confidence = serializers.FloatField()
|
[
"rest_framework.serializers.IntegerField",
"rest_framework.serializers.FloatField"
] |
[((108, 134), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {}), '()\n', (132, 134), False, 'from rest_framework import serializers\n'), ((152, 176), 'rest_framework.serializers.FloatField', 'serializers.FloatField', ([], {}), '()\n', (174, 176), False, 'from rest_framework import serializers\n')]
|
"""
Copyright 2018 Inmanta
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contact: <EMAIL>
"""
import os
import re
from abc import ABC, abstractmethod
from typing import Generic, List, Mapping, Optional, Sequence, Set, Type, TypeVar
from jinja2 import Environment, PackageLoader
from inmanta.ast import CompilerException, ModifiedAfterFreezeException
from inmanta.ast.statements import AssignStatement
from inmanta.ast.statements.generator import Constructor
from inmanta.execute.runtime import OptionVariable
from inmanta.module import ModuleV2InV1PathException
def bold(content: Optional[str] = None) -> str:
if content is None:
return "\033[1m"
return "\033[1m{0}\033[0m".format(content)
def underline(content: Optional[str] = None) -> str:
if content is None:
return "\033[4m"
return "\033[4m{0}\033[0m".format(content)
def noformat(content: Optional[str] = None) -> str:
return "\033[0m"
CUSTOM_FILTERS = {"bold": bold, "underline": underline, "noformat": noformat}
class ExplainerABC(ABC):
"""
Abstract base class for explainers. This class is purposely kept non-Generic to present a public interface that is invariant
of the compiler exception type. This allows correct typing of sequences of explainers.
"""
@abstractmethod
def explain(self, problem: CompilerException) -> List[str]:
...
Explainable = TypeVar("Explainable", bound=CompilerException)
class Explainer(Generic[Explainable], ExplainerABC, ABC):
"""
Abstract explainer, Generic in the compiler exception subtype to allow correct typing of the exception for subtype-specific
explanation logic.
Concrete subclasses must not be generic in the exception type because this would break explainable checking.
"""
explainable_type: Type[Explainable]
def explain(self, problem: CompilerException) -> List[str]:
"""
Returns a list of explanations for this exception. If neither the exception or any of its causes (recursively)
is explainable by this explainer, returns an empty list.
"""
allcauses: Set[CompilerException] = set()
work: List[CompilerException] = [problem]
while work:
w = work.pop()
allcauses.add(w)
work.extend(w.get_causes())
return [self.do_explain(c) for c in allcauses if isinstance(c, self.explainable_type)]
@abstractmethod
def do_explain(self, problem: Explainable) -> str:
"""
Explain a single exception, explainable by this explainer. Does not recurse on its causes.
"""
...
class JinjaExplainer(Explainer[Explainable], ABC):
"""
Abstract explainer for explanations based on a Jinja template.
:param template: path to the Jinja template to use for the explanation.
"""
def __init__(self, template: str) -> None:
self.template: str = template
def get_template(self, problem: Explainable) -> str:
path = os.path.join(os.path.dirname(__file__), self.template)
with open(path, "r", encoding="utf-8") as fh:
return fh.read()
def do_explain(self, problem: Explainable) -> str:
env = Environment(loader=PackageLoader("inmanta.compiler.help"))
for name, filter in CUSTOM_FILTERS.items():
env.filters[name] = filter
template = env.get_template(self.template)
return template.render(**self.get_arguments(problem))
@abstractmethod
def get_arguments(self, problem: Explainable) -> Mapping[str, object]:
"""
Returns a mapping for names that are used in the Jinja template.
"""
...
class ModifiedAfterFreezeExplainer(JinjaExplainer[ModifiedAfterFreezeException]):
"""
Explainer for ModifiedAfterFreezeException.
"""
explainable_type: Type[ModifiedAfterFreezeException] = ModifiedAfterFreezeException
def __init__(self) -> None:
super().__init__("modified_after_freeze.j2")
def build_reverse_hint(self, problem: ModifiedAfterFreezeException) -> str:
if isinstance(problem.stmt, AssignStatement):
return "%s.%s = %s" % (
problem.stmt.rhs.pretty_print(),
problem.attribute.get_name(),
problem.stmt.lhs.pretty_print(),
)
if isinstance(problem.stmt, Constructor):
# find right parameter:
attr = problem.attribute.end.get_name()
if attr not in problem.stmt.get_attributes():
attr_rhs = "?"
else:
attr_rhs = problem.stmt.get_attributes()[attr].pretty_print()
return "%s.%s = %s" % (attr_rhs, problem.attribute.get_name(), problem.stmt.pretty_print())
def get_arguments(self, problem: ModifiedAfterFreezeException) -> Mapping[str, object]:
return {
"relation": problem.attribute.get_name(),
"instance": problem.instance,
"values": problem.resultvariable.value,
"value": problem.value,
"location": problem.location,
"reverse": problem.reverse,
"reverse_example": "" if not problem.reverse else self.build_reverse_hint(problem),
"optional": isinstance(problem.resultvariable, OptionVariable),
}
class ModuleV2InV1PathExplainer(JinjaExplainer[ModuleV2InV1PathException]):
"""
Explainer for ModuleV2InV1PathException
"""
explainable_type: Type[ModuleV2InV1PathException] = ModuleV2InV1PathException
def __init__(self) -> None:
super().__init__("module_v2_in_v1_path.j2")
def get_arguments(self, problem: ModuleV2InV1PathException) -> Mapping[str, object]:
v2_source_configured: bool = problem.project.module_v2_source_configured() if problem.project is not None else False
return {
"name": problem.module.name,
"path": problem.module.path,
"project": problem.project is not None,
"v2_source_configured": v2_source_configured,
}
def escape_ansi(line: str) -> str:
ansi_escape = re.compile(r"(\x9B|\x1B\[)[0-?]*[ -/]*[@-~]")
return ansi_escape.sub("", line)
class ExplainerFactory:
def get_explainers(self) -> Sequence[ExplainerABC]:
return [ModifiedAfterFreezeExplainer(), ModuleV2InV1PathExplainer()]
def explain(self, problem: CompilerException) -> List[str]:
return [explanation for explainer in self.get_explainers() for explanation in explainer.explain(problem)]
def explain_and_format(self, problem: CompilerException, plain: bool = True) -> Optional[str]:
"""
:param plain: remove tty color codes, only return plain text
"""
raw = self.explain(problem)
if not raw:
return None
else:
pre = """
\033[1mException explanation
=====================\033[0m
"""
pre += "\n\n".join(raw)
if not plain:
return pre
else:
return escape_ansi(pre)
|
[
"jinja2.PackageLoader",
"typing.TypeVar",
"os.path.dirname",
"re.compile"
] |
[((1916, 1963), 'typing.TypeVar', 'TypeVar', (['"""Explainable"""'], {'bound': 'CompilerException'}), "('Explainable', bound=CompilerException)\n", (1923, 1963), False, 'from typing import Generic, List, Mapping, Optional, Sequence, Set, Type, TypeVar\n'), ((6623, 6670), 're.compile', 're.compile', (['"""(\\\\x9B|\\\\x1B\\\\[)[0-?]*[ -/]*[@-~]"""'], {}), "('(\\\\x9B|\\\\x1B\\\\[)[0-?]*[ -/]*[@-~]')\n", (6633, 6670), False, 'import re\n'), ((3526, 3551), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3541, 3551), False, 'import os\n'), ((3740, 3778), 'jinja2.PackageLoader', 'PackageLoader', (['"""inmanta.compiler.help"""'], {}), "('inmanta.compiler.help')\n", (3753, 3778), False, 'from jinja2 import Environment, PackageLoader\n')]
|
# -*- coding: utf-8 -*-
"""
CW, FGSM, and IFGSM Attack CNN
"""
import torch._utils
try:
torch._utils._rebuild_tensor_v2
except AttributeError:
def _rebuild_tensor_v2(storage, storage_offset, size, stride, requires_grad, backward_hooks):
tensor = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
tensor.requires_grad = requires_grad
tensor._backward_hooks = backward_hooks
return tensor
torch._utils._rebuild_tensor_v2 = _rebuild_tensor_v2
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torchvision
import torchvision.datasets as dset
import torchvision.transforms as transforms
from torch.autograd import Variable
import copy
import math
import numpy as np
import os
import argparse
import torch.utils.data as data
#from utils import *
import numpy.matlib
import matplotlib.pyplot as plt
import pickle
# import cPickle
from collections import OrderedDict
import models.cifar as models
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='Fool EnResNet')
ap = parser.add_argument
ap('--method', help='Attack Method', type=str, default="ifgsm") # fgsm, ifgsm, cwl2
ap('--epsilon', help='Attack Strength', type=float, default=0.031) # May 2
ap('--num-ensembles', '--ne', default=2, type=int, metavar='N')
ap('--noise-coef', '--nc', default=0.1, type=float, metavar='W', help='forward noise (default: 0.0)')
ap('--noise-coef-eval', '--nce', default=0.0, type=float, metavar='W', help='forward noise (default: 0.)')
ap('--arch', '-a', metavar='ARCH', default='resnet20',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet18)')
ap('--depth', type=int, default=29, help='Model depth.')
ap('--block-name', type=str, default='BasicBlock',
help='the building block for Resnet and Preresnet: BasicBlock, Bottleneck (default: Basicblock for cifar10/cifar100)')
ap('--cardinality', type=int, default=8, help='Model cardinality (group).')
ap('--widen-factor', type=int, default=4, help='Widen factor. 4 -> 64, 8 -> 128, ...')
ap('--growthRate', type=int, default=12, help='Growth rate for DenseNet.')
ap('--compressionRate', type=int, default=2, help='Compression Rate (theta) for DenseNet.')
ap('--feature_vec', default='x', type=str)
ap('-c', '--checkpoint', default='checkpoint', type=str, metavar='PATH',
help='path to save checkpoint (default: checkpoint)')
ap('-d', '--dataset', default='cifar10', type=str)
ap('--eta', default=1.0, type=float, help='eta in HOResNet')
ap('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
opt = parser.parse_args()
def conv3x3(in_planes, out_planes, stride=1):
"""
3x3 convolution with padding
"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
if __name__ == '__main__':
"""
Load the trained DNN, and attack the DNN, finally save the adversarial images
"""
# Model
if opt.dataset == 'cifar10':
dataloader = dset.CIFAR10
num_classes = 10
else:
dataloader = dset.CIFAR100
num_classes = 100
print("==> creating model '{}'".format(opt.arch))
if opt.arch.startswith('resnext'):
net = models.__dict__[opt.arch](
cardinality=opt.cardinality,
num_classes=num_classes,
depth=opt.depth,
widen_factor=opt.widen_factor,
dropRate=opt.drop,
)
elif opt.arch.startswith('densenet'):
net = models.__dict__[opt.arch](
num_classes=num_classes,
depth=opt.depth,
growthRate=opt.growthRate,
compressionRate=opt.compressionRate,
dropRate=opt.drop,
)
elif opt.arch.startswith('wrn'):
net = models.__dict__[opt.arch](
num_classes=num_classes,
depth=opt.depth,
widen_factor=opt.widen_factor,
dropRate=opt.drop,
)
elif opt.arch.startswith('resnet'):
net = models.__dict__[opt.arch](
num_classes=num_classes,
depth=opt.depth,
block_name=opt.block_name,
)
elif opt.arch.startswith('preresnet'):
net = models.__dict__[opt.arch](
num_classes=num_classes,
depth=opt.depth,
block_name=opt.block_name,
)
elif opt.arch.startswith('horesnet'):
net = models.__dict__[opt.arch](
num_classes=num_classes,
depth=opt.depth,
eta=opt.eta,
block_name=opt.block_name,
feature_vec=opt.feature_vec
)
elif opt.arch.startswith('hopreresnet'):
net = models.__dict__[opt.arch](
num_classes=num_classes,
depth=opt.depth,
eta=opt.eta,
block_name=opt.block_name,
feature_vec=opt.feature_vec
)
else:
net = models.__dict__[opt.arch](num_classes=num_classes)
# Load the model
print('==> Resuming from checkpoint..')
assert os.path.isfile(opt.checkpoint), 'Error: no checkpoint directory found!'
opt.checkpoint_dir = os.path.dirname(opt.checkpoint)
checkpoint = torch.load(opt.checkpoint)
best_acc = checkpoint['best_acc']
start_epoch = checkpoint['epoch']
net.load_state_dict(checkpoint['state_dict'])
net = net.cuda()
epsilon = opt.epsilon
attack_type = opt.method
# Load the original test data
print('==> Load the clean image')
root = './data'
download = False
kwargs = {'num_workers':1, 'pin_memory':True}
batchsize_test = 1000
if attack_type == 'cw':
batchsize_test = 1
print('Batch size of the test set: ', batchsize_test)
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
test_set = dataloader(root='./data', train=False, download=False, transform=transform_test)
test_loader = data.DataLoader(test_set, batch_size=batchsize_test, shuffle=False, num_workers=1, pin_memory=True)
criterion = nn.CrossEntropyLoss()
#--------------------------------------------------------------------------
# Testing
# images: the original images
# labels: labels of the original images
# images_adv: adversarial image
# labels_pred: the predicted labels of the adversarial images
# noise: the added noise
#--------------------------------------------------------------------------
images, labels, images_adv, labels_pred, noise = [], [], [], [], []
total_fooled = 0; total_correct_classified = 0
if attack_type == 'fgsm':
for batch_idx, (x1, y1_true) in enumerate(test_loader):
#if batch_idx < 2:
x_Test = x1.numpy()
#print x_Test.min(), x_Test.max()
#x_Test = ((x_Test - x_Test.min())/(x_Test.max() - x_Test.min()) - 0.5)*2
#x_Test = (x_Test - x_Test.min() )/(x_Test.max() - x_Test.min())
y_Test = y1_true.numpy()
#x = Variable(torch.cuda.FloatTensor(x_Test.reshape(1, 1, 28, 28)), requires_grad=True)
x = Variable(torch.cuda.FloatTensor(x_Test.reshape(batchsize_test, 3, 32, 32)), requires_grad=True)
y = Variable(torch.cuda.LongTensor(y_Test), requires_grad=False)
# Classification before perturbation
pred_tmp = net(x)
y_pred = np.argmax(pred_tmp.cpu().data.numpy())
loss = criterion(pred_tmp, y)
# Attack
net.zero_grad()
if x.grad is not None:
x.grad.data.fill_(0)
loss.backward()
x_val_min = 0.0
x_val_max = 1.0
x.grad.sign_()
x_adversarial = x + epsilon*x.grad
x_adversarial = torch.clamp(x_adversarial, x_val_min, x_val_max)
x_adversarial = x_adversarial.data
# Classify the perturbed data
x_adversarial_tmp = Variable(x_adversarial)
pred_tmp = net(x_adversarial_tmp)
y_pred_adversarial = np.argmax(pred_tmp.cpu().data.numpy(), axis=1)
for i in range(len(x_Test)):
#print y_pred_adversarial
if y_Test[i] == y_pred_adversarial[i]:
#if y_Test == y_pred_adversarial:
total_correct_classified += 1
for i in range(len(x_Test)):
# Save the perturbed data
images.append(x_Test[i, :, :, :]) # Original image
images_adv.append(x_adversarial.cpu().numpy()[i, :, :, :]) # Perturbed image
noise.append(x_adversarial.cpu().numpy()[i, :, :, :]-x_Test[i, :, :, :]) # Noise
labels.append(y_Test[i])
labels_pred.append(y_pred_adversarial[i])
elif attack_type == 'ifgsm':
for batch_idx, (x1, y1_true) in enumerate(test_loader):
#if batch_idx < 100:
#x_Test = (x_Test - x_Test.min())/(x_Test.max()-x_Test.min())
x_Test = ((x1 - x1.min())/(x1.max() - x1.min()) - 0.5)*2
x_Test = x_Test.numpy()
y_Test = y1_true.numpy()
x = Variable(torch.cuda.FloatTensor(x_Test.reshape(batchsize_test, 3, 32, 32)), requires_grad=True)
y = Variable(torch.cuda.LongTensor(y_Test), requires_grad=False)
# Classification before perturbation
pred_tmp = net(x)
y_pred = np.argmax(pred_tmp.cpu().data.numpy())
loss = criterion(pred_tmp, y)
# Attack
alpha = epsilon
#iteration = 10
iteration = 20
x_val_min = 0.; x_val_max = 1.
epsilon1 = 0.031
# Helper function
def where(cond, x, y):
"""
code from :
https://discuss.pytorch.org/t/how-can-i-do-the-operation-the-same-as-np-where/1329/8
"""
cond = cond.float()
return (cond*x) + ((1-cond)*y)
# Random perturbation
#x = x + torch.zeros_like(x).uniform_(-epsilon1, epsilon1) # May 2
x_adv = Variable(x.data, requires_grad=True)
for i in range(iteration):
h_adv = net(x_adv)
loss = criterion(h_adv, y)
net.zero_grad()
if x_adv.grad is not None:
x_adv.grad.data.fill_(0)
loss.backward()
x_adv.grad.sign_()
x_adv = x_adv + alpha*x_adv.grad
x_adv = where(x_adv > x+epsilon1, x+epsilon1, x_adv)
x_adv = where(x_adv < x-epsilon1, x-epsilon1, x_adv)
x_adv = torch.clamp(x_adv, x_val_min, x_val_max)
x_adv = Variable(x_adv.data, requires_grad=True)
x_adversarial = x_adv.data
x_adversarial_tmp = Variable(x_adversarial)
pred_tmp = net(x_adversarial_tmp)
loss = criterion(pred_tmp, y)
y_pred_adversarial = np.argmax(pred_tmp.cpu().data.numpy(), axis=1)
#if y_Test == y_pred_adversarial:
# total_correct_classified += 1
for i in range(len(x_Test)):
#print y_pred_adversarial
if y_Test[i] == y_pred_adversarial[i]:
#if y_Test == y_pred_adversarial:
total_correct_classified += 1
for i in range(len(x_Test)):
# Save the perturbed data
images.append(x_Test[i, :, :, :]) # Original image
images_adv.append(x_adversarial.cpu().numpy()[i, :, :, :]) # Perturbed image
noise.append(x_adversarial.cpu().numpy()[i, :, :, :]-x_Test[i, :, :, :]) # Noise
labels.append(y_Test[i])
labels_pred.append(y_pred_adversarial[i])
elif attack_type == 'cw':
for batch_idx, (x1, y1_true) in enumerate(test_loader):
#if batch_idx < 10:
if batch_idx - int(int(batch_idx/50.)*50) == 0:
print(batch_idx)
x_Test = x1.numpy()
y_Test = y1_true.numpy()
x = Variable(torch.cuda.FloatTensor(x_Test.reshape(batchsize_test, 3, 32, 32)), requires_grad=True)
y = Variable(torch.cuda.LongTensor(y_Test), requires_grad=False)
# Classification before perturbation
pred_tmp = net(x)
loss = criterion(pred_tmp, y)
y_pred = np.argmax(pred_tmp.cpu().data.numpy())
# Attack
cwl2_learning_rate = 0.0006#0.01
max_iter = 50
lambdaf = 10.0
kappa = 0.0
# The input image we will perturb
input = torch.FloatTensor(x_Test.reshape(batchsize_test, 3, 32, 32))
input_var = Variable(input)
# w is the variable we will optimize over. We will also save the best w and loss
w = Variable(input, requires_grad=True)
best_w = input.clone()
best_loss = float('inf')
# Use the Adam optimizer for the minimization
optimizer = optim.Adam([w], lr=cwl2_learning_rate)
# Get the top2 predictions of the model. Get the argmaxes for the objective function
probs = net(input_var.cuda())
probs_data = probs.data.cpu()
top1_idx = torch.max(probs_data, 1)[1]
probs_data[0][top1_idx] = -1 # making the previous top1 the lowest so we get the top2
top2_idx = torch.max(probs_data, 1)[1]
# Set the argmax (but maybe argmax will just equal top2_idx always?)
argmax = top1_idx[0]
if argmax == y_pred:
argmax = top2_idx[0]
# The iteration
for i in range(0, max_iter):
if i > 0:
w.grad.data.fill_(0)
# Zero grad (Only one line needed actually)
net.zero_grad()
optimizer.zero_grad()
# Compute L2 Loss
loss = torch.pow(w - input_var, 2).sum()
# w variable
w_data = w.data
w_in = Variable(w_data, requires_grad=True)
# Compute output
output = net.forward(w_in.cuda()) #second argument is unneeded
# Calculating the (hinge) loss
loss += lambdaf * torch.clamp( output[0][y_pred] - output[0][argmax] + kappa, min=0).cpu()
# Backprop the loss
loss.backward()
# Work on w (Don't think we need this)
w.grad.data.add_(w_in.grad.data)
# Optimizer step
optimizer.step()
# Save the best w and loss
total_loss = loss.data.cpu()[0]
if total_loss < best_loss:
best_loss = total_loss
##best_w = torch.clamp(best_w, 0., 1.) # BW Added Aug 26
best_w = w.data.clone()
# Set final adversarial image as the best-found w
x_adversarial = best_w
##x_adversarial = torch.clamp(x_adversarial, 0., 1.) # BW Added Aug 26
#--------------- Add to introduce the noise
noise_tmp = x_adversarial.cpu().numpy() - x_Test
x_adversarial = x_Test + epsilon * noise_tmp
#---------------
# Classify the perturbed data
x_adversarial_tmp = Variable(torch.cuda.FloatTensor(x_adversarial), requires_grad=False) #Variable(x_adversarial).cuda()
pred_tmp = net(x_adversarial_tmp)
y_pred_adversarial = np.argmax(pred_tmp.cpu().data.numpy()) # axis=1
if y_Test == y_pred_adversarial:
total_correct_classified += 1
# Save the perturbed data
images.append(x_Test) # Original image
images_adv.append(x_adversarial) # Perturbed image
noise.append(x_adversarial-x_Test) # Noise
labels.append(y_Test)
labels_pred.append(y_pred_adversarial)
elif attack_type == 'clean':
for batch_idx, (x1, y1_true) in enumerate(test_loader):
#if batch_idx < 2:
x_Test = x1.numpy()
#print x_Test.min(), x_Test.max()
#x_Test = ((x_Test - x_Test.min())/(x_Test.max() - x_Test.min()) - 0.5)*2
#x_Test = (x_Test - x_Test.min() )/(x_Test.max() - x_Test.min())
y_Test = y1_true.numpy()
#x = Variable(torch.cuda.FloatTensor(x_Test.reshape(1, 1, 28, 28)), requires_grad=True)
#x, y = torch.autograd.Variable(torch.cuda.FloatTensor(x_Test), volatile=True), torch.autograd.Variable(torch.cuda.LongTensor(y_Test))
x = Variable(torch.cuda.FloatTensor(x_Test.reshape(batchsize_test, 3, 32, 32)), requires_grad=True)
y = Variable(torch.cuda.LongTensor(y_Test), requires_grad=False)
# Classification before perturbation
pred_tmp = net(x)
y_pred = np.argmax(pred_tmp.cpu().data.numpy(), axis=1)
for i in range(len(x_Test)):
#print y_pred_adversarial
if y_Test[i] == y_pred[i]:
#if y_Test == y_pred_adversarial:
total_correct_classified += 1
else:
ValueError('Unsupported Attack')
print(opt.checkpoint)
print('Number of correctly classified images: ', total_correct_classified)
# Save data
#with open("Adversarial" + attack_type + str(int(10*epsilon)) + ".pkl", "w") as f:
#with open("Adversarial" + attack_type + str(int(100*epsilon)) + ".pkl", "w") as f:
# adv_data_dict = {"images":images_adv, "labels":labels}
# cPickle.dump(adv_data_dict, f)
images = np.array(images).squeeze()
images_adv = np.array(images_adv).squeeze()
noise = np.array(noise).squeeze()
labels = np.array(labels).squeeze()
labels_pred = np.array(labels_pred).squeeze()
print([images.shape, images_adv.shape, noise.shape, labels.shape, labels_pred.shape])
# with open("fooled_EnResNet5_20_PGD_10iters_" + attack_type + str(int(1000*epsilon)) + ".pkl", "w") as f:
# #with open("fooled_EnResNet5_20_PGD_20iters_" + attack_type + str(int(1000*epsilon)) + ".pkl", "w") as f:
# adv_data_dict = {
# "images" : images,
# "images_adversarial" : images_adv,
# "y_trues" : labels,
# "noises" : noise,
# "y_preds_adversarial" : labels_pred
# }
# pickle.dump(adv_data_dict, f)
|
[
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"torch.autograd.Variable",
"torch.load",
"torch.nn.Conv2d",
"os.path.dirname",
"torch.nn.CrossEntropyLoss",
"torch._utils._rebuild_tensor",
"torch.cuda.FloatTensor",
"os.path.isfile",
"torch.clamp",
"numpy.array",
"torch.optim.Adam",
"torch.max",
"torch.cuda.LongTensor",
"torch.pow",
"torchvision.transforms.Normalize",
"torchvision.transforms.ToTensor"
] |
[((1203, 1255), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Fool EnResNet"""'}), "(description='Fool EnResNet')\n", (1226, 1255), False, 'import argparse\n'), ((3100, 3189), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': '(3)', 'stride': 'stride', 'padding': '(1)', 'bias': '(False)'}), '(in_planes, out_planes, kernel_size=3, stride=stride, padding=1,\n bias=False)\n', (3109, 3189), True, 'import torch.nn as nn\n'), ((5708, 5738), 'os.path.isfile', 'os.path.isfile', (['opt.checkpoint'], {}), '(opt.checkpoint)\n', (5722, 5738), False, 'import os\n'), ((5805, 5836), 'os.path.dirname', 'os.path.dirname', (['opt.checkpoint'], {}), '(opt.checkpoint)\n', (5820, 5836), False, 'import os\n'), ((5854, 5880), 'torch.load', 'torch.load', (['opt.checkpoint'], {}), '(opt.checkpoint)\n', (5864, 5880), False, 'import torch\n'), ((6681, 6784), 'torch.utils.data.DataLoader', 'data.DataLoader', (['test_set'], {'batch_size': 'batchsize_test', 'shuffle': '(False)', 'num_workers': '(1)', 'pin_memory': '(True)'}), '(test_set, batch_size=batchsize_test, shuffle=False,\n num_workers=1, pin_memory=True)\n', (6696, 6784), True, 'import torch.utils.data as data\n'), ((6802, 6823), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (6821, 6823), True, 'import torch.nn as nn\n'), ((262, 329), 'torch._utils._rebuild_tensor', 'torch._utils._rebuild_tensor', (['storage', 'storage_offset', 'size', 'stride'], {}), '(storage, storage_offset, size, stride)\n', (290, 329), False, 'import torch\n'), ((6450, 6471), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6469, 6471), True, 'import torchvision.transforms as transforms\n'), ((6481, 6552), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.4914, 0.4822, 0.4465)', '(0.2023, 0.1994, 0.201)'], {}), '((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))\n', (6501, 6552), True, 'import torchvision.transforms as transforms\n'), ((8565, 8613), 'torch.clamp', 'torch.clamp', (['x_adversarial', 'x_val_min', 'x_val_max'], {}), '(x_adversarial, x_val_min, x_val_max)\n', (8576, 8613), False, 'import torch\n'), ((8748, 8771), 'torch.autograd.Variable', 'Variable', (['x_adversarial'], {}), '(x_adversarial)\n', (8756, 8771), False, 'from torch.autograd import Variable\n'), ((19211, 19227), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (19219, 19227), True, 'import numpy as np\n'), ((19255, 19275), 'numpy.array', 'np.array', (['images_adv'], {}), '(images_adv)\n', (19263, 19275), True, 'import numpy as np\n'), ((19298, 19313), 'numpy.array', 'np.array', (['noise'], {}), '(noise)\n', (19306, 19313), True, 'import numpy as np\n'), ((19337, 19353), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (19345, 19353), True, 'import numpy as np\n'), ((19382, 19403), 'numpy.array', 'np.array', (['labels_pred'], {}), '(labels_pred)\n', (19390, 19403), True, 'import numpy as np\n'), ((7986, 8015), 'torch.cuda.LongTensor', 'torch.cuda.LongTensor', (['y_Test'], {}), '(y_Test)\n', (8007, 8015), False, 'import torch\n'), ((11002, 11038), 'torch.autograd.Variable', 'Variable', (['x.data'], {'requires_grad': '(True)'}), '(x.data, requires_grad=True)\n', (11010, 11038), False, 'from torch.autograd import Variable\n'), ((11791, 11814), 'torch.autograd.Variable', 'Variable', (['x_adversarial'], {}), '(x_adversarial)\n', (11799, 11814), False, 'from torch.autograd import Variable\n'), ((10104, 10133), 'torch.cuda.LongTensor', 'torch.cuda.LongTensor', (['y_Test'], {}), '(y_Test)\n', (10125, 10133), False, 'import torch\n'), ((11584, 11624), 'torch.clamp', 'torch.clamp', (['x_adv', 'x_val_min', 'x_val_max'], {}), '(x_adv, x_val_min, x_val_max)\n', (11595, 11624), False, 'import torch\n'), ((11649, 11689), 'torch.autograd.Variable', 'Variable', (['x_adv.data'], {'requires_grad': '(True)'}), '(x_adv.data, requires_grad=True)\n', (11657, 11689), False, 'from torch.autograd import Variable\n'), ((13795, 13810), 'torch.autograd.Variable', 'Variable', (['input'], {}), '(input)\n', (13803, 13810), False, 'from torch.autograd import Variable\n'), ((13933, 13968), 'torch.autograd.Variable', 'Variable', (['input'], {'requires_grad': '(True)'}), '(input, requires_grad=True)\n', (13941, 13968), False, 'from torch.autograd import Variable\n'), ((14137, 14175), 'torch.optim.Adam', 'optim.Adam', (['[w]'], {'lr': 'cwl2_learning_rate'}), '([w], lr=cwl2_learning_rate)\n', (14147, 14175), True, 'import torch.optim as optim\n'), ((13228, 13257), 'torch.cuda.LongTensor', 'torch.cuda.LongTensor', (['y_Test'], {}), '(y_Test)\n', (13249, 13257), False, 'import torch\n'), ((14406, 14430), 'torch.max', 'torch.max', (['probs_data', '(1)'], {}), '(probs_data, 1)\n', (14415, 14430), False, 'import torch\n'), ((14555, 14579), 'torch.max', 'torch.max', (['probs_data', '(1)'], {}), '(probs_data, 1)\n', (14564, 14579), False, 'import torch\n'), ((15289, 15325), 'torch.autograd.Variable', 'Variable', (['w_data'], {'requires_grad': '(True)'}), '(w_data, requires_grad=True)\n', (15297, 15325), False, 'from torch.autograd import Variable\n'), ((16807, 16844), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['x_adversarial'], {}), '(x_adversarial)\n', (16829, 16844), False, 'import torch\n'), ((18261, 18290), 'torch.cuda.LongTensor', 'torch.cuda.LongTensor', (['y_Test'], {}), '(y_Test)\n', (18282, 18290), False, 'import torch\n'), ((15154, 15181), 'torch.pow', 'torch.pow', (['(w - input_var)', '(2)'], {}), '(w - input_var, 2)\n', (15163, 15181), False, 'import torch\n'), ((15553, 15618), 'torch.clamp', 'torch.clamp', (['(output[0][y_pred] - output[0][argmax] + kappa)'], {'min': '(0)'}), '(output[0][y_pred] - output[0][argmax] + kappa, min=0)\n', (15564, 15618), False, 'import torch\n')]
|
# Write a Python program to get the name of the host on which the routine is running.
import socket
host_name = socket.gethostname()
print("Host name:", host_name)
|
[
"socket.gethostname"
] |
[((113, 133), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (131, 133), False, 'import socket\n')]
|
# encoding: utf-8
__author__ = "<NAME>"
# Parts of the code have been taken from https://github.com/facebookresearch/fastMRI
import numpy as np
import pytest
import torch
from tests.collections.reconstruction.fastmri.create_temp_data import create_temp_data
# these are really slow - skip by default
SKIP_INTEGRATIONS = True
def create_input(shape):
"""
Create a random input tensor of the given shape.
Args:
shape: The shape of the input tensor.
Returns:
A random input tensor.
"""
x = np.arange(np.product(shape)).reshape(shape)
x = torch.from_numpy(x).float()
return x
@pytest.fixture(scope="session")
def fastmri_mock_dataset(tmp_path_factory):
"""
Create a mock dataset for testing.
Args:
tmp_path_factory: A temporary path factory.
Returns:
A mock dataset.
"""
path = tmp_path_factory.mktemp("fastmri_data")
return create_temp_data(path)
@pytest.fixture
def skip_integration_tests():
"""
Skip integration tests if the environment variable is set.
Returns:
A boolean indicating whether to skip integration tests.
"""
return SKIP_INTEGRATIONS
@pytest.fixture
def knee_split_lens():
"""
The split lengths for the knee dataset.
Returns:
A dictionary with the split lengths.
"""
return {
"multicoil_train": 34742,
"multicoil_val": 7135,
"multicoil_test": 4092,
"singlecoil_train": 34742,
"singlecoil_val": 7135,
"singlecoil_test": 3903,
}
@pytest.fixture
def brain_split_lens():
"""
The split lengths for the brain dataset.
Returns:
A dictionary with the split lengths.
"""
return {
"multicoil_train": 70748,
"multicoil_val": 21842,
"multicoil_test": 8852,
}
|
[
"numpy.product",
"tests.collections.reconstruction.fastmri.create_temp_data.create_temp_data",
"pytest.fixture",
"torch.from_numpy"
] |
[((632, 663), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (646, 663), False, 'import pytest\n'), ((927, 949), 'tests.collections.reconstruction.fastmri.create_temp_data.create_temp_data', 'create_temp_data', (['path'], {}), '(path)\n', (943, 949), False, 'from tests.collections.reconstruction.fastmri.create_temp_data import create_temp_data\n'), ((587, 606), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (603, 606), False, 'import torch\n'), ((545, 562), 'numpy.product', 'np.product', (['shape'], {}), '(shape)\n', (555, 562), True, 'import numpy as np\n')]
|
import discord
from discord import Forbidden
from discord.ext import commands
from discord.http import Route
from utils import checks
MUTED_ROLE = "316134780976758786"
class Moderation:
def __init__(self, bot):
self.bot = bot
self.no_ban_logs = set()
@commands.command(hidden=True, pass_context=True, no_pm=True)
@checks.mod_or_permissions(manage_messages=True)
async def slowmode(self, ctx, timeout: int = 10, channel: discord.Channel = None):
"""Slows a channel."""
if channel is None:
channel = ctx.message.channel
try:
await self.bot.http.request(Route('PATCH', '/channels/{channel_id}', channel_id=channel.id),
json={"rate_limit_per_user": timeout})
await self.bot.say(f"Ratelimit set to {timeout} seconds in {channel}.")
except:
await self.bot.say("Failed to set ratelimit.")
@commands.command(hidden=True, pass_context=True, no_pm=True)
@checks.mod_or_permissions(manage_messages=True)
async def purge_bot(self, ctx, limit: int = 50):
"""Purges bot messages from the last [limit] messages (default 50)."""
deleted = await self.bot.purge_from(ctx.message.channel, check=lambda m: m.author.bot, limit=limit)
await self.bot.say("Cleaned {} messages.".format(len(deleted)))
@commands.command(pass_context=True)
@checks.mod_or_permissions(manage_messages=True)
async def purge(self, ctx, num: int):
"""Purges messages from the channel.
Requires: Bot Mod or Manage Messages"""
try:
await self.bot.purge_from(ctx.message.channel, limit=(num + 1))
except Exception as e:
await self.bot.say('Failed to purge: ' + str(e))
@commands.command(hidden=True, pass_context=True, no_pm=True)
@checks.mod_or_permissions(manage_roles=True)
async def copyperms(self, ctx, role: discord.Role, source: discord.Channel, overwrite: bool = False):
"""Copies permission overrides for one role from one channel to all others of the same type."""
source_chan = source
source_role = role
source_overrides = source_chan.overwrites_for(source_role)
skipped = []
for chan in ctx.message.server.channels:
if chan.type != source_chan.type:
continue
chan_overrides = chan.overwrites_for(source_role)
if chan_overrides.is_empty() or overwrite:
await self.bot.edit_channel_permissions(chan, source_role, source_overrides)
else:
skipped.append(chan.name)
if skipped:
skipped_str = ', '.join(skipped)
await self.bot.say(f":ok_hand:\n"
f"Skipped {skipped_str}; use `.copyperms {role} {source} true` to overwrite existing.")
else:
await self.bot.say(f":ok_hand:")
@commands.command(hidden=True, pass_context=True, no_pm=True)
@checks.mod_or_permissions(ban_members=True)
async def raidmode(self, ctx, method='kick'):
"""Toggles raidmode in a server.
Methods: kick, ban, lockdown"""
if method not in ("kick", "ban", "lockdown"):
return await self.bot.say("Raidmode method must be kick, ban, or lockdown.")
server_settings = await self.get_server_settings(ctx.message.server.id, ['raidmode', 'locked_channels'])
if server_settings['raidmode']:
if server_settings['raidmode'] == 'lockdown':
await self.end_lockdown(ctx, server_settings)
server_settings['raidmode'] = None
out = "Raid mode disabled."
else:
if method == 'lockdown':
await self.start_lockdown(ctx, server_settings)
server_settings['raidmode'] = method
out = f"Raid mode enabled. Method: {method}"
await self.set_server_settings(ctx.message.server.id, server_settings)
await self.bot.say(out)
@commands.command(hidden=True, pass_context=True)
@checks.mod_or_permissions(manage_roles=True)
async def mute(self, ctx, target: discord.Member, *, reason="Unknown reason"):
"""Toggles mute on a member."""
role = discord.utils.get(ctx.message.server.roles, id=MUTED_ROLE)
server_settings = await self.get_server_settings(ctx.message.server.id, ['cases', 'casenum'])
if role in target.roles:
try:
self.no_ban_logs.add(ctx.message.server.id)
await self.bot.remove_roles(target, role)
except Forbidden:
return await self.bot.say("Error: The bot does not have `manage_roles` permission.")
finally:
self.no_ban_logs.remove(ctx.message.server.id)
case = Case.new(num=server_settings['casenum'], type_='unmute', user=target.id, username=str(target),
reason=reason, mod=str(ctx.message.author))
else:
try:
self.no_ban_logs.add(ctx.message.server.id)
await self.bot.add_roles(target, role)
except Forbidden:
return await self.bot.say("Error: The bot does not have `manage_roles` permission.")
finally:
self.no_ban_logs.remove(ctx.message.server.id)
case = Case.new(num=server_settings['casenum'], type_='mute', user=target.id, username=str(target),
reason=reason, mod=str(ctx.message.author))
await self.post_action(ctx.message.server, server_settings, case)
@commands.command(hidden=True, pass_context=True)
@checks.mod_or_permissions(kick_members=True)
async def kick(self, ctx, user: discord.Member, *, reason='Unknown reason'):
"""Kicks a member and logs it to #mod-log."""
try:
await self.bot.kick(user)
except Forbidden:
return await self.bot.say('Error: The bot does not have `kick_members` permission.')
server_settings = await self.get_server_settings(ctx.message.server.id, ['cases', 'casenum'])
case = Case.new(num=server_settings['casenum'], type_='kick', user=user.id, username=str(user), reason=reason,
mod=str(ctx.message.author))
await self.post_action(ctx.message.server, server_settings, case)
@commands.command(hidden=True, pass_context=True)
@checks.mod_or_permissions(ban_members=True)
async def ban(self, ctx, user: discord.Member, *, reason='Unknown reason'):
"""Bans a member and logs it to #mod-log."""
try:
self.no_ban_logs.add(ctx.message.server.id)
await self.bot.ban(user)
except Forbidden:
return await self.bot.say('Error: The bot does not have `ban_members` permission.')
finally:
self.no_ban_logs.remove(ctx.message.server.id)
server_settings = await self.get_server_settings(ctx.message.server.id, ['cases', 'casenum'])
case = Case.new(num=server_settings['casenum'], type_='ban', user=user.id, username=str(user), reason=reason,
mod=str(ctx.message.author))
await self.post_action(ctx.message.server, server_settings, case)
@commands.command(hidden=True, pass_context=True)
@checks.mod_or_permissions(ban_members=True)
async def forceban(self, ctx, user, *, reason='Unknown reason'):
"""Force-bans a member ID and logs it to #mod-log."""
member = discord.utils.get(ctx.message.server.members, id=user)
if member: # if they're still in the server, normal ban them
return await ctx.invoke(self.ban, member, reason=reason)
user_obj = await self.bot.get_user_info(user)
server_settings = await self.get_server_settings(ctx.message.server.id, ['cases', 'casenum', 'forcebanned'])
server_settings['forcebanned'].append(user)
case = Case.new(num=server_settings['casenum'], type_='forceban', user=user, username=str(user_obj),
reason=reason, mod=str(ctx.message.author))
await self.post_action(ctx.message.server, server_settings, case)
@commands.command(hidden=True, pass_context=True)
@checks.mod_or_permissions(ban_members=True)
async def softban(self, ctx, user: discord.Member, *, reason='Unknown reason'):
"""Softbans a member and logs it to #mod-log."""
try:
self.no_ban_logs.add(ctx.message.server.id)
await self.bot.ban(user)
await self.bot.unban(ctx.message.server, user)
except Forbidden:
return await self.bot.say('Error: The bot does not have `ban_members` permission.')
finally:
self.no_ban_logs.remove(ctx.message.server.id)
server_settings = await self.get_server_settings(ctx.message.server.id, ['cases', 'casenum'])
case = Case.new(num=server_settings['casenum'], type_='softban', user=user.id, username=str(user),
reason=reason, mod=str(ctx.message.author))
await self.post_action(ctx.message.server, server_settings, case)
@commands.command(hidden=True, pass_context=True)
@checks.mod_or_permissions(kick_members=True)
async def reason(self, ctx, case_num: int, *, reason):
"""Sets the reason for a post in mod-log."""
server_settings = await self.get_server_settings(ctx.message.server.id, ['cases'])
cases = server_settings['cases']
case = next((c for c in cases if c['num'] == case_num), None)
if case is None:
return await self.bot.say(f"Case {case_num} not found.")
case = Case.from_dict(case)
case.reason = reason
case.mod = str(ctx.message.author)
mod_log = discord.utils.get(ctx.message.server.channels, name='mod-log')
if mod_log is not None and case.log_msg:
log_message = await self.bot.get_message(mod_log, case.log_msg)
await self.bot.edit_message(log_message, str(case))
await self.set_server_settings(ctx.message.server.id, server_settings)
await self.bot.say(':ok_hand:')
async def post_action(self, server, server_settings, case, no_msg=False):
"""Common function after a moderative action."""
server_settings['casenum'] += 1
mod_log = discord.utils.get(server.channels, name='mod-log')
if mod_log is not None:
msg = await self.bot.send_message(mod_log, str(case))
case.log_msg = msg.id
server_settings['cases'].append(case.to_dict())
await self.set_server_settings(server.id, server_settings)
if not no_msg:
await self.bot.say(':ok_hand:')
async def start_lockdown(self, ctx, server_settings):
"""Disables Send Messages permission for everyone in every channel."""
server_settings['locked_channels'] = []
everyone_role = ctx.message.server.default_role
for channel in ctx.message.server.channels:
if not channel.type == discord.ChannelType.text:
continue
overwrites = channel.overwrites_for(everyone_role)
if overwrites.send_messages is not False: # is not false, since it could be None
overwrites.send_messages = False
server_settings['locked_channels'].append(channel.id)
await self.bot.edit_channel_permissions(channel, everyone_role, overwrite=overwrites)
await self.bot.say(f"Locked down {len(server_settings['locked_channels'])} channels.")
async def end_lockdown(self, ctx, server_settings):
"""Reenables Send Messages for everyone in locked-down channels."""
everyone_role = ctx.message.server.default_role
for chan in server_settings['locked_channels']:
channel = discord.utils.get(ctx.message.server.channels, id=chan)
overwrites = channel.overwrites_for(everyone_role)
overwrites.send_messages = None
await self.bot.edit_channel_permissions(channel, everyone_role, overwrite=overwrites)
await self.bot.say(f"Unlocked {len(server_settings['locked_channels'])} channels.")
server_settings['locked_channels'] = []
async def check_raidmode(self, server_settings, member):
"""Checks whether a newly-joined member should be removed due to raidmode."""
try:
self.no_ban_logs.add(member.server.id)
if not server_settings['raidmode']:
return
elif server_settings['raidmode'] == 'kick':
await self.bot.kick(member)
action = 'kick'
else:
await self.bot.ban(member)
action = 'ban'
except Forbidden:
return
finally:
self.no_ban_logs.remove(member.server.id)
case = Case.new(num=server_settings['casenum'], type_=action, user=member.id, username=str(member),
reason=f"Raidmode auto{action}", mod=str(self.bot.user))
await self.post_action(member.server, server_settings, case, no_msg=True)
async def check_forceban(self, server_settings, member):
"""Checks whether a newly-joined member should be removed due to forceban."""
if member.id in server_settings['forcebanned']:
try:
self.no_ban_logs.add(member.server.id)
await self.bot.ban(member)
except Forbidden:
return
finally:
self.no_ban_logs.remove(member.server.id)
case = Case.new(num=server_settings['casenum'], type_='ban', user=member.id, username=str(member),
reason="User forcebanned previously", mod=str(self.bot.user))
await self.post_action(member.server, server_settings, case, no_msg=True)
async def on_message_delete(self, message):
if not message.server:
return # PMs
msg_log = discord.utils.get(message.server.channels, name="message-log")
if not msg_log:
return
embed = discord.Embed()
embed.title = f"{message.author} deleted a message in {message.channel}."
if message.content:
embed.description = message.content
for attachment in message.attachments:
embed.add_field(name="Attachment", value=attachment['url'])
embed.colour = 0xff615b
embed.set_footer(text="Originally sent")
embed.timestamp = message.timestamp
await self.bot.send_message(msg_log, embed=embed)
async def on_message_edit(self, before, after):
if not before.server:
return # PMs
msg_log = discord.utils.get(before.server.channels, name="message-log")
if not msg_log:
return
if before.content == after.content:
return
embed = discord.Embed()
embed.title = f"{before.author} edited a message in {before.channel} (below is original message)."
if before.content:
embed.description = before.content
for attachment in before.attachments:
embed.add_field(name="Attachment", value=attachment['url'])
embed.colour = 0x5b92ff
if len(after.content) < 1000:
new = after.content
else:
new = str(after.content)[:1000] + "..."
embed.add_field(name="New Content", value=new)
await self.bot.send_message(msg_log, embed=embed)
async def on_member_join(self, member):
server_settings = await self.get_server_settings(member.server.id)
await self.check_raidmode(server_settings, member)
await self.check_forceban(server_settings, member)
async def on_member_ban(self, member):
if member.server.id in self.no_ban_logs:
return
server_settings = await self.get_server_settings(member.server.id, ['cases', 'casenum'])
case = Case.new(num=server_settings['casenum'], type_='ban', user=member.id, username=str(member),
reason="Unknown reason")
await self.post_action(member.server, server_settings, case, no_msg=True)
async def on_member_unban(self, server, user):
if server.id in self.no_ban_logs:
return
server_settings = await self.get_server_settings(server.id, ['cases', 'casenum'])
case = Case.new(num=server_settings['casenum'], type_='unban', user=user.id, username=str(user),
reason="Unknown reason")
await self.post_action(server, server_settings, case, no_msg=True)
async def on_member_update(self, before, after):
if before.server.id in self.no_ban_logs:
return
role = discord.utils.get(before.server.roles, id=MUTED_ROLE)
if role not in before.roles and role in after.roles: # just muted
server_settings = await self.get_server_settings(before.server.id, ['cases', 'casenum'])
case = Case.new(num=server_settings['casenum'], type_='mute', user=after.id, username=str(after),
reason="Unknown reason")
elif role in before.roles and role not in after.roles: # just unmuted
server_settings = await self.get_server_settings(before.server.id, ['cases', 'casenum'])
case = Case.new(num=server_settings['casenum'], type_='unmute', user=after.id, username=str(after),
reason="Unknown reason")
else:
return
await self.post_action(before.server, server_settings, case, no_msg=True)
async def get_server_settings(self, server_id, projection=None):
server_settings = await self.bot.mdb.mod.find_one({"server": server_id}, projection)
if server_settings is None:
server_settings = get_default_settings(server_id)
return server_settings
async def set_server_settings(self, server_id, settings):
await self.bot.mdb.mod.update_one(
{"server": server_id},
{"$set": settings}, upsert=True
)
def get_default_settings(server):
return {
"server": server,
"raidmode": None,
"cases": [],
"casenum": 1,
"forcebanned": [],
"locked_channels": []
}
class Case:
def __init__(self, num, type_, user, reason, mod=None, log_msg=None, username=None):
self.num = num
self.type = type_
self.user = user
self.username = username
self.reason = reason
self.mod = mod
self.log_msg = log_msg
@classmethod
def new(cls, num, type_, user, reason, mod=None, username=None):
return cls(num, type_, user, reason, mod=mod, username=username)
@classmethod
def from_dict(cls, raw):
raw['type_'] = raw.pop('type')
return cls(**raw)
def to_dict(self):
return {"num": self.num, "type": self.type, "user": self.user, "reason": self.reason, "mod": self.mod,
"log_msg": self.log_msg, "username": self.username}
def __str__(self):
if self.username:
user = f"{self.username} ({self.user})"
else:
user = self.user
if self.mod:
modstr = self.mod
else:
modstr = f"Responsible moderator, do `.reason {self.num} <reason>`"
return f'**{self.type.title()}** | Case {self.num}\n' \
f'**User**: {user}\n' \
f'**Reason**: {self.reason}\n' \
f'**Responsible Mod**: {modstr}'
def setup(bot):
bot.add_cog(Moderation(bot))
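# A minimal usage sketch, not part of the original cog: it shows how a Case is meant to
# round-trip through to_dict()/from_dict() (the shape stored under "cases" in the mod
# settings) and how it renders for the mod-log; every value below is made up for illustration.
if __name__ == "__main__":
    demo = Case.new(num=1, type_='ban', user='1234', reason='spam',
                    mod='Mod#0001', username='BadUser#9999')
    stored = demo.to_dict()            # dict form written to the database
    restored = Case.from_dict(stored)  # from_dict() maps "type" back to type_
    print(restored)                    # same text the mod-log message uses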
|
[
"discord.utils.get",
"discord.ext.commands.command",
"discord.Embed",
"utils.checks.mod_or_permissions",
"discord.http.Route"
] |
[((281, 341), 'discord.ext.commands.command', 'commands.command', ([], {'hidden': '(True)', 'pass_context': '(True)', 'no_pm': '(True)'}), '(hidden=True, pass_context=True, no_pm=True)\n', (297, 341), False, 'from discord.ext import commands\n'), ((347, 394), 'utils.checks.mod_or_permissions', 'checks.mod_or_permissions', ([], {'manage_messages': '(True)'}), '(manage_messages=True)\n', (372, 394), False, 'from utils import checks\n'), ((945, 1005), 'discord.ext.commands.command', 'commands.command', ([], {'hidden': '(True)', 'pass_context': '(True)', 'no_pm': '(True)'}), '(hidden=True, pass_context=True, no_pm=True)\n', (961, 1005), False, 'from discord.ext import commands\n'), ((1011, 1058), 'utils.checks.mod_or_permissions', 'checks.mod_or_permissions', ([], {'manage_messages': '(True)'}), '(manage_messages=True)\n', (1036, 1058), False, 'from utils import checks\n'), ((1377, 1412), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (1393, 1412), False, 'from discord.ext import commands\n'), ((1418, 1465), 'utils.checks.mod_or_permissions', 'checks.mod_or_permissions', ([], {'manage_messages': '(True)'}), '(manage_messages=True)\n', (1443, 1465), False, 'from utils import checks\n'), ((1788, 1848), 'discord.ext.commands.command', 'commands.command', ([], {'hidden': '(True)', 'pass_context': '(True)', 'no_pm': '(True)'}), '(hidden=True, pass_context=True, no_pm=True)\n', (1804, 1848), False, 'from discord.ext import commands\n'), ((1854, 1898), 'utils.checks.mod_or_permissions', 'checks.mod_or_permissions', ([], {'manage_roles': '(True)'}), '(manage_roles=True)\n', (1879, 1898), False, 'from utils import checks\n'), ((2939, 2999), 'discord.ext.commands.command', 'commands.command', ([], {'hidden': '(True)', 'pass_context': '(True)', 'no_pm': '(True)'}), '(hidden=True, pass_context=True, no_pm=True)\n', (2955, 2999), False, 'from discord.ext import commands\n'), ((3005, 3048), 'utils.checks.mod_or_permissions', 'checks.mod_or_permissions', ([], {'ban_members': '(True)'}), '(ban_members=True)\n', (3030, 3048), False, 'from utils import checks\n'), ((4024, 4072), 'discord.ext.commands.command', 'commands.command', ([], {'hidden': '(True)', 'pass_context': '(True)'}), '(hidden=True, pass_context=True)\n', (4040, 4072), False, 'from discord.ext import commands\n'), ((4078, 4122), 'utils.checks.mod_or_permissions', 'checks.mod_or_permissions', ([], {'manage_roles': '(True)'}), '(manage_roles=True)\n', (4103, 4122), False, 'from utils import checks\n'), ((5618, 5666), 'discord.ext.commands.command', 'commands.command', ([], {'hidden': '(True)', 'pass_context': '(True)'}), '(hidden=True, pass_context=True)\n', (5634, 5666), False, 'from discord.ext import commands\n'), ((5672, 5716), 'utils.checks.mod_or_permissions', 'checks.mod_or_permissions', ([], {'kick_members': '(True)'}), '(kick_members=True)\n', (5697, 5716), False, 'from utils import checks\n'), ((6382, 6430), 'discord.ext.commands.command', 'commands.command', ([], {'hidden': '(True)', 'pass_context': '(True)'}), '(hidden=True, pass_context=True)\n', (6398, 6430), False, 'from discord.ext import commands\n'), ((6436, 6479), 'utils.checks.mod_or_permissions', 'checks.mod_or_permissions', ([], {'ban_members': '(True)'}), '(ban_members=True)\n', (6461, 6479), False, 'from utils import checks\n'), ((7272, 7320), 'discord.ext.commands.command', 'commands.command', ([], {'hidden': '(True)', 'pass_context': '(True)'}), '(hidden=True, pass_context=True)\n', (7288, 7320), False, 'from 
discord.ext import commands\n'), ((7326, 7369), 'utils.checks.mod_or_permissions', 'checks.mod_or_permissions', ([], {'ban_members': '(True)'}), '(ban_members=True)\n', (7351, 7369), False, 'from utils import checks\n'), ((8195, 8243), 'discord.ext.commands.command', 'commands.command', ([], {'hidden': '(True)', 'pass_context': '(True)'}), '(hidden=True, pass_context=True)\n', (8211, 8243), False, 'from discord.ext import commands\n'), ((8249, 8292), 'utils.checks.mod_or_permissions', 'checks.mod_or_permissions', ([], {'ban_members': '(True)'}), '(ban_members=True)\n', (8274, 8292), False, 'from utils import checks\n'), ((9156, 9204), 'discord.ext.commands.command', 'commands.command', ([], {'hidden': '(True)', 'pass_context': '(True)'}), '(hidden=True, pass_context=True)\n', (9172, 9204), False, 'from discord.ext import commands\n'), ((9210, 9254), 'utils.checks.mod_or_permissions', 'checks.mod_or_permissions', ([], {'kick_members': '(True)'}), '(kick_members=True)\n', (9235, 9254), False, 'from utils import checks\n'), ((4261, 4319), 'discord.utils.get', 'discord.utils.get', (['ctx.message.server.roles'], {'id': 'MUTED_ROLE'}), '(ctx.message.server.roles, id=MUTED_ROLE)\n', (4278, 4319), False, 'import discord\n'), ((7518, 7572), 'discord.utils.get', 'discord.utils.get', (['ctx.message.server.members'], {'id': 'user'}), '(ctx.message.server.members, id=user)\n', (7535, 7572), False, 'import discord\n'), ((9791, 9853), 'discord.utils.get', 'discord.utils.get', (['ctx.message.server.channels'], {'name': '"""mod-log"""'}), "(ctx.message.server.channels, name='mod-log')\n", (9808, 9853), False, 'import discord\n'), ((10357, 10407), 'discord.utils.get', 'discord.utils.get', (['server.channels'], {'name': '"""mod-log"""'}), "(server.channels, name='mod-log')\n", (10374, 10407), False, 'import discord\n'), ((14011, 14073), 'discord.utils.get', 'discord.utils.get', (['message.server.channels'], {'name': '"""message-log"""'}), "(message.server.channels, name='message-log')\n", (14028, 14073), False, 'import discord\n'), ((14133, 14148), 'discord.Embed', 'discord.Embed', ([], {}), '()\n', (14146, 14148), False, 'import discord\n'), ((14736, 14797), 'discord.utils.get', 'discord.utils.get', (['before.server.channels'], {'name': '"""message-log"""'}), "(before.server.channels, name='message-log')\n", (14753, 14797), False, 'import discord\n'), ((14920, 14935), 'discord.Embed', 'discord.Embed', ([], {}), '()\n', (14933, 14935), False, 'import discord\n'), ((16772, 16825), 'discord.utils.get', 'discord.utils.get', (['before.server.roles'], {'id': 'MUTED_ROLE'}), '(before.server.roles, id=MUTED_ROLE)\n', (16789, 16825), False, 'import discord\n'), ((11853, 11908), 'discord.utils.get', 'discord.utils.get', (['ctx.message.server.channels'], {'id': 'chan'}), '(ctx.message.server.channels, id=chan)\n', (11870, 11908), False, 'import discord\n'), ((636, 699), 'discord.http.Route', 'Route', (['"""PATCH"""', '"""/channels/{channel_id}"""'], {'channel_id': 'channel.id'}), "('PATCH', '/channels/{channel_id}', channel_id=channel.id)\n", (641, 699), False, 'from discord.http import Route\n')]
|
"""Tibber custom"""
import logging
import homeassistant.helpers.config_validation as cv
import voluptuous as vol
from homeassistant.const import EVENT_HOMEASSISTANT_START
from homeassistant.helpers import discovery
DOMAIN = "tibber_custom"
CONF_USE_DARK_MODE = "use_dark_mode"
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_USE_DARK_MODE, default=False): cv.boolean,
})
}, extra=vol.ALLOW_EXTRA)
DEPENDENCIES = ["tibber"]
_LOGGER = logging.getLogger(__name__)
def setup(hass, config):
"""Setup component."""
use_dark_mode = config[DOMAIN][CONF_USE_DARK_MODE]
def ha_started(_):
discovery.load_platform(hass, "camera", DOMAIN, {}, config)
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, ha_started)
return True
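# A small illustrative check, not part of the integration: the YAML entry
# `tibber_custom: { use_dark_mode: true }` reaches setup() as the dict below, and
# use_dark_mode is optional with a default of False.
if __name__ == "__main__":
    validated = CONFIG_SCHEMA({DOMAIN: {CONF_USE_DARK_MODE: True}})
    print(validated[DOMAIN][CONF_USE_DARK_MODE])  # -> True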
|
[
"homeassistant.helpers.discovery.load_platform",
"voluptuous.Optional",
"logging.getLogger"
] |
[((477, 504), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (494, 504), False, 'import logging\n'), ((647, 706), 'homeassistant.helpers.discovery.load_platform', 'discovery.load_platform', (['hass', '"""camera"""', 'DOMAIN', '{}', 'config'], {}), "(hass, 'camera', DOMAIN, {}, config)\n", (670, 706), False, 'from homeassistant.helpers import discovery\n'), ((344, 391), 'voluptuous.Optional', 'vol.Optional', (['CONF_USE_DARK_MODE'], {'default': '(False)'}), '(CONF_USE_DARK_MODE, default=False)\n', (356, 391), True, 'import voluptuous as vol\n')]
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['SubnetCidrReservationArgs', 'SubnetCidrReservation']
@pulumi.input_type
class SubnetCidrReservationArgs:
def __init__(__self__, *,
cidr_block: pulumi.Input[str],
reservation_type: pulumi.Input[str],
subnet_id: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a SubnetCidrReservation resource.
:param pulumi.Input[str] cidr_block: The CIDR block for the reservation.
:param pulumi.Input[str] reservation_type: The type of reservation to create. Valid values: `explicit`, `prefix`
:param pulumi.Input[str] subnet_id: The ID of the subnet to create the reservation for.
:param pulumi.Input[str] description: A brief description of the reservation.
"""
pulumi.set(__self__, "cidr_block", cidr_block)
pulumi.set(__self__, "reservation_type", reservation_type)
pulumi.set(__self__, "subnet_id", subnet_id)
if description is not None:
pulumi.set(__self__, "description", description)
@property
@pulumi.getter(name="cidrBlock")
def cidr_block(self) -> pulumi.Input[str]:
"""
The CIDR block for the reservation.
"""
return pulumi.get(self, "cidr_block")
@cidr_block.setter
def cidr_block(self, value: pulumi.Input[str]):
pulumi.set(self, "cidr_block", value)
@property
@pulumi.getter(name="reservationType")
def reservation_type(self) -> pulumi.Input[str]:
"""
The type of reservation to create. Valid values: `explicit`, `prefix`
"""
return pulumi.get(self, "reservation_type")
@reservation_type.setter
def reservation_type(self, value: pulumi.Input[str]):
pulumi.set(self, "reservation_type", value)
@property
@pulumi.getter(name="subnetId")
def subnet_id(self) -> pulumi.Input[str]:
"""
The ID of the subnet to create the reservation for.
"""
return pulumi.get(self, "subnet_id")
@subnet_id.setter
def subnet_id(self, value: pulumi.Input[str]):
pulumi.set(self, "subnet_id", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A brief description of the reservation.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@pulumi.input_type
class _SubnetCidrReservationState:
def __init__(__self__, *,
cidr_block: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
owner_id: Optional[pulumi.Input[str]] = None,
reservation_type: Optional[pulumi.Input[str]] = None,
subnet_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering SubnetCidrReservation resources.
:param pulumi.Input[str] cidr_block: The CIDR block for the reservation.
:param pulumi.Input[str] description: A brief description of the reservation.
:param pulumi.Input[str] owner_id: ID of the AWS account that owns this CIDR reservation.
:param pulumi.Input[str] reservation_type: The type of reservation to create. Valid values: `explicit`, `prefix`
:param pulumi.Input[str] subnet_id: The ID of the subnet to create the reservation for.
"""
if cidr_block is not None:
pulumi.set(__self__, "cidr_block", cidr_block)
if description is not None:
pulumi.set(__self__, "description", description)
if owner_id is not None:
pulumi.set(__self__, "owner_id", owner_id)
if reservation_type is not None:
pulumi.set(__self__, "reservation_type", reservation_type)
if subnet_id is not None:
pulumi.set(__self__, "subnet_id", subnet_id)
@property
@pulumi.getter(name="cidrBlock")
def cidr_block(self) -> Optional[pulumi.Input[str]]:
"""
The CIDR block for the reservation.
"""
return pulumi.get(self, "cidr_block")
@cidr_block.setter
def cidr_block(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cidr_block", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A brief description of the reservation.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="ownerId")
def owner_id(self) -> Optional[pulumi.Input[str]]:
"""
ID of the AWS account that owns this CIDR reservation.
"""
return pulumi.get(self, "owner_id")
@owner_id.setter
def owner_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "owner_id", value)
@property
@pulumi.getter(name="reservationType")
def reservation_type(self) -> Optional[pulumi.Input[str]]:
"""
The type of reservation to create. Valid values: `explicit`, `prefix`
"""
return pulumi.get(self, "reservation_type")
@reservation_type.setter
def reservation_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "reservation_type", value)
@property
@pulumi.getter(name="subnetId")
def subnet_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the subnet to create the reservation for.
"""
return pulumi.get(self, "subnet_id")
@subnet_id.setter
def subnet_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "subnet_id", value)
class SubnetCidrReservation(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
cidr_block: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
reservation_type: Optional[pulumi.Input[str]] = None,
subnet_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a subnet CIDR reservation resource.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.ec2.SubnetCidrReservation("example",
cidr_block="10.0.0.16/28",
reservation_type="prefix",
subnet_id=aws_subnet["example"]["id"])
```
## Import
Existing CIDR reservations can be imported using `SUBNET_ID:RESERVATION_ID`, e.g.,
```sh
$ pulumi import aws:ec2/subnetCidrReservation:SubnetCidrReservation example subnet-01llsxvsxabqiymcz:scr-4mnvz6wb7otksjcs9
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] cidr_block: The CIDR block for the reservation.
:param pulumi.Input[str] description: A brief description of the reservation.
:param pulumi.Input[str] reservation_type: The type of reservation to create. Valid values: `explicit`, `prefix`
:param pulumi.Input[str] subnet_id: The ID of the subnet to create the reservation for.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: SubnetCidrReservationArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a subnet CIDR reservation resource.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.ec2.SubnetCidrReservation("example",
cidr_block="10.0.0.16/28",
reservation_type="prefix",
subnet_id=aws_subnet["example"]["id"])
```
## Import
Existing CIDR reservations can be imported using `SUBNET_ID:RESERVATION_ID`, e.g.,
```sh
$ pulumi import aws:ec2/subnetCidrReservation:SubnetCidrReservation example subnet-01llsxvsxabqiymcz:scr-4mnvz6wb7otksjcs9
```
:param str resource_name: The name of the resource.
:param SubnetCidrReservationArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SubnetCidrReservationArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
cidr_block: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
reservation_type: Optional[pulumi.Input[str]] = None,
subnet_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SubnetCidrReservationArgs.__new__(SubnetCidrReservationArgs)
if cidr_block is None and not opts.urn:
raise TypeError("Missing required property 'cidr_block'")
__props__.__dict__["cidr_block"] = cidr_block
__props__.__dict__["description"] = description
if reservation_type is None and not opts.urn:
raise TypeError("Missing required property 'reservation_type'")
__props__.__dict__["reservation_type"] = reservation_type
if subnet_id is None and not opts.urn:
raise TypeError("Missing required property 'subnet_id'")
__props__.__dict__["subnet_id"] = subnet_id
__props__.__dict__["owner_id"] = None
super(SubnetCidrReservation, __self__).__init__(
'aws:ec2/subnetCidrReservation:SubnetCidrReservation',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
cidr_block: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
owner_id: Optional[pulumi.Input[str]] = None,
reservation_type: Optional[pulumi.Input[str]] = None,
subnet_id: Optional[pulumi.Input[str]] = None) -> 'SubnetCidrReservation':
"""
Get an existing SubnetCidrReservation resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] cidr_block: The CIDR block for the reservation.
:param pulumi.Input[str] description: A brief description of the reservation.
:param pulumi.Input[str] owner_id: ID of the AWS account that owns this CIDR reservation.
:param pulumi.Input[str] reservation_type: The type of reservation to create. Valid values: `explicit`, `prefix`
:param pulumi.Input[str] subnet_id: The ID of the subnet to create the reservation for.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _SubnetCidrReservationState.__new__(_SubnetCidrReservationState)
__props__.__dict__["cidr_block"] = cidr_block
__props__.__dict__["description"] = description
__props__.__dict__["owner_id"] = owner_id
__props__.__dict__["reservation_type"] = reservation_type
__props__.__dict__["subnet_id"] = subnet_id
return SubnetCidrReservation(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="cidrBlock")
def cidr_block(self) -> pulumi.Output[str]:
"""
The CIDR block for the reservation.
"""
return pulumi.get(self, "cidr_block")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
A brief description of the reservation.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="ownerId")
def owner_id(self) -> pulumi.Output[str]:
"""
ID of the AWS account that owns this CIDR reservation.
"""
return pulumi.get(self, "owner_id")
@property
@pulumi.getter(name="reservationType")
def reservation_type(self) -> pulumi.Output[str]:
"""
The type of reservation to create. Valid values: `explicit`, `prefix`
"""
return pulumi.get(self, "reservation_type")
@property
@pulumi.getter(name="subnetId")
def subnet_id(self) -> pulumi.Output[str]:
"""
The ID of the subnet to create the reservation for.
"""
return pulumi.get(self, "subnet_id")
|
[
"pulumi.get",
"pulumi.getter",
"pulumi.ResourceOptions",
"pulumi.set"
] |
[((1477, 1508), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""cidrBlock"""'}), "(name='cidrBlock')\n", (1490, 1508), False, 'import pulumi\n'), ((1812, 1849), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""reservationType"""'}), "(name='reservationType')\n", (1825, 1849), False, 'import pulumi\n'), ((2217, 2247), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""subnetId"""'}), "(name='subnetId')\n", (2230, 2247), False, 'import pulumi\n'), ((4404, 4435), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""cidrBlock"""'}), "(name='cidrBlock')\n", (4417, 4435), False, 'import pulumi\n'), ((5105, 5134), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""ownerId"""'}), "(name='ownerId')\n", (5118, 5134), False, 'import pulumi\n'), ((5467, 5504), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""reservationType"""'}), "(name='reservationType')\n", (5480, 5504), False, 'import pulumi\n'), ((5892, 5922), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""subnetId"""'}), "(name='subnetId')\n", (5905, 5922), False, 'import pulumi\n'), ((13219, 13250), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""cidrBlock"""'}), "(name='cidrBlock')\n", (13232, 13250), False, 'import pulumi\n'), ((13645, 13674), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""ownerId"""'}), "(name='ownerId')\n", (13658, 13674), False, 'import pulumi\n'), ((13872, 13909), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""reservationType"""'}), "(name='reservationType')\n", (13885, 13909), False, 'import pulumi\n'), ((14138, 14168), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""subnetId"""'}), "(name='subnetId')\n", (14151, 14168), False, 'import pulumi\n'), ((1193, 1239), 'pulumi.set', 'pulumi.set', (['__self__', '"""cidr_block"""', 'cidr_block'], {}), "(__self__, 'cidr_block', cidr_block)\n", (1203, 1239), False, 'import pulumi\n'), ((1248, 1306), 'pulumi.set', 'pulumi.set', (['__self__', '"""reservation_type"""', 'reservation_type'], {}), "(__self__, 'reservation_type', reservation_type)\n", (1258, 1306), False, 'import pulumi\n'), ((1315, 1359), 'pulumi.set', 'pulumi.set', (['__self__', '"""subnet_id"""', 'subnet_id'], {}), "(__self__, 'subnet_id', subnet_id)\n", (1325, 1359), False, 'import pulumi\n'), ((1639, 1669), 'pulumi.get', 'pulumi.get', (['self', '"""cidr_block"""'], {}), "(self, 'cidr_block')\n", (1649, 1669), False, 'import pulumi\n'), ((1754, 1791), 'pulumi.set', 'pulumi.set', (['self', '"""cidr_block"""', 'value'], {}), "(self, 'cidr_block', value)\n", (1764, 1791), False, 'import pulumi\n'), ((2020, 2056), 'pulumi.get', 'pulumi.get', (['self', '"""reservation_type"""'], {}), "(self, 'reservation_type')\n", (2030, 2056), False, 'import pulumi\n'), ((2153, 2196), 'pulumi.set', 'pulumi.set', (['self', '"""reservation_type"""', 'value'], {}), "(self, 'reservation_type', value)\n", (2163, 2196), False, 'import pulumi\n'), ((2393, 2422), 'pulumi.get', 'pulumi.get', (['self', '"""subnet_id"""'], {}), "(self, 'subnet_id')\n", (2403, 2422), False, 'import pulumi\n'), ((2505, 2541), 'pulumi.set', 'pulumi.set', (['self', '"""subnet_id"""', 'value'], {}), "(self, 'subnet_id', value)\n", (2515, 2541), False, 'import pulumi\n'), ((2721, 2752), 'pulumi.get', 'pulumi.get', (['self', '"""description"""'], {}), "(self, 'description')\n", (2731, 2752), False, 'import pulumi\n'), ((2849, 2887), 'pulumi.set', 'pulumi.set', (['self', '"""description"""', 'value'], {}), "(self, 'description', value)\n", (2859, 2887), False, 'import pulumi\n'), ((4576, 4606), 'pulumi.get', 'pulumi.get', (['self', 
'"""cidr_block"""'], {}), "(self, 'cidr_block')\n", (4586, 4606), False, 'import pulumi\n'), ((4701, 4738), 'pulumi.set', 'pulumi.set', (['self', '"""cidr_block"""', 'value'], {}), "(self, 'cidr_block', value)\n", (4711, 4738), False, 'import pulumi\n'), ((4918, 4949), 'pulumi.get', 'pulumi.get', (['self', '"""description"""'], {}), "(self, 'description')\n", (4928, 4949), False, 'import pulumi\n'), ((5046, 5084), 'pulumi.set', 'pulumi.set', (['self', '"""description"""', 'value'], {}), "(self, 'description', value)\n", (5056, 5084), False, 'import pulumi\n'), ((5292, 5320), 'pulumi.get', 'pulumi.get', (['self', '"""owner_id"""'], {}), "(self, 'owner_id')\n", (5302, 5320), False, 'import pulumi\n'), ((5411, 5446), 'pulumi.set', 'pulumi.set', (['self', '"""owner_id"""', 'value'], {}), "(self, 'owner_id', value)\n", (5421, 5446), False, 'import pulumi\n'), ((5685, 5721), 'pulumi.get', 'pulumi.get', (['self', '"""reservation_type"""'], {}), "(self, 'reservation_type')\n", (5695, 5721), False, 'import pulumi\n'), ((5828, 5871), 'pulumi.set', 'pulumi.set', (['self', '"""reservation_type"""', 'value'], {}), "(self, 'reservation_type', value)\n", (5838, 5871), False, 'import pulumi\n'), ((6078, 6107), 'pulumi.get', 'pulumi.get', (['self', '"""subnet_id"""'], {}), "(self, 'subnet_id')\n", (6088, 6107), False, 'import pulumi\n'), ((6200, 6236), 'pulumi.set', 'pulumi.set', (['self', '"""subnet_id"""', 'value'], {}), "(self, 'subnet_id', value)\n", (6210, 6236), False, 'import pulumi\n'), ((13382, 13412), 'pulumi.get', 'pulumi.get', (['self', '"""cidr_block"""'], {}), "(self, 'cidr_block')\n", (13392, 13412), False, 'import pulumi\n'), ((13593, 13624), 'pulumi.get', 'pulumi.get', (['self', '"""description"""'], {}), "(self, 'description')\n", (13603, 13624), False, 'import pulumi\n'), ((13823, 13851), 'pulumi.get', 'pulumi.get', (['self', '"""owner_id"""'], {}), "(self, 'owner_id')\n", (13833, 13851), False, 'import pulumi\n'), ((14081, 14117), 'pulumi.get', 'pulumi.get', (['self', '"""reservation_type"""'], {}), "(self, 'reservation_type')\n", (14091, 14117), False, 'import pulumi\n'), ((14315, 14344), 'pulumi.get', 'pulumi.get', (['self', '"""subnet_id"""'], {}), "(self, 'subnet_id')\n", (14325, 14344), False, 'import pulumi\n'), ((1408, 1456), 'pulumi.set', 'pulumi.set', (['__self__', '"""description"""', 'description'], {}), "(__self__, 'description', description)\n", (1418, 1456), False, 'import pulumi\n'), ((3949, 3995), 'pulumi.set', 'pulumi.set', (['__self__', '"""cidr_block"""', 'cidr_block'], {}), "(__self__, 'cidr_block', cidr_block)\n", (3959, 3995), False, 'import pulumi\n'), ((4044, 4092), 'pulumi.set', 'pulumi.set', (['__self__', '"""description"""', 'description'], {}), "(__self__, 'description', description)\n", (4054, 4092), False, 'import pulumi\n'), ((4138, 4180), 'pulumi.set', 'pulumi.set', (['__self__', '"""owner_id"""', 'owner_id'], {}), "(__self__, 'owner_id', owner_id)\n", (4148, 4180), False, 'import pulumi\n'), ((4234, 4292), 'pulumi.set', 'pulumi.set', (['__self__', '"""reservation_type"""', 'reservation_type'], {}), "(__self__, 'reservation_type', reservation_type)\n", (4244, 4292), False, 'import pulumi\n'), ((4339, 4383), 'pulumi.set', 'pulumi.set', (['__self__', '"""subnet_id"""', 'subnet_id'], {}), "(__self__, 'subnet_id', subnet_id)\n", (4349, 4383), False, 'import pulumi\n'), ((9879, 9903), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {}), '()\n', (9901, 9903), False, 'import pulumi\n'), ((12719, 12748), 'pulumi.ResourceOptions', 
'pulumi.ResourceOptions', ([], {'id': 'id'}), '(id=id)\n', (12741, 12748), False, 'import pulumi\n')]
|
import os
def complier():
print("If ur using this ur so dumb on god just read the install instructions!\n"
"PLEASE HAVE THE requirements.txt FILE IN THE SAME DIRECTORY!!!!")
os.system("pip install -r requirements.txt")
def cleanup():
cmds = ["RD __pycache__ /Q /S",
"RD build /Q /S",
"DEL trojan.spec /Q"]
for commands in cmds:
os.system(commands)
logo = input("Use custom icon (Y/N) ")
args = "pyinstaller --onefile --windowed"
if logo.lower() == "y":
try:
logo_name = input("Input name of logo (must be .ico) ")
args += f" --icon={os.getcwd()}{logo_name} trojan.py"
print(f"Using args: {args}")
os.system(args)
cleanup()
except Exception as e:
print(e)
cleanup()
os.system("pause")
if logo.lower() == "n":
try:
os.system("pyinstaller --onefile --windowed trojan.py")
print(f"Using args: {args}")
cleanup()
except Exception as e:
print(e)
cleanup()
os.system("pause")
complier()
|
[
"os.getcwd",
"os.system"
] |
[((198, 242), 'os.system', 'os.system', (['"""pip install -r requirements.txt"""'], {}), "('pip install -r requirements.txt')\n", (207, 242), False, 'import os\n'), ((434, 453), 'os.system', 'os.system', (['commands'], {}), '(commands)\n', (443, 453), False, 'import os\n'), ((799, 814), 'os.system', 'os.system', (['args'], {}), '(args)\n', (808, 814), False, 'import os\n'), ((1005, 1060), 'os.system', 'os.system', (['"""pyinstaller --onefile --windowed trojan.py"""'], {}), "('pyinstaller --onefile --windowed trojan.py')\n", (1014, 1060), False, 'import os\n'), ((928, 946), 'os.system', 'os.system', (['"""pause"""'], {}), "('pause')\n", (937, 946), False, 'import os\n'), ((1216, 1234), 'os.system', 'os.system', (['"""pause"""'], {}), "('pause')\n", (1225, 1234), False, 'import os\n'), ((709, 720), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (718, 720), False, 'import os\n')]
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ATS UI flask server."""
import os
import flask
from multitest_transport.models import ndb_models
from multitest_transport.util import env
ROOT_PATH = os.path.dirname(__file__)
STATIC_PATH = os.path.join(ROOT_PATH, 'static')
APP = flask.Flask(
__name__,
root_path=ROOT_PATH,
static_folder=None,
template_folder=ROOT_PATH)
@APP.route('/static/<path:path>')
def Static(path):
"""Returns static files."""
return flask.send_from_directory(STATIC_PATH, path, conditional=False)
@APP.route('/app.js')
def App():
"""Returns application script."""
script = 'dev_sources.concat.js' if env.IS_DEV_MODE else 'app.js'
return flask.send_from_directory(ROOT_PATH, script, conditional=False)
@APP.route('/', defaults={'_': ''})
@APP.route('/<path:_>')
def Root(_):
"""Routes all other requests to index.html and angular."""
private_node_config = ndb_models.GetPrivateNodeConfig()
analytics_tracking_id = ''
if not env.IS_DEV_MODE and private_node_config.metrics_enabled:
analytics_tracking_id = 'UA-140187490-1'
return flask.render_template(
'index.html',
analytics_tracking_id=analytics_tracking_id,
env=env,
private_node_config=private_node_config)
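# A minimal local-run sketch, not part of the original module; the host and port are
# assumptions for development only, since the app is normally served by the surrounding
# ATS deployment rather than Flask's built-in server.
if __name__ == '__main__':
    APP.run(host='0.0.0.0', port=8000, debug=env.IS_DEV_MODE)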
|
[
"multitest_transport.models.ndb_models.GetPrivateNodeConfig",
"os.path.dirname",
"flask.Flask",
"flask.render_template",
"flask.send_from_directory",
"os.path.join"
] |
[((732, 757), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (747, 757), False, 'import os\n'), ((772, 805), 'os.path.join', 'os.path.join', (['ROOT_PATH', '"""static"""'], {}), "(ROOT_PATH, 'static')\n", (784, 805), False, 'import os\n'), ((813, 906), 'flask.Flask', 'flask.Flask', (['__name__'], {'root_path': 'ROOT_PATH', 'static_folder': 'None', 'template_folder': 'ROOT_PATH'}), '(__name__, root_path=ROOT_PATH, static_folder=None,\n template_folder=ROOT_PATH)\n', (824, 906), False, 'import flask\n'), ((1013, 1076), 'flask.send_from_directory', 'flask.send_from_directory', (['STATIC_PATH', 'path'], {'conditional': '(False)'}), '(STATIC_PATH, path, conditional=False)\n', (1038, 1076), False, 'import flask\n'), ((1225, 1288), 'flask.send_from_directory', 'flask.send_from_directory', (['ROOT_PATH', 'script'], {'conditional': '(False)'}), '(ROOT_PATH, script, conditional=False)\n', (1250, 1288), False, 'import flask\n'), ((1449, 1482), 'multitest_transport.models.ndb_models.GetPrivateNodeConfig', 'ndb_models.GetPrivateNodeConfig', ([], {}), '()\n', (1480, 1482), False, 'from multitest_transport.models import ndb_models\n'), ((1632, 1767), 'flask.render_template', 'flask.render_template', (['"""index.html"""'], {'analytics_tracking_id': 'analytics_tracking_id', 'env': 'env', 'private_node_config': 'private_node_config'}), "('index.html', analytics_tracking_id=\n analytics_tracking_id, env=env, private_node_config=private_node_config)\n", (1653, 1767), False, 'import flask\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2019-01-10 07:01
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0003_auto_20190110_1037'),
]
operations = [
migrations.RenameModel(
old_name='Meeting_room',
new_name='MeetingRoom',
),
migrations.RenameModel(
old_name='Scheduled_record',
new_name='ReserveRecord',
),
]
|
[
"django.db.migrations.RenameModel"
] |
[((288, 359), 'django.db.migrations.RenameModel', 'migrations.RenameModel', ([], {'old_name': '"""Meeting_room"""', 'new_name': '"""MeetingRoom"""'}), "(old_name='Meeting_room', new_name='MeetingRoom')\n", (310, 359), False, 'from django.db import migrations\n'), ((404, 481), 'django.db.migrations.RenameModel', 'migrations.RenameModel', ([], {'old_name': '"""Scheduled_record"""', 'new_name': '"""ReserveRecord"""'}), "(old_name='Scheduled_record', new_name='ReserveRecord')\n", (426, 481), False, 'from django.db import migrations\n')]
|
'''
async fetching of urls.
Assumes robots checks have already been done.
Supports server mocking; proxies are not yet implemented.
Success returns the response object and the response bytes (which were already
read in order to shake out all potential network-related exceptions).
Failure returns enough details for the caller to do something smart:
503, other 5xx, DNS fail, connect timeout, error between connect and
full response, proxy failure. Plus an error string good enough for logging.
'''
import time
import traceback
from collections import namedtuple
import ssl
import urllib
import asyncio
import logging
import aiohttp
from . import stats
from . import config
from . import content
LOGGER = logging.getLogger(__name__)
# these errors get printed deep in aiohttp but they also bubble up
aiohttp_errors = {
'SSL handshake failed',
'SSL error errno:1 reason: CERTIFICATE_VERIFY_FAILED',
'SSL handshake failed on verifying the certificate',
'Fatal error on transport TCPTransport',
'Fatal error on SSL transport',
'SSL error errno:1 reason: UNKNOWN_PROTOCOL',
'Future exception was never retrieved',
'Unclosed connection',
'SSL error errno:1 reason: TLSV1_UNRECOGNIZED_NAME',
'SSL error errno:1 reason: SSLV3_ALERT_HANDSHAKE_FAILURE',
'SSL error errno:1 reason: TLSV1_ALERT_INTERNAL_ERROR',
}
class AsyncioSSLFilter(logging.Filter):
def filter(self, record):
if record.name == 'asyncio' and record.levelname == 'ERROR':
msg = record.getMessage()
for ae in aiohttp_errors:
if msg.startswith(ae):
return False
return True
def establish_filters():
f = AsyncioSSLFilter()
logging.getLogger('asyncio').addFilter(f)
# XXX should be a policy plugin
# XXX cookie handling -- no way to have a cookie jar other than at session level
# need to directly manipulate domain-level cookie jars to get cookies
def apply_url_policies(url, crawler):
headers = {}
proxy = None
mock_url = None
mock_robots = None
headers['User-Agent'] = crawler.ua
test_host = config.read('Testing', 'TestHostmapAll')
if test_host:
headers['Host'] = url.urlsplit.netloc
(scheme, netloc, path, query, fragment) = url.urlsplit
netloc = test_host
mock_url = urllib.parse.urlunsplit((scheme, netloc, path, query, fragment))
mock_robots = url.urlsplit.scheme + '://' + test_host + '/robots.txt'
if crawler.prevent_compression:
headers['Accept-Encoding'] = 'identity'
else:
headers['Accept-Encoding'] = content.get_accept_encoding()
if crawler.upgrade_insecure_requests:
headers['Upgrade-Insecure-Requests'] = '1'
return headers, proxy, mock_url, mock_robots
FetcherResponse = namedtuple('FetcherResponse', ['response', 'body_bytes', 'req_headers',
't_first_byte', 't_last_byte', 'is_truncated',
'last_exception'])
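# Illustrative helper, not from the original module: how a caller is expected to branch
# on a FetcherResponse -- last_exception means total failure, otherwise the aiohttp
# response object and the already-read body bytes are available.
def _handle_fetch_result_sketch(fr):
    if fr.last_exception is not None:
        LOGGER.info('fetch failed: %s', fr.last_exception)
        return None
    if fr.is_truncated:
        stats.stats_sum('handled truncated page', 1)
    return fr.response.status, fr.body_bytes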
async def fetch(url, session, headers=None, proxy=None, mock_url=None,
allow_redirects=None, max_redirects=None,
stats_prefix='', max_page_size=-1):
if proxy: # pragma: no cover
proxy = aiohttp.ProxyConnector(proxy=proxy)
# XXX we need to preserve the existing connector config (see cocrawler.__init__ for conn_kwargs)
# XXX we should rotate proxies every fetch in case some are borked
# XXX use proxy history to decide not to use some
raise ValueError('not yet implemented')
last_exception = None
is_truncated = False
try:
t0 = time.time()
last_exception = None
body_bytes = b''
blocks = []
left = max_page_size
with stats.coroutine_state(stats_prefix+'fetcher fetching'):
with stats.record_latency(stats_prefix+'fetcher fetching', url=url.url):
response = await session.get(mock_url or url.url,
allow_redirects=allow_redirects,
max_redirects=max_redirects,
headers=headers)
# https://aiohttp.readthedocs.io/en/stable/tracing_reference.html
# XXX should use tracing events to get t_first_byte
t_first_byte = '{:.3f}'.format(time.time() - t0)
while left > 0:
block = await response.content.read(left)
if not block:
body_bytes = b''.join(blocks)
break
blocks.append(block)
left -= len(block)
else:
body_bytes = b''.join(blocks)
if not response.content.at_eof():
stats.stats_sum('fetch truncated length', 1)
response.close() # this does interrupt the network transfer
is_truncated = 'length' # testme WARC
t_last_byte = '{:.3f}'.format(time.time() - t0)
except asyncio.TimeoutError as e:
stats.stats_sum('fetch timeout', 1)
last_exception = 'TimeoutError'
body_bytes = b''.join(blocks)
if len(body_bytes):
is_truncated = 'time' # testme WARC
stats.stats_sum('fetch timeout body bytes found', 1)
stats.stats_sum('fetch timeout body bytes found bytes', len(body_bytes))
# except (aiohttp.ClientError.ClientResponseError.TooManyRedirects) as e:
# # XXX remove me when I stop using redirects for robots.txt fetching
# raise
except (aiohttp.ClientError) as e:
# ClientError is a catchall for a bunch of things
# e.g. DNS errors, '400' errors for http parser errors
# ClientConnectorCertificateError for an SSL cert that doesn't match hostname
# ClientConnectorError(None, None) caused by robots redir to DNS fail
# ServerDisconnectedError(None,) caused by servers that return 0 bytes for robots.txt fetches
# TooManyRedirects("0, message=''",) caused by too many robots.txt redirs
stats.stats_sum('fetch ClientError', 1)
detailed_name = str(type(e).__name__)
last_exception = 'ClientError: ' + detailed_name + ': ' + str(e)
body_bytes = b''.join(blocks)
if len(body_bytes):
is_truncated = 'disconnect' # testme WARC
stats.stats_sum('fetch ClientError body bytes found', 1)
stats.stats_sum('fetch ClientError body bytes found bytes', len(body_bytes))
except ssl.CertificateError as e:
# unfortunately many ssl errors raise and have tracebacks printed deep in aiohttp
# so this doesn't go off much
stats.stats_sum('fetch SSL error', 1)
last_exception = 'CertificateError: ' + str(e)
#except (ValueError, AttributeError, RuntimeError) as e:
# supposedly aiohttp 2.1 only fires these on programmer error, but here's what I've seen in the past:
# ValueError Location: https:/// 'Host could not be detected' -- robots fetch
# ValueError Location: http:// /URL should be absolute/ -- robots fetch
# ValueError 'Can redirect only to http or https' -- robots fetch -- looked OK to curl!
# AttributeError: ?
# RuntimeError: ?
except ValueError as e:
# no A records found -- raised by my dns code
stats.stats_sum('fetch other error - ValueError', 1)
last_exception = 'ValueErorr: ' + str(e)
except AttributeError as e:
stats.stats_sum('fetch other error - AttributeError', 1)
last_exception = 'AttributeError: ' + str(e)
except RuntimeError as e:
stats.stats_sum('fetch other error - RuntimeError', 1)
last_exception = 'RuntimeError: ' + str(e)
except asyncio.CancelledError:
raise
except Exception as e:
last_exception = 'Exception: ' + str(e)
stats.stats_sum('fetch surprising error', 1)
LOGGER.info('Saw surprising exception in fetcher working on %s:\n%s', mock_url or url.url, last_exception)
traceback.print_exc()
if last_exception is not None:
LOGGER.info('we failed working on %s, the last exception is %s', mock_url or url.url, last_exception)
return FetcherResponse(None, None, None, None, None, False, last_exception)
fr = FetcherResponse(response, body_bytes, response.request_info.headers,
t_first_byte, t_last_byte, is_truncated, None)
if response.status >= 500:
LOGGER.debug('server returned http status %d', response.status)
stats.stats_sum('fetch bytes', len(body_bytes) + len(response.raw_headers))
stats.stats_sum(stats_prefix+'fetch URLs', 1)
stats.stats_sum(stats_prefix+'fetch http code=' + str(response.status), 1)
# checks after fetch:
# hsts header?
# if ssl, check strict-transport-security header, remember max-age=foo part., other stuff like includeSubDomains
# did we receive cookies? was the security bit set?
return fr
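# A hedged example, not part of the module, of driving fetch() directly: it assumes `url`
# is the crawler's URL-like object exposing a .url attribute and that the caller owns an
# aiohttp.ClientSession; the header value and size limit are illustrative only, since
# real policy normally comes from apply_url_policies().
async def _demo_fetch(url, session):
    fr = await fetch(url, session,
                     headers={'User-Agent': 'demo-crawler'},
                     allow_redirects=True, max_redirects=5,
                     max_page_size=1000000)
    if fr.last_exception:
        return None
    return fr.response.status, len(fr.body_bytes)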
def upgrade_scheme(url):
'''
Upgrade crawled scheme to https, if reasonable. This helps to reduce MITM attacks against the crawler.
https://chromium.googlesource.com/chromium/src/net/+/master/http/transport_security_state_static.json
Alternately, the return headers from a site might have strict-transport-security set ... a bit more
dangerous as we'd have to respect the timeout to avoid permanently learning something that's broken
    TODO: use HTTPSEverywhere? would have to have a fallback if https failed, which it occasionally will
'''
return url
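# A hedged sketch of what upgrade_scheme could do once wired up, assuming plain string
# URLs and a tiny in-memory stand-in for the Chromium HSTS preload list; the real
# implementation would parse the JSON file referenced in the docstring above.
_HSTS_PRELOAD_SAMPLE = {'example.com'}
def _upgrade_scheme_sketch(url_str):
    parts = urllib.parse.urlsplit(url_str)
    if parts.scheme == 'http' and parts.netloc.lower() in _HSTS_PRELOAD_SAMPLE:
        return urllib.parse.urlunsplit(('https',) + tuple(parts[1:]))
    return url_str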
|
[
"traceback.print_exc",
"urllib.parse.urlunsplit",
"time.time",
"collections.namedtuple",
"aiohttp.ProxyConnector",
"logging.getLogger"
] |
[((705, 732), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (722, 732), False, 'import logging\n'), ((2800, 2941), 'collections.namedtuple', 'namedtuple', (['"""FetcherResponse"""', "['response', 'body_bytes', 'req_headers', 't_first_byte', 't_last_byte',\n 'is_truncated', 'last_exception']"], {}), "('FetcherResponse', ['response', 'body_bytes', 'req_headers',\n 't_first_byte', 't_last_byte', 'is_truncated', 'last_exception'])\n", (2810, 2941), False, 'from collections import namedtuple\n'), ((2331, 2395), 'urllib.parse.urlunsplit', 'urllib.parse.urlunsplit', (['(scheme, netloc, path, query, fragment)'], {}), '((scheme, netloc, path, query, fragment))\n', (2354, 2395), False, 'import urllib\n'), ((3270, 3305), 'aiohttp.ProxyConnector', 'aiohttp.ProxyConnector', ([], {'proxy': 'proxy'}), '(proxy=proxy)\n', (3292, 3305), False, 'import aiohttp\n'), ((3667, 3678), 'time.time', 'time.time', ([], {}), '()\n', (3676, 3678), False, 'import time\n'), ((1715, 1743), 'logging.getLogger', 'logging.getLogger', (['"""asyncio"""'], {}), "('asyncio')\n", (1732, 1743), False, 'import logging\n'), ((8171, 8192), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (8190, 8192), False, 'import traceback\n'), ((4416, 4427), 'time.time', 'time.time', ([], {}), '()\n', (4425, 4427), False, 'import time\n'), ((5102, 5113), 'time.time', 'time.time', ([], {}), '()\n', (5111, 5113), False, 'import time\n')]
|
# Copyright 2021 The Brax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trains a humanoid to run in the +x direction."""
from typing import Tuple, List
import functools
import dataclasses
import jax
import jax.numpy as jnp
import numpy as np
import brax
# from brax.envs import multiagent_env
from brax.envs import env
from brax.physics import bodies
from brax.physics.base import take
from google.protobuf import text_format
class DoubleHumanoid(env.Env):
"""Trains a humanoid to run in the +x direction."""
def __init__(self, **kwargs):
# TODO: define a function to copy the system config automatically based on num_agents
self.num_agents = 2
config = text_format.Parse(_SYSTEM_CONFIG, brax.Config())
super().__init__(config, **kwargs)
# TODO: define these as properties in multiagent env
self.agent_action_size = 17
self.agent_observation_size = 293
# body info
self.body_parts = ["torso", "lwaist", "pelvis",
"right_thigh", "right_shin",
"left_thigh", "left_shin",
"right_upper_arm", "right_lower_arm",
"left_upper_arm", "left_lower_arm"
]
self.world_parts = ["floor"]
# actuator info
self.agent_dof = 17
assert self.agent_dof * self.num_agents == self.sys.num_joint_dof
self.torque_1d_act_idx = jnp.array([2, 6, 10, 13, 16])
self.torque_2d_act_idx = jnp.array([[0, 1], [11, 12], [14, 15]])
self.torque_3d_act_idx = jnp.array([[3, 4, 5], [7, 8, 9]])
# joint info
self.agent_joints = 10
self.num_joints_1d = 5
self.num_joints_2d = 3
self.num_joints_3d = 2
# info to differentiate humanoids
all_bodies = bodies.Body.from_config(config) # body object only used to get object mass and inertia
all_bodies = take(all_bodies, all_bodies.idx[:-len(self.world_parts)]) # skip the world bodies
self.num_body_parts = len(self.body_parts)
for i in range(self.num_agents):
# get system body idx from self.sys
body_idxs = {f"{body_part}{i}": self.sys.body_idx[f"{body_part}{i}"] for body_part in self.body_parts}
setattr(self, f"agent{i}_idxs", body_idxs)
# get mass, inertia from Body object
body = take(all_bodies, all_bodies.idx[i * self.num_body_parts: i * self.num_body_parts + self.num_body_parts ])
assert len(body.idx) == self.num_body_parts
setattr(self, f"mass{i}", body.mass.reshape(-1, 1))
setattr(self, f"inertia{i}", body.inertia)
self.mass = jnp.array([getattr(self, f"mass{i}") for i in range(self.num_agents)])
self.inertia = jnp.array([getattr(self, f"inertia{i}") for i in range(self.num_agents)])
self.floor_idx = self.sys.body_idx["floor"]
# how far apart to initialize humanoids
self.field_distance = 20
def update_parts_xyz(self, carry, part_idx):
qp_pos, xyz_offset = carry
qp_pos = jax.ops.index_update(qp_pos, jax.ops.index[part_idx],
xyz_offset+qp_pos[jax.ops.index[part_idx]]
)
return (qp_pos, xyz_offset), ()
def set_agent_xyz(self, carry, part_idxs):
qp_pos, rng = carry
rng, xyz_offset = self._random_target(rng)
(qp_pos, xyz_offset), _ = jax.lax.scan(
self.update_parts_xyz, (qp_pos, xyz_offset), part_idxs
)
return (qp_pos, rng), ()
def reset(self, rng: jnp.ndarray) -> env.State:
"""Resets the environment to an initial state."""
qp = self.sys.default_qp()
# move the humanoids to different positions
pos = qp.pos
agents_parts_idxs = jnp.array([list(getattr(self, f"agent{i}_idxs").values()) for i in range(self.num_agents)])
(pos, rng), _ = jax.lax.scan(
self.set_agent_xyz, (pos, rng), agents_parts_idxs
)
qp = dataclasses.replace(qp, pos=pos)
info = self.sys.info(qp)
qp, info = self.sys.step(qp,
jax.random.uniform(rng, (self.action_size,)) * .5) # action size is for all agents
all_obs = self._get_obs(qp, info, jnp.zeros((self.num_agents, self.agent_dof)))
reward = jnp.zeros((self.num_agents,))
done = 0
steps = jnp.zeros(1)
metrics = {
'reward_linvel': jnp.zeros((self.num_agents,)),
'reward_quadctrl': jnp.zeros((self.num_agents,)),
'reward_alive': jnp.zeros((self.num_agents,)),
'reward_impact': jnp.zeros((self.num_agents,))
}
return env.State(rng, qp, info, all_obs, reward, done, steps, metrics)
def step(self, state: env.State, action: jnp.ndarray) -> env.State:
"""Run one timestep of the environment's dynamics."""
rng = state.rng
# note the minus sign. reverse torque improves performance over a range of
# hparams. as to why: ¯\_(ツ)_/¯
qp, info = self.sys.step(state.qp, -action.flatten())
all_obs = self._get_obs(qp, info, action) # should this be - action?
reward, lin_vel_cost, quad_ctrl_cost, alive_bonus, quad_impact_cost = self._compute_reward(state, action, qp)
metrics = {
'reward_linvel': lin_vel_cost,
'reward_quadctrl': -quad_ctrl_cost,
'reward_alive': alive_bonus,
'reward_impact': -quad_impact_cost
}
steps = state.steps + self.action_repeat
done = self._compute_done(qp, steps)
return env.State(rng, qp, info, all_obs, reward, done, steps, metrics)
def _get_obs(self, qp: brax.QP, info: brax.Info, action: jnp.ndarray):
all_obs = []
# TODO: figure out how to jit self._get_agent_obs
# (qp, info, action), all_obs = jax.lax.scan(
# self._get_agent_obs, (qp, info, action), jnp.arange(self.num_agents))
for agent_idx in range(self.num_agents):
(qp, info, action), obs = self._get_agent_obs((qp, info, action), agent_idx)
all_obs.append(obs)
all_obs = jnp.array(all_obs)
# humanoid: (128, 299)
# double humanoid: (128, 2, 293)
# TODO: Add world features! (floor loc)
return all_obs
def _compute_reward(self, state: env.State, action: jnp.ndarray, qp: brax.QP):
# TODO: how to ensure ordering of reshaping is correct??
pos_before = jnp.reshape(state.qp.pos[:-1], (self.num_agents, self.num_body_parts, 3)) # ignore floor at last index
pos_after = jnp.reshape(qp.pos[:-1], (self.num_agents, self.num_body_parts, 3)) # ignore floor at last index
com_before = jnp.sum(pos_before * self.mass, axis=1) / jnp.sum(self.mass, axis=1)
com_after = jnp.sum(pos_after * self.mass, axis=1) / jnp.sum(self.mass, axis=1)
lin_vel_cost = 1.25 * (com_after[:, 0] - com_before[:, 0]) / self.sys.config.dt
reshaped_actions = jnp.reshape(action, (self.num_agents, self.agent_dof))
quad_ctrl_cost = .01 * jnp.sum(jnp.square(reshaped_actions), axis=1)
# can ignore contact cost, see: https://github.com/openai/gym/issues/1541
quad_impact_cost = jnp.zeros(self.num_agents)
alive_bonus = 5.0 * jnp.ones(self.num_agents)
reward = lin_vel_cost - quad_ctrl_cost - quad_impact_cost + alive_bonus
return reward, lin_vel_cost, quad_ctrl_cost, alive_bonus, quad_impact_cost
def _compute_done(self, qp: brax.QP, steps: int, done_thres=0.75):
"""Return done if the proportion of agents that are done surpasses
done_thres
"""
torsos_idxs = jnp.arange(self.num_agents) * self.num_body_parts
torsos_zdim = take(qp.pos[:, 2], torsos_idxs)
done_cond0 = jnp.where(steps >= self.episode_length, x=1.0, y=0.0)
done_cond1 = jnp.where(torsos_zdim < 0.6, x=1.0, y=0.0)
done_cond2 = jnp.where(torsos_zdim > 2.1, x=1.0, y=0.0)
done_vec = done_cond0 + done_cond1 + done_cond2
done_vec = jnp.where(done_vec > 0.0, x=1.0, y=0.0)
done_ratio = jnp.sum(done_vec) / self.num_agents
done = jnp.where(done_ratio > done_thres, x=1.0, y=0.0)
return done
def _get_agent_obs(self, carry, agent_idx) -> jnp.ndarray:
"""Observe humanoid body position, velocities, and angles."""
qp, info, action = carry
qpos, qvel = self._get_agent_qpos_qvel(agent_idx, qp)
qfrc_actuator = self._get_agent_qfrc(agent_idx, action[agent_idx])
cfrc_ext = self._get_agent_cfrc_ext(agent_idx, info)
cinert, cvel = self._get_agent_com_obs(agent_idx, qp)
# obs = jnp.expand_dims(jnp.concatenate(qpos + qvel + cinert + cvel + qfrc_actuator + \
# cfrc_ext), axis=0)
obs = jnp.concatenate(qpos + qvel + cinert + cvel + qfrc_actuator + \
cfrc_ext)
return (qp, info, action), obs
def _get_agent_qpos_qvel(self, agent_idx: int, qp: brax.QP) -> Tuple[List[jnp.ndarray], List[jnp.ndarray]]:
"""
Some pre-processing to pull joint angles and velocities
"""
# TODO: move outside this function
joint_1d_angle, joint_1d_vel = self.sys.joint_revolute.angle_vel(qp)
joint_2d_angle, joint_2d_vel = self.sys.joint_universal.angle_vel(qp)
joint_3d_angle, joint_3d_vel = self.sys.joint_spherical.angle_vel(qp)
idx_offset = agent_idx * self.num_joints_1d
joint_1d_angle = take(joint_1d_angle, jnp.arange(idx_offset, idx_offset + self.num_joints_1d))
joint_1d_vel = take(joint_1d_vel, jnp.arange(idx_offset, idx_offset + self.num_joints_1d))
idx_offset = agent_idx * self.num_joints_2d
joint_2d_angle = take(joint_2d_angle, jnp.arange(idx_offset, idx_offset + self.num_joints_2d))
joint_2d_vel = take(joint_2d_vel, jnp.arange(idx_offset, idx_offset + self.num_joints_2d))
idx_offset = agent_idx * self.num_joints_3d
joint_3d_angle = take(joint_3d_angle, jnp.arange(idx_offset, idx_offset + self.num_joints_3d))
joint_3d_vel = take(joint_3d_vel, jnp.arange(idx_offset, idx_offset + self.num_joints_3d))
# qpos:
# Z of the torso of agent idx (1,)
# orientation of the torso as quaternion (4,)
# joint angles, all dofs (8,)
agent_torso_idx = agent_idx * self.num_body_parts
qpos = [
qp.pos[agent_torso_idx, 2:], qp.rot[agent_torso_idx],
*joint_1d_angle, *joint_2d_angle, *joint_3d_angle
]
# qvel:
# velocity of the torso (3,)
# angular velocity of the torso (3,)
# joint angle velocities, all dofs (8,)
qvel = [
qp.vel[agent_torso_idx], qp.ang[agent_torso_idx],
*joint_1d_vel, *joint_2d_vel, *joint_3d_vel
]
return qpos, qvel
def _get_agent_qfrc(self, agent_idx: int, agent_action: jnp.ndarray) -> List[jnp.ndarray]:
# actuator forces
idx_offset = agent_idx * self.num_joints_1d
torque_1d = take(agent_action, self.torque_1d_act_idx)
torque_1d *= take(self.sys.torque_1d.strength,
jnp.arange(idx_offset, idx_offset + self.num_joints_1d))
idx_offset = agent_idx * self.num_joints_2d
torque_2d = take(agent_action, self.torque_2d_act_idx)
torque_2d = torque_2d.reshape(torque_2d.shape[:-2] + (-1,))
torque_2d *= jnp.repeat(take(self.sys.torque_2d.strength,
jnp.arange(idx_offset, idx_offset + self.num_joints_2d)),
2)
idx_offset = agent_idx * self.num_joints_3d
torque_3d = take(agent_action, self.torque_3d_act_idx)
torque_3d = torque_3d.reshape(torque_3d.shape[:-2] + (-1,))
torque_3d *= jnp.repeat(take(self.sys.torque_3d.strength,
jnp.arange(idx_offset, idx_offset + self.num_joints_3d)),
3)
qfrc_actuator = [torque_1d, torque_2d, torque_3d]
return qfrc_actuator
def _get_agent_cfrc_ext(self, agent_idx: int, info: brax.Info) -> List[jnp.ndarray]:
agent_torso_idx = agent_idx * self.num_body_parts
# external contact forces:
# delta velocity (3,), delta ang (3,) * num bodies in the system
cfrc_ext = [info.contact.vel[agent_torso_idx:agent_torso_idx + self.num_body_parts],
info.contact.ang[agent_torso_idx:agent_torso_idx + self.num_body_parts]
]
# flatten bottom dimension
cfrc_ext = [x.reshape(x.shape[:-2] + (-1,)) for x in cfrc_ext]
return cfrc_ext
def _get_agent_com_obs(self, agent_idx: int, qp: brax.QP) -> Tuple[List[jnp.ndarray], List[jnp.ndarray]]:
"""Get center of mass observations for one agent"""
agent_torso_idx = agent_idx * self.num_body_parts
agent_mass = getattr(self, f"mass{agent_idx}")
agent_inertia = getattr(self, f"inertia{agent_idx}")
body_pos = qp.pos[agent_torso_idx:agent_torso_idx + self.num_body_parts] # ignore floor at last index
body_vel = qp.vel[agent_torso_idx:agent_torso_idx + self.num_body_parts] # ignore floor at last index
com_vec = jnp.sum(body_pos * agent_mass, axis=0) / jnp.sum(agent_mass)
com_vel = body_vel * agent_mass / jnp.sum(agent_mass)
def v_outer(a):
return jnp.outer(a, a)
def v_cross(a, b):
return jnp.cross(a, b)
v_outer = jax.vmap(v_outer, in_axes=[0])
v_cross = jax.vmap(v_cross, in_axes=[0, 0])
disp_vec = body_pos - com_vec
# there are 11 bodies for each humanoid
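        # parallel-axis theorem: shift each body's inertia tensor to the agent's centre of mass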
com_inert = agent_inertia + agent_mass.reshape(
(11, 1, 1)) * ((jnp.linalg.norm(disp_vec, axis=1)**2.).reshape(
(11, 1, 1)) * jnp.stack([jnp.eye(3)] * 11) - v_outer(disp_vec))
cinert = [com_inert.reshape(-1)]
square_disp = (1e-7 + (jnp.linalg.norm(disp_vec, axis=1)**2.)).reshape(
(11, 1))
com_angular_vel = (v_cross(disp_vec, body_vel) / square_disp)
cvel = [com_vel.reshape(-1), com_angular_vel.reshape(-1)]
return cinert, cvel
def _random_target(self, rng: jnp.ndarray) -> Tuple[jnp.ndarray, jnp.ndarray]:
"""Returns a target location in a random circle on xz plane."""
rng, rng1, rng2 = jax.random.split(rng, 3)
dist = self.field_distance * jax.random.uniform(rng1)
ang = jnp.pi * 2. * jax.random.uniform(rng2)
target_x = dist * jnp.cos(ang)
target_y = dist * jnp.sin(ang)
target_z = 0
target = jnp.array([target_x, target_y, target_z]).transpose()
return rng, target
_HUMANOID0_CONFIG ="""
bodies {
name: "torso0"
colliders {
position {
}
rotation {
x: -90.0
}
capsule {
radius: 0.07
length: 0.28
}
}
colliders {
position {
z: 0.19
}
capsule {
radius: 0.09
length: 0.18
}
}
colliders {
position {
x: -0.01
z: -0.12
}
rotation {
x: -90.0
}
capsule {
radius: 0.06
length: 0.24
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 8.907463
}
bodies {
name: "lwaist0"
colliders {
position {
}
rotation {
x: -90.0
}
capsule {
radius: 0.06
length: 0.24
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 2.2619467
}
bodies {
name: "pelvis0"
colliders {
position {
x: -0.02
}
rotation {
x: -90.0
}
capsule {
radius: 0.09
length: 0.32
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 6.6161942
}
bodies {
name: "right_thigh0"
colliders {
position {
y: 0.005
z: -0.17
}
rotation {
x: -178.31532
}
capsule {
radius: 0.06
length: 0.46014702
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 4.751751
}
bodies {
name: "right_shin0"
colliders {
position {
z: -0.15
}
rotation {
x: -180.0
}
capsule {
radius: 0.049
length: 0.398
end: -1
}
}
colliders {
position {
z: -0.35
}
capsule {
radius: 0.075
length: 0.15
end: 1
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 4.5228419
}
bodies {
name: "left_thigh0"
colliders {
position {
y: -0.005
z: -0.17
}
rotation {
x: 178.31532
}
capsule {
radius: 0.06
length: 0.46014702
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 4.751751
}
bodies {
name: "left_shin0"
colliders {
position {
z: -0.15
}
rotation {
x: -180.0
}
capsule {
radius: 0.049
length: 0.398
end: -1
}
}
colliders {
position {
z: -0.35
}
capsule {
radius: 0.075
length: 0.15
end: 1
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 4.5228419
}
bodies {
name: "right_upper_arm0"
colliders {
position {
x: 0.08
y: -0.08
z: -0.08
}
rotation {
x: 135.0
y: 35.26439
z: -75.0
}
capsule {
radius: 0.04
length: 0.35712814
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.6610805
}
bodies {
name: "right_lower_arm0"
colliders {
position {
x: 0.09
y: 0.09
z: 0.09
}
rotation {
x: -45.0
y: 35.26439
z: 15.0
}
capsule {
radius: 0.031
length: 0.33912814
}
}
colliders {
position {
x: 0.18
y: 0.18
z: 0.18
}
capsule {
radius: 0.04
length: 0.08
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.2295402
}
bodies {
name: "left_upper_arm0"
colliders {
position {
x: 0.08
y: 0.08
z: -0.08
}
rotation {
x: -135.0
y: 35.26439
z: 75.0
}
capsule {
radius: 0.04
length: 0.35712814
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.6610805
}
bodies {
name: "left_lower_arm0"
colliders {
position {
x: 0.09
y: -0.09
z: 0.09
}
rotation {
x: 45.0
y: 35.26439
z: -15.0
}
capsule {
radius: 0.031
length: 0.33912814
}
}
colliders {
position {
x: 0.18
y: -0.18
z: 0.18
}
capsule {
radius: 0.04
length: 0.08
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.2295402
}
joints {
name: "abdomen_z0"
stiffness: 15000.0
parent: "torso0"
child: "lwaist0"
parent_offset {
x: -0.01
z: -0.195
}
child_offset {
z: 0.065
}
rotation {
y: -90.0
}
angular_damping: 20.0
angle_limit {
min: -45.0
max: 45.0
}
angle_limit {
min: -75.0
max: 30.0
}
}
joints {
name: "abdomen_x0"
stiffness: 15000.0
parent: "lwaist0"
child: "pelvis0"
parent_offset {
z: -0.065
}
child_offset {
z: 0.1
}
rotation {
x: 90.0
}
angular_damping: 20.0
angle_limit {
min: -35.0
max: 35.0
}
}
joints {
name: "right_hip_x0"
stiffness: 8000.0
parent: "pelvis0"
child: "right_thigh0"
parent_offset {
y: -0.1
z: -0.04
}
child_offset {
}
rotation {
}
angular_damping: 20.0
limit_strength: 2000.0
angle_limit {
min: -10.0
max: 10.0
}
angle_limit {
min: -10.0
max: 10.0
}
angle_limit {
min: -30.0
max: 70.0
}
}
joints {
name: "right_knee0"
stiffness: 15000.0
parent: "right_thigh0"
child: "right_shin0"
parent_offset {
y: 0.01
z: -0.383
}
child_offset {
z: 0.02
}
rotation {
z: -90.0
}
angular_damping: 20.0
angle_limit {
min: -160.0
max: -2.0
}
}
joints {
name: "left_hip_x0"
stiffness: 8000.0
parent: "pelvis0"
child: "left_thigh0"
parent_offset {
y: 0.1
z: -0.04
}
child_offset {
}
angular_damping: 20.0
limit_strength: 2000.0
angle_limit {
min: -10.0
max: 10.0
}
angle_limit {
min: -10.0
max: 10.0
}
angle_limit {
min: -30.0
max: 70.0
}
}
joints {
name: "left_knee0"
stiffness: 15000.0
parent: "left_thigh0"
child: "left_shin0"
parent_offset {
y: -0.01
z: -0.383
}
child_offset {
z: 0.02
}
rotation {
z: -90.0
}
angular_damping: 20.0
angle_limit {
min: -160.0
max: -2.0
}
}
joints {
name: "right_shoulder0"
stiffness: 15000.0
parent: "torso0"
child: "right_upper_arm0"
parent_offset {
y: -0.17
z: 0.06
}
child_offset {
}
rotation {
x: 135.0
y: 35.26439
}
angular_damping: 20.0
angle_limit {
min: -85.0
max: 60.0
}
angle_limit {
min: -85.0
max: 60.0
}
}
joints {
name: "right_elbow0"
stiffness: 15000.0
parent: "right_upper_arm0"
child: "right_lower_arm0"
parent_offset {
x: 0.18
y: -0.18
z: -0.18
}
child_offset {
}
rotation {
x: 135.0
z: 90.0
}
angular_damping: 20.0
angle_limit {
min: -90.0
max: 50.0
}
}
joints {
name: "left_shoulder0"
stiffness: 15000.0
parent: "torso0"
child: "left_upper_arm0"
parent_offset {
y: 0.17
z: 0.06
}
child_offset {
}
rotation {
x: 45.0
y: -35.26439
}
angular_damping: 20.0
angle_limit {
min: -60.0
max: 85.0
}
angle_limit {
min: -60.0
max: 85.0
}
}
joints {
name: "left_elbow0"
stiffness: 15000.0
parent: "left_upper_arm0"
child: "left_lower_arm0"
parent_offset {
x: 0.18
y: 0.18
z: -0.18
}
child_offset {
}
rotation {
x: 45.0
z: -90.0
}
angular_damping: 20.0
angle_limit {
min: -90.0
max: 50.0
}
}
actuators {
name: "abdomen_z0"
joint: "abdomen_z0"
strength: 300.0
torque {
}
}
actuators {
name: "abdomen_x0"
joint: "abdomen_x0"
strength: 300.0
torque {
}
}
actuators {
name: "right_hip_x0"
joint: "right_hip_x0"
strength: 300.0
torque {
}
}
actuators {
name: "right_knee0"
joint: "right_knee0"
strength: 300.0
torque {
}
}
actuators {
name: "left_hip_x0"
joint: "left_hip_x0"
strength: 300.0
torque {
}
}
actuators {
name: "left_knee0"
joint: "left_knee0"
strength: 300.0
torque {
}
}
actuators {
name: "right_shoulder0"
joint: "right_shoulder0"
strength: 75.0
torque {
}
}
actuators {
name: "right_elbow0"
joint: "right_elbow0"
strength: 75.0
torque {
}
}
actuators {
name: "left_shoulder0"
joint: "left_shoulder0"
strength: 75.0
torque {
}
}
actuators {
name: "left_elbow0"
joint: "left_elbow0"
strength: 75.0
torque {
}
}
collide_include {
first: "floor"
second: "left_shin0"
}
collide_include {
first: "floor"
second: "right_shin0"
}
"""
_HUMANOID1_CONFIG = """
bodies {
name: "torso1"
colliders {
position {
}
rotation {
x: -90.0
}
capsule {
radius: 0.07
length: 0.28
}
}
colliders {
position {
z: 0.19
}
capsule {
radius: 0.09
length: 0.18
}
}
colliders {
position {
x: -0.01
z: -0.12
}
rotation {
x: -90.0
}
capsule {
radius: 0.06
length: 0.24
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 8.907463
}
bodies {
name: "lwaist1"
colliders {
position {
}
rotation {
x: -90.0
}
capsule {
radius: 0.06
length: 0.24
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 2.2619467
}
bodies {
name: "pelvis1"
colliders {
position {
x: -0.02
}
rotation {
x: -90.0
}
capsule {
radius: 0.09
length: 0.32
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 6.6161942
}
bodies {
name: "right_thigh1"
colliders {
position {
y: 0.005
z: -0.17
}
rotation {
x: -178.31532
}
capsule {
radius: 0.06
length: 0.46014702
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 4.751751
}
bodies {
name: "right_shin1"
colliders {
position {
z: -0.15
}
rotation {
x: -180.0
}
capsule {
radius: 0.049
length: 0.398
end: -1
}
}
colliders {
position {
z: -0.35
}
capsule {
radius: 0.075
length: 0.15
end: 1
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 4.5228419
}
bodies {
name: "left_thigh1"
colliders {
position {
y: -0.005
z: -0.17
}
rotation {
x: 178.31532
}
capsule {
radius: 0.06
length: 0.46014702
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 4.751751
}
bodies {
name: "left_shin1"
colliders {
position {
z: -0.15
}
rotation {
x: -180.0
}
capsule {
radius: 0.049
length: 0.398
end: -1
}
}
colliders {
position {
z: -0.35
}
capsule {
radius: 0.075
length: 0.15
end: 1
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 4.5228419
}
bodies {
name: "right_upper_arm1"
colliders {
position {
x: 0.08
y: -0.08
z: -0.08
}
rotation {
x: 135.0
y: 35.26439
z: -75.0
}
capsule {
radius: 0.04
length: 0.35712814
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.6610805
}
bodies {
name: "right_lower_arm1"
colliders {
position {
x: 0.09
y: 0.09
z: 0.09
}
rotation {
x: -45.0
y: 35.26439
z: 15.0
}
capsule {
radius: 0.031
length: 0.33912814
}
}
colliders {
position {
x: 0.18
y: 0.18
z: 0.18
}
capsule {
radius: 0.04
length: 0.08
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.2295402
}
bodies {
name: "left_upper_arm1"
colliders {
position {
x: 0.08
y: 0.08
z: -0.08
}
rotation {
x: -135.0
y: 35.26439
z: 75.0
}
capsule {
radius: 0.04
length: 0.35712814
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.6610805
}
bodies {
name: "left_lower_arm1"
colliders {
position {
x: 0.09
y: -0.09
z: 0.09
}
rotation {
x: 45.0
y: 35.26439
z: -15.0
}
capsule {
radius: 0.031
length: 0.33912814
}
}
colliders {
position {
x: 0.18
y: -0.18
z: 0.18
}
capsule {
radius: 0.04
length: 0.08
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.2295402
}
joints {
name: "abdomen_z1"
stiffness: 15000.0
parent: "torso1"
child: "lwaist1"
parent_offset {
x: -0.01
z: -0.195
}
child_offset {
z: 0.065
}
rotation {
y: -90.0
}
angular_damping: 20.0
angle_limit {
min: -45.0
max: 45.0
}
angle_limit {
min: -75.0
max: 30.0
}
}
joints {
name: "abdomen_x1"
stiffness: 15000.0
parent: "lwaist1"
child: "pelvis1"
parent_offset {
z: -0.065
}
child_offset {
z: 0.1
}
rotation {
x: 90.0
}
angular_damping: 20.0
angle_limit {
min: -35.0
max: 35.0
}
}
joints {
name: "right_hip_x1"
stiffness: 8000.0
parent: "pelvis1"
child: "right_thigh1"
parent_offset {
y: -0.1
z: -0.04
}
child_offset {
}
rotation {
}
angular_damping: 20.0
limit_strength: 2000.0
angle_limit {
min: -10.0
max: 10.0
}
angle_limit {
min: -10.0
max: 10.0
}
angle_limit {
min: -30.0
max: 70.0
}
}
joints {
name: "right_knee1"
stiffness: 15000.0
parent: "right_thigh1"
child: "right_shin1"
parent_offset {
y: 0.01
z: -0.383
}
child_offset {
z: 0.02
}
rotation {
z: -90.0
}
angular_damping: 20.0
angle_limit {
min: -160.0
max: -2.0
}
}
joints {
name: "left_hip_x1"
stiffness: 8000.0
parent: "pelvis1"
child: "left_thigh1"
parent_offset {
y: 0.1
z: -0.04
}
child_offset {
}
angular_damping: 20.0
limit_strength: 2000.0
angle_limit {
min: -10.0
max: 10.0
}
angle_limit {
min: -10.0
max: 10.0
}
angle_limit {
min: -30.0
max: 70.0
}
}
joints {
name: "left_knee1"
stiffness: 15000.0
parent: "left_thigh1"
child: "left_shin1"
parent_offset {
y: -0.01
z: -0.383
}
child_offset {
z: 0.02
}
rotation {
z: -90.0
}
angular_damping: 20.0
angle_limit {
min: -160.0
max: -2.0
}
}
joints {
name: "right_shoulder1"
stiffness: 15000.0
parent: "torso1"
child: "right_upper_arm1"
parent_offset {
y: -0.17
z: 0.06
}
child_offset {
}
rotation {
x: 135.0
y: 35.26439
}
angular_damping: 20.0
angle_limit {
min: -85.0
max: 60.0
}
angle_limit {
min: -85.0
max: 60.0
}
}
joints {
name: "right_elbow1"
stiffness: 15000.0
parent: "right_upper_arm1"
child: "right_lower_arm1"
parent_offset {
x: 0.18
y: -0.18
z: -0.18
}
child_offset {
}
rotation {
x: 135.0
z: 90.0
}
angular_damping: 20.0
angle_limit {
min: -90.0
max: 50.0
}
}
joints {
name: "left_shoulder1"
stiffness: 15000.0
parent: "torso1"
child: "left_upper_arm1"
parent_offset {
y: 0.17
z: 0.06
}
child_offset {
}
rotation {
x: 45.0
y: -35.26439
}
angular_damping: 20.0
angle_limit {
min: -60.0
max: 85.0
}
angle_limit {
min: -60.0
max: 85.0
}
}
joints {
name: "left_elbow1"
stiffness: 15000.0
parent: "left_upper_arm1"
child: "left_lower_arm1"
parent_offset {
x: 0.18
y: 0.18
z: -0.18
}
child_offset {
}
rotation {
x: 45.0
z: -90.0
}
angular_damping: 20.0
angle_limit {
min: -90.0
max: 50.0
}
}
actuators {
name: "abdomen_z1"
joint: "abdomen_z1"
strength: 300.0
torque {
}
}
actuators {
name: "abdomen_x1"
joint: "abdomen_x1"
strength: 300.0
torque {
}
}
actuators {
name: "right_hip_x1"
joint: "right_hip_x1"
strength: 300.0
torque {
}
}
actuators {
name: "right_knee1"
joint: "right_knee1"
strength: 300.0
torque {
}
}
actuators {
name: "left_hip_x1"
joint: "left_hip_x1"
strength: 300.0
torque {
}
}
actuators {
name: "left_knee1"
joint: "left_knee1"
strength: 300.0
torque {
}
}
actuators {
name: "right_shoulder1"
joint: "right_shoulder1"
strength: 75.0
torque {
}
}
actuators {
name: "right_elbow1"
joint: "right_elbow1"
strength: 75.0
torque {
}
}
actuators {
name: "left_shoulder1"
joint: "left_shoulder1"
strength: 75.0
torque {
}
}
actuators {
name: "left_elbow1"
joint: "left_elbow1"
strength: 75.0
torque {
}
}
collide_include {
first: "floor"
second: "left_shin1"
}
collide_include {
first: "floor"
second: "right_shin1"
}
"""
_ENV_CONFIG = """
bodies {
name: "floor"
colliders {
plane {
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.0
frozen { all: true }
}
friction: 1.0
gravity {
z: -9.81
}
angular_damping: -0.05
baumgarte_erp: 0.1
dt: 0.015
substeps: 8
"""
_SYSTEM_CONFIG = _HUMANOID0_CONFIG + _HUMANOID1_CONFIG + _ENV_CONFIG
|
[
"brax.physics.base.take",
"brax.Config",
"brax.physics.bodies.Body.from_config",
"jax.numpy.reshape",
"jax.random.uniform",
"jax.numpy.where",
"jax.numpy.linalg.norm",
"jax.numpy.sin",
"brax.envs.env.State",
"jax.numpy.sum",
"jax.vmap",
"jax.numpy.square",
"jax.lax.scan",
"jax.numpy.concatenate",
"jax.numpy.eye",
"jax.numpy.cos",
"jax.numpy.ones",
"jax.numpy.zeros",
"dataclasses.replace",
"jax.numpy.outer",
"jax.numpy.array",
"jax.numpy.cross",
"jax.numpy.arange",
"jax.ops.index_update",
"jax.random.split"
] |
[((2031, 2060), 'jax.numpy.array', 'jnp.array', (['[2, 6, 10, 13, 16]'], {}), '([2, 6, 10, 13, 16])\n', (2040, 2060), True, 'import jax.numpy as jnp\n'), ((2095, 2134), 'jax.numpy.array', 'jnp.array', (['[[0, 1], [11, 12], [14, 15]]'], {}), '([[0, 1], [11, 12], [14, 15]])\n', (2104, 2134), True, 'import jax.numpy as jnp\n'), ((2169, 2202), 'jax.numpy.array', 'jnp.array', (['[[3, 4, 5], [7, 8, 9]]'], {}), '([[3, 4, 5], [7, 8, 9]])\n', (2178, 2202), True, 'import jax.numpy as jnp\n'), ((2420, 2451), 'brax.physics.bodies.Body.from_config', 'bodies.Body.from_config', (['config'], {}), '(config)\n', (2443, 2451), False, 'from brax.physics import bodies\n'), ((3720, 3824), 'jax.ops.index_update', 'jax.ops.index_update', (['qp_pos', 'jax.ops.index[part_idx]', '(xyz_offset + qp_pos[jax.ops.index[part_idx]])'], {}), '(qp_pos, jax.ops.index[part_idx], xyz_offset + qp_pos[\n jax.ops.index[part_idx]])\n', (3740, 3824), False, 'import jax\n'), ((4106, 4174), 'jax.lax.scan', 'jax.lax.scan', (['self.update_parts_xyz', '(qp_pos, xyz_offset)', 'part_idxs'], {}), '(self.update_parts_xyz, (qp_pos, xyz_offset), part_idxs)\n', (4118, 4174), False, 'import jax\n'), ((4608, 4671), 'jax.lax.scan', 'jax.lax.scan', (['self.set_agent_xyz', '(pos, rng)', 'agents_parts_idxs'], {}), '(self.set_agent_xyz, (pos, rng), agents_parts_idxs)\n', (4620, 4671), False, 'import jax\n'), ((4710, 4742), 'dataclasses.replace', 'dataclasses.replace', (['qp'], {'pos': 'pos'}), '(qp, pos=pos)\n', (4729, 4742), False, 'import dataclasses\n'), ((5043, 5072), 'jax.numpy.zeros', 'jnp.zeros', (['(self.num_agents,)'], {}), '((self.num_agents,))\n', (5052, 5072), True, 'import jax.numpy as jnp\n'), ((5108, 5120), 'jax.numpy.zeros', 'jnp.zeros', (['(1)'], {}), '(1)\n', (5117, 5120), True, 'import jax.numpy as jnp\n'), ((5413, 5476), 'brax.envs.env.State', 'env.State', (['rng', 'qp', 'info', 'all_obs', 'reward', 'done', 'steps', 'metrics'], {}), '(rng, qp, info, all_obs, reward, done, steps, metrics)\n', (5422, 5476), False, 'from brax.envs import env\n'), ((6364, 6427), 'brax.envs.env.State', 'env.State', (['rng', 'qp', 'info', 'all_obs', 'reward', 'done', 'steps', 'metrics'], {}), '(rng, qp, info, all_obs, reward, done, steps, metrics)\n', (6373, 6427), False, 'from brax.envs import env\n'), ((6915, 6933), 'jax.numpy.array', 'jnp.array', (['all_obs'], {}), '(all_obs)\n', (6924, 6933), True, 'import jax.numpy as jnp\n'), ((7255, 7328), 'jax.numpy.reshape', 'jnp.reshape', (['state.qp.pos[:-1]', '(self.num_agents, self.num_body_parts, 3)'], {}), '(state.qp.pos[:-1], (self.num_agents, self.num_body_parts, 3))\n', (7266, 7328), True, 'import jax.numpy as jnp\n'), ((7380, 7447), 'jax.numpy.reshape', 'jnp.reshape', (['qp.pos[:-1]', '(self.num_agents, self.num_body_parts, 3)'], {}), '(qp.pos[:-1], (self.num_agents, self.num_body_parts, 3))\n', (7391, 7447), True, 'import jax.numpy as jnp\n'), ((7781, 7835), 'jax.numpy.reshape', 'jnp.reshape', (['action', '(self.num_agents, self.agent_dof)'], {}), '(action, (self.num_agents, self.agent_dof))\n', (7792, 7835), True, 'import jax.numpy as jnp\n'), ((8025, 8051), 'jax.numpy.zeros', 'jnp.zeros', (['self.num_agents'], {}), '(self.num_agents)\n', (8034, 8051), True, 'import jax.numpy as jnp\n'), ((8557, 8588), 'brax.physics.base.take', 'take', (['qp.pos[:, 2]', 'torsos_idxs'], {}), '(qp.pos[:, 2], torsos_idxs)\n', (8561, 8588), False, 'from brax.physics.base import take\n'), ((8613, 8666), 'jax.numpy.where', 'jnp.where', (['(steps >= self.episode_length)'], {'x': '(1.0)', 'y': '(0.0)'}), '(steps >= 
self.episode_length, x=1.0, y=0.0)\n', (8622, 8666), True, 'import jax.numpy as jnp\n'), ((8689, 8731), 'jax.numpy.where', 'jnp.where', (['(torsos_zdim < 0.6)'], {'x': '(1.0)', 'y': '(0.0)'}), '(torsos_zdim < 0.6, x=1.0, y=0.0)\n', (8698, 8731), True, 'import jax.numpy as jnp\n'), ((8754, 8796), 'jax.numpy.where', 'jnp.where', (['(torsos_zdim > 2.1)'], {'x': '(1.0)', 'y': '(0.0)'}), '(torsos_zdim > 2.1, x=1.0, y=0.0)\n', (8763, 8796), True, 'import jax.numpy as jnp\n'), ((8876, 8915), 'jax.numpy.where', 'jnp.where', (['(done_vec > 0.0)'], {'x': '(1.0)', 'y': '(0.0)'}), '(done_vec > 0.0, x=1.0, y=0.0)\n', (8885, 8915), True, 'import jax.numpy as jnp\n'), ((8992, 9040), 'jax.numpy.where', 'jnp.where', (['(done_ratio > done_thres)'], {'x': '(1.0)', 'y': '(0.0)'}), '(done_ratio > done_thres, x=1.0, y=0.0)\n', (9001, 9040), True, 'import jax.numpy as jnp\n'), ((9662, 9733), 'jax.numpy.concatenate', 'jnp.concatenate', (['(qpos + qvel + cinert + cvel + qfrc_actuator + cfrc_ext)'], {}), '(qpos + qvel + cinert + cvel + qfrc_actuator + cfrc_ext)\n', (9677, 9733), True, 'import jax.numpy as jnp\n'), ((11971, 12013), 'brax.physics.base.take', 'take', (['agent_action', 'self.torque_1d_act_idx'], {}), '(agent_action, self.torque_1d_act_idx)\n', (11975, 12013), False, 'from brax.physics.base import take\n'), ((12215, 12257), 'brax.physics.base.take', 'take', (['agent_action', 'self.torque_2d_act_idx'], {}), '(agent_action, self.torque_2d_act_idx)\n', (12219, 12257), False, 'from brax.physics.base import take\n'), ((12554, 12596), 'brax.physics.base.take', 'take', (['agent_action', 'self.torque_3d_act_idx'], {}), '(agent_action, self.torque_3d_act_idx)\n', (12558, 12596), False, 'from brax.physics.base import take\n'), ((14386, 14416), 'jax.vmap', 'jax.vmap', (['v_outer'], {'in_axes': '[0]'}), '(v_outer, in_axes=[0])\n', (14394, 14416), False, 'import jax\n'), ((14436, 14469), 'jax.vmap', 'jax.vmap', (['v_cross'], {'in_axes': '[0, 0]'}), '(v_cross, in_axes=[0, 0])\n', (14444, 14469), False, 'import jax\n'), ((15279, 15303), 'jax.random.split', 'jax.random.split', (['rng', '(3)'], {}), '(rng, 3)\n', (15295, 15303), False, 'import jax\n'), ((1275, 1288), 'brax.Config', 'brax.Config', ([], {}), '()\n', (1286, 1288), False, 'import brax\n'), ((3002, 3110), 'brax.physics.base.take', 'take', (['all_bodies', 'all_bodies.idx[i * self.num_body_parts:i * self.num_body_parts + self.\n num_body_parts]'], {}), '(all_bodies, all_bodies.idx[i * self.num_body_parts:i * self.\n num_body_parts + self.num_body_parts])\n', (3006, 3110), False, 'from brax.physics.base import take\n'), ((4979, 5023), 'jax.numpy.zeros', 'jnp.zeros', (['(self.num_agents, self.agent_dof)'], {}), '((self.num_agents, self.agent_dof))\n', (4988, 5023), True, 'import jax.numpy as jnp\n'), ((5172, 5201), 'jax.numpy.zeros', 'jnp.zeros', (['(self.num_agents,)'], {}), '((self.num_agents,))\n', (5181, 5201), True, 'import jax.numpy as jnp\n'), ((5235, 5264), 'jax.numpy.zeros', 'jnp.zeros', (['(self.num_agents,)'], {}), '((self.num_agents,))\n', (5244, 5264), True, 'import jax.numpy as jnp\n'), ((5295, 5324), 'jax.numpy.zeros', 'jnp.zeros', (['(self.num_agents,)'], {}), '((self.num_agents,))\n', (5304, 5324), True, 'import jax.numpy as jnp\n'), ((5356, 5385), 'jax.numpy.zeros', 'jnp.zeros', (['(self.num_agents,)'], {}), '((self.num_agents,))\n', (5365, 5385), True, 'import jax.numpy as jnp\n'), ((7502, 7541), 'jax.numpy.sum', 'jnp.sum', (['(pos_before * self.mass)'], {'axis': '(1)'}), '(pos_before * self.mass, axis=1)\n', (7509, 7541), True, 'import 
jax.numpy as jnp\n'), ((7544, 7570), 'jax.numpy.sum', 'jnp.sum', (['self.mass'], {'axis': '(1)'}), '(self.mass, axis=1)\n', (7551, 7570), True, 'import jax.numpy as jnp\n'), ((7592, 7630), 'jax.numpy.sum', 'jnp.sum', (['(pos_after * self.mass)'], {'axis': '(1)'}), '(pos_after * self.mass, axis=1)\n', (7599, 7630), True, 'import jax.numpy as jnp\n'), ((7633, 7659), 'jax.numpy.sum', 'jnp.sum', (['self.mass'], {'axis': '(1)'}), '(self.mass, axis=1)\n', (7640, 7659), True, 'import jax.numpy as jnp\n'), ((8081, 8106), 'jax.numpy.ones', 'jnp.ones', (['self.num_agents'], {}), '(self.num_agents)\n', (8089, 8106), True, 'import jax.numpy as jnp\n'), ((8484, 8511), 'jax.numpy.arange', 'jnp.arange', (['self.num_agents'], {}), '(self.num_agents)\n', (8494, 8511), True, 'import jax.numpy as jnp\n'), ((8940, 8957), 'jax.numpy.sum', 'jnp.sum', (['done_vec'], {}), '(done_vec)\n', (8947, 8957), True, 'import jax.numpy as jnp\n'), ((10398, 10453), 'jax.numpy.arange', 'jnp.arange', (['idx_offset', '(idx_offset + self.num_joints_1d)'], {}), '(idx_offset, idx_offset + self.num_joints_1d)\n', (10408, 10453), True, 'import jax.numpy as jnp\n'), ((10498, 10553), 'jax.numpy.arange', 'jnp.arange', (['idx_offset', '(idx_offset + self.num_joints_1d)'], {}), '(idx_offset, idx_offset + self.num_joints_1d)\n', (10508, 10553), True, 'import jax.numpy as jnp\n'), ((10655, 10710), 'jax.numpy.arange', 'jnp.arange', (['idx_offset', '(idx_offset + self.num_joints_2d)'], {}), '(idx_offset, idx_offset + self.num_joints_2d)\n', (10665, 10710), True, 'import jax.numpy as jnp\n'), ((10755, 10810), 'jax.numpy.arange', 'jnp.arange', (['idx_offset', '(idx_offset + self.num_joints_2d)'], {}), '(idx_offset, idx_offset + self.num_joints_2d)\n', (10765, 10810), True, 'import jax.numpy as jnp\n'), ((10912, 10967), 'jax.numpy.arange', 'jnp.arange', (['idx_offset', '(idx_offset + self.num_joints_3d)'], {}), '(idx_offset, idx_offset + self.num_joints_3d)\n', (10922, 10967), True, 'import jax.numpy as jnp\n'), ((11012, 11067), 'jax.numpy.arange', 'jnp.arange', (['idx_offset', '(idx_offset + self.num_joints_3d)'], {}), '(idx_offset, idx_offset + self.num_joints_3d)\n', (11022, 11067), True, 'import jax.numpy as jnp\n'), ((12082, 12137), 'jax.numpy.arange', 'jnp.arange', (['idx_offset', '(idx_offset + self.num_joints_1d)'], {}), '(idx_offset, idx_offset + self.num_joints_1d)\n', (12092, 12137), True, 'import jax.numpy as jnp\n'), ((14116, 14154), 'jax.numpy.sum', 'jnp.sum', (['(body_pos * agent_mass)'], {'axis': '(0)'}), '(body_pos * agent_mass, axis=0)\n', (14123, 14154), True, 'import jax.numpy as jnp\n'), ((14157, 14176), 'jax.numpy.sum', 'jnp.sum', (['agent_mass'], {}), '(agent_mass)\n', (14164, 14176), True, 'import jax.numpy as jnp\n'), ((14220, 14239), 'jax.numpy.sum', 'jnp.sum', (['agent_mass'], {}), '(agent_mass)\n', (14227, 14239), True, 'import jax.numpy as jnp\n'), ((14285, 14300), 'jax.numpy.outer', 'jnp.outer', (['a', 'a'], {}), '(a, a)\n', (14294, 14300), True, 'import jax.numpy as jnp\n'), ((14349, 14364), 'jax.numpy.cross', 'jnp.cross', (['a', 'b'], {}), '(a, b)\n', (14358, 14364), True, 'import jax.numpy as jnp\n'), ((15342, 15366), 'jax.random.uniform', 'jax.random.uniform', (['rng1'], {}), '(rng1)\n', (15360, 15366), False, 'import jax\n'), ((15396, 15420), 'jax.random.uniform', 'jax.random.uniform', (['rng2'], {}), '(rng2)\n', (15414, 15420), False, 'import jax\n'), ((15448, 15460), 'jax.numpy.cos', 'jnp.cos', (['ang'], {}), '(ang)\n', (15455, 15460), True, 'import jax.numpy as jnp\n'), ((15488, 15500), 'jax.numpy.sin', 
'jnp.sin', (['ang'], {}), '(ang)\n', (15495, 15500), True, 'import jax.numpy as jnp\n'), ((4851, 4895), 'jax.random.uniform', 'jax.random.uniform', (['rng', '(self.action_size,)'], {}), '(rng, (self.action_size,))\n', (4869, 4895), False, 'import jax\n'), ((7876, 7904), 'jax.numpy.square', 'jnp.square', (['reshaped_actions'], {}), '(reshaped_actions)\n', (7886, 7904), True, 'import jax.numpy as jnp\n'), ((12406, 12461), 'jax.numpy.arange', 'jnp.arange', (['idx_offset', '(idx_offset + self.num_joints_2d)'], {}), '(idx_offset, idx_offset + self.num_joints_2d)\n', (12416, 12461), True, 'import jax.numpy as jnp\n'), ((12745, 12800), 'jax.numpy.arange', 'jnp.arange', (['idx_offset', '(idx_offset + self.num_joints_3d)'], {}), '(idx_offset, idx_offset + self.num_joints_3d)\n', (12755, 12800), True, 'import jax.numpy as jnp\n'), ((15541, 15582), 'jax.numpy.array', 'jnp.array', (['[target_x, target_y, target_z]'], {}), '([target_x, target_y, target_z])\n', (15550, 15582), True, 'import jax.numpy as jnp\n'), ((14853, 14886), 'jax.numpy.linalg.norm', 'jnp.linalg.norm', (['disp_vec'], {'axis': '(1)'}), '(disp_vec, axis=1)\n', (14868, 14886), True, 'import jax.numpy as jnp\n'), ((14646, 14679), 'jax.numpy.linalg.norm', 'jnp.linalg.norm', (['disp_vec'], {'axis': '(1)'}), '(disp_vec, axis=1)\n', (14661, 14679), True, 'import jax.numpy as jnp\n'), ((14736, 14746), 'jax.numpy.eye', 'jnp.eye', (['(3)'], {}), '(3)\n', (14743, 14746), True, 'import jax.numpy as jnp\n')]
|
# TODOS
#--------------------------------------
# imports
import matplotlib.pyplot as plt
from atalaia.atalaia import Atalaia
import numpy as np
import networkx as nx
class Explore:
"""Explore is used for text exploratory tasks.
"""
def __init__(self, language:str):
"""
Parameters
----------
language : str
The language of the corpus
"""
self.language = language
self.atalaia = self.__start_atalaia()
def __start_atalaia(self):
""" Starts an instance of Atalaia"""
return Atalaia(self.language)
def describe(self, corpus:list):
""" Gets the lengths of the sentences present in the corpus, based on the number of tokens.
Returns the lengths, the shortest value and the longest value and the average sentence size."""
# tokenize sentences
tokenized_sentences = [self.atalaia.tokenize(sentence) for sentence in corpus]
# get the lengths
lengths = [len(sentence) for sentence in tokenized_sentences]
# get the percentiles
a = np.array(lengths)
percentiles = (np.percentile(a,0), np.percentile(a,25), np.percentile(a,50), np.percentile(a,75), np.percentile(a,100))
# get shortest, longest and average sentence size using the percentiles values
shortest = percentiles[0] # 0%
longest = percentiles[4] # 100%
average = percentiles[2] # 50%
return lengths, shortest, longest, average, percentiles
def plot_sentences_size_histogram(self, corpus:list, bins = 30, xlabel = 'Number of tokens', ylabel = 'Frequency'):
""" Plots the tokens distribution """
# get sentences sizes
sentences_sizes, shortest, longest, average, percentiles = self.describe(corpus)
# plot
plt.hist(sentences_sizes, bins = bins)
plt.xlabel(xlabel)
        plt.ylabel(ylabel)
plt.show()
# return sizes, shortest and longest values and average
return sentences_sizes, shortest, longest, average, percentiles
def plot_sentences_size_boxplot(self, corpus:list):
# get sentences sizes
sentences_sizes, shortest, longest, average, percentiles = self.describe(corpus)
# plot boxplot
plt.boxplot(sentences_sizes)
plt.show()
# return sizes, shortest and longest values and average
return sentences_sizes, shortest, longest, average, percentiles
def plot_representative_tokens(self, corpus:list, percentage=0.3):
#create corpus
corpus = self.atalaia.create_corpus(corpus)
# let's lowercase everything first
texts_lower = self.atalaia.lower_remove_white(corpus)
# plot
token_data = self.atalaia.representative_tokens(percentage,
texts_lower,
reverse=False)
token_data = token_data.items()
token_data = list(token_data)[:10]
tokens, counts = zip(*token_data)
# plot
plt.figure(figsize=(20,10))
plt.bar(tokens,
counts,
color='b')
        plt.xlabel('Tokens')
        plt.ylabel('Counts')
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.boxplot",
"matplotlib.pyplot.bar",
"atalaia.atalaia.Atalaia",
"numpy.percentile",
"matplotlib.pyplot.figure",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((585, 607), 'atalaia.atalaia.Atalaia', 'Atalaia', (['self.language'], {}), '(self.language)\n', (592, 607), False, 'from atalaia.atalaia import Atalaia\n'), ((1116, 1133), 'numpy.array', 'np.array', (['lengths'], {}), '(lengths)\n', (1124, 1133), True, 'import numpy as np\n'), ((1854, 1890), 'matplotlib.pyplot.hist', 'plt.hist', (['sentences_sizes'], {'bins': 'bins'}), '(sentences_sizes, bins=bins)\n', (1862, 1890), True, 'import matplotlib.pyplot as plt\n'), ((1901, 1919), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (1911, 1919), True, 'import matplotlib.pyplot as plt\n'), ((1928, 1946), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['ylabel'], {}), '(ylabel)\n', (1938, 1946), True, 'import matplotlib.pyplot as plt\n'), ((1955, 1965), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1963, 1965), True, 'import matplotlib.pyplot as plt\n'), ((2312, 2340), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['sentences_sizes'], {}), '(sentences_sizes)\n', (2323, 2340), True, 'import matplotlib.pyplot as plt\n'), ((2349, 2359), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2357, 2359), True, 'import matplotlib.pyplot as plt\n'), ((3145, 3173), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (3155, 3173), True, 'import matplotlib.pyplot as plt\n'), ((3181, 3215), 'matplotlib.pyplot.bar', 'plt.bar', (['tokens', 'counts'], {'color': '"""b"""'}), "(tokens, counts, color='b')\n", (3188, 3215), True, 'import matplotlib.pyplot as plt\n'), ((3258, 3278), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Tokens"""'], {}), "('Tokens')\n", (3268, 3278), True, 'import matplotlib.pyplot as plt\n'), ((3288, 3308), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Counts"""'], {}), "('Counts')\n", (3298, 3308), True, 'import matplotlib.pyplot as plt\n'), ((1157, 1176), 'numpy.percentile', 'np.percentile', (['a', '(0)'], {}), '(a, 0)\n', (1170, 1176), True, 'import numpy as np\n'), ((1177, 1197), 'numpy.percentile', 'np.percentile', (['a', '(25)'], {}), '(a, 25)\n', (1190, 1197), True, 'import numpy as np\n'), ((1198, 1218), 'numpy.percentile', 'np.percentile', (['a', '(50)'], {}), '(a, 50)\n', (1211, 1218), True, 'import numpy as np\n'), ((1219, 1239), 'numpy.percentile', 'np.percentile', (['a', '(75)'], {}), '(a, 75)\n', (1232, 1239), True, 'import numpy as np\n'), ((1240, 1261), 'numpy.percentile', 'np.percentile', (['a', '(100)'], {}), '(a, 100)\n', (1253, 1261), True, 'import numpy as np\n')]
|
"""Test I/O related functionality."""
import tempfile
import os
import pathlib
def test_cache_dir():
"""Test getting cache directory."""
from sattools.io import get_cache_dir
with tempfile.TemporaryDirectory() as tmpdir:
d = get_cache_dir(tmpdir, "tofu")
assert str(d.parent) == tmpdir
assert d.name == "tofu"
try:
_environ = os.environ.copy()
os.environ.pop("XDG_CACHE_HOME", None)
d = get_cache_dir(subdir="raspberry")
assert d.parent.name == ".cache"
assert d.name == "raspberry"
finally:
try:
d.rmdir()
except OSError:
pass
os.environ.clear()
os.environ.update(_environ)
try:
_environ = os.environ.copy()
pt = pathlib.Path(os.environ.get("TMPDIR", "/tmp/"))
os.environ["XDG_CACHE_HOME"] = str(pt)
d = get_cache_dir(subdir="banana")
assert d.parent == pt
assert d.name == "banana"
finally:
try:
d.rmdir()
except OSError:
pass
os.environ.clear()
os.environ.update(_environ)
def test_plotdir(tmp_path, monkeypatch):
"""Test getting plotting directory."""
from sattools.io import plotdir
monkeypatch.delenv("PLOT_BASEDIR", raising=False)
pd = plotdir(create=False)
assert pd.parent.parent.parent == pathlib.Path(
"/media/nas/x21308/plots_and_maps")
pd = plotdir(create=False, basedir=tmp_path)
assert pd.parent.parent.parent == tmp_path
monkeypatch.setenv("PLOT_BASEDIR", str(tmp_path))
pd = plotdir(create=False)
assert pd.parent.parent.parent == tmp_path
assert not pd.exists()
pd = plotdir(create=True)
assert pd.exists()
def test_datadir(tmp_path, monkeypatch):
"""Test getting NAS data directory."""
from sattools.io import nas_data_out
monkeypatch.delenv("NAS_DATA", raising=False)
pd = nas_data_out(create=False)
assert pd == pathlib.Path("/media/nas/x21308/data_out")
monkeypatch.setenv("NAS_DATA", str(tmp_path))
pd = nas_data_out(create=False)
assert pd == tmp_path / "data_out"
assert not pd.exists()
pd = nas_data_out(create=True)
assert pd.exists()
pd = nas_data_out(tmp_path / "fionnay", subdir="datum", create=True)
assert pd == tmp_path / "fionnay" / "datum"
assert pd.exists()
|
[
"sattools.io.get_cache_dir",
"tempfile.TemporaryDirectory",
"sattools.io.plotdir",
"os.environ.copy",
"os.environ.clear",
"os.environ.get",
"sattools.io.nas_data_out",
"pathlib.Path",
"os.environ.pop",
"os.environ.update"
] |
[((1315, 1336), 'sattools.io.plotdir', 'plotdir', ([], {'create': '(False)'}), '(create=False)\n', (1322, 1336), False, 'from sattools.io import plotdir\n'), ((1446, 1485), 'sattools.io.plotdir', 'plotdir', ([], {'create': '(False)', 'basedir': 'tmp_path'}), '(create=False, basedir=tmp_path)\n', (1453, 1485), False, 'from sattools.io import plotdir\n'), ((1596, 1617), 'sattools.io.plotdir', 'plotdir', ([], {'create': '(False)'}), '(create=False)\n', (1603, 1617), False, 'from sattools.io import plotdir\n'), ((1701, 1721), 'sattools.io.plotdir', 'plotdir', ([], {'create': '(True)'}), '(create=True)\n', (1708, 1721), False, 'from sattools.io import plotdir\n'), ((1931, 1957), 'sattools.io.nas_data_out', 'nas_data_out', ([], {'create': '(False)'}), '(create=False)\n', (1943, 1957), False, 'from sattools.io import nas_data_out\n'), ((2077, 2103), 'sattools.io.nas_data_out', 'nas_data_out', ([], {'create': '(False)'}), '(create=False)\n', (2089, 2103), False, 'from sattools.io import nas_data_out\n'), ((2179, 2204), 'sattools.io.nas_data_out', 'nas_data_out', ([], {'create': '(True)'}), '(create=True)\n', (2191, 2204), False, 'from sattools.io import nas_data_out\n'), ((2237, 2300), 'sattools.io.nas_data_out', 'nas_data_out', (["(tmp_path / 'fionnay')"], {'subdir': '"""datum"""', 'create': '(True)'}), "(tmp_path / 'fionnay', subdir='datum', create=True)\n", (2249, 2300), False, 'from sattools.io import nas_data_out\n'), ((194, 223), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (221, 223), False, 'import tempfile\n'), ((247, 276), 'sattools.io.get_cache_dir', 'get_cache_dir', (['tmpdir', '"""tofu"""'], {}), "(tmpdir, 'tofu')\n", (260, 276), False, 'from sattools.io import get_cache_dir\n'), ((376, 393), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (391, 393), False, 'import os\n'), ((402, 440), 'os.environ.pop', 'os.environ.pop', (['"""XDG_CACHE_HOME"""', 'None'], {}), "('XDG_CACHE_HOME', None)\n", (416, 440), False, 'import os\n'), ((453, 486), 'sattools.io.get_cache_dir', 'get_cache_dir', ([], {'subdir': '"""raspberry"""'}), "(subdir='raspberry')\n", (466, 486), False, 'from sattools.io import get_cache_dir\n'), ((662, 680), 'os.environ.clear', 'os.environ.clear', ([], {}), '()\n', (678, 680), False, 'import os\n'), ((689, 716), 'os.environ.update', 'os.environ.update', (['_environ'], {}), '(_environ)\n', (706, 716), False, 'import os\n'), ((745, 762), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (760, 762), False, 'import os\n'), ((883, 913), 'sattools.io.get_cache_dir', 'get_cache_dir', ([], {'subdir': '"""banana"""'}), "(subdir='banana')\n", (896, 913), False, 'from sattools.io import get_cache_dir\n'), ((1075, 1093), 'os.environ.clear', 'os.environ.clear', ([], {}), '()\n', (1091, 1093), False, 'import os\n'), ((1102, 1129), 'os.environ.update', 'os.environ.update', (['_environ'], {}), '(_environ)\n', (1119, 1129), False, 'import os\n'), ((1375, 1423), 'pathlib.Path', 'pathlib.Path', (['"""/media/nas/x21308/plots_and_maps"""'], {}), "('/media/nas/x21308/plots_and_maps')\n", (1387, 1423), False, 'import pathlib\n'), ((1975, 2017), 'pathlib.Path', 'pathlib.Path', (['"""/media/nas/x21308/data_out"""'], {}), "('/media/nas/x21308/data_out')\n", (1987, 2017), False, 'import pathlib\n'), ((789, 822), 'os.environ.get', 'os.environ.get', (['"""TMPDIR"""', '"""/tmp/"""'], {}), "('TMPDIR', '/tmp/')\n", (803, 822), False, 'import os\n')]
|
'''
gather redshift info across all observations for a given target type; for now from a single tile
'''
#test
#standard python
import sys
import os
import shutil
import unittest
from datetime import datetime
import json
import numpy as np
import fitsio
import glob
import argparse
from astropy.table import Table,join,unique,vstack
from matplotlib import pyplot as plt
parser = argparse.ArgumentParser()
parser.add_argument("--type", help="tracer type to be selected")
parser.add_argument("--tile", help="observed tile to use") #eventually remove this and just gather everything
args = parser.parse_args()
type = args.type
tile = args.tile
if type == 'LRG':
tarbit = 0 #targeting bit
if type == 'QSO':
tarbit = 2
if type == 'ELG':
tarbit = 1
print('gathering type,tile')
print(type,tile)
tp = 'SV1_DESI_TARGET'
print('targeting bit, target program type; CHECK THEY ARE CORRECT!')
print(tarbit,tp)
#location of inputs
coaddir = '/global/cfs/cdirs/desi/spectro/redux/blanc/tiles/'+tile
subsets = [x[0][len(coaddir):].strip('/') for x in os.walk(coaddir)] #something must work better than this, but for now...
#outputs
svdir = '/project/projectdirs/desi/users/ajross/catalogs/SV/'
version = 'test/'
dirout = svdir+'redshift_comps/'+version
outf = dirout +'/'+tile+'_'+type+'zinfo.fits'
if not os.path.exists(svdir+'redshift_comps'):
os.mkdir(svdir+'redshift_comps')
    print('made '+svdir+'redshift_comps directory')
if not os.path.exists(dirout):
os.mkdir(dirout)
print('made '+dirout)
ss = 0 #use to switch from creating to concatenating
for night in subsets:
if len(night) > 0:
print('going through subset '+night)
specs = []
#find out which spectrograph have data
for si in range(0,10):
try:
fl = coaddir+'/'+night+'/zbest-'+str(si)+'-'+str(tile)+'-'+night+'.fits'
#print(fl)
fitsio.read(fl)
specs.append(si)
except:
print('no spectrograph '+str(si)+ ' on subset '+night)
tspec = Table.read(coaddir+'/'+night+'/zbest-'+str(specs[0])+'-'+str(tile)+'-'+night+'.fits',hdu='ZBEST')
tf = Table.read(coaddir+'/'+night+'/coadd-'+str(specs[0])+'-'+str(tile)+'-'+night+'.fits',hdu='FIBERMAP')
for i in range(1,len(specs)):
tn = Table.read(coaddir+'/'+night+'/zbest-'+str(specs[i])+'-'+str(tile)+'-'+night+'.fits',hdu='ZBEST')
tnf = Table.read(coaddir+'/'+night+'/coadd-'+str(specs[i])+'-'+str(tile)+'-'+night+'.fits',hdu='FIBERMAP')
tspec = vstack([tspec,tn])
tf = vstack([tf,tnf])
tspec = join(tspec,tf,keys=['TARGETID'])
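        # attach the fibermap (targeting) columns to the redshift table, matched on TARGETID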
wtype = ((tspec[tp] & 2**tarbit) > 0)
print(str(len(tspec))+' total entries '+str(len(tspec[wtype]))+' that are '+type)
tspec = tspec[wtype]
tspec['subset'] = night
if ss == 0:
tspect = tspec
ss = 1
else:
tspect = vstack([tspect,tspec])
print('there are now '+str(len(tspect)) +' entries with '+str(len(np.unique(tspect['TARGETID'])))+' unique target IDs')
tspect.sort('TARGETID')
tspect.write(outf,format='fits', overwrite=True)
|
[
"os.mkdir",
"argparse.ArgumentParser",
"os.walk",
"os.path.exists",
"astropy.table.join",
"astropy.table.vstack",
"fitsio.read",
"numpy.unique"
] |
[((383, 408), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (406, 408), False, 'import argparse\n'), ((1314, 1354), 'os.path.exists', 'os.path.exists', (["(svdir + 'redshift_comps')"], {}), "(svdir + 'redshift_comps')\n", (1328, 1354), False, 'import os\n'), ((1358, 1392), 'os.mkdir', 'os.mkdir', (["(svdir + 'redshift_comps')"], {}), "(svdir + 'redshift_comps')\n", (1366, 1392), False, 'import os\n'), ((1458, 1480), 'os.path.exists', 'os.path.exists', (['dirout'], {}), '(dirout)\n', (1472, 1480), False, 'import os\n'), ((1486, 1502), 'os.mkdir', 'os.mkdir', (['dirout'], {}), '(dirout)\n', (1494, 1502), False, 'import os\n'), ((1057, 1073), 'os.walk', 'os.walk', (['coaddir'], {}), '(coaddir)\n', (1064, 1073), False, 'import os\n'), ((2648, 2682), 'astropy.table.join', 'join', (['tspec', 'tf'], {'keys': "['TARGETID']"}), "(tspec, tf, keys=['TARGETID'])\n", (2652, 2682), False, 'from astropy.table import Table, join, unique, vstack\n'), ((2579, 2598), 'astropy.table.vstack', 'vstack', (['[tspec, tn]'], {}), '([tspec, tn])\n', (2585, 2598), False, 'from astropy.table import Table, join, unique, vstack\n'), ((2615, 2632), 'astropy.table.vstack', 'vstack', (['[tf, tnf]'], {}), '([tf, tnf])\n', (2621, 2632), False, 'from astropy.table import Table, join, unique, vstack\n'), ((2979, 3002), 'astropy.table.vstack', 'vstack', (['[tspect, tspec]'], {}), '([tspect, tspec])\n', (2985, 3002), False, 'from astropy.table import Table, join, unique, vstack\n'), ((1919, 1934), 'fitsio.read', 'fitsio.read', (['fl'], {}), '(fl)\n', (1930, 1934), False, 'import fitsio\n'), ((3076, 3105), 'numpy.unique', 'np.unique', (["tspect['TARGETID']"], {}), "(tspect['TARGETID'])\n", (3085, 3105), True, 'import numpy as np\n')]
|
import cv2
import numpy as np
cap = cv2.VideoCapture('grace4.mp4')
def make_360p():
cap.set(3, 480)
cap.set(4, 360)
def rescale_frame(frame):
    percent = 25
width = int(frame.shape[1] * percent/100)
height = int(frame.shape[0] * percent/100)
dim = (width, height)
return cv2.resize(frame, dim, interpolation = cv2.INTER_AREA)
subtractor = cv2.createBackgroundSubtractorMOG2()
fps = cap.get(cv2.CAP_PROP_FPS)
make_360p()
while True:
_, frame = cap.read()
frame38 = rescale_frame(frame)
frame38 = cv2.transpose(frame38,frame38)
frame38 = cv2.flip(frame38, 1)
mask = subtractor.apply(frame38)
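    # the MOG2 foreground mask flags moving pixels; sufficiently large contours are boxed as moving objects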
(contours,_) = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for contour in contours:
if cv2.contourArea(contour) < 190:
continue
(x,y,w,h) = cv2.boundingRect(contour)
cv2.rectangle(frame38, (x,y),(x+w,y+h),(240,32,160),3)
cv2.imshow("Zal", frame38)
key = cv2.waitKey(30)
if key == 27:
break
cap.release()
cv2.destroyAllWindows()
|
[
"cv2.boundingRect",
"cv2.createBackgroundSubtractorMOG2",
"cv2.findContours",
"cv2.contourArea",
"cv2.waitKey",
"cv2.imshow",
"cv2.transpose",
"cv2.VideoCapture",
"cv2.rectangle",
"cv2.flip",
"cv2.destroyAllWindows",
"cv2.resize"
] |
[((37, 67), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""grace4.mp4"""'], {}), "('grace4.mp4')\n", (53, 67), False, 'import cv2\n'), ((370, 406), 'cv2.createBackgroundSubtractorMOG2', 'cv2.createBackgroundSubtractorMOG2', ([], {}), '()\n', (404, 406), False, 'import cv2\n'), ((1072, 1095), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1093, 1095), False, 'import cv2\n'), ((301, 353), 'cv2.resize', 'cv2.resize', (['frame', 'dim'], {'interpolation': 'cv2.INTER_AREA'}), '(frame, dim, interpolation=cv2.INTER_AREA)\n', (311, 353), False, 'import cv2\n'), ((544, 575), 'cv2.transpose', 'cv2.transpose', (['frame38', 'frame38'], {}), '(frame38, frame38)\n', (557, 575), False, 'import cv2\n'), ((589, 609), 'cv2.flip', 'cv2.flip', (['frame38', '(1)'], {}), '(frame38, 1)\n', (597, 609), False, 'import cv2\n'), ((671, 733), 'cv2.findContours', 'cv2.findContours', (['mask', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (687, 733), False, 'import cv2\n'), ((968, 994), 'cv2.imshow', 'cv2.imshow', (['"""Zal"""', 'frame38'], {}), "('Zal', frame38)\n", (978, 994), False, 'import cv2\n'), ((1005, 1020), 'cv2.waitKey', 'cv2.waitKey', (['(30)'], {}), '(30)\n', (1016, 1020), False, 'import cv2\n'), ((852, 877), 'cv2.boundingRect', 'cv2.boundingRect', (['contour'], {}), '(contour)\n', (868, 877), False, 'import cv2\n'), ((886, 951), 'cv2.rectangle', 'cv2.rectangle', (['frame38', '(x, y)', '(x + w, y + h)', '(240, 32, 160)', '(3)'], {}), '(frame38, (x, y), (x + w, y + h), (240, 32, 160), 3)\n', (899, 951), False, 'import cv2\n'), ((779, 803), 'cv2.contourArea', 'cv2.contourArea', (['contour'], {}), '(contour)\n', (794, 803), False, 'import cv2\n')]
|
#!/usr/bin/env python
# $Id$
"""74 solutions"""
import puzzler
from puzzler.puzzles.hexiamonds import Hexiamonds4x9
puzzler.run(Hexiamonds4x9)
|
[
"puzzler.run"
] |
[((119, 145), 'puzzler.run', 'puzzler.run', (['Hexiamonds4x9'], {}), '(Hexiamonds4x9)\n', (130, 145), False, 'import puzzler\n')]
|
"""
This module constructs a network of streets.
"""
import numpy as np
import json
# Adobe flat UI colour scheme
DARK_BLUE = "#2C3E50"
MEDIUM_BLUE = "#2980B9"
LIGHT_BLUE = "#3498DB"
RED = "#E74C3C"
WHITE = "#ECF0F1"
# Colour parameters
STROKE_COLOUR = DARK_BLUE
STREET_COLOUR = DARK_BLUE
JUNCTION_COLOUR = MEDIUM_BLUE
JUNCTION_TEXT = DARK_BLUE
RESULTS_COLOUR = RED
RESULTS_TEXT = DARK_BLUE
# Dimensions
OFFSET = 50
STREET_WIDTH = 8
STROKE_WIDTH = 2
JUNCTION_WIDTH = 20
MAX_RADIUS = 25
INITIAL_DECIBELS = 120
# Max absorption
MAX_ABSORPTION = 0.1
# Don't plot absorption coefficients (option)
ABSORPTION = False
class Constructor(object):
"""
    This class initialises a network object of specified dimensions, modifies
    the network with the modifying methods below, and outputs the adjacency
    matrix of the network as well as its visualisation in the SVG format.
"""
def __init__(self):
self.__horizontals = None
self.__verticals = None
self.__nodes = None
self.__adjacency = None
self.__modified_adjacency = None
self.__positions = None
self.__stage = 0
def set_grid(self, horizontals, verticals, length):
"""
This setter method sets stage 1 (setting and moving) of the construction.
"""
try:
horizontals = int(horizontals)
verticals = int(verticals)
except ValueError:
raise ValueError("Horizontals and verticals must be integers.")
try:
length = float(length)
except ValueError:
raise ValueError("Length must be a floating point number.")
for quantity in [horizontals, verticals, length]:
if quantity < 0:
raise ValueError(
"Horizontals, verticals and length must be positive numbers.")
self.__horizontals = horizontals
self.__verticals = verticals
self.__nodes = horizontals*verticals
self.__adjacency = self.__create_adjacency()
self.__modified_adjacency = None
self.__positions = self.__create_positions(length)
self.__stage = 1
def unset_grid(self):
"""
        This method resets the network to stage 0 (instantiation) of the
        construction.
"""
self.__horizontals = None
self.__verticals = None
self.__nodes = None
self.__adjacency = None
self.__modified_adjacency = None
self.__positions = None
self.__stage = 0
def __create_adjacency(self):
"""
This private method returns initial adjacency matrix.
"""
adjacency = np.zeros((self.__nodes, self.__nodes), dtype=np.int)
# Normal adjacency matrix for grid network
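        # nodes are numbered row by row, so neighbours differ by 1 (same row)
        # or by the number of verticals (same column)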
for i in range(self.__nodes):
for j in range(self.__nodes):
if (j == i+1 and j%self.__verticals != 0) or \
(j == i-1 and i%self.__verticals != 0) or \
j == i+self.__verticals or \
j == i-self.__verticals:
adjacency[i][j] = 1
return adjacency
def __create_positions(self, length):
"""
This private method returns initial positions matrix.
"""
positions = np.zeros((self.__nodes, 2))
for i in range(self.__nodes):
positions[i][0] = i%self.__verticals*length
positions[i][1] = i//self.__verticals*length
return positions
def move_horizontal_line(self, i, length):
"""
This method moves the horizontal line i.
"""
assert self.__stage == 1
if i not in range(self.__horizontals):
raise ValueError("No such horizontal line.")
for node in range(self.__nodes):
if node//self.__verticals == i:
self.__positions[node][1] += length
def move_vertical_line(self, j, length):
"""
This method moves the vertical line j.
"""
assert self.__stage == 1
if j not in range(self.__verticals):
raise ValueError("No such vertical line.")
for node in range(self.__nodes):
if node%self.__verticals == j:
self.__positions[node][0] += length
def delete_connection(self, i, j):
"""
This method deletes the street (i, j).
"""
if self.__stage == 1:
            self.__stage = 2 # set stage to 2 so lines can no longer be moved
assert self.__stage == 2
if i not in range(self.__nodes) or j not in range(self.__nodes):
raise ValueError("Nodes out of range.")
if self.__adjacency[i][j] == 0:
raise ValueError("Junctions are not neighbours.")
self.__adjacency[i][j] = 0
self.__adjacency[j][i] = 0
to_delete = []
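        # a junction left with exactly two colinear connections is redundant:
        # merge its two streets into one and mark the junction for deletion;
        # junctions left with no connections at all are deleted as well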
if sum(self.__adjacency[i]) == 2:
connections = []
for k in range(self.__nodes):
if self.__adjacency[i][k] == 1:
connections.append(k)
if (self.__positions[i][0] == self.__positions[connections[0]][0] and \
self.__positions[i][0] == self.__positions[connections[1]][0]) or \
(self.__positions[i][1] == self.__positions[connections[0]][1] and \
self.__positions[i][1] == self.__positions[connections[1]][1]):
self.__adjacency[connections[0]][connections[1]] = 1
self.__adjacency[connections[1]][connections[0]] = 1
to_delete.append(i)
elif sum(self.__adjacency[i]) == 0:
to_delete.append(i)
if sum(self.__adjacency[j]) == 2:
connections = []
for k in range(self.__nodes):
if self.__adjacency[j][k] == 1:
connections.append(k)
if (self.__positions[j][0] == self.__positions[connections[0]][0] and \
self.__positions[j][0] == self.__positions[connections[1]][0]) or \
(self.__positions[j][1] == self.__positions[connections[0]][1] and \
self.__positions[j][1] == self.__positions[connections[1]][1]):
self.__adjacency[connections[0]][connections[1]] = 1
self.__adjacency[connections[1]][connections[0]] = 1
to_delete.append(j)
elif sum(self.__adjacency[j]) == 0:
to_delete.append(j)
if len(to_delete) != 0:
self.__adjacency = np.delete(self.__adjacency, to_delete, axis=0)
self.__adjacency = np.delete(self.__adjacency, to_delete, axis=1)
self.__positions = np.delete(self.__positions, to_delete, axis=0)
self.__nodes = int(self.__nodes - len(to_delete))
def modify_adjacency(self, width, alpha, beta):
"""
This method creates new adjacency matrix with dictionaries of keys
(alpha, beta, street width, street length, orientation) instead of 1s.
"""
if self.__stage == 1 or self.__stage == 2:
self.__stage = 3
assert self.__stage == 3
try:
width = float(width)
alpha = float(alpha)
beta = float(beta)
except ValueError:
raise ValueError("Width and absorption must be floating point numbers.")
if width <= 0:
raise ValueError("Width must be a positive number.")
if alpha < 0 or alpha > 1 or beta < 0 or beta > 1:
raise ValueError("Absorption must be a number between 0 and 1.")
self.__modified_adjacency = self.__adjacency.tolist() # To python structure
positions = self.__positions
for i in range(self.__nodes):
for j in range(i):
if self.__adjacency[i][j] == 1:
if positions[i][1] == positions[j][1]:
length = abs(positions[i][0] - positions[j][0]).tolist()
if positions[i][0] < positions[j][0]:
orientation = 0
elif positions[i][0] > positions[j][0]:
orientation = 2
else:
raise ValueError("Points are at the same position.")
elif positions[i][0] == positions[j][0]:
length = abs(positions[i][1] - positions[j][1]).tolist()
if positions[i][1] < positions[j][1]:
orientation = 1
elif positions[i][1] > positions[j][1]:
orientation = 3
else:
raise ValueError("Points are at the same position.")
else:
raise ValueError("Points are not colinear.")
self.__modified_adjacency[i][j] = {
"alpha": alpha,
"beta": beta,
"width": width,
"length": length,
"orientation": orientation}
self.__modified_adjacency[j][i] = {
"alpha": alpha,
"beta": beta,
"width": width,
"length": length,
"orientation": (orientation+2)%4}
def unmodify_adjacency(self):
"""
This method is used to set the stage to stage 2 (deleting) of the
construction.
"""
self.__stage = 2
self.__modified_adjacency = None
def change_width(self, i, j, width):
"""
This method changes the street width of street (i, j).
"""
assert self.__stage == 3
try:
width = float(width)
except ValueError:
raise ValueError("Width must be a floating point number.")
if width <= 0:
raise ValueError("Width must be a positive number.")
if i not in range(self.__nodes) or j not in range(self.__nodes):
raise ValueError("Nodes out of range")
if self.__modified_adjacency[i][j] == 0:
raise ValueError("Junctions are not neighbours.")
self.__modified_adjacency[i][j]["width"] = width
self.__modified_adjacency[j][i]["width"] = width
def change_alpha(self, i, j, alpha):
"""
This method changes the wall absorption of street (i, j).
"""
assert self.__stage == 3
try:
alpha = float(alpha)
except ValueError:
raise ValueError("Absorption must be a floating point number.")
if alpha < 0 or alpha > 1:
raise ValueError("Absorption must be a number between 0 and 1")
if i not in range(self.__nodes) or j not in range(self.__nodes):
raise ValueError("Nodes out of range.")
if self.__modified_adjacency[i][j] == 0:
raise ValueError("Junctions are not neighbours.")
self.__modified_adjacency[i][j]["alpha"] = alpha
self.__modified_adjacency[j][i]["alpha"] = alpha
def change_beta(self, i, j, beta):
"""
This method changes the air absorption of street (i, j).
"""
assert self.__stage == 3
try:
beta = float(beta)
except ValueError:
raise ValueError("Absorption must be a floating point number.")
if beta < 0 or beta > 1:
raise ValueError("Absorption must be a number between 0 and 1")
if i not in range(self.__nodes) or j not in range(self.__nodes):
raise ValueError("Nodes out of range.")
if self.__modified_adjacency[i][j] == 0:
raise ValueError("Junctions are not neighbours.")
self.__modified_adjacency[i][j]["beta"] = beta
self.__modified_adjacency[j][i]["beta"] = beta
def get_horizontals(self):
"""
This getter method returns the number of horizontal streets.
"""
return self.__horizontals
def get_verticals(self):
"""
This getter method returns the number of vertical streets.
"""
return self.__verticals
def get_adjacency(self):
"""
This getter method returns the normal adjacency matrix.
"""
return self.__adjacency
def get_modified_adjacency(self):
"""
This getter method returns the modified adjacency matrix.
"""
return self.__modified_adjacency
def get_positions(self):
"""
This getter method returns the positions matrix.
"""
return self.__positions
def get_stage(self):
"""
This getter method returns current stage index.
"""
return self.__stage
def import_network(self, invalues):
"""
This method is used to import existing network from the invalues
dictionary.
"""
self.__horizontals = invalues["horizontals"]
self.__verticals = invalues["verticals"]
self.__nodes = invalues["nodes"]
self.__adjacency = np.array(invalues["adjacency"])
self.__modified_adjacency = invalues["modified_adjacency"]
self.__positions = np.array(invalues["positions"])
self.__stage = invalues["stage"]
def export_network(self, filename):
"""
This method is used to export currently constructed network to json
format to some file.
"""
data = {
"horizontals": self.__horizontals,
"verticals": self.__verticals,
"nodes": self.__nodes,
"adjacency": self.__adjacency.tolist(),
"modified_adjacency": self.__modified_adjacency,
"positions": self.__positions.tolist(),
"stage": self.__stage
}
with open(filename, "w") as file:
json.dump(data, file)
def draw_network(self, filename, results=False):
"""
        This method writes an SVG drawing of the network to the given file and
        optionally plots the results.
"""
def get_hex_fill(coefficient, max_absorption):
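            # map a coefficient in [0, max_absorption] to a hex colour running
            # from blue (low absorption) to red (high absorption)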
red = hex(int(coefficient/max_absorption*255))
red = red[-2:] if len(red)==4 else "0{0}".format(red[-1])
blue = hex(int((1-coefficient/max_absorption)*255))
blue = blue[-2:] if len(blue)==4 else "0{0}".format(blue[-1])
fill = "#{0}00{1}".format(red, blue)
return fill
def svg_header(width, height):
return "<svg width='{0}' height='{1}'>\n".format(width, height)
def svg_line(x1, y1, x2, y2, fill=STREET_COLOUR, width=STREET_WIDTH):
return "<line x1='{0}' y1='{1}' x2='{2}' y2='{3}' \
style='stroke: {4}; stroke-width: {5}'/>\n".format(x1+OFFSET, y1+OFFSET,
x2+OFFSET, y2+OFFSET,
fill, width)
def svg_square(x, y):
return "<rect x='{0}' y='{1}' width='{2}' height='{2}' \
style='stroke: {3}; stroke-width: {4}; fill: {5}'/>\n".format(x-JUNCTION_WIDTH/2+OFFSET,
y-JUNCTION_WIDTH/2+OFFSET,
JUNCTION_WIDTH,
STROKE_COLOUR,
STROKE_WIDTH,
JUNCTION_COLOUR
)
def svg_circle(x, y, r, fill):
return "<circle cx='{0}' cy='{1}' r='{2}' style='stroke: {3}; \
stroke-width: {4}; fill: {5}'/>\n".format(x+OFFSET,
y+OFFSET,
r,
STROKE_COLOUR,
STROKE_WIDTH,
fill
)
def svg_text(x, y, colour, size, text):
move = (size-15)/4 # adjust text position
return "<text text-anchor='middle' x='{0}' y='{1}' \
style='fill: {2}; font-size: {3}'>{4}</text>\n".format(x+OFFSET,
y+OFFSET+JUNCTION_WIDTH/4 + move,
colour,
size,
text
)
positions = self.__positions
if self.__stage == 3:
adjacency = self.__modified_adjacency
modified = True
else:
adjacency = self.__adjacency
modified = False
with open(filename, "w") as file:
width = positions[self.__nodes-1][0]+2*OFFSET
height = positions[self.__nodes-1][1]+2*OFFSET
file.write(svg_header(width, height))
# Draw walls if modified (with absorption)
if modified and ABSORPTION:
for i in range(self.__nodes):
for j in range(i):
if adjacency[i][j] != 0:
[xi, yi] = positions[i]
[xj, yj] = positions[j]
alpha = adjacency[i][j]["alpha"]
alpha_fill = get_hex_fill(alpha, MAX_ABSORPTION)
width = adjacency[i][j]["width"]
translation = width/2
if xi == xj:
file.write(svg_line(xi-translation, yi,
xj-translation, yj,
alpha_fill, width
))
file.write(svg_line(xi+translation, yi,
xj+translation, yj,
alpha_fill, width
))
elif yi == yj:
file.write(svg_line(xi, yi-translation,
xj, yj-translation,
alpha_fill, width
))
file.write(svg_line(xi, yi+translation,
xj, yj+translation,
alpha_fill, width
))
# Draw streets (with absorption if modified)
for i in range(self.__nodes):
for j in range(i):
if adjacency[i][j] != 0:
[xi, yi] = positions[i]
[xj, yj] = positions[j]
if not modified or not ABSORPTION:
file.write(svg_line(xi, yi, xj, yj))
else:
beta = adjacency[i][j]["beta"]
beta_fill = get_hex_fill(beta, MAX_ABSORPTION)
width = adjacency[i][j]["width"]
file.write(svg_line(xi, yi, xj, yj,
beta_fill, width
))
# Draw junctions (rectangles with numbers)
counter = 0
for position in positions:
file.write(svg_square(position[0], position[1]))
file.write(svg_text(position[0], position[1], JUNCTION_TEXT, 15, counter))
counter += 1
# Draw results
if results:
(X, Y, Z) = results
for i in range(len(Z)):
decibels = 20*np.log10(Z[i]*10**(INITIAL_DECIBELS/20))
if decibels < 0:
continue
# Radius
radius = (decibels/INITIAL_DECIBELS)*MAX_RADIUS
file.write(svg_circle(X[i], Y[i], radius, RESULTS_COLOUR))
if decibels > 30:
file.write(svg_text(X[i], Y[i], RESULTS_TEXT, radius, int(round(decibels))))
file.write("</svg>")
|
[
"json.dump",
"numpy.zeros",
"numpy.array",
"numpy.log10",
"numpy.delete"
] |
[((2673, 2725), 'numpy.zeros', 'np.zeros', (['(self.__nodes, self.__nodes)'], {'dtype': 'np.int'}), '((self.__nodes, self.__nodes), dtype=np.int)\n', (2681, 2725), True, 'import numpy as np\n'), ((3291, 3318), 'numpy.zeros', 'np.zeros', (['(self.__nodes, 2)'], {}), '((self.__nodes, 2))\n', (3299, 3318), True, 'import numpy as np\n'), ((13085, 13116), 'numpy.array', 'np.array', (["invalues['adjacency']"], {}), "(invalues['adjacency'])\n", (13093, 13116), True, 'import numpy as np\n'), ((13211, 13242), 'numpy.array', 'np.array', (["invalues['positions']"], {}), "(invalues['positions'])\n", (13219, 13242), True, 'import numpy as np\n'), ((6475, 6521), 'numpy.delete', 'np.delete', (['self.__adjacency', 'to_delete'], {'axis': '(0)'}), '(self.__adjacency, to_delete, axis=0)\n', (6484, 6521), True, 'import numpy as np\n'), ((6553, 6599), 'numpy.delete', 'np.delete', (['self.__adjacency', 'to_delete'], {'axis': '(1)'}), '(self.__adjacency, to_delete, axis=1)\n', (6562, 6599), True, 'import numpy as np\n'), ((6631, 6677), 'numpy.delete', 'np.delete', (['self.__positions', 'to_delete'], {'axis': '(0)'}), '(self.__positions, to_delete, axis=0)\n', (6640, 6677), True, 'import numpy as np\n'), ((13895, 13916), 'json.dump', 'json.dump', (['data', 'file'], {}), '(data, file)\n', (13904, 13916), False, 'import json\n'), ((19959, 20005), 'numpy.log10', 'np.log10', (['(Z[i] * 10 ** (INITIAL_DECIBELS / 20))'], {}), '(Z[i] * 10 ** (INITIAL_DECIBELS / 20))\n', (19967, 20005), True, 'import numpy as np\n')]
|
# _*_ coding: utf-8 _*_
import re
__author__ = "andan"
__data__ = "2018/9/22 12:44"
from django import forms
from operation.models import UserAsk
class UserAskForm(forms.ModelForm):
class Meta:
model = UserAsk
fields = ['name', 'moblie', 'course_name']
def clean_moblie(self):
moblie = self.cleaned_data['moblie']
REGEX_MOBILE ="^1[3567890]\d{9}$"
p = re.compile(REGEX_MOBILE)
if p.match(moblie):
return moblie
else:
raise forms.ValidationError("手机号码非法",code="mobile_invaild")
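# A minimal usage sketch (illustrative only, not part of the original module; it
# assumes a configured Django project with the operation.UserAsk model migrated).
# Binding POST data to the form runs clean_moblie(), so an invalid number raises
# the "mobile_invaild" ValidationError:
#
#   form = UserAskForm({'name': 'test', 'moblie': '13812345678', 'course_name': 'python'})
#   form.is_valid()   # True only when 'moblie' matches REGEX_MOBILE
#   form.errors       # holds the validation message otherwise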
|
[
"django.forms.ValidationError",
"re.compile"
] |
[((405, 429), 're.compile', 're.compile', (['REGEX_MOBILE'], {}), '(REGEX_MOBILE)\n', (415, 429), False, 'import re\n'), ((516, 570), 'django.forms.ValidationError', 'forms.ValidationError', (['"""手机号码非法"""'], {'code': '"""mobile_invaild"""'}), "('手机号码非法', code='mobile_invaild')\n", (537, 570), False, 'from django import forms\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#This script scores results from each student
#Drawn images are read from a .csv file, decoded from their base64 string encoding,
#and scored against machine learning models saved to disk
import csv
import os
#import file
import cv2
import re
import base64
import numpy as np
from keras.models import model_from_json
from sklearn.metrics import cohen_kappa_score
from tkinter import *
import tkinter as tk
from tkinter.filedialog import askopenfilename
from tkinter.filedialog import askdirectory
from tkinter import simpledialog
import sys
import os.path
#Specify max size due to large size of Base64 images
#**~MAC/LINUX~**#
#csv.field_size_limit(sys.maxsize)
#**~WINDOWS 64 BIT~**#
csv.field_size_limit(2**30)
#Specify which questions are drawn images. Their associated value is the
#size of the image used in data preprocessing for the machine learning model.
drawn_images ={
"Q1": 64,
"Q2": 128,
"Q3": 64,
"Q4": 64,
"Q7": 128,
"Q8": 128,
"Q9": 128,
"Q17": 128,
"Q18": 64
}
#init variables
filename = ""
filedir = ""
modeldir = ""
prefix = ""
##Retrieve the CSV file to read image data
def getCSVfile():
global filename
global filedir
filename = askopenfilename()
filedir = os.path.abspath(os.path.join(filename, os.pardir))
filedir += "/"
print(filedir)
#Select the directory containing H5 and JSON model files.
def getModelDir():
global modeldir
modeldir = askdirectory()
modeldir += "/"
#Select a prefix to read only specific records starting with the prefix.
def getPrefix():
global prefix
prefix = simpledialog.askstring("input string", "Enter an ID prefix:")
#Run program and create two response CSV files.
def Start():
#for indexing
drawn_images_list = list(drawn_images)
#Load models:
models = []
print("Loading models... This may take a moment")
for key in drawn_images:
json_file_path = modeldir + key + ".json"
weight_file_path = modeldir + key + ".h5"
json_file = open(json_file_path, 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
loaded_model.load_weights(weight_file_path)
loaded_model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
models.append(loaded_model)
print(f"Loaded model {key}...")
print("Done loading models")
#Function to process each individual image
#Returns a prediction score of 1 or 0.
def process_image(Qnum, uri, partid):
print(f"Processing image: {Qnum}")
#Ensure value exists
if(uri == None): return 0
#Grab value to resize image
size = drawn_images[Qnum]
#create image file as temporary
path = modeldir + "temp.png"
img = open(path, "wb")
img.write(base64.b64decode(uri))
img = cv2.imread(path, 0)
        #Try resizing the image. If the URI is corrupted, return 'c'.
try:
img = cv2.resize(img, (size, size))
except:
return 'c'
img_reshape = np.array(img).reshape(-1,size,size,1)
#Run image against model
print("Acc: ")
print (models[drawn_images_list.index(Qnum)].predict(img_reshape))
pred = models[drawn_images_list.index(Qnum)].predict_classes(img_reshape)[0]
#This flips the class as the prediction score is on the opposite entry.
pred = ("1", "0")[pred == 0]
pred_array = models[drawn_images_list.index(Qnum)].predict(img_reshape)
#Remove the image to make room for another
os.remove(modeldir + "temp.png")
eps = .15 #Min. acceptable criterion
if(1-np.amax(pred_array) > eps):
return 'f'
return pred
#Open two files, one for response scores and the other for written
#question responses. Each file name is appended with a prefix if
    #a prefix is given.
data = open(filename, 'r')
responses = open(filedir + 'responses_pref' + prefix + '.csv', 'w')
Wresponses = open(filedir + 'Wresponses_pref' + prefix + '.csv', 'w')
read_data = csv.reader(data, delimiter=',')
write_responses = csv.writer(responses, delimiter=',',
quotechar='"', quoting=csv.QUOTE_ALL)
write_Wresponses = csv.writer(Wresponses, delimiter=',',
quotechar='"', quoting=csv.QUOTE_ALL)
line_count = 0
for row in read_data:
if row[0].startswith(prefix, 0, len(prefix)):
print(row[0])
if line_count == 0:
line_count += 1
write_responses.writerow(['Number','Participant', 'Q1_drawn', 'Q2_drawn',
'Q3_drawn', 'Q4_drawn', 'Q7_drawn', 'Q8_drawn',
'Q9_drawn', 'Q17_drawn', 'Q18_drawn', 'Q5_response',
'Q5_correct_response', 'Q5_accuracy','Q6_response',
'Q6_correct_response', 'Q6_accuracy','Q10_1_response',
'Q10_1_correct_response','Q10_1_accuracy', 'Q10_2_response',
'Q10_2_correct_response', 'Q10_2_accuracy', 'Q11_response',
'Q11_correct_response', 'Q11_accuracy', 'Q12_response',
'Q12_correct_response','Q12_accuracy', 'Q13_response',
'Q13_correct_response', 'Q13_accuracy', 'Q14_1_response',
'Q14_1_correct_response', 'Q14_1_accuracy', 'Q14_2_response',
'Q14_2_correct_response','Q14_2_accuracy', 'Q15_AB_response',
'Q15_AB_correct_response','Q15_AB_accuracy', 'Q15_AD_response',
'Q15_AD_correct_response','Q15_AD_accuracy', 'Q15_BC_response',
'Q15_BC_correct_response','Q15_BC_accuracy', 'Q15_CD_response',
'Q15_CD_correct_response','Q15_CD_accuracy','Q15_BD_response',
'Q15_BD_correct_response','Q15_BD_accuracy', 'Total', 'Date Submitted'])
write_Wresponses.writerow(['Number','Participant','Q2_written', 'Q7_written', 'Q8_written',
'Q9_written', 'Q14_2_written', 'Q17_written', 'Q18_written', 'Date Submitted'])
else:
                #resp is used for responses, respW for written responses
resp = []
respW = []
count = 0
##logic here
#append number and name
resp.append(line_count)
resp.append(row[0])
respW.append(line_count)
respW.append(row[0])
#append drawn images
for x in drawn_images:
y = row[drawn_images_list.index(x) + 2].split(',')
if(len(y) > 1):
resp.append(process_image(x, y[1], row[0]))
else: resp.append("N/A")
#print(row[drawn_images_list.index(x) + 2])
##Q5
resp.append(row[23])
resp.append("A")
resp.append(("0", "1")[row[23] == "A"])
#Q6
resp.append(row[24])
resp.append("A")
resp.append(("0", "1")[row[24] == "A"])
#Q10_1
resp.append(row[15])
resp.append("Josh")
resp.append(("0", "1")["josh" in row[15].lower()])
#Q10_2
resp.append(row[18])
resp.append("josh")
resp.append(("0", "1")["josh" in row[18].lower()])
#Q11
resp.append(row[25])
resp.append("B")
resp.append(("0", "1")[row[25] == "B"])
#Q12
resp.append(row[26])
resp.append("B")
resp.append(("0", "1")[row[26] == "B"])
#Q13
resp.append(row[17])
resp.append("40")
resp.append(("0", "1")["40" in row[19]])
#Q14_1
resp.append(row[18])
resp.append("Josh")
resp.append(("0", "1")["josh" in row[18].lower()])
#Q15
##Refer to re library for digit extraction
resp.append(row[20])
resp.append("7040-7080")
val = re.findall("\d+", row[20])
if(len(val) > 0):
resp.append(("0", "1")[int(val[0]) >= 7040 and int(val[0]) <= 7080])
else: resp.append("0")
#Q16:
resp.append(row[27])
resp.append("yes")
resp.append(("0", "1")[row[27] == "yes"])
resp.append(row[28])
resp.append("yes")
resp.append(("0", "1")[row[28] == "yes"])
resp.append(row[29])
resp.append("yes")
resp.append(("0", "1")[row[29] == "yes"])
resp.append(row[30])
resp.append("no")
resp.append(("0", "1")[row[30] == "no"])
resp.append(row[31])
resp.append("yes")
resp.append(("0", "1")[row[31] == "yes"])
##WRITE ALL THE WRITTEN RESPONSES HERE
respW.append(row[11])
respW.append(row[12])
respW.append(row[13])
respW.append(row[14])
respW.append(row[16])
respW.append(row[19])
respW.append(row[21])
respW.append(row[22])
#Total
sum = 0
for x in resp:
if x == "1":
sum += 1
resp.append(sum)
#Dates
resp.append(row[32])
respW.append(row[32])
#Write rows
write_responses.writerow(resp)
write_Wresponses.writerow(respW)
line_count += 1
print(f"Finished, {line_count} rows read: ")
data.close()
responses.close()
##Run GUI
root = tk.Tk()
root.wm_title("Run Participant Data")
selectCsv = tk.Button(root, text='Select CSV file', width=25, command=getCSVfile)
selectCsv.pack()
selectDirectory = tk.Button(root, text='Select model directory', width=25, command=getModelDir)
selectDirectory.pack()
selectPrefix = tk.Button(root, text='Select an ID prefix', width=25, command=getPrefix)
selectPrefix.pack()
startButton = tk.Button(root, text='Start', width=25, command=Start)
startButton.pack()
root.mainloop()
|
[
"os.remove",
"csv.reader",
"csv.writer",
"tkinter.Button",
"csv.field_size_limit",
"tkinter.filedialog.askopenfilename",
"base64.b64decode",
"tkinter.filedialog.askdirectory",
"tkinter.simpledialog.askstring",
"cv2.imread",
"keras.models.model_from_json",
"numpy.array",
"numpy.amax",
"re.findall",
"os.path.join",
"tkinter.Tk",
"cv2.resize"
] |
[((741, 770), 'csv.field_size_limit', 'csv.field_size_limit', (['(2 ** 30)'], {}), '(2 ** 30)\n', (761, 770), False, 'import csv\n'), ((10354, 10361), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (10359, 10361), True, 'import tkinter as tk\n'), ((10412, 10481), 'tkinter.Button', 'tk.Button', (['root'], {'text': '"""Select CSV file"""', 'width': '(25)', 'command': 'getCSVfile'}), "(root, text='Select CSV file', width=25, command=getCSVfile)\n", (10421, 10481), True, 'import tkinter as tk\n'), ((10517, 10594), 'tkinter.Button', 'tk.Button', (['root'], {'text': '"""Select model directory"""', 'width': '(25)', 'command': 'getModelDir'}), "(root, text='Select model directory', width=25, command=getModelDir)\n", (10526, 10594), True, 'import tkinter as tk\n'), ((10633, 10705), 'tkinter.Button', 'tk.Button', (['root'], {'text': '"""Select an ID prefix"""', 'width': '(25)', 'command': 'getPrefix'}), "(root, text='Select an ID prefix', width=25, command=getPrefix)\n", (10642, 10705), True, 'import tkinter as tk\n'), ((10740, 10794), 'tkinter.Button', 'tk.Button', (['root'], {'text': '"""Start"""', 'width': '(25)', 'command': 'Start'}), "(root, text='Start', width=25, command=Start)\n", (10749, 10794), True, 'import tkinter as tk\n'), ((1246, 1263), 'tkinter.filedialog.askopenfilename', 'askopenfilename', ([], {}), '()\n', (1261, 1263), False, 'from tkinter.filedialog import askopenfilename\n'), ((1485, 1499), 'tkinter.filedialog.askdirectory', 'askdirectory', ([], {}), '()\n', (1497, 1499), False, 'from tkinter.filedialog import askdirectory\n'), ((1644, 1705), 'tkinter.simpledialog.askstring', 'simpledialog.askstring', (['"""input string"""', '"""Enter an ID prefix:"""'], {}), "('input string', 'Enter an ID prefix:')\n", (1666, 1705), False, 'from tkinter import simpledialog\n'), ((4218, 4249), 'csv.reader', 'csv.reader', (['data'], {'delimiter': '""","""'}), "(data, delimiter=',')\n", (4228, 4249), False, 'import csv\n'), ((4272, 4346), 'csv.writer', 'csv.writer', (['responses'], {'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_ALL'}), '(responses, delimiter=\',\', quotechar=\'"\', quoting=csv.QUOTE_ALL)\n', (4282, 4346), False, 'import csv\n'), ((4402, 4477), 'csv.writer', 'csv.writer', (['Wresponses'], {'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_ALL'}), '(Wresponses, delimiter=\',\', quotechar=\'"\', quoting=csv.QUOTE_ALL)\n', (4412, 4477), False, 'import csv\n'), ((1294, 1327), 'os.path.join', 'os.path.join', (['filename', 'os.pardir'], {}), '(filename, os.pardir)\n', (1306, 1327), False, 'import os\n'), ((2195, 2229), 'keras.models.model_from_json', 'model_from_json', (['loaded_model_json'], {}), '(loaded_model_json)\n', (2210, 2229), False, 'from keras.models import model_from_json\n'), ((2964, 2983), 'cv2.imread', 'cv2.imread', (['path', '(0)'], {}), '(path, 0)\n', (2974, 2983), False, 'import cv2\n'), ((3687, 3719), 'os.remove', 'os.remove', (["(modeldir + 'temp.png')"], {}), "(modeldir + 'temp.png')\n", (3696, 3719), False, 'import os\n'), ((2927, 2948), 'base64.b64decode', 'base64.b64decode', (['uri'], {}), '(uri)\n', (2943, 2948), False, 'import base64\n'), ((3083, 3112), 'cv2.resize', 'cv2.resize', (['img', '(size, size)'], {}), '(img, (size, size))\n', (3093, 3112), False, 'import cv2\n'), ((3175, 3188), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (3183, 3188), True, 'import numpy as np\n'), ((3778, 3797), 'numpy.amax', 'np.amax', (['pred_array'], {}), '(pred_array)\n', (3785, 3797), True, 'import numpy as np\n'), ((8594, 
8621), 're.findall', 're.findall', (['"""\\\\d+"""', 'row[20]'], {}), "('\\\\d+', row[20])\n", (8604, 8621), False, 'import re\n')]
|
from typing import Tuple, Union, Dict, Optional, Any
import re
from mandarin.core import ELEMENTS
class NodeHasNoValueError(Exception):
pass
class Parser:
def __init__(self):
pass
@staticmethod
def remove_white_space(value: str) -> str:
cleaned_val = value.lstrip()
# TODO handle escaped strings
if cleaned_val[0] == "'":
cleaned_val = cleaned_val.split("'")[1]
else:
cleaned_val = cleaned_val.split('"')[1]
return cleaned_val
@staticmethod
def aggregate_into_tuple(*, elem_name: str, content: Any, attr_str: str):
if not content and attr_str:
return ("<%s %s>" % (elem_name, attr_str)), "</%s>" % elem_name
elif content and attr_str:
return ("<%s %s>" % (elem_name, attr_str)), Parser.remove_white_space(content), "</%s>" % elem_name
elif content and not attr_str:
return "<%s>" % elem_name, content, "</%s>" % elem_name
else:
return "<%s>" % elem_name, "</%s>" % elem_name
def parse(self, node: "Node") -> Union[Tuple[str, str, str], Tuple[str, str], str]:
el = node.elem_name
attr_str = ""
if node.elem_name:
if node.attr:
for k, v in node.attr.items():
if not attr_str:
# If this is the first attr then don't add white space
attr_str += f"{k}='{v}'"
else:
attr_str += f" {k}='{v}'"
return Parser.aggregate_into_tuple(elem_name=el, content=node.value, attr_str=attr_str)
elif node.value:
return Parser.remove_white_space(node.value)
else:
raise NodeHasNoValueError("Node did not have any values to parse.")
def add_attrs_to_elem(self):
pass
def parse_elem(self, element: str) -> Tuple[str, Optional[Dict[str, str]]]:
elem = re.split("\(|\)", element)
if len(elem) == 1:
            # No attributes present: return the bare element name.
            return elem[0], None
attr_dict = {}
attr_str = elem[1]
attrs = attr_str.split(" ")
for attr in attrs:
attr_name, attr_val = attr.split("=")
attr_dict[attr_name] = attr_val.strip('""')
return elem[0], attr_dict
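# A minimal usage sketch (illustrative, not part of the original module; it assumes
# the mandarin package is importable): parse_elem() splits an element spec such as
# "div(id=main class=container)" into a tag name and an attribute dict, which
# Parser.parse() can then turn into HTML fragments.
if __name__ == "__main__":
    parser = Parser()
    print(parser.parse_elem("div(id=main class=container)"))
    # expected: ('div', {'id': 'main', 'class': 'container'})
    print(parser.parse_elem("br"))
    # no attributes: ('br', None)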
|
[
"re.split"
] |
[((1959, 1987), 're.split', 're.split', (['"""\\\\(|\\\\)"""', 'element'], {}), "('\\\\(|\\\\)', element)\n", (1967, 1987), False, 'import re\n')]
|
import math
import os
import sys
import pprint
def count_sort_func(data, maxdata, index):
    """Counting sort of a list of records by the value at position `index`."""
    maxdata += 1
    count_list = [0] * maxdata
    remaining = list(data)  # work on a copy so records can be removed safely
    for record in data:
        count_list[record[index]] += 1
    i = 0
    for n in range(len(count_list)):
        while count_list[n] > 0:
            # find the next remaining record whose key equals n
            for j, record in enumerate(remaining):
                if record[index] == n:
                    pprint.pprint(remaining)
                    data[i] = record
                    remaining.pop(j)
                    break
            i += 1
            count_list[n] -= 1
    return data
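# A small illustrative driver (not part of the original script), assuming records are
# (key, label) pairs sorted by the integer key in column 0:
if __name__ == "__main__":
    sample = [(3, 'c'), (1, 'a'), (2, 'b'), (1, 'd')]
    pprint.pprint(count_sort_func(sample, 3, 0))
    # expected key order: 1, 1, 2, 3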
|
[
"pprint.pprint"
] |
[((441, 466), 'pprint.pprint', 'pprint.pprint', (['count_dict'], {}), '(count_dict)\n', (454, 466), False, 'import pprint\n')]
|
'''
Root task (Crunchbase)
========================
Luigi routine to collect all data from the Crunchbase data dump and load it to MySQL.
'''
import luigi
import datetime
import logging
from nesta.core.routines.datasets.crunchbase.crunchbase_parent_id_collect_task import ParentIdCollectTask
from nesta.core.routines.datasets.crunchbase.crunchbase_geocode_task import CBGeocodeBatchTask
from nesta.core.luigihacks.misctools import find_filepath_from_pathstub as f3p
from nesta.core.orms.crunchbase_orm import Base
from nesta.core.orms.orm_utils import get_class_by_tablename
class RootTask(luigi.WrapperTask):
'''A dummy root task, which collects the database configurations
and executes the central task.
Args:
date (datetime): Date used to label the outputs
db_config_path (str): Path to the MySQL database configuration
production (bool): Flag indicating whether running in testing
mode (False, default), or production mode (True).
'''
date = luigi.DateParameter(default=datetime.date.today())
production = luigi.BoolParameter(default=False)
insert_batch_size = luigi.IntParameter(default=500)
db_config_path = luigi.Parameter(default=f3p("mysqldb.config"))
db_config_env = luigi.Parameter(default="MYSQLDB")
def requires(self):
'''Collects the database configurations and executes the central task.'''
_routine_id = "{}-{}".format(self.date, self.production)
logging.getLogger().setLevel(logging.INFO)
yield ParentIdCollectTask(date=self.date,
_routine_id=_routine_id,
test=not self.production,
insert_batch_size=self.insert_batch_size,
db_config_path=self.db_config_path,
db_config_env=self.db_config_env)
geocode_kwargs = dict(date=self.date,
_routine_id=_routine_id,
test=not self.production,
db_config_env="MYSQLDB",
insert_batch_size=self.insert_batch_size,
env_files=[f3p("nesta"),
f3p("config/mysqldb.config"),
f3p("config/crunchbase.config")],
job_def="py37_amzn2",
job_queue="HighPriority",
region_name="eu-west-2",
poll_time=10,
memory=4096,
max_live_jobs=2)
for tablename in ['organizations', 'funding_rounds', 'investors', 'people', 'ipos']:
_class = get_class_by_tablename(Base, f'crunchbase_{tablename}')
yield CBGeocodeBatchTask(city_col=_class.city,
country_col=_class.country,
location_key_col=_class.location_id,
job_name=f"Crunchbase-{tablename}-{_routine_id}",
**geocode_kwargs)
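# A minimal invocation sketch (illustrative; it assumes the MySQL/AWS config files
# referenced above are available to luigi):
#
#   luigi.build([RootTask(production=False)], local_scheduler=True)
#
# Keeping production=False leaves the pipeline in testing mode (see the class docstring).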
|
[
"nesta.core.orms.orm_utils.get_class_by_tablename",
"nesta.core.luigihacks.misctools.find_filepath_from_pathstub",
"datetime.date.today",
"luigi.Parameter",
"nesta.core.routines.datasets.crunchbase.crunchbase_parent_id_collect_task.ParentIdCollectTask",
"nesta.core.routines.datasets.crunchbase.crunchbase_geocode_task.CBGeocodeBatchTask",
"luigi.BoolParameter",
"luigi.IntParameter",
"logging.getLogger"
] |
[((1091, 1125), 'luigi.BoolParameter', 'luigi.BoolParameter', ([], {'default': '(False)'}), '(default=False)\n', (1110, 1125), False, 'import luigi\n'), ((1150, 1181), 'luigi.IntParameter', 'luigi.IntParameter', ([], {'default': '(500)'}), '(default=500)\n', (1168, 1181), False, 'import luigi\n'), ((1270, 1304), 'luigi.Parameter', 'luigi.Parameter', ([], {'default': '"""MYSQLDB"""'}), "(default='MYSQLDB')\n", (1285, 1304), False, 'import luigi\n'), ((1051, 1072), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (1070, 1072), False, 'import datetime\n'), ((1227, 1248), 'nesta.core.luigihacks.misctools.find_filepath_from_pathstub', 'f3p', (['"""mysqldb.config"""'], {}), "('mysqldb.config')\n", (1230, 1248), True, 'from nesta.core.luigihacks.misctools import find_filepath_from_pathstub as f3p\n'), ((1555, 1763), 'nesta.core.routines.datasets.crunchbase.crunchbase_parent_id_collect_task.ParentIdCollectTask', 'ParentIdCollectTask', ([], {'date': 'self.date', '_routine_id': '_routine_id', 'test': '(not self.production)', 'insert_batch_size': 'self.insert_batch_size', 'db_config_path': 'self.db_config_path', 'db_config_env': 'self.db_config_env'}), '(date=self.date, _routine_id=_routine_id, test=not self.\n production, insert_batch_size=self.insert_batch_size, db_config_path=\n self.db_config_path, db_config_env=self.db_config_env)\n', (1574, 1763), False, 'from nesta.core.routines.datasets.crunchbase.crunchbase_parent_id_collect_task import ParentIdCollectTask\n'), ((2822, 2877), 'nesta.core.orms.orm_utils.get_class_by_tablename', 'get_class_by_tablename', (['Base', 'f"""crunchbase_{tablename}"""'], {}), "(Base, f'crunchbase_{tablename}')\n", (2844, 2877), False, 'from nesta.core.orms.orm_utils import get_class_by_tablename\n'), ((1490, 1509), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1507, 1509), False, 'import logging\n'), ((2896, 3078), 'nesta.core.routines.datasets.crunchbase.crunchbase_geocode_task.CBGeocodeBatchTask', 'CBGeocodeBatchTask', ([], {'city_col': '_class.city', 'country_col': '_class.country', 'location_key_col': '_class.location_id', 'job_name': 'f"""Crunchbase-{tablename}-{_routine_id}"""'}), "(city_col=_class.city, country_col=_class.country,\n location_key_col=_class.location_id, job_name=\n f'Crunchbase-{tablename}-{_routine_id}', **geocode_kwargs)\n", (2914, 3078), False, 'from nesta.core.routines.datasets.crunchbase.crunchbase_geocode_task import CBGeocodeBatchTask\n'), ((2250, 2262), 'nesta.core.luigihacks.misctools.find_filepath_from_pathstub', 'f3p', (['"""nesta"""'], {}), "('nesta')\n", (2253, 2262), True, 'from nesta.core.luigihacks.misctools import find_filepath_from_pathstub as f3p\n'), ((2305, 2333), 'nesta.core.luigihacks.misctools.find_filepath_from_pathstub', 'f3p', (['"""config/mysqldb.config"""'], {}), "('config/mysqldb.config')\n", (2308, 2333), True, 'from nesta.core.luigihacks.misctools import find_filepath_from_pathstub as f3p\n'), ((2376, 2407), 'nesta.core.luigihacks.misctools.find_filepath_from_pathstub', 'f3p', (['"""config/crunchbase.config"""'], {}), "('config/crunchbase.config')\n", (2379, 2407), True, 'from nesta.core.luigihacks.misctools import find_filepath_from_pathstub as f3p\n')]
|
from flask import Blueprint, Flask, render_template, request
blueprint = Blueprint(__name__, __name__, url_prefix='/auth')
@blueprint.route('/login', methods=['GET', 'POST'])
def login():
if request.method != 'POST':
return render_template("login_start.jinja")
print(request.form)
return 'You "logged" in. EMAIL: ' + request.form['email'] + '; PASS: ' + request.form['password']
@blueprint.route('/register', methods=['GET', 'POST'])
def register():
if request.method != 'POST':
return render_template("register_start.jinja")
return 'You "register" an account. EMAIL: ' + request.form['email'] + '; PASS: ' + request.form['password']
def registerself(app: Flask, prefix=''):
app.register_blueprint(blueprint, url_prefix=prefix + blueprint.url_prefix)
|
[
"flask.Blueprint",
"flask.render_template"
] |
[((74, 123), 'flask.Blueprint', 'Blueprint', (['__name__', '__name__'], {'url_prefix': '"""/auth"""'}), "(__name__, __name__, url_prefix='/auth')\n", (83, 123), False, 'from flask import Blueprint, Flask, render_template, request\n'), ((239, 275), 'flask.render_template', 'render_template', (['"""login_start.jinja"""'], {}), "('login_start.jinja')\n", (254, 275), False, 'from flask import Blueprint, Flask, render_template, request\n'), ((523, 562), 'flask.render_template', 'render_template', (['"""register_start.jinja"""'], {}), "('register_start.jinja')\n", (538, 562), False, 'from flask import Blueprint, Flask, render_template, request\n')]
|
import struct
import itertools
polys = [
[ (1.0, 2.5), (3.5, 4.0), (2.5, 1.5) ],
[ (7.0, 1.2), (5.1, 3.0), (0.5, 7.5), (0.8, 9.0) ],
[ (3.4, 6.3), (1.2, 0.5), (4.6, 9.2) ],
]
def write_polys(filename, polys):
# Determine bounding box
flattened = list(itertools.chain(*polys))
min_x = min(x for x, y in flattened)
max_x = max(x for x, y in flattened)
min_y = min(y for x, y in flattened)
max_y = max(y for x, y in flattened)
with open(filename, 'wb') as f:
f.write(struct.pack('<iddddi',
0x1234,
min_x, min_y,
max_x, max_y,
len(polys)))
for poly in polys:
size = len(poly) * struct.calcsize('<dd')
f.write(struct.pack('<i', size+4))
for pt in poly:
f.write(struct.pack('<dd', *pt))
# Call it with our polygon data
write_polys('polys.bin', polys)
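# A complementary read-back sketch (not part of the original snippet): the header
# written above can be unpacked with the same '<iddddi' layout to recover the file
# code, bounding box, and polygon count.
def read_header(filename):
    with open(filename, 'rb') as f:
        header = f.read(struct.calcsize('<iddddi'))
        file_code, min_x, min_y, max_x, max_y, num_polys = struct.unpack('<iddddi', header)
    return file_code, (min_x, min_y, max_x, max_y), num_polys

print(read_header('polys.bin'))   # file code 0x1234 (4660), bounding box, 3 polygons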
|
[
"struct.pack",
"itertools.chain",
"struct.calcsize"
] |
[((303, 326), 'itertools.chain', 'itertools.chain', (['*polys'], {}), '(*polys)\n', (318, 326), False, 'import itertools\n'), ((796, 818), 'struct.calcsize', 'struct.calcsize', (['"""<dd"""'], {}), "('<dd')\n", (811, 818), False, 'import struct\n'), ((839, 866), 'struct.pack', 'struct.pack', (['"""<i"""', '(size + 4)'], {}), "('<i', size + 4)\n", (850, 866), False, 'import struct\n'), ((918, 941), 'struct.pack', 'struct.pack', (['"""<dd"""', '*pt'], {}), "('<dd', *pt)\n", (929, 941), False, 'import struct\n')]
|
from glob import glob
import os
import os.path as op
from shutil import copyfile
from nose.tools import assert_raises
import numpy as np
from numpy.testing import assert_array_almost_equal
import mne
from mne.datasets import testing
from mne.transforms import (Transform, apply_trans, rotation, translation,
scaling)
from mne.coreg import (fit_matched_points, create_default_subject, scale_mri,
_is_mri_subject, scale_labels, scale_source_space,
coregister_fiducials)
from mne.io.constants import FIFF
from mne.utils import _TempDir, run_tests_if_main
from mne.source_space import write_source_spaces
from functools import reduce
def test_coregister_fiducials():
"""Test coreg.coregister_fiducials()"""
# prepare head and MRI fiducials
trans = Transform('head', 'mri',
rotation(.4, .1, 0).dot(translation(.1, -.1, .1)))
coords_orig = np.array([[-0.08061612, -0.02908875, -0.04131077],
[0.00146763, 0.08506715, -0.03483611],
[0.08436285, -0.02850276, -0.04127743]])
coords_trans = apply_trans(trans, coords_orig)
def make_dig(coords, cf):
return ({'coord_frame': cf, 'ident': 1, 'kind': 1, 'r': coords[0]},
{'coord_frame': cf, 'ident': 2, 'kind': 1, 'r': coords[1]},
{'coord_frame': cf, 'ident': 3, 'kind': 1, 'r': coords[2]})
mri_fiducials = make_dig(coords_trans, FIFF.FIFFV_COORD_MRI)
info = {'dig': make_dig(coords_orig, FIFF.FIFFV_COORD_HEAD)}
# test coregister_fiducials()
trans_est = coregister_fiducials(info, mri_fiducials)
assert trans_est.from_str == trans.from_str
assert trans_est.to_str == trans.to_str
assert_array_almost_equal(trans_est['trans'], trans['trans'])
@testing.requires_testing_data
def test_scale_mri():
"""Test creating fsaverage and scaling it."""
# create fsaverage using the testing "fsaverage" instead of the FreeSurfer
# one
tempdir = _TempDir()
fake_home = testing.data_path()
create_default_subject(subjects_dir=tempdir, fs_home=fake_home,
verbose=True)
assert _is_mri_subject('fsaverage', tempdir), "Creating fsaverage failed"
fid_path = op.join(tempdir, 'fsaverage', 'bem', 'fsaverage-fiducials.fif')
os.remove(fid_path)
create_default_subject(update=True, subjects_dir=tempdir,
fs_home=fake_home)
assert op.exists(fid_path), "Updating fsaverage"
# copy MRI file from sample data (shouldn't matter that it's incorrect,
# so here choose a small one)
path_from = op.join(testing.data_path(), 'subjects', 'sample', 'mri',
'T1.mgz')
path_to = op.join(tempdir, 'fsaverage', 'mri', 'orig.mgz')
copyfile(path_from, path_to)
# remove redundant label files
label_temp = op.join(tempdir, 'fsaverage', 'label', '*.label')
label_paths = glob(label_temp)
for label_path in label_paths[1:]:
os.remove(label_path)
# create source space
print('Creating surface source space')
path = op.join(tempdir, 'fsaverage', 'bem', 'fsaverage-%s-src.fif')
src = mne.setup_source_space('fsaverage', 'ico0', subjects_dir=tempdir,
add_dist=False)
write_source_spaces(path % 'ico-0', src)
mri = op.join(tempdir, 'fsaverage', 'mri', 'orig.mgz')
print('Creating volume source space')
vsrc = mne.setup_volume_source_space(
'fsaverage', pos=50, mri=mri, subjects_dir=tempdir,
add_interpolator=False)
write_source_spaces(path % 'vol-50', vsrc)
# scale fsaverage
os.environ['_MNE_FEW_SURFACES'] = 'true'
scale = np.array([1, .2, .8])
scale_mri('fsaverage', 'flachkopf', scale, True, subjects_dir=tempdir,
verbose='debug')
del os.environ['_MNE_FEW_SURFACES']
assert _is_mri_subject('flachkopf', tempdir), "Scaling fsaverage failed"
spath = op.join(tempdir, 'flachkopf', 'bem', 'flachkopf-%s-src.fif')
assert op.exists(spath % 'ico-0'), "Source space ico-0 was not scaled"
assert os.path.isfile(os.path.join(tempdir, 'flachkopf', 'surf',
'lh.sphere.reg'))
vsrc_s = mne.read_source_spaces(spath % 'vol-50')
pt = np.array([0.12, 0.41, -0.22])
assert_array_almost_equal(apply_trans(vsrc_s[0]['src_mri_t'], pt * scale),
apply_trans(vsrc[0]['src_mri_t'], pt))
scale_labels('flachkopf', subjects_dir=tempdir)
# add distances to source space
mne.add_source_space_distances(src)
src.save(path % 'ico-0', overwrite=True)
# scale with distances
os.remove(spath % 'ico-0')
scale_source_space('flachkopf', 'ico-0', subjects_dir=tempdir)
ssrc = mne.read_source_spaces(spath % 'ico-0')
assert ssrc[0]['dist'] is not None
def test_fit_matched_points():
"""Test fit_matched_points: fitting two matching sets of points"""
tgt_pts = np.random.RandomState(42).uniform(size=(6, 3))
# rotation only
trans = rotation(2, 6, 3)
src_pts = apply_trans(trans, tgt_pts)
trans_est = fit_matched_points(src_pts, tgt_pts, translate=False,
out='trans')
est_pts = apply_trans(trans_est, src_pts)
assert_array_almost_equal(tgt_pts, est_pts, 2, "fit_matched_points with "
"rotation")
# rotation & translation
trans = np.dot(translation(2, -6, 3), rotation(2, 6, 3))
src_pts = apply_trans(trans, tgt_pts)
trans_est = fit_matched_points(src_pts, tgt_pts, out='trans')
est_pts = apply_trans(trans_est, src_pts)
assert_array_almost_equal(tgt_pts, est_pts, 2, "fit_matched_points with "
"rotation and translation.")
# rotation & translation & scaling
trans = reduce(np.dot, (translation(2, -6, 3), rotation(1.5, .3, 1.4),
scaling(.5, .5, .5)))
src_pts = apply_trans(trans, tgt_pts)
trans_est = fit_matched_points(src_pts, tgt_pts, scale=1, out='trans')
est_pts = apply_trans(trans_est, src_pts)
assert_array_almost_equal(tgt_pts, est_pts, 2, "fit_matched_points with "
"rotation, translation and scaling.")
# test exceeding tolerance
tgt_pts[0, :] += 20
assert_raises(RuntimeError, fit_matched_points, tgt_pts, src_pts, tol=10)
run_tests_if_main()
|
[
"mne.coreg.fit_matched_points",
"os.remove",
"mne.utils._TempDir",
"mne.utils.run_tests_if_main",
"mne.setup_volume_source_space",
"mne.coreg.coregister_fiducials",
"glob.glob",
"numpy.testing.assert_array_almost_equal",
"os.path.join",
"mne.read_source_spaces",
"mne.source_space.write_source_spaces",
"os.path.exists",
"mne.coreg.scale_source_space",
"numpy.random.RandomState",
"mne.transforms.apply_trans",
"mne.coreg.scale_mri",
"nose.tools.assert_raises",
"shutil.copyfile",
"mne.coreg._is_mri_subject",
"mne.setup_source_space",
"mne.coreg.create_default_subject",
"mne.transforms.scaling",
"mne.datasets.testing.data_path",
"mne.add_source_space_distances",
"mne.transforms.translation",
"numpy.array",
"mne.transforms.rotation",
"mne.coreg.scale_labels"
] |
[((6417, 6436), 'mne.utils.run_tests_if_main', 'run_tests_if_main', ([], {}), '()\n', (6434, 6436), False, 'from mne.utils import _TempDir, run_tests_if_main\n'), ((950, 1084), 'numpy.array', 'np.array', (['[[-0.08061612, -0.02908875, -0.04131077], [0.00146763, 0.08506715, -\n 0.03483611], [0.08436285, -0.02850276, -0.04127743]]'], {}), '([[-0.08061612, -0.02908875, -0.04131077], [0.00146763, 0.08506715,\n -0.03483611], [0.08436285, -0.02850276, -0.04127743]])\n', (958, 1084), True, 'import numpy as np\n'), ((1156, 1187), 'mne.transforms.apply_trans', 'apply_trans', (['trans', 'coords_orig'], {}), '(trans, coords_orig)\n', (1167, 1187), False, 'from mne.transforms import Transform, apply_trans, rotation, translation, scaling\n'), ((1629, 1670), 'mne.coreg.coregister_fiducials', 'coregister_fiducials', (['info', 'mri_fiducials'], {}), '(info, mri_fiducials)\n', (1649, 1670), False, 'from mne.coreg import fit_matched_points, create_default_subject, scale_mri, _is_mri_subject, scale_labels, scale_source_space, coregister_fiducials\n'), ((1767, 1828), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (["trans_est['trans']", "trans['trans']"], {}), "(trans_est['trans'], trans['trans'])\n", (1792, 1828), False, 'from numpy.testing import assert_array_almost_equal\n'), ((2037, 2047), 'mne.utils._TempDir', '_TempDir', ([], {}), '()\n', (2045, 2047), False, 'from mne.utils import _TempDir, run_tests_if_main\n'), ((2064, 2083), 'mne.datasets.testing.data_path', 'testing.data_path', ([], {}), '()\n', (2081, 2083), False, 'from mne.datasets import testing\n'), ((2088, 2165), 'mne.coreg.create_default_subject', 'create_default_subject', ([], {'subjects_dir': 'tempdir', 'fs_home': 'fake_home', 'verbose': '(True)'}), '(subjects_dir=tempdir, fs_home=fake_home, verbose=True)\n', (2110, 2165), False, 'from mne.coreg import fit_matched_points, create_default_subject, scale_mri, _is_mri_subject, scale_labels, scale_source_space, coregister_fiducials\n'), ((2204, 2241), 'mne.coreg._is_mri_subject', '_is_mri_subject', (['"""fsaverage"""', 'tempdir'], {}), "('fsaverage', tempdir)\n", (2219, 2241), False, 'from mne.coreg import fit_matched_points, create_default_subject, scale_mri, _is_mri_subject, scale_labels, scale_source_space, coregister_fiducials\n'), ((2287, 2350), 'os.path.join', 'op.join', (['tempdir', '"""fsaverage"""', '"""bem"""', '"""fsaverage-fiducials.fif"""'], {}), "(tempdir, 'fsaverage', 'bem', 'fsaverage-fiducials.fif')\n", (2294, 2350), True, 'import os.path as op\n'), ((2355, 2374), 'os.remove', 'os.remove', (['fid_path'], {}), '(fid_path)\n', (2364, 2374), False, 'import os\n'), ((2379, 2455), 'mne.coreg.create_default_subject', 'create_default_subject', ([], {'update': '(True)', 'subjects_dir': 'tempdir', 'fs_home': 'fake_home'}), '(update=True, subjects_dir=tempdir, fs_home=fake_home)\n', (2401, 2455), False, 'from mne.coreg import fit_matched_points, create_default_subject, scale_mri, _is_mri_subject, scale_labels, scale_source_space, coregister_fiducials\n'), ((2494, 2513), 'os.path.exists', 'op.exists', (['fid_path'], {}), '(fid_path)\n', (2503, 2513), True, 'import os.path as op\n'), ((2769, 2817), 'os.path.join', 'op.join', (['tempdir', '"""fsaverage"""', '"""mri"""', '"""orig.mgz"""'], {}), "(tempdir, 'fsaverage', 'mri', 'orig.mgz')\n", (2776, 2817), True, 'import os.path as op\n'), ((2822, 2850), 'shutil.copyfile', 'copyfile', (['path_from', 'path_to'], {}), '(path_from, path_to)\n', (2830, 2850), False, 'from shutil import copyfile\n'), ((2904, 2953), 
'os.path.join', 'op.join', (['tempdir', '"""fsaverage"""', '"""label"""', '"""*.label"""'], {}), "(tempdir, 'fsaverage', 'label', '*.label')\n", (2911, 2953), True, 'import os.path as op\n'), ((2972, 2988), 'glob.glob', 'glob', (['label_temp'], {}), '(label_temp)\n', (2976, 2988), False, 'from glob import glob\n'), ((3139, 3199), 'os.path.join', 'op.join', (['tempdir', '"""fsaverage"""', '"""bem"""', '"""fsaverage-%s-src.fif"""'], {}), "(tempdir, 'fsaverage', 'bem', 'fsaverage-%s-src.fif')\n", (3146, 3199), True, 'import os.path as op\n'), ((3210, 3296), 'mne.setup_source_space', 'mne.setup_source_space', (['"""fsaverage"""', '"""ico0"""'], {'subjects_dir': 'tempdir', 'add_dist': '(False)'}), "('fsaverage', 'ico0', subjects_dir=tempdir, add_dist=\n False)\n", (3232, 3296), False, 'import mne\n'), ((3329, 3369), 'mne.source_space.write_source_spaces', 'write_source_spaces', (["(path % 'ico-0')", 'src'], {}), "(path % 'ico-0', src)\n", (3348, 3369), False, 'from mne.source_space import write_source_spaces\n'), ((3380, 3428), 'os.path.join', 'op.join', (['tempdir', '"""fsaverage"""', '"""mri"""', '"""orig.mgz"""'], {}), "(tempdir, 'fsaverage', 'mri', 'orig.mgz')\n", (3387, 3428), True, 'import os.path as op\n'), ((3482, 3592), 'mne.setup_volume_source_space', 'mne.setup_volume_source_space', (['"""fsaverage"""'], {'pos': '(50)', 'mri': 'mri', 'subjects_dir': 'tempdir', 'add_interpolator': '(False)'}), "('fsaverage', pos=50, mri=mri, subjects_dir=\n tempdir, add_interpolator=False)\n", (3511, 3592), False, 'import mne\n'), ((3609, 3651), 'mne.source_space.write_source_spaces', 'write_source_spaces', (["(path % 'vol-50')", 'vsrc'], {}), "(path % 'vol-50', vsrc)\n", (3628, 3651), False, 'from mne.source_space import write_source_spaces\n'), ((3732, 3755), 'numpy.array', 'np.array', (['[1, 0.2, 0.8]'], {}), '([1, 0.2, 0.8])\n', (3740, 3755), True, 'import numpy as np\n'), ((3758, 3849), 'mne.coreg.scale_mri', 'scale_mri', (['"""fsaverage"""', '"""flachkopf"""', 'scale', '(True)'], {'subjects_dir': 'tempdir', 'verbose': '"""debug"""'}), "('fsaverage', 'flachkopf', scale, True, subjects_dir=tempdir,\n verbose='debug')\n", (3767, 3849), False, 'from mne.coreg import fit_matched_points, create_default_subject, scale_mri, _is_mri_subject, scale_labels, scale_source_space, coregister_fiducials\n'), ((3911, 3948), 'mne.coreg._is_mri_subject', '_is_mri_subject', (['"""flachkopf"""', 'tempdir'], {}), "('flachkopf', tempdir)\n", (3926, 3948), False, 'from mne.coreg import fit_matched_points, create_default_subject, scale_mri, _is_mri_subject, scale_labels, scale_source_space, coregister_fiducials\n'), ((3989, 4049), 'os.path.join', 'op.join', (['tempdir', '"""flachkopf"""', '"""bem"""', '"""flachkopf-%s-src.fif"""'], {}), "(tempdir, 'flachkopf', 'bem', 'flachkopf-%s-src.fif')\n", (3996, 4049), True, 'import os.path as op\n'), ((4062, 4088), 'os.path.exists', 'op.exists', (["(spath % 'ico-0')"], {}), "(spath % 'ico-0')\n", (4071, 4088), True, 'import os.path as op\n'), ((4265, 4305), 'mne.read_source_spaces', 'mne.read_source_spaces', (["(spath % 'vol-50')"], {}), "(spath % 'vol-50')\n", (4287, 4305), False, 'import mne\n'), ((4315, 4344), 'numpy.array', 'np.array', (['[0.12, 0.41, -0.22]'], {}), '([0.12, 0.41, -0.22])\n', (4323, 4344), True, 'import numpy as np\n'), ((4497, 4544), 'mne.coreg.scale_labels', 'scale_labels', (['"""flachkopf"""'], {'subjects_dir': 'tempdir'}), "('flachkopf', subjects_dir=tempdir)\n", (4509, 4544), False, 'from mne.coreg import fit_matched_points, create_default_subject, 
scale_mri, _is_mri_subject, scale_labels, scale_source_space, coregister_fiducials\n'), ((4586, 4621), 'mne.add_source_space_distances', 'mne.add_source_space_distances', (['src'], {}), '(src)\n', (4616, 4621), False, 'import mne\n'), ((4699, 4725), 'os.remove', 'os.remove', (["(spath % 'ico-0')"], {}), "(spath % 'ico-0')\n", (4708, 4725), False, 'import os\n'), ((4730, 4792), 'mne.coreg.scale_source_space', 'scale_source_space', (['"""flachkopf"""', '"""ico-0"""'], {'subjects_dir': 'tempdir'}), "('flachkopf', 'ico-0', subjects_dir=tempdir)\n", (4748, 4792), False, 'from mne.coreg import fit_matched_points, create_default_subject, scale_mri, _is_mri_subject, scale_labels, scale_source_space, coregister_fiducials\n'), ((4804, 4843), 'mne.read_source_spaces', 'mne.read_source_spaces', (["(spath % 'ico-0')"], {}), "(spath % 'ico-0')\n", (4826, 4843), False, 'import mne\n'), ((5081, 5098), 'mne.transforms.rotation', 'rotation', (['(2)', '(6)', '(3)'], {}), '(2, 6, 3)\n', (5089, 5098), False, 'from mne.transforms import Transform, apply_trans, rotation, translation, scaling\n'), ((5113, 5140), 'mne.transforms.apply_trans', 'apply_trans', (['trans', 'tgt_pts'], {}), '(trans, tgt_pts)\n', (5124, 5140), False, 'from mne.transforms import Transform, apply_trans, rotation, translation, scaling\n'), ((5157, 5223), 'mne.coreg.fit_matched_points', 'fit_matched_points', (['src_pts', 'tgt_pts'], {'translate': '(False)', 'out': '"""trans"""'}), "(src_pts, tgt_pts, translate=False, out='trans')\n", (5175, 5223), False, 'from mne.coreg import fit_matched_points, create_default_subject, scale_mri, _is_mri_subject, scale_labels, scale_source_space, coregister_fiducials\n'), ((5273, 5304), 'mne.transforms.apply_trans', 'apply_trans', (['trans_est', 'src_pts'], {}), '(trans_est, src_pts)\n', (5284, 5304), False, 'from mne.transforms import Transform, apply_trans, rotation, translation, scaling\n'), ((5309, 5395), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['tgt_pts', 'est_pts', '(2)', '"""fit_matched_points with rotation"""'], {}), "(tgt_pts, est_pts, 2,\n 'fit_matched_points with rotation')\n", (5334, 5395), False, 'from numpy.testing import assert_array_almost_equal\n'), ((5530, 5557), 'mne.transforms.apply_trans', 'apply_trans', (['trans', 'tgt_pts'], {}), '(trans, tgt_pts)\n', (5541, 5557), False, 'from mne.transforms import Transform, apply_trans, rotation, translation, scaling\n'), ((5574, 5623), 'mne.coreg.fit_matched_points', 'fit_matched_points', (['src_pts', 'tgt_pts'], {'out': '"""trans"""'}), "(src_pts, tgt_pts, out='trans')\n", (5592, 5623), False, 'from mne.coreg import fit_matched_points, create_default_subject, scale_mri, _is_mri_subject, scale_labels, scale_source_space, coregister_fiducials\n'), ((5638, 5669), 'mne.transforms.apply_trans', 'apply_trans', (['trans_est', 'src_pts'], {}), '(trans_est, src_pts)\n', (5649, 5669), False, 'from mne.transforms import Transform, apply_trans, rotation, translation, scaling\n'), ((5674, 5777), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['tgt_pts', 'est_pts', '(2)', '"""fit_matched_points with rotation and translation."""'], {}), "(tgt_pts, est_pts, 2,\n 'fit_matched_points with rotation and translation.')\n", (5699, 5777), False, 'from numpy.testing import assert_array_almost_equal\n'), ((5986, 6013), 'mne.transforms.apply_trans', 'apply_trans', (['trans', 'tgt_pts'], {}), '(trans, tgt_pts)\n', (5997, 6013), False, 'from mne.transforms import Transform, apply_trans, rotation, translation, 
scaling\n'), ((6030, 6088), 'mne.coreg.fit_matched_points', 'fit_matched_points', (['src_pts', 'tgt_pts'], {'scale': '(1)', 'out': '"""trans"""'}), "(src_pts, tgt_pts, scale=1, out='trans')\n", (6048, 6088), False, 'from mne.coreg import fit_matched_points, create_default_subject, scale_mri, _is_mri_subject, scale_labels, scale_source_space, coregister_fiducials\n'), ((6103, 6134), 'mne.transforms.apply_trans', 'apply_trans', (['trans_est', 'src_pts'], {}), '(trans_est, src_pts)\n', (6114, 6134), False, 'from mne.transforms import Transform, apply_trans, rotation, translation, scaling\n'), ((6139, 6251), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['tgt_pts', 'est_pts', '(2)', '"""fit_matched_points with rotation, translation and scaling."""'], {}), "(tgt_pts, est_pts, 2,\n 'fit_matched_points with rotation, translation and scaling.')\n", (6164, 6251), False, 'from numpy.testing import assert_array_almost_equal\n'), ((6341, 6414), 'nose.tools.assert_raises', 'assert_raises', (['RuntimeError', 'fit_matched_points', 'tgt_pts', 'src_pts'], {'tol': '(10)'}), '(RuntimeError, fit_matched_points, tgt_pts, src_pts, tol=10)\n', (6354, 6414), False, 'from nose.tools import assert_raises\n'), ((2671, 2690), 'mne.datasets.testing.data_path', 'testing.data_path', ([], {}), '()\n', (2688, 2690), False, 'from mne.datasets import testing\n'), ((3036, 3057), 'os.remove', 'os.remove', (['label_path'], {}), '(label_path)\n', (3045, 3057), False, 'import os\n'), ((4152, 4211), 'os.path.join', 'os.path.join', (['tempdir', '"""flachkopf"""', '"""surf"""', '"""lh.sphere.reg"""'], {}), "(tempdir, 'flachkopf', 'surf', 'lh.sphere.reg')\n", (4164, 4211), False, 'import os\n'), ((4375, 4422), 'mne.transforms.apply_trans', 'apply_trans', (["vsrc_s[0]['src_mri_t']", '(pt * scale)'], {}), "(vsrc_s[0]['src_mri_t'], pt * scale)\n", (4386, 4422), False, 'from mne.transforms import Transform, apply_trans, rotation, translation, scaling\n'), ((4454, 4491), 'mne.transforms.apply_trans', 'apply_trans', (["vsrc[0]['src_mri_t']", 'pt'], {}), "(vsrc[0]['src_mri_t'], pt)\n", (4465, 4491), False, 'from mne.transforms import Transform, apply_trans, rotation, translation, scaling\n'), ((5474, 5495), 'mne.transforms.translation', 'translation', (['(2)', '(-6)', '(3)'], {}), '(2, -6, 3)\n', (5485, 5495), False, 'from mne.transforms import Transform, apply_trans, rotation, translation, scaling\n'), ((5497, 5514), 'mne.transforms.rotation', 'rotation', (['(2)', '(6)', '(3)'], {}), '(2, 6, 3)\n', (5505, 5514), False, 'from mne.transforms import Transform, apply_trans, rotation, translation, scaling\n'), ((905, 932), 'mne.transforms.translation', 'translation', (['(0.1)', '(-0.1)', '(0.1)'], {}), '(0.1, -0.1, 0.1)\n', (916, 932), False, 'from mne.transforms import Transform, apply_trans, rotation, translation, scaling\n'), ((5001, 5026), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (5022, 5026), True, 'import numpy as np\n'), ((5875, 5896), 'mne.transforms.translation', 'translation', (['(2)', '(-6)', '(3)'], {}), '(2, -6, 3)\n', (5886, 5896), False, 'from mne.transforms import Transform, apply_trans, rotation, translation, scaling\n'), ((5898, 5921), 'mne.transforms.rotation', 'rotation', (['(1.5)', '(0.3)', '(1.4)'], {}), '(1.5, 0.3, 1.4)\n', (5906, 5921), False, 'from mne.transforms import Transform, apply_trans, rotation, translation, scaling\n'), ((5950, 5972), 'mne.transforms.scaling', 'scaling', (['(0.5)', '(0.5)', '(0.5)'], {}), '(0.5, 0.5, 0.5)\n', (5957, 5972), False, 
'from mne.transforms import Transform, apply_trans, rotation, translation, scaling\n'), ((881, 902), 'mne.transforms.rotation', 'rotation', (['(0.4)', '(0.1)', '(0)'], {}), '(0.4, 0.1, 0)\n', (889, 902), False, 'from mne.transforms import Transform, apply_trans, rotation, translation, scaling\n')]
|
import util
import pygame
import math
import images
class Particles(util.Block):
lifetime = 100
def __init__(self, x, y, n=10, lifetime=10, imgname=images.particleDefault):
super().__init__(x, y, imgname)
        self.particles_xyd = list()  # particle coordinates and velocities (x, y, dx, dy)
spd = 2
self.lifetime = lifetime
self.n = n
for i in range(n):
p = {'x': x, 'y': y}
p['dx'] = spd * (math.cos(i*2*math.pi/n))
p['dy'] = spd * (math.sin(i*2*math.pi/n))
self.particles_xyd.append(p)
def step(self):
self.lifetime -= 1
if self.lifetime:
for p in self.particles_xyd:
p['x'] += p['dx']
p['y'] += p['dy']
return True
else:
return False
def draw(self, cam, screen):
if self.lifetime:
for p in self.particles_xyd:
super().draw(p['x'], p['y'], cam, screen)
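# A usage sketch (illustrative only; util.Block, images.particleDefault and the
# cam/screen objects come from the surrounding game code, which is not shown here):
#
#   burst = Particles(100, 200, n=16, lifetime=30)
#   while burst.step():        # moves every particle and counts the lifetime down
#       burst.draw(cam, screen)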
|
[
"math.cos",
"math.sin"
] |
[((464, 493), 'math.cos', 'math.cos', (['(i * 2 * math.pi / n)'], {}), '(i * 2 * math.pi / n)\n', (472, 493), False, 'import math\n'), ((518, 547), 'math.sin', 'math.sin', (['(i * 2 * math.pi / n)'], {}), '(i * 2 * math.pi / n)\n', (526, 547), False, 'import math\n')]
|
"""HTTP module for CFEngine"""
import os
import urllib
import urllib.request
import ssl
import json
from cfengine import PromiseModule, ValidationError, Result
_SUPPORTED_METHODS = {"GET", "POST", "PUT", "DELETE", "PATCH"}
class HTTPPromiseModule(PromiseModule):
def __init__(self, *args, **kwargs):
super().__init__("http_promise_module", "1.0.0", *args, **kwargs)
def validate_promise(self, promiser, attributes):
if "url" in attributes:
url = attributes["url"]
if type(url) != str:
raise ValidationError("'url' must be a string")
if not url.startswith(("https://", "http://")):
raise ValidationError("Only HTTP(S) requests are supported")
if "method" in attributes:
method = attributes["method"]
if type(method) != str:
raise ValidationError("'method' must be a string")
if method not in _SUPPORTED_METHODS:
raise ValidationError("'method' must be one of %s" % ", ".join(_SUPPORTED_METHODS))
if "headers" in attributes:
headers = attributes["headers"]
headers_type = type(headers)
if headers_type == str:
headers_lines = headers.splitlines()
if any(line.count(":") != 1 for line in headers_lines):
raise ValidationError("'headers' must be string with 'name: value' pairs on separate lines")
elif headers_type == list:
if any(line.count(":") != 1 for line in headers):
raise ValidationError("'headers' must be a list of 'name: value' pairs")
elif headers_type == dict:
# nothing to check for dict?
pass
else:
raise ValidationError("'headers' must be a string, an slist or a data container" +
" value with 'name: value' pairs")
if "payload" in attributes:
payload = attributes["payload"]
if type(payload) not in (str, dict):
raise ValidationError("'payload' must be a string or a data container value")
if type(payload) == str and payload.startswith("@") and not os.path.isabs(payload[1:]):
raise ValidationError("File-based payload must be an absolute path")
if "file" in attributes:
file_ = attributes["file"]
if type(file_) != str or not os.path.isabs(file_):
raise ValidationError("'file' must be an absolute path to a file")
if "insecure" in attributes:
insecure = attributes["insecure"]
if type(insecure) != str or insecure not in ("true", "True", "false", "False"):
raise ValidationError("'insecure' must be either \"true\" or \"false\"")
def evaluate_promise(self, promiser, attributes):
url = attributes.get("url", promiser)
method = attributes.get("method", "GET")
headers = attributes.get("headers", dict())
payload = attributes.get("payload")
target = attributes.get("file")
insecure = attributes.get("insecure", False)
canonical_promiser = promiser.translate(str.maketrans({char: "_" for char in ("@", "/", ":", "?", "&", "%")}))
if headers and type(headers) != dict:
if type(headers) == str:
headers = {key: value for key, value in (line.split(":") for line in headers.splitlines())}
elif type(headers) == list:
headers = {key: value for key, value in (line.split(":") for line in headers)}
if payload:
if type(payload) == dict:
try:
payload = json.dumps(payload)
except TypeError:
self.log_error("Failed to convert 'payload' to text representation for request '%s'" % url)
return (Result.NOT_KEPT,
["%s_%s_request_failed" % (canonical_promiser, method),
"%s_%s_payload_failed" % (canonical_promiser, method),
"%s_%s_payload_conversion_failed" % (canonical_promiser, method)])
if "Content-Type" not in headers:
headers["Content-Type"] = "application/json"
elif payload.startswith("@"):
path = payload[1:]
try:
# Closed automatically when this variable gets out of
# scope. Thank you, Python!
payload = open(path, "rb")
except OSError as e:
self.log_error("Failed to open payload file '%s' for request '%s': %s" % (path, url, e))
return (Result.NOT_KEPT,
["%s_%s_request_failed" % (canonical_promiser, method),
"%s_%s_payload_failed" % (canonical_promiser, method),
"%s_%s_payload_file_failed" % (canonical_promiser, method)])
if "Content-Lenght" not in headers:
headers["Content-Length"] = os.path.getsize(path)
# must be 'None' or bytes or file object
if type(payload) == str:
payload = payload.encode("utf-8")
request = urllib.request.Request(url=url, data=payload, method=method, headers=headers)
SSL_context = None
if insecure:
# convert to a boolean
insecure = (insecure.lower() == "true")
if insecure:
SSL_context = ssl.SSLContext()
SSL_context.verify_method = ssl.CERT_NONE
try:
if target:
# TODO: create directories
with open(target, "wb") as target_file:
with urllib.request.urlopen(request, context=SSL_context) as url_req:
if not (200 <= url_req.status <= 300):
self.log_error("Request for '%s' failed with code %d" % (url, url_req.status))
return (Result.NOT_KEPT, ["%s_%s_request_failed" % (canonical_promiser, method)])
# TODO: log progress when url_req.headers["Content-length"] > REPORTING_THRESHOLD
done = False
while not done:
data = url_req.read(512 * 1024)
target_file.write(data)
done = bool(data)
else:
with urllib.request.urlopen(request, context=SSL_context) as url_req:
if not (200 <= url_req.status <= 300):
self.log_error("Request for '%s' failed with code %d" % (url, url_req.status))
return (Result.NOT_KEPT, ["%s_%s_request_failed" % (canonical_promiser, method)])
done = False
while not done:
data = url_req.read(512 * 1024)
done = bool(data)
except urllib.error.URLError as e:
self.log_error("Failed to request '%s': %s" % (url, e))
return (Result.NOT_KEPT, ["%s_%s_request_failed" % (canonical_promiser, method)])
except OSError as e:
self.log_error("Failed to store '%s' response to '%s': %s" % (url, target, e))
return (Result.NOT_KEPT,
["%s_%s_request_failed" % (canonical_promiser, method),
"%s_%s_file_failed" % (canonical_promiser, method)])
if target:
self.log_info("Saved request response from '%s' to '%s'" % (url, target))
else:
self.log_info("Successfully executed%s request to '%s'" % ((" " + method if method else ""),
url))
return (Result.REPAIRED, ["%s_%s_request_done" % (canonical_promiser, method)])
if __name__ == "__main__":
HTTPPromiseModule().start()
|
[
"os.path.isabs",
"ssl.SSLContext",
"urllib.request.Request",
"os.path.getsize",
"urllib.request.urlopen",
"json.dumps",
"cfengine.ValidationError"
] |
[((5342, 5419), 'urllib.request.Request', 'urllib.request.Request', ([], {'url': 'url', 'data': 'payload', 'method': 'method', 'headers': 'headers'}), '(url=url, data=payload, method=method, headers=headers)\n', (5364, 5419), False, 'import urllib\n'), ((562, 603), 'cfengine.ValidationError', 'ValidationError', (['"""\'url\' must be a string"""'], {}), '("\'url\' must be a string")\n', (577, 603), False, 'from cfengine import PromiseModule, ValidationError, Result\n'), ((686, 740), 'cfengine.ValidationError', 'ValidationError', (['"""Only HTTP(S) requests are supported"""'], {}), "('Only HTTP(S) requests are supported')\n", (701, 740), False, 'from cfengine import PromiseModule, ValidationError, Result\n'), ((877, 921), 'cfengine.ValidationError', 'ValidationError', (['"""\'method\' must be a string"""'], {}), '("\'method\' must be a string")\n', (892, 921), False, 'from cfengine import PromiseModule, ValidationError, Result\n'), ((2112, 2183), 'cfengine.ValidationError', 'ValidationError', (['"""\'payload\' must be a string or a data container value"""'], {}), '("\'payload\' must be a string or a data container value")\n', (2127, 2183), False, 'from cfengine import PromiseModule, ValidationError, Result\n'), ((2307, 2369), 'cfengine.ValidationError', 'ValidationError', (['"""File-based payload must be an absolute path"""'], {}), "('File-based payload must be an absolute path')\n", (2322, 2369), False, 'from cfengine import PromiseModule, ValidationError, Result\n'), ((2528, 2588), 'cfengine.ValidationError', 'ValidationError', (['"""\'file\' must be an absolute path to a file"""'], {}), '("\'file\' must be an absolute path to a file")\n', (2543, 2588), False, 'from cfengine import PromiseModule, ValidationError, Result\n'), ((2787, 2851), 'cfengine.ValidationError', 'ValidationError', (['"""\'insecure\' must be either "true" or "false\\""""'], {}), '(\'\\\'insecure\\\' must be either "true" or "false"\')\n', (2802, 2851), False, 'from cfengine import PromiseModule, ValidationError, Result\n'), ((5611, 5627), 'ssl.SSLContext', 'ssl.SSLContext', ([], {}), '()\n', (5625, 5627), False, 'import ssl\n'), ((1380, 1471), 'cfengine.ValidationError', 'ValidationError', (['"""\'headers\' must be string with \'name: value\' pairs on separate lines"""'], {}), '(\n "\'headers\' must be string with \'name: value\' pairs on separate lines")\n', (1395, 1471), False, 'from cfengine import PromiseModule, ValidationError, Result\n'), ((2257, 2283), 'os.path.isabs', 'os.path.isabs', (['payload[1:]'], {}), '(payload[1:])\n', (2270, 2283), False, 'import os\n'), ((2484, 2504), 'os.path.isabs', 'os.path.isabs', (['file_'], {}), '(file_)\n', (2497, 2504), False, 'import os\n'), ((3751, 3770), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (3761, 3770), False, 'import json\n'), ((6572, 6624), 'urllib.request.urlopen', 'urllib.request.urlopen', (['request'], {'context': 'SSL_context'}), '(request, context=SSL_context)\n', (6594, 6624), False, 'import urllib\n'), ((1598, 1664), 'cfengine.ValidationError', 'ValidationError', (['"""\'headers\' must be a list of \'name: value\' pairs"""'], {}), '("\'headers\' must be a list of \'name: value\' pairs")\n', (1613, 1664), False, 'from cfengine import PromiseModule, ValidationError, Result\n'), ((1810, 1925), 'cfengine.ValidationError', 'ValidationError', (['("\'headers\' must be a string, an slist or a data container" +\n " value with \'name: value\' pairs")'], {}), '("\'headers\' must be a string, an slist or a data container" +\n " value with \'name: 
value\' pairs")\n', (1825, 1925), False, 'from cfengine import PromiseModule, ValidationError, Result\n'), ((5160, 5181), 'os.path.getsize', 'os.path.getsize', (['path'], {}), '(path)\n', (5175, 5181), False, 'import os\n'), ((5847, 5899), 'urllib.request.urlopen', 'urllib.request.urlopen', (['request'], {'context': 'SSL_context'}), '(request, context=SSL_context)\n', (5869, 5899), False, 'import urllib\n')]
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from pycket import impersonators as imp
from pycket import values, values_string
from pycket.hash.base import W_HashTable, W_ImmutableHashTable, w_missing
from pycket.hash.simple import (
W_EqvMutableHashTable, W_EqMutableHashTable,
W_EqvImmutableHashTable, W_EqImmutableHashTable,
make_simple_mutable_table, make_simple_mutable_table_assocs,
make_simple_immutable_table, make_simple_immutable_table_assocs)
from pycket.hash.equal import W_EqualHashTable
from pycket.impersonators.baseline import W_ImpHashTable, W_ChpHashTable
from pycket.cont import continuation, loop_label
from pycket.error import SchemeException
from pycket.prims.expose import default, expose, procedure, define_nyi
from rpython.rlib import jit, objectmodel
_KEY = 0
_VALUE = 1
_KEY_AND_VALUE = 2
_PAIR = 3
PREFIXES = ["unsafe-mutable", "unsafe-immutable"]
def prefix_hash_names(base):
result = [base]
for pre in PREFIXES:
result.append("%s-%s" % (pre, base))
return result
@expose(prefix_hash_names("hash-iterate-first"), [W_HashTable])
def hash_iterate_first(ht):
if ht.length() == 0:
return values.w_false
return values.W_Fixnum.ZERO
@expose(prefix_hash_names("hash-iterate-next"), [W_HashTable, values.W_Fixnum])
def hash_iterate_next(ht, pos):
return ht.hash_iterate_next(pos)
@objectmodel.specialize.arg(4)
def hash_iter_ref(ht, n, env, cont, returns):
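    # Shared helper for the hash-iterate-* primitives: looks up item n in ht and
    # returns its key, value, key+value, or pair, depending on 'returns'.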
from pycket.interpreter import return_value, return_multi_vals
try:
w_key, w_val = ht.get_item(n)
if returns == _KEY:
return return_value(w_key, env, cont)
if returns == _VALUE:
return return_value(w_val, env, cont)
if returns == _KEY_AND_VALUE:
vals = values.Values._make2(w_key, w_val)
return return_multi_vals(vals, env, cont)
if returns == _PAIR:
vals = values.W_Cons.make(w_key, w_val)
return return_value(vals, env, cont)
assert False, "unknown return code"
except KeyError:
raise SchemeException("hash-iterate-key: invalid position")
except IndexError:
raise SchemeException("hash-iterate-key: invalid position")
@expose(prefix_hash_names("hash-iterate-key"),
[W_HashTable, values.W_Fixnum], simple=False)
def hash_iterate_key(ht, pos, env, cont):
return hash_iter_ref(ht, pos.value, env, cont, returns=_KEY)
@expose(prefix_hash_names("hash-iterate-value"),
[W_HashTable, values.W_Fixnum], simple=False)
def hash_iterate_value(ht, pos, env, cont):
return hash_iter_ref(ht, pos.value, env, cont, returns=_VALUE)
@expose(prefix_hash_names("hash-iterate-key+value"),
[W_HashTable, values.W_Fixnum], simple=False)
def hash_iterate_key_value(ht, pos, env, cont):
return hash_iter_ref(ht, pos.value, env, cont, returns=_KEY_AND_VALUE)
@expose(prefix_hash_names("hash-iterate-pair"),
[W_HashTable, values.W_Fixnum], simple=False)
def hash_iterate_pair(ht, pos, env, cont):
return hash_iter_ref(ht, pos.value, env, cont, returns=_PAIR)
@expose("hash-for-each", [W_HashTable, procedure, default(values.W_Object, values.w_false)], simple=False)
def hash_for_each(ht, f, try_order, env, cont):
    # FIXME: implement try-order? -- see hash-map
return hash_for_each_loop(ht, f, 0, env, cont)
@loop_label
def hash_for_each_loop(ht, f, index, env, cont):
from pycket.interpreter import return_value
try:
w_key, w_value = ht.get_item(index)
except KeyError:
return hash_for_each_loop(ht, f, index + 1, env, cont)
except IndexError:
return return_value(values.w_void, env, cont)
return f.call([w_key, w_value], env,
hash_for_each_cont(ht, f, index, env, cont))
@continuation
def hash_for_each_cont(ht, f, index, env, cont, _vals):
return hash_for_each_loop(ht, f, index + 1, env, cont)
@expose("hash-map", [W_HashTable, procedure, default(values.W_Object, values.w_false)], simple=False)
def hash_map(h, f, try_order, env, cont):
# FIXME : If try-order? is true, then the order of keys and values
# passed to proc is normalized under certain circumstances, such
# as when the keys are all symbols and hash is not an
# impersonator.
from pycket.interpreter import return_value
acc = values.w_null
return hash_map_loop(f, h, 0, acc, env, cont)
# f.enable_jitting()
# return return_value(w_missing, env,
# hash_map_cont(f, h, 0, acc, env, cont))
@loop_label
def hash_map_loop(f, ht, index, w_acc, env, cont):
from pycket.interpreter import return_value
try:
w_key, w_value = ht.get_item(index)
except KeyError:
return hash_map_loop(f, ht, index + 1, w_acc, env, cont)
except IndexError:
return return_value(w_acc, env, cont)
after = hash_map_cont(f, ht, index, w_acc, env, cont)
return f.call([w_key, w_value], env, after)
@continuation
def hash_map_cont(f, ht, index, w_acc, env, cont, _vals):
from pycket.interpreter import check_one_val
w_val = check_one_val(_vals)
w_acc = values.W_Cons.make(w_val, w_acc)
return hash_map_loop(f, ht, index + 1, w_acc, env, cont)
@jit.elidable
def from_assocs(assocs, fname):
if not assocs.is_proper_list():
raise SchemeException("%s: expected proper list" % fname)
keys = []
vals = []
while isinstance(assocs, values.W_Cons):
val, assocs = assocs.car(), assocs.cdr()
if not isinstance(val, values.W_Cons):
raise SchemeException("%s: expected list of pairs" % fname)
keys.append(val.car())
vals.append(val.cdr())
return keys[:], vals[:]
@expose("make-weak-hasheq", [default(values.W_List, values.w_null)])
def make_weak_hasheq(assocs):
# FIXME: not actually weak
return make_simple_mutable_table_assocs(W_EqMutableHashTable, assocs, "make-weak-hasheq")
@expose("make-weak-hasheqv", [default(values.W_List, values.w_null)])
def make_weak_hasheqv(assocs):
# FIXME: not actually weak
return make_simple_mutable_table_assocs(W_EqvMutableHashTable, assocs, "make-weak-hasheqv")
@expose(["make-weak-hash", "make-late-weak-hasheq"], [default(values.W_List, None)])
def make_weak_hash(assocs):
if assocs is None:
return W_EqualHashTable([], [], immutable=False)
return W_EqualHashTable(*from_assocs(assocs, "make-weak-hash"), immutable=False)
@expose("make-immutable-hash", [default(values.W_List, values.w_null)])
def make_immutable_hash(assocs):
keys, vals = from_assocs(assocs, "make-immutable-hash")
return W_EqualHashTable(keys, vals, immutable=True)
@expose("make-immutable-hasheq", [default(values.W_List, values.w_null)])
def make_immutable_hasheq(assocs):
return make_simple_immutable_table_assocs(W_EqImmutableHashTable, assocs, "make-immutable-hasheq")
@expose("make-immutable-hasheqv", [default(values.W_List, values.w_null)])
def make_immutable_hasheqv(assocs):
return make_simple_immutable_table_assocs(W_EqvImmutableHashTable, assocs, "make-immutable-hasheq")
@expose("hash")
def hash(args):
if len(args) % 2 != 0:
raise SchemeException("hash: key does not have a corresponding value")
keys = [args[i] for i in range(0, len(args), 2)]
vals = [args[i] for i in range(1, len(args), 2)]
return W_EqualHashTable(keys, vals, immutable=True)
@expose("hasheq")
def hasheq(args):
if len(args) % 2 != 0:
raise SchemeException("hasheq: key does not have a corresponding value")
keys = [args[i] for i in range(0, len(args), 2)]
vals = [args[i] for i in range(1, len(args), 2)]
return make_simple_immutable_table(W_EqImmutableHashTable, keys, vals)
@expose("hasheqv")
def hasheqv(args):
if len(args) % 2 != 0:
raise SchemeException("hasheqv: key does not have a corresponding value")
keys = [args[i] for i in range(0, len(args), 2)]
vals = [args[i] for i in range(1, len(args), 2)]
return make_simple_immutable_table(W_EqvImmutableHashTable, keys, vals)
@expose("make-hash", [default(values.W_List, values.w_null)])
def make_hash(pairs):
return W_EqualHashTable(*from_assocs(pairs, "make-hash"))
@expose("make-hasheq", [default(values.W_List, values.w_null)])
def make_hasheq(pairs):
return make_simple_mutable_table_assocs(W_EqMutableHashTable, pairs, "make-hasheq")
@expose("make-hasheqv", [default(values.W_List, values.w_null)])
def make_hasheqv(pairs):
return make_simple_mutable_table_assocs(W_EqvMutableHashTable, pairs, "make-hasheqv")
@expose("hash-set!", [W_HashTable, values.W_Object, values.W_Object], simple=False)
def hash_set_bang(ht, k, v, env, cont):
if ht.immutable():
raise SchemeException("hash-set!: given immutable table")
return ht.hash_set(k, v, env, cont)
@continuation
def hash_set_cont(key, val, env, cont, _vals):
from pycket.interpreter import check_one_val
table = check_one_val(_vals)
return table.hash_set(key, val, env, return_table_cont(table, env, cont))
@continuation
def return_table_cont(table, env, cont, _vals):
from pycket.interpreter import return_value
return return_value(table, env, cont)
@expose("hash-set", [W_HashTable, values.W_Object, values.W_Object], simple=False)
def hash_set(table, key, val, env, cont):
from pycket.interpreter import return_value
if not table.immutable():
raise SchemeException("hash-set: not given an immutable table")
# Fast path
if isinstance(table, W_ImmutableHashTable):
new_table = table.assoc(key, val)
return return_value(new_table, env, cont)
return hash_copy(table, env,
hash_set_cont(key, val, env, cont))
@continuation
def hash_ref_cont(default, k, env, cont, _vals):
from pycket.interpreter import return_value, check_one_val
val = check_one_val(_vals)
if val is not w_missing:
return return_value(val, env, cont)
if default is None:
raise SchemeException("key %s not found"%k.tostring())
if default.iscallable():
return default.call([], env, cont)
return return_value(default, env, cont)
@expose("hash-ref", [W_HashTable, values.W_Object, default(values.W_Object, None)], simple=False)
def hash_ref(ht, k, default, env, cont):
return ht.hash_ref(k, env, hash_ref_cont(default, k, env, cont))
@expose("hash-remove!", [W_HashTable, values.W_Object], simple=False)
def hash_remove_bang(ht, k, env, cont):
if ht.immutable():
raise SchemeException("hash-remove!: expected mutable hash table")
return ht.hash_remove_inplace(k, env, cont)
@expose("hash-remove", [W_HashTable, values.W_Object], simple=False)
def hash_remove(ht, k, env, cont):
if not ht.immutable():
raise SchemeException("hash-remove: expected immutable hash table")
return ht.hash_remove(k, env, cont)
@continuation
def hash_clear_cont(ht, env, cont, _vals):
return hash_clear_loop(ht, env, cont)
def hash_clear_loop(ht, env, cont):
from pycket.interpreter import return_value
if ht.length() == 0:
return return_value(values.w_void, env, cont)
w_k, w_v = ht.get_item(0)
return ht.hash_remove_inplace(w_k, env, hash_clear_cont(ht, env, cont))
@expose("hash-clear!", [W_HashTable], simple=False)
def hash_clear_bang(ht, env, cont):
from pycket.interpreter import return_value
if ht.is_impersonator():
ht.hash_clear_proc(env, cont)
return hash_clear_loop(ht, env, cont)
else:
ht.hash_empty()
return return_value(values.w_void, env, cont)
define_nyi("hash-clear", [W_HashTable])
@expose("hash-count", [W_HashTable])
def hash_count(hash):
return values.W_Fixnum(hash.length())
@continuation
def hash_keys_subset_huh_cont(keys_vals, hash_2, idx, env, cont, _vals):
from pycket.interpreter import return_value, check_one_val
val = check_one_val(_vals)
if val is values.w_false:
return return_value(values.w_false, env, cont)
else:
return hash_keys_subset_huh_loop(keys_vals, hash_2, idx + 1, env, cont)
@loop_label
def hash_keys_subset_huh_loop(keys_vals, hash_2, idx, env, cont):
from pycket.interpreter import return_value
if idx >= len(keys_vals):
return return_value(values.w_true, env, cont)
else:
return hash_ref([hash_2, keys_vals[idx][0], values.w_false], env,
hash_keys_subset_huh_cont(keys_vals, hash_2, idx, env, cont))
@jit.elidable
def uses_same_eq_comparison(hash_1, hash_2):
h_1 = hash_1
h_2 = hash_2
if hash_1.is_impersonator() or hash_1.is_chaperone():
h_1 = hash_1.get_proxied()
if hash_2.is_impersonator() or hash_2.is_chaperone():
h_2 = hash_2.get_proxied()
if isinstance(h_1, W_EqualHashTable):
return isinstance(h_2, W_EqualHashTable)
elif isinstance(h_1, W_EqMutableHashTable) or isinstance(h_1, W_EqImmutableHashTable):
return isinstance(h_2, W_EqMutableHashTable) or isinstance(h_2, W_EqImmutableHashTable)
elif isinstance(h_1, W_EqvMutableHashTable) or isinstance(h_1, W_EqvImmutableHashTable):
return isinstance(h_2, W_EqvMutableHashTable) or isinstance(h_2, W_EqvImmutableHashTable)
else:
return False
@expose("hash-keys-subset?", [W_HashTable, W_HashTable], simple=False)
def hash_keys_subset_huh(hash_1, hash_2, env, cont):
if not uses_same_eq_comparison(hash_1, hash_2):
raise SchemeException("hash-keys-subset?: given hash tables do not use the same key comparison -- first table : %s - second table: %s" % (hash_1.tostring(), hash_2.tostring()))
return hash_keys_subset_huh_loop(hash_1.hash_items(), hash_2, 0, env, cont)
@continuation
def hash_copy_ref_cont(keys, idx, src, new, env, cont, _vals):
from pycket.interpreter import check_one_val
val = check_one_val(_vals)
return new.hash_set(keys[idx][0], val, env,
hash_copy_set_cont(keys, idx, src, new, env, cont))
@continuation
def hash_copy_set_cont(keys, idx, src, new, env, cont, _vals):
return hash_copy_loop(keys, idx + 1, src, new, env, cont)
@loop_label
def hash_copy_loop(keys, idx, src, new, env, cont):
from pycket.interpreter import return_value
if idx >= len(keys):
return return_value(new, env, cont)
return src.hash_ref(keys[idx][0], env,
hash_copy_ref_cont(keys, idx, src, new, env, cont))
def hash_copy(src, env, cont):
from pycket.interpreter import return_value
if isinstance(src, W_ImmutableHashTable):
new = src.make_copy()
return return_value(new, env, cont)
new = src.make_empty()
if src.length() == 0:
return return_value(new, env, cont)
return hash_copy_loop(src.hash_items(), 0, src, new, env, cont)
expose("hash-copy", [W_HashTable], simple=False)(hash_copy)
# FIXME: not implemented
@expose("equal-hash-code", [values.W_Object])
def equal_hash_code(v):
# only for improper path cache entries
if isinstance(v, values.W_Cons):
if v.is_proper_list():
return values.W_Fixnum.ZERO
nm = v.car()
p = v.cdr()
if isinstance(nm, values_string.W_String) and \
isinstance(p, values.W_Path) and \
isinstance(p.path, str):
return values.W_Fixnum(objectmodel.compute_hash((nm.tostring(), p.path)))
return values.W_Fixnum.ZERO
@expose("equal-secondary-hash-code", [values.W_Object])
def equal_secondary_hash_code(v):
return values.W_Fixnum.ZERO
@expose("eq-hash-code", [values.W_Object])
def eq_hash_code(v):
t = type(v)
if t is values.W_Fixnum:
return v
if t is values.W_Flonum:
hash = objectmodel.compute_hash(v.value)
elif t is values.W_Character:
hash = objectmodel.compute_hash(v.value)
else:
hash = objectmodel.compute_hash(v)
return values.W_Fixnum(hash)
@expose("eqv-hash-code", [values.W_Object])
def eqv_hash_code(v):
hash = v.hash_eqv()
return values.W_Fixnum(hash)
|
[
"pycket.values.Values._make2",
"pycket.hash.simple.make_simple_immutable_table",
"pycket.interpreter.check_one_val",
"pycket.prims.expose.expose",
"pycket.hash.simple.make_simple_mutable_table_assocs",
"pycket.interpreter.return_value",
"pycket.prims.expose.define_nyi",
"pycket.values.W_Fixnum",
"pycket.prims.expose.default.call",
"pycket.hash.simple.make_simple_immutable_table_assocs",
"pycket.values.W_Cons.make",
"pycket.interpreter.return_multi_vals",
"pycket.error.SchemeException",
"pycket.prims.expose.default.iscallable",
"pycket.prims.expose.default",
"rpython.rlib.objectmodel.specialize.arg",
"rpython.rlib.objectmodel.compute_hash",
"pycket.hash.equal.W_EqualHashTable"
] |
[((1430, 1459), 'rpython.rlib.objectmodel.specialize.arg', 'objectmodel.specialize.arg', (['(4)'], {}), '(4)\n', (1456, 1459), False, 'from rpython.rlib import jit, objectmodel\n'), ((7113, 7127), 'pycket.prims.expose.expose', 'expose', (['"""hash"""'], {}), "('hash')\n", (7119, 7127), False, 'from pycket.prims.expose import default, expose, procedure, define_nyi\n'), ((7414, 7430), 'pycket.prims.expose.expose', 'expose', (['"""hasheq"""'], {}), "('hasheq')\n", (7420, 7430), False, 'from pycket.prims.expose import default, expose, procedure, define_nyi\n'), ((7740, 7757), 'pycket.prims.expose.expose', 'expose', (['"""hasheqv"""'], {}), "('hasheqv')\n", (7746, 7757), False, 'from pycket.prims.expose import default, expose, procedure, define_nyi\n'), ((8575, 8662), 'pycket.prims.expose.expose', 'expose', (['"""hash-set!"""', '[W_HashTable, values.W_Object, values.W_Object]'], {'simple': '(False)'}), "('hash-set!', [W_HashTable, values.W_Object, values.W_Object], simple\n =False)\n", (8581, 8662), False, 'from pycket.prims.expose import default, expose, procedure, define_nyi\n'), ((9204, 9290), 'pycket.prims.expose.expose', 'expose', (['"""hash-set"""', '[W_HashTable, values.W_Object, values.W_Object]'], {'simple': '(False)'}), "('hash-set', [W_HashTable, values.W_Object, values.W_Object], simple=\n False)\n", (9210, 9290), False, 'from pycket.prims.expose import default, expose, procedure, define_nyi\n'), ((10361, 10429), 'pycket.prims.expose.expose', 'expose', (['"""hash-remove!"""', '[W_HashTable, values.W_Object]'], {'simple': '(False)'}), "('hash-remove!', [W_HashTable, values.W_Object], simple=False)\n", (10367, 10429), False, 'from pycket.prims.expose import default, expose, procedure, define_nyi\n'), ((10618, 10685), 'pycket.prims.expose.expose', 'expose', (['"""hash-remove"""', '[W_HashTable, values.W_Object]'], {'simple': '(False)'}), "('hash-remove', [W_HashTable, values.W_Object], simple=False)\n", (10624, 10685), False, 'from pycket.prims.expose import default, expose, procedure, define_nyi\n'), ((11237, 11287), 'pycket.prims.expose.expose', 'expose', (['"""hash-clear!"""', '[W_HashTable]'], {'simple': '(False)'}), "('hash-clear!', [W_HashTable], simple=False)\n", (11243, 11287), False, 'from pycket.prims.expose import default, expose, procedure, define_nyi\n'), ((11576, 11615), 'pycket.prims.expose.define_nyi', 'define_nyi', (['"""hash-clear"""', '[W_HashTable]'], {}), "('hash-clear', [W_HashTable])\n", (11586, 11615), False, 'from pycket.prims.expose import default, expose, procedure, define_nyi\n'), ((11618, 11653), 'pycket.prims.expose.expose', 'expose', (['"""hash-count"""', '[W_HashTable]'], {}), "('hash-count', [W_HashTable])\n", (11624, 11653), False, 'from pycket.prims.expose import default, expose, procedure, define_nyi\n'), ((13240, 13309), 'pycket.prims.expose.expose', 'expose', (['"""hash-keys-subset?"""', '[W_HashTable, W_HashTable]'], {'simple': '(False)'}), "('hash-keys-subset?', [W_HashTable, W_HashTable], simple=False)\n", (13246, 13309), False, 'from pycket.prims.expose import default, expose, procedure, define_nyi\n'), ((14832, 14876), 'pycket.prims.expose.expose', 'expose', (['"""equal-hash-code"""', '[values.W_Object]'], {}), "('equal-hash-code', [values.W_Object])\n", (14838, 14876), False, 'from pycket.prims.expose import default, expose, procedure, define_nyi\n'), ((15354, 15408), 'pycket.prims.expose.expose', 'expose', (['"""equal-secondary-hash-code"""', '[values.W_Object]'], {}), "('equal-secondary-hash-code', [values.W_Object])\n", (15360, 15408), 
False, 'from pycket.prims.expose import default, expose, procedure, define_nyi\n'), ((15477, 15518), 'pycket.prims.expose.expose', 'expose', (['"""eq-hash-code"""', '[values.W_Object]'], {}), "('eq-hash-code', [values.W_Object])\n", (15483, 15518), False, 'from pycket.prims.expose import default, expose, procedure, define_nyi\n'), ((15852, 15894), 'pycket.prims.expose.expose', 'expose', (['"""eqv-hash-code"""', '[values.W_Object]'], {}), "('eqv-hash-code', [values.W_Object])\n", (15858, 15894), False, 'from pycket.prims.expose import default, expose, procedure, define_nyi\n'), ((5120, 5140), 'pycket.interpreter.check_one_val', 'check_one_val', (['_vals'], {}), '(_vals)\n', (5133, 5140), False, 'from pycket.interpreter import check_one_val\n'), ((5153, 5185), 'pycket.values.W_Cons.make', 'values.W_Cons.make', (['w_val', 'w_acc'], {}), '(w_val, w_acc)\n', (5171, 5185), False, 'from pycket import values, values_string\n'), ((5869, 5955), 'pycket.hash.simple.make_simple_mutable_table_assocs', 'make_simple_mutable_table_assocs', (['W_EqMutableHashTable', 'assocs', '"""make-weak-hasheq"""'], {}), "(W_EqMutableHashTable, assocs,\n 'make-weak-hasheq')\n", (5901, 5955), False, 'from pycket.hash.simple import W_EqvMutableHashTable, W_EqMutableHashTable, W_EqvImmutableHashTable, W_EqImmutableHashTable, make_simple_mutable_table, make_simple_mutable_table_assocs, make_simple_immutable_table, make_simple_immutable_table_assocs\n'), ((6096, 6184), 'pycket.hash.simple.make_simple_mutable_table_assocs', 'make_simple_mutable_table_assocs', (['W_EqvMutableHashTable', 'assocs', '"""make-weak-hasheqv"""'], {}), "(W_EqvMutableHashTable, assocs,\n 'make-weak-hasheqv')\n", (6128, 6184), False, 'from pycket.hash.simple import W_EqvMutableHashTable, W_EqMutableHashTable, W_EqvImmutableHashTable, W_EqImmutableHashTable, make_simple_mutable_table, make_simple_mutable_table_assocs, make_simple_immutable_table, make_simple_immutable_table_assocs\n'), ((6637, 6681), 'pycket.hash.equal.W_EqualHashTable', 'W_EqualHashTable', (['keys', 'vals'], {'immutable': '(True)'}), '(keys, vals, immutable=True)\n', (6653, 6681), False, 'from pycket.hash.equal import W_EqualHashTable\n'), ((6803, 6898), 'pycket.hash.simple.make_simple_immutable_table_assocs', 'make_simple_immutable_table_assocs', (['W_EqImmutableHashTable', 'assocs', '"""make-immutable-hasheq"""'], {}), "(W_EqImmutableHashTable, assocs,\n 'make-immutable-hasheq')\n", (6837, 6898), False, 'from pycket.hash.simple import W_EqvMutableHashTable, W_EqMutableHashTable, W_EqvImmutableHashTable, W_EqImmutableHashTable, make_simple_mutable_table, make_simple_mutable_table_assocs, make_simple_immutable_table, make_simple_immutable_table_assocs\n'), ((7018, 7114), 'pycket.hash.simple.make_simple_immutable_table_assocs', 'make_simple_immutable_table_assocs', (['W_EqvImmutableHashTable', 'assocs', '"""make-immutable-hasheq"""'], {}), "(W_EqvImmutableHashTable, assocs,\n 'make-immutable-hasheq')\n", (7052, 7114), False, 'from pycket.hash.simple import W_EqvMutableHashTable, W_EqMutableHashTable, W_EqvImmutableHashTable, W_EqImmutableHashTable, make_simple_mutable_table, make_simple_mutable_table_assocs, make_simple_immutable_table, make_simple_immutable_table_assocs\n'), ((7367, 7411), 'pycket.hash.equal.W_EqualHashTable', 'W_EqualHashTable', (['keys', 'vals'], {'immutable': '(True)'}), '(keys, vals, immutable=True)\n', (7383, 7411), False, 'from pycket.hash.equal import W_EqualHashTable\n'), ((7674, 7737), 'pycket.hash.simple.make_simple_immutable_table', 
'make_simple_immutable_table', (['W_EqImmutableHashTable', 'keys', 'vals'], {}), '(W_EqImmutableHashTable, keys, vals)\n', (7701, 7737), False, 'from pycket.hash.simple import W_EqvMutableHashTable, W_EqMutableHashTable, W_EqvImmutableHashTable, W_EqImmutableHashTable, make_simple_mutable_table, make_simple_mutable_table_assocs, make_simple_immutable_table, make_simple_immutable_table_assocs\n'), ((8003, 8067), 'pycket.hash.simple.make_simple_immutable_table', 'make_simple_immutable_table', (['W_EqvImmutableHashTable', 'keys', 'vals'], {}), '(W_EqvImmutableHashTable, keys, vals)\n', (8030, 8067), False, 'from pycket.hash.simple import W_EqvMutableHashTable, W_EqMutableHashTable, W_EqvImmutableHashTable, W_EqImmutableHashTable, make_simple_mutable_table, make_simple_mutable_table_assocs, make_simple_immutable_table, make_simple_immutable_table_assocs\n'), ((8315, 8391), 'pycket.hash.simple.make_simple_mutable_table_assocs', 'make_simple_mutable_table_assocs', (['W_EqMutableHashTable', 'pairs', '"""make-hasheq"""'], {}), "(W_EqMutableHashTable, pairs, 'make-hasheq')\n", (8347, 8391), False, 'from pycket.hash.simple import W_EqvMutableHashTable, W_EqMutableHashTable, W_EqvImmutableHashTable, W_EqImmutableHashTable, make_simple_mutable_table, make_simple_mutable_table_assocs, make_simple_immutable_table, make_simple_immutable_table_assocs\n'), ((8494, 8572), 'pycket.hash.simple.make_simple_mutable_table_assocs', 'make_simple_mutable_table_assocs', (['W_EqvMutableHashTable', 'pairs', '"""make-hasheqv"""'], {}), "(W_EqvMutableHashTable, pairs, 'make-hasheqv')\n", (8526, 8572), False, 'from pycket.hash.simple import W_EqvMutableHashTable, W_EqMutableHashTable, W_EqvImmutableHashTable, W_EqImmutableHashTable, make_simple_mutable_table, make_simple_mutable_table_assocs, make_simple_immutable_table, make_simple_immutable_table_assocs\n'), ((8950, 8970), 'pycket.interpreter.check_one_val', 'check_one_val', (['_vals'], {}), '(_vals)\n', (8963, 8970), False, 'from pycket.interpreter import check_one_val\n'), ((9171, 9201), 'pycket.interpreter.return_value', 'return_value', (['table', 'env', 'cont'], {}), '(table, env, cont)\n', (9183, 9201), False, 'from pycket.interpreter import return_value\n'), ((9853, 9873), 'pycket.interpreter.check_one_val', 'check_one_val', (['_vals'], {}), '(_vals)\n', (9866, 9873), False, 'from pycket.interpreter import check_one_val\n'), ((10041, 10061), 'pycket.prims.expose.default.iscallable', 'default.iscallable', ([], {}), '()\n', (10059, 10061), False, 'from pycket.prims.expose import default, expose, procedure, define_nyi\n'), ((10117, 10149), 'pycket.interpreter.return_value', 'return_value', (['default', 'env', 'cont'], {}), '(default, env, cont)\n', (10129, 10149), False, 'from pycket.interpreter import return_value\n'), ((11879, 11899), 'pycket.interpreter.check_one_val', 'check_one_val', (['_vals'], {}), '(_vals)\n', (11892, 11899), False, 'from pycket.interpreter import check_one_val\n'), ((13817, 13837), 'pycket.interpreter.check_one_val', 'check_one_val', (['_vals'], {}), '(_vals)\n', (13830, 13837), False, 'from pycket.interpreter import check_one_val\n'), ((14745, 14793), 'pycket.prims.expose.expose', 'expose', (['"""hash-copy"""', '[W_HashTable]'], {'simple': '(False)'}), "('hash-copy', [W_HashTable], simple=False)\n", (14751, 14793), False, 'from pycket.prims.expose import default, expose, procedure, define_nyi\n'), ((15828, 15849), 'pycket.values.W_Fixnum', 'values.W_Fixnum', (['hash'], {}), '(hash)\n', (15843, 15849), False, 'from pycket import values, 
values_string\n'), ((15952, 15973), 'pycket.values.W_Fixnum', 'values.W_Fixnum', (['hash'], {}), '(hash)\n', (15967, 15973), False, 'from pycket import values, values_string\n'), ((3196, 3236), 'pycket.prims.expose.default', 'default', (['values.W_Object', 'values.w_false'], {}), '(values.W_Object, values.w_false)\n', (3203, 3236), False, 'from pycket.prims.expose import default, expose, procedure, define_nyi\n'), ((4000, 4040), 'pycket.prims.expose.default', 'default', (['values.W_Object', 'values.w_false'], {}), '(values.W_Object, values.w_false)\n', (4007, 4040), False, 'from pycket.prims.expose import default, expose, procedure, define_nyi\n'), ((5344, 5395), 'pycket.error.SchemeException', 'SchemeException', (["('%s: expected proper list' % fname)"], {}), "('%s: expected proper list' % fname)\n", (5359, 5395), False, 'from pycket.error import SchemeException\n'), ((5757, 5794), 'pycket.prims.expose.default', 'default', (['values.W_List', 'values.w_null'], {}), '(values.W_List, values.w_null)\n', (5764, 5794), False, 'from pycket.prims.expose import default, expose, procedure, define_nyi\n'), ((5983, 6020), 'pycket.prims.expose.default', 'default', (['values.W_List', 'values.w_null'], {}), '(values.W_List, values.w_null)\n', (5990, 6020), False, 'from pycket.prims.expose import default, expose, procedure, define_nyi\n'), ((6333, 6374), 'pycket.hash.equal.W_EqualHashTable', 'W_EqualHashTable', (['[]', '[]'], {'immutable': '(False)'}), '([], [], immutable=False)\n', (6349, 6374), False, 'from pycket.hash.equal import W_EqualHashTable\n'), ((6236, 6264), 'pycket.prims.expose.default', 'default', (['values.W_List', 'None'], {}), '(values.W_List, None)\n', (6243, 6264), False, 'from pycket.prims.expose import default, expose, procedure, define_nyi\n'), ((6493, 6530), 'pycket.prims.expose.default', 'default', (['values.W_List', 'values.w_null'], {}), '(values.W_List, values.w_null)\n', (6500, 6530), False, 'from pycket.prims.expose import default, expose, procedure, define_nyi\n'), ((6717, 6754), 'pycket.prims.expose.default', 'default', (['values.W_List', 'values.w_null'], {}), '(values.W_List, values.w_null)\n', (6724, 6754), False, 'from pycket.prims.expose import default, expose, procedure, define_nyi\n'), ((6931, 6968), 'pycket.prims.expose.default', 'default', (['values.W_List', 'values.w_null'], {}), '(values.W_List, values.w_null)\n', (6938, 6968), False, 'from pycket.prims.expose import default, expose, procedure, define_nyi\n'), ((7185, 7249), 'pycket.error.SchemeException', 'SchemeException', (['"""hash: key does not have a corresponding value"""'], {}), "('hash: key does not have a corresponding value')\n", (7200, 7249), False, 'from pycket.error import SchemeException\n'), ((7490, 7556), 'pycket.error.SchemeException', 'SchemeException', (['"""hasheq: key does not have a corresponding value"""'], {}), "('hasheq: key does not have a corresponding value')\n", (7505, 7556), False, 'from pycket.error import SchemeException\n'), ((7818, 7885), 'pycket.error.SchemeException', 'SchemeException', (['"""hasheqv: key does not have a corresponding value"""'], {}), "('hasheqv: key does not have a corresponding value')\n", (7833, 7885), False, 'from pycket.error import SchemeException\n'), ((8091, 8128), 'pycket.prims.expose.default', 'default', (['values.W_List', 'values.w_null'], {}), '(values.W_List, values.w_null)\n', (8098, 8128), False, 'from pycket.prims.expose import default, expose, procedure, define_nyi\n'), ((8240, 8277), 'pycket.prims.expose.default', 'default', 
(['values.W_List', 'values.w_null'], {}), '(values.W_List, values.w_null)\n', (8247, 8277), False, 'from pycket.prims.expose import default, expose, procedure, define_nyi\n'), ((8418, 8455), 'pycket.prims.expose.default', 'default', (['values.W_List', 'values.w_null'], {}), '(values.W_List, values.w_null)\n', (8425, 8455), False, 'from pycket.prims.expose import default, expose, procedure, define_nyi\n'), ((8735, 8786), 'pycket.error.SchemeException', 'SchemeException', (['"""hash-set!: given immutable table"""'], {}), "('hash-set!: given immutable table')\n", (8750, 8786), False, 'from pycket.error import SchemeException\n'), ((9420, 9477), 'pycket.error.SchemeException', 'SchemeException', (['"""hash-set: not given an immutable table"""'], {}), "('hash-set: not given an immutable table')\n", (9435, 9477), False, 'from pycket.error import SchemeException\n'), ((9599, 9633), 'pycket.interpreter.return_value', 'return_value', (['new_table', 'env', 'cont'], {}), '(new_table, env, cont)\n', (9611, 9633), False, 'from pycket.interpreter import return_value\n'), ((9918, 9946), 'pycket.interpreter.return_value', 'return_value', (['val', 'env', 'cont'], {}), '(val, env, cont)\n', (9930, 9946), False, 'from pycket.interpreter import return_value\n'), ((10078, 10105), 'pycket.prims.expose.default.call', 'default.call', (['[]', 'env', 'cont'], {}), '([], env, cont)\n', (10090, 10105), False, 'from pycket.prims.expose import default, expose, procedure, define_nyi\n'), ((10202, 10232), 'pycket.prims.expose.default', 'default', (['values.W_Object', 'None'], {}), '(values.W_Object, None)\n', (10209, 10232), False, 'from pycket.prims.expose import default, expose, procedure, define_nyi\n'), ((10507, 10567), 'pycket.error.SchemeException', 'SchemeException', (['"""hash-remove!: expected mutable hash table"""'], {}), "('hash-remove!: expected mutable hash table')\n", (10522, 10567), False, 'from pycket.error import SchemeException\n'), ((10762, 10823), 'pycket.error.SchemeException', 'SchemeException', (['"""hash-remove: expected immutable hash table"""'], {}), "('hash-remove: expected immutable hash table')\n", (10777, 10823), False, 'from pycket.error import SchemeException\n'), ((11089, 11127), 'pycket.interpreter.return_value', 'return_value', (['values.w_void', 'env', 'cont'], {}), '(values.w_void, env, cont)\n', (11101, 11127), False, 'from pycket.interpreter import return_value\n'), ((11536, 11574), 'pycket.interpreter.return_value', 'return_value', (['values.w_void', 'env', 'cont'], {}), '(values.w_void, env, cont)\n', (11548, 11574), False, 'from pycket.interpreter import return_value\n'), ((11945, 11984), 'pycket.interpreter.return_value', 'return_value', (['values.w_false', 'env', 'cont'], {}), '(values.w_false, env, cont)\n', (11957, 11984), False, 'from pycket.interpreter import return_value\n'), ((12247, 12285), 'pycket.interpreter.return_value', 'return_value', (['values.w_true', 'env', 'cont'], {}), '(values.w_true, env, cont)\n', (12259, 12285), False, 'from pycket.interpreter import return_value\n'), ((14243, 14271), 'pycket.interpreter.return_value', 'return_value', (['new', 'env', 'cont'], {}), '(new, env, cont)\n', (14255, 14271), False, 'from pycket.interpreter import return_value\n'), ((14550, 14578), 'pycket.interpreter.return_value', 'return_value', (['new', 'env', 'cont'], {}), '(new, env, cont)\n', (14562, 14578), False, 'from pycket.interpreter import return_value\n'), ((14647, 14675), 'pycket.interpreter.return_value', 'return_value', (['new', 'env', 'cont'], {}), '(new, env, 
cont)\n', (14659, 14675), False, 'from pycket.interpreter import return_value\n'), ((15647, 15680), 'rpython.rlib.objectmodel.compute_hash', 'objectmodel.compute_hash', (['v.value'], {}), '(v.value)\n', (15671, 15680), False, 'from rpython.rlib import jit, objectmodel\n'), ((1667, 1697), 'pycket.interpreter.return_value', 'return_value', (['w_key', 'env', 'cont'], {}), '(w_key, env, cont)\n', (1679, 1697), False, 'from pycket.interpreter import return_value\n'), ((1747, 1777), 'pycket.interpreter.return_value', 'return_value', (['w_val', 'env', 'cont'], {}), '(w_val, env, cont)\n', (1759, 1777), False, 'from pycket.interpreter import return_value\n'), ((1835, 1869), 'pycket.values.Values._make2', 'values.Values._make2', (['w_key', 'w_val'], {}), '(w_key, w_val)\n', (1855, 1869), False, 'from pycket import values, values_string\n'), ((1889, 1923), 'pycket.interpreter.return_multi_vals', 'return_multi_vals', (['vals', 'env', 'cont'], {}), '(vals, env, cont)\n', (1906, 1923), False, 'from pycket.interpreter import return_value, return_multi_vals\n'), ((1972, 2004), 'pycket.values.W_Cons.make', 'values.W_Cons.make', (['w_key', 'w_val'], {}), '(w_key, w_val)\n', (1990, 2004), False, 'from pycket import values, values_string\n'), ((2024, 2053), 'pycket.interpreter.return_value', 'return_value', (['vals', 'env', 'cont'], {}), '(vals, env, cont)\n', (2036, 2053), False, 'from pycket.interpreter import return_value\n'), ((2133, 2186), 'pycket.error.SchemeException', 'SchemeException', (['"""hash-iterate-key: invalid position"""'], {}), "('hash-iterate-key: invalid position')\n", (2148, 2186), False, 'from pycket.error import SchemeException\n'), ((2224, 2277), 'pycket.error.SchemeException', 'SchemeException', (['"""hash-iterate-key: invalid position"""'], {}), "('hash-iterate-key: invalid position')\n", (2239, 2277), False, 'from pycket.error import SchemeException\n'), ((3687, 3725), 'pycket.interpreter.return_value', 'return_value', (['values.w_void', 'env', 'cont'], {}), '(values.w_void, env, cont)\n', (3699, 3725), False, 'from pycket.interpreter import return_value\n'), ((4849, 4879), 'pycket.interpreter.return_value', 'return_value', (['w_acc', 'env', 'cont'], {}), '(w_acc, env, cont)\n', (4861, 4879), False, 'from pycket.interpreter import return_value\n'), ((5583, 5636), 'pycket.error.SchemeException', 'SchemeException', (["('%s: expected list of pairs' % fname)"], {}), "('%s: expected list of pairs' % fname)\n", (5598, 5636), False, 'from pycket.error import SchemeException\n'), ((15730, 15763), 'rpython.rlib.objectmodel.compute_hash', 'objectmodel.compute_hash', (['v.value'], {}), '(v.value)\n', (15754, 15763), False, 'from rpython.rlib import jit, objectmodel\n'), ((15789, 15816), 'rpython.rlib.objectmodel.compute_hash', 'objectmodel.compute_hash', (['v'], {}), '(v)\n', (15813, 15816), False, 'from rpython.rlib import jit, objectmodel\n')]
|
import sys
try:
from PIL import Image, ImageFilter
except ImportError:
print("error:", sys.argv[0], "requires Pillow - install it via 'pip install Pillow'")
sys.exit(2)
if len(sys.argv) != 3:
print("error - usage:", sys.argv[0], "input_file output_file")
sys.exit(2)
input_filename = sys.argv[1]
output_filename = sys.argv[2]
#Read image
try:
im = Image.open(input_filename)
except OSError:
print("error - can't open file:", input_file)
sys.exit(2)
#Apply a filter to the image
im_sharp = im.filter(ImageFilter.SHARPEN)
#Save the filtered image to a new file
im_sharp.save(output_filename, 'JPEG')
|
[
"sys.exit",
"PIL.Image.open"
] |
[((278, 289), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (286, 289), False, 'import sys\n'), ((377, 403), 'PIL.Image.open', 'Image.open', (['input_filename'], {}), '(input_filename)\n', (387, 403), False, 'from PIL import Image, ImageFilter\n'), ((170, 181), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (178, 181), False, 'import sys\n'), ((474, 485), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (482, 485), False, 'import sys\n')]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('vendors', '0028_auto_20180211_1350'),
('contracts', '0024_auto_20180205_0342'),
]
operations = [
migrations.RunSQL("UPDATE django_content_type SET app_label = 'contracts' WHERE app_label = 'contract';"),
migrations.RunSQL("ALTER TABLE IF EXISTS contract_contract RENAME TO contracts_contract;"),
migrations.RunSQL("ALTER TABLE IF EXISTS contract_fpdsload RENAME TO contracts_fpdsload;"),
migrations.RunSQL("ALTER TABLE IF EXISTS contract_placeofperformance RENAME TO contracts_placeofperformance;"),
]
|
[
"django.db.migrations.RunSQL"
] |
[((301, 416), 'django.db.migrations.RunSQL', 'migrations.RunSQL', (['"""UPDATE django_content_type SET app_label = \'contracts\' WHERE app_label = \'contract\';"""'], {}), '(\n "UPDATE django_content_type SET app_label = \'contracts\' WHERE app_label = \'contract\';"\n )\n', (318, 416), False, 'from django.db import migrations, models\n'), ((424, 519), 'django.db.migrations.RunSQL', 'migrations.RunSQL', (['"""ALTER TABLE IF EXISTS contract_contract RENAME TO contracts_contract;"""'], {}), "(\n 'ALTER TABLE IF EXISTS contract_contract RENAME TO contracts_contract;')\n", (441, 519), False, 'from django.db import migrations, models\n'), ((524, 619), 'django.db.migrations.RunSQL', 'migrations.RunSQL', (['"""ALTER TABLE IF EXISTS contract_fpdsload RENAME TO contracts_fpdsload;"""'], {}), "(\n 'ALTER TABLE IF EXISTS contract_fpdsload RENAME TO contracts_fpdsload;')\n", (541, 619), False, 'from django.db import migrations, models\n'), ((624, 744), 'django.db.migrations.RunSQL', 'migrations.RunSQL', (['"""ALTER TABLE IF EXISTS contract_placeofperformance RENAME TO contracts_placeofperformance;"""'], {}), "(\n 'ALTER TABLE IF EXISTS contract_placeofperformance RENAME TO contracts_placeofperformance;'\n )\n", (641, 744), False, 'from django.db import migrations, models\n')]
|
from enum import Enum, auto
class State(Enum):
USR_START = auto()
#
# SYS_GENRE = auto()
# USR_GENRE = auto()
# SYS_WEEKDAY = auto()
USR_WHAT_FAV = auto()
SYS_CHECK_POSITIVE = auto()
SYS_CHECK_NEGATIVE = auto()
SYS_CHECK_NEUTRAL = auto()
SYS_GET_REASON = auto()
USR_REPEAT = auto()
SYS_AGREED = auto()
SYS_DISAGREED = auto()
USR_ASSENT_YES = auto()
USR_ASSENT_NO = auto()
USR_MY_FAV = auto()
SYS_YES = auto()
SYS_NO = auto()
USR_WHY = auto()
USR_MY_FAV_STORY = auto()
# USR_WEEKDAY = auto()
SYS_FRIDAY = auto()
USR_FRIDAY = auto()
SYS_SMTH = auto()
USR_MY_FAV_DAY = auto()
#
SYS_ERR = auto()
USR_ERR = auto()
|
[
"enum.auto"
] |
[((65, 71), 'enum.auto', 'auto', ([], {}), '()\n', (69, 71), False, 'from enum import Enum, auto\n'), ((174, 180), 'enum.auto', 'auto', ([], {}), '()\n', (178, 180), False, 'from enum import Enum, auto\n'), ((206, 212), 'enum.auto', 'auto', ([], {}), '()\n', (210, 212), False, 'from enum import Enum, auto\n'), ((238, 244), 'enum.auto', 'auto', ([], {}), '()\n', (242, 244), False, 'from enum import Enum, auto\n'), ((269, 275), 'enum.auto', 'auto', ([], {}), '()\n', (273, 275), False, 'from enum import Enum, auto\n'), ((297, 303), 'enum.auto', 'auto', ([], {}), '()\n', (301, 303), False, 'from enum import Enum, auto\n'), ((321, 327), 'enum.auto', 'auto', ([], {}), '()\n', (325, 327), False, 'from enum import Enum, auto\n'), ((345, 351), 'enum.auto', 'auto', ([], {}), '()\n', (349, 351), False, 'from enum import Enum, auto\n'), ((372, 378), 'enum.auto', 'auto', ([], {}), '()\n', (376, 378), False, 'from enum import Enum, auto\n'), ((400, 406), 'enum.auto', 'auto', ([], {}), '()\n', (404, 406), False, 'from enum import Enum, auto\n'), ((427, 433), 'enum.auto', 'auto', ([], {}), '()\n', (431, 433), False, 'from enum import Enum, auto\n'), ((451, 457), 'enum.auto', 'auto', ([], {}), '()\n', (455, 457), False, 'from enum import Enum, auto\n'), ((472, 478), 'enum.auto', 'auto', ([], {}), '()\n', (476, 478), False, 'from enum import Enum, auto\n'), ((492, 498), 'enum.auto', 'auto', ([], {}), '()\n', (496, 498), False, 'from enum import Enum, auto\n'), ((513, 519), 'enum.auto', 'auto', ([], {}), '()\n', (517, 519), False, 'from enum import Enum, auto\n'), ((543, 549), 'enum.auto', 'auto', ([], {}), '()\n', (547, 549), False, 'from enum import Enum, auto\n'), ((594, 600), 'enum.auto', 'auto', ([], {}), '()\n', (598, 600), False, 'from enum import Enum, auto\n'), ((618, 624), 'enum.auto', 'auto', ([], {}), '()\n', (622, 624), False, 'from enum import Enum, auto\n'), ((640, 646), 'enum.auto', 'auto', ([], {}), '()\n', (644, 646), False, 'from enum import Enum, auto\n'), ((668, 674), 'enum.auto', 'auto', ([], {}), '()\n', (672, 674), False, 'from enum import Enum, auto\n'), ((695, 701), 'enum.auto', 'auto', ([], {}), '()\n', (699, 701), False, 'from enum import Enum, auto\n'), ((716, 722), 'enum.auto', 'auto', ([], {}), '()\n', (720, 722), False, 'from enum import Enum, auto\n')]
|
# -*- coding: utf-8 -*-
"""Class for tests of pysiaalarm."""
import json
import logging
import random
import socket
import threading
import time
import pytest
from mock import patch
from pysiaalarm import InvalidAccountFormatError
from pysiaalarm import InvalidAccountLengthError
from pysiaalarm import InvalidKeyFormatError
from pysiaalarm import InvalidKeyLengthError
from pysiaalarm import SIAAccount
from pysiaalarm import SIAClient
from pysiaalarm import SIAEvent
from tests.test_client import client_program
from tests.test_utils import create_test_items
_LOGGER = logging.getLogger(__name__)
KEY = "<KEY>"
ACCOUNT = "1111"
HOST = "localhost"
PORT = 7777
def func(event: SIAEvent):
"""Pass for testing."""
pass
class testSIA(object):
"""Class for pysiaalarm tests."""
@pytest.mark.parametrize(
"line, account, type, code",
[
(
'98100078"*SIA-DCS"5994L0#AAA[5AB718E008C616BF16F6468033A11326B0F7546CAB230910BCA10E4DEBA42283C436E4F8EFF50931070DDE36D5BB5F0C',
"AAA",
"",
"",
),
(
'2E680078"SIA-DCS"6002L0#AAA[|Nri1/CL501]_14:12:04,09-25-2019',
"AAA",
"Closing Report",
"CL",
),
],
)
def test_event_parsing(self, line, account, type, code):
"""Test event parsing methods."""
event = SIAEvent(line)
assert event.code == code
assert event.type == type
assert event.account == account
@pytest.mark.parametrize(
"key, account, port, error",
[
("ZZZZZZZZZZZZZZZZ", ACCOUNT, 7777, InvalidKeyFormatError),
("158888888888888", ACCOUNT, 7777, InvalidKeyLengthError),
("1688888888888888", ACCOUNT, 7777, None),
("23888888888888888888888", ACCOUNT, 7777, InvalidKeyLengthError),
("248888888888888888888888", ACCOUNT, 7777, None),
("3188888888888888888888888888888", ACCOUNT, 7777, InvalidKeyLengthError),
("32888888888888888888888888888888", ACCOUNT, 7777, None),
(KEY, "22", 7777, InvalidAccountLengthError),
(KEY, "ZZZ", 7777, InvalidAccountFormatError),
],
)
def test_sia_key_account_errors(self, key, account, port, error):
"""Test sia client behaviour."""
try:
SIAClient(
host="",
port=port,
accounts=[SIAAccount(account_id=account, key=key)],
function=func,
)
assert False if error else True
except Exception as exp:
assert isinstance(exp, error)
@pytest.mark.parametrize("config_file", [("tests\\unencrypted_config.json")])
def test_client(self, config_file):
"""Test the client.
Arguments:
config_file {str} -- Filename of the config.
"""
try:
with open(config_file, "r") as f:
config = json.load(f)
except: # noqa: E722
config = {"host": HOST, "port": PORT, "account_id": ACCOUNT, "key": None}
events = []
def func_append(event: SIAEvent):
events.append(event)
siac = SIAClient(
host="",
port=config["port"],
accounts=[SIAAccount(account_id=config["account_id"], key=config["key"])],
function=func_append,
)
siac.start()
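        # Flags handed to the test client; the counts assertion below expects
        # exactly one of the five generated events to be valid.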
tests = [
{"code": False, "crc": False, "account": False, "time": False},
{"code": True, "crc": False, "account": False, "time": False},
{"code": False, "crc": True, "account": False, "time": False},
{"code": False, "crc": False, "account": True, "time": False},
{"code": False, "crc": False, "account": False, "time": True},
]
t = threading.Thread(
target=client_program, name="test_client", args=(config, 1, tests)
)
t.daemon = True
t.start() # stops after the five events have been sent.
# run for 30 seconds
time.sleep(30)
siac.stop()
assert siac.counts == {
"events": 5,
"valid_events": 1,
"errors": {
"crc": 1,
"timestamp": 1,
"account": 1,
"code": 1,
"format": 0,
"user_code": 0,
},
}
assert len(events) == 1
|
[
"threading.Thread",
"json.load",
"time.sleep",
"pysiaalarm.SIAAccount",
"pytest.mark.parametrize",
"pysiaalarm.SIAEvent",
"logging.getLogger"
] |
[((574, 601), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (591, 601), False, 'import logging\n'), ((800, 1116), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""line, account, type, code"""', '[(\n \'98100078"*SIA-DCS"5994L0#AAA[5AB718E008C616BF16F6468033A11326B0F7546CAB230910BCA10E4DEBA42283C436E4F8EFF50931070DDE36D5BB5F0C\'\n , \'AAA\', \'\', \'\'), (\n \'2E680078"SIA-DCS"6002L0#AAA[|Nri1/CL501]_14:12:04,09-25-2019\', \'AAA\',\n \'Closing Report\', \'CL\')]'], {}), '(\'line, account, type, code\', [(\n \'98100078"*SIA-DCS"5994L0#AAA[5AB718E008C616BF16F6468033A11326B0F7546CAB230910BCA10E4DEBA42283C436E4F8EFF50931070DDE36D5BB5F0C\'\n , \'AAA\', \'\', \'\'), (\n \'2E680078"SIA-DCS"6002L0#AAA[|Nri1/CL501]_14:12:04,09-25-2019\', \'AAA\',\n \'Closing Report\', \'CL\')])\n', (823, 1116), False, 'import pytest\n'), ((1562, 2159), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""key, account, port, error"""', "[('ZZZZZZZZZZZZZZZZ', ACCOUNT, 7777, InvalidKeyFormatError), (\n '158888888888888', ACCOUNT, 7777, InvalidKeyLengthError), (\n '1688888888888888', ACCOUNT, 7777, None), ('23888888888888888888888',\n ACCOUNT, 7777, InvalidKeyLengthError), ('248888888888888888888888',\n ACCOUNT, 7777, None), ('3188888888888888888888888888888', ACCOUNT, 7777,\n InvalidKeyLengthError), ('32888888888888888888888888888888', ACCOUNT, \n 7777, None), (KEY, '22', 7777, InvalidAccountLengthError), (KEY, 'ZZZ',\n 7777, InvalidAccountFormatError)]"], {}), "('key, account, port, error', [('ZZZZZZZZZZZZZZZZ',\n ACCOUNT, 7777, InvalidKeyFormatError), ('158888888888888', ACCOUNT, \n 7777, InvalidKeyLengthError), ('1688888888888888', ACCOUNT, 7777, None),\n ('23888888888888888888888', ACCOUNT, 7777, InvalidKeyLengthError), (\n '248888888888888888888888', ACCOUNT, 7777, None), (\n '3188888888888888888888888888888', ACCOUNT, 7777, InvalidKeyLengthError\n ), ('32888888888888888888888888888888', ACCOUNT, 7777, None), (KEY,\n '22', 7777, InvalidAccountLengthError), (KEY, 'ZZZ', 7777,\n InvalidAccountFormatError)])\n", (1585, 2159), False, 'import pytest\n'), ((2703, 2777), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""config_file"""', "['tests\\\\unencrypted_config.json']"], {}), "('config_file', ['tests\\\\unencrypted_config.json'])\n", (2726, 2777), False, 'import pytest\n'), ((1433, 1447), 'pysiaalarm.SIAEvent', 'SIAEvent', (['line'], {}), '(line)\n', (1441, 1447), False, 'from pysiaalarm import SIAEvent\n'), ((3899, 3987), 'threading.Thread', 'threading.Thread', ([], {'target': 'client_program', 'name': '"""test_client"""', 'args': '(config, 1, tests)'}), "(target=client_program, name='test_client', args=(config, 1,\n tests))\n", (3915, 3987), False, 'import threading\n'), ((4133, 4147), 'time.sleep', 'time.sleep', (['(30)'], {}), '(30)\n', (4143, 4147), False, 'import time\n'), ((3022, 3034), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3031, 3034), False, 'import json\n'), ((3351, 3413), 'pysiaalarm.SIAAccount', 'SIAAccount', ([], {'account_id': "config['account_id']", 'key': "config['key']"}), "(account_id=config['account_id'], key=config['key'])\n", (3361, 3413), False, 'from pysiaalarm import SIAAccount\n'), ((2491, 2530), 'pysiaalarm.SIAAccount', 'SIAAccount', ([], {'account_id': 'account', 'key': 'key'}), '(account_id=account, key=key)\n', (2501, 2530), False, 'from pysiaalarm import SIAAccount\n')]
|
import json
from requests_html import HTMLSession
from helpers import compare_trees, get_content_tree
MAIN_URL = r'http://docente.ifrn.edu.br/abrahaolopes/2017.1-integrado/2.02401.1v-poo'
def main():
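    # Fetch the current content tree, diff it against the stored snapshot,
    # report any changes, then persist the fresh tree.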
session = HTMLSession()
current_tree = get_content_tree(MAIN_URL, session)
with open('storage/tree.json', 'r') as stored_tree_file:
stored_tree = json.load(stored_tree_file)
difference = compare_trees(
stored_tree,
current_tree
)
if difference:
for item in difference:
category = item['category'].upper()
category = category.rjust(8)
path = item['path']
url = item['url']
print(
f'{category} | {path}'
)
print(
f'{url}\n'
)
with open('storage/tree.json', 'w') as stored_tree_file:
stored_tree_file.write(
json.dumps(current_tree)
)
if __name__ == "__main__":
main()
|
[
"json.load",
"helpers.compare_trees",
"json.dumps",
"helpers.get_content_tree",
"requests_html.HTMLSession"
] |
[((219, 232), 'requests_html.HTMLSession', 'HTMLSession', ([], {}), '()\n', (230, 232), False, 'from requests_html import HTMLSession\n'), ((252, 287), 'helpers.get_content_tree', 'get_content_tree', (['MAIN_URL', 'session'], {}), '(MAIN_URL, session)\n', (268, 287), False, 'from helpers import compare_trees, get_content_tree\n'), ((417, 457), 'helpers.compare_trees', 'compare_trees', (['stored_tree', 'current_tree'], {}), '(stored_tree, current_tree)\n', (430, 457), False, 'from helpers import compare_trees, get_content_tree\n'), ((371, 398), 'json.load', 'json.load', (['stored_tree_file'], {}), '(stored_tree_file)\n', (380, 398), False, 'import json\n'), ((936, 960), 'json.dumps', 'json.dumps', (['current_tree'], {}), '(current_tree)\n', (946, 960), False, 'import json\n')]
|
import sys
from io import StringIO
def zen_of_python() -> list[str]:
"""
Dump the Zen of Python into a variable
https://stackoverflow.com/a/23794519
"""
zen = StringIO()
old_stdout = sys.stdout
sys.stdout = zen
import this # noqa F401
sys.stdout = old_stdout
return zen.getvalue().splitlines()
def main():
import pyperclip
zen = "\n".join(zen_of_python())
pyperclip.copy(zen)
print("The Zen of Python has been copied to your clipboard")
if __name__ == "__main__":
main()
|
[
"io.StringIO",
"pyperclip.copy"
] |
[((181, 191), 'io.StringIO', 'StringIO', ([], {}), '()\n', (189, 191), False, 'from io import StringIO\n'), ((415, 434), 'pyperclip.copy', 'pyperclip.copy', (['zen'], {}), '(zen)\n', (429, 434), False, 'import pyperclip\n')]
|
import unittest
from exceptions import RangeValidationException
from weight_calculator import calculate_weights, validate_rages, validate_grades_and_ranges
class WeightCalculatorTest(unittest.TestCase):
def range_validation_tests(self):
self.assertFalse(validate_rages({
"Final": [50, 60],
"Midterms": [25, 30],
"Assignments": [0, 5]
}))
self.assertFalse(validate_rages({
"Final": [50, 60],
"Midterms": [50, 60],
"Assignments": [10, 15]
}))
self.assertFalse(validate_rages({
"Final": [50, 60],
"Midterms": [25, 30],
"Assignments": [15, 10]
}))
self.assertTrue(validate_rages({
"Final": [50, 60],
"Midterms": [25, 30],
"Assignments": [5, 15]
}))
def range_and_weights_validation_tests(self):
self.assertFalse(validate_grades_and_ranges(
{
"Final": [100, 50],
"Midterms": [50, 100],
"Assignments": [100, 0, 100]
},
{
"Final": [50, 60],
"Midterms": [25, 30],
"Assignments": [15, 10]
}))
self.assertFalse(validate_grades_and_ranges(
{
"Finals": [100, 50],
"Midterms": [50, 100],
"Assignments": [100, 0, 100]
},
{
"Final": [50, 60],
"Midterms": [25, 30],
"Assignments": [10, 15]
}))
self.assertFalse(validate_grades_and_ranges(
{
"Final": [100, 50],
"Midterms": [50, 100],
"Assignments": [100, 0, 100]
},
{
"Finals": [50, 60],
"Midterms": [25, 30],
"Assignments": [10, 15]
}))
self.assertTrue(validate_grades_and_ranges(
{
"Final": [100, 50],
"Midterms": [50, 100],
"Assignments": [100, 0, 100]
},
{
"Final": [50, 60],
"Midterms": [25, 30],
"Assignments": [10, 15]
}))
def test(self):
ranges = {
"Final": [50, 60],
"Midterms": [30, 40],
"Assignments": [5, 30],
}
bad_ranges = {
"Finals": [50, 60],
"Midterms": [30, 40],
"Assignments": [5, 30],
}
grades = {
"Final": [100, 100, 99],
"Midterms": [50, 60],
"Assignments": [0, 0, 2, 5, 10]
}
weights = calculate_weights(grades, ranges)
self.assertEqual(weights, {
"Final": 60,
"Midterms": 35,
"Assignments": 5,
})
self.assertRaises(RangeValidationException, calculate_weights, bad_ranges, ranges)
|
[
"weight_calculator.validate_grades_and_ranges",
"weight_calculator.calculate_weights",
"weight_calculator.validate_rages"
] |
[((2751, 2784), 'weight_calculator.calculate_weights', 'calculate_weights', (['grades', 'ranges'], {}), '(grades, ranges)\n', (2768, 2784), False, 'from weight_calculator import calculate_weights, validate_rages, validate_grades_and_ranges\n'), ((270, 355), 'weight_calculator.validate_rages', 'validate_rages', (["{'Final': [50, 60], 'Midterms': [25, 30], 'Assignments': [0, 5]}"], {}), "({'Final': [50, 60], 'Midterms': [25, 30], 'Assignments': [0, 5]}\n )\n", (284, 355), False, 'from weight_calculator import calculate_weights, validate_rages, validate_grades_and_ranges\n'), ((424, 510), 'weight_calculator.validate_rages', 'validate_rages', (["{'Final': [50, 60], 'Midterms': [50, 60], 'Assignments': [10, 15]}"], {}), "({'Final': [50, 60], 'Midterms': [50, 60], 'Assignments': [10,\n 15]})\n", (438, 510), False, 'from weight_calculator import calculate_weights, validate_rages, validate_grades_and_ranges\n'), ((580, 666), 'weight_calculator.validate_rages', 'validate_rages', (["{'Final': [50, 60], 'Midterms': [25, 30], 'Assignments': [15, 10]}"], {}), "({'Final': [50, 60], 'Midterms': [25, 30], 'Assignments': [15,\n 10]})\n", (594, 666), False, 'from weight_calculator import calculate_weights, validate_rages, validate_grades_and_ranges\n'), ((735, 820), 'weight_calculator.validate_rages', 'validate_rages', (["{'Final': [50, 60], 'Midterms': [25, 30], 'Assignments': [5, 15]}"], {}), "({'Final': [50, 60], 'Midterms': [25, 30], 'Assignments': [5,\n 15]})\n", (749, 820), False, 'from weight_calculator import calculate_weights, validate_rages, validate_grades_and_ranges\n'), ((940, 1117), 'weight_calculator.validate_grades_and_ranges', 'validate_grades_and_ranges', (["{'Final': [100, 50], 'Midterms': [50, 100], 'Assignments': [100, 0, 100]}", "{'Final': [50, 60], 'Midterms': [25, 30], 'Assignments': [15, 10]}"], {}), "({'Final': [100, 50], 'Midterms': [50, 100],\n 'Assignments': [100, 0, 100]}, {'Final': [50, 60], 'Midterms': [25, 30],\n 'Assignments': [15, 10]})\n", (966, 1117), False, 'from weight_calculator import calculate_weights, validate_rages, validate_grades_and_ranges\n'), ((1286, 1464), 'weight_calculator.validate_grades_and_ranges', 'validate_grades_and_ranges', (["{'Finals': [100, 50], 'Midterms': [50, 100], 'Assignments': [100, 0, 100]}", "{'Final': [50, 60], 'Midterms': [25, 30], 'Assignments': [10, 15]}"], {}), "({'Finals': [100, 50], 'Midterms': [50, 100],\n 'Assignments': [100, 0, 100]}, {'Final': [50, 60], 'Midterms': [25, 30],\n 'Assignments': [10, 15]})\n", (1312, 1464), False, 'from weight_calculator import calculate_weights, validate_rages, validate_grades_and_ranges\n'), ((1633, 1812), 'weight_calculator.validate_grades_and_ranges', 'validate_grades_and_ranges', (["{'Final': [100, 50], 'Midterms': [50, 100], 'Assignments': [100, 0, 100]}", "{'Finals': [50, 60], 'Midterms': [25, 30], 'Assignments': [10, 15]}"], {}), "({'Final': [100, 50], 'Midterms': [50, 100],\n 'Assignments': [100, 0, 100]}, {'Finals': [50, 60], 'Midterms': [25, 30\n ], 'Assignments': [10, 15]})\n", (1659, 1812), False, 'from weight_calculator import calculate_weights, validate_rages, validate_grades_and_ranges\n'), ((1979, 2156), 'weight_calculator.validate_grades_and_ranges', 'validate_grades_and_ranges', (["{'Final': [100, 50], 'Midterms': [50, 100], 'Assignments': [100, 0, 100]}", "{'Final': [50, 60], 'Midterms': [25, 30], 'Assignments': [10, 15]}"], {}), "({'Final': [100, 50], 'Midterms': [50, 100],\n 'Assignments': [100, 0, 100]}, {'Final': [50, 60], 'Midterms': [25, 30],\n 'Assignments': [10, 
15]})\n", (2005, 2156), False, 'from weight_calculator import calculate_weights, validate_rages, validate_grades_and_ranges\n')]
|
import torch
import torch.nn.functional as F
from torch.nn import Parameter
from .metrics import Beta_divergence
from .base import Base
from tqdm import tqdm
def _mu_update(param, pos, gamma, l1_reg, l2_reg, constant_rows=None):
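    """Multiplicative-update step: param *= (relu(pos - grad) / (pos + regularisation))**gamma; the first `constant_rows` rows are left unchanged."""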
if param.grad is None:
return
# prevent negative term, very likely to happen with kl divergence
multiplier:torch.Tensor = F.relu(pos - param.grad, inplace=True)
if l1_reg > 0:
pos.add_(l1_reg)
if l2_reg > 0:
if pos.shape != param.shape:
pos = pos + l2_reg * param
else:
pos.add_(l2_reg * param)
multiplier.div_(pos)
if gamma != 1:
multiplier.pow_(gamma)
# Fill the first `constant_rows` of the multiplier with 1s
# to leave them unchanged
if constant_rows is not None:
multiplier[:constant_rows,:].fill_(1.0)
param.mul_(multiplier)
class _NMF(Base):
def __init__(self, W_size, H_size, rank):
super().__init__()
self.rank = rank
self.W = Parameter(torch.rand(*W_size).double())
self.H = Parameter(torch.rand(*H_size).double())
def forward(self, H=None, W=None):
if H is None:
H = self.H
if W is None:
W = self.W
return self.reconstruct(H, W)
def reconstruct(self, H, W):
raise NotImplementedError
def get_W_positive(self, WH, beta, H_sum) -> (torch.Tensor, None or torch.Tensor):
raise NotImplementedError
def get_H_positive(self, WH, beta, W_sum) -> (torch.Tensor, None or torch.Tensor):
raise NotImplementedError
def fit(self,
V,
W=None,
H=None,
fix_h_rows=None,
update_W=True,
update_H=True,
update_H_after_iter=None,
beta=1,
tol=1e-5,
min_loss=None,
max_iter=200,
min_iter=20,
verbose=0,
initial='random',
alpha=0,
l1_ratio=0,
lower_thresh=1e-8,
):
self.fix_neg.value = lower_thresh
V = self.fix_neg(V)
if W is None:
            pass # will do special initialization in the future
else:
self.W.data.copy_(W)
self.W.requires_grad = update_W
if H is None:
pass
else:
self.H.data.copy_(H)
self.H.requires_grad = update_H
if update_H_after_iter is None:
update_H_after_iter = max_iter
if beta < 1:
gamma = 1 / (2 - beta)
elif beta > 2:
gamma = 1 / (beta - 1)
else:
gamma = 1
l1_reg = alpha * l1_ratio
l2_reg = alpha * (1 - l1_ratio)
loss_scale = torch.prod(torch.tensor(V.shape)).float()
H_sum, W_sum = None, None
with tqdm(total=max_iter, disable=not verbose) as pbar:
for n_iter in range(max_iter):
if n_iter >= update_H_after_iter:
update_H = True
self.H.requires_grad = True
if self.W.requires_grad:
self.zero_grad()
WH = self.reconstruct(self.H.detach(), self.W)
loss = Beta_divergence(self.fix_neg(WH), V, beta)
loss.backward()
with torch.no_grad():
positive_comps, H_sum = self.get_W_positive(WH, beta, H_sum)
_mu_update(self.W, positive_comps, gamma, l1_reg, l2_reg)
W_sum = None
if self.H.requires_grad:
self.zero_grad()
WH = self.reconstruct(self.H, self.W.detach())
loss = Beta_divergence(self.fix_neg(WH), V, beta)
loss.backward()
with torch.no_grad():
positive_comps, W_sum = self.get_H_positive(WH, beta, W_sum)
_mu_update(self.H, positive_comps, gamma, l1_reg, l2_reg, fix_h_rows)
H_sum = None
loss = loss.div_(loss_scale).item()
pbar.set_postfix(loss=loss)
# pbar.set_description('Beta loss=%.4f' % error)
pbar.update()
if not n_iter:
loss_init = loss
elif (previous_loss - loss) / loss_init < tol and n_iter >= min_iter:
if min_loss is not None and loss > min_loss: pass
else: break
previous_loss = loss
return n_iter
def fit_transform(self, *args, **kwargs):
n_iter = self.fit(*args, **kwargs)
return n_iter, self.forward()
class NMF(_NMF):
def __init__(self, Vshape, rank=None):
self.K, self.M = Vshape
if not rank:
rank = self.K
super().__init__((self.K, rank), (rank, self.M), rank)
def reconstruct(self, H, W):
return W @ H
def get_W_positive(self, WH, beta, H_sum):
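        # Denominator of the multiplicative W update: per-component sums of H for beta=1 (KL), otherwise (WH)^(beta-1) @ H.t().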
H = self.H
if beta == 1:
if H_sum is None:
H_sum = H.sum(1)
denominator = H_sum[None, :]
else:
if beta != 2:
WH = WH.pow(beta - 1)
WHHt = WH @ H.t()
denominator = WHHt
return denominator, H_sum
def get_H_positive(self, WH, beta, W_sum):
W = self.W
if beta == 1:
if W_sum is None:
W_sum = W.sum(0) # shape(n_components, )
denominator = W_sum[:, None]
else:
if beta != 2:
WH = WH.pow(beta - 1)
WtWH = W.t() @ WH
denominator = WtWH
return denominator, W_sum
def sort(self):
_, maxidx = self.W.data.max(0)
_, idx = maxidx.sort()
self.W.data = self.W.data[:, idx]
self.H.data = self.H.data[idx]
class NMFD(_NMF):
def __init__(self, Vshape, T=1, rank=None):
self.K, self.M = Vshape
if not rank:
rank = self.K
self.pad_size = T - 1
super().__init__((self.K, rank, T), (rank, self.M - T + 1), rank)
def reconstruct(self, H, W):
return F.conv1d(H[None, :], W.flip(2), padding=self.pad_size)[0]
def get_W_positive(self, WH, beta, H_sum):
H = self.H
if beta == 1:
if H_sum is None:
H_sum = H.sum(1)
denominator = H_sum[None, :, None]
else:
if beta != 2:
WH = WH.pow(beta - 1)
WHHt = F.conv1d(WH[:, None], H[:, None])
denominator = WHHt
return denominator, H_sum
def get_H_positive(self, WH, beta, W_sum):
W = self.W
if beta == 1:
if W_sum is None:
W_sum = W.sum((0, 2))
denominator = W_sum[:, None]
else:
if beta != 2:
WH = WH.pow(beta - 1)
WtWH = F.conv1d(WH[None, :], W.transpose(0, 1))[0]
denominator = WtWH
return denominator, W_sum
def sort(self):
_, maxidx = self.W.data.sum(2).max(0)
_, idx = maxidx.sort()
self.W.data = self.W.data[:, idx]
self.H.data = self.H.data[idx]
class NMF2D(_NMF):
def __init__(self, Vshape, win=1, rank=None):
try:
F, T = win
except:
F = T = win
if len(Vshape) == 3:
self.channel, self.K, self.M = Vshape
else:
self.K, self.M = Vshape
self.channel = 1
self.pad_size = (F - 1, T - 1)
super().__init__((self.channel, rank, F, T), (rank, self.K - F + 1, self.M - T + 1), rank)
def reconstruct(self, H, W):
out = F.conv2d(H[None, ...], W.flip((2, 3)), padding=self.pad_size)[0]
if self.channel == 1:
return out[0]
return out
def get_W_positive(self, WH, beta, H_sum):
H = self.H
if beta == 1:
if H_sum is None:
H_sum = H.sum((1, 2))
denominator = H_sum[None, :, None, None]
else:
if beta != 2:
WH = WH.pow(beta - 1)
WH = WH.view(self.channel, 1, self.K, self.M)
WHHt = F.conv2d(WH, H[:, None])
denominator = WHHt
return denominator, H_sum
def get_H_positive(self, WH, beta, W_sum):
W = self.W
if beta == 1:
if W_sum is None:
W_sum = W.sum((0, 2, 3))
denominator = W_sum[:, None, None]
else:
if beta != 2:
WH = WH.pow(beta - 1)
WH = WH.view(1, self.channel, self.K, self.M)
WtWH = F.conv2d(WH, W.transpose(0, 1))[0]
denominator = WtWH
return denominator, W_sum
def sort(self):
raise NotImplementedError
class NMF3D(_NMF):
def __init__(self, Vshape: tuple, rank: int = None, win=1):
try:
T, H, W = win
except:
T = H = W = win
if len(Vshape) == 4:
self.channel, self.N, self.K, self.M = Vshape
else:
self.N, self.K, self.M = Vshape
self.channel = 1
self.pad_size = (T - 1, H - 1, W - 1)
if not rank:
rank = self.K
super().__init__((self.channel, rank, T, H, W), (rank, self.N - T + 1, self.K - H + 1, self.M - W + 1), rank)
def reconstruct(self, H, W):
out = F.conv3d(H[None, ...], W.flip((2, 3, 4)), padding=self.pad_size)[0]
if self.channel == 1:
return out[0]
return out
def get_W_positive(self, WH, beta, H_sum):
H = self.H
if beta == 1:
if H_sum is None:
H_sum = H.sum((1, 2, 3))
denominator = H_sum[None, :, None, None, None]
else:
if beta != 2:
WH = WH.pow(beta - 1)
WH = WH.view(self.channel, 1, self.N, self.K, self.M)
WHHt = F.conv3d(WH, H[:, None])
denominator = WHHt
return denominator, H_sum
def get_H_positive(self, WH, beta, W_sum):
W = self.W
if beta == 1:
if W_sum is None:
W_sum = W.sum((0, 2, 3, 4))
denominator = W_sum[:, None, None, None]
else:
if beta != 2:
WH = WH.pow(beta - 1)
WH = WH.view(1, self.channel, self.N, self.K, self.M)
WtWH = F.conv3d(WH, W.transpose(0, 1))[0]
denominator = WtWH
return denominator, W_sum
def sort(self):
raise NotImplementedError
|
[
"tqdm.tqdm",
"torch.rand",
"torch.nn.functional.conv2d",
"torch.nn.functional.conv3d",
"torch.nn.functional.conv1d",
"torch.nn.functional.relu",
"torch.no_grad",
"torch.tensor"
] |
[((373, 411), 'torch.nn.functional.relu', 'F.relu', (['(pos - param.grad)'], {'inplace': '(True)'}), '(pos - param.grad, inplace=True)\n', (379, 411), True, 'import torch.nn.functional as F\n'), ((2867, 2908), 'tqdm.tqdm', 'tqdm', ([], {'total': 'max_iter', 'disable': '(not verbose)'}), '(total=max_iter, disable=not verbose)\n', (2871, 2908), False, 'from tqdm import tqdm\n'), ((6600, 6633), 'torch.nn.functional.conv1d', 'F.conv1d', (['WH[:, None]', 'H[:, None]'], {}), '(WH[:, None], H[:, None])\n', (6608, 6633), True, 'import torch.nn.functional as F\n'), ((8280, 8304), 'torch.nn.functional.conv2d', 'F.conv2d', (['WH', 'H[:, None]'], {}), '(WH, H[:, None])\n', (8288, 8304), True, 'import torch.nn.functional as F\n'), ((10015, 10039), 'torch.nn.functional.conv3d', 'F.conv3d', (['WH', 'H[:, None]'], {}), '(WH, H[:, None])\n', (10023, 10039), True, 'import torch.nn.functional as F\n'), ((1031, 1050), 'torch.rand', 'torch.rand', (['*W_size'], {}), '(*W_size)\n', (1041, 1050), False, 'import torch\n'), ((1088, 1107), 'torch.rand', 'torch.rand', (['*H_size'], {}), '(*H_size)\n', (1098, 1107), False, 'import torch\n'), ((2788, 2809), 'torch.tensor', 'torch.tensor', (['V.shape'], {}), '(V.shape)\n', (2800, 2809), False, 'import torch\n'), ((3393, 3408), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3406, 3408), False, 'import torch\n'), ((3888, 3903), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3901, 3903), False, 'import torch\n')]
|
from gensim.models import KeyedVectors
import pprint
import json
PATH_DATA = '../data/sake_dataset_v1.json'
def preprocessing(sake_data):
return sake_data.strip().replace(' ', '_')
def fix_data(data):
fixed_data = []
for k, v in sorted(data.items(), key=lambda x:x[0]):
if 'mean' in v:
fixed_data.append('{}:{}'.format(k, v['mean']))
elif type(v) == list:
for _v in v:
_v = preprocessing(_v)
fixed_data.append('{}:{}'.format(k, _v))
else:
v = preprocessing(v)
fixed_data.append('{}:{}'.format(k, v))
return fixed_data
def load_dataset(path = PATH_DATA):
with open(path) as f:
dataset = json.load(f)
return dataset
def load_sake_embedding(path):
return KeyedVectors.load_word2vec_format(path)
class SearchAPI():
def __init__(self, path = PATH_DATA):
self.dataset = load_dataset(path)['dataset']
def and_search(self, *args):
""" This function returns sake data that contain the queries
Args:
queries
Return:
data (list) that contain the queries
Example:
>>> api = SearchAPI()
>>> results = api.and_search("brand:英勲", "rice:祝")
>>> pprint.pprint(results[0], width=40)
{'alcohol_rate': {'max': '15.00', 'mean': '15.00', 'min': '15.00'},
'amino_acid_content': {'max': '', 'mean': '', 'min': ''},
'brand': '英勲',
...
}
"""
result = self.dataset
for query in args:
result = self._filtering(query, result)
return result
def _filtering(self, query, dataset):
return [d for d in dataset if query in fix_data(d)]
|
[
"json.load",
"gensim.models.KeyedVectors.load_word2vec_format"
] |
[((806, 845), 'gensim.models.KeyedVectors.load_word2vec_format', 'KeyedVectors.load_word2vec_format', (['path'], {}), '(path)\n', (839, 845), False, 'from gensim.models import KeyedVectors\n'), ((730, 742), 'json.load', 'json.load', (['f'], {}), '(f)\n', (739, 742), False, 'import json\n')]
|
"""Psychopy ElementArrayStim with flexible pedestal luminance.
Psychopy authors have said on record that this functionality should exist in
Psychopy itself. Future users of this code should double check as to whether
that has been implemented and if this code can be excised.
Note however that we have also added some functionality to set the contrast in
a way that depends on the pedestal, which may not get added.
This module is adapted from a similar extension to GratingStim
Original credit to https://github.com/nwilming/PedestalGrating/
Covered under the PsychoPy license, as it is a simple extension of prior code:
Copyright (C) 2015 <NAME>
Distributed under the terms of the GNU General Public License (GPL).
"""
from __future__ import division
import pyglet
pyglet.options['debug_gl'] = False
import ctypes # noqa: 402
GL = pyglet.gl
from psychopy.visual.elementarray import ElementArrayStim # noqa: 402
from psychopy.visual.basevisual import MinimalStim, TextureMixin # noqa: 402
try:
from psychopy.visual import shaders
except ImportError:
from psychopy import _shadersPyglet as shaders
# Fragment shader for the gabor stimulus. This is needed to add the pedestal to
# the color values for each location. I'm keeping it in this file to make the
# stimulus fairly self contained and to avoid messing with anything else.
# Almost a one to one copy of the original psychopy shader.
fragSignedColorTexMask = '''
uniform sampler2D texture, mask;
uniform float pedestal;
void main() {
vec4 textureFrag = texture2D(texture,gl_TexCoord[0].st);
vec4 maskFrag = texture2D(mask,gl_TexCoord[1].st);
gl_FragColor.a = gl_Color.a*maskFrag.a*textureFrag.a;
gl_FragColor.rgb = ((pedestal+1.0)/2.0)
+ ((textureFrag.rgb
* (gl_Color.rgb*2.0-1.0)+1.0)/2.0) -0.5;
}
'''
class ElementArray(ElementArrayStim, MinimalStim, TextureMixin):
"""Field of elements that are independently controlled and rapidly drawn.
This stimulus class defines a field of elements whose behaviour can be
independently controlled. Suitable for creating 'global form' stimuli or
more detailed random dot stimuli.
This stimulus can draw thousands of elements without dropping a frame, but
in order to achieve this performance, uses several OpenGL extensions only
available on modern graphics cards (supporting OpenGL2.0). See the
ElementArray demo.
"""
def __init__(self,
win,
units=None,
fieldPos=(0.0, 0.0),
fieldSize=(1.0, 1.0),
fieldShape='circle',
nElements=100,
sizes=2.0,
xys=None,
rgbs=None,
colors=(1.0, 1.0, 1.0),
colorSpace='rgb',
opacities=None,
depths=0,
fieldDepth=0,
oris=0,
sfs=1.0,
contrs=1,
phases=0,
elementTex='sin',
elementMask='gauss',
texRes=48,
interpolate=True,
name=None,
autoLog=False,
maskParams=None,
pedestal=None):
super(ElementArray, self).__init__(
win, units=units, fieldPos=fieldPos, fieldSize=fieldSize,
fieldShape=fieldShape, nElements=nElements, sizes=sizes, xys=xys,
rgbs=rgbs, colors=colors, colorSpace=colorSpace,
opacities=opacities, depths=depths, fieldDepth=fieldDepth,
oris=oris, sfs=sfs, contrs=contrs, phases=phases,
elementTex=elementTex, elementMask=elementMask, texRes=texRes,
interpolate=interpolate, name=name, autoLog=autoLog,
maskParams=maskParams)
# Set the default pedestal assuming a gray window color
pedestal = win.background_color if pedestal is None else pedestal
self.pedestal = pedestal
self._progSignedTexMask = shaders.compileProgram(
shaders.vertSimple, fragSignedColorTexMask)
@property
def pedestal_contrs(self):
"""Stimulus contrast, accounting for pedestal"""
return self.contrs / (self.pedestal + 1)
@pedestal_contrs.setter
def pedestal_contrs(self, values):
"""Stimulus contrast, accounting for pedestal."""
adjusted_values = values * (self.pedestal + 1)
self.contrs = adjusted_values
def draw(self, win=None):
"""Draw the stimulus in its relevant window.
You must call this method after every win.update() if you want the
stimulus to appear on that frame and then update the screen again.
"""
if win is None:
win = self.win
self._selectWindow(win)
if self._needVertexUpdate:
self._updateVertices()
if self._needColorUpdate:
self.updateElementColors()
if self._needTexCoordUpdate:
self.updateTextureCoords()
# scale the drawing frame and get to centre of field
GL.glPushMatrix() # push before drawing, pop after
# push the data for client attributes
GL.glPushClientAttrib(GL.GL_CLIENT_ALL_ATTRIB_BITS)
# GL.glLoadIdentity()
self.win.setScale('pix')
cpcd = ctypes.POINTER(ctypes.c_double)
GL.glColorPointer(4, GL.GL_DOUBLE, 0,
self._RGBAs.ctypes.data_as(cpcd))
GL.glVertexPointer(3, GL.GL_DOUBLE, 0,
self.verticesPix.ctypes.data_as(cpcd))
# setup the shaderprogram
_prog = self._progSignedTexMask
GL.glUseProgram(_prog)
# set the texture to be texture unit 0
GL.glUniform1i(GL.glGetUniformLocation(_prog, b"texture"), 0)
# mask is texture unit 1
GL.glUniform1i(GL.glGetUniformLocation(_prog, b"mask"), 1)
# BEGIN ADDED CODE
GL.glUniform1f(GL.glGetUniformLocation(_prog, b"pedestal"), self.pedestal)
# END ADDED CODE
# bind textures
GL.glActiveTexture(GL.GL_TEXTURE1)
GL.glBindTexture(GL.GL_TEXTURE_2D, self._maskID)
GL.glEnable(GL.GL_TEXTURE_2D)
GL.glActiveTexture(GL.GL_TEXTURE0)
GL.glBindTexture(GL.GL_TEXTURE_2D, self._texID)
GL.glEnable(GL.GL_TEXTURE_2D)
# setup client texture coordinates first
GL.glClientActiveTexture(GL.GL_TEXTURE0)
GL.glTexCoordPointer(2, GL.GL_DOUBLE, 0, self._texCoords.ctypes)
GL.glEnableClientState(GL.GL_TEXTURE_COORD_ARRAY)
GL.glClientActiveTexture(GL.GL_TEXTURE1)
GL.glTexCoordPointer(2, GL.GL_DOUBLE, 0, self._maskCoords.ctypes)
GL.glEnableClientState(GL.GL_TEXTURE_COORD_ARRAY)
GL.glEnableClientState(GL.GL_COLOR_ARRAY)
GL.glEnableClientState(GL.GL_VERTEX_ARRAY)
GL.glDrawArrays(GL.GL_QUADS, 0, self.verticesPix.shape[0] * 4)
# unbind the textures
GL.glActiveTexture(GL.GL_TEXTURE1)
GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
GL.glDisable(GL.GL_TEXTURE_2D)
# main texture
GL.glActiveTexture(GL.GL_TEXTURE0)
GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
GL.glDisable(GL.GL_TEXTURE_2D)
# disable states
GL.glDisableClientState(GL.GL_COLOR_ARRAY)
GL.glDisableClientState(GL.GL_VERTEX_ARRAY)
GL.glDisableClientState(GL.GL_TEXTURE_COORD_ARRAY)
GL.glUseProgram(0)
GL.glPopClientAttrib()
GL.glPopMatrix()
|
[
"psychopy._shadersPyglet.compileProgram",
"ctypes.POINTER"
] |
[((4092, 4158), 'psychopy._shadersPyglet.compileProgram', 'shaders.compileProgram', (['shaders.vertSimple', 'fragSignedColorTexMask'], {}), '(shaders.vertSimple, fragSignedColorTexMask)\n', (4114, 4158), True, 'from psychopy import _shadersPyglet as shaders\n'), ((5402, 5433), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_double'], {}), '(ctypes.c_double)\n', (5416, 5433), False, 'import ctypes\n')]
|
#!/usr/bin/env python
import json
import yaml
sharesFilename = 'simple-exports.json'
with open(sharesFilename, 'r') as f:
shares = json.load(f)
### For Loop to write out playbook for each cluster
for cluster in shares['clusters']:
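    # Build one playbook per cluster: a session-login task, one export-creation task per export, then a session-delete task.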
playbookFilename = 'playbook-simple-exports-%s.yml' % cluster['name']
with open(playbookFilename, 'w') as playbook:
play = [
{
'hosts': 'localhost',
'name': 'Isilon New NFS Export with URI module',
'tasks': [],
}
]
startsession = {
'name': 'get isilon API session IDs',
'register': 'results_login',
'uri': {
'body': {'password': cluster['password'],
'services': ['platform', 'namespace'],
'username': cluster['username']},
'body_format': 'json',
'method': 'POST',
'status_code': 201,
'url': 'https://' + cluster['name'] +':8080/session/1/session',
'validate_certs': False }
}
play[0]['tasks'].append(startsession)
for export in cluster['exports']:
createexport = {
'name': 'make NFS Export',
'uri': {
'body': {
'description': export['description'],
'paths': export['paths'],
'zone': export['zone']},
'body_format': 'json',
'headers': {'Cookie': 'isisessid={{ results_login.cookies.isisessid }}',
'X-CSRF-Token': '{{ results_login.cookies.isicsrf }}',
'referer': 'https://'+cluster['name']+':8080'},
'method': 'POST',
'status_code': 201,
'url': 'https://'+cluster['name']+':8080/platform/4/protocols/nfs/exports',
'validate_certs': False,
}
}
play[0]['tasks'].append(createexport)
endsession = {
'name': 'Delete isilon API session IDs',
'register': 'results_DEL_cookie',
'uri': {
'headers': {
'Cookie': 'isisessid={{ results_login.cookies.isisessid }}',
'X-CSRF-Token': '{{ results_login.cookies.isicsrf }}',
'referer': 'https://'+cluster['name']+':8080',
},
'method': 'DELETE',
'status_code': 204,
'url': 'https://'+cluster['name']+':8080/session/1/session',
'validate_certs': False,
}
}
play[0]['tasks'].append(endsession)
yaml.safe_dump(play, playbook, default_flow_style=False)
|
[
"json.load",
"yaml.safe_dump"
] |
[((138, 150), 'json.load', 'json.load', (['f'], {}), '(f)\n', (147, 150), False, 'import json\n'), ((2747, 2803), 'yaml.safe_dump', 'yaml.safe_dump', (['play', 'playbook'], {'default_flow_style': '(False)'}), '(play, playbook, default_flow_style=False)\n', (2761, 2803), False, 'import yaml\n')]
|
from flask import request, Blueprint, send_file
from sasukekun_flask.utils import v1, format_response
from sasukekun_flask.config import API_IMAGE
from .models import PasteFile
ONE_MONTH = 60 * 60 * 24 * 30
upload = Blueprint('upload', __name__)
@upload.route(v1('/upload/'), methods=['GET', 'POST'])
def upload_file():
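    # GET lists all stored files; POST saves the uploaded file (the resize branch below is currently disabled).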
if request.method == 'GET':
paste_files = PasteFile.objects.all()
data = [paste_file.json for paste_file in paste_files]
return format_response(data=data)
elif request.method == 'POST':
uploaded_file = request.files['file']
w = request.form.get('w')
h = request.form.get('h')
if not uploaded_file:
            return format_response(code=400, info='not file')  # bail out early on an empty upload
if False and w and h:
paste_file = PasteFile.rsize(uploaded_file, w, h) # TODO: fix issues
else:
paste_file = PasteFile.create_by_uploaded_file(uploaded_file)
paste_file.save()
return format_response(data=paste_file.to_dict())
@upload.route(v1('/upload/<filehash>/', base=API_IMAGE),
methods=['GET'])
def download(filehash):
paste_file = PasteFile.get_by_filehash(filehash)
return send_file(
open(paste_file.path, 'rb'),
mimetype='application/octet-stream',
cache_timeout=ONE_MONTH,
as_attachment=True,
attachment_filename=paste_file.filename.encode('utf-8'))
|
[
"flask.Blueprint",
"sasukekun_flask.utils.format_response",
"sasukekun_flask.utils.v1",
"flask.request.form.get"
] |
[((218, 247), 'flask.Blueprint', 'Blueprint', (['"""upload"""', '__name__'], {}), "('upload', __name__)\n", (227, 247), False, 'from flask import request, Blueprint, send_file\n'), ((263, 277), 'sasukekun_flask.utils.v1', 'v1', (['"""/upload/"""'], {}), "('/upload/')\n", (265, 277), False, 'from sasukekun_flask.utils import v1, format_response\n'), ((1042, 1083), 'sasukekun_flask.utils.v1', 'v1', (['"""/upload/<filehash>/"""'], {'base': 'API_IMAGE'}), "('/upload/<filehash>/', base=API_IMAGE)\n", (1044, 1083), False, 'from sasukekun_flask.utils import v1, format_response\n'), ((479, 505), 'sasukekun_flask.utils.format_response', 'format_response', ([], {'data': 'data'}), '(data=data)\n', (494, 505), False, 'from sasukekun_flask.utils import v1, format_response\n'), ((600, 621), 'flask.request.form.get', 'request.form.get', (['"""w"""'], {}), "('w')\n", (616, 621), False, 'from flask import request, Blueprint, send_file\n'), ((634, 655), 'flask.request.form.get', 'request.form.get', (['"""h"""'], {}), "('h')\n", (650, 655), False, 'from flask import request, Blueprint, send_file\n'), ((698, 740), 'sasukekun_flask.utils.format_response', 'format_response', ([], {'code': '(400)', 'info': '"""not file"""'}), "(code=400, info='not file')\n", (713, 740), False, 'from sasukekun_flask.utils import v1, format_response\n')]
|
from uuid import uuid4
from datetime import datetime
from time import time
import boto3
from boto3 import Session
from botocore.credentials import RefreshableCredentials
from botocore.session import get_session
from botocore.credentials import InstanceMetadataFetcher
from storages.utils import setting
import logging
class InstanceMetadataBotoSession:
METHOD = 'iam-role'
CANONICAL_NAME = 'Ec2InstanceMetadata'
"""
Boto Helper class which lets us create refreshable session, so that we can cache the client or resource.
Usage
-----
session = BotoSession().refreshable_session()
client = session.client("s3") # we now can cache this client object without worrying about expiring credentials
"""
def __init__(
self,
region_name: str = None,
session_name: str = None,
):
"""
Initialize `BotoSession`
Parameters
----------
region_name : str (optional)
Default region when creating new connection.
session_name : str (optional)
An identifier for the assumed role session. (required when `sts_arn` is given)
"""
self.region_name = region_name
# read why RoleSessionName is important https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sts.html
self.session_name = session_name or uuid4().hex
self._role_fetcher = InstanceMetadataFetcher(timeout=setting("S3_CREDENTIALS_TIMEOUT", 1000), num_attempts=3)
self.access_key = None
self.secret_key = None
self.security_token = None
def __get_session_credentials(self):
"""
Get session credentials
"""
fetcher = self._role_fetcher
# We do the first request, to see if we get useful data back.
# If not, we'll pass & move on to whatever's next in the credential
# chain.
metadata = fetcher.retrieve_iam_role_credentials()
if not metadata:
return None
logging.debug('Found credentials from IAM Role: %s',
metadata['role_name'])
# We manually set the data here, since we already made the request &
# have it. When the expiry is hit, the credentials will auto-refresh
# themselves.
credentials = RefreshableCredentials.create_from_metadata(
metadata,
method=self.METHOD,
refresh_using=fetcher.retrieve_iam_role_credentials,
)
self.access_key = credentials.access_key
self.secret_key = credentials.secret_key
self.security_token = credentials.token
return credentials
def refreshable_session(self) -> Session:
"""
Get refreshable boto3 session.
"""
try:
# get refreshable credentials
refreshable_credentials = RefreshableCredentials.create_from_metadata(
metadata=self.__get_session_credentials(),
refresh_using=self._role_fetcher.retrieve_iam_role_credentials,
method=self.METHOD,
)
# attach refreshable credentials current session
session = get_session()
session._credentials = refreshable_credentials
session.set_config_variable("region", self.region_name)
autorefresh_session = Session(botocore_session=session)
return autorefresh_session
except:
return boto3.session.Session()
|
[
"boto3.session.Session",
"uuid.uuid4",
"logging.debug",
"boto3.Session",
"storages.utils.setting",
"botocore.credentials.RefreshableCredentials.create_from_metadata",
"botocore.session.get_session"
] |
[((2028, 2103), 'logging.debug', 'logging.debug', (['"""Found credentials from IAM Role: %s"""', "metadata['role_name']"], {}), "('Found credentials from IAM Role: %s', metadata['role_name'])\n", (2041, 2103), False, 'import logging\n'), ((2323, 2453), 'botocore.credentials.RefreshableCredentials.create_from_metadata', 'RefreshableCredentials.create_from_metadata', (['metadata'], {'method': 'self.METHOD', 'refresh_using': 'fetcher.retrieve_iam_role_credentials'}), '(metadata, method=self.METHOD,\n refresh_using=fetcher.retrieve_iam_role_credentials)\n', (2366, 2453), False, 'from botocore.credentials import RefreshableCredentials\n'), ((3205, 3218), 'botocore.session.get_session', 'get_session', ([], {}), '()\n', (3216, 3218), False, 'from botocore.session import get_session\n'), ((3380, 3413), 'boto3.Session', 'Session', ([], {'botocore_session': 'session'}), '(botocore_session=session)\n', (3387, 3413), False, 'from boto3 import Session\n'), ((1384, 1391), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (1389, 1391), False, 'from uuid import uuid4\n'), ((1457, 1496), 'storages.utils.setting', 'setting', (['"""S3_CREDENTIALS_TIMEOUT"""', '(1000)'], {}), "('S3_CREDENTIALS_TIMEOUT', 1000)\n", (1464, 1496), False, 'from storages.utils import setting\n'), ((3490, 3513), 'boto3.session.Session', 'boto3.session.Session', ([], {}), '()\n', (3511, 3513), False, 'import boto3\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
#########
Reporting
#########
*Created on Thu Jun 8 14:40 2017 by <NAME>*
Tools for creating HTML Reports."""
import time
import base64
import os
import gc
import os.path as op
from string import Template
from io import BytesIO as IO
import pandas as pd
from rdkit.Chem import AllChem as Chem
from rdkit.Chem import Draw
import numpy as np
from PIL import Image, ImageChops
import matplotlib.pyplot as plt
from cellpainting2 import tools as cpt
from cellpainting2 import report_templ as cprt
from cellpainting2 import processing as cpp
cp_config = cpt.load_config("config")
# cp_plates = cpt.load_config("plates")
IPYTHON = cpt.is_interactive_ipython()
if IPYTHON:
from IPython.core.display import HTML
ACT_PROF_PARAMETERS = cp_config["Parameters"]
ACT_CUTOFF_PERC = cp_config["Cutoffs"]["ActCutoffPerc"]
ACT_CUTOFF_PERC_H = cp_config["Cutoffs"]["ActCutoffPercH"]
ACT_CUTOFF_PERC_REF = cp_config["Cutoffs"]["ActCutoffPercRef"]
OVERACT_H = cp_config["Cutoffs"]["OverActH"]
LIMIT_ACTIVITY_H = cp_config["Cutoffs"]["LimitActivityH"]
LIMIT_ACTIVITY_L = cp_config["Cutoffs"]["LimitActivityL"]
LIMIT_CELL_COUNT_H = cp_config["Cutoffs"]["LimitCellCountH"]
LIMIT_CELL_COUNT_L = cp_config["Cutoffs"]["LimitCellCountL"]
LIMIT_SIMILARITY_H = cp_config["Cutoffs"]["LimitSimilarityH"]
LIMIT_SIMILARITY_L = cp_config["Cutoffs"]["LimitSimilarityL"]
PARAMETER_HELP = cp_config["ParameterHelp"]
# get positions of the compartments in the list of parameters
x = 1
XTICKS = [x]
for comp in ["Median_Cytoplasm", "Median_Nuclei"]:
for idx, p in enumerate(ACT_PROF_PARAMETERS[x:], 1):
if p.startswith(comp):
XTICKS.append(idx + x)
x += idx
break
XTICKS.append(len(ACT_PROF_PARAMETERS))
Draw.DrawingOptions.atomLabelFontFace = "DejaVu Sans"
Draw.DrawingOptions.atomLabelFontSize = 18
try:
from misc_tools import apl_tools
AP_TOOLS = True
# Library version
VERSION = apl_tools.get_commit(__file__)
# I use this to keep track of the library versions I use in my project notebooks
print("{:45s} ({})".format(__name__, VERSION))
except ImportError:
AP_TOOLS = False
print("{:45s} ({})".format(__name__, time.strftime(
"%y%m%d-%H:%M", time.localtime(op.getmtime(__file__)))))
try:
# Try to import Avalon so it can be used for generation of 2d coordinates.
from rdkit.Avalon import pyAvalonTools as pyAv
USE_AVALON_2D = True
except ImportError:
print(" * Avalon not available. Using RDKit for 2d coordinate generation.")
USE_AVALON_2D = False
try:
import holoviews as hv
hv.extension("bokeh")
HOLOVIEWS = True
except ImportError:
HOLOVIEWS = False
print("* holoviews could not be import. heat_hv is not available.")
def check_2d_coords(mol, force=False):
"""Check if a mol has 2D coordinates and if not, calculate them."""
if not force:
try:
mol.GetConformer()
except ValueError:
force = True # no 2D coords... calculate them
if force:
if USE_AVALON_2D:
pyAv.Generate2DCoords(mol)
else:
mol.Compute2DCoords()
def mol_from_smiles(smi, calc_2d=True):
mol = Chem.MolFromSmiles(smi)
if not mol:
mol = Chem.MolFromSmiles("*")
else:
if calc_2d:
check_2d_coords(mol)
return mol
def autocrop(im, bgcolor="white"):
if im.mode != "RGB":
im = im.convert("RGB")
bg = Image.new("RGB", im.size, bgcolor)
diff = ImageChops.difference(im, bg)
bbox = diff.getbbox()
if bbox:
return im.crop(bbox)
return None # no contents
def get_value(str_val):
if not str_val:
return ""
try:
val = float(str_val)
if "." not in str_val:
val = int(val)
except ValueError:
val = str_val
return val
def isnumber(x):
"""Returns True, if x is a number (i.e. can be converted to float)."""
try:
float(x)
return True
except ValueError:
return False
def convert_bool(dict, dkey, true="Yes", false="No", default="n.d."):
if dkey in dict:
if dict[dkey]:
dict[dkey] = true
else:
dict[dkey] = false
else:
dict[dkey] = default
def load_image(path, well, channel):
image_fn = "{}/{}_w{}.jpg".format(path, well, channel)
im = Image.open(image_fn)
return im
def b64_mol(mol, size=300):
img_file = IO()
try:
img = autocrop(Draw.MolToImage(mol, size=(size, size)))
except UnicodeEncodeError:
print(Chem.MolToSmiles(mol))
mol = Chem.MolFromSmiles("C")
img = autocrop(Draw.MolToImage(mol, size=(size, size)))
img.save(img_file, format='PNG')
b64 = base64.b64encode(img_file.getvalue())
b64 = b64.decode()
img_file.close()
return b64
def b64_img(im, format="JPEG"):
if isinstance(im, IO):
needs_close = False
img_file = im
else:
needs_close = True
img_file = IO()
im.save(img_file, format=format)
b64 = base64.b64encode(img_file.getvalue())
b64 = b64.decode()
if needs_close:
img_file.close()
return b64
def mol_img_tag(mol, options=None):
tag = """<img {} src="data:image/png;base64,{}" alt="Mol"/>"""
if options is None:
options = ""
img_tag = tag.format(options, b64_mol(mol))
return img_tag
def img_tag(im, format="jpeg", options=None):
tag = """<img {} src="data:image/{};base64,{}" alt="Image"/>"""
if options is None:
options = ""
b = b64_img(im, format=format)
img_tag = tag.format(options, format.lower(), b)
return img_tag
def load_control_images(src_dir):
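    # Load the five channel images of the control well (H11) and return them as HTML <img> tags keyed by channel number.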
image_dir = op.join(src_dir, "images")
ctrl_images = {}
for ch in range(1, 6):
im = load_image(image_dir, "H11", ch)
ctrl_images[ch] = img_tag(im, options='style="width: 250px;"')
return ctrl_images
def sanitize_filename(fn):
result = fn.replace(":", "_").replace(",", "_").replace(".", "_")
return result
def write(text, fn):
with open(fn, "w") as f:
f.write(text)
def write_page(page, title="Report", fn="index.html", templ=cprt.HTML_INTRO):
t = Template(templ + page + cprt.HTML_EXTRO)
result = t.substitute(title=title)
write(result, fn=fn)
def assign_colors(rec):
act_cutoff_high = ACT_CUTOFF_PERC_H
if "Toxic" in rec:
if rec["Toxic"]:
rec["Col_Toxic"] = cprt.COL_RED
else:
rec["Col_Toxic"] = cprt.COL_GREEN
else:
rec["Col_Toxic"] = cprt.COL_WHITE
if "Pure_Flag" in rec:
if rec["Pure_Flag"] == "Ok":
rec["Col_Purity"] = cprt.COL_GREEN
elif rec["Pure_Flag"] == "Warn":
rec["Col_Purity"] = cprt.COL_YELLOW
elif rec["Pure_Flag"] == "Fail":
rec["Col_Purity"] = cprt.COL_RED
else:
rec["Col_Purity"] = cprt.COL_WHITE
else:
rec["Col_Purity"] = cprt.COL_WHITE
if rec["Rel_Cell_Count"] >= LIMIT_CELL_COUNT_H:
rec["Col_Cell_Count"] = cprt.COL_GREEN
elif rec["Rel_Cell_Count"] >= LIMIT_CELL_COUNT_L:
rec["Col_Cell_Count"] = cprt.COL_YELLOW
else:
rec["Col_Cell_Count"] = cprt.COL_RED
if rec["Activity"] > act_cutoff_high:
rec["Col_Act"] = cprt.COL_RED
elif rec["Activity"] >= LIMIT_ACTIVITY_H:
rec["Col_Act"] = cprt.COL_GREEN
elif rec["Activity"] >= LIMIT_ACTIVITY_L:
rec["Col_Act"] = cprt.COL_YELLOW
else:
rec["Col_Act"] = cprt.COL_RED
if rec["Act_Flag"] == "active":
rec["Col_Act_Flag"] = cprt.COL_GREEN
else:
rec["Col_Act_Flag"] = cprt.COL_RED
def remove_colors(rec):
for k in rec.keys():
if k.startswith("Col_"):
rec[k] = cprt.COL_WHITE
def overview_report(df, cutoff=LIMIT_SIMILARITY_L / 100,
highlight=False, mode="cpd"):
"""mode `int` displays similarities not to references but to other internal compounds
(just displays the `Similarity` column)."""
cpp.load_resource("SIM_REFS")
sim_refs = cpp.SIM_REFS
detailed_cpds = []
if isinstance(df, cpp.DataSet):
df = df.data
t = Template(cprt.OVERVIEW_TABLE_HEADER)
if "int" in mode:
tbl_header = t.substitute(sim_entity="to another Test Compound")
else:
tbl_header = t.substitute(sim_entity="to a Reference")
report = [cprt.OVERVIEW_TABLE_INTRO, tbl_header]
row_templ = Template(cprt.OVERVIEW_TABLE_ROW)
idx = 0
for _, rec in df.iterrows():
act_cutoff_low = ACT_CUTOFF_PERC
act_cutoff_high = ACT_CUTOFF_PERC_H
idx += 1
well_id = rec["Well_Id"]
mol = mol_from_smiles(rec.get("Smiles", "*"))
rec["mol_img"] = mol_img_tag(mol)
rec["idx"] = idx
if "Pure_Flag" not in rec:
rec["Pure_Flag"] = "n.d."
rec["Act_Flag"] = "active"
rec["Max_Sim"] = ""
rec["Link"] = ""
rec["Col_Sim"] = cprt.COL_WHITE
has_details = True
if rec["Activity"] < act_cutoff_low:
has_details = False
rec["Act_Flag"] = "inactive"
# print(rec)
# similar references are searched for non-toxic compounds with an activity >= LIMIT_ACTIVITY_L
if rec["Activity"] < LIMIT_ACTIVITY_L or rec["Activity"] > act_cutoff_high or rec["Toxic"] or rec["OverAct"] > OVERACT_H:
similars_determined = False
if rec["OverAct"] > OVERACT_H:
rec["Max_Sim"] = "Overact."
rec["Col_Sim"] = cprt.COL_RED
else:
similars_determined = True
assign_colors(rec)
convert_bool(rec, "Toxic")
if has_details:
detailed_cpds.append(well_id)
details_fn = sanitize_filename(well_id)
plate = rec["Plate"]
rec["Link"] = '<a href="../{}/details/{}.html">Detailed<br>Report</a>'.format(
plate, details_fn)
if similars_determined:
if "int" in mode:
# similar = {"Similarity": [rec["Similarity"]]}
similar = pd.DataFrame(
{"Well_Id": [well_id], "Similarity": [rec["Similarity"]]})
else:
similar = sim_refs[sim_refs["Well_Id"] == well_id].compute()
similar = similar.sort_values("Similarity",
ascending=False).reset_index()
if len(similar) > 0:
max_sim = round(
similar["Similarity"][0] * 100, 1) # first in the list has the highest similarity
rec["Max_Sim"] = max_sim
if max_sim >= LIMIT_SIMILARITY_H:
rec["Col_Sim"] = cprt.COL_GREEN
elif max_sim >= LIMIT_SIMILARITY_L:
rec["Col_Sim"] = cprt.COL_YELLOW
else:
rec["Col_Sim"] = cprt.COL_WHITE
print("ERROR: This should not happen (Max_Sim).")
else:
rec["Max_Sim"] = "< {}".format(LIMIT_SIMILARITY_L)
rec["Col_Sim"] = cprt.COL_RED
if not highlight:
# remove all coloring again:
remove_colors(rec)
report.append(row_templ.substitute(rec))
report.append(cprt.TABLE_EXTRO)
return "\n".join(report), detailed_cpds
def sim_ref_table(similar):
cpp.load_resource("REFERENCES")
df_refs = cpp.REFERENCES
table = [cprt.TABLE_INTRO, cprt.REF_TABLE_HEADER]
templ = Template(cprt.REF_TABLE_ROW)
for idx, rec in similar.iterrows():
rec = rec.to_dict()
ref_id = rec["Ref_Id"]
ref_data = df_refs[df_refs["Well_Id"] == ref_id]
if cpp.is_dask(ref_data):
ref_data = ref_data.compute()
if len(ref_data) == 0:
print(rec)
raise ValueError("BUG: ref_data should not be empty.")
ref_data = ref_data.copy()
ref_data = ref_data.fillna("—")
rec.update(ref_data.to_dict("records")[0])
mol = mol_from_smiles(rec.get("Smiles", "*"))
rec["Sim_Format"] = "{:.1f}".format(rec["Similarity"] * 100)
rec["Tan_Format"] = "{:.1f}".format(rec["Tanimoto"] * 100)
if rec["Tan_Format"] == np.nan:
rec["Tan_Format"] = "—"
rec["mol_img"] = mol_img_tag(mol)
rec["idx"] = idx + 1
link = "../../{}/details/{}.html".format(rec["Plate"],
sanitize_filename(rec["Well_Id"]))
rec["link"] = link
row = templ.substitute(rec)
table.append(row)
table.append(cprt.TABLE_EXTRO)
return "\n".join(table)
def changed_parameters_table(act_prof, val, parameters=ACT_PROF_PARAMETERS):
changed = cpt.parameters_from_act_profile_by_val(
act_prof, val, parameters=parameters)
table = []
templ = Template(cprt.PARM_TABLE_ROW)
for idx, p in enumerate(changed, 1):
p_elmnts = p.split("_")
p_module = p_elmnts[2]
p_name = "_".join(p_elmnts[1:])
rec = {
"idx": idx,
"Parameter": p_name,
"Help_Page": PARAMETER_HELP[p_module]
}
row = templ.substitute(rec)
table.append(row)
return "\n".join(table), changed
def parm_stats(parameters):
result = []
channels = ["_Mito", "_Ph_golgi", "_Syto", "_ER", "Hoechst"]
for ch in channels:
cnt = len([p for p in parameters if ch in p])
result.append(cnt)
return result
def parm_hist(increased, decreased, hist_cache):
# try to load histogram from cache:
if op.isfile(hist_cache):
result = open(hist_cache).read()
return result
labels = [
"Mito",
"Golgi / Membrane",
"RNA / Nucleoli",
"ER",
"Nuclei"
]
inc_max = max(increased)
dec_max = max(decreased)
max_total = max([inc_max, dec_max])
if max_total == 0:
result = "No compartment-specific parameters were changed."
return result
inc_norm = [v / max_total for v in increased]
dec_norm = [v / max_total for v in decreased]
n_groups = 5
dpi = 96
# plt.rcParams['axes.titlesize'] = 25
plt.style.use("seaborn-white")
plt.style.use("seaborn-pastel")
plt.style.use("seaborn-talk")
plt.rcParams['axes.labelsize'] = 25
plt.rcParams['xtick.labelsize'] = 20
plt.rcParams['ytick.labelsize'] = 20
plt.rcParams['legend.fontsize'] = 20
size = (1500, 1000)
figsize = (size[0] / dpi, size[1] / dpi)
fig, ax = plt.subplots(figsize=figsize)
index = np.arange(n_groups)
bar_width = 0.25
plt.bar(index, inc_norm, bar_width,
color='#94caef',
label='Inc')
plt.bar(index + bar_width, dec_norm, bar_width,
color='#ffdd1a',
label='Dec')
plt.xlabel('Cell Compartment')
plt.ylabel('rel. Occurrence')
plt.xticks(index + bar_width / 2, labels, rotation=45)
plt.legend()
plt.tight_layout()
img_file = IO()
plt.savefig(img_file, bbox_inches='tight', format="jpg")
result = img_tag(img_file, format="jpg", options='style="width: 800px;"')
img_file.close()
# important, otherwise the plots will accumulate and fill up memory:
plt.close()
open(hist_cache, "w").write(result) # cache the histogram
return result
def heat_mpl(df, id_prop="Compound_Id", cmap="bwr",
show=True, colorbar=True, biosim=False, chemsim=False, method="dist_corr",
sort_parm=False, parm_dict=None,
plot_cache=None):
# try to load heatmap from cache:
if plot_cache is not None and op.isfile(plot_cache):
result = open(plot_cache).read()
return result
if "dist" in method.lower():
profile_sim = cpt.profile_sim_dist_corr
else:
profile_sim = cpt.profile_sim_tanimoto
df_len = len(df)
img_size = 15 if show else 17
plt.style.use("seaborn-white")
plt.style.use("seaborn-pastel")
plt.style.use("seaborn-talk")
plt.rcParams['axes.labelsize'] = 25
# plt.rcParams['legend.fontsize'] = 20
plt.rcParams['figure.figsize'] = (img_size, 1.1 + 0.47 * (df_len - 1))
plt.rcParams['axes.labelsize'] = 25
plt.rcParams['ytick.labelsize'] = 20
plt.rcParams['xtick.labelsize'] = 15
fs_text = 18
y_labels = []
fp_list = []
max_val = 3 # using a fixed color range now
min_val = -3
ylabel_templ = "{}{}{}"
ylabel_cs = ""
ylabel_bs = ""
id_prop_list = []
for ctr, (_, rec) in enumerate(df.iterrows()):
if sort_parm:
if ctr == 0:
compartments = ["Median_Cells", "Median_Cytoplasm", "Median_Nuclei"]
parm_list = []
for comp in compartments:
parm_comp = [x for x in ACT_PROF_PARAMETERS if x.startswith(comp)]
val_list = [rec[x] for x in parm_comp]
parm_sorted = [x for _, x in sorted(zip(val_list, parm_comp))]
parm_list.extend(parm_sorted)
else:
parm_list = ACT_PROF_PARAMETERS
fp = [rec[x] for x in ACT_PROF_PARAMETERS]
fp_view = [rec[x] for x in parm_list]
fp_list.append(fp_view)
id_prop_list.append(rec[id_prop])
if chemsim:
if ctr == 0:
mol = mol_from_smiles(rec.get("Smiles", "*"))
if len(mol.GetAtoms()) > 1:
ylabel_cs = "Chem | "
mol_fp = Chem.GetMorganFingerprint(mol, 2) # ECFC4
else: # no Smiles present in the DataFrame
ylabel_cs = ""
chemsim = False
else:
q = rec.get("Smiles", "*")
if len(q) < 2:
ylabel_cs = " | "
else:
sim = cpt.chem_sim(mol_fp, q) * 100
ylabel_cs = "{:3.0f}% | ".format(sim)
if biosim:
if ctr == 0:
prof_ref = fp
ylabel_bs = " Bio | "
else:
sim = profile_sim(prof_ref, fp) * 100
ylabel_bs = "{:3.0f}% | ".format(sim)
ylabel = ylabel_templ.format(ylabel_cs, ylabel_bs, rec[id_prop])
y_labels.append(ylabel)
# m_val = max(fp) # this was the calculation of the color range
# if m_val > max_val:
# max_val = m_val
# m_val = min(fp)
# if m_val < min_val:
# min_val = m_val
if isinstance(parm_dict, dict):
parm_dict["Parameter"] = parm_list
for i in range(len(id_prop_list)):
parm_dict[str(id_prop_list[i])] = fp_list[i].copy()
# calc the colorbar range
max_val = max(abs(min_val), max_val)
# invert y axis:
y_labels = y_labels[::-1]
fp_list = fp_list[::-1]
Z = np.asarray(fp_list)
plt.xticks(XTICKS)
plt.yticks(np.arange(df_len) + 0.5, y_labels)
plt.pcolor(Z, vmin=-max_val, vmax=max_val, cmap=cmap)
plt.text(XTICKS[1] // 2, -1.1, "Cells",
horizontalalignment='center', fontsize=fs_text)
plt.text(XTICKS[1] + ((XTICKS[2] - XTICKS[1]) // 2), -1.1,
"Cytoplasm", horizontalalignment='center', fontsize=fs_text)
plt.text(XTICKS[2] + ((XTICKS[3] - XTICKS[2]) // 2), -1.1,
"Nuclei", horizontalalignment='center', fontsize=fs_text)
if colorbar and len(df) > 3:
plt.colorbar()
plt.tight_layout()
if show:
plt.show()
else:
img_file = IO()
plt.savefig(img_file, bbox_inches='tight', format="jpg")
result = img_tag(img_file, format="jpg",
options='style="width: 900px;"')
img_file.close()
# important, otherwise the plots will accumulate and fill up memory:
plt.clf()
plt.close()
gc.collect()
if plot_cache is not None: # cache the plot
open(plot_cache, "w").write(result)
return result
def heat_hv(df, id_prop="Compound_Id", cmap="bwr", invert_y=False):
if not HOLOVIEWS:
raise ImportError("# holoviews library could not be imported")
df_parm = df[[id_prop] + ACT_PROF_PARAMETERS].copy()
df_len = len(df_parm)
col_bar = False if df_len < 3 else True
values = list(df_parm.drop(id_prop, axis=1).values.flatten())
max_val = max(values)
min_val = min(values)
max_val = max(abs(min_val), max_val)
hm_opts = dict(width=950, height=40 + 30 * df_len, tools=['hover'], invert_yaxis=invert_y,
xrotation=90, labelled=[], toolbar='above', colorbar=col_bar, xaxis=None,
colorbar_opts={"width": 10})
hm_style = {"cmap": cmap}
opts = {'HeatMap': {'plot': hm_opts, "style": hm_style}}
df_heat = cpt.melt(df_parm, id_prop=id_prop)
heatmap = hv.HeatMap(df_heat).redim.range(Value=(-max_val, max_val))
return heatmap(opts)
def show_images(plate_full_name, well):
"""For interactive viewing in the notebook."""
if not IPYTHON:
return
src_dir = op.join(cp_config["Paths"]["SrcPath"], plate_full_name)
ctrl_images = load_control_images(src_dir)
image_dir = op.join(src_dir, "images")
templ_dict = {}
for ch in range(1, 6):
im = load_image(image_dir, well, ch)
templ_dict["Img_{}_Cpd".format(ch)] = img_tag(
im, options='style="width: 250px;"')
templ_dict["Img_{}_Ctrl".format(ch)] = ctrl_images[ch]
tbody_templ = Template(cprt.IMAGES_TABLE)
table = cprt.TABLE_INTRO + \
tbody_templ.substitute(templ_dict) + cprt.HTML_EXTRO
return HTML(table)
def get_data_for_wells(well_ids):
cpp.load_resource("DATASTORE")
data = cpp.DATASTORE
result = data[data["Well_Id"].isin(well_ids)]
if cpp.is_dask(result):
result = result.compute()
result["_sort"] = pd.Categorical(
result["Well_Id"], categories=well_ids, ordered=True)
result = result.sort_values("_sort")
result.drop("_sort", axis=1, inplace=False)
return result
def detailed_report(rec, src_dir, ctrl_images):
# print(rec)
cpp.load_resource("SIM_REFS")
sim_refs = cpp.SIM_REFS
date = time.strftime("%d-%m-%Y %H:%M", time.localtime())
image_dir = op.join(src_dir, "images")
well_id = rec["Well_Id"]
# act_prof = [rec[x] for x in ACT_PROF_PARAMETERS]
mol = mol_from_smiles(rec.get("Smiles", "*"))
if "Pure_Flag" not in rec:
rec["Pure_Flag"] = "n.d."
templ_dict = rec.copy()
log2_vals = [(x, rec[x]) for x in ACT_PROF_PARAMETERS]
parm_table = []
for idx, x in enumerate(log2_vals, 1):
parm_table.extend(["<tr><td>", str(idx), "</td>",
# omit the "Median_" head of each parameter
"<td>", x[0][7:], "</td>",
'<td align="right">', "{:.2f}".format(x[1]), "</td></tr>\n"])
templ_dict["Parm_Table"] = "".join(parm_table)
df_heat = pd.DataFrame([rec])
templ_dict["Date"] = date
templ_dict["mol_img"] = mol_img_tag(mol, options='class="cpd_image"')
if templ_dict["Is_Ref"]:
if not isinstance(templ_dict["Trivial_Name"], str) or templ_dict["Trivial_Name"] == "":
templ_dict["Trivial_Name"] = "—"
if not isinstance(templ_dict["Known_Act"], str) or templ_dict["Known_Act"] == "":
templ_dict["Known_Act"] = "—"
t = Template(cprt.DETAILS_REF_ROW)
templ_dict["Reference"] = t.substitute(templ_dict)
else:
templ_dict["Reference"] = ""
well = rec["Metadata_Well"]
for ch in range(1, 6):
im = load_image(image_dir, well, ch)
templ_dict["Img_{}_Cpd".format(ch)] = img_tag(
im, options='style="width: 250px;"')
templ_dict["Img_{}_Ctrl".format(ch)] = ctrl_images[ch]
act_cutoff_high = ACT_CUTOFF_PERC_H
if rec["Rel_Cell_Count"] < LIMIT_CELL_COUNT_L:
templ_dict["Ref_Table"] = "Because of compound toxicity, no similarity was determined."
elif rec["Activity"] < LIMIT_ACTIVITY_L:
templ_dict["Ref_Table"] = "Because of low induction (< {}%), no similarity was determined.".format(LIMIT_ACTIVITY_L)
elif rec["Activity"] > act_cutoff_high:
templ_dict["Ref_Table"] = "Because of high induction (> {}%), no similarity was determined.".format(act_cutoff_high)
elif rec["OverAct"] > OVERACT_H:
templ_dict["Ref_Table"] = "Because of high similarity to the overactivation profile (> {}%), no similarity was determined.".format(OVERACT_H)
else:
similar = sim_refs[sim_refs["Well_Id"] == well_id].compute()
if len(similar) > 0:
similar = similar.sort_values("Similarity",
ascending=False).reset_index().head(5)
ref_tbl = sim_ref_table(similar)
templ_dict["Ref_Table"] = ref_tbl
sim_data = get_data_for_wells(similar["Ref_Id"].values)
df_heat = pd.concat([df_heat, sim_data])
else:
templ_dict["Ref_Table"] = "No similar references found."
cache_path = op.join(cp_config["Dirs"]["DataDir"], "plots", rec["Plate"])
if not op.isdir(cache_path):
os.makedirs(cache_path, exist_ok=True)
hm_fn = sanitize_filename(rec["Well_Id"] + ".txt")
hm_cache = op.join(cache_path, hm_fn)
templ_dict["Heatmap"] = heat_mpl(df_heat, id_prop="Compound_Id", cmap="bwr",
show=False, colorbar=True, plot_cache=hm_cache)
t = Template(cprt.DETAILS_TEMPL)
report = t.substitute(templ_dict)
return report
def full_report(df, src_dir, report_name="report", plate=None,
cutoff=0.6, highlight=False):
report_full_path = op.join(cp_config["Dirs"]["ReportDir"], report_name)
overview_fn = op.join(report_full_path, "index.html")
date = time.strftime("%d-%m-%Y %H:%M", time.localtime())
cpt.create_dirs(op.join(report_full_path, "details"))
if isinstance(df, cpp.DataSet):
df = df.data
print("* creating overview...")
header = "{}\n<h2>Cell Painting Overview Report</h2>\n".format(cprt.LOGO)
title = "Overview"
if plate is not None:
title = plate
header += "<h3>Plate {}</h3>\n".format(plate)
header += "<p>({})</p>\n".format(date)
if highlight:
highlight_legend = cprt.HIGHLIGHT_LEGEND
else:
highlight_legend = ""
overview, detailed_cpds = overview_report(df, cutoff=cutoff, highlight=highlight)
overview = header + overview + highlight_legend
write_page(overview, title=title, fn=overview_fn,
templ=cprt.OVERVIEW_HTML_INTRO)
# print(detailed_cpds)
print("* creating detailed reports...")
print(" * loading control images...")
ctrl_images = load_control_images(src_dir)
print(" * writing individual reports...")
df_detailed = df[df["Well_Id"].isin(detailed_cpds)]
ctr = 0
df_len = len(df_detailed)
for _, rec in df_detailed.iterrows():
ctr += 1
if not IPYTHON and ctr % 10 == 0:
print(" ({:3d}%)\r".format(int(100 * ctr / df_len)), end="")
well_id = rec["Well_Id"]
fn = op.join(report_full_path, "details",
"{}.html".format(sanitize_filename(well_id)))
title = "{} Details".format(well_id)
# similar = detailed_cpds[well_id]
details = detailed_report(rec, src_dir, ctrl_images)
write_page(details, title=title, fn=fn, templ=cprt.DETAILS_HTML_INTRO)
print("* done. ")
if IPYTHON:
return HTML('<a href="{}">{}</a>'.format(overview_fn, "Overview"))
|
[
"PIL.ImageChops.difference",
"cellpainting2.tools.load_config",
"PIL.Image.new",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.bar",
"IPython.core.display.HTML",
"gc.collect",
"os.path.isfile",
"matplotlib.pyplot.style.use",
"numpy.arange",
"cellpainting2.tools.parameters_from_act_profile_by_val",
"os.path.join",
"matplotlib.pyplot.tight_layout",
"rdkit.Chem.Draw.MolToImage",
"pandas.DataFrame",
"cellpainting2.tools.chem_sim",
"cellpainting2.processing.load_resource",
"rdkit.Chem.AllChem.GetMorganFingerprint",
"matplotlib.pyplot.close",
"holoviews.extension",
"matplotlib.pyplot.colorbar",
"holoviews.HeatMap",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.subplots",
"cellpainting2.processing.is_dask",
"time.localtime",
"pandas.concat",
"matplotlib.pyplot.pcolor",
"io.BytesIO",
"matplotlib.pyplot.show",
"cellpainting2.tools.melt",
"matplotlib.pyplot.legend",
"numpy.asarray",
"cellpainting2.tools.is_interactive_ipython",
"matplotlib.pyplot.text",
"rdkit.Avalon.pyAvalonTools.Generate2DCoords",
"matplotlib.pyplot.ylabel",
"rdkit.Chem.AllChem.MolFromSmiles",
"os.makedirs",
"os.path.isdir",
"misc_tools.apl_tools.get_commit",
"rdkit.Chem.AllChem.MolToSmiles",
"PIL.Image.open",
"string.Template",
"os.path.getmtime",
"pandas.Categorical",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((607, 632), 'cellpainting2.tools.load_config', 'cpt.load_config', (['"""config"""'], {}), "('config')\n", (622, 632), True, 'from cellpainting2 import tools as cpt\n'), ((684, 712), 'cellpainting2.tools.is_interactive_ipython', 'cpt.is_interactive_ipython', ([], {}), '()\n', (710, 712), True, 'from cellpainting2 import tools as cpt\n'), ((1976, 2006), 'misc_tools.apl_tools.get_commit', 'apl_tools.get_commit', (['__file__'], {}), '(__file__)\n', (1996, 2006), False, 'from misc_tools import apl_tools\n'), ((2631, 2652), 'holoviews.extension', 'hv.extension', (['"""bokeh"""'], {}), "('bokeh')\n", (2643, 2652), True, 'import holoviews as hv\n'), ((3230, 3253), 'rdkit.Chem.AllChem.MolFromSmiles', 'Chem.MolFromSmiles', (['smi'], {}), '(smi)\n', (3248, 3253), True, 'from rdkit.Chem import AllChem as Chem\n'), ((3488, 3522), 'PIL.Image.new', 'Image.new', (['"""RGB"""', 'im.size', 'bgcolor'], {}), "('RGB', im.size, bgcolor)\n", (3497, 3522), False, 'from PIL import Image, ImageChops\n'), ((3534, 3563), 'PIL.ImageChops.difference', 'ImageChops.difference', (['im', 'bg'], {}), '(im, bg)\n', (3555, 3563), False, 'from PIL import Image, ImageChops\n'), ((4404, 4424), 'PIL.Image.open', 'Image.open', (['image_fn'], {}), '(image_fn)\n', (4414, 4424), False, 'from PIL import Image, ImageChops\n'), ((4484, 4488), 'io.BytesIO', 'IO', ([], {}), '()\n', (4486, 4488), True, 'from io import BytesIO as IO\n'), ((5757, 5783), 'os.path.join', 'op.join', (['src_dir', '"""images"""'], {}), "(src_dir, 'images')\n", (5764, 5783), True, 'import os.path as op\n'), ((6251, 6291), 'string.Template', 'Template', (['(templ + page + cprt.HTML_EXTRO)'], {}), '(templ + page + cprt.HTML_EXTRO)\n', (6259, 6291), False, 'from string import Template\n'), ((8092, 8121), 'cellpainting2.processing.load_resource', 'cpp.load_resource', (['"""SIM_REFS"""'], {}), "('SIM_REFS')\n", (8109, 8121), True, 'from cellpainting2 import processing as cpp\n'), ((8238, 8274), 'string.Template', 'Template', (['cprt.OVERVIEW_TABLE_HEADER'], {}), '(cprt.OVERVIEW_TABLE_HEADER)\n', (8246, 8274), False, 'from string import Template\n'), ((8512, 8545), 'string.Template', 'Template', (['cprt.OVERVIEW_TABLE_ROW'], {}), '(cprt.OVERVIEW_TABLE_ROW)\n', (8520, 8545), False, 'from string import Template\n'), ((11537, 11568), 'cellpainting2.processing.load_resource', 'cpp.load_resource', (['"""REFERENCES"""'], {}), "('REFERENCES')\n", (11554, 11568), True, 'from cellpainting2 import processing as cpp\n'), ((11664, 11692), 'string.Template', 'Template', (['cprt.REF_TABLE_ROW'], {}), '(cprt.REF_TABLE_ROW)\n', (11672, 11692), False, 'from string import Template\n'), ((12914, 12990), 'cellpainting2.tools.parameters_from_act_profile_by_val', 'cpt.parameters_from_act_profile_by_val', (['act_prof', 'val'], {'parameters': 'parameters'}), '(act_prof, val, parameters=parameters)\n', (12952, 12990), True, 'from cellpainting2 import tools as cpt\n'), ((13027, 13056), 'string.Template', 'Template', (['cprt.PARM_TABLE_ROW'], {}), '(cprt.PARM_TABLE_ROW)\n', (13035, 13056), False, 'from string import Template\n'), ((13765, 13786), 'os.path.isfile', 'op.isfile', (['hist_cache'], {}), '(hist_cache)\n', (13774, 13786), True, 'import os.path as op\n'), ((14363, 14393), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-white"""'], {}), "('seaborn-white')\n", (14376, 14393), True, 'import matplotlib.pyplot as plt\n'), ((14398, 14429), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-pastel"""'], {}), "('seaborn-pastel')\n", (14411, 14429), True, 'import 
matplotlib.pyplot as plt\n'), ((14434, 14463), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-talk"""'], {}), "('seaborn-talk')\n", (14447, 14463), True, 'import matplotlib.pyplot as plt\n'), ((14710, 14739), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (14722, 14739), True, 'import matplotlib.pyplot as plt\n'), ((14752, 14771), 'numpy.arange', 'np.arange', (['n_groups'], {}), '(n_groups)\n', (14761, 14771), True, 'import numpy as np\n'), ((14797, 14862), 'matplotlib.pyplot.bar', 'plt.bar', (['index', 'inc_norm', 'bar_width'], {'color': '"""#94caef"""', 'label': '"""Inc"""'}), "(index, inc_norm, bar_width, color='#94caef', label='Inc')\n", (14804, 14862), True, 'import matplotlib.pyplot as plt\n'), ((14891, 14968), 'matplotlib.pyplot.bar', 'plt.bar', (['(index + bar_width)', 'dec_norm', 'bar_width'], {'color': '"""#ffdd1a"""', 'label': '"""Dec"""'}), "(index + bar_width, dec_norm, bar_width, color='#ffdd1a', label='Dec')\n", (14898, 14968), True, 'import matplotlib.pyplot as plt\n'), ((14998, 15028), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Cell Compartment"""'], {}), "('Cell Compartment')\n", (15008, 15028), True, 'import matplotlib.pyplot as plt\n'), ((15033, 15062), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""rel. Occurrence"""'], {}), "('rel. Occurrence')\n", (15043, 15062), True, 'import matplotlib.pyplot as plt\n'), ((15067, 15121), 'matplotlib.pyplot.xticks', 'plt.xticks', (['(index + bar_width / 2)', 'labels'], {'rotation': '(45)'}), '(index + bar_width / 2, labels, rotation=45)\n', (15077, 15121), True, 'import matplotlib.pyplot as plt\n'), ((15126, 15138), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (15136, 15138), True, 'import matplotlib.pyplot as plt\n'), ((15143, 15161), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (15159, 15161), True, 'import matplotlib.pyplot as plt\n'), ((15177, 15181), 'io.BytesIO', 'IO', ([], {}), '()\n', (15179, 15181), True, 'from io import BytesIO as IO\n'), ((15186, 15242), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_file'], {'bbox_inches': '"""tight"""', 'format': '"""jpg"""'}), "(img_file, bbox_inches='tight', format='jpg')\n", (15197, 15242), True, 'import matplotlib.pyplot as plt\n'), ((15419, 15430), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (15428, 15430), True, 'import matplotlib.pyplot as plt\n'), ((16086, 16116), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-white"""'], {}), "('seaborn-white')\n", (16099, 16116), True, 'import matplotlib.pyplot as plt\n'), ((16121, 16152), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-pastel"""'], {}), "('seaborn-pastel')\n", (16134, 16152), True, 'import matplotlib.pyplot as plt\n'), ((16157, 16186), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-talk"""'], {}), "('seaborn-talk')\n", (16170, 16186), True, 'import matplotlib.pyplot as plt\n'), ((19039, 19058), 'numpy.asarray', 'np.asarray', (['fp_list'], {}), '(fp_list)\n', (19049, 19058), True, 'import numpy as np\n'), ((19063, 19081), 'matplotlib.pyplot.xticks', 'plt.xticks', (['XTICKS'], {}), '(XTICKS)\n', (19073, 19081), True, 'import matplotlib.pyplot as plt\n'), ((19136, 19189), 'matplotlib.pyplot.pcolor', 'plt.pcolor', (['Z'], {'vmin': '(-max_val)', 'vmax': 'max_val', 'cmap': 'cmap'}), '(Z, vmin=-max_val, vmax=max_val, cmap=cmap)\n', (19146, 19189), True, 'import matplotlib.pyplot as plt\n'), ((19194, 19285), 'matplotlib.pyplot.text', 'plt.text', (['(XTICKS[1] 
// 2)', '(-1.1)', '"""Cells"""'], {'horizontalalignment': '"""center"""', 'fontsize': 'fs_text'}), "(XTICKS[1] // 2, -1.1, 'Cells', horizontalalignment='center',\n fontsize=fs_text)\n", (19202, 19285), True, 'import matplotlib.pyplot as plt\n'), ((19299, 19420), 'matplotlib.pyplot.text', 'plt.text', (['(XTICKS[1] + (XTICKS[2] - XTICKS[1]) // 2)', '(-1.1)', '"""Cytoplasm"""'], {'horizontalalignment': '"""center"""', 'fontsize': 'fs_text'}), "(XTICKS[1] + (XTICKS[2] - XTICKS[1]) // 2, -1.1, 'Cytoplasm',\n horizontalalignment='center', fontsize=fs_text)\n", (19307, 19420), True, 'import matplotlib.pyplot as plt\n'), ((19436, 19554), 'matplotlib.pyplot.text', 'plt.text', (['(XTICKS[2] + (XTICKS[3] - XTICKS[2]) // 2)', '(-1.1)', '"""Nuclei"""'], {'horizontalalignment': '"""center"""', 'fontsize': 'fs_text'}), "(XTICKS[2] + (XTICKS[3] - XTICKS[2]) // 2, -1.1, 'Nuclei',\n horizontalalignment='center', fontsize=fs_text)\n", (19444, 19554), True, 'import matplotlib.pyplot as plt\n'), ((19626, 19644), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (19642, 19644), True, 'import matplotlib.pyplot as plt\n'), ((20957, 20991), 'cellpainting2.tools.melt', 'cpt.melt', (['df_parm'], {'id_prop': 'id_prop'}), '(df_parm, id_prop=id_prop)\n', (20965, 20991), True, 'from cellpainting2 import tools as cpt\n'), ((21233, 21288), 'os.path.join', 'op.join', (["cp_config['Paths']['SrcPath']", 'plate_full_name'], {}), "(cp_config['Paths']['SrcPath'], plate_full_name)\n", (21240, 21288), True, 'import os.path as op\n'), ((21352, 21378), 'os.path.join', 'op.join', (['src_dir', '"""images"""'], {}), "(src_dir, 'images')\n", (21359, 21378), True, 'import os.path as op\n'), ((21656, 21683), 'string.Template', 'Template', (['cprt.IMAGES_TABLE'], {}), '(cprt.IMAGES_TABLE)\n', (21664, 21683), False, 'from string import Template\n'), ((21789, 21800), 'IPython.core.display.HTML', 'HTML', (['table'], {}), '(table)\n', (21793, 21800), False, 'from IPython.core.display import HTML\n'), ((21841, 21871), 'cellpainting2.processing.load_resource', 'cpp.load_resource', (['"""DATASTORE"""'], {}), "('DATASTORE')\n", (21858, 21871), True, 'from cellpainting2 import processing as cpp\n'), ((21954, 21973), 'cellpainting2.processing.is_dask', 'cpp.is_dask', (['result'], {}), '(result)\n', (21965, 21973), True, 'from cellpainting2 import processing as cpp\n'), ((22031, 22099), 'pandas.Categorical', 'pd.Categorical', (["result['Well_Id']"], {'categories': 'well_ids', 'ordered': '(True)'}), "(result['Well_Id'], categories=well_ids, ordered=True)\n", (22045, 22099), True, 'import pandas as pd\n'), ((22287, 22316), 'cellpainting2.processing.load_resource', 'cpp.load_resource', (['"""SIM_REFS"""'], {}), "('SIM_REFS')\n", (22304, 22316), True, 'from cellpainting2 import processing as cpp\n'), ((22422, 22448), 'os.path.join', 'op.join', (['src_dir', '"""images"""'], {}), "(src_dir, 'images')\n", (22429, 22448), True, 'import os.path as op\n'), ((23136, 23155), 'pandas.DataFrame', 'pd.DataFrame', (['[rec]'], {}), '([rec])\n', (23148, 23155), True, 'import pandas as pd\n'), ((25274, 25334), 'os.path.join', 'op.join', (["cp_config['Dirs']['DataDir']", '"""plots"""', "rec['Plate']"], {}), "(cp_config['Dirs']['DataDir'], 'plots', rec['Plate'])\n", (25281, 25334), True, 'import os.path as op\n'), ((25485, 25511), 'os.path.join', 'op.join', (['cache_path', 'hm_fn'], {}), '(cache_path, hm_fn)\n', (25492, 25511), True, 'import os.path as op\n'), ((25687, 25715), 'string.Template', 'Template', (['cprt.DETAILS_TEMPL'], {}), 
'(cprt.DETAILS_TEMPL)\n', (25695, 25715), False, 'from string import Template\n'), ((25906, 25958), 'os.path.join', 'op.join', (["cp_config['Dirs']['ReportDir']", 'report_name'], {}), "(cp_config['Dirs']['ReportDir'], report_name)\n", (25913, 25958), True, 'import os.path as op\n'), ((25977, 26016), 'os.path.join', 'op.join', (['report_full_path', '"""index.html"""'], {}), "(report_full_path, 'index.html')\n", (25984, 26016), True, 'import os.path as op\n'), ((3284, 3307), 'rdkit.Chem.AllChem.MolFromSmiles', 'Chem.MolFromSmiles', (['"""*"""'], {}), "('*')\n", (3302, 3307), True, 'from rdkit.Chem import AllChem as Chem\n'), ((5043, 5047), 'io.BytesIO', 'IO', ([], {}), '()\n', (5045, 5047), True, 'from io import BytesIO as IO\n'), ((11860, 11881), 'cellpainting2.processing.is_dask', 'cpp.is_dask', (['ref_data'], {}), '(ref_data)\n', (11871, 11881), True, 'from cellpainting2 import processing as cpp\n'), ((15803, 15824), 'os.path.isfile', 'op.isfile', (['plot_cache'], {}), '(plot_cache)\n', (15812, 15824), True, 'import os.path as op\n'), ((19607, 19621), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (19619, 19621), True, 'import matplotlib.pyplot as plt\n'), ((19666, 19676), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19674, 19676), True, 'import matplotlib.pyplot as plt\n'), ((19706, 19710), 'io.BytesIO', 'IO', ([], {}), '()\n', (19708, 19710), True, 'from io import BytesIO as IO\n'), ((19719, 19775), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_file'], {'bbox_inches': '"""tight"""', 'format': '"""jpg"""'}), "(img_file, bbox_inches='tight', format='jpg')\n", (19730, 19775), True, 'import matplotlib.pyplot as plt\n'), ((19993, 20002), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (20000, 20002), True, 'import matplotlib.pyplot as plt\n'), ((20011, 20022), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (20020, 20022), True, 'import matplotlib.pyplot as plt\n'), ((20031, 20043), 'gc.collect', 'gc.collect', ([], {}), '()\n', (20041, 20043), False, 'import gc\n'), ((22388, 22404), 'time.localtime', 'time.localtime', ([], {}), '()\n', (22402, 22404), False, 'import time\n'), ((23586, 23616), 'string.Template', 'Template', (['cprt.DETAILS_REF_ROW'], {}), '(cprt.DETAILS_REF_ROW)\n', (23594, 23616), False, 'from string import Template\n'), ((25346, 25366), 'os.path.isdir', 'op.isdir', (['cache_path'], {}), '(cache_path)\n', (25354, 25366), True, 'import os.path as op\n'), ((25376, 25414), 'os.makedirs', 'os.makedirs', (['cache_path'], {'exist_ok': '(True)'}), '(cache_path, exist_ok=True)\n', (25387, 25414), False, 'import os\n'), ((26060, 26076), 'time.localtime', 'time.localtime', ([], {}), '()\n', (26074, 26076), False, 'import time\n'), ((26098, 26134), 'os.path.join', 'op.join', (['report_full_path', '"""details"""'], {}), "(report_full_path, 'details')\n", (26105, 26134), True, 'import os.path as op\n'), ((3103, 3129), 'rdkit.Avalon.pyAvalonTools.Generate2DCoords', 'pyAv.Generate2DCoords', (['mol'], {}), '(mol)\n', (3124, 3129), True, 'from rdkit.Avalon import pyAvalonTools as pyAv\n'), ((4521, 4560), 'rdkit.Chem.Draw.MolToImage', 'Draw.MolToImage', (['mol'], {'size': '(size, size)'}), '(mol, size=(size, size))\n', (4536, 4560), False, 'from rdkit.Chem import Draw\n'), ((4644, 4667), 'rdkit.Chem.AllChem.MolFromSmiles', 'Chem.MolFromSmiles', (['"""C"""'], {}), "('C')\n", (4662, 4667), True, 'from rdkit.Chem import AllChem as Chem\n'), ((19097, 19114), 'numpy.arange', 'np.arange', (['df_len'], {}), '(df_len)\n', (19106, 19114), True, 'import 
numpy as np\n'), ((4607, 4628), 'rdkit.Chem.AllChem.MolToSmiles', 'Chem.MolToSmiles', (['mol'], {}), '(mol)\n', (4623, 4628), True, 'from rdkit.Chem import AllChem as Chem\n'), ((4691, 4730), 'rdkit.Chem.Draw.MolToImage', 'Draw.MolToImage', (['mol'], {'size': '(size, size)'}), '(mol, size=(size, size))\n', (4706, 4730), False, 'from rdkit.Chem import Draw\n'), ((21006, 21025), 'holoviews.HeatMap', 'hv.HeatMap', (['df_heat'], {}), '(df_heat)\n', (21016, 21025), True, 'import holoviews as hv\n'), ((10182, 10253), 'pandas.DataFrame', 'pd.DataFrame', (["{'Well_Id': [well_id], 'Similarity': [rec['Similarity']]}"], {}), "({'Well_Id': [well_id], 'Similarity': [rec['Similarity']]})\n", (10194, 10253), True, 'import pandas as pd\n'), ((17676, 17709), 'rdkit.Chem.AllChem.GetMorganFingerprint', 'Chem.GetMorganFingerprint', (['mol', '(2)'], {}), '(mol, 2)\n', (17701, 17709), True, 'from rdkit.Chem import AllChem as Chem\n'), ((2280, 2301), 'os.path.getmtime', 'op.getmtime', (['__file__'], {}), '(__file__)\n', (2291, 2301), True, 'import os.path as op\n'), ((18032, 18055), 'cellpainting2.tools.chem_sim', 'cpt.chem_sim', (['mol_fp', 'q'], {}), '(mol_fp, q)\n', (18044, 18055), True, 'from cellpainting2 import tools as cpt\n'), ((25142, 25172), 'pandas.concat', 'pd.concat', (['[df_heat, sim_data]'], {}), '([df_heat, sim_data])\n', (25151, 25172), True, 'import pandas as pd\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-01 01:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api_v2', '0007_auto_20170101_0101'),
]
operations = [
migrations.AlterField(
model_name='trial',
name='percentage_all',
field=models.FloatField(blank=True, help_text='Percentage Coefficient - all', null=True, verbose_name='P'),
),
migrations.AlterField(
model_name='trial',
name='percentage_blue',
field=models.FloatField(blank=True, help_text='Percentage Coefficient - blue', null=True, verbose_name='PB'),
),
migrations.AlterField(
model_name='trial',
name='percentage_red',
field=models.FloatField(blank=True, help_text='Percentage Coefficient - red', null=True, verbose_name='PR'),
),
migrations.AlterField(
model_name='trial',
name='percentage_white',
field=models.FloatField(blank=True, help_text='Percentage Coefficient - white', null=True, verbose_name='PW'),
),
migrations.AlterField(
model_name='trial',
name='time_mean_all',
field=models.FloatField(blank=True, help_text='Time Coefficient Mean - all', null=True, verbose_name='TM'),
),
migrations.AlterField(
model_name='trial',
name='time_mean_blue',
field=models.FloatField(blank=True, help_text='Time Coefficient Mean - blue', null=True, verbose_name='TMB'),
),
migrations.AlterField(
model_name='trial',
name='time_mean_red',
field=models.FloatField(blank=True, help_text='Time Coefficient Mean - red', null=True, verbose_name='TMR'),
),
migrations.AlterField(
model_name='trial',
name='time_mean_white',
field=models.FloatField(blank=True, help_text='Time Coefficient Mean - white', null=True, verbose_name='TMW'),
),
migrations.AlterField(
model_name='trial',
name='time_stdev_all',
field=models.FloatField(blank=True, help_text='Time Coefficient Standard Deviation - all', null=True, verbose_name='TSD'),
),
migrations.AlterField(
model_name='trial',
name='time_stdev_blue',
field=models.FloatField(blank=True, help_text='Time Coefficient Standard Deviation - blue', null=True, verbose_name='TSDB'),
),
migrations.AlterField(
model_name='trial',
name='time_stdev_red',
field=models.FloatField(blank=True, help_text='Time Coefficient Standard Deviation - red', null=True, verbose_name='TSDR'),
),
migrations.AlterField(
model_name='trial',
name='time_stdev_white',
field=models.FloatField(blank=True, help_text='Time Coefficient Standard Deviation - white', null=True, verbose_name='TSDW'),
),
migrations.AlterField(
model_name='trial',
name='timeout',
field=models.FloatField(help_text='Seconds per color', verbose_name='Timeout'),
),
]
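# Editorial usage note (hedged, not from the original repository): a migration like the
# one above is normally produced by `python manage.py makemigrations` after the
# FloatField definitions on the `Trial` model change, and applied with
# `python manage.py migrate api_v2`. The sketch below shows, as an assumption, what the
# first altered field would look like in the app's models.py; the keyword arguments are
# copied from the AlterField operation above.
#
#   class Trial(models.Model):
#       percentage_all = models.FloatField(
#           blank=True, null=True,
#           help_text='Percentage Coefficient - all', verbose_name='P',
#       )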
|
[
"django.db.models.FloatField"
] |
[((407, 511), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'help_text': '"""Percentage Coefficient - all"""', 'null': '(True)', 'verbose_name': '"""P"""'}), "(blank=True, help_text='Percentage Coefficient - all',\n null=True, verbose_name='P')\n", (424, 511), False, 'from django.db import migrations, models\n'), ((637, 743), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'help_text': '"""Percentage Coefficient - blue"""', 'null': '(True)', 'verbose_name': '"""PB"""'}), "(blank=True, help_text='Percentage Coefficient - blue',\n null=True, verbose_name='PB')\n", (654, 743), False, 'from django.db import migrations, models\n'), ((868, 973), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'help_text': '"""Percentage Coefficient - red"""', 'null': '(True)', 'verbose_name': '"""PR"""'}), "(blank=True, help_text='Percentage Coefficient - red',\n null=True, verbose_name='PR')\n", (885, 973), False, 'from django.db import migrations, models\n'), ((1100, 1207), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'help_text': '"""Percentage Coefficient - white"""', 'null': '(True)', 'verbose_name': '"""PW"""'}), "(blank=True, help_text='Percentage Coefficient - white',\n null=True, verbose_name='PW')\n", (1117, 1207), False, 'from django.db import migrations, models\n'), ((1331, 1436), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'help_text': '"""Time Coefficient Mean - all"""', 'null': '(True)', 'verbose_name': '"""TM"""'}), "(blank=True, help_text='Time Coefficient Mean - all', null\n =True, verbose_name='TM')\n", (1348, 1436), False, 'from django.db import migrations, models\n'), ((1560, 1666), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'help_text': '"""Time Coefficient Mean - blue"""', 'null': '(True)', 'verbose_name': '"""TMB"""'}), "(blank=True, help_text='Time Coefficient Mean - blue',\n null=True, verbose_name='TMB')\n", (1577, 1666), False, 'from django.db import migrations, models\n'), ((1790, 1896), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'help_text': '"""Time Coefficient Mean - red"""', 'null': '(True)', 'verbose_name': '"""TMR"""'}), "(blank=True, help_text='Time Coefficient Mean - red', null\n =True, verbose_name='TMR')\n", (1807, 1896), False, 'from django.db import migrations, models\n'), ((2021, 2128), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'help_text': '"""Time Coefficient Mean - white"""', 'null': '(True)', 'verbose_name': '"""TMW"""'}), "(blank=True, help_text='Time Coefficient Mean - white',\n null=True, verbose_name='TMW')\n", (2038, 2128), False, 'from django.db import migrations, models\n'), ((2253, 2373), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'help_text': '"""Time Coefficient Standard Deviation - all"""', 'null': '(True)', 'verbose_name': '"""TSD"""'}), "(blank=True, help_text=\n 'Time Coefficient Standard Deviation - all', null=True, verbose_name='TSD')\n", (2270, 2373), False, 'from django.db import migrations, models\n'), ((2498, 2625), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'help_text': '"""Time Coefficient Standard Deviation - blue"""', 'null': '(True)', 'verbose_name': '"""TSDB"""'}), "(blank=True, help_text=\n 'Time Coefficient Standard Deviation - blue', null=True, verbose_name=\n 'TSDB')\n", (2515, 2625), False, 'from django.db import 
migrations, models\n'), ((2744, 2870), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'help_text': '"""Time Coefficient Standard Deviation - red"""', 'null': '(True)', 'verbose_name': '"""TSDR"""'}), "(blank=True, help_text=\n 'Time Coefficient Standard Deviation - red', null=True, verbose_name='TSDR'\n )\n", (2761, 2870), False, 'from django.db import migrations, models\n'), ((2991, 3119), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'help_text': '"""Time Coefficient Standard Deviation - white"""', 'null': '(True)', 'verbose_name': '"""TSDW"""'}), "(blank=True, help_text=\n 'Time Coefficient Standard Deviation - white', null=True, verbose_name=\n 'TSDW')\n", (3008, 3119), False, 'from django.db import migrations, models\n'), ((3231, 3303), 'django.db.models.FloatField', 'models.FloatField', ([], {'help_text': '"""Seconds per color"""', 'verbose_name': '"""Timeout"""'}), "(help_text='Seconds per color', verbose_name='Timeout')\n", (3248, 3303), False, 'from django.db import migrations, models\n')]
|
import numpy as np
import torch
# https://github.com/sfujim/TD3/blob/ade6260da88864d1ab0ed592588e090d3d97d679/utils.py
class ReplayBuffer(object):
def __init__(self, state_dim, action_dim, max_size=int(1e6)):
self.max_size = max_size
self.ptr = 0
self.size = 0
self.state = np.zeros((max_size, state_dim))
self.action = np.zeros((max_size, action_dim))
self.next_state = np.zeros((max_size, state_dim))
self.reward = np.zeros((max_size, 1))
self.not_done = np.zeros((max_size, 1))
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def add(self, state, action, next_state, reward, done):
self.state[self.ptr] = state
self.action[self.ptr] = action
self.next_state[self.ptr] = next_state
self.reward[self.ptr] = reward
self.not_done[self.ptr] = 1. - done
self.ptr = (self.ptr + 1) % self.max_size
self.size = min(self.size + 1, self.max_size)
def sample(self, batch_size):
ind = np.random.randint(0, self.size, size=batch_size)
return (
torch.from_numpy(self.state[ind]).float().to(self.device),
torch.from_numpy(self.action[ind]).float().to(self.device),
torch.from_numpy(self.next_state[ind]).float().to(self.device),
torch.from_numpy(self.reward[ind]).float().to(self.device),
torch.from_numpy(self.not_done[ind]).float().to(self.device)
)
def sample_np(self, batch_size):
ind = np.random.randint(0, self.size, size=batch_size)
return (
np.float32(self.state[ind]),
np.float32(self.action[ind]),
np.float32(self.next_state[ind]),
np.float32(self.reward[ind]),
np.float32(self.not_done[ind])
)
def save(self, fdir):
np.save(fdir + '/sample-state', self.state[:self.size])
np.save(fdir + '/sample-action', self.action[:self.size])
np.save(fdir + '/sample-nstate', self.next_state[:self.size])
np.save(fdir + '/sample-reward', self.reward[:self.size])
np.save(fdir + '/sample-ndone', self.not_done[:self.size])
def load(self, fdir):
state = np.load(fdir + '/sample-state.npy', allow_pickle=True)
action = np.load(fdir + '/sample-action.npy', allow_pickle=True)
nstate = np.load(fdir + '/sample-nstate.npy', allow_pickle=True)
reward = np.load(fdir + '/sample-reward.npy', allow_pickle=True)
ndone = np.load(fdir + '/sample-ndone.npy', allow_pickle=True)
for s, a, ns, r, nd in zip(state, action, nstate, reward, ndone):
self.add(s, a, ns, r, 1. - nd)
def reset(self):
self.ptr = 0
self.size = 0
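# Usage sketch (editorial addition, hedged): a minimal example of driving this buffer in a
# TD3-style loop. The state/action dimensions, batch size, and dummy transitions below are
# illustrative assumptions, not taken from the original repository.
if __name__ == "__main__":
    buffer = ReplayBuffer(state_dim=3, action_dim=1, max_size=1000)
    for _ in range(64):
        s = np.random.randn(3)        # fake state
        a = np.random.randn(1)        # fake action
        ns = np.random.randn(3)       # fake next state
        r = float(np.random.randn())  # fake reward
        buffer.add(s, a, ns, r, done=0.0)
    # sample() returns float torch tensors on buffer.device;
    # sample_np() returns the same batch as float32 numpy arrays.
    state, action, next_state, reward, not_done = buffer.sample(batch_size=16)
    print(state.shape, action.shape, reward.shape, not_done.shape)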
|
[
"numpy.load",
"numpy.save",
"numpy.float32",
"numpy.zeros",
"numpy.random.randint",
"torch.cuda.is_available",
"torch.from_numpy"
] |
[((313, 344), 'numpy.zeros', 'np.zeros', (['(max_size, state_dim)'], {}), '((max_size, state_dim))\n', (321, 344), True, 'import numpy as np\n'), ((367, 399), 'numpy.zeros', 'np.zeros', (['(max_size, action_dim)'], {}), '((max_size, action_dim))\n', (375, 399), True, 'import numpy as np\n'), ((426, 457), 'numpy.zeros', 'np.zeros', (['(max_size, state_dim)'], {}), '((max_size, state_dim))\n', (434, 457), True, 'import numpy as np\n'), ((480, 503), 'numpy.zeros', 'np.zeros', (['(max_size, 1)'], {}), '((max_size, 1))\n', (488, 503), True, 'import numpy as np\n'), ((528, 551), 'numpy.zeros', 'np.zeros', (['(max_size, 1)'], {}), '((max_size, 1))\n', (536, 551), True, 'import numpy as np\n'), ((1057, 1105), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.size'], {'size': 'batch_size'}), '(0, self.size, size=batch_size)\n', (1074, 1105), True, 'import numpy as np\n'), ((1549, 1597), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.size'], {'size': 'batch_size'}), '(0, self.size, size=batch_size)\n', (1566, 1597), True, 'import numpy as np\n'), ((1874, 1929), 'numpy.save', 'np.save', (["(fdir + '/sample-state')", 'self.state[:self.size]'], {}), "(fdir + '/sample-state', self.state[:self.size])\n", (1881, 1929), True, 'import numpy as np\n'), ((1938, 1995), 'numpy.save', 'np.save', (["(fdir + '/sample-action')", 'self.action[:self.size]'], {}), "(fdir + '/sample-action', self.action[:self.size])\n", (1945, 1995), True, 'import numpy as np\n'), ((2004, 2065), 'numpy.save', 'np.save', (["(fdir + '/sample-nstate')", 'self.next_state[:self.size]'], {}), "(fdir + '/sample-nstate', self.next_state[:self.size])\n", (2011, 2065), True, 'import numpy as np\n'), ((2074, 2131), 'numpy.save', 'np.save', (["(fdir + '/sample-reward')", 'self.reward[:self.size]'], {}), "(fdir + '/sample-reward', self.reward[:self.size])\n", (2081, 2131), True, 'import numpy as np\n'), ((2140, 2198), 'numpy.save', 'np.save', (["(fdir + '/sample-ndone')", 'self.not_done[:self.size]'], {}), "(fdir + '/sample-ndone', self.not_done[:self.size])\n", (2147, 2198), True, 'import numpy as np\n'), ((2242, 2296), 'numpy.load', 'np.load', (["(fdir + '/sample-state.npy')"], {'allow_pickle': '(True)'}), "(fdir + '/sample-state.npy', allow_pickle=True)\n", (2249, 2296), True, 'import numpy as np\n'), ((2314, 2369), 'numpy.load', 'np.load', (["(fdir + '/sample-action.npy')"], {'allow_pickle': '(True)'}), "(fdir + '/sample-action.npy', allow_pickle=True)\n", (2321, 2369), True, 'import numpy as np\n'), ((2387, 2442), 'numpy.load', 'np.load', (["(fdir + '/sample-nstate.npy')"], {'allow_pickle': '(True)'}), "(fdir + '/sample-nstate.npy', allow_pickle=True)\n", (2394, 2442), True, 'import numpy as np\n'), ((2460, 2515), 'numpy.load', 'np.load', (["(fdir + '/sample-reward.npy')"], {'allow_pickle': '(True)'}), "(fdir + '/sample-reward.npy', allow_pickle=True)\n", (2467, 2515), True, 'import numpy as np\n'), ((2532, 2586), 'numpy.load', 'np.load', (["(fdir + '/sample-ndone.npy')"], {'allow_pickle': '(True)'}), "(fdir + '/sample-ndone.npy', allow_pickle=True)\n", (2539, 2586), True, 'import numpy as np\n'), ((1627, 1654), 'numpy.float32', 'np.float32', (['self.state[ind]'], {}), '(self.state[ind])\n', (1637, 1654), True, 'import numpy as np\n'), ((1668, 1696), 'numpy.float32', 'np.float32', (['self.action[ind]'], {}), '(self.action[ind])\n', (1678, 1696), True, 'import numpy as np\n'), ((1710, 1742), 'numpy.float32', 'np.float32', (['self.next_state[ind]'], {}), '(self.next_state[ind])\n', (1720, 1742), True, 'import numpy as 
np\n'), ((1756, 1784), 'numpy.float32', 'np.float32', (['self.reward[ind]'], {}), '(self.reward[ind])\n', (1766, 1784), True, 'import numpy as np\n'), ((1798, 1828), 'numpy.float32', 'np.float32', (['self.not_done[ind]'], {}), '(self.not_done[ind])\n', (1808, 1828), True, 'import numpy as np\n'), ((598, 623), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (621, 623), False, 'import torch\n'), ((1135, 1168), 'torch.from_numpy', 'torch.from_numpy', (['self.state[ind]'], {}), '(self.state[ind])\n', (1151, 1168), False, 'import torch\n'), ((1206, 1240), 'torch.from_numpy', 'torch.from_numpy', (['self.action[ind]'], {}), '(self.action[ind])\n', (1222, 1240), False, 'import torch\n'), ((1278, 1316), 'torch.from_numpy', 'torch.from_numpy', (['self.next_state[ind]'], {}), '(self.next_state[ind])\n', (1294, 1316), False, 'import torch\n'), ((1354, 1388), 'torch.from_numpy', 'torch.from_numpy', (['self.reward[ind]'], {}), '(self.reward[ind])\n', (1370, 1388), False, 'import torch\n'), ((1426, 1462), 'torch.from_numpy', 'torch.from_numpy', (['self.not_done[ind]'], {}), '(self.not_done[ind])\n', (1442, 1462), False, 'import torch\n')]
|