the-stack_106_19321
import itertools
import logging
import os
import sys
from contextlib import closing
import flask
from flask import render_template
from flask import request
from packaging.version import parse
from sqlalchemy import Column
from sqlalchemy import create_engine
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
class PluginResult(Base):
"""Results of testing a pytest plugin against a pytest and python version."""
__tablename__ = "results"
id = Column(Integer, primary_key=True)
name = Column(String, index=True)
version = Column(String, index=True)
env = Column(String)
pytest = Column(String)
status = Column(String)
output = Column(String)
description = Column(String, default="")
def as_dict(self):
return {
"name": self.name,
"version": self.version,
"env": self.env,
"pytest": self.pytest,
"status": self.status,
"output": self.output,
"description": self.description,
}
def __repr__(self):
attrs = [f"{k}={v!r}" for k, v in self.as_dict().items()]
return f"PluginResult({', '.join(attrs)})"
def __eq__(self, other):
if not isinstance(other, PluginResult):
return NotImplemented
return self.as_dict() == other.as_dict()
app = flask.Flask("plugincompat")
def get_python_versions():
"""
Python versions we are willing to display on the page, in order to ignore
old and incomplete results.
"""
return {"py36", "py37", "py38"}
def get_pytest_versions():
"""
Same as `get_python_versions`, but for pytest versions.
"""
return {"6.0.1"}
class PlugsStorage:
"""
    API around a SQL database (accessed through SQLAlchemy) used to add and
    obtain test results for pytest plugins.
"""
def __init__(self, url=None):
url = url or os.environ["DATABASE_URL"]
self._engine = create_engine(url)
Base.metadata.create_all(self._engine)
self._session_maker = sessionmaker(autocommit=False, autoflush=False, bind=self._engine)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._engine.dispose()
def add_test_result(self, payload):
"""
        :param payload: adds results from a compatibility test for a pytest plugin.
            The result is given as a dict containing the following keys:
* "name": name of the library;
* "version": version of the library;
* "env": python environment of the test. Examples: "py27", "py32", "py33".
* "pytest": pytest version of the test. Examples: "2.3.5"
* "status": "ok" or "fail".
* "output": string with output from running tox commands.
* "description": description of this package (optional).
"""
expected = {"name", "version", "env", "pytest", "status"}
if not expected.issubset(payload):
raise TypeError("Invalid keys given: %s" % payload.keys())
with closing(self._session_maker()) as session:
result = (
session.query(PluginResult)
.filter(PluginResult.name == payload["name"])
.filter(PluginResult.version == payload["version"])
.filter(PluginResult.env == payload["env"])
.filter(PluginResult.pytest == payload["pytest"])
.first()
)
if result is None:
result = PluginResult(**payload)
result.status = payload["status"]
result.output = payload.get("output", "")
result.description = payload.get("description", "")
session.add(result)
session.commit()
def drop_all(self):
Base.metadata.drop_all(self._engine)
Base.metadata.create_all(self._engine)
def get_all_results(self):
with closing(self._session_maker()) as session:
return [x.as_dict() for x in session.query(PluginResult).all()]
def get_test_results(self, name, version):
"""
searches the database for all test results given library name and
version. If version is LATEST_VERSION, only results for highest
version number are returned.
"""
with closing(self._session_maker()) as session:
q = session.query(PluginResult).filter(PluginResult.name == name)
if version != LATEST_VERSION:
q = q.filter(PluginResult.version == version)
results = [p.as_dict() for p in q.all()]
if version != LATEST_VERSION:
return results
else:
return filter_latest_results(results)
def _filter_entry_ids(self, entries):
"""
removes special "_id" from entries returned from MongoDB
"""
result = []
for entry in entries:
del entry["_id"]
result.append(entry)
return result
_storage = None
def get_storage_for_view():
"""
    Returns a storage instance to be used by the view functions. This exists
    solely so we can mock this function during testing.
"""
global _storage
if _storage is None:
_storage = PlugsStorage()
return _storage
def authenticate(json_data):
"""Ensure the posted data contains the correct secret"""
if json_data.get("secret") != os.environ["POST_KEY"]:
flask.abort(401)
@app.route("/", methods=["GET", "POST"])
def index():
storage = get_storage_for_view()
if request.method == "POST":
data = request.get_json()
authenticate(data)
results = data["results"]
if not isinstance(results, list):
results = [results]
for result in results:
storage.add_test_result(result)
return "OK, posted {} entries".format(len(results))
else:
all_results = storage.get_all_results()
if request.args.get("json", False):
response = flask.jsonify(data=all_results)
return response
else:
if all_results:
namespace = get_namespace_for_rendering(all_results)
return render_template("index.html", **namespace)
else:
return "Database is empty"
def filter_latest_results(all_results):
"""
given a list of test results read from the db, filter out only the ones
for highest library version available in the database.
"""
latest_versions = set(get_latest_versions((x["name"], x["version"]) for x in all_results))
for result in all_results:
if (result["name"], result["version"]) in latest_versions:
yield result
def get_namespace_for_rendering(all_results):
# python_versions, lib_names, pytest_versions, statuses, latest_pytest_ver
python_versions = get_python_versions()
lib_names = set()
pytest_versions = get_pytest_versions()
statuses = {}
outputs = {}
descriptions = {}
latest_results = filter_latest_results(all_results)
for result in latest_results:
ignore = result["env"] not in python_versions or result["pytest"] not in pytest_versions
if ignore:
continue
lib_name = "{}-{}".format(result["name"], result["version"])
lib_names.add(lib_name)
key = (lib_name, result["env"], result["pytest"])
statuses[key] = result["status"]
outputs[key] = result.get("output", NO_OUTPUT_AVAILABLE)
if not descriptions.get(lib_name):
descriptions[lib_name] = result.get("description", "")
latest_pytest_ver = max(pytest_versions, key=parse)
return dict(
python_versions=sorted(python_versions),
lib_names=sorted(lib_names),
pytest_versions=sorted(pytest_versions),
statuses=statuses,
outputs=outputs,
descriptions=descriptions,
latest_pytest_ver=latest_pytest_ver,
)
def get_latest_versions(names_and_versions):
"""
Returns an iterator of (name, version) from the given list of (name,
version), but returning only the latest version of the package.
"""
names_and_versions = sorted((name, parse(version)) for (name, version) in names_and_versions)
for name, grouped_versions in itertools.groupby(names_and_versions, key=lambda x: x[0]):
name, loose_version = list(grouped_versions)[-1]
yield name, str(loose_version)
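# A minimal illustration (not part of the original module) of what
# get_latest_versions yields: only the highest version per package name.
#
#     >>> pairs = [("pytest-cov", "2.5"), ("pytest-xdist", "1.0"), ("pytest-xdist", "2.0")]
#     >>> list(get_latest_versions(pairs))
#     [('pytest-cov', '2.5'), ('pytest-xdist', '2.0')]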
@app.route("/status")
@app.route("/status/<name>")
def get_status_image(name=None):
py = request.args.get("py")
pytest = request.args.get("pytest")
if name and py and pytest:
status = get_field_for(name, py, pytest, "status")
if not status:
status = "unknown"
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, "static", "%s.png" % status)
        with open(filename, "rb") as f:
            response = flask.make_response(f.read())
response.content_type = "image/png"
return response
else:
if name is None:
name = "pytest-pep8-1.0.5"
name = name.rsplit("-", 1)[0]
return render_template("status_help.html", name=name)
@app.route("/output/<name>")
def get_output(name):
py = request.args.get("py")
pytest = request.args.get("pytest")
if name and py and pytest:
output = get_field_for(name, py, pytest, "output")
status_code = 200
if not output:
output = NO_OUTPUT_AVAILABLE
status_code = 404
response = flask.make_response(output)
        response.content_type = "text/plain"
response.status_code = status_code
return response
else:
return 'Specify "py" and "pytest" parameters'
def get_field_for(fullname, env, pytest, field_name):
storage = get_storage_for_view()
name, version = fullname.rsplit("-", 1)
for test_result in storage.get_test_results(name, version):
if test_result["env"] == env and test_result["pytest"] == pytest:
return test_result.get(field_name, None)
# text returned when an entry in the database lacks an "output" field
NO_OUTPUT_AVAILABLE = "<no output available>"
LATEST_VERSION = "latest"
def main():
app.debug = True
app.logger.addHandler(logging.StreamHandler(sys.stdout))
app.logger.setLevel(logging.ERROR)
app.run(host="127.0.0.1", port=int(os.environ.get("PORT", "5000")))
if __name__ == "__main__":
main()
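# A minimal sketch (not part of the original module) of how a client could
# post results to this service. It assumes the server is running locally on
# port 5000 with POST_KEY set to "s3cr3t", and that the `requests` library is
# installed; the helper name and the values below are illustrative only.
def _example_post_result():
    import requests

    payload = {
        "secret": "s3cr3t",  # must match the server's POST_KEY environment variable
        "results": [
            {
                "name": "pytest-pep8",
                "version": "1.0.5",
                "env": "py38",
                "pytest": "6.0.1",
                "status": "ok",
                "output": "tox output goes here",
                "description": "pytest plugin to check PEP8 compliance",
            }
        ],
    }
    response = requests.post("http://localhost:5000/", json=payload)
    print(response.text)  # e.g. "OK, posted 1 entries"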
the-stack_106_19324
"""
Tools for opening a cluster's web UI.
"""
import click
from dcos_e2e_cli.common.options import (
existing_cluster_id_option,
verbosity_option,
)
from dcos_e2e_cli.common.utils import check_cluster_id_exists
from dcos_e2e_cli.common.web import launch_web_ui
from ._common import ClusterInstances, existing_cluster_ids
from ._options import aws_region_option
@click.command('web')
@existing_cluster_id_option
@aws_region_option
@verbosity_option
def web(cluster_id: str, aws_region: str) -> None:
"""
Open the browser at the web UI.
Note that the web UI may not be available at first.
Consider using ``minidcos aws wait`` before running this command.
"""
check_cluster_id_exists(
new_cluster_id=cluster_id,
existing_cluster_ids=existing_cluster_ids(aws_region=aws_region),
)
cluster_instances = ClusterInstances(
cluster_id=cluster_id,
aws_region=aws_region,
)
launch_web_ui(cluster=cluster_instances.cluster)
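# A usage sketch (not part of the original module): once the cluster is
# reachable, the command is invoked roughly as
#
#     minidcos aws wait --cluster-id default
#     minidcos aws web --cluster-id default
#
# The exact flag name comes from ``existing_cluster_id_option`` and may differ
# between dcos-e2e-cli versions; treat the invocation above as an assumption.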
the-stack_106_19325
from __future__ import absolute_import, annotations
import logging
from operator import itemgetter
from typing import Any, Dict, List, Set, Tuple
import numpy as np
import scipy.spatial.distance
from nltk.metrics import edit_distance
from ..models.graph import Edge, Graph, Node
from ..models.nlp import Embeddings
from ..models.ontology import Ontology, OntologyNode
from ..models.result import Result
from ..services import utils
logger = logging.getLogger("recap")
config = utils.Config.get_instance()
class Similarity(object):
"""Class to store similarity params and compute metrics based on them"""
# Here will be the instance stored.
_instance = None
@staticmethod
def get_instance():
""" Static access method. """
        if Similarity._instance is None:
Similarity()
return Similarity._instance
def __init__(self):
""" Virtually private constructor. """
        if Similarity._instance is not None:
raise Exception("This class is a singleton!")
else:
Similarity._instance = self
def graphs_similarity(
self, graphs: Dict[str, Graph], query_graph: Graph
) -> List[Result]:
"""Compute similarity between multiple graphs"""
similarities: List[Result] = []
for graph in graphs.values():
similarities.append(
Result(graph, self.graph_similarity(graph, query_graph))
)
similarities = sorted(
similarities, key=lambda result: result.similarity, reverse=True
)
return similarities
def _cosine_similarity(self, vec1: np.ndarray, vec2: np.ndarray) -> float:
"""Compute cosine similarity between two vectors"""
sim = 0.0
if vec1.any() and vec2.any():
try:
sim = 1 - scipy.spatial.distance.cosine(vec1, vec2)
            except Exception:
pass
return sim
def _angular_similarity(self, vec1: np.ndarray, vec2: np.ndarray) -> float:
"""Compute angular similarity between two vectors"""
sim = 0.0
if vec1.any() and vec2.any():
try:
sim = (
1.0
- np.arccos(
np.dot(vec1, vec2)
/ (np.linalg.norm(vec1) * np.linalg.norm(vec2))
)
/ np.pi
)
            except Exception:
pass
return sim
def _edit_distance(self, text1: str, text2: str) -> float:
"""Calculate the Levenshtein distance between two strings"""
sim = 0
if text1 == text2:
sim = 1
else:
sim = 1 - (edit_distance(text1, text2) / max(len(text1), len(text2)))
return sim
def _word_movers_distance(self, tokens1: List[str], tokens2: List[str]) -> float:
"""Calculate the word mover's distance with gensim
As multiple models are available, the results are averaged.
"""
similarities = []
embs = Embeddings.get_instance()
for emb in embs.values():
similarities.append(1 / (1 + emb.wmdistance(tokens1, tokens2)))
return np.mean(similarities)
def _fuzzify(self, s, u):
"""https://github.com/Babylonpartners/fuzzymax
Sentence fuzzifier.
Computes membership vector for the sentence S with respect to the
universe U
:param s: list of word embeddings for the sentence
:param u: the universe matrix U with shape (K, d)
:return: membership vectors for the sentence
"""
f_s = np.dot(s, u.T)
m_s = np.max(f_s, axis=0)
m_s = np.maximum(m_s, 0, m_s)
return m_s
def _dynamax_jaccard(self, x, y):
"""https://github.com/Babylonpartners/fuzzymax
DynaMax-Jaccard similarity measure between two sentences
:param x: list of word embeddings for the first sentence
:param y: list of word embeddings for the second sentence
:return: similarity score between the two sentences
"""
u = np.vstack((x, y))
m_x = self._fuzzify(x, u)
m_y = self._fuzzify(y, u)
m_inter = np.sum(np.minimum(m_x, m_y))
m_union = np.sum(np.maximum(m_x, m_y))
return m_inter / m_union
def _max_jaccard(self, x, y):
"""
MaxPool-Jaccard similarity measure between two sentences
:param x: list of word embeddings for the first sentence
:param y: list of word embeddings for the second sentence
:return: similarity score between the two sentences
"""
m_x = np.max(x, axis=0)
m_x = np.maximum(m_x, 0, m_x)
m_y = np.max(y, axis=0)
m_y = np.maximum(m_y, 0, m_y)
m_inter = np.sum(np.minimum(m_x, m_y))
m_union = np.sum(np.maximum(m_x, m_y))
return m_inter / m_union
def _fuzzy_jaccard(self, x, y):
m_inter = np.sum(np.minimum(x, y))
m_union = np.sum(np.maximum(x, y))
return m_inter / m_union
def _threshold_similarity(self, sim: float) -> float:
"""Adapt a similarity value to a specified threshold"""
if sim < config["similarity_threshold"]:
sim = 0.0
elif config["similarity_threshold"] > 0:
sim = (sim - config["similarity_threshold"]) / (
1 - config["similarity_threshold"]
)
return sim
def graph_similarity(self, graph1: Graph, graph2: Graph) -> float:
"""Compute similarity of two graphs based on their texts"""
return self.general_similarity(graph1, graph2)
def general_similarity(self, entity1: Any, entity2: Any) -> float:
sim = 0.0
if config["similarity_method"] == "edit":
sim = self._edit_distance(entity1.text, entity2.text)
elif config["similarity_method"] == "cosine":
sim = self._cosine_similarity(entity1.vector, entity2.vector)
elif config["similarity_method"] == "angular":
sim = self._angular_similarity(entity1.vector, entity2.vector)
elif config["similarity_method"] == "wmd":
sim = self._word_movers_distance(entity1.tokens, entity2.tokens)
elif config["similarity_method"] == "dynamax":
sim = self._dynamax_jaccard(entity1.vectors, entity2.vectors)
elif config["similarity_method"] == "maxpool":
sim = self._max_jaccard(entity1.vectors, entity2.vectors)
elif config["similarity_method"] == "fuzzy-jaccard":
sim = self._fuzzy_jaccard(entity1.vector, entity2.vector)
return self._threshold_similarity(sim)
def node_similarity(self, node1: Node, node2: Node) -> float:
"""Compute similarity of nodes
If it is an I-Node, the text will be compared
If it is a RA-Node, the ontology can be used to compare the scheme
"""
sim = 0.0
if node1.type_ == node2.type_:
if node1.type_ == "I":
sim = self.general_similarity(node1, node2)
elif config["use_schemes"]:
if node1.text == node2.text:
sim = 1
elif node1.type_ == "RA" and config["use_ontology"]:
ontology = Ontology.get_instance()
sim = self._threshold_similarity(
ontology.get_similarity(node1.text, node2.text)
)
else:
sim = 1
# TODO: Check if this is correct and improves the result
# elif node1.type_ in ["RA", "CA"] and node2.type_ in ["RA", "CA"]:
# sim = 0.5
return sim
def edge_similarity(self, edge1: Edge, edge2: Edge) -> float:
"""Compute edge similarity by comparing the four corresponding nodes"""
return 0.5 * (
self.node_similarity(edge1.from_node, edge2.from_node)
+ self.node_similarity(edge1.to_node, edge2.to_node)
)
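# A minimal sketch (not part of the original module) exercising the helpers
# that do not depend on the config singleton; calling the private methods
# directly is for illustration only.
if __name__ == "__main__":
    sim = Similarity.get_instance()
    print(sim._edit_distance("argument", "arguments"))  # 1 - 1/9 ~= 0.889
    v1, v2 = np.array([1.0, 0.0]), np.array([1.0, 1.0])
    print(sim._cosine_similarity(v1, v2))  # ~= 0.707
    print(sim._fuzzy_jaccard(np.array([0.2, 0.8]), np.array([0.4, 0.6])))  # 0.8 / 1.2 ~= 0.667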
the-stack_106_19332
class InvalidPage(Exception):
pass
class Paginator(object):
def __init__(self, object_list, per_page, orphans=0, allow_empty_first_page=True):
self.object_list = object_list
self.per_page = per_page
self.orphans = orphans
self.allow_empty_first_page = allow_empty_first_page
self._num_pages = self._count = None
def validate_number(self, number):
"Validates the given 1-based page number."
try:
number = int(number)
except ValueError:
raise InvalidPage('That page number is not an integer')
if number < 1:
raise InvalidPage('That page number is less than 1')
if number > self.num_pages:
if number == 1 and self.allow_empty_first_page:
pass
else:
raise InvalidPage('That page contains no results')
return number
def page(self, number):
"Returns a Page object for the given 1-based page number."
number = self.validate_number(number)
bottom = (number - 1) * self.per_page
top = bottom + self.per_page
if top + self.orphans >= self.count:
top = self.count
return Page(self.object_list[bottom:top], number, self)
def _get_count(self):
"Returns the total number of objects, across all pages."
if self._count is None:
self._count = len(self.object_list)
return self._count
count = property(_get_count)
def _get_num_pages(self):
"Returns the total number of pages."
if self._num_pages is None:
hits = self.count - 1 - self.orphans
if hits < 1:
hits = 0
if hits == 0 and not self.allow_empty_first_page:
self._num_pages = 0
else:
self._num_pages = hits // self.per_page + 1
return self._num_pages
num_pages = property(_get_num_pages)
def _get_page_range(self):
"""
Returns a 1-based range of pages for iterating through within
a template for loop.
"""
return range(1, self.num_pages + 1)
page_range = property(_get_page_range)
class QuerySetPaginator(Paginator):
"""
Like Paginator, but works on QuerySets.
"""
def _get_count(self):
if self._count is None:
self._count = self.object_list.count()
return self._count
count = property(_get_count)
class Page(object):
def __init__(self, object_list, number, paginator):
self.object_list = object_list
self.number = number
self.paginator = paginator
def __repr__(self):
return '<Page %s of %s>' % (self.number, self.paginator.num_pages)
def has_next(self):
return self.number < self.paginator.num_pages
def has_previous(self):
return self.number > 1
def has_other_pages(self):
return self.has_previous() or self.has_next()
def next_page_number(self):
return self.number + 1
def previous_page_number(self):
return self.number - 1
def start_index(self):
"""
Returns the 1-based index of the first object on this page,
relative to total objects in the paginator.
"""
return (self.paginator.per_page * (self.number - 1)) + 1
def end_index(self):
"""
Returns the 1-based index of the last object on this page,
relative to total objects found (hits).
"""
if self.number == self.paginator.num_pages:
return self.paginator.count
return self.number * self.paginator.per_page
class ObjectPaginator(Paginator):
"""
Legacy ObjectPaginator class, for backwards compatibility.
Note that each method on this class that takes page_number expects a
zero-based page number, whereas the new API (Paginator/Page) uses one-based
page numbers.
"""
def __init__(self, query_set, num_per_page, orphans=0):
Paginator.__init__(self, query_set, num_per_page, orphans)
import warnings
warnings.warn("The ObjectPaginator is deprecated. Use django.core.paginator.Paginator instead.", DeprecationWarning)
# Keep these attributes around for backwards compatibility.
self.query_set = query_set
self.num_per_page = num_per_page
self._hits = self._pages = None
def validate_page_number(self, page_number):
try:
page_number = int(page_number) + 1
except ValueError:
raise InvalidPage
return self.validate_number(page_number)
def get_page(self, page_number):
try:
page_number = int(page_number) + 1
except ValueError:
raise InvalidPage
return self.page(page_number).object_list
def has_next_page(self, page_number):
return page_number < self.pages - 1
def has_previous_page(self, page_number):
return page_number > 0
def first_on_page(self, page_number):
"""
Returns the 1-based index of the first object on the given page,
relative to total objects found (hits).
"""
page_number = self.validate_page_number(page_number)
return (self.num_per_page * (page_number - 1)) + 1
def last_on_page(self, page_number):
"""
Returns the 1-based index of the last object on the given page,
relative to total objects found (hits).
"""
page_number = self.validate_page_number(page_number)
if page_number == self.num_pages:
return self.count
return page_number * self.num_per_page
def _get_count(self):
# The old API allowed for self.object_list to be either a QuerySet or a
# list. Here, we handle both.
if self._count is None:
try:
self._count = self.object_list.count()
except TypeError:
self._count = len(self.object_list)
return self._count
count = property(_get_count)
# The old API called it "hits" instead of "count".
hits = count
# The old API called it "pages" instead of "num_pages".
pages = Paginator.num_pages
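# A minimal usage sketch (not part of the original module) showing the
# 1-based Paginator/Page API on a plain list.
if __name__ == "__main__":
    paginator = Paginator(list(range(10)), per_page=3)
    print(paginator.count, paginator.num_pages)  # 10 4
    page = paginator.page(2)
    print(page.object_list)  # [3, 4, 5]
    print(page.start_index(), page.end_index())  # 4 6
    print(page.has_next(), page.has_previous())  # True True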
the-stack_106_19333
import re
from model.contact import Contact
def test_main_page_db(app, db):
contacts_from_home_page = sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
contacts_from_db = sorted(db.get_contact_list(), key=Contact.id_or_max)
assert len(contacts_from_home_page) == len(contacts_from_db)
for i in range(len(contacts_from_home_page)):
assert contacts_from_home_page[i].all_phones == merge_phones_like_on_home_page(contacts_from_db[i])
assert contacts_from_home_page[i].all_emails == merge_emails_like_on_home_page(contacts_from_db[i])
assert contacts_from_home_page[i].firstname == contacts_from_db[i].firstname
assert contacts_from_home_page[i].lastname == contacts_from_db[i].lastname
assert contacts_from_home_page[i].address == contacts_from_db[i].address
def test_phones_on_home_page(app):
contact_from_home_page = app.contact.get_contact_list()[0]
contact_from_edit_page = app.contact.get_contact_info_from_edit_page(0)
assert contact_from_home_page.all_phones_from_home_page == merge_phones_like_on_home_page(contact_from_edit_page)
assert contact_from_home_page.all_emails == merge_emails_like_on_home_page(contact_from_edit_page)
assert contact_from_home_page.firstname == contact_from_edit_page.firstname
assert contact_from_home_page.lastname == contact_from_edit_page.lastname
assert contact_from_home_page.address == contact_from_edit_page.address
def test_phones_on_contact_view_page(app):
contact_from_view_page = app.contact.get_contact_from_view_page(0)
contact_from_edit_page = app.contact.get_contact_info_from_edit_page(0)
assert contact_from_view_page.home == contact_from_edit_page.home
assert contact_from_view_page.work == contact_from_edit_page.work
assert contact_from_view_page.mobile == contact_from_edit_page.mobile
def clear(s):
return re.sub("[() -]", "", s)
def merge_phones_like_on_home_page(contact):
return "\n".join(filter(lambda x: x != "",
map(lambda x: clear(x),
filter(lambda x: x is not None,
[contact.home, contact.mobile, contact.work]))))
def merge_emails_like_on_home_page(contact):
return "\n".join(filter(lambda x: x != "",
filter(lambda x: x is not None,
[contact.email, contact.email2, contact.email3])))
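# A minimal sketch (not part of the original tests) of what
# merge_phones_like_on_home_page produces; SimpleNamespace stands in for the
# real Contact model and only needs the attributes accessed above.
if __name__ == "__main__":
    from types import SimpleNamespace

    fake_contact = SimpleNamespace(home="+1 (234) 567-89", mobile=None, work="55 66")
    print(merge_phones_like_on_home_page(fake_contact))  # "+123456789\n5566"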
the-stack_106_19334
from xdsl.dialects.builtin import *
from xdsl.dialects.std import *
from xdsl.dialects.arith import *
from xdsl.printer import Printer
from xdsl.dialects.affine import *
def get_example_affine_program(ctx: MLContext, builtin: Builtin, std: Std,
affine: Affine) -> Operation:
def affine_mm(arg0: BlockArgument, arg1: BlockArgument,
arg2: BlockArgument) -> List[Operation]:
# yapf: disable
return [
affine.for_(0, 256, Block.from_callable([i64], lambda i: [
affine.for_(0, 256, Block.from_callable([i64], lambda j: [
affine.for_(0, 250, Block.from_callable([i64], lambda k: [
l := affine.load(arg0, i, k),
r := affine.load(arg1, k, j),
o := affine.load(arg2, i, j),
m := Mulf.get(l, r),
                        a := Addf.get(o, m),
affine.store(a, arg2, i, j)
]))
]))
])),
Return.get(arg2)
]
# yapf: enable
f = FuncOp.from_callable("affine_mm", [f32, f32, f32], [f32], affine_mm)
return f
def test_affine():
ctx = MLContext()
builtin = Builtin(ctx)
std = Std(ctx)
arith = Arith(ctx)
affine = Affine(ctx)
test_empty = new_op("test_empty", 0, 0, 0)
ctx.register_op(test_empty)
op = test_empty()
f = get_example_affine_program(ctx, builtin, std, affine)
f.verify()
printer = Printer()
printer.print_op(f)
the-stack_106_19335
#!/usr/bin/env python
import os
import sys
from setuptools import setup
try:
from setuptools import find_namespace_packages
except ImportError:
# the user has a downlevel version of setuptools.
print('Error: dbt requires setuptools v40.1.0 or higher.')
print('Please upgrade setuptools with "pip install --upgrade setuptools" '
'and try again')
sys.exit(1)
package_name = "dbt-bigquery"
package_version = "0.16.0b1"
description = """The bigquery adapter plugin for dbt (data build tool)"""
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, 'README.md')) as f:
long_description = f.read()
setup(
name=package_name,
version=package_version,
description=description,
long_description=long_description,
long_description_content_type='text/markdown',
author="Fishtown Analytics",
author_email="[email protected]",
url="https://github.com/fishtown-analytics/dbt",
packages=find_namespace_packages(include=['dbt', 'dbt.*']),
package_data={
'dbt': [
'include/bigquery/dbt_project.yml',
'include/bigquery/macros/*.sql',
'include/bigquery/macros/**/*.sql',
]
},
install_requires=[
'dbt-core=={}'.format(package_version),
'google-cloud-core>=1,<=1.1.0',
'google-cloud-bigquery>=1.15.0,<1.24.0',
# hidden secret dependency: bq requires this but only documents 1.10.0
# through its dependency chain.
# see https://github.com/googleapis/google-cloud-python/issues/9965
'six>=1.13.0',
],
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
)
the-stack_106_19336
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensorflow Example proto decoder for object detection.
A decoder to decode string tensors containing serialized tensorflow.Example
protos for object detection.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
import numpy as np
from six.moves import zip
import tensorflow.compat.v1 as tf
from tf_slim import tfexample_decoder as slim_example_decoder
from object_detection.core import data_decoder
from object_detection.core import standard_fields as fields
from object_detection.protos import input_reader_pb2
from object_detection.utils import label_map_util
from object_detection.utils import shape_utils
# pylint: disable=g-import-not-at-top
try:
from tensorflow.contrib import lookup as contrib_lookup
except ImportError:
# TF 2.0 doesn't ship with contrib.
pass
# pylint: enable=g-import-not-at-top
_LABEL_OFFSET = 1
class Visibility(enum.Enum):
"""Visibility definitions.
This follows the MS Coco convention (http://cocodataset.org/#format-data).
"""
# Keypoint is not labeled.
UNLABELED = 0
# Keypoint is labeled but falls outside the object segment (e.g. occluded).
NOT_VISIBLE = 1
# Keypoint is labeled and visible.
VISIBLE = 2
class _ClassTensorHandler(slim_example_decoder.Tensor):
"""An ItemHandler to fetch class ids from class text."""
def __init__(self,
tensor_key,
label_map_proto_file,
shape_keys=None,
shape=None,
default_value=''):
"""Initializes the LookupTensor handler.
Simply calls a vocabulary (most often, a label mapping) lookup.
Args:
tensor_key: the name of the `TFExample` feature to read the tensor from.
label_map_proto_file: File path to a text format LabelMapProto message
mapping class text to id.
shape_keys: Optional name or list of names of the TF-Example feature in
which the tensor shape is stored. If a list, then each corresponds to
one dimension of the shape.
shape: Optional output shape of the `Tensor`. If provided, the `Tensor` is
reshaped accordingly.
default_value: The value used when the `tensor_key` is not found in a
particular `TFExample`.
Raises:
ValueError: if both `shape_keys` and `shape` are specified.
"""
name_to_id = label_map_util.get_label_map_dict(
label_map_proto_file, use_display_name=False)
# We use a default_value of -1, but we expect all labels to be contained
# in the label map.
try:
# Dynamically try to load the tf v2 lookup, falling back to contrib
lookup = tf.compat.v2.lookup
hash_table_class = tf.compat.v2.lookup.StaticHashTable
except AttributeError:
lookup = contrib_lookup
hash_table_class = contrib_lookup.HashTable
name_to_id_table = hash_table_class(
initializer=lookup.KeyValueTensorInitializer(
keys=tf.constant(list(name_to_id.keys())),
values=tf.constant(list(name_to_id.values()), dtype=tf.int64)),
default_value=-1)
display_name_to_id = label_map_util.get_label_map_dict(
label_map_proto_file, use_display_name=True)
# We use a default_value of -1, but we expect all labels to be contained
# in the label map.
display_name_to_id_table = hash_table_class(
initializer=lookup.KeyValueTensorInitializer(
keys=tf.constant(list(display_name_to_id.keys())),
values=tf.constant(
list(display_name_to_id.values()), dtype=tf.int64)),
default_value=-1)
self._name_to_id_table = name_to_id_table
self._display_name_to_id_table = display_name_to_id_table
super(_ClassTensorHandler, self).__init__(tensor_key, shape_keys, shape,
default_value)
def tensors_to_item(self, keys_to_tensors):
unmapped_tensor = super(_ClassTensorHandler,
self).tensors_to_item(keys_to_tensors)
return tf.maximum(self._name_to_id_table.lookup(unmapped_tensor),
self._display_name_to_id_table.lookup(unmapped_tensor))
class _BackupHandler(slim_example_decoder.ItemHandler):
"""An ItemHandler that tries two ItemHandlers in order."""
def __init__(self, handler, backup):
"""Initializes the BackupHandler handler.
If the first Handler's tensors_to_item returns a Tensor with no elements,
the second Handler is used.
Args:
handler: The primary ItemHandler.
backup: The backup ItemHandler.
Raises:
ValueError: if either is not an ItemHandler.
"""
if not isinstance(handler, slim_example_decoder.ItemHandler):
raise ValueError('Primary handler is of type %s instead of ItemHandler' %
type(handler))
if not isinstance(backup, slim_example_decoder.ItemHandler):
raise ValueError(
'Backup handler is of type %s instead of ItemHandler' % type(backup))
self._handler = handler
self._backup = backup
super(_BackupHandler, self).__init__(handler.keys + backup.keys)
def tensors_to_item(self, keys_to_tensors):
item = self._handler.tensors_to_item(keys_to_tensors)
return tf.cond(
pred=tf.equal(tf.reduce_prod(tf.shape(item)), 0),
true_fn=lambda: self._backup.tensors_to_item(keys_to_tensors),
false_fn=lambda: item)
class TfExampleDecoder(data_decoder.DataDecoder):
"""Tensorflow Example proto decoder."""
def __init__(self,
load_instance_masks=False,
instance_mask_type=input_reader_pb2.NUMERICAL_MASKS,
label_map_proto_file=None,
use_display_name=False,
dct_method='',
num_keypoints=0,
num_additional_channels=0,
load_multiclass_scores=False,
load_context_features=False,
expand_hierarchy_labels=False,
load_dense_pose=False):
"""Constructor sets keys_to_features and items_to_handlers.
Args:
load_instance_masks: whether or not to load and handle instance masks.
instance_mask_type: type of instance masks. Options are provided in
input_reader.proto. This is only used if `load_instance_masks` is True.
label_map_proto_file: a file path to a
object_detection.protos.StringIntLabelMap proto. If provided, then the
mapped IDs of 'image/object/class/text' will take precedence over the
existing 'image/object/class/label' ID. Also, if provided, it is
assumed that 'image/object/class/text' will be in the data.
use_display_name: whether or not to use the `display_name` for label
mapping (instead of `name`). Only used if label_map_proto_file is
provided.
      dct_method: An optional string. Defaults to ''. It only takes
effect when image format is jpeg, used to specify a hint about the
algorithm used for jpeg decompression. Currently valid values
are ['INTEGER_FAST', 'INTEGER_ACCURATE']. The hint may be ignored, for
example, the jpeg library does not have that specific option.
num_keypoints: the number of keypoints per object.
num_additional_channels: how many additional channels to use.
load_multiclass_scores: Whether to load multiclass scores associated with
boxes.
load_context_features: Whether to load information from context_features,
to provide additional context to a detection model for training and/or
inference.
expand_hierarchy_labels: Expands the object and image labels taking into
account the provided hierarchy in the label_map_proto_file. For positive
        classes, the labels are extended to ancestors. For negative classes,
the labels are expanded to descendants.
load_dense_pose: Whether to load DensePose annotations.
Raises:
ValueError: If `instance_mask_type` option is not one of
input_reader_pb2.DEFAULT, input_reader_pb2.NUMERICAL, or
input_reader_pb2.PNG_MASKS.
ValueError: If `expand_labels_hierarchy` is True, but the
`label_map_proto_file` is not provided.
"""
# TODO(rathodv): delete unused `use_display_name` argument once we change
# other decoders to handle label maps similarly.
del use_display_name
self.keys_to_features = {
'image/encoded':
tf.FixedLenFeature((), tf.string, default_value=''),
'image/format':
tf.FixedLenFeature((), tf.string, default_value='jpeg'),
'image/filename':
tf.FixedLenFeature((), tf.string, default_value=''),
'image/key/sha256':
tf.FixedLenFeature((), tf.string, default_value=''),
'image/source_id':
tf.FixedLenFeature((), tf.string, default_value=''),
'image/height':
tf.FixedLenFeature((), tf.int64, default_value=1),
'image/width':
tf.FixedLenFeature((), tf.int64, default_value=1),
# Image-level labels.
'image/class/text':
tf.VarLenFeature(tf.string),
'image/class/label':
tf.VarLenFeature(tf.int64),
'image/class/confidence':
tf.VarLenFeature(tf.float32),
# Object boxes and classes.
'image/object/bbox/xmin':
tf.VarLenFeature(tf.float32),
'image/object/bbox/xmax':
tf.VarLenFeature(tf.float32),
'image/object/bbox/ymin':
tf.VarLenFeature(tf.float32),
'image/object/bbox/ymax':
tf.VarLenFeature(tf.float32),
'image/object/class/label':
tf.VarLenFeature(tf.int64),
'image/object/class/text':
tf.VarLenFeature(tf.string),
'image/object/area':
tf.VarLenFeature(tf.float32),
'image/object/is_crowd':
tf.VarLenFeature(tf.int64),
'image/object/difficult':
tf.VarLenFeature(tf.int64),
'image/object/group_of':
tf.VarLenFeature(tf.int64),
'image/object/weight':
tf.VarLenFeature(tf.float32),
}
# We are checking `dct_method` instead of passing it directly in order to
# ensure TF version 1.6 compatibility.
if dct_method:
image = slim_example_decoder.Image(
image_key='image/encoded',
format_key='image/format',
channels=3,
dct_method=dct_method)
additional_channel_image = slim_example_decoder.Image(
image_key='image/additional_channels/encoded',
format_key='image/format',
channels=1,
repeated=True,
dct_method=dct_method)
else:
image = slim_example_decoder.Image(
image_key='image/encoded', format_key='image/format', channels=3)
additional_channel_image = slim_example_decoder.Image(
image_key='image/additional_channels/encoded',
format_key='image/format',
channels=1,
repeated=True)
self.items_to_handlers = {
fields.InputDataFields.image:
image,
fields.InputDataFields.source_id: (
slim_example_decoder.Tensor('image/source_id')),
fields.InputDataFields.key: (
slim_example_decoder.Tensor('image/key/sha256')),
fields.InputDataFields.filename: (
slim_example_decoder.Tensor('image/filename')),
# Image-level labels.
fields.InputDataFields.groundtruth_image_confidences: (
slim_example_decoder.Tensor('image/class/confidence')),
# Object boxes and classes.
fields.InputDataFields.groundtruth_boxes: (
slim_example_decoder.BoundingBox(['ymin', 'xmin', 'ymax', 'xmax'],
'image/object/bbox/')),
fields.InputDataFields.groundtruth_area:
slim_example_decoder.Tensor('image/object/area'),
fields.InputDataFields.groundtruth_is_crowd: (
slim_example_decoder.Tensor('image/object/is_crowd')),
fields.InputDataFields.groundtruth_difficult: (
slim_example_decoder.Tensor('image/object/difficult')),
fields.InputDataFields.groundtruth_group_of: (
slim_example_decoder.Tensor('image/object/group_of')),
fields.InputDataFields.groundtruth_weights: (
slim_example_decoder.Tensor('image/object/weight')),
}
if load_multiclass_scores:
self.keys_to_features[
'image/object/class/multiclass_scores'] = tf.VarLenFeature(tf.float32)
self.items_to_handlers[fields.InputDataFields.multiclass_scores] = (
slim_example_decoder.Tensor('image/object/class/multiclass_scores'))
if load_context_features:
self.keys_to_features[
'image/context_features'] = tf.VarLenFeature(tf.float32)
self.items_to_handlers[fields.InputDataFields.context_features] = (
slim_example_decoder.ItemHandlerCallback(
['image/context_features', 'image/context_feature_length'],
self._reshape_context_features))
self.keys_to_features[
'image/context_feature_length'] = tf.FixedLenFeature((), tf.int64)
self.items_to_handlers[fields.InputDataFields.context_feature_length] = (
slim_example_decoder.Tensor('image/context_feature_length'))
if num_additional_channels > 0:
self.keys_to_features[
'image/additional_channels/encoded'] = tf.FixedLenFeature(
(num_additional_channels,), tf.string)
self.items_to_handlers[
fields.InputDataFields.
image_additional_channels] = additional_channel_image
self._num_keypoints = num_keypoints
if num_keypoints > 0:
self.keys_to_features['image/object/keypoint/x'] = (
tf.VarLenFeature(tf.float32))
self.keys_to_features['image/object/keypoint/y'] = (
tf.VarLenFeature(tf.float32))
self.keys_to_features['image/object/keypoint/visibility'] = (
tf.VarLenFeature(tf.int64))
self.items_to_handlers[fields.InputDataFields.groundtruth_keypoints] = (
slim_example_decoder.ItemHandlerCallback(
['image/object/keypoint/y', 'image/object/keypoint/x'],
self._reshape_keypoints))
kpt_vis_field = fields.InputDataFields.groundtruth_keypoint_visibilities
self.items_to_handlers[kpt_vis_field] = (
slim_example_decoder.ItemHandlerCallback(
['image/object/keypoint/x', 'image/object/keypoint/visibility'],
self._reshape_keypoint_visibilities))
if load_instance_masks:
if instance_mask_type in (input_reader_pb2.DEFAULT,
input_reader_pb2.NUMERICAL_MASKS):
self.keys_to_features['image/object/mask'] = (
tf.VarLenFeature(tf.float32))
self.items_to_handlers[
fields.InputDataFields.groundtruth_instance_masks] = (
slim_example_decoder.ItemHandlerCallback(
['image/object/mask', 'image/height', 'image/width'],
self._reshape_instance_masks))
elif instance_mask_type == input_reader_pb2.PNG_MASKS:
self.keys_to_features['image/object/mask'] = tf.VarLenFeature(tf.string)
self.items_to_handlers[
fields.InputDataFields.groundtruth_instance_masks] = (
slim_example_decoder.ItemHandlerCallback(
['image/object/mask', 'image/height', 'image/width'],
self._decode_png_instance_masks))
else:
raise ValueError('Did not recognize the `instance_mask_type` option.')
if load_dense_pose:
self.keys_to_features['image/object/densepose/num'] = (
tf.VarLenFeature(tf.int64))
self.keys_to_features['image/object/densepose/part_index'] = (
tf.VarLenFeature(tf.int64))
self.keys_to_features['image/object/densepose/x'] = (
tf.VarLenFeature(tf.float32))
self.keys_to_features['image/object/densepose/y'] = (
tf.VarLenFeature(tf.float32))
self.keys_to_features['image/object/densepose/u'] = (
tf.VarLenFeature(tf.float32))
self.keys_to_features['image/object/densepose/v'] = (
tf.VarLenFeature(tf.float32))
self.items_to_handlers[
fields.InputDataFields.groundtruth_dp_num_points] = (
slim_example_decoder.Tensor('image/object/densepose/num'))
self.items_to_handlers[fields.InputDataFields.groundtruth_dp_part_ids] = (
slim_example_decoder.ItemHandlerCallback(
['image/object/densepose/part_index',
'image/object/densepose/num'], self._dense_pose_part_indices))
self.items_to_handlers[
fields.InputDataFields.groundtruth_dp_surface_coords] = (
slim_example_decoder.ItemHandlerCallback(
['image/object/densepose/x', 'image/object/densepose/y',
'image/object/densepose/u', 'image/object/densepose/v',
'image/object/densepose/num'],
self._dense_pose_surface_coordinates))
if label_map_proto_file:
# If the label_map_proto is provided, try to use it in conjunction with
# the class text, and fall back to a materialized ID.
label_handler = _BackupHandler(
_ClassTensorHandler(
'image/object/class/text', label_map_proto_file,
default_value=''),
slim_example_decoder.Tensor('image/object/class/label'))
image_label_handler = _BackupHandler(
_ClassTensorHandler(
fields.TfExampleFields.image_class_text,
label_map_proto_file,
default_value=''),
slim_example_decoder.Tensor(fields.TfExampleFields.image_class_label))
else:
label_handler = slim_example_decoder.Tensor('image/object/class/label')
image_label_handler = slim_example_decoder.Tensor(
fields.TfExampleFields.image_class_label)
self.items_to_handlers[
fields.InputDataFields.groundtruth_classes] = label_handler
self.items_to_handlers[
fields.InputDataFields.groundtruth_image_classes] = image_label_handler
self._expand_hierarchy_labels = expand_hierarchy_labels
self._ancestors_lut = None
self._descendants_lut = None
if expand_hierarchy_labels:
if label_map_proto_file:
ancestors_lut, descendants_lut = (
label_map_util.get_label_map_hierarchy_lut(label_map_proto_file,
True))
self._ancestors_lut = tf.constant(ancestors_lut, dtype=tf.int64)
self._descendants_lut = tf.constant(descendants_lut, dtype=tf.int64)
else:
raise ValueError('In order to expand labels, the label_map_proto_file '
'has to be provided.')
def decode(self, tf_example_string_tensor):
"""Decodes serialized tensorflow example and returns a tensor dictionary.
Args:
tf_example_string_tensor: a string tensor holding a serialized tensorflow
example proto.
Returns:
A dictionary of the following tensors.
fields.InputDataFields.image - 3D uint8 tensor of shape [None, None, 3]
containing image.
fields.InputDataFields.original_image_spatial_shape - 1D int32 tensor of
shape [2] containing shape of the image.
fields.InputDataFields.source_id - string tensor containing original
image id.
fields.InputDataFields.key - string tensor with unique sha256 hash key.
fields.InputDataFields.filename - string tensor with original dataset
filename.
fields.InputDataFields.groundtruth_boxes - 2D float32 tensor of shape
[None, 4] containing box corners.
fields.InputDataFields.groundtruth_classes - 1D int64 tensor of shape
[None] containing classes for the boxes.
fields.InputDataFields.groundtruth_weights - 1D float32 tensor of
shape [None] indicating the weights of groundtruth boxes.
fields.InputDataFields.groundtruth_area - 1D float32 tensor of shape
        [None] containing object mask area in pixels squared.
fields.InputDataFields.groundtruth_is_crowd - 1D bool tensor of shape
[None] indicating if the boxes enclose a crowd.
Optional:
fields.InputDataFields.groundtruth_image_confidences - 1D float tensor of
shape [None] indicating if a class is present in the image (1.0) or
a class is not present in the image (0.0).
fields.InputDataFields.image_additional_channels - 3D uint8 tensor of
shape [None, None, num_additional_channels]. 1st dim is height; 2nd dim
is width; 3rd dim is the number of additional channels.
fields.InputDataFields.groundtruth_difficult - 1D bool tensor of shape
[None] indicating if the boxes represent `difficult` instances.
fields.InputDataFields.groundtruth_group_of - 1D bool tensor of shape
[None] indicating if the boxes represent `group_of` instances.
fields.InputDataFields.groundtruth_keypoints - 3D float32 tensor of
shape [None, num_keypoints, 2] containing keypoints, where the
coordinates of the keypoints are ordered (y, x).
fields.InputDataFields.groundtruth_keypoint_visibilities - 2D bool
        tensor of shape [None, num_keypoints] containing keypoint visibilities.
fields.InputDataFields.groundtruth_instance_masks - 3D float32 tensor of
shape [None, None, None] containing instance masks.
fields.InputDataFields.groundtruth_image_classes - 1D int64 of shape
[None] containing classes for the boxes.
fields.InputDataFields.multiclass_scores - 1D float32 tensor of shape
[None * num_classes] containing flattened multiclass scores for
groundtruth boxes.
fields.InputDataFields.context_features - 1D float32 tensor of shape
[context_feature_length * num_context_features]
fields.InputDataFields.context_feature_length - int32 tensor specifying
the length of each feature in context_features
"""
serialized_example = tf.reshape(tf_example_string_tensor, shape=[])
decoder = slim_example_decoder.TFExampleDecoder(self.keys_to_features,
self.items_to_handlers)
keys = decoder.list_items()
tensors = decoder.decode(serialized_example, items=keys)
tensor_dict = dict(zip(keys, tensors))
is_crowd = fields.InputDataFields.groundtruth_is_crowd
tensor_dict[is_crowd] = tf.cast(tensor_dict[is_crowd], dtype=tf.bool)
tensor_dict[fields.InputDataFields.image].set_shape([None, None, 3])
tensor_dict[fields.InputDataFields.original_image_spatial_shape] = tf.shape(
tensor_dict[fields.InputDataFields.image])[:2]
if fields.InputDataFields.image_additional_channels in tensor_dict:
channels = tensor_dict[fields.InputDataFields.image_additional_channels]
channels = tf.squeeze(channels, axis=3)
channels = tf.transpose(channels, perm=[1, 2, 0])
tensor_dict[fields.InputDataFields.image_additional_channels] = channels
def default_groundtruth_weights():
return tf.ones(
[tf.shape(tensor_dict[fields.InputDataFields.groundtruth_boxes])[0]],
dtype=tf.float32)
tensor_dict[fields.InputDataFields.groundtruth_weights] = tf.cond(
tf.greater(
tf.shape(
tensor_dict[fields.InputDataFields.groundtruth_weights])[0],
0), lambda: tensor_dict[fields.InputDataFields.groundtruth_weights],
default_groundtruth_weights)
if fields.InputDataFields.groundtruth_keypoints in tensor_dict:
# Set all keypoints that are not labeled to NaN.
gt_kpt_fld = fields.InputDataFields.groundtruth_keypoints
gt_kpt_vis_fld = fields.InputDataFields.groundtruth_keypoint_visibilities
visibilities_tiled = tf.tile(
tf.expand_dims(tensor_dict[gt_kpt_vis_fld], -1),
[1, 1, 2])
tensor_dict[gt_kpt_fld] = tf.where(
visibilities_tiled,
tensor_dict[gt_kpt_fld],
np.nan * tf.ones_like(tensor_dict[gt_kpt_fld]))
if self._expand_hierarchy_labels:
input_fields = fields.InputDataFields
image_classes, image_confidences = self._expand_image_label_hierarchy(
tensor_dict[input_fields.groundtruth_image_classes],
tensor_dict[input_fields.groundtruth_image_confidences])
tensor_dict[input_fields.groundtruth_image_classes] = image_classes
tensor_dict[input_fields.groundtruth_image_confidences] = (
image_confidences)
box_fields = [
fields.InputDataFields.groundtruth_group_of,
fields.InputDataFields.groundtruth_is_crowd,
fields.InputDataFields.groundtruth_difficult,
fields.InputDataFields.groundtruth_area,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_weights,
]
def expand_field(field_name):
return self._expansion_box_field_labels(
tensor_dict[input_fields.groundtruth_classes],
tensor_dict[field_name])
# pylint: disable=cell-var-from-loop
for field in box_fields:
if field in tensor_dict:
tensor_dict[field] = tf.cond(
tf.size(tensor_dict[field]) > 0, lambda: expand_field(field),
lambda: tensor_dict[field])
# pylint: enable=cell-var-from-loop
tensor_dict[input_fields.groundtruth_classes] = (
self._expansion_box_field_labels(
tensor_dict[input_fields.groundtruth_classes],
tensor_dict[input_fields.groundtruth_classes], True))
if fields.InputDataFields.groundtruth_group_of in tensor_dict:
group_of = fields.InputDataFields.groundtruth_group_of
tensor_dict[group_of] = tf.cast(tensor_dict[group_of], dtype=tf.bool)
if fields.InputDataFields.groundtruth_dp_num_points in tensor_dict:
tensor_dict[fields.InputDataFields.groundtruth_dp_num_points] = tf.cast(
tensor_dict[fields.InputDataFields.groundtruth_dp_num_points],
dtype=tf.int32)
tensor_dict[fields.InputDataFields.groundtruth_dp_part_ids] = tf.cast(
tensor_dict[fields.InputDataFields.groundtruth_dp_part_ids],
dtype=tf.int32)
return tensor_dict
def _reshape_keypoints(self, keys_to_tensors):
"""Reshape keypoints.
The keypoints are reshaped to [num_instances, num_keypoints, 2].
Args:
keys_to_tensors: a dictionary from keys to tensors. Expected keys are:
'image/object/keypoint/x'
'image/object/keypoint/y'
Returns:
A 3-D float tensor of shape [num_instances, num_keypoints, 2] with values
in [0, 1].
"""
y = keys_to_tensors['image/object/keypoint/y']
if isinstance(y, tf.SparseTensor):
y = tf.sparse_tensor_to_dense(y)
y = tf.expand_dims(y, 1)
x = keys_to_tensors['image/object/keypoint/x']
if isinstance(x, tf.SparseTensor):
x = tf.sparse_tensor_to_dense(x)
x = tf.expand_dims(x, 1)
keypoints = tf.concat([y, x], 1)
keypoints = tf.reshape(keypoints, [-1, self._num_keypoints, 2])
return keypoints
def _reshape_keypoint_visibilities(self, keys_to_tensors):
"""Reshape keypoint visibilities.
The keypoint visibilities are reshaped to [num_instances,
num_keypoints].
The raw keypoint visibilities are expected to conform to the
MSCoco definition. See Visibility enum.
The returned boolean is True for the labeled case (either
Visibility.NOT_VISIBLE or Visibility.VISIBLE). These are the same categories
that COCO uses to evaluate keypoint detection performance:
http://cocodataset.org/#keypoints-eval
If image/object/keypoint/visibility is not provided, visibilities will be
set to True for finite keypoint coordinate values, and 0 if the coordinates
are NaN.
Args:
keys_to_tensors: a dictionary from keys to tensors. Expected keys are:
'image/object/keypoint/x'
'image/object/keypoint/visibility'
Returns:
A 2-D bool tensor of shape [num_instances, num_keypoints] with values
in {0, 1}. 1 if the keypoint is labeled, 0 otherwise.
"""
x = keys_to_tensors['image/object/keypoint/x']
vis = keys_to_tensors['image/object/keypoint/visibility']
if isinstance(vis, tf.SparseTensor):
vis = tf.sparse_tensor_to_dense(vis)
if isinstance(x, tf.SparseTensor):
x = tf.sparse_tensor_to_dense(x)
default_vis = tf.where(
tf.math.is_nan(x),
Visibility.UNLABELED.value * tf.ones_like(x, dtype=tf.int64),
Visibility.VISIBLE.value * tf.ones_like(x, dtype=tf.int64))
# Use visibility if provided, otherwise use the default visibility.
vis = tf.cond(tf.equal(tf.size(x), tf.size(vis)),
true_fn=lambda: vis,
false_fn=lambda: default_vis)
vis = tf.math.logical_or(
tf.math.equal(vis, Visibility.NOT_VISIBLE.value),
tf.math.equal(vis, Visibility.VISIBLE.value))
vis = tf.reshape(vis, [-1, self._num_keypoints])
return vis
def _reshape_instance_masks(self, keys_to_tensors):
"""Reshape instance segmentation masks.
The instance segmentation masks are reshaped to [num_instances, height,
width].
Args:
keys_to_tensors: a dictionary from keys to tensors.
Returns:
A 3-D float tensor of shape [num_instances, height, width] with values
in {0, 1}.
"""
height = keys_to_tensors['image/height']
width = keys_to_tensors['image/width']
to_shape = tf.cast(tf.stack([-1, height, width]), tf.int32)
masks = keys_to_tensors['image/object/mask']
if isinstance(masks, tf.SparseTensor):
masks = tf.sparse_tensor_to_dense(masks)
masks = tf.reshape(
tf.cast(tf.greater(masks, 0.0), dtype=tf.float32), to_shape)
return tf.cast(masks, tf.float32)
def _reshape_context_features(self, keys_to_tensors):
"""Reshape context features.
The instance context_features are reshaped to
[num_context_features, context_feature_length]
Args:
keys_to_tensors: a dictionary from keys to tensors.
Returns:
A 2-D float tensor of shape [num_context_features, context_feature_length]
"""
context_feature_length = keys_to_tensors['image/context_feature_length']
to_shape = tf.cast(tf.stack([-1, context_feature_length]), tf.int32)
context_features = keys_to_tensors['image/context_features']
if isinstance(context_features, tf.SparseTensor):
context_features = tf.sparse_tensor_to_dense(context_features)
context_features = tf.reshape(context_features, to_shape)
return context_features
def _decode_png_instance_masks(self, keys_to_tensors):
"""Decode PNG instance segmentation masks and stack into dense tensor.
The instance segmentation masks are reshaped to [num_instances, height,
width].
Args:
keys_to_tensors: a dictionary from keys to tensors.
Returns:
A 3-D float tensor of shape [num_instances, height, width] with values
in {0, 1}.
"""
def decode_png_mask(image_buffer):
image = tf.squeeze(
tf.image.decode_image(image_buffer, channels=1), axis=2)
image.set_shape([None, None])
image = tf.cast(tf.greater(image, 0), dtype=tf.float32)
return image
png_masks = keys_to_tensors['image/object/mask']
height = keys_to_tensors['image/height']
width = keys_to_tensors['image/width']
if isinstance(png_masks, tf.SparseTensor):
png_masks = tf.sparse_tensor_to_dense(png_masks, default_value='')
return tf.cond(
tf.greater(tf.size(png_masks), 0),
lambda: tf.map_fn(decode_png_mask, png_masks, dtype=tf.float32),
lambda: tf.zeros(tf.cast(tf.stack([0, height, width]), dtype=tf.int32)))
def _dense_pose_part_indices(self, keys_to_tensors):
"""Creates a tensor that contains part indices for each DensePose point.
Args:
keys_to_tensors: a dictionary from keys to tensors.
Returns:
A 2-D int32 tensor of shape [num_instances, num_points] where each element
contains the DensePose part index (0-23). The value `num_points`
corresponds to the maximum number of sampled points across all instances
      in the image. Note that instances with fewer sampled points will be padded
with zeros in the last dimension.
"""
num_points_per_instances = keys_to_tensors['image/object/densepose/num']
part_index = keys_to_tensors['image/object/densepose/part_index']
if isinstance(num_points_per_instances, tf.SparseTensor):
num_points_per_instances = tf.sparse_tensor_to_dense(
num_points_per_instances)
if isinstance(part_index, tf.SparseTensor):
part_index = tf.sparse_tensor_to_dense(part_index)
part_index = tf.cast(part_index, dtype=tf.int32)
max_points_per_instance = tf.cast(
tf.math.reduce_max(num_points_per_instances), dtype=tf.int32)
num_points_cumulative = tf.concat([
[0], tf.math.cumsum(num_points_per_instances)], axis=0)
def pad_parts_tensor(instance_ind):
points_range_start = num_points_cumulative[instance_ind]
points_range_end = num_points_cumulative[instance_ind + 1]
part_inds = part_index[points_range_start:points_range_end]
return shape_utils.pad_or_clip_nd(part_inds,
output_shape=[max_points_per_instance])
return tf.map_fn(pad_parts_tensor,
tf.range(tf.size(num_points_per_instances)),
dtype=tf.int32)
def _dense_pose_surface_coordinates(self, keys_to_tensors):
"""Creates a tensor that contains surface coords for each DensePose point.
Args:
keys_to_tensors: a dictionary from keys to tensors.
Returns:
A 3-D float32 tensor of shape [num_instances, num_points, 4] where each
point contains (y, x, v, u) data for each sampled DensePose point. The
(y, x) coordinate has normalized image locations for the point, and (v, u)
contains the surface coordinate (also normalized) for the part. The value
`num_points` corresponds to the maximum number of sampled points across
      all instances in the image. Note that instances with fewer sampled points
will be padded with zeros in dim=1.
"""
num_points_per_instances = keys_to_tensors['image/object/densepose/num']
dp_y = keys_to_tensors['image/object/densepose/y']
dp_x = keys_to_tensors['image/object/densepose/x']
dp_v = keys_to_tensors['image/object/densepose/v']
dp_u = keys_to_tensors['image/object/densepose/u']
if isinstance(num_points_per_instances, tf.SparseTensor):
num_points_per_instances = tf.sparse_tensor_to_dense(
num_points_per_instances)
if isinstance(dp_y, tf.SparseTensor):
dp_y = tf.sparse_tensor_to_dense(dp_y)
if isinstance(dp_x, tf.SparseTensor):
dp_x = tf.sparse_tensor_to_dense(dp_x)
if isinstance(dp_v, tf.SparseTensor):
dp_v = tf.sparse_tensor_to_dense(dp_v)
if isinstance(dp_u, tf.SparseTensor):
dp_u = tf.sparse_tensor_to_dense(dp_u)
max_points_per_instance = tf.cast(
tf.math.reduce_max(num_points_per_instances), dtype=tf.int32)
num_points_cumulative = tf.concat([
[0], tf.math.cumsum(num_points_per_instances)], axis=0)
def pad_surface_coordinates_tensor(instance_ind):
"""Pads DensePose surface coordinates for each instance."""
points_range_start = num_points_cumulative[instance_ind]
points_range_end = num_points_cumulative[instance_ind + 1]
y = dp_y[points_range_start:points_range_end]
x = dp_x[points_range_start:points_range_end]
v = dp_v[points_range_start:points_range_end]
u = dp_u[points_range_start:points_range_end]
# Create [num_points_i, 4] tensor, where num_points_i is the number of
# sampled points for instance i.
unpadded_tensor = tf.stack([y, x, v, u], axis=1)
return shape_utils.pad_or_clip_nd(
unpadded_tensor, output_shape=[max_points_per_instance, 4])
return tf.map_fn(pad_surface_coordinates_tensor,
tf.range(tf.size(num_points_per_instances)),
dtype=tf.float32)
def _expand_image_label_hierarchy(self, image_classes, image_confidences):
"""Expand image level labels according to the hierarchy.
Args:
image_classes: Int64 tensor with the image level class ids for a sample.
image_confidences: Float tensor signaling whether a class id is present in
the image (1.0) or not present (0.0).
Returns:
new_image_classes: Int64 tensor equal to expanding image_classes.
new_image_confidences: Float tensor equal to expanding image_confidences.
"""
def expand_labels(relation_tensor, confidence_value):
"""Expand to ancestors or descendants depending on arguments."""
mask = tf.equal(image_confidences, confidence_value)
target_image_classes = tf.boolean_mask(image_classes, mask)
expanded_indices = tf.reduce_any((tf.gather(
relation_tensor, target_image_classes - _LABEL_OFFSET, axis=0) > 0),
axis=0)
expanded_indices = tf.where(expanded_indices)[:, 0] + _LABEL_OFFSET
new_groundtruth_image_classes = (
tf.concat([
tf.boolean_mask(image_classes, tf.logical_not(mask)),
expanded_indices,
],
axis=0))
new_groundtruth_image_confidences = (
tf.concat([
tf.boolean_mask(image_confidences, tf.logical_not(mask)),
tf.ones([tf.shape(expanded_indices)[0]],
dtype=image_confidences.dtype) * confidence_value,
],
axis=0))
return new_groundtruth_image_classes, new_groundtruth_image_confidences
image_classes, image_confidences = expand_labels(self._ancestors_lut, 1.0)
new_image_classes, new_image_confidences = expand_labels(
self._descendants_lut, 0.0)
return new_image_classes, new_image_confidences
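  # Illustrative note: labels marked present (confidence 1.0) are expanded to
  # their ancestor classes via _ancestors_lut, while labels marked absent
  # (confidence 0.0) are expanded to their descendant classes via
  # _descendants_lut, as used for hierarchical label sets such as Open Images.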
def _expansion_box_field_labels(self,
object_classes,
object_field,
copy_class_id=False):
"""Expand the labels of a specific object field according to the hierarchy.
Args:
object_classes: Int64 tensor with the class id for each element in
object_field.
object_field: Tensor to be expanded.
copy_class_id: Boolean to choose whether to use class id values in the
output tensor instead of replicating the original values.
Returns:
A tensor with the result of expanding object_field.
"""
expanded_indices = tf.gather(
self._ancestors_lut, object_classes - _LABEL_OFFSET, axis=0)
if copy_class_id:
new_object_field = tf.where(expanded_indices > 0)[:, 1] + _LABEL_OFFSET
else:
new_object_field = tf.repeat(
object_field, tf.reduce_sum(expanded_indices, axis=1), axis=0)
return new_object_field
|
the-stack_106_19337
|
#Import required packages
from PyPDF2 import PdfFileWriter, PdfFileReader
import io
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import A4
# Define variables
debugGrid = False
def write_pdf(destinationPath, signatureImage, neighbor, gnr, bnr, address):
packet = io.BytesIO()
# Create a new PDF with Reportlab
can = canvas.Canvas(packet, pagesize=A4)
can.setFont("Times-Roman", 11)
can.drawString(65, 700, neighbor)
can.drawString(80, 637, gnr)
can.drawString(138, 637, bnr)
    can.drawString(80, 616, address)
can.drawImage(signatureImage, 440,45, width=60, mask=[250,255,250,255,250,255], preserveAspectRatio=True, anchor="sw")
# Print debug grid to help with text placement
if debugGrid:
can.grid(range(0,800,20),range(0,1000,20))
can.save()
####################
# Start writing PDFs
####################
    # Move to the beginning of the BytesIO buffer
packet.seek(0)
new_pdf = PdfFileReader(packet)
# Read your existing PDF
existing_pdf = PdfFileReader(open("input/nabovarsel-utfylt.pdf", "rb"))
output = PdfFileWriter()
# Add the "watermark" (which is the new pdf) on the existing page
page = existing_pdf.getPage(0)
page.mergePage(new_pdf.getPage(0))
output.addPage(page)
# Finally, write "output" to a real file
outputStream = open(destinationPath, "wb")
output.write(outputStream)
outputStream.close() #Remember to close it
# This code runs only if this python-file gets called directly
if __name__ == "__main__":
write_pdf("output/dest.pdf", "input/Hans Martin Eikerol - signatur.png", \
"Kjære nabo", "50", "100","Testveien 1")
|
the-stack_106_19338
|
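# Reads the number of points on the left and right antler: equal non-zero counts
# give an "Even" moose scored l+r, unequal counts give an "Odd" moose scored as
# twice the larger side, and 0 0 means "Not a moose".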
l,r = input().split()
l,r = int(l), int(r)
if l == 0 and r == 0:
print("Not a moose")
elif l == r:
print("Even", l+r)
elif l != r:
mx = max(l,r)
print("Odd", mx*2)
|
the-stack_106_19339
|
#!/usr/local/bin/python3
import serial, io
import time
device = '/dev/cu.usbmodem1431' # serial port
baud = 9600 # baud rate
now=time.localtime(time.time())
currentyear=now.tm_year
currentmonth=now.tm_mon
currentday=now.tm_mday
filename = '{0}_{1}_{2}GAC-log.txt'.format(currentyear,currentmonth,currentday) # log file to save data in
with serial.Serial(device,baud) as serialPort, open(filename,'wb') as outFile:
while(1):
line = serialPort.readline() # must send \n! get a line of log
timestamp = int(time.time())
        print(timestamp, line)  # show timestamped line on screen
        # outFile is opened in binary mode, so encode the timestamp and write it
        # together with the raw bytes read from the serial port
        outFile.write(("%d\t" % timestamp).encode() + line) # write line of text to file
outFile.flush() # make sure it actually gets written out
|
the-stack_106_19341
|
# coding: utf-8
"""
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
"""
"""
ProductAdvertisingAPI
https://webservices.amazon.com/paapi5/documentation/index.html # noqa: E501
"""
import pprint
import re # noqa: F401
import six
from .unit_based_attribute import UnitBasedAttribute # noqa: F401,E501
class DimensionBasedAttribute(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'height': 'UnitBasedAttribute',
'length': 'UnitBasedAttribute',
'weight': 'UnitBasedAttribute',
'width': 'UnitBasedAttribute'
}
attribute_map = {
'height': 'Height',
'length': 'Length',
'weight': 'Weight',
'width': 'Width'
}
def __init__(self, height=None, length=None, weight=None, width=None): # noqa: E501
"""DimensionBasedAttribute - a model defined in Swagger""" # noqa: E501
self._height = None
self._length = None
self._weight = None
self._width = None
self.discriminator = None
if height is not None:
self.height = height
if length is not None:
self.length = length
if weight is not None:
self.weight = weight
if width is not None:
self.width = width
@property
def height(self):
"""Gets the height of this DimensionBasedAttribute. # noqa: E501
:return: The height of this DimensionBasedAttribute. # noqa: E501
:rtype: UnitBasedAttribute
"""
return self._height
@height.setter
def height(self, height):
"""Sets the height of this DimensionBasedAttribute.
:param height: The height of this DimensionBasedAttribute. # noqa: E501
:type: UnitBasedAttribute
"""
self._height = height
@property
def length(self):
"""Gets the length of this DimensionBasedAttribute. # noqa: E501
:return: The length of this DimensionBasedAttribute. # noqa: E501
:rtype: UnitBasedAttribute
"""
return self._length
@length.setter
def length(self, length):
"""Sets the length of this DimensionBasedAttribute.
:param length: The length of this DimensionBasedAttribute. # noqa: E501
:type: UnitBasedAttribute
"""
self._length = length
@property
def weight(self):
"""Gets the weight of this DimensionBasedAttribute. # noqa: E501
:return: The weight of this DimensionBasedAttribute. # noqa: E501
:rtype: UnitBasedAttribute
"""
return self._weight
@weight.setter
def weight(self, weight):
"""Sets the weight of this DimensionBasedAttribute.
:param weight: The weight of this DimensionBasedAttribute. # noqa: E501
:type: UnitBasedAttribute
"""
self._weight = weight
@property
def width(self):
"""Gets the width of this DimensionBasedAttribute. # noqa: E501
:return: The width of this DimensionBasedAttribute. # noqa: E501
:rtype: UnitBasedAttribute
"""
return self._width
@width.setter
def width(self, width):
"""Sets the width of this DimensionBasedAttribute.
:param width: The width of this DimensionBasedAttribute. # noqa: E501
:type: UnitBasedAttribute
"""
self._width = width
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(DimensionBasedAttribute, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DimensionBasedAttribute):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
the-stack_106_19342
|
import faiss
import numpy as np
class FaissKMeans:
def __init__(self, n_clusters=8, n_init=1, max_iter=50):
self.n_clusters = n_clusters
self.n_init = n_init
self.max_iter = max_iter
self.kmeans = None
self.cluster_centers_ = None
self.inertia_ = None
self.labels_ = None
self.obj = None
def predict(self, X):
return self.kmeans.index.search(X.astype(np.float32), 1)[1]
def fit(self, X, ngpu=2):
# This code is based on https://github.com/facebookresearch/faiss/blob/master/benchs/kmeans_mnist.py
D = X.shape[1]
clus = faiss.Clustering(D, self.n_clusters)
# otherwise the kmeans implementation sub-samples the training set
clus.max_points_per_centroid = 10000000
clus.niter = self.max_iter
clus.nredo = self.n_init
res = [faiss.StandardGpuResources() for i in range(ngpu)]
flat_config = []
for i in range(ngpu):
cfg = faiss.GpuIndexFlatConfig()
cfg.useFloat16 = False
cfg.device = i
flat_config.append(cfg)
if ngpu == 1:
index = faiss.GpuIndexFlatL2(res[0], D, flat_config[0])
else:
indexes = [faiss.GpuIndexFlatL2(res[i], D, flat_config[i])
for i in range(ngpu)]
index = faiss.IndexProxy()
for sub_index in indexes:
index.addIndex(sub_index)
clus.train(X.astype(np.float32), index)
self.labels_ = index.search(X.astype(np.float32), 1)[1].reshape(-1)
stats = clus.iteration_stats
stats = [stats.at(i) for i in range(stats.size())]
self.obj = np.array([st.obj for st in stats])
self.kmeans = clus
self.inertia_ = self.obj[-1]
self.cluster_centers_ = self.kmeans.centroids
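# Minimal usage sketch (illustrative only, not part of the original module).
# Assumes a GPU-enabled faiss build; the data is random and the cluster count
# is arbitrary.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X = rng.rand(1000, 64).astype(np.float32)  # 1000 points in 64 dimensions
    km = FaissKMeans(n_clusters=8, max_iter=20)
    km.fit(X, ngpu=1)          # cluster on a single GPU
    print(km.labels_.shape)    # per-point cluster assignment, shape (1000,)
    print(km.inertia_)         # final k-means objective value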
|
the-stack_106_19343
|
from data_importers.management.commands import BaseHalaroseCsvImporter
from django.contrib.gis.geos import Point
class Command(BaseHalaroseCsvImporter):
council_id = "E06000019"
addresses_name = (
"parl.2019-12-12/Version 1/polling_station_export-2019-11-08here.csv"
)
stations_name = (
"parl.2019-12-12/Version 1/polling_station_export-2019-11-08here.csv"
)
elections = ["parl.2019-12-12"]
allow_station_point_from_postcode = False
def station_record_to_dict(self, record):
rec = super().station_record_to_dict(record)
if record.pollingstationname == "Weobley Village Hall":
rec["location"] = Point(-2.869103, 52.159915, srid=4326)
if record.pollingstationname == "Richards Castle Village Hall":
rec["location"] = Point(-2.74213, 52.32607, srid=4326)
return rec
def address_record_to_dict(self, record):
rec = super().address_record_to_dict(record)
uprn = record.uprn.strip().lstrip("0")
if record.housepostcode.strip() == "HR9 7RA":
return None
if uprn in ["10007369349", "10091655370"]:
return None
if record.houseid in [
"3072006",
"9010513",
]:
return None
if record.housepostcode.strip() in ["HR1 2PJ", "HR4 8FH"]:
return None
return rec
|
the-stack_106_19345
|
#!/usr/bin/env python
import sys
import os
import re
import yaml
HEADER = '''---
permalink: "/{}/all/"
---'''
def main(language, configPath, crossrefPath, rootDir):
slugs, markers = readConfig(configPath)
crossref = readCrossref(crossrefPath, markers['crossref'])
print(HEADER.format(language))
before, body, after = getBody(os.path.join(rootDir, 'index.html'),
markers['start'], markers['end'])
print(fixToc(before, language))
print(fixLinks(body, crossref))
slugPaths = [(s, os.path.join(rootDir, s, 'index.html')) for s in slugs]
for (slug, path) in slugPaths:
_, body, _ = getBody(path, markers['start'], markers['end'])
body = body.replace('<h1', '<h1 id="s:{}"'.format(slug))
print(fixLinks(body, crossref))
print(after)
def readConfig(configPath):
with open(configPath, 'r') as reader:
        config = yaml.safe_load(reader)
slugs = [p.strip('/') for p in config['toc']['lessons'] + config['toc']['extras']]
return slugs, config['marker']
def readCrossref(crossrefPath, prefix):
with open(crossrefPath, 'r') as reader:
data = reader.read()
data = data.replace(prefix, '')
data = eval(data)
return data
def getBody(path, markerStart, markerEnd):
with open(path, 'r') as reader:
text = reader.read()
start = text.find(markerStart) + len(markerStart)
end = text.find(markerEnd, start)
return text[:start], text[start:end], text[end:]
def fixToc(doc, language):
def link(m):
slug = m.group(1)
return 'href="#s:{}"'.format(slug)
link.pattern = re.compile('href="/{}/(.+?)/"'.format(language))
return link.pattern.sub(link, doc)
def fixLinks(doc, crossref):
def appendix(m):
target = m.group(1)
title = crossref[target]['number']
return '<a href="#{}">Appendix {}</a>'.format(target, title)
appendix.pattern = re.compile(r'<a href="#APPENDIX">(.+?)</a>')
def chapter(m):
target = m.group(1)
title = crossref[target]['number']
return '<a href="#{}">Chapter {}</a>'.format(target, title)
chapter.pattern = re.compile(r'<a href="#CHAPTER">(.+?)</a>')
def cite(m):
target = m.group(1)
return '<a href="#{}">{}</a>'.format(target, target)
cite.pattern = re.compile(r'<a href="#CITE">(.+?)</a>')
def figure(m):
target = m.group(1)
title = crossref[target]['number']
return '<a href="#{}">Figure {}</a>'.format(target, title)
figure.pattern = re.compile(r'<a href="#FIGURE">(.+?)</a>')
def section(m):
target = m.group(1)
title = crossref[target]['number']
return '<a href="#{}">Section {}</a>'.format(target, title)
section.pattern = re.compile(r'<a href="#SECTION">(.+?)</a>')
FUNCS = [appendix, figure, cite, chapter, section]
for func in FUNCS:
doc = func.pattern.sub(func, doc)
return doc
if __name__ == '__main__':
if len(sys.argv) != 5:
sys.stderr.write('Usage: mergebook language /path/to/config /path/to/crossref /path/to/site\n')
sys.exit(1)
main(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])
|
the-stack_106_19346
|
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datetime import date
from django import forms
from django.utils.safestring import mark_safe
from django.forms.models import inlineformset_factory
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Fieldset, Div, Submit, Hidden, HTML, Field
from crispy_forms.bootstrap import FormActions, AppendedText, InlineRadios
from wells.models import ActivitySubmission
class ActivitySubmissionSurfaceSealForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.disable_csrf = True
self.helper.render_hidden_fields = True
self.helper.layout = Layout(
Fieldset(
'Surface Seal and Backfill Information',
Div(
Div('surface_seal_material', css_class='col-md-3'),
Div(AppendedText('surface_seal_depth', 'ft'), css_class='col-md-2'),
Div(AppendedText('surface_seal_thickness', 'in'), css_class='col-md-2'),
css_class='row',
),
Div(
Div('surface_seal_method', css_class='col-md-3'),
css_class='row',
),
Div(
Div(HTML(' '), css_class='col-md-12'),
css_class='row',
),
Div(
Div('backfill_above_surface_seal', css_class='col-md-3'),
Div(AppendedText('backfill_above_surface_seal_depth', 'ft'), css_class='col-md-2'),
css_class='row',
),
),
Fieldset(
'Liner Information',
Div(
Div('liner_material', css_class='col-md-3'),
css_class='row',
),
Div(
Div(AppendedText('liner_diameter', 'in'), css_class='col-md-2'),
Div(AppendedText('liner_thickness', 'in'), css_class='col-md-2'),
css_class='row',
),
Div(
Div(AppendedText('liner_from', 'ft (bgl)'), css_class='col-md-2'),
Div(AppendedText('liner_to', 'ft (bgl)'), css_class='col-md-2'),
css_class='row',
),
)
)
super(ActivitySubmissionSurfaceSealForm, self).__init__(*args, **kwargs)
    def clean_surface_seal_material(self):
        surface_seal_material = self.cleaned_data.get('surface_seal_material')
        if self.initial['casing_exists'] and not surface_seal_material:
            raise forms.ValidationError('This field is required.')
        return surface_seal_material
    def clean_surface_seal_depth(self):
        surface_seal_depth = self.cleaned_data.get('surface_seal_depth')
        if self.initial['casing_exists'] and not surface_seal_depth:
            raise forms.ValidationError('This field is required.')
        return surface_seal_depth
    def clean_surface_seal_thickness(self):
        surface_seal_thickness = self.cleaned_data.get('surface_seal_thickness')
        if self.initial['casing_exists'] and not surface_seal_thickness:
            raise forms.ValidationError('This field is required.')
        return surface_seal_thickness
    def clean_surface_seal_method(self):
        surface_seal_method = self.cleaned_data.get('surface_seal_method')
        if self.initial['casing_exists'] and not surface_seal_method:
            raise forms.ValidationError('This field is required.')
        return surface_seal_method
def clean(self):
cleaned_data = super(ActivitySubmissionSurfaceSealForm, self).clean()
liner_from = cleaned_data.get('liner_from')
liner_to = cleaned_data.get('liner_to')
errors = []
if liner_from and liner_to and liner_to < liner_from:
errors.append('Liner To must be greater than or equal to From.')
if len(errors) > 0:
raise forms.ValidationError(errors)
return cleaned_data
class Meta:
model = ActivitySubmission
fields = ['surface_seal_material', 'surface_seal_depth', 'surface_seal_thickness',
'surface_seal_method', 'backfill_above_surface_seal', 'backfill_above_surface_seal_depth',
'liner_material', 'liner_diameter', 'liner_thickness', 'liner_from', 'liner_to']
|
the-stack_106_19347
|
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
from nemo_text_processing.text_normalization.normalize import Normalizer
from nemo_text_processing.text_normalization.normalize_with_audio import NormalizerWithAudio
from parameterized import parameterized
from ..utils import CACHE_DIR, PYNINI_AVAILABLE, parse_test_case_file
class TestDate:
inverse_normalizer = (
InverseNormalizer(lang='es', cache_dir=CACHE_DIR, overwrite_cache=False) if PYNINI_AVAILABLE else None
)
@parameterized.expand(parse_test_case_file('es/data_inverse_text_normalization/test_cases_date.txt'))
@pytest.mark.skipif(
not PYNINI_AVAILABLE, reason="`pynini` not installed, please install via nemo_text_processing/setup.sh"
)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_denorm(self, test_input, expected):
pred = self.inverse_normalizer.inverse_normalize(test_input, verbose=False)
assert pred == expected
normalizer = (
Normalizer(input_case='cased', lang='es', cache_dir=CACHE_DIR, overwrite_cache=False)
if PYNINI_AVAILABLE
else None
)
normalizer_with_audio = (
NormalizerWithAudio(input_case='cased', lang='es', cache_dir=CACHE_DIR, overwrite_cache=False)
if PYNINI_AVAILABLE and CACHE_DIR
else None
)
@parameterized.expand(parse_test_case_file('es/data_text_normalization/test_cases_date.txt'))
@pytest.mark.skipif(
not PYNINI_AVAILABLE, reason="`pynini` not installed, please install via nemo_text_processing/setup.sh"
)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_norm(self, test_input, expected):
pred = self.normalizer.normalize(test_input, verbose=False)
assert pred == expected
if self.normalizer_with_audio:
pred_non_deterministic = self.normalizer_with_audio.normalize(
test_input, n_tagged=500, punct_post_process=False
)
assert expected in pred_non_deterministic
|
the-stack_106_19348
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'ar'
import os
import glob
import shutil
import numpy as np
import matplotlib.pyplot as plt
import pycocotools.coco as coco
from pycocotools.coco import COCO
from pycocotools import mask
import skimage.io as skio
import skimage.color as skcl
import warnings
######################################
dictFoods = {
# 'banana' : 52,
# 'apple' : 53,
'sandwich' : 54,
# 'orange' : 55,
# 'broccoli' : 56,
# 'carrot' : 57,
'hot dog' : 58,
'pizza' : 59,
# 'donut' : 60,
'cake' : 61
}
reversedDirFoods = {vv:kk for kk,vv in dictFoods.items()}
listSortedFoodNames=[
'pizza',
'cake',
'sandwich',
'hot dog',
# 'donut',
# 'banana',
# 'apple',
# 'orange',
# 'broccoli',
# 'carrot'
]
listSortedFoodIds = [dictFoods[xx] for xx in listSortedFoodNames]
######################################
def makeDirIfNotExists(pathToDir, isCleanIfExists=True):
"""
create directory if directory is absent
:param pathToDir: path to directory
:param isCleanIfExists: flag: clean directory if directory exists
:return: None
"""
if os.path.isdir(pathToDir) and isCleanIfExists:
shutil.rmtree(pathToDir)
if not os.path.isdir(pathToDir):
os.makedirs(pathToDir)
######################################
if __name__ == '__main__':
# dataDir = '/home/ar/datasets/mscoco'
dataDir = '/mnt/data1T2/datasets2/mscoco/raw-data'
# dataType = 'train2014'
dataType = 'val2014'
dirImg = '%s/%s' % (dataDir, dataType)
if not os.path.isdir(dirImg):
raise Exception('Cant find directory with images [%s]' % dirImg)
dirOut = '%s/%s-food2' % (dataDir, dataType)
makeDirIfNotExists(pathToDir=dirOut, isCleanIfExists=False)
#
annFile = '%s/annotations/instances_%s.json' % (dataDir, dataType)
imgDir = '%s/%s' % (dataDir, dataType)
if not os.path.isdir(imgDir):
raise Exception('Cant find directory with MS-COCO images [%s]' % dataDir)
#
coco = COCO(annFile)
#
listCatsFoodIdx = coco.getCatIds(supNms=['food'])
assert (set(listCatsFoodIdx) == set(listSortedFoodIds))
for ii, idx in enumerate(listCatsFoodIdx):
tmpCat = coco.loadCats(ids = idx)[0]
print ('%d [%d] : %s (%s)' % (ii, idx, tmpCat['name'], tmpCat['supercategory']))
#
tmpDictFoodImgIds = {}
for ii, idx in enumerate(listSortedFoodIds):
tmpImgIds = coco.getImgIds(catIds=idx)
for timgId in tmpImgIds:
            if timgId in tmpDictFoodImgIds:
tmpDictFoodImgIds[timgId].append(idx)
else:
tmpDictFoodImgIds[timgId] = [idx]
setAllFoodImgIds = sorted(tmpDictFoodImgIds.keys())
print ('#list/#set = %d' % len(tmpDictFoodImgIds.keys()))
numImages = len(setAllFoodImgIds)
for ii, kk in enumerate(setAllFoodImgIds):
print ('[%d/%d]' % (ii, numImages))
timgInfo = coco.loadImgs(kk)[0]
foutImg = '%s/%s' % (dirOut, timgInfo['file_name'])
foutMsk = '%s/%s-mskfood.png' % (dirOut, timgInfo['file_name'])
foutPrv = '%s/%s-preview.jpg' % (dirOut, timgInfo['file_name'])
if os.path.isfile(foutPrv):
print ('\timage already processed [%s]' % foutImg)
continue
#
fimg = '%s/%s' % (dirImg, timgInfo['file_name'])
timg = skio.imread(fimg)
# assert (timg.ndim==3)
if timg.ndim==2:
timg = skcl.gray2rgb(timg)
twidth = timgInfo['width']
theight = timgInfo['height']
vv = tmpDictFoodImgIds[kk]
tmsk = None
for vvi in vv:
tannIds = coco.getAnnIds(imgIds=kk, catIds=vvi, iscrowd=False)
tanns = coco.loadAnns(tannIds)
print ('\t :: processing: %d -> %s' % (vvi, reversedDirFoods[vvi]))
tmpMsk = None
for ttt in tanns:
rle = mask.frPyObjects(ttt['segmentation'], theight, twidth)
tmpm = mask.decode(rle)
if tmpm.shape[2]>1:
print ('\t\t**** multiple shape out :( --> [%s]' % fimg)
tmpm = np.sum(tmpm, axis=2)
if tmpMsk is None:
tmpMsk = tmpm
else:
tmpMsk += tmpm
if tmsk is None:
tmsk = np.zeros(tmpMsk.shape, dtype=tmpMsk.dtype)
tmsk[tmpMsk>0]=vvi
#
timgPreview = timg.copy()
chIdx = 1
timgPreviewCh = timgPreview[:,:,chIdx]
timgPreviewCh[tmsk>0] = 255
timgPreview[:,:,chIdx] = timgPreviewCh
#
with warnings.catch_warnings():
warnings.simplefilter('ignore')
skio.imsave(foutImg, timg)
skio.imsave(foutMsk, tmsk[:,:])
skio.imsave(foutPrv, timgPreview)
print ('-------')
|
the-stack_106_19349
|
import collections
from . import base
__all__ = ["AdaGrad"]
class AdaGrad(base.Optimizer):
"""AdaGrad optimizer.
Parameters
----------
lr
eps
Attributes
----------
g2 : collections.defaultdict
Examples
--------
>>> from river import datasets
>>> from river import evaluate
>>> from river import linear_model
>>> from river import metrics
>>> from river import optim
>>> from river import preprocessing
>>> dataset = datasets.Phishing()
>>> optimizer = optim.AdaGrad()
>>> model = (
... preprocessing.StandardScaler() |
... linear_model.LogisticRegression(optimizer)
... )
>>> metric = metrics.F1()
>>> evaluate.progressive_val_score(dataset, model, metric)
F1: 88.01%
References
----------
[^1]: [Duchi, J., Hazan, E. and Singer, Y., 2011. Adaptive subgradient methods for online learning and stochastic optimization. Journal of machine learning research, 12(Jul), pp.2121-2159.](http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf)
"""
def __init__(self, lr=0.1, eps=1e-8):
super().__init__(lr)
self.eps = eps
self.g2 = collections.defaultdict(float)
def _step_with_dict(self, w, g):
for i, gi in g.items():
self.g2[i] += gi**2
w[i] -= self.learning_rate / (self.g2[i] + self.eps) ** 0.5 * gi
return w
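# Illustrative sketch (not part of the module): _step_with_dict applies the
# per-weight update w_i <- w_i - lr / sqrt(sum_t g_{t,i}^2 + eps) * g_i, e.g.
#
#     opt = AdaGrad(lr=0.1)
#     w = opt._step_with_dict({"x": 1.0}, {"x": 0.5})
#     # g2["x"] is now 0.25, so w["x"] ~= 1.0 - 0.1 / 0.5 * 0.5 = 0.9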
|
the-stack_106_19350
|
import logging
from functools import partial
from typing import Optional
from qcodes import VisaInstrument
from qcodes import ChannelList, InstrumentChannel
from qcodes.utils import validators as vals
import numpy as np
from qcodes import MultiParameter, ArrayParameter
log = logging.getLogger(__name__)
class FrequencySweepMagPhase(MultiParameter):
"""
Sweep that return magnitude and phase.
"""
def __init__(self, name, instrument, start, stop, npts, channel):
super().__init__(name, names=("", ""), shapes=((), ()))
self._instrument = instrument
self.set_sweep(start, stop, npts)
self._channel = channel
self.names = ('magnitude',
'phase')
self.labels = ('{} magnitude'.format(instrument.short_name),
'{} phase'.format(instrument.short_name))
self.units = ('', 'rad')
self.setpoint_units = (('Hz',), ('Hz',))
self.setpoint_labels = (('{} frequency'.format(instrument.short_name),), ('{} frequency'.format(instrument.short_name),))
self.setpoint_names = (('{}_frequency'.format(instrument.short_name),), ('{}_frequency'.format(instrument.short_name),))
def set_sweep(self, start, stop, npts):
# needed to update config of the software parameter on sweep change
# freq setpoints tuple as needs to be hashable for look up
f = tuple(np.linspace(int(start), int(stop), num=npts))
self.setpoints = ((f,), (f,))
self.shapes = ((npts,), (npts,))
def get_raw(self):
old_format = self._instrument.format()
self._instrument.format('Complex')
data = self._instrument._get_sweep_data(force_polar = True)
self._instrument.format(old_format)
return abs(data), np.angle(data)
class FrequencySweep(ArrayParameter):
"""
Hardware controlled parameter class for Rohde Schwarz ZNB trace.
Instrument returns an array of transmission or reflection data depending
on the active measurement.
Args:
name: parameter name
instrument: instrument the parameter belongs to
start: starting frequency of sweep
stop: ending frequency of sweep
npts: number of points in frequency sweep
Methods:
set_sweep(start, stop, npts): sets the shapes and
setpoint arrays of the parameter to correspond with the sweep
get(): executes a sweep and returns magnitude and phase arrays
"""
def __init__(self, name, instrument, start, stop, npts, channel):
super().__init__(name, shape=(npts,),
instrument=instrument,
unit='dB',
label='{} magnitude'.format(
instrument.short_name),
setpoint_units=('Hz',),
setpoint_labels=('{} frequency'.format(instrument.short_name),),
setpoint_names=('{}_frequency'.format(instrument.short_name),))
self.set_sweep(start, stop, npts)
self._channel = channel
def set_sweep(self, start, stop, npts):
# needed to update config of the software parameter on sweep change
# freq setpoints tuple as needs to be hashable for look up
f = tuple(np.linspace(int(start), int(stop), num=npts))
self.setpoints = (f,)
self.shape = (npts,)
def get_raw(self):
data = self._instrument._get_sweep_data()
if self._instrument.format() in ['Polar', 'Complex',
'Smith', 'Inverse Smith']:
log.warning("QCoDeS Dataset does not currently support Complex "
"values. Will discard the imaginary part. In order to "
"acquire phase and amplitude use the "
"FrequencySweepMagPhase parameter.")
return data
class ZNBChannel(InstrumentChannel):
def __init__(self, parent: 'ZNB', name: str, channel: int, vna_parameter: str=None,
existing_trace_to_bind_to: Optional[str]=None) -> None:
"""
Args:
parent: Instrument that this channel is bound to.
name: Name to use for this channel.
channel: channel on the VNA to use
vna_parameter: Name of parameter on the vna that this should
measure such as S12. If left empty this will fall back to
`name`.
existing_trace_to_bind_to: Name of an existing trace on the VNA.
If supplied try to bind to an existing trace with this name
rather than creating a new trace.
"""
n = channel
self._instrument_channel = channel
if vna_parameter is None:
vna_parameter = name
self._vna_parameter = vna_parameter
super().__init__(parent, name)
if existing_trace_to_bind_to is None:
self._tracename = "Trc{}".format(channel)
else:
            traces = self._parent.ask("CONFigure:TRACe:CATalog?")
if existing_trace_to_bind_to not in traces:
raise RuntimeError(f"Trying to bind to {existing_trace_to_bind_to} "
f"which is not in {traces}")
self._tracename = existing_trace_to_bind_to
# map hardware channel to measurement
# hardware channels are mapped one to one to qcodes channels
# we are not using sub traces within channels.
if existing_trace_to_bind_to is None:
self.write("CALC{}:PAR:SDEF '{}', '{}'".format(self._instrument_channel,
self._tracename,
self._vna_parameter))
# source power is dependent on model, but not well documented.
# here we assume -60 dBm for ZNB20, the others are set,
# due to lack of knowledge, to -80 dBm as of before the edit
full_modelname = self._parent.get_idn()['model']
model: Optional[str]
if full_modelname is not None:
model = full_modelname.split('-')[0]
else:
raise RuntimeError("Could not determine ZNB model")
mSourcePower = {'ZNB4':-80, 'ZNB8':-80, 'ZNB20':-60}
if model not in mSourcePower.keys():
raise RuntimeError("Unsupported ZNB model: {}".format(model))
self._min_source_power: float
self._min_source_power = mSourcePower[model]
self.add_parameter(name='vna_parameter',
label='VNA parameter',
get_cmd="CALC{}:PAR:MEAS? '{}'".format(self._instrument_channel,
self._tracename),
get_parser=self._strip)
self.add_parameter(name='power',
label='Power',
unit='dBm',
get_cmd='SOUR{}:POW?'.format(n),
set_cmd='SOUR{}:POW {{:.4f}}'.format(n),
get_parser=float,
vals=vals.Numbers(self._min_source_power, 25))
# there is an 'increased bandwidth option' (p. 4 of manual) that does
# not get taken into account here
self.add_parameter(name='bandwidth',
label='Bandwidth',
unit='Hz',
get_cmd='SENS{}:BAND?'.format(n),
set_cmd='SENS{}:BAND {{:.4f}}'.format(n),
get_parser=int,
vals=vals.Enum(
*np.append(10**6,
np.kron([1, 1.5, 2, 3, 5, 7],
10**np.arange(6))))
)
self.add_parameter(name='avg',
label='Averages',
unit='',
get_cmd='SENS{}:AVER:COUN?'.format(n),
set_cmd='SENS{}:AVER:COUN {{:.4f}}'.format(n),
get_parser=int,
vals=vals.Ints(1, 5000))
self.add_parameter(name='start',
get_cmd='SENS{}:FREQ:START?'.format(n),
set_cmd=self._set_start,
get_parser=float,
vals=vals.Numbers(self._parent._min_freq, self._parent._max_freq - 10))
self.add_parameter(name='stop',
get_cmd='SENS{}:FREQ:STOP?'.format(n),
set_cmd=self._set_stop,
get_parser=float,
vals=vals.Numbers(self._parent._min_freq + 1, self._parent._max_freq))
self.add_parameter(name='center',
get_cmd='SENS{}:FREQ:CENT?'.format(n),
set_cmd=self._set_center,
get_parser=float,
vals=vals.Numbers(self._parent._min_freq + 0.5, self._parent._max_freq - 10))
self.add_parameter(name='span',
get_cmd='SENS{}:FREQ:SPAN?'.format(n),
set_cmd=self._set_span,
get_parser=float,
vals=vals.Numbers(1, self._parent._max_freq - self._parent._min_freq))
self.add_parameter(name='npts',
get_cmd='SENS{}:SWE:POIN?'.format(n),
set_cmd=self._set_npts,
get_parser=int)
self.add_parameter(name='status',
get_cmd='CONF:CHAN{}:MEAS?'.format(n),
set_cmd='CONF:CHAN{}:MEAS {{}}'.format(n),
get_parser=int)
self.add_parameter(name='format',
get_cmd=partial(self._get_format, tracename=self._tracename),
set_cmd=self._set_format,
val_mapping={'dB': 'MLOG\n',
'Linear Magnitude': 'MLIN\n',
'Phase': 'PHAS\n',
'Unwr Phase': 'UPH\n',
'Polar': 'POL\n',
'Smith': 'SMIT\n',
'Inverse Smith': 'ISM\n',
'SWR': 'SWR\n',
'Real': 'REAL\n',
'Imaginary': 'IMAG\n',
'Delay': "GDEL\n",
'Complex': "COMP\n"
})
self.add_parameter(name='trace_mag_phase',
start=self.start(),
stop=self.stop(),
npts=self.npts(),
channel=n,
parameter_class=FrequencySweepMagPhase)
self.add_parameter(name='trace',
start=self.start(),
stop=self.stop(),
npts=self.npts(),
channel=n,
parameter_class=FrequencySweep)
self.add_function('autoscale',
call_cmd='DISPlay:TRACe1:Y:SCALe:AUTO ONCE, "{}"'.format(self._tracename))
def _get_format(self, tracename):
n = self._instrument_channel
self.write(f"CALC{n}:PAR:SEL '{tracename}'")
return self.ask(f"CALC{n}:FORM?")
def _set_format(self, val):
unit_mapping = {'MLOG\n': 'dB',
'MLIN\n': '',
'PHAS\n': 'rad',
'UPH\n': 'rad',
'POL\n': '',
'SMIT\n': '',
'ISM\n': '',
'SWR\n': 'U',
'REAL\n': 'U',
'IMAG\n': 'U',
'GDEL\n': 'S',
'COMP\n': ''}
label_mapping = {'MLOG\n': 'Magnitude',
'MLIN\n': 'Magnitude',
'PHAS\n': 'Phase',
'UPH\n': 'Unwrapped phase',
'POL\n': 'Complex Magnitude',
'SMIT\n': 'Complex Magnitude',
'ISM\n': 'Complex Magnitude',
'SWR\n': 'Standing Wave Ratio',
'REAL\n': 'Real Magnitude',
'IMAG\n': 'Imaginary Magnitude',
'GDEL\n': 'Delay',
'COMP\n': 'Complex Magnitude'}
channel = self._instrument_channel
self.write(f"CALC{channel}:PAR:SEL '{self._tracename}'")
self.write(f"CALC{channel}:FORM {val}")
self.trace.unit = unit_mapping[val]
self.trace.label = "{} {}".format(
self.short_name, label_mapping[val])
def _strip(self, var):
"Strip newline and quotes from instrument reply"
return var.rstrip()[1:-1]
    def _set_start(self, val):
        channel = self._instrument_channel
        stop = self.stop()
        if val >= stop:
            raise ValueError(
                "Stop frequency must be larger than start frequency.")
        self.write('SENS{}:FREQ:START {:.7f}'.format(channel, val))
        # we get start as the vna may not be able to set it to the exact value provided
        start = self.start()
        if val != start:
            log.warning(
                "Could not set start to {} setting it to {}".format(val, start))
        self.update_traces()
def _set_stop(self, val):
channel = self._instrument_channel
start = self.start()
if val <= start:
raise ValueError(
"Stop frequency must be larger than start frequency.")
self.write('SENS{}:FREQ:STOP {:.7f}'.format(channel, val))
# we get stop as the vna may not be able to set it to the exact value provided
stop = self.stop()
if val != stop:
log.warning(
"Could not set stop to {} setting it to {}".format(val, stop))
self.update_traces()
def _set_npts(self, val):
channel = self._instrument_channel
self.write('SENS{}:SWE:POIN {:.7f}'.format(channel, val))
self.update_traces()
def _set_span(self, val):
channel = self._instrument_channel
self.write('SENS{}:FREQ:SPAN {:.7f}'.format(channel, val))
self.update_traces()
def _set_center(self, val):
channel = self._instrument_channel
self.write('SENS{}:FREQ:CENT {:.7f}'.format(channel, val))
self.update_traces()
def update_traces(self):
""" updates start, stop and npts of all trace parameters"""
start = self.start()
stop = self.stop()
npts = self.npts()
for _, parameter in self.parameters.items():
if isinstance(parameter, (ArrayParameter, MultiParameter)):
try:
parameter.set_sweep(start, stop, npts)
except AttributeError:
pass
def _get_sweep_data(self, force_polar=False):
if not self._parent.rf_power():
log.warning("RF output is off when getting sweep data")
# it is possible that the instrument and qcodes disagree about
# which parameter is measured on this channel
instrument_parameter = self.vna_parameter()
if instrument_parameter != self._vna_parameter:
raise RuntimeError("Invalid parameter. Tried to measure "
"{} got {}".format(self._vna_parameter,
instrument_parameter))
self.write('SENS{}:AVER:STAT ON'.format(self._instrument_channel))
self.write('SENS{}:AVER:CLE'.format(self._instrument_channel))
# preserve original state of the znb
initial_state = self.status()
self.status(1)
self._parent.cont_meas_off()
try:
# if force polar is set, the SDAT data format will be used. Here
# the data will be transferred as a complex number independent of
# the set format in the instrument.
if force_polar:
data_format_command = 'SDAT'
else:
data_format_command = 'FDAT'
# instrument averages over its last 'avg' number of sweeps
# need to ensure averaged result is returned
for avgcount in range(self.avg()):
self.write('INIT{}:IMM; *WAI'.format(self._instrument_channel))
self._parent.write(f"CALC{self._instrument_channel}:PAR:SEL '{self._tracename}'")
data_str = self.ask(
'CALC{}:DATA? {}'.format(self._instrument_channel,
data_format_command))
data = np.array(data_str.rstrip().split(',')).astype('float64')
if self.format() in ['Polar', 'Complex',
'Smith', 'Inverse Smith']:
data = data[0::2] + 1j * data[1::2]
finally:
self._parent.cont_meas_on()
self.status(initial_state)
return data
class ZNB(VisaInstrument):
"""
qcodes driver for the Rohde & Schwarz ZNB8 and ZNB20
virtual network analyser. It can probably be extended to ZNB4 and 40
without too much work.
Requires FrequencySweep parameter for taking a trace
Args:
name: instrument name
address: Address of instrument probably in format
'TCPIP0::192.168.15.100::inst0::INSTR'
init_s_params: Automatically setup channels for all S parameters on the
VNA.
reset_channels: If True any channels defined on the VNA at the time
of initialization are reset and removed.
**kwargs: passed to base class
TODO:
- check initialisation settings and test functions
"""
CHANNEL_CLASS = ZNBChannel
def __init__(self, name: str, address: str, init_s_params: bool = True,
reset_channels: bool = True, **kwargs) -> None:
super().__init__(name=name, address=address, **kwargs)
# TODO(JHN) I could not find a way to get max and min freq from
# the API, if that is possible replace below with that
# See page 1025 in the manual. 7.3.15.10 for details of max/min freq
# no attempt to support ZNB40, not clear without one how the format
# is due to variants
fullmodel = self.get_idn()['model']
if fullmodel is not None:
model = fullmodel.split('-')[0]
else:
raise RuntimeError("Could not determine ZNB model")
# format seems to be ZNB8-4Port
mFrequency = {'ZNB4':(9e3, 4.5e9), 'ZNB8':(9e3, 8.5e9), 'ZNB20':(100e3, 20e9)}
if model not in mFrequency.keys():
raise RuntimeError("Unsupported ZNB model {}".format(model))
self._min_freq: float
self._max_freq: float
self._min_freq, self._max_freq = mFrequency[model]
self.add_parameter(name='num_ports',
get_cmd='INST:PORT:COUN?',
get_parser=int)
num_ports = self.num_ports()
self.add_parameter(name='rf_power',
get_cmd='OUTP1?',
set_cmd='OUTP1 {}',
val_mapping={True: '1\n', False: '0\n'})
self.add_function('reset', call_cmd='*RST')
self.add_function('tooltip_on', call_cmd='SYST:ERR:DISP ON')
self.add_function('tooltip_off', call_cmd='SYST:ERR:DISP OFF')
self.add_function('cont_meas_on', call_cmd='INIT:CONT:ALL ON')
self.add_function('cont_meas_off', call_cmd='INIT:CONT:ALL OFF')
self.add_function('update_display_once', call_cmd='SYST:DISP:UPD ONCE')
self.add_function('update_display_on', call_cmd='SYST:DISP:UPD ON')
self.add_function('update_display_off', call_cmd='SYST:DISP:UPD OFF')
self.add_function('display_sij_split', call_cmd='DISP:LAY GRID;:DISP:LAY:GRID {},{}'.format(
num_ports, num_ports))
self.add_function('display_single_window',
call_cmd='DISP:LAY GRID;:DISP:LAY:GRID 1,1')
self.add_function('display_dual_window',
call_cmd='DISP:LAY GRID;:DISP:LAY:GRID 2,1')
self.add_function('rf_off', call_cmd='OUTP1 OFF')
self.add_function('rf_on', call_cmd='OUTP1 ON')
if reset_channels:
self.reset()
self.clear_channels()
channels = ChannelList(self, "VNAChannels", self.CHANNEL_CLASS,
snapshotable=True)
self.add_submodule("channels", channels)
if init_s_params:
for i in range(1, num_ports + 1):
for j in range(1, num_ports + 1):
ch_name = 'S' + str(i) + str(j)
self.add_channel(ch_name)
self.channels.lock()
self.display_sij_split()
self.channels.autoscale()
self.update_display_on()
if reset_channels:
self.rf_off()
self.connect_message()
def display_grid(self, rows: int, cols: int):
"""
Display a grid of channels rows by cols
"""
self.write('DISP:LAY GRID;:DISP:LAY:GRID {},{}'.format(rows, cols))
def add_channel(self, channel_name: str, **kwargs):
i_channel = len(self.channels) + 1
channel = self.CHANNEL_CLASS(self, channel_name, i_channel, **kwargs)
self.channels.append(channel)
if i_channel == 1:
self.display_single_window()
if i_channel == 2:
self.display_dual_window()
# shortcut
setattr(self, channel_name, channel)
# initialising channel
self.write('SENS{}:SWE:TYPE LIN'.format(i_channel))
self.write('SENS{}:SWE:TIME:AUTO ON'.format(i_channel))
self.write('TRIG{}:SEQ:SOUR IMM'.format(i_channel))
self.write('SENS{}:AVER:STAT ON'.format(i_channel))
def clear_channels(self):
"""
Remove all channels from the instrument and channel list and
unlock the channel list.
"""
self.write('CALCulate:PARameter:DELete:ALL')
for submodule in self.submodules.values():
if isinstance(submodule, ChannelList):
submodule._channels = []
submodule._channel_mapping = {}
submodule._locked = False
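# Illustrative usage sketch (requires real hardware; the VISA address below is a
# placeholder):
#
#     vna = ZNB('vna', 'TCPIP0::192.168.15.100::inst0::INSTR')
#     s21 = vna.S21                 # channel created when init_s_params=True
#     s21.start(1e9); s21.stop(2e9); s21.npts(201); s21.avg(4)
#     vna.rf_on()
#     magnitudes = s21.trace()      # runs the sweep and returns the dB trace
#     vna.rf_off()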
|
the-stack_106_19352
|
import sys
import os
import json
parent_dir = os.path.abspath(os.path.dirname(__file__))
vendor_dir = os.path.join(parent_dir, 'vendor')
sys.path.insert(1, vendor_dir)
from multiprocessing import Process
from twisted.internet import reactor
from scrapy.crawler import CrawlerRunner
from scrapy.spiderloader import SpiderLoader
from scrapy.utils.log import configure_logging
from scrapy.utils.project import get_project_settings
def main_handler(event, context):
spider_event = event
if event.get('body'):
spider_event = json.loads(event['body'])
print("trigger spider with event {0}".format(spider_event))
try:
crawl(**spider_event)
return "OK"
except Exception as e:
print(e)
raise e
def run_spider(spider_name, project_settings, spider_kwargs):
    runner = CrawlerRunner(project_settings)
    spider_loader = SpiderLoader(project_settings)
    spider_cls = spider_loader.load(spider_name)
    # schedule a single crawl with the given kwargs and stop the reactor once it finishes
    deferred = runner.crawl(spider_cls, **spider_kwargs)
    deferred.addBoth(lambda _: reactor.stop())
    reactor.run()
def crawl(settings={}, spider_name="toscrape-css", spider_kwargs={}):
project_settings = get_project_settings()
configure_logging(project_settings)
# SCF can only write to the /tmp folder.
settings['HTTPCACHE_DIR'] = "/tmp"
settings['FEED_URI'] = ""
settings['FEED_FORMAT'] = "json"
crawler_process = Process(target=run_spider, args=(spider_name, project_settings, spider_kwargs))
crawler_process.start()
crawler_process.join()
if __name__ == "__main__":
event = {
"spider_name": "toscrape-css",
"spider_kwargs": {
"key1": "value1",
"key2": "value2"
}
}
main_handler(event, {})
# sample apigw event
# apigw_event = {
# "body": "{\n \"spider_name\": \"toscrape-css\",\n \"spider_kwargs\": {\n \"key1\": \"value1\",\n \"key2\": \"value2\"\n }\n}",
# "headerParameters": {},
# "headers": {
# "accept": "*/*",
# "accept-encoding": "gzip, deflate, br",
# },
# "httpMethod": "POST",
# "isBase64Encoded": "false",
# "path": "/python_simple_demo",
# "pathParameters": {},
# "queryString": {},
# "queryStringParameters": {}
# }
# main_handler(apigw_event, {})
|
the-stack_106_19353
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# isort:skip_file
"""Unit tests for Superset"""
import cgi
import csv
import datetime
import doctest
import io
import json
import logging
import os
import pytz
import random
import re
import string
from typing import Any, Dict
import unittest
from unittest import mock, skipUnless
import pandas as pd
import sqlalchemy as sqla
from tests.test_app import app
from superset import (
dataframe,
db,
jinja_context,
security_manager,
sql_lab,
is_feature_enabled,
)
from superset.common.query_context import QueryContext
from superset.connectors.connector_registry import ConnectorRegistry
from superset.connectors.sqla.models import SqlaTable
from superset.db_engine_specs.base import BaseEngineSpec
from superset.db_engine_specs.mssql import MssqlEngineSpec
from superset.models import core as models
from superset.models.dashboard import Dashboard
from superset.models.datasource_access_request import DatasourceAccessRequest
from superset.models.slice import Slice
from superset.models.sql_lab import Query
from superset.result_set import SupersetResultSet
from superset.utils import core as utils
from superset.views import core as views
from superset.views.database.views import DatabaseView
from .base_tests import SupersetTestCase
logger = logging.getLogger(__name__)
class CoreTests(SupersetTestCase):
def __init__(self, *args, **kwargs):
super(CoreTests, self).__init__(*args, **kwargs)
def setUp(self):
db.session.query(Query).delete()
db.session.query(DatasourceAccessRequest).delete()
db.session.query(models.Log).delete()
self.table_ids = {
tbl.table_name: tbl.id for tbl in (db.session.query(SqlaTable).all())
}
self.original_unsafe_db_setting = app.config["PREVENT_UNSAFE_DB_CONNECTIONS"]
def tearDown(self):
db.session.query(Query).delete()
app.config["PREVENT_UNSAFE_DB_CONNECTIONS"] = self.original_unsafe_db_setting
def test_login(self):
resp = self.get_resp("/login/", data=dict(username="admin", password="general"))
self.assertNotIn("User confirmation needed", resp)
resp = self.get_resp("/logout/", follow_redirects=True)
self.assertIn("User confirmation needed", resp)
resp = self.get_resp(
"/login/", data=dict(username="admin", password="wrongPassword")
)
self.assertIn("User confirmation needed", resp)
def test_dashboard_endpoint(self):
resp = self.client.get("/superset/dashboard/-1/")
assert resp.status_code == 404
def test_slice_endpoint(self):
self.login(username="admin")
slc = self.get_slice("Girls", db.session)
resp = self.get_resp("/superset/slice/{}/".format(slc.id))
assert "Time Column" in resp
assert "List Roles" in resp
# Testing overrides
resp = self.get_resp("/superset/slice/{}/?standalone=true".format(slc.id))
assert '<div class="navbar' not in resp
resp = self.client.get("/superset/slice/-1/")
assert resp.status_code == 404
def _get_query_context(self) -> Dict[str, Any]:
self.login(username="admin")
slc = self.get_slice("Girl Name Cloud", db.session)
return {
"datasource": {"id": slc.datasource_id, "type": slc.datasource_type},
"queries": [
{
"granularity": "ds",
"groupby": ["name"],
"metrics": [{"label": "sum__num"}],
"filters": [],
"row_limit": 100,
}
],
}
def _get_query_context_with_post_processing(self) -> Dict[str, Any]:
self.login(username="admin")
slc = self.get_slice("Girl Name Cloud", db.session)
return {
"datasource": {"id": slc.datasource_id, "type": slc.datasource_type},
"queries": [
{
"granularity": "ds",
"groupby": ["name", "state"],
"metrics": [{"label": "sum__num"}],
"filters": [],
"row_limit": 100,
"post_processing": [
{
"operation": "aggregate",
"options": {
"groupby": ["state"],
"aggregates": {
"q1": {
"operator": "percentile",
"column": "sum__num",
"options": {"q": 25},
},
"median": {
"operator": "median",
"column": "sum__num",
},
},
},
},
{
"operation": "sort",
"options": {"columns": {"q1": False, "state": True},},
},
],
}
],
}
def test_viz_cache_key(self):
self.login(username="admin")
slc = self.get_slice("Girls", db.session)
viz = slc.viz
qobj = viz.query_obj()
cache_key = viz.cache_key(qobj)
self.assertEqual(cache_key, viz.cache_key(qobj))
qobj["groupby"] = []
self.assertNotEqual(cache_key, viz.cache_key(qobj))
def test_cache_key_changes_when_datasource_is_updated(self):
qc_dict = self._get_query_context()
# construct baseline cache_key
query_context = QueryContext(**qc_dict)
query_object = query_context.queries[0]
cache_key_original = query_context.cache_key(query_object)
# make temporary change and revert it to refresh the changed_on property
datasource = ConnectorRegistry.get_datasource(
datasource_type=qc_dict["datasource"]["type"],
datasource_id=qc_dict["datasource"]["id"],
session=db.session,
)
description_original = datasource.description
datasource.description = "temporary description"
db.session.commit()
datasource.description = description_original
db.session.commit()
# create new QueryContext with unchanged attributes and extract new cache_key
query_context = QueryContext(**qc_dict)
query_object = query_context.queries[0]
cache_key_new = query_context.cache_key(query_object)
# the new cache_key should be different due to updated datasource
self.assertNotEqual(cache_key_original, cache_key_new)
def test_query_context_time_range_endpoints(self):
query_context = QueryContext(**self._get_query_context())
query_object = query_context.queries[0]
extras = query_object.to_dict()["extras"]
self.assertTrue("time_range_endpoints" in extras)
        self.assertEqual(
extras["time_range_endpoints"],
(utils.TimeRangeEndpoint.INCLUSIVE, utils.TimeRangeEndpoint.EXCLUSIVE),
)
def test_get_superset_tables_not_allowed(self):
example_db = utils.get_example_database()
schema_name = self.default_schema_backend_map[example_db.backend]
self.login(username="gamma")
uri = f"superset/tables/{example_db.id}/{schema_name}/undefined/"
rv = self.client.get(uri)
self.assertEqual(rv.status_code, 404)
def test_get_superset_tables_substr(self):
example_db = utils.get_example_database()
self.login(username="admin")
schema_name = self.default_schema_backend_map[example_db.backend]
uri = f"superset/tables/{example_db.id}/{schema_name}/ab_role/"
rv = self.client.get(uri)
response = json.loads(rv.data.decode("utf-8"))
self.assertEqual(rv.status_code, 200)
        expected_response = {
"options": [
{
"label": "ab_role",
"schema": schema_name,
"title": "ab_role",
"type": "table",
"value": "ab_role",
}
],
"tableLength": 1,
}
        self.assertEqual(response, expected_response)
def test_get_superset_tables_not_found(self):
self.login(username="admin")
uri = f"superset/tables/invalid/public/undefined/"
rv = self.client.get(uri)
self.assertEqual(rv.status_code, 404)
def test_api_v1_query_endpoint(self):
self.login(username="admin")
qc_dict = self._get_query_context()
data = json.dumps(qc_dict)
resp = json.loads(self.get_resp("/api/v1/query/", {"query_context": data}))
self.assertEqual(resp[0]["rowcount"], 100)
def test_api_v1_query_endpoint_with_post_processing(self):
self.login(username="admin")
qc_dict = self._get_query_context_with_post_processing()
data = json.dumps(qc_dict)
resp = json.loads(self.get_resp("/api/v1/query/", {"query_context": data}))
self.assertEqual(resp[0]["rowcount"], 6)
def test_old_slice_json_endpoint(self):
self.login(username="admin")
slc = self.get_slice("Girls", db.session)
json_endpoint = "/superset/explore_json/{}/{}/".format(
slc.datasource_type, slc.datasource_id
)
resp = self.get_resp(
json_endpoint, {"form_data": json.dumps(slc.viz.form_data)}
)
assert '"Jennifer"' in resp
def test_slice_json_endpoint(self):
self.login(username="admin")
slc = self.get_slice("Girls", db.session)
resp = self.get_resp(slc.explore_json_url)
assert '"Jennifer"' in resp
def test_old_slice_csv_endpoint(self):
self.login(username="admin")
slc = self.get_slice("Girls", db.session)
csv_endpoint = "/superset/explore_json/{}/{}/?csv=true".format(
slc.datasource_type, slc.datasource_id
)
resp = self.get_resp(csv_endpoint, {"form_data": json.dumps(slc.viz.form_data)})
assert "Jennifer," in resp
def test_slice_csv_endpoint(self):
self.login(username="admin")
slc = self.get_slice("Girls", db.session)
csv_endpoint = "/superset/explore_json/?csv=true"
resp = self.get_resp(
csv_endpoint, {"form_data": json.dumps({"slice_id": slc.id})}
)
assert "Jennifer," in resp
def test_admin_only_permissions(self):
def assert_admin_permission_in(role_name, assert_func):
role = security_manager.find_role(role_name)
permissions = [p.permission.name for p in role.permissions]
assert_func("can_sync_druid_source", permissions)
assert_func("can_approve", permissions)
assert_admin_permission_in("Admin", self.assertIn)
assert_admin_permission_in("Alpha", self.assertNotIn)
assert_admin_permission_in("Gamma", self.assertNotIn)
def test_admin_only_menu_views(self):
def assert_admin_view_menus_in(role_name, assert_func):
role = security_manager.find_role(role_name)
view_menus = [p.view_menu.name for p in role.permissions]
assert_func("ResetPasswordView", view_menus)
assert_func("RoleModelView", view_menus)
assert_func("Security", view_menus)
assert_func("SQL Lab", view_menus)
assert_admin_view_menus_in("Admin", self.assertIn)
assert_admin_view_menus_in("Alpha", self.assertNotIn)
assert_admin_view_menus_in("Gamma", self.assertNotIn)
def test_save_slice(self):
self.login(username="admin")
slice_name = f"Energy Sankey"
slice_id = self.get_slice(slice_name, db.session).id
copy_name_prefix = "Test Sankey"
copy_name = f"{copy_name_prefix}[save]{random.random()}"
tbl_id = self.table_ids.get("energy_usage")
new_slice_name = f"{copy_name_prefix}[overwrite]{random.random()}"
url = (
"/superset/explore/table/{}/?slice_name={}&"
"action={}&datasource_name=energy_usage"
)
form_data = {
"adhoc_filters": [],
"viz_type": "sankey",
"groupby": ["target"],
"metric": "sum__value",
"row_limit": 5000,
"slice_id": slice_id,
"time_range_endpoints": ["inclusive", "exclusive"],
}
# Changing name and save as a new slice
resp = self.client.post(
url.format(tbl_id, copy_name, "saveas"),
data={"form_data": json.dumps(form_data)},
)
db.session.expunge_all()
new_slice_id = resp.json["form_data"]["slice_id"]
slc = db.session.query(Slice).filter_by(id=new_slice_id).one()
self.assertEqual(slc.slice_name, copy_name)
form_data.pop("slice_id") # We don't save the slice id when saving as
self.assertEqual(slc.viz.form_data, form_data)
form_data = {
"adhoc_filters": [],
"viz_type": "sankey",
"groupby": ["source"],
"metric": "sum__value",
"row_limit": 5000,
"slice_id": new_slice_id,
"time_range": "now",
"time_range_endpoints": ["inclusive", "exclusive"],
}
        # Overwrite the new slice with a different name
self.client.post(
url.format(tbl_id, new_slice_name, "overwrite"),
data={"form_data": json.dumps(form_data)},
)
db.session.expunge_all()
slc = db.session.query(Slice).filter_by(id=new_slice_id).one()
self.assertEqual(slc.slice_name, new_slice_name)
self.assertEqual(slc.viz.form_data, form_data)
# Cleanup
slices = (
db.session.query(Slice)
.filter(Slice.slice_name.like(copy_name_prefix + "%"))
.all()
)
for slc in slices:
db.session.delete(slc)
db.session.commit()
def test_filter_endpoint(self):
self.login(username="admin")
slice_name = "Energy Sankey"
slice_id = self.get_slice(slice_name, db.session).id
db.session.commit()
tbl_id = self.table_ids.get("energy_usage")
        table = db.session.query(SqlaTable).filter(SqlaTable.id == tbl_id).one()
        table.filter_select_enabled = True
url = (
"/superset/filter/table/{}/target/?viz_type=sankey&groupby=source"
"&metric=sum__value&flt_col_0=source&flt_op_0=in&flt_eq_0=&"
"slice_id={}&datasource_name=energy_usage&"
"datasource_id=1&datasource_type=table"
)
        # Fetch the available filter values for the "target" column
resp = self.get_resp(url.format(tbl_id, slice_id))
assert len(resp) > 0
assert "Carbon Dioxide" in resp
def test_slice_data(self):
# slice data should have some required attributes
self.login(username="admin")
slc = self.get_slice("Girls", db.session)
slc_data_attributes = slc.data.keys()
assert "changed_on" in slc_data_attributes
assert "modified" in slc_data_attributes
def test_slices(self):
# Testing by hitting the two supported end points for all slices
self.login(username="admin")
Slc = Slice
urls = []
for slc in db.session.query(Slc).all():
urls += [
(slc.slice_name, "explore", slc.slice_url),
(slc.slice_name, "explore_json", slc.explore_json_url),
]
for name, method, url in urls:
logger.info(f"[{name}]/[{method}]: {url}")
print(f"[{name}]/[{method}]: {url}")
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
def test_tablemodelview_list(self):
self.login(username="admin")
url = "/tablemodelview/list/"
resp = self.get_resp(url)
# assert that a table is listed
table = db.session.query(SqlaTable).first()
assert table.name in resp
assert "/superset/explore/table/{}".format(table.id) in resp
def test_add_slice(self):
self.login(username="admin")
# assert that /chart/add responds with 200
url = "/chart/add"
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
def test_get_user_slices(self):
self.login(username="admin")
userid = security_manager.find_user("admin").id
url = f"/sliceasync/api/read?_flt_0_created_by={userid}"
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
def test_slices_V2(self):
# Add explore-v2-beta role to admin user
        # Test all slice urls as user with explore-v2-beta role
security_manager.add_role("explore-v2-beta")
security_manager.add_user(
"explore_beta",
"explore_beta",
" user",
"[email protected]",
security_manager.find_role("explore-v2-beta"),
password="general",
)
self.login(username="explore_beta", password="general")
Slc = Slice
urls = []
for slc in db.session.query(Slc).all():
urls += [(slc.slice_name, "slice_url", slc.slice_url)]
for name, method, url in urls:
print(f"[{name}]/[{method}]: {url}")
self.client.get(url)
def test_doctests(self):
modules = [utils, models, sql_lab]
for mod in modules:
failed, tests = doctest.testmod(mod)
if failed:
raise Exception("Failed a doctest")
def test_misc(self):
assert self.get_resp("/health") == "OK"
assert self.get_resp("/healthcheck") == "OK"
assert self.get_resp("/ping") == "OK"
def test_testconn(self, username="admin"):
# need to temporarily allow sqlite dbs, teardown will undo this
app.config["PREVENT_UNSAFE_DB_CONNECTIONS"] = False
self.login(username=username)
database = utils.get_example_database()
# validate that the endpoint works with the password-masked sqlalchemy uri
data = json.dumps(
{
"uri": database.safe_sqlalchemy_uri(),
"name": "examples",
"impersonate_user": False,
}
)
response = self.client.post(
"/superset/testconn", data=data, content_type="application/json"
)
assert response.status_code == 200
assert response.headers["Content-Type"] == "application/json"
# validate that the endpoint works with the decrypted sqlalchemy uri
data = json.dumps(
{
"uri": database.sqlalchemy_uri_decrypted,
"name": "examples",
"impersonate_user": False,
}
)
response = self.client.post(
"/superset/testconn", data=data, content_type="application/json"
)
assert response.status_code == 200
assert response.headers["Content-Type"] == "application/json"
def test_testconn_failed_conn(self, username="admin"):
self.login(username=username)
data = json.dumps(
{"uri": "broken://url", "name": "examples", "impersonate_user": False}
)
response = self.client.post(
"/superset/testconn", data=data, content_type="application/json"
)
assert response.status_code == 400
assert response.headers["Content-Type"] == "application/json"
response_body = json.loads(response.data.decode("utf-8"))
expected_body = {"error": "Could not load database driver: broken"}
assert response_body == expected_body, "%s != %s" % (
response_body,
expected_body,
)
def test_testconn_unsafe_uri(self, username="admin"):
self.login(username=username)
app.config["PREVENT_UNSAFE_DB_CONNECTIONS"] = True
response = self.client.post(
"/superset/testconn",
data=json.dumps(
{
"uri": "sqlite:///home/superset/unsafe.db",
"name": "unsafe",
"impersonate_user": False,
}
),
content_type="application/json",
)
self.assertEqual(400, response.status_code)
response_body = json.loads(response.data.decode("utf-8"))
expected_body = {
"error": "SQLite database cannot be used as a data source for security reasons."
}
self.assertEqual(expected_body, response_body)
def test_custom_password_store(self):
database = utils.get_example_database()
conn_pre = sqla.engine.url.make_url(database.sqlalchemy_uri_decrypted)
def custom_password_store(uri):
return "password_store_test"
models.custom_password_store = custom_password_store
conn = sqla.engine.url.make_url(database.sqlalchemy_uri_decrypted)
if conn_pre.password:
assert conn.password == "password_store_test"
assert conn.password != conn_pre.password
        # Disable the custom password store for later tests
models.custom_password_store = None
def test_databaseview_edit(self, username="admin"):
        # validate that sending a password-masked URI does not overwrite the
        # decrypted URI
self.login(username=username)
database = utils.get_example_database()
sqlalchemy_uri_decrypted = database.sqlalchemy_uri_decrypted
url = "databaseview/edit/{}".format(database.id)
data = {k: database.__getattribute__(k) for k in DatabaseView.add_columns}
data["sqlalchemy_uri"] = database.safe_sqlalchemy_uri()
self.client.post(url, data=data)
database = utils.get_example_database()
self.assertEqual(sqlalchemy_uri_decrypted, database.sqlalchemy_uri_decrypted)
# Need to clean up after ourselves
database.impersonate_user = False
database.allow_dml = False
database.allow_run_async = False
db.session.commit()
def test_warm_up_cache(self):
slc = self.get_slice("Girls", db.session)
data = self.get_json_resp("/superset/warm_up_cache?slice_id={}".format(slc.id))
self.assertEqual(
data, [{"slice_id": slc.id, "viz_error": None, "viz_status": "success"}]
)
data = self.get_json_resp(
"/superset/warm_up_cache?table_name=energy_usage&db_name=main"
)
assert len(data) > 0
def test_shortner(self):
self.login(username="admin")
data = (
"//superset/explore/table/1/?viz_type=sankey&groupby=source&"
"groupby=target&metric=sum__value&row_limit=5000&where=&having=&"
"flt_col_0=source&flt_op_0=in&flt_eq_0=&slice_id=78&slice_name="
"Energy+Sankey&collapsed_fieldsets=&action=&datasource_name="
"energy_usage&datasource_id=1&datasource_type=table&"
"previous_viz_type=sankey"
)
resp = self.client.post("/r/shortner/", data=dict(data=data))
assert re.search(r"\/r\/[0-9]+", resp.data.decode("utf-8"))
@skipUnless(
(is_feature_enabled("KV_STORE")), "skipping as /kv/ endpoints are not enabled"
)
def test_kv(self):
self.login(username="admin")
resp = self.client.get("/kv/10001/")
self.assertEqual(404, resp.status_code)
value = json.dumps({"data": "this is a test"})
resp = self.client.post("/kv/store/", data=dict(data=value))
self.assertEqual(resp.status_code, 200)
kv = db.session.query(models.KeyValue).first()
kv_value = kv.value
self.assertEqual(json.loads(value), json.loads(kv_value))
resp = self.client.get("/kv/{}/".format(kv.id))
self.assertEqual(resp.status_code, 200)
self.assertEqual(json.loads(value), json.loads(resp.data.decode("utf-8")))
def test_gamma(self):
self.login(username="gamma")
assert "Charts" in self.get_resp("/chart/list/")
assert "Dashboards" in self.get_resp("/dashboard/list/")
def test_csv_endpoint(self):
self.login("admin")
sql = """
SELECT name
FROM birth_names
WHERE name = 'James'
LIMIT 1
"""
client_id = "{}".format(random.getrandbits(64))[:10]
self.run_sql(sql, client_id, raise_on_error=True)
resp = self.get_resp("/superset/csv/{}".format(client_id))
data = csv.reader(io.StringIO(resp))
expected_data = csv.reader(io.StringIO("name\nJames\n"))
client_id = "{}".format(random.getrandbits(64))[:10]
self.run_sql(sql, client_id, raise_on_error=True)
resp = self.get_resp("/superset/csv/{}".format(client_id))
data = csv.reader(io.StringIO(resp))
expected_data = csv.reader(io.StringIO("name\nJames\n"))
self.assertEqual(list(expected_data), list(data))
self.logout()
def test_extra_table_metadata(self):
self.login("admin")
dbid = utils.get_example_database().id
self.get_json_resp(
f"/superset/extra_table_metadata/{dbid}/birth_names/superset/"
)
def test_process_template(self):
maindb = utils.get_example_database()
sql = "SELECT '{{ datetime(2017, 1, 1).isoformat() }}'"
tp = jinja_context.get_template_processor(database=maindb)
rendered = tp.process_template(sql)
self.assertEqual("SELECT '2017-01-01T00:00:00'", rendered)
def test_get_template_kwarg(self):
maindb = utils.get_example_database()
s = "{{ foo }}"
tp = jinja_context.get_template_processor(database=maindb, foo="bar")
rendered = tp.process_template(s)
self.assertEqual("bar", rendered)
def test_template_kwarg(self):
maindb = utils.get_example_database()
s = "{{ foo }}"
tp = jinja_context.get_template_processor(database=maindb)
rendered = tp.process_template(s, foo="bar")
self.assertEqual("bar", rendered)
def test_templated_sql_json(self):
self.login("admin")
sql = "SELECT '{{ datetime(2017, 1, 1).isoformat() }}' as test"
data = self.run_sql(sql, "fdaklj3ws")
self.assertEqual(data["data"][0]["test"], "2017-01-01T00:00:00")
@mock.patch("tests.superset_test_custom_template_processors.datetime")
def test_custom_process_template(self, mock_dt) -> None:
"""Test macro defined in custom template processor works."""
mock_dt.utcnow = mock.Mock(return_value=datetime.datetime(1970, 1, 1))
db = mock.Mock()
db.backend = "presto"
tp = jinja_context.get_template_processor(database=db)
sql = "SELECT '$DATE()'"
rendered = tp.process_template(sql)
self.assertEqual("SELECT '{}'".format("1970-01-01"), rendered)
sql = "SELECT '$DATE(1, 2)'"
rendered = tp.process_template(sql)
self.assertEqual("SELECT '{}'".format("1970-01-02"), rendered)
def test_custom_get_template_kwarg(self):
"""Test macro passed as kwargs when getting template processor
works in custom template processor."""
db = mock.Mock()
db.backend = "presto"
s = "$foo()"
tp = jinja_context.get_template_processor(database=db, foo=lambda: "bar")
rendered = tp.process_template(s)
self.assertEqual("bar", rendered)
def test_custom_template_kwarg(self) -> None:
"""Test macro passed as kwargs when processing template
works in custom template processor."""
db = mock.Mock()
db.backend = "presto"
s = "$foo()"
tp = jinja_context.get_template_processor(database=db)
rendered = tp.process_template(s, foo=lambda: "bar")
self.assertEqual("bar", rendered)
def test_custom_template_processors_overwrite(self) -> None:
"""Test template processor for presto gets overwritten by custom one."""
db = mock.Mock()
db.backend = "presto"
tp = jinja_context.get_template_processor(database=db)
sql = "SELECT '{{ datetime(2017, 1, 1).isoformat() }}'"
rendered = tp.process_template(sql)
self.assertEqual(sql, rendered)
sql = "SELECT '{{ DATE(1, 2) }}'"
rendered = tp.process_template(sql)
self.assertEqual(sql, rendered)
def test_custom_template_processors_ignored(self) -> None:
"""Test custom template processor is ignored for a difference backend
database."""
maindb = utils.get_example_database()
sql = "SELECT '$DATE()'"
tp = jinja_context.get_template_processor(database=maindb)
rendered = tp.process_template(sql)
self.assertEqual(sql, rendered)
@mock.patch("tests.superset_test_custom_template_processors.datetime")
@mock.patch("superset.sql_lab.get_sql_results")
def test_custom_templated_sql_json(self, sql_lab_mock, mock_dt) -> None:
"""Test sqllab receives macros expanded query."""
mock_dt.utcnow = mock.Mock(return_value=datetime.datetime(1970, 1, 1))
self.login("admin")
sql = "SELECT '$DATE()' as test"
resp = {
"status": utils.QueryStatus.SUCCESS,
"query": {"rows": 1},
"data": [{"test": "'1970-01-01'"}],
}
sql_lab_mock.return_value = resp
dbobj = self.create_fake_presto_db()
json_payload = dict(database_id=dbobj.id, sql=sql)
self.get_json_resp(
"/superset/sql_json/", raise_on_error=False, json_=json_payload
)
assert sql_lab_mock.called
self.assertEqual(sql_lab_mock.call_args[0][1], "SELECT '1970-01-01' as test")
self.delete_fake_presto_db()
def test_fetch_datasource_metadata(self):
self.login(username="admin")
url = "/superset/fetch_datasource_metadata?" "datasourceKey=1__table"
resp = self.get_json_resp(url)
keys = [
"name",
"type",
"order_by_choices",
"granularity_sqla",
"time_grain_sqla",
"id",
]
for k in keys:
self.assertIn(k, resp.keys())
def test_user_profile(self, username="admin"):
self.login(username=username)
slc = self.get_slice("Girls", db.session)
# Setting some faves
url = "/superset/favstar/Slice/{}/select/".format(slc.id)
resp = self.get_json_resp(url)
self.assertEqual(resp["count"], 1)
dash = db.session.query(Dashboard).filter_by(slug="births").first()
url = "/superset/favstar/Dashboard/{}/select/".format(dash.id)
resp = self.get_json_resp(url)
self.assertEqual(resp["count"], 1)
userid = security_manager.find_user("admin").id
resp = self.get_resp("/superset/profile/admin/")
self.assertIn('"app"', resp)
data = self.get_json_resp("/superset/recent_activity/{}/".format(userid))
self.assertNotIn("message", data)
data = self.get_json_resp("/superset/created_slices/{}/".format(userid))
self.assertNotIn("message", data)
data = self.get_json_resp("/superset/created_dashboards/{}/".format(userid))
self.assertNotIn("message", data)
data = self.get_json_resp("/superset/fave_slices/{}/".format(userid))
self.assertNotIn("message", data)
data = self.get_json_resp("/superset/fave_dashboards/{}/".format(userid))
self.assertNotIn("message", data)
data = self.get_json_resp(
"/superset/fave_dashboards_by_username/{}/".format(username)
)
self.assertNotIn("message", data)
def test_slice_id_is_always_logged_correctly_on_web_request(self):
# superset/explore case
slc = db.session.query(Slice).filter_by(slice_name="Girls").one()
qry = db.session.query(models.Log).filter_by(slice_id=slc.id)
self.get_resp(slc.slice_url, {"form_data": json.dumps(slc.form_data)})
self.assertEqual(1, qry.count())
def test_slice_id_is_always_logged_correctly_on_ajax_request(self):
# superset/explore_json case
self.login(username="admin")
slc = db.session.query(Slice).filter_by(slice_name="Girls").one()
qry = db.session.query(models.Log).filter_by(slice_id=slc.id)
slc_url = slc.slice_url.replace("explore", "explore_json")
self.get_json_resp(slc_url, {"form_data": json.dumps(slc.form_data)})
self.assertEqual(1, qry.count())
def test_import_csv(self):
self.login(username="admin")
table_name = "".join(random.choice(string.ascii_uppercase) for _ in range(5))
filename_1 = "testCSV.csv"
test_file_1 = open(filename_1, "w+")
test_file_1.write("a,b\n")
test_file_1.write("john,1\n")
test_file_1.write("paul,2\n")
test_file_1.close()
filename_2 = "testCSV2.csv"
test_file_2 = open(filename_2, "w+")
test_file_2.write("b,c,d\n")
test_file_2.write("john,1,x\n")
test_file_2.write("paul,2,y\n")
test_file_2.close()
example_db = utils.get_example_database()
example_db.allow_csv_upload = True
db_id = example_db.id
db.session.commit()
form_data = {
"csv_file": open(filename_1, "rb"),
"sep": ",",
"name": table_name,
"con": db_id,
"if_exists": "fail",
"index_label": "test_label",
"mangle_dupe_cols": False,
}
url = "/databaseview/list/"
add_datasource_page = self.get_resp(url)
self.assertIn("Upload a CSV", add_datasource_page)
url = "/csvtodatabaseview/form"
form_get = self.get_resp(url)
self.assertIn("CSV to Database configuration", form_get)
try:
# initial upload with fail mode
resp = self.get_resp(url, data=form_data)
self.assertIn(
f'CSV file "{filename_1}" uploaded to table "{table_name}"', resp
)
# upload again with fail mode; should fail
form_data["csv_file"] = open(filename_1, "rb")
resp = self.get_resp(url, data=form_data)
self.assertIn(
f'Unable to upload CSV file "{filename_1}" to table "{table_name}"',
resp,
)
# upload again with append mode
form_data["csv_file"] = open(filename_1, "rb")
form_data["if_exists"] = "append"
resp = self.get_resp(url, data=form_data)
self.assertIn(
f'CSV file "{filename_1}" uploaded to table "{table_name}"', resp
)
# upload again with replace mode
form_data["csv_file"] = open(filename_1, "rb")
form_data["if_exists"] = "replace"
resp = self.get_resp(url, data=form_data)
self.assertIn(
f'CSV file "{filename_1}" uploaded to table "{table_name}"', resp
)
# try to append to table from file with different schema
form_data["csv_file"] = open(filename_2, "rb")
form_data["if_exists"] = "append"
resp = self.get_resp(url, data=form_data)
self.assertIn(
f'Unable to upload CSV file "{filename_2}" to table "{table_name}"',
resp,
)
# replace table from file with different schema
form_data["csv_file"] = open(filename_2, "rb")
form_data["if_exists"] = "replace"
resp = self.get_resp(url, data=form_data)
self.assertIn(
f'CSV file "{filename_2}" uploaded to table "{table_name}"', resp
)
table = (
db.session.query(SqlaTable)
.filter_by(table_name=table_name, database_id=db_id)
.first()
)
# make sure the new column name is reflected in the table metadata
self.assertIn("d", table.column_names)
finally:
os.remove(filename_1)
os.remove(filename_2)
def test_dataframe_timezone(self):
tz = pytz.FixedOffset(60)
data = [
(datetime.datetime(2017, 11, 18, 21, 53, 0, 219225, tzinfo=tz),),
(datetime.datetime(2017, 11, 18, 22, 6, 30, tzinfo=tz),),
]
results = SupersetResultSet(list(data), [["data"]], BaseEngineSpec)
df = results.to_pandas_df()
data = dataframe.df_to_records(df)
json_str = json.dumps(data, default=utils.pessimistic_json_iso_dttm_ser)
self.assertDictEqual(
data[0], {"data": pd.Timestamp("2017-11-18 21:53:00.219225+0100", tz=tz)}
)
self.assertDictEqual(
data[1], {"data": pd.Timestamp("2017-11-18 22:06:30+0100", tz=tz)}
)
self.assertEqual(
json_str,
'[{"data": "2017-11-18T21:53:00.219225+01:00"}, {"data": "2017-11-18T22:06:30+01:00"}]',
)
def test_mssql_engine_spec_pymssql(self):
# Test for case when tuple is returned (pymssql)
data = [
(1, 1, datetime.datetime(2017, 10, 19, 23, 39, 16, 660000)),
(2, 2, datetime.datetime(2018, 10, 19, 23, 39, 16, 660000)),
]
results = SupersetResultSet(
list(data), [["col1"], ["col2"], ["col3"]], MssqlEngineSpec
)
df = results.to_pandas_df()
data = dataframe.df_to_records(df)
self.assertEqual(len(data), 2)
self.assertEqual(
data[0],
{"col1": 1, "col2": 1, "col3": pd.Timestamp("2017-10-19 23:39:16.660000")},
)
def test_comments_in_sqlatable_query(self):
clean_query = "SELECT '/* val 1 */' as c1, '-- val 2' as c2 FROM tbl"
commented_query = "/* comment 1 */" + clean_query + "-- comment 2"
table = SqlaTable(
table_name="test_comments_in_sqlatable_query_table", sql=commented_query
)
rendered_query = str(table.get_from_clause())
self.assertEqual(clean_query, rendered_query)
def test_slice_payload_no_results(self):
self.login(username="admin")
slc = self.get_slice("Girls", db.session)
json_endpoint = "/superset/explore_json/"
form_data = slc.form_data
form_data.update(
{
"adhoc_filters": [
{
"clause": "WHERE",
"comparator": "NA",
"expressionType": "SIMPLE",
"operator": "==",
"subject": "gender",
}
]
}
)
data = self.get_json_resp(json_endpoint, {"form_data": json.dumps(form_data)})
self.assertEqual(data["status"], utils.QueryStatus.SUCCESS)
self.assertEqual(data["error"], None)
def test_slice_payload_invalid_query(self):
self.login(username="admin")
slc = self.get_slice("Girls", db.session)
form_data = slc.form_data
form_data.update({"groupby": ["N/A"]})
data = self.get_json_resp(
"/superset/explore_json/", {"form_data": json.dumps(form_data)}
)
self.assertEqual(data["status"], utils.QueryStatus.FAILED)
def test_slice_payload_no_datasource(self):
self.login(username="admin")
data = self.get_json_resp("/superset/explore_json/", raise_on_error=False)
self.assertEqual(
data["error"], "The datasource associated with this chart no longer exists"
)
@mock.patch("superset.security.SupersetSecurityManager.schemas_accessible_by_user")
@mock.patch("superset.security.SupersetSecurityManager.database_access")
@mock.patch("superset.security.SupersetSecurityManager.all_datasource_access")
def test_schemas_access_for_csv_upload_endpoint(
self, mock_all_datasource_access, mock_database_access, mock_schemas_accessible
):
self.login(username="admin")
dbobj = self.create_fake_db()
mock_all_datasource_access.return_value = False
mock_database_access.return_value = False
mock_schemas_accessible.return_value = ["this_schema_is_allowed_too"]
data = self.get_json_resp(
url="/superset/schemas_access_for_csv_upload?db_id={db_id}".format(
db_id=dbobj.id
)
)
assert data == ["this_schema_is_allowed_too"]
self.delete_fake_db()
def test_select_star(self):
self.login(username="admin")
examples_db = utils.get_example_database()
resp = self.get_resp(f"/superset/select_star/{examples_db.id}/birth_names")
self.assertIn("gender", resp)
def test_get_select_star_not_allowed(self):
"""
Database API: Test get select star not allowed
"""
self.login(username="gamma")
example_db = utils.get_example_database()
resp = self.client.get(f"/superset/select_star/{example_db.id}/birth_names")
self.assertEqual(resp.status_code, 404)
@mock.patch("superset.views.core.results_backend_use_msgpack", False)
@mock.patch("superset.views.core.results_backend")
@mock.patch("superset.views.core.db")
def test_display_limit(self, mock_superset_db, mock_results_backend):
query_mock = mock.Mock()
query_mock.sql = "SELECT *"
query_mock.database = 1
query_mock.schema = "superset"
mock_superset_db.session.query().filter_by().one_or_none.return_value = (
query_mock
)
data = [{"col_0": i} for i in range(100)]
payload = {
"status": utils.QueryStatus.SUCCESS,
"query": {"rows": 100},
"data": data,
}
# do not apply msgpack serialization
use_msgpack = app.config["RESULTS_BACKEND_USE_MSGPACK"]
app.config["RESULTS_BACKEND_USE_MSGPACK"] = False
serialized_payload = sql_lab._serialize_payload(payload, False)
compressed = utils.zlib_compress(serialized_payload)
mock_results_backend.get.return_value = compressed
# get all results
result = json.loads(self.get_resp("/superset/results/key/"))
expected = {"status": "success", "query": {"rows": 100}, "data": data}
self.assertEqual(result, expected)
# limit results to 1
limited_data = data[:1]
result = json.loads(self.get_resp("/superset/results/key/?rows=1"))
expected = {
"status": "success",
"query": {"rows": 100},
"data": limited_data,
"displayLimitReached": True,
}
self.assertEqual(result, expected)
app.config["RESULTS_BACKEND_USE_MSGPACK"] = use_msgpack
def test_results_default_deserialization(self):
use_new_deserialization = False
data = [("a", 4, 4.0, "2019-08-18T16:39:16.660000")]
cursor_descr = (
("a", "string"),
("b", "int"),
("c", "float"),
("d", "datetime"),
)
db_engine_spec = BaseEngineSpec()
results = SupersetResultSet(data, cursor_descr, db_engine_spec)
query = {
"database_id": 1,
"sql": "SELECT * FROM birth_names LIMIT 100",
"status": utils.QueryStatus.PENDING,
}
(
serialized_data,
selected_columns,
all_columns,
expanded_columns,
) = sql_lab._serialize_and_expand_data(
results, db_engine_spec, use_new_deserialization
)
payload = {
"query_id": 1,
"status": utils.QueryStatus.SUCCESS,
"state": utils.QueryStatus.SUCCESS,
"data": serialized_data,
"columns": all_columns,
"selected_columns": selected_columns,
"expanded_columns": expanded_columns,
"query": query,
}
serialized_payload = sql_lab._serialize_payload(
payload, use_new_deserialization
)
self.assertIsInstance(serialized_payload, str)
query_mock = mock.Mock()
deserialized_payload = views._deserialize_results_payload(
serialized_payload, query_mock, use_new_deserialization
)
self.assertDictEqual(deserialized_payload, payload)
query_mock.assert_not_called()
def test_results_msgpack_deserialization(self):
use_new_deserialization = True
data = [("a", 4, 4.0, "2019-08-18T16:39:16.660000")]
cursor_descr = (
("a", "string"),
("b", "int"),
("c", "float"),
("d", "datetime"),
)
db_engine_spec = BaseEngineSpec()
results = SupersetResultSet(data, cursor_descr, db_engine_spec)
query = {
"database_id": 1,
"sql": "SELECT * FROM birth_names LIMIT 100",
"status": utils.QueryStatus.PENDING,
}
(
serialized_data,
selected_columns,
all_columns,
expanded_columns,
) = sql_lab._serialize_and_expand_data(
results, db_engine_spec, use_new_deserialization
)
payload = {
"query_id": 1,
"status": utils.QueryStatus.SUCCESS,
"state": utils.QueryStatus.SUCCESS,
"data": serialized_data,
"columns": all_columns,
"selected_columns": selected_columns,
"expanded_columns": expanded_columns,
"query": query,
}
serialized_payload = sql_lab._serialize_payload(
payload, use_new_deserialization
)
self.assertIsInstance(serialized_payload, bytes)
with mock.patch.object(
db_engine_spec, "expand_data", wraps=db_engine_spec.expand_data
) as expand_data:
query_mock = mock.Mock()
query_mock.database.db_engine_spec.expand_data = expand_data
deserialized_payload = views._deserialize_results_payload(
serialized_payload, query_mock, use_new_deserialization
)
df = results.to_pandas_df()
payload["data"] = dataframe.df_to_records(df)
self.assertDictEqual(deserialized_payload, payload)
expand_data.assert_called_once()
@mock.patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
{"FOO": lambda x: 1},
clear=True,
)
def test_feature_flag_serialization(self):
"""
Functions in feature flags don't break bootstrap data serialization.
"""
self.login()
encoded = json.dumps(
{"FOO": lambda x: 1, "super": "set"},
default=utils.pessimistic_json_iso_dttm_ser,
)
        html = cgi.escape(encoded).replace("'", "&#39;").replace('"', "&#34;")
urls = [
"/superset/sqllab",
"/superset/welcome",
"/superset/dashboard/1/",
"/superset/profile/admin/",
"/superset/explore/table/1",
]
for url in urls:
data = self.get_resp(url)
self.assertTrue(html in data)
@mock.patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
{"SQLLAB_BACKEND_PERSISTENCE": True},
clear=True,
)
def test_sqllab_backend_persistence_payload(self):
username = "admin"
self.login(username)
user_id = security_manager.find_user(username).id
# create a tab
data = {
"queryEditor": json.dumps(
{
"title": "Untitled Query 1",
"dbId": 1,
"schema": None,
"autorun": False,
"sql": "SELECT ...",
"queryLimit": 1000,
}
)
}
resp = self.get_json_resp("/tabstateview/", data=data)
tab_state_id = resp["id"]
# run a query in the created tab
self.run_sql(
"SELECT name FROM birth_names",
"client_id_1",
user_name=username,
raise_on_error=True,
sql_editor_id=tab_state_id,
)
# run an orphan query (no tab)
self.run_sql(
"SELECT name FROM birth_names",
"client_id_2",
user_name=username,
raise_on_error=True,
)
# we should have only 1 query returned, since the second one is not
# associated with any tabs
payload = views.Superset._get_sqllab_tabs(user_id=user_id)
self.assertEqual(len(payload["queries"]), 1)
if __name__ == "__main__":
unittest.main()
|
the-stack_106_19354
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import string
from django.core.exceptions import ValidationError
from django.test import SimpleTestCase
from select_multiple_field.codecs import encode_list_to_csv
from select_multiple_field.models import SelectMultipleField
from select_multiple_field.validators import MaxLengthValidator
class SelectMultipleFieldValidatorsTestCase(SimpleTestCase):
def setUp(self):
self.choices = tuple((c, c) for c in string.ascii_letters)
        self.choices_list = [c[0] for c in self.choices]
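    # Assumption based on the length arithmetic in the tests below:
    # encode_list_to_csv joins the selected choices with commas, so n
    # single-character choices encode to a string of length 2*n - 1
    # (e.g. ['a', 'b', 'c'] -> 'a,b,c').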
def test_max_length_single(self):
item = SelectMultipleField(choices=self.choices, max_length=1)
self.assertEqual(item.max_length, 1)
self.assertIsInstance(item.validators[0], MaxLengthValidator)
choice = self.choices_list[0:1]
self.assertIs(item.run_validators(value=choice), None)
def test_max_length_many(self):
for n in range(2, len(self.choices_list)):
many_choices = self.choices_list[0:n]
encoded_choices_len = len(encode_list_to_csv(many_choices))
item = SelectMultipleField(
choices=self.choices, max_length=encoded_choices_len)
self.assertEqual(item.max_length, encoded_choices_len)
self.assertIsInstance(item.validators[0], MaxLengthValidator)
self.assertIs(item.run_validators(value=many_choices), None)
def test_max_length_validationerror_single(self):
item = SelectMultipleField(choices=self.choices, max_length=1)
self.assertEqual(item.max_length, 1)
self.assertIsInstance(item.validators[0], MaxLengthValidator)
two_choices = self.choices_list[0:2]
with self.assertRaises(ValidationError) as cm:
item.run_validators(value=two_choices)
self.assertEqual(
cm.exception.messages[0],
MaxLengthValidator.message % {'limit_value': 1, 'show_value': 3}
)
def test_max_length_validationerror_many(self):
for n in range(2, len(self.choices_list)):
test_max_length = 2 * n - 2 # One less than encoded list len
item = SelectMultipleField(
choices=self.choices, max_length=test_max_length)
self.assertEqual(item.max_length, test_max_length)
self.assertIsInstance(item.validators[0], MaxLengthValidator)
many_choices = self.choices_list[0:n]
many_choices_len = len(encode_list_to_csv(many_choices))
self.assertTrue(many_choices_len > test_max_length)
with self.assertRaises(ValidationError) as cm:
item.run_validators(value=many_choices)
self.assertEqual(
cm.exception.messages[0],
MaxLengthValidator.message % {
'limit_value': item.max_length,
'show_value': many_choices_len}
)
|
the-stack_106_19358
|
"""
raven.contrib.django.client
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import logging
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.http import HttpRequest
from django.template import TemplateSyntaxError
from django.template.loader import LoaderOrigin
from raven.base import Client
from raven.contrib.django.utils import get_data_from_template, get_host
from raven.contrib.django.middleware import SentryLogMiddleware
from raven.utils.wsgi import get_headers, get_environ
__all__ = ('DjangoClient',)
class DjangoClient(Client):
logger = logging.getLogger('sentry.errors.client.django')
def get_user_info(self, user):
if not user.is_authenticated():
return {'is_authenticated': False}
user_info = {
'id': user.pk,
'is_authenticated': True,
}
if hasattr(user, 'email'):
user_info['email'] = user.email
if hasattr(user, 'get_username'):
user_info['username'] = user.get_username()
elif hasattr(user, 'username'):
user_info['username'] = user.username
return user_info
def get_data_from_request(self, request):
try:
from django.contrib.auth.models import AbstractBaseUser as BaseUser
except ImportError:
from django.contrib.auth.models import User as BaseUser # NOQA
result = {}
if hasattr(request, 'user') and isinstance(request.user, BaseUser):
result['user'] = self.get_user_info(request.user)
try:
uri = request.build_absolute_uri()
except SuspiciousOperation:
# attempt to build a URL for reporting as Django won't allow us to
# use get_host()
if request.is_secure():
scheme = 'https'
else:
scheme = 'http'
host = get_host(request)
uri = '%s://%s%s' % (scheme, host, request.path)
if request.method != 'GET':
try:
data = request.body
except Exception:
try:
data = request.raw_post_data
except Exception:
# assume we had a partial read.
try:
data = request.POST or '<unavailable>'
except Exception:
data = '<unavailable>'
else:
data = None
environ = request.META
result.update({
'request': {
'method': request.method,
'url': uri,
'query_string': request.META.get('QUERY_STRING'),
'data': data,
'cookies': dict(request.COOKIES),
'headers': dict(get_headers(environ)),
'env': dict(get_environ(environ)),
}
})
return result
def build_msg(self, *args, **kwargs):
data = super(DjangoClient, self).build_msg(*args, **kwargs)
stacks = [
data.get('stacktrace'),
]
if 'exception' in data:
stacks.append(data['exception']['values'][0]['stacktrace'])
for stacktrace in filter(bool, stacks):
for frame in stacktrace['frames']:
module = frame.get('module')
if not module:
continue
if module.startswith('django.'):
frame['in_app'] = False
if not self.site and 'django.contrib.sites' in settings.INSTALLED_APPS:
try:
from django.contrib.sites.models import Site
site = Site.objects.get_current()
site_name = site.name or site.domain
data['tags'].setdefault('site', site_name)
except Exception:
# Database error? Fallback to the id
data['tags'].setdefault('site', settings.SITE_ID)
return data
def capture(self, event_type, request=None, **kwargs):
if 'data' not in kwargs:
kwargs['data'] = data = {}
else:
data = kwargs['data']
if request is None:
request = getattr(SentryLogMiddleware.thread, 'request', None)
is_http_request = isinstance(request, HttpRequest)
if is_http_request:
data.update(self.get_data_from_request(request))
if kwargs.get('exc_info'):
exc_value = kwargs['exc_info'][1]
# As of r16833 (Django) all exceptions may contain a ``django_template_source`` attribute (rather than the
# legacy ``TemplateSyntaxError.source`` check) which describes template information.
if hasattr(exc_value, 'django_template_source') or ((isinstance(exc_value, TemplateSyntaxError) and
isinstance(getattr(exc_value, 'source', None), (tuple, list)) and isinstance(exc_value.source[0], LoaderOrigin))):
source = getattr(exc_value, 'django_template_source', getattr(exc_value, 'source', None))
if source is None:
self.logger.info('Unable to get template source from exception')
data.update(get_data_from_template(source))
result = super(DjangoClient, self).capture(event_type, **kwargs)
if is_http_request and result:
# attach the sentry object to the request
request.sentry = {
'project_id': data.get('project', self.remote.project),
'id': self.get_ident(result),
}
return result
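# Hedged usage sketch (not part of this module): a DjangoClient is normally built
# from the Sentry DSN in settings and used through the usual raven Client API, e.g.
#
#     client = DjangoClient(dsn='https://key@sentry.example.com/1')  # made-up DSN
#     try:
#         1 / 0
#     except ZeroDivisionError:
#         client.captureException()  # routes through capture() above, enriching
#                                    # the event with request data when available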
|
the-stack_106_19359
|
from .forms import NewProjectForm,ProfileForm,Votes
from django.contrib.auth.decorators import login_required
from django.shortcuts import render,redirect,get_object_or_404
from django.http import HttpResponse
from .models import Project,Profile,Ratings
from django.contrib.auth.models import User
# from django.http import JsonResponse
from rest_framework.response import Response
from rest_framework.views import APIView
from django.http import Http404
from .serializer import MerchSerializer,ProfileSerializer
from rest_framework import status
from .permissions import IsAdminOrReadOnly
# Create your views here.
@login_required
def post(request):
posts = Project.objects.all()
return render(request,'all-awards/post.html',{"posts":posts})
def awards(request):
vote = Votes()
if request.method == 'POST':
vote_form = Votes(request.POST)
if vote_form.is_valid():
design = vote_form.cleaned_data['design']
usability = vote_form.cleaned_data['usability']
content = vote_form.cleaned_data['content']
creativity = vote_form.cleaned_data['creativity']
rating = Ratings(design=design,usability=usability,
content=content,creativity=creativity,
user=request.user,post=project)
rating.save()
return redirect('/')
else:
vote_form = Votes()
return render(request,'awards.html',{"vote":vote_form})
@login_required(login_url='/accounts/login/')
def projects(request, projects_id):
project = Project.objects.get(id=projects_id)
likes = Ratings.objects.filter(post=project)
design = []
usability = []
creativity = []
content = []
for x in likes:
design.append(x.design)
usability.append(x.usability)
creativity.append(x.creativity)
content.append(x.content)
de = []
us = []
cre = []
con = []
if len(usability)>0:
usa = (sum(usability)/len(usability))
us.append(usa)
if len(creativity)>0:
crea = (sum(creativity)/len(creativity))
cre.append(crea)
if len(design)>0:
des = (sum(design)/len(design))
de.append(des)
if len(content)>0:
cont = (sum(content)/len(content))
con.append(cont)
vote = Votes()
if request.method == 'POST':
vote_form = Votes(request.POST)
if vote_form.is_valid():
design = vote_form.cleaned_data['design']
usability = vote_form.cleaned_data['usability']
content = vote_form.cleaned_data['content']
creativity = vote_form.cleaned_data['creativity']
rating = Ratings(design=design,usability=usability,
content=content,creativity=creativity,
user=request.user,post=project)
rating.save()
return redirect('/')
return render(request,"awards.html",{"post":project,"des":de,"usa":us,"cont":con,"crea":cre,"vote":vote})
# try:
# project = Project.objects.get(id = project_id)
# except DoesNotExist:
# raise Http404()
# return render(request,"all-awards/awards.html", {"project":project})
def search_results(request):
if 'project' in request.GET and request.GET["project"]:
search_term = request.GET.get("project")
        searched_projects = Project.search_by_title(search_term)
message = f"{search_term}"
return render(request, 'all-awards/search.html',{"message":message,"project": searched_projects})
else:
message = "You haven't searched for any term"
return render(request, 'all-awards/search.html',{"message":message})
@login_required(login_url='/accounts/login/')
def new_project(request):
current_user = request.user
if request.method == 'POST':
form = NewProjectForm(request.POST,request.FILES)
if form.is_valid():
post = form.save(commit=False)
post.user = request.user
post.save()
return redirect('home')
print('saved')
return redirect('home')
else:
form = NewProjectForm()
return render(request, 'new_project.html', {"form": form})
def profile(request, username):
profile = get_object_or_404(User,username=username)
try:
profile_details = Profile.get_by_id(profile.id)
except:
profile_details = Profile.filter_by_id(profile.id)
# images = Project.get_profile_images(profile.id)
title = f'@{profile.username} Instagram photos and videos'
return render(request, 'profile/profile.html', {'title':title, 'profile':profile, 'profile_details':profile_details})
def edit_profile(request):
if request.method == 'POST':
form = ProfileForm(request.POST, request.FILES)
if form.is_valid():
edit = form.save(commit=False)
edit.user = request.user
edit.save()
username = request.user.username
return redirect('profile', username=username)
else:
form = ProfileForm()
return render(request, 'profile/edit_profile.html', {'form': form})
class MerchList(APIView):
permission_classes = (IsAdminOrReadOnly,)
def get(self, request, format=None):
all_merch = Project.objects.all()
serializers = MerchSerializer(all_merch, many=True)
return Response(serializers.data)
def post(self, request, format=None):
serializers = MerchSerializer(data=request.data)
if serializers.is_valid():
serializers.save()
return Response(serializers.data, status=status.HTTP_201_CREATED)
return Response(serializers.errors, status=status.HTTP_400_BAD_REQUEST)
class MerchDescription(APIView):
permission_classes = (IsAdminOrReadOnly,)
def get_merch(self, pk):
try:
return Profile.objects.get(id=pk)
except Profile.DoesNotExist:
return Http404
def get(self, request, pk, format=None):
merch = self.get_merch(pk)
serializers = ProfileSerializer(merch)
return Response(serializers.data)
def put(self, request, pk, format=None):
merch = self.get_merch(pk)
serializers = MerchSerializer(merch, request.data)
if serializers.is_valid():
serializers.save()
return Response(serializers.data)
else:
return Response(serializers.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, pk, format=None):
merch = self.get_merch(pk)
merch.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
|
the-stack_106_19362
|
from __future__ import print_function
import sys
import os
import keras
from tensorflow.python.platform import gfile
import numpy as np
import tensorflow as tf
from tensorflow.python.layers.core import Dense
from utils.data_manager import load_data
from utils.vocab import load_vocab_all
from utils.bleu import moses_multi_bleu
from collections import defaultdict
from argparse import ArgumentParser
import utils.vocab as vocab
import sys
reload(sys)
sys.setdefaultencoding('utf8')
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
_PAD = vocab._PAD
_GO = vocab._GO
_END = vocab._END
def train(sess, env, X_data, y_data, epochs=10, load=False, shuffle=True, batch_size=128,
name='model', base=0, model2load=''):
"""
Train TF model by env.train_op
"""
if load:
print('\nLoading saved model')
env.saver.restore(sess, model2load )
print('\nTrain model')
n_sample = X_data.shape[0]
n_batch = int((n_sample+batch_size-1) / batch_size)
for epoch in range(epochs):
print('\nEpoch {0}/{1}'.format(epoch+1, epochs))
sys.stdout.flush()
if shuffle:
print('\nShuffling data')
ind = np.arange(n_sample)
np.random.shuffle(ind)
X_data = X_data[ind]
y_data = y_data[ind]
for batch in range(n_batch):
print(' batch {0}/{1}'.format(batch+1, n_batch),end='\r')
start = batch * batch_size
end = min(n_sample, start+batch_size)
sess.run(env.train_op, feed_dict={env.x: X_data[start:end],
env.y: y_data[start:end],
env.training: True})
evaluate(sess, env, X_data, y_data, batch_size=batch_size)
if (epoch+1) == epochs:
print('\n Saving model')
env.saver.save(sess, 'model/{0}-{1}'.format(name, base))
return 'model/{0}-{1}'.format(name, base)
def evaluate(sess, env, X_data, y_data, batch_size=128):
"""
Evaluate TF model by running env.loss and env.acc.
"""
print('\nEvaluating')
n_sample = X_data.shape[0]
n_batch = int((n_sample+batch_size-1) / batch_size)
loss, acc = 0, 0
for batch in range(n_batch):
print(' batch {0}/{1}'.format(batch+1, n_batch),end='\r')
sys.stdout.flush()
start = batch * batch_size
end = min(n_sample, start+batch_size)
cnt = end - start
batch_loss, batch_acc = sess.run(
[env.loss,env.acc],
feed_dict={env.x: X_data[start:end],
env.y: y_data[start:end]})
loss += batch_loss * cnt
acc += batch_acc * cnt
loss /= n_sample
acc /= n_sample
print(' loss: {0:.4f} acc: {1:.4f}'.format(loss, acc))
return acc
def _decode_data(sess, env, X_data, batch_size, reverse_vocab_dict):
print('\nDecoding')
logics_all = []
n_sample = X_data.shape[0]
n_batch = int((n_sample+batch_size-1) / batch_size)
for batch in range(n_batch):
print(' batch {0}/{1}'.format(batch+1, n_batch),end='\r')
sys.stdout.flush()
start = batch * batch_size
end = min(n_sample, start+batch_size)
cnt = end - start
ybar = sess.run(env.pred_ids,
feed_dict={env.x: X_data[start:end]})
ybar = np.asarray(ybar)
ybar = np.squeeze(ybar[:,0,:]) # pick top prediction
for seq in ybar:
seq = np.append(seq, _END)
seq = seq[:list(seq).index(_END)]
logic = " ".join([reverse_vocab_dict[idx] for idx in seq])
logics_all.append(logic)
return logics_all
def decode_data_recover(sess, env, args, X_data, y_data, s, batch_size=128):
"""
Inference and calculate EM acc based on recovered SQL
"""
annotation_path = args.annotation_path
i, acc = 0, 0
_, reverse_vocab_dict, _, _ = load_vocab_all(args)
inf_logics = _decode_data(sess, env, X_data, batch_size, reverse_vocab_dict)
xtru, ytru = X_data, y_data
with gfile.GFile(annotation_path+'%s_infer.txt'%s, mode='w') as output, gfile.GFile(annotation_path+'%s_ground_truth.txt'%s, mode='r') as S_ori_file, \
gfile.GFile(annotation_path+'%s_sym_pairs.txt'%s, mode='r') as sym_pair_file:
sym_pairs = sym_pair_file.readlines() # annotation pairs from question & table files
S_oris = S_ori_file.readlines() # SQL files before annotation
for true_seq, logic, x, sym_pair, S_ori in zip(ytru, inf_logics, xtru, sym_pairs, S_oris):
sym_pair = sym_pair.replace('<>\n','')
S_ori = S_ori.replace('\n','')
Qpairs = []
for pair in sym_pair.split('<>'):
Qpairs.append(pair.split('=>'))
true_seq = true_seq[1:] #delete <bos>
x = x[1:] #delete <bos>
true_seq = true_seq[:list(true_seq).index(_END)]
x = np.append(x, _END)
x = x[:list(x).index(_END)]
xseq = " ".join([reverse_vocab_dict[idx] for idx in x])
true_logic = " ".join([reverse_vocab_dict[idx] for idx in true_seq])
logic = logic.replace(' (','').replace(' )','')
true_logic = true_logic.replace(' (','').replace(' )','')
logic = _switch_cond(logic, true_logic)
recover_S = logic
for sym, word in Qpairs:
recover_S = recover_S.replace(sym, word)
acc += (recover_S==S_ori)
output.write(recover_S + '\n')
i += 1
print('EM: %.4f'%(acc*1./len(y_data)))
print('number of correct ones:%d'%acc)
return acc*1./len(y_data)
def decode_data(sess, env, args, X_data, y_data, batch_size=128, filename='output.txt'):
"""
Inference w/o recover annotation symbols
"""
i, acc = 0, 0
_, reverse_vocab_dict, _, _ = load_vocab_all(args)
logics_all = _decode_data(sess, env, X_data, batch_size, reverse_vocab_dict)
xtru, ytru = X_data, y_data
for true_seq, logic, x in zip(ytru, logics_all, xtru):
true_seq = true_seq[1:]
x = x[1:]
true_seq = np.append(true_seq, _END)
x = np.append(x, _END)
true_seq=true_seq[:list(true_seq).index(_END)]
x=x[:list(x).index(_END)]
xseq = " ".join([reverse_vocab_dict[idx] for idx in x ])
true_logic = " ".join([reverse_vocab_dict[idx] for idx in true_seq ])
logic = logic.replace(' (','').replace(' )','')
true_logic = true_logic.replace(' (','').replace(' )','')
logic = _switch_cond(logic, true_logic)
acc += (logic==true_logic)
i += 1
print('EM: %.4f'%(acc*1./len(y_data)))
print('number of correct ones:%d'%acc)
return acc*1./len(y_data)
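# _switch_cond below is a small normalization step: when the predicted logical form
# has two conditions joined by 'and', it tries swapping the column/value slots of the
# two conditions and keeps the swapped form only if it then matches the ground truth
# exactly, since condition order does not change the meaning of the query.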
def _switch_cond(logic, true_logic):
logic_tokens = logic.split()
if len(logic_tokens) > 8 and logic_tokens[5] == 'and':
newlogic = [x for x in logic_tokens]
newlogic[2], newlogic[6], newlogic[4], newlogic[8] = logic_tokens[6], logic_tokens[2], logic_tokens[8], logic_tokens[4]
newline = ' '.join(newlogic)
if newline == true_logic:
logic = newline
elif len(logic_tokens) > 9 and logic_tokens[6] == 'and':
newlogic = [x for x in logic_tokens]
newlogic[3], newlogic[7], newlogic[5], newlogic[9] = logic_tokens[7], logic_tokens[3], logic_tokens[9], logic_tokens[5]
newline = ' '.join(newlogic)
if newline == true_logic:
logic = newline
return logic
|
the-stack_106_19363
|
# -*- coding: utf-8 -*-
"""ProximityForest test code."""
import numpy as np
from numpy import testing
from sktime.classification.distance_based import ProximityForest
from sktime.datasets import load_unit_test
def test_pf_on_unit_test_data():
"""Test of ProximityForest on unit test data."""
# load unit test data
X_train, y_train = load_unit_test(split="train")
X_test, y_test = load_unit_test(split="test")
indices = np.random.RandomState(0).choice(len(y_train), 10, replace=False)
# train PF
pf = ProximityForest(n_estimators=5, random_state=0)
pf.fit(X_train, y_train)
# assert probabilities are the same
probas = pf.predict_proba(X_test.iloc[indices])
testing.assert_array_almost_equal(probas, pf_unit_test_probas, decimal=2)
pf_unit_test_probas = np.array(
[
[
0.0,
1.0,
],
[
1.0,
0.0,
],
[
0.0,
1.0,
],
[
1.0,
0.0,
],
[
0.8,
0.2,
],
[
1.0,
0.0,
],
[
0.0,
1.0,
],
[
0.0,
1.0,
],
[
0.2,
0.8,
],
[
1.0,
0.0,
],
]
)
|
the-stack_106_19365
|
'''Parsing HTML forms.'''
import re
import warnings
from lxml import html
from .htmlib import rm_ws
from .httplib import retry_get
from .quick import contains
# Determine whether any of the tables contains the relevant data
def is_table(tables, within, without):
'''Determine whether the table contains relevant data.'''
is_tab, table = False, None
for table in tables:
string = re.sub(r'\s', '', table.text_content())
if contains(string, within, without):
is_tab = True
break
return is_tab, table
# Extract all text from the table
def extract_string(table) -> str:
'''Extract all text in the table.'''
return rm_ws(table.text_content())
# Extract the table text row by row and column by column into a list of tuples
def extract_list(table) -> list:
'''Extract the table text by row and column
and put it into the tuple list.'''
rows = table.xpath('tr | */tr')
form = []
for row in rows:
cols = row.xpath('td | th')
res = []
for col in cols:
res.append(rm_ws(col.text_content(), ''))
if res:
form.append(tuple(res))
return form
# Check whether every row of the table has the same number of columns
def is_equal(form):
'''Determine whether it is a table with the
same number of columns in each row.'''
return all(len(form[0]) == len(
form[pos]) for pos in range(1, len(form)))
# Wrap all of the steps above into a single call
def parse_form(form_url, within, without=tuple(), encoding='utf-8', sep='</t', to_list=False):
response = retry_get(form_url)
if not response:
raise Exception('Request failed, invalid URL or parameter.')
string = response.content.decode(encoding)
elements = html.fromstring(string.replace(sep, '\n'+sep))
    tables = elements.xpath('//table')  # find all tables
is_tab, table = is_table(tables, within, without)
if is_tab:
return extract_list(table) if to_list else extract_string(table)
warnings.warn('Cannot find table with required data.')
return False
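# Hedged usage sketch (the URL and keyword tuples are made-up placeholders, not part
# of the original module):
#
#     form = parse_form('https://example.com/stats.html',
#                       within=('Total',), without=('Draft',), to_list=True)
#     if form and is_equal(form):
#         header, *rows = form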
|
the-stack_106_19366
|
# -*- encoding: utf-8 -*-
"""
Copyright (c) Minu Kim - [email protected]
Templates from AppSeed.us
"""
from app import db, login, app
from flask_login import UserMixin, AnonymousUserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from time import time
import jwt
import json
enrolment = db.Table('enrolment',
db.Column('user_id', db.Integer, db.ForeignKey('user.id'), primary_key=True),
db.Column('course_id', db.Integer, db.ForeignKey('course.id'), primary_key=True))
class User(UserMixin, db.Model):
"""
db.Model class that defines a user.
Parameters
----------
id, username, email: db.Column
stores id (in number id), username (user specified username), and email
password_hash: db.Column
stores a hash value of a password.
admitted_year: db.Column
admitted year to KAIST in integer (e.g. 2018)
department: db.relationship
db.Model class of department for each user
courses: db.relationship
relationship table of db.Model for taken courses for each user.
course_info: db.Column
string of json that stores course information for each user
doubly_recognized: db.Column
string of json that stores the list of courses in course.id
that are chosen to be doubly recognized (상호인정 과목)
replaced: db.Column
string of json that stores the list of courses in course.id
that are chosen to be replaced by different courses. (대체 교과목)
individual: db.Column
string of json that stores the list of courses in course.id
that are chosen to be determined as courses for individually designed major.
recognized_as: db.Column
string of json that stores the list of courses in course.id
that are chosen to be recognized as other major courses (타학과 인정 교과목)
"""
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64), index=True, unique=True)
email = db.Column(db.String(120), index=True, unique=True)
password_hash = db.Column(db.String(128))
admitted_year = db.Column(db.Integer)
department = db.relationship('Department', backref='user', uselist=False)
courses = db.relationship('Course', secondary=enrolment, backref='enrolment')
course_info = db.Column(db.Text)
doubly_recognized = db.Column(db.String(64))
replaced = db.Column(db.String(64))
individual = db.Column(db.String(64))
recognized_as = db.Column(db.String(128))
def __repr__(self):
return '<User {}>'.format(self.username)
def set_password(self, password):
"""
stores hashed values of user typed password.
uses werkzeug security modules
Parameters
----------
password: str
user typed string value
"""
self.password_hash = generate_password_hash(password)
def check_password(self, password):
"""
checks whether the user typed password equals the stored value
when hashed with werkzeug security modules
Parameters
----------
password: str
user typed string value
Returns
-------
check_password: bool
boolean value that specifies whether the typed password
matches stored value when hashed.
"""
return check_password_hash(self.password_hash, password)
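    # Hypothetical example of the two helpers above (not part of the application code):
    #
    #     u = User(username='alice')
    #     u.set_password('s3cret')
    #     assert u.check_password('s3cret') and not u.check_password('wrong')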
def get_reset_password_token(self, expires_in=600):
"""
gets a reset_password_token in case user requests.
TBA later
"""
return jwt.encode(
{'reset_password': self.id, 'exp': time() + expires_in},
app.config['SECRET_KEY'], algorithm='HS256')
@staticmethod
def verify_reset_password_token(token):
"""
verifies a reset password token
TBA later
"""
try:
id = jwt.decode(token, app.config['SECRET_KEY'], algorithms=['HS256'])['reset_password']
except:
return
return User.query.get(id)
def valid_department(self):
"""
checks whether the user chosen department is valid
in terms of graduation requirements.
Returns
-------
valid_department: bool
True if the department choice is valid.
"""
try:
if self.department.major is None:
return False
if self.department.major == "":
return False
except AttributeError:
return False
if self.department.double_major + self.department.minor == "":
if self.department.is_advanced_major + self.department.is_individually_designed == 0:
return False
if self.department.major == "MSB":
if self.department.double_major == "":
return False
return True
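    # Worked example of the rule above (hypothetical data): a user majoring in "EE"
    # with no double major, no minor, and neither the advanced nor the individually
    # designed major flag set fails this check, while an "MSB" major additionally
    # needs a double major to pass.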
def required_major_credits(self):
"""
calculates and returns the required major credits for graduation.
considers the departments and admitted year of the user.
Returns
-------
dict_major_credits: dict
dictionary of required major credit values
"""
if not self.valid_department():
return None
year = self.admitted_year
if year is None:
year = 2021
major = self.department.major
minor = self.department.minor
double_major = self.department.double_major
is_advanced_major = self.department.is_advanced_major
dict_major_credits = {}
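        # Assumption (not stated in the original source): each tuple below appears
        # to be (mandatory major credits, total major credits) required for the
        # given department code, keyed by that code.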
        # Major course requirements: Civil and Environmental Engineering (CE)
if year <= 2015:
if major == "CE":
dict_major_credits["CE"] = (12, 45)
if "CE" in minor:
dict_major_credits["CE"] = (12, 21)
if "CE" in double_major:
dict_major_credits["CE"] = (12, 40)
if year >= 2016:
if major == "CE":
dict_major_credits["CE"] = (12, 45)
if is_advanced_major:
dict_major_credits["CE"] = (12, 57)
if "CE" in minor:
dict_major_credits["CE"] = (12, 18)
if "CE" in double_major:
dict_major_credits["CE"] = (12, 40)
        # Major course requirements: Mechanical Engineering (ME)
if year <= 2015:
if major == "ME":
if year <= 2012:
dict_major_credits["ME"] = (9, 49)
else:
dict_major_credits["ME"] = (12, 59)
if "ME" in minor:
dict_major_credits["ME"] = (9, 21)
if "ME" in double_major:
dict_major_credits["ME"] = (12, 40)
if year >= 2016:
if major == "ME":
dict_major_credits["ME"] = (12, 48)
if is_advanced_major:
dict_major_credits["ME"] = (12, 63)
if "ME" in minor:
dict_major_credits["ME"] = (6, 21)
if "ME" in double_major:
dict_major_credits["ME"] = (12, 40)
        # Major course requirements: School of Business and Technology Management (MSB)
if year <= 2015:
if major == "MSB":
dict_major_credits["MSB"] = (9, 42)
if "MSB" in minor:
dict_major_credits["MSB"] = (6, 18)
if "MSB" in double_major:
dict_major_credits["MSB"] = (6, 40)
if year >= 2016:
if major == "MSB":
dict_major_credits["MSB"] = (9, 48)
if "MSB" in minor:
dict_major_credits["MSB"] = (6, 18)
if "MSB" in double_major:
dict_major_credits["MSB"] = (6, 40)
        # Major course requirements: Physics (PH)
if year <= 2015:
if major == "PH":
dict_major_credits["PH"] = (19, 40)
if "PH" in minor:
dict_major_credits["PH"] = (6, 19)
if "PH" in double_major:
dict_major_credits["PH"] = (19, 40)
if year >= 2016:
if major == "PH":
dict_major_credits["PH"] = (19, 43)
if is_advanced_major:
dict_major_credits["PH"] = (19, 55)
if "PH" in minor:
dict_major_credits["PH"] = (6, 18)
if "PH" in double_major:
dict_major_credits["PH"] = (19, 40)
        # Major course requirements: Bio and Brain Engineering (BiS)
if year <= 2015:
if major == "BiS":
dict_major_credits["BiS"] = (14, 42)
if "BiS" in minor:
dict_major_credits["BiS"] = (14, 18)
if "BiS" in double_major:
dict_major_credits["BiS"] = (14, 40)
if year >= 2016:
if major == "BiS":
dict_major_credits["BiS"] = (14, 44)
if is_advanced_major:
dict_major_credits["BiS"] = (14, 56)
if "BiS" in minor:
dict_major_credits["BiS"] = (14, 18)
if "BiS" in double_major:
dict_major_credits["BiS"] = (14, 40)
        # Major course requirements: Industrial Design (ID)
if year <= 2015:
if major == "ID":
dict_major_credits["ID"] = (27, 54)
if "ID" in minor:
dict_major_credits["ID"] = (9, 18)
if "ID" in double_major:
dict_major_credits["ID"] = (27, 40)
if year >= 2016:
if major == "ID":
dict_major_credits["ID"] = (15, 45)
if is_advanced_major:
dict_major_credits["ID"] = (15, 57)
if "ID" in minor:
dict_major_credits["ID"] = (9, 18)
if "ID" in double_major:
dict_major_credits["ID"] = (15, 40)
        # Major course requirements: Industrial and Systems Engineering (IE)
if year <= 2015:
if major == "IE":
dict_major_credits["IE"] = (24, 51)
if "IE" in minor:
dict_major_credits["IE"] = (0, 18)
if "IE" in double_major:
dict_major_credits["IE"] = (24, 40)
if year >= 2016:
if major == "IE":
dict_major_credits["IE"] = (24, 45)
if is_advanced_major:
dict_major_credits["IE"] = (24, 57)
if "IE" in minor:
dict_major_credits["IE"] = (0, 18)
if "IE" in double_major:
dict_major_credits["IE"] = (24, 40)
# Biological Sciences: major course requirements
if year <= 2015:
if major == "BS":
dict_major_credits["BS"] = (18, 48)
if "BS" in minor:
dict_major_credits["BS"] = (12, 21)
if "BS" in double_major:
dict_major_credits["BS"] = (18, 40)
if year >= 2016:
if major == "BS":
dict_major_credits["BS"] = (18, 42)
if is_advanced_major:
dict_major_credits["BS"] = (18, 54)
if "BS" in minor:
dict_major_credits["BS"] = (12, 21)
if "BS" in double_major:
dict_major_credits["BS"] = (18, 40)
# Chemical and Biomolecular Engineering: major course requirements
if year <= 2015:
if major == "CBE":
dict_major_credits["CBE"] = (21, 41)
if year <= 2013:
dict_major_credits["CBE"] = (18, 41)
if year <= 2010:
dict_major_credits["CBE"] = (6, 41)
if "CBE" in minor:
if year <= 2010:
dict_major_credits["CBE"] = (3, 18)
else:
dict_major_credits["CBE"] = (9, 18)
if "CBE" in double_major:
dict_major_credits["CBE"] = (21, 41)
if year >= 2016:
if major == "CBE":
dict_major_credits["CBE"] = (21, 42)
if is_advanced_major:
dict_major_credits["CBE"] = (21, 54)
if "CBE" in minor:
dict_major_credits["CBE"] = (9, 18)
if "CBE" in double_major:
dict_major_credits["CBE"] = (21, 42)
# Mathematical Sciences: major course requirements
if year <= 2015:
if major == "MAS":
dict_major_credits["MAS"] = (0, 42)
if "MAS" in minor:
dict_major_credits["MAS"] = (0, 18)
if "MAS" in double_major:
dict_major_credits["MAS"] = (0, 40)
if year >= 2016:
if major == "MAS":
dict_major_credits["MAS"] = (0, 42)
if is_advanced_major:
dict_major_credits["MAS"] = (0, 55)
if "MAS" in minor:
dict_major_credits["MAS"] = (0, 18)
if "MAS" in double_major:
dict_major_credits["MAS"] = (0, 40)
# Materials Science and Engineering: major course requirements
if year <= 2015:
if major == "MS":
dict_major_credits["MS"] = (18, 42)
if "@MS@" in minor:
dict_major_credits["MS"] = (9, 18)
if "@MS@" in double_major:
dict_major_credits["MS"] = (18, 40)
if year >= 2016:
if major == "MS":
dict_major_credits["MS"] = (18, 42)
if is_advanced_major:
dict_major_credits["MS"] = (18, 57)
if "@MS@" in minor:
dict_major_credits["MS"] = (9, 18)
if "@MS@" in double_major:
dict_major_credits["MS"] = (18, 40)
# Nuclear and Quantum Engineering: major course requirements
if year <= 2015:
if major == "NQE":
dict_major_credits["NQE"] = (25, 43)
if "NQE" in minor:
dict_major_credits["NQE"] = (15, 21)
if "NQE" in double_major:
dict_major_credits["NQE"] = (25, 40)
if year >= 2016:
if major == "NQE":
dict_major_credits["NQE"] = (25, 43)
if is_advanced_major:
dict_major_credits["NQE"] = (25, 55)
if "NQE" in minor:
dict_major_credits["NQE"] = (15, 21)
if "NQE" in double_major:
dict_major_credits["NQE"] = (25, 40)
# Transdisciplinary Studies: major course requirements
if major == "TS":
dict_major_credits["TS"] = (21, 42)
if is_advanced_major:
dict_major_credits["TS"] = (21, 54)
if "TS" in minor:
dict_major_credits["TS"] = (18, 18)
if "TS" in double_major:
dict_major_credits["TS"] = (21, 42)
# Electrical Engineering: major course requirements
if year <= 2015:
if major == "EE":
dict_major_credits["EE"] = (18, 53)
if double_major + minor != "":
dict_major_credits["EE"] = (18, 47)
if year <= 2013:
dict_major_credits["EE"] = (18, 47)
if "EE" in minor:
dict_major_credits["EE"] = (12, 21)
if "EE" in double_major:
dict_major_credits["EE"] = (18, 40)
if year >= 2016:
if major == "EE":
dict_major_credits["EE"] = (18, 50)
if is_advanced_major:
dict_major_credits["EE"] = (18, 62)
if "EE" in minor:
dict_major_credits["EE"] = (3, 21)
if "EE" in double_major:
dict_major_credits["EE"] = (18, 40)
if year >= 2018:
if major == "EE":
dict_major_credits["EE"] = (15, 50)
if is_advanced_major:
dict_major_credits["EE"] = (15, 62)
if "EE" in minor:
dict_major_credits["EE"] = (3, 21)
if "EE" in double_major:
dict_major_credits["EE"] = (15, 40)
# School of Computing: major course requirements
if year <= 2015:
if major == "CS":
dict_major_credits["CS"] = (19, 43)
if "CS" in minor:
dict_major_credits["CS"] = (15, 21)
if "CS" in double_major:
dict_major_credits["CS"] = (19, 40)
if year >= 2016:
if major == "CS":
dict_major_credits["CS"] = (19, 49)
if is_advanced_major:
dict_major_credits["CS"] = (19, 61)
if "CS" in minor:
dict_major_credits["CS"] = (15, 21)
if "CS" in double_major:
dict_major_credits["CS"] = (19, 40)
# Aerospace Engineering: major course requirements
if year <= 2015:
if major == "AE":
dict_major_credits["AE"] = (19, 49)
if "AE" in minor:
dict_major_credits["AE"] = (12, 21)
if "AE" in double_major:
dict_major_credits["AE"] = (19, 40)
if year >= 2016:
if major == "AE":
dict_major_credits["AE"] = (21, 42)
if is_advanced_major:
dict_major_credits["AE"] = (21, 60)
if "AE" in minor:
dict_major_credits["AE"] = (9, 18)
if "AE" in double_major:
dict_major_credits["AE"] = (21, 42)
# Chemistry: major course requirements
if year <= 2015:
if major == "CH":
dict_major_credits["CH"] = (24, 42)
if year <= 2011:
dict_major_credits["CH"] = (18, 42)
if "CH" in minor:
dict_major_credits["CH"] = (12, 21)
if "CH" in double_major:
dict_major_credits["CH"] = (24, 40)
if year >= 2016:
if major == "CH":
dict_major_credits["CH"] = (24, 42)
if is_advanced_major:
dict_major_credits["CH"] = (24, 54)
if "CH" in minor:
dict_major_credits["CH"] = (12, 21)
if "CH" in double_major:
dict_major_credits["CH"] = (24, 40)
return dict_major_credits
def required_other_credits(self):
"""
calculates and returns credits other than major credits
that are required for graduation.
considers department and admitted year of the user.
Returns
-------
elective_basic: int
required credit for elective basic courses
research: int
required credit for research courses
total_major: int
required credit for his/her major courses.
calculated from required_major_credits()
total_other_major: int
required credit for his/her other majors other than his/her department.
calculated from required_major_credits()
humanities: int
required credit for humanities courses.
"""
# Only computes the required credits excluding mandatory major and mandatory general credits.
if not self.valid_department():
return None
# Elective basic credit requirement
if self.department.double_major == "":
elective_basic = 9
else:
if self.department.major in ["ME", "PH", "BiS", "MAS", "EE", "CS"]:
elective_basic = 3
else:
elective_basic = 6
# Research credit requirement
if self.department.double_major != "":
research = 0
else:
if self.department.major in ["MSB", "IE", "CBE"]:
research = 4
elif self.department.major in ["PH", "BiS"]:
research = 5
else:
research = 3
# Total major credit requirement
required_major_credits = self.required_major_credits()
total_major = required_major_credits[self.department.major][1]
total_other_major = 0
for key in required_major_credits:
if key != self.department.major:
total_other_major += required_major_credits[key][1]
if self.department.is_individually_designed == 1:
total_other_major += 12
# Humanities and social electives credit requirement
humanities = 21
if self.department.double_major != "":
humanities = 12
return elective_basic, research, total_major, total_other_major, humanities
def required_credits(self):
"""
calculates and returns all credits including the total credits
that are required for graduation.
Returns
-------
mandatory_general: int
required credit for mandatory general courses
mandatory_basic: int
required credit for mandatory basic courses
elective_basic: int
required credit for elective basic courses
research: int
required credit for research courses
total_major: int
required credit for his/her major courses.
calculated from required_major_credits()
total_other_major: int
required credit for his/her other majors other than his/her department.
calculated from required_major_credits()
humanities: int
required credit for humanities courses.
"""
if not self.valid_department():
return None
required_other_credits = self.required_other_credits()
mandatory_general = 7
mandatory_basic = 23
elective_basic = required_other_credits[0]
research = required_other_credits[1]
major = required_other_credits[2]
other_major = required_other_credits[3]
humanities = required_other_credits[4]
if self.admitted_year <= 2015:
total = 130
else:
total = 136
return mandatory_general, mandatory_basic, elective_basic, research, \
major, other_major, humanities, total
def completed_credits(self):
"""
calculates and returns completed credits for a user.
considers db.department, db.courses and db.admitted_year.
Returns
-------
mandatory_general: int
mandatory_au: int
mandatory_basic: int
elective_basic: int
major: int
completed credits for all major requirements
of his/her department (summed up)
other_major: int
completed credits for all other major requirements
that are not from his/her department (summed up)
humanities: int
others: int
research: int
dict_completed_major: dict
dictionary that stores completed credits
for major courses of each department
individual_major: dict
total: int
total credit one has completed
"""
if not self.valid_department():
return None
courses = self.courses
if courses is None:
courses = []
for course in courses:
if "EE Co-op 1" in course.name:
course1 = Course.query.filter_by(code="EE405").first()
if (course1 not in courses) and (course1 is not None):
courses.append(course1)
course2 = Course.query.filter_by(code="EE490").first()
if (course2 not in courses) and (course2 is not None):
courses.append(course2)
if "ISysE Co-op 1" in course.name:
course1 = Course.query.filter_by(code="IE436").first()
if (course1 not in courses) and (course1 is not None):
courses.append(course1)
course2 = Course.query.filter_by(code="IE481").first()
if (course2 not in courses) and (course2 is not None):
courses.append(course2)
course3 = Course.query.filter_by(code="IE490").first()
if (course3 not in courses) and (course3 is not None):
courses.append(course3)
if "ME Co-op 1" in course.name:
course1 = Course.query.filter_by(code="ME490").first()
if (course1 not in courses) and (course1 is not None):
courses.append(course1)
if "SoC Co-op 1" in course.name:
course1 = Course.query.filter_by(code="CS490").first()
if (course1 not in courses) and (course1 is not None):
courses.append(course1)
course2 = Course.query.filter_by(code="CS409").first()
if (course2 not in courses) and (course2 is not None):
courses.append(course2)
mandatory_general = 0
mandatory_au = 0
mandatory_basic = 0
elective_basic = 0
major = 0
other_major = 0
humanities = 0
research = 0
others = 0
individual_major = 0
dict_completed_major = {}
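# dict_completed_major maps a department code to [completed mandatory major credits, completed total major credits].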
total = 0
required_major_credits = self.required_major_credits()
for key in required_major_credits:
dict_completed_major[key] = [0, 0]
for course in courses:
try:
course_info = json.loads(self.course_info)
if str(course.id) in course_info:
if course_info[str(course.id)]["letter"] in ["F", "U"]:
continue
except KeyError:
pass
except TypeError:
pass
if "Co-op 2" in course.name:
others += 3
total += 3
continue
if "ISysE Co-op 1" in course.name:
continue
if "ME Co-op 1" in course.name:
others += 3
try:
dict_completed_major["ME"][1] += 3
except KeyError:
total -= 3
total += 6
continue
if "Co-op 1" in course.name:
others += 3
total += 3
continue
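# Doubly recognized courses are credited to every eligible department below.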
if "@" + str(course.id) + "@" in self.doubly_recognized:
recognizables = course.recognizables() + [course.extract_department(replace=False)]
for key in required_major_credits:
if key in recognizables:
if key == course.extract_department(replace=False):
if '필수' in course.subject_type:
dict_completed_major[key][0] += int(course.credit)
dict_completed_major[key][1] += int(course.credit)
total += int(course.credit)
else:
dict_completed_major[key][1] += int(course.credit)
total += int(course.credit)
else:
dict_completed_major[key][1] += int(course.credit)
total += int(course.credit)
continue
if "@" + str(course.id) + "@" in self.replaced:
course = Course.query.filter_by(code=course.replaceables()).first()
if "AU" in course.credit:
mandatory_au += int(course.credit[0])
elif course.subject_type == '교양필수':
mandatory_general += int(course.credit)
total += int(course.credit)
elif course.subject_type == '기초필수':
mandatory_basic += int(course.credit)
total += int(course.credit)
elif course.subject_type == '기초선택':
elective_basic += int(course.credit)
total += int(course.credit)
elif course.subject_type == '인문사회선택':
humanities += int(course.credit)
total += int(course.credit)
elif ('전공' in course.subject_type) or ("석/박" in course.subject_type):
if "@" + str(course.id) + "@" in self.individual:
individual_major += int(course.credit)
total += int(course.credit)
continue
subject_type = course.subject_type
code = course.code
recognized = json.loads(self.recognized_as)
if str(course.id) in recognized:
subject_type = "전공선택"
code = recognized[str(course.id)]
for key in required_major_credits:
# If the course is a major course needed for a requirement
if key in code:
# If the course belongs to the primary major
if key == self.department.major:
if '필수' in subject_type:
dict_completed_major[key][0] += int(course.credit)
dict_completed_major[key][1] += int(course.credit)
total += int(course.credit)
else:
dict_completed_major[key][1] += int(course.credit)
total += int(course.credit)
# If the course belongs to a double-major/minor department other than the primary major
else:
# Otherwise, the general case
if '필수' in subject_type:
dict_completed_major[key][0] += int(course.credit)
dict_completed_major[key][1] += int(course.credit)
total += int(course.credit)
else:
dict_completed_major[key][1] += int(course.credit)
total += int(course.credit)
is_other = True
for key in required_major_credits:
if key in code:
is_other = False
if is_other:
total += int(course.credit)
others += int(course.credit)
elif ('연구' in course.subject_type) or ("세미나" in course.subject_type):
if self.department.major in course.code:
research += int(course.credit)
else:
others += int(course.credit)
total += int(course.credit)
else:
try:
others += int(course.credit)
total += int(course.credit)
except ValueError:
print('Unexpected non-integer credit in: ', course.name)
for key in dict_completed_major:
if key == self.department.major:
major += dict_completed_major[key][1]
else:
other_major += dict_completed_major[key][1]
other_major += individual_major
return mandatory_general, mandatory_au, mandatory_basic, elective_basic, major, other_major, \
humanities, others, research, dict_completed_major, individual_major, total
def total_credits(self):
"""
calculates and returns total credits a user has taken
this excludes AU credits
Returns
-------
total: int
total credit a user has taken
"""
total = 0
for course in self.courses:
if "AU" not in course.credit:
total += int(course.credit)
return total
def remaining_credits(self):
"""
calculates and returns remaining credits to be taken
until the requirement for graduation is fulfilled.
Returns
-------
happy, exciting, leadership, physical, mandatory_general, \
mandatory_basic, elective_basic, research, humanities: int
major, other_major: list
list of integers of size 2 consisting of the mandatory major credits and the remaining major credits.
(mandatory major credits, elective major credits)
"""
if not self.valid_department():
return None
courses = self.courses
if courses is None:
courses = []
required_other_credits = self.required_other_credits()
mandatory_general = 7
mandatory_basic = 23
elective_basic = required_other_credits[0]
research = required_other_credits[1]
humanities = required_other_credits[4]
happy = 1
exciting = 1
leadership = 2
physical = 4
rm = self.required_major_credits()
completed_credits = self.completed_credits()
cm = completed_credits[-3]
completed_individual = completed_credits[-2]
dict_remaining_major = {}
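# dict_remaining_major maps a department code to (remaining mandatory major credits, remaining total major credits), floored at zero.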
try:
for key in rm:
dict_remaining_major[key] = (max(0, rm[key][0] - cm[key][0]),
max(0, rm[key][1] - cm[key][1]))
except (KeyError, TypeError):
return None
other_major = [0, 0]
for key in dict_remaining_major:
if key == self.department.major:
major = dict_remaining_major[key]
else:
other_major = [other_major[0] + dict_remaining_major[key][0],
other_major[1] + dict_remaining_major[key][1]]
if self.department.is_individually_designed:
other_major[1] += (12 - completed_individual)
for course in courses:
try:
course_info = json.loads(self.course_info)
if str(course.id) in course_info:
if course_info[str(course.id)]["letter"] in ["F", "U"]:
continue
except KeyError:
pass
except TypeError:
pass
if "AU" in course.credit:
if course.code == 'HSS090':
happy = 0
elif course.code == 'HSS091':
exciting = 0
elif '인성/리더십' in course.name:
leadership -= 1
else:
physical -= 2
elif course.code in ["HSS001", "HSS022", "HSS023", "HSS024", "HSS025"]:
mandatory_general -= int(course.credit)
elif course.subject_type == "기초필수":
mandatory_basic -= int(course.credit)
elif course.subject_type == "기초선택":
elective_basic -= int(course.credit)
elif course.subject_type == "인문사회선택":
humanities -= int(course.credit)
elif ('연구' in course.subject_type) or ("세미나" in course.subject_type):
research -= int(course.credit)
elif "전공" in course.subject_type:
pass
else:
pass
return happy, exciting, max(0, leadership), max(0, physical), \
max(0, mandatory_general), max(0, mandatory_basic), max(0, elective_basic), \
max(0, research), major, other_major, max(0, humanities)
def render_remaining_total_credits(self):
"""
calculates and returns the remaining total credits.
the value is used when rendering the html page.
Returns
-------
remaining_total: int
remaining total credit until graduation
"""
if not self.valid_department():
return None
remaining_credits = self.remaining_credits()
completed_credits = self.completed_credits()
requirement = self.required_credits()[-1]
remaining = sum(remaining_credits[4:8]) + remaining_credits[8][1] + \
remaining_credits[9][1] + remaining_credits[10]
total = sum(completed_credits[:-3]) - completed_credits[1]
return max(requirement - total, remaining)
def remaining_courses(self):
"""
calculates and returns the remaining courses that must be taken to graduate.
the return value consists of multiple tuples that contain 1) a list of course codes and
2) the number of courses in that list that may be skipped.
Returns
-------
general_mandatory: tuple
major_mandatory: tuple
"""
if not self.valid_department():
return None
courses = self.courses
if courses is None:
courses = []
# Mandatory basic and mandatory general course requirements
general_mandatory = ["HSS022", "HSS023", "HSS024", "HSS025", "HSS090", "HSS091", "HSS001",
("PH141", "PH161"), ("PH142", "PH162"), "BS120", "CH101", "MAS101", "MAS102",
("CH102", "CH451"), "CS101"]
if self.admitted_year >= 2020:
general_mandatory.append(("PH151", "PH152"))
else:
general_mandatory.append("PH151")
for course in courses:
if "EE Co-op 1" in course.name:
course1 = Course.query.filter_by(code="EE405").first()
if (course1 not in courses) and (course1 is not None):
courses.append(course1)
course2 = Course.query.filter_by(code="EE490").first()
if (course2 not in courses) and (course2 is not None):
courses.append(course2)
if "ISysE Co-op 1" in course.name:
course1 = Course.query.filter_by(code="IE436").first()
if (course1 not in courses) and (course1 is not None):
courses.append(course1)
course2 = Course.query.filter_by(code="IE481").first()
if (course2 not in courses) and (course2 is not None):
courses.append(course2)
course3 = Course.query.filter_by(code="IE490").first()
if (course3 not in courses) and (course3 is not None):
courses.append(course3)
if "ME Co-op 1" in course.name:
course1 = Course.query.filter_by(code="ME490").first()
if (course1 not in courses) and (course1 is not None):
courses.append(course1)
if "SoC Co-op 1" in course.name:
course1 = Course.query.filter_by(code="CS490").first()
if (course1 not in courses) and (course1 is not None):
courses.append(course1)
course2 = Course.query.filter_by(code="CS409").first()
if (course2 not in courses) and (course2 is not None):
courses.append(course2)
for course in courses:
for code in list(general_mandatory):  # iterate over a copy since entries are removed
if course.code in code:
general_mandatory.remove(code)
if course.code == "PH171":
for code in list(general_mandatory):
if "PH141" in code:
general_mandatory.remove(code)
if "PH151" in code:
general_mandatory.remove(code)
if course.code == "PH172":
for code in list(general_mandatory):
if "PH142" in code:
general_mandatory.remove(code)
if "PH152" in code:
general_mandatory.remove(code)
# Major course requirements
major_mandatory = []
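# Each entry is a tuple of (list of required course codes, number of courses in that list that may be skipped).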
# Civil and Environmental Engineering requirements
if self.department.major == "CE":
major_mandatory.append((["CE201", "CE230", "CE350", "CE371"], 0))
if self.department.double_major == "":
major_mandatory.append((["CE490"], 0))
if "CE" in self.department.minor:
major_mandatory.append((["CE201", "CE230", "CE350", "CE371"], 0))
if "CE" in self.department.double_major:
major_mandatory.append((["CE201", "CE230", "CE350", "CE371"], 0))
# Mechanical Engineering requirements
if self.admitted_year <= 2015:
if self.department.major == "ME":
if self.department.double_major == "":
major_mandatory.append((["MAS109", "MAS201", "MAS202"], 1))
major_mandatory.append((["ME490", "ME401"], 1))
if self.admitted_year <= 2012:
major_mandatory.append((["ME200", "ME205", "ME400"], 0))
major_mandatory.append((["ME231", "ME340", "ME251", "ME360", "ME361",
"ME211", "ME221", "ME307", "ME370"], 3))
else:
major_mandatory.append((["ME200", "ME205", "ME400", "ME340"], 0))
major_mandatory.append((["ME231", "ME340", "ME251", "ME360", "ME361",
"ME211", "ME221", "ME307", "ME370", "ME311"], 4))
if "ME" in self.department.minor:
major_mandatory.append((["ME200", "ME205", "ME400"], 0))
major_mandatory.append((["ME231", "ME340", "ME251", "ME360", "ME361",
"ME211", "ME221", "ME307", "ME370", "ME311"], 6))
if "ME" in self.department.double_major:
major_mandatory.append((["ME200", "ME205", "ME400", "ME340"], 0))
if self.admitted_year >= 2016:
if self.department.major == "ME":
if self.department.double_major == "":
major_mandatory.append((["MAS109", "MAS201", "MAS202"], 1))
major_mandatory.append((["ME490", "ME401"], 1))
major_mandatory.append((["ME231", "ME340", "ME251", "ME360", "ME361",
"ME211", "ME221", "ME307", "ME370", "ME311"], 5))
if "ME" in self.department.minor:
major_mandatory.append((["ME200", "ME205", "ME400", "ME340"], 2))
if "ME" in self.department.double_major:
major_mandatory.append((["ME200", "ME205", "ME400", "ME340"], 0))
# School of Business and Technology Management requirements
if self.department.major == "MSB":
major_mandatory.append((["MSB200", "MSB204", "MSB351"], 0))
major_mandatory.append((["MSB493", "MSB490"], 1))
major_mandatory.append((["MSB496"], 0))
if "MSB" in self.department.minor:
major_mandatory.append((["MSB200", "MSB204", "MSB351"], 1))
if "MSB" in self.department.double_major:
major_mandatory.append((["MSB200", "MSB204", "MSB351"], 1))
# Physics requirements
if self.department.major == "PH":
major_mandatory.append((["PH152"], 0))
major_mandatory.append((["PH221", "PH231", "PH251", "PH301", "PH302", "PH311", "PH351"], 0))
if self.department.double_major == "":
major_mandatory.append((["PH490", "PH496"], 0))
if "PH" in self.department.minor:
major_mandatory.append((["PH301", "PH351"], 0))
if "PH" in self.department.double_major:
major_mandatory.append((["PH221", "PH231", "PH251", "PH301", "PH302", "PH311", "PH351"], 0))
# Bio and Brain Engineering requirements
if self.department.major == "BiS":
if self.department.double_major == "":
major_mandatory.append((["MAS109", "MAS201", "MAS250"], 0))
major_mandatory.append((["BiS490", "BiS496"], 0))
else:
major_mandatory.append((["MAS109", "MAS201", "MAS250"], 2))
major_mandatory.append((["BiS200", "BiS222", "BiS301", "BiS350"], 0))
if "BiS" in self.department.minor:
major_mandatory.append((["BiS200", "BiS222", "BiS301", "BiS350"], 0))
if "BiS" in self.department.double_major:
major_mandatory.append((["BiS200", "BiS222", "BiS301", "BiS350"], 0))
# Industrial Design requirements
if self.admitted_year <= 2015:
if self.department.major == "ID":
major_mandatory.append((["ID202"], 0))
major_mandatory.append((["ID211", "ID212", "ID213", "ID301",
"ID304", "ID402", "ID403", "ID409", "ID414"], 0))
if self.department.double_major == "":
major_mandatory.append((["ID490"], 0))
if "ID" in self.department.minor:
major_mandatory.append((["ID212", "ID213", "ID301"], 0))
if "ID" in self.department.double_major:
major_mandatory.append((["ID211", "ID212", "ID213", "ID301",
"ID304", "ID402", "ID403", "ID409", "ID414"], 0))
if self.admitted_year >= 2016:
if self.department.major == "ID":
major_mandatory.append((["ID202"], 0))
major_mandatory.append((["ID212", "ID213", "ID301", "ID304", "ID403"], 0))
if self.department.double_major == "":
major_mandatory.append((["ID490"], 0))
if self.department.is_advanced_major == 1:
major_mandatory.append((["ID409", "ID414"], 0))
if "ID" in self.department.minor:
major_mandatory.append((["ID212", "ID213", "ID301"], 0))
if "ID" in self.department.double_major:
major_mandatory.append((["ID212", "ID213", "ID301", "ID304", "ID403"], 0))
# Industrial and Systems Engineering requirements
if self.department.major == "IE":
major_mandatory.append((["MAS109"], 0))
major_mandatory.append((["IE241", "IE251", "IE260", "IE261", "IE331"], 0))
major_mandatory.extend([(["IE221", "IE321"], 1), (["IE232", "IE332"], 1), (["IE242", "IE341"], 1)])
if self.department.double_major == "":
major_mandatory.append((["IE490", "IE496"], 0))
if "IE" in self.department.double_major:
major_mandatory.append((["IE241", "IE251", "IE260", "IE261", "IE331"], 0))
major_mandatory.extend([(["IE221", "IE321"], 1), (["IE232", "IE332"], 1), (["IE242", "IE341"], 1)])
# Biological Sciences requirements
if self.department.major == "BS":
major_mandatory.append((["CH103"], 0))
if self.department.double_major == "":
major_mandatory.append((["BS490"], 0))
major_mandatory.append((["BS209", "BS205", "BS208", "BS200", "BS202", "BS319"], 0))
if "BS" in self.department.double_major:
major_mandatory.append((["BS209", "BS205", "BS208", "BS200", "BS202", "BS319"], 0))
# Chemical and Biomolecular Engineering requirements
if self.admitted_year <= 2015:
if self.department.major == "CBE":
if self.admitted_year <= 2010:
major_mandatory.append((["CBE201", "CBE301"], 0))
elif self.admitted_year <= 2013:
major_mandatory.append((["CBE201", "CBE202", "CBE203", "CBE205", "CBE221", "CBE301"], 0))
else:
major_mandatory.append((["CBE201", "CBE202", "CBE203", "CBE205",
"CBE221", "CBE301", "CBE442"], 0))
if self.department.double_major == "":
major_mandatory.append((["CBE490", "CBE496"], 0))
if "CBE" in self.department.minor:
major_mandatory.append((["CBE201", "CBE301"], 1))
if self.admitted_year > 2010:
major_mandatory.append((["CBE202"], 0))
if "CBE" in self.department.double_major:
major_mandatory.append((["CBE201", "CBE202", "CBE203", "CBE205",
"CBE221", "CBE301", "CBE442"], 0))
if self.admitted_year >= 2016:
if self.department.major == "CBE":
major_mandatory.append((["CBE201", "CBE202", "CBE203", "CBE205",
"CBE221", "CBE301", "CBE442"], 0))
if self.department.double_major == "":
major_mandatory.append((["CBE490", "CBE496"], 0))
if self.department.is_advanced_major == 1:
major_mandatory.append((["CBE206", "CBE261", "CBE311", "CBE331", "CBE332", "CBE351"], 2))
if "CBE" in self.department.minor:
major_mandatory.append((["CBE201", "CBE301"], 1))
major_mandatory.append((["CBE202"], 0))
if "CBE" in self.department.double_major:
major_mandatory.append((["CBE201", "CBE202", "CBE203", "CBE205",
"CBE221", "CBE301", "CBE442"], 0))
# Mathematical Sciences requirements
if self.admitted_year <= 2015:
if self.department.major == "MAS":
if self.department.double_major == "":
major_mandatory.append((["MAS109", "MAS201", "MAS202"], 1))
else:
major_mandatory.append(["MAS201", "MAS202"], 1)
major_mandatory.append((["MAS212", "MAS241", "MAS311", "MAS321", "MAS331", "MAS341", "MAS250"], 3))
if self.department.is_advanced_major == 1:
major_mandatory.append((["MAS242", "MAS312", "MAS430", "MAS440"], 0))
if "MAS" in self.department.double_major:
major_mandatory.append((["MAS212", "MAS241", "MAS311", "MAS321", "MAS331", "MAS341", "MAS250"], 3))
if self.admitted_year >= 2016:
if self.department.major == "MAS":
if self.department.double_major == "":
major_mandatory.append((["MAS109", "MAS201", "MAS202", "MAS250"], 2))
else:
major_mandatory.append((["MAS201", "MAS202", "MAS250"], 2))
major_mandatory.append((["MAS212", "MAS241", "MAS311", "MAS321", "MAS331", "MAS341", "MAS355"], 3))
if self.department.is_advanced_major == 1:
major_mandatory.append((["MAS242", "MAS312", "MAS430", "MAS440"], 0))
if "MAS" in self.department.double_major:
major_mandatory.append((["MAS212", "MAS241", "MAS311", "MAS321", "MAS331", "MAS341", "MAS355"], 3))
# Materials Science and Engineering requirements
if self.department.major == "MS":
major_mandatory.append((["MS212", "MS213", "MS310", "MS311", "MS321", "MS322"], 0))
if self.department.double_major == "":
major_mandatory.append((["MS490"], 0))
if "@MS@" in self.department.double_major:
major_mandatory.append((["MS212", "MS213", "MS310", "MS311", "MS321", "MS322"], 0))
# Nuclear and Quantum Engineering requirements
if self.department.major == "NQE":
major_mandatory.append((["NQE201", "NQE202", "NQE203", "NQE204",
"NQE301", "NQE303", "NQE401", "NQE402"], 0))
if self.department.double_major == "":
major_mandatory.append((["NQE490"], 0))
# Transdisciplinary Studies requirements
# Deferred: many of its courses have not been opened yet, so their course codes are unknown.
# Electrical Engineering requirements
if self.admitted_year <= 2015:
if self.department.major == "EE":
if self.department.double_major == "":
major_mandatory.append((["MAS201", "MAS109", "MAS202"], 1))
major_mandatory.append((["EE490"], 0))
else:
major_mandatory.append((["MAS109", "MAS201", "MAS202"], 2))
major_mandatory.append((["EE305", "EE405", "EE201", "EE202", "EE204", "EE209"], 0))
if "EE" in self.department.minor:
major_mandatory.append((["EE201", "EE202", "EE204", "EE303", "EE304", "EE305"], 0))
if "EE" in self.department.double_major:
major_mandatory.append((["EE305", "EE405", "EE201", "EE202", "EE204", "EE209"], 0))
elif self.admitted_year <= 2017:
if self.department.major == "EE":
if self.department.double_major == "":
major_mandatory.append((["MAS201", "MAS109", "MAS202"], 1))
major_mandatory.append((["EE490"], 0))
else:
major_mandatory.append((["MAS109", "MAS201", "MAS202"], 2))
major_mandatory.append((["EE305", "EE405", "EE201", "EE202", "EE204", "EE209"], 0))
if "EE" in self.department.minor:
major_mandatory.append((["EE305"], 0))
if "EE" in self.department.double_major:
major_mandatory.append((["EE305", "EE405", "EE201", "EE202", "EE204", "EE209"], 0))
else:
if self.department.major == "EE":
if self.department.double_major == "":
major_mandatory.append((["MAS201", "MAS109", "MAS202"], 1))
major_mandatory.append((["EE490"], 0))
else:
major_mandatory.append((["MAS109", "MAS201", "MAS202"], 2))
major_mandatory.append((["EE305", "EE405"], 0))
major_mandatory.append((["EE201", "EE202", "EE204", "EE209", "EE210", "EE211"], 3))
if "EE" in self.department.minor:
major_mandatory.append((["EE305"], 0))
if "EE" in self.department.double_major:
major_mandatory.append((["EE305", "EE405", "EE201", "EE202", "EE204", "EE209", "EE201", "EE211"], 3))
# School of Computing requirements
if self.admitted_year <= 2015:
if self.department.major == "CS":
major_mandatory.append((["MAS109"], 0))
major_mandatory.append((["CS204", "CS206", "CS300", "CS311", "CS320", "CS330"], 0))
if self.department.double_major == "":
major_mandatory.append((["CS490", "CS408"], 1))
if "CS" in self.department.double_major:
major_mandatory.append((["CS204", "CS206", "CS300", "CS311", "CS320", "CS330"], 0))
if self.admitted_year >= 2016:
if self.department.major == "CS":
major_mandatory.append((["MAS109"], 0))
major_mandatory.append((["CS204", "CS206", "CS300", "CS311", "CS320", "CS330"], 0))
if self.admitted_year >= 2020:
major_mandatory.append((["CS350", "CS360", "CS374", "CS408", "CS409", "CS423",
"CS442", "CS453", "CS454", "CS457", "CS459", "CS473",
"CS474", "CS482"], 13))
if self.department.double_major == "":
major_mandatory.append((["CS490", "CS408"], 1))
if "CS" in self.department.double_major:
major_mandatory.append((["CS204", "CS206", "CS300", "CS311", "CS320", "CS330"], 0))
# Aerospace Engineering requirements
if self.admitted_year <= 2015:
if self.department.major == "AE":
if self.department.double_major == "":
major_mandatory.append((["MAS109", "MAS201", "MAS202"], 1))
major_mandatory.append((["MAE406", "MAE490", "AE490", "AE401"], 3))
else:
major_mandatory.append((["MAS109", "MAS202", "MAS201"], 2))
major_mandatory.append((["AE210", "MAE210", "MAE211"], 2))
major_mandatory.append((["AE220", "MAE220", "MAE221"], 2))
major_mandatory.append((["AE300", "MAE365"], 1))
major_mandatory.append((["AE208", "AE308", "MAE308"], 2))
major_mandatory.append((["AE307", "AE309", "MAE309"], 2))
major_mandatory.append((["AE330", "MAE335"], 1))
major_mandatory.append((["AE400", "MAE405"], 1))
if "AE" in self.department.double_major:
major_mandatory.append((["AE210", "MAE210", "MAE211"], 2))
major_mandatory.append((["AE220", "MAE220", "MAE221"], 2))
major_mandatory.append((["AE300", "MAE365"], 1))
major_mandatory.append((["AE208", "AE308", "MAE308"], 2))
major_mandatory.append((["AE307", "AE309", "MAE309"], 2))
major_mandatory.append((["AE330", "MAE335"], 1))
major_mandatory.append((["AE400", "MAE405"], 1))
if self.admitted_year >= 2016:
if self.department.major == "AE":
major_mandatory.append((["AE208", "AE210", "AE220", "AE300", "AE307", "AE330", "AE400"], 0))
if self.department.double_major == "":
major_mandatory.append((["AE490", "AE401"]))
if "AE" in self.department.double_major:
major_mandatory.append((["AE208", "AE210", "AE220", "AE300", "AE307", "AE330", "AE400"], 0))
# Chemistry requirements
if self.department.major == "CH":
if self.department.double_major == "":
major_mandatory.append((["CH103", "CH104"], 0))
major_mandatory.append((["CH490"], 0))
else:
major_mandatory.append((["CH103"], 0))
major_mandatory.append((["CH491", "CH492"], 0))
major_mandatory.append((["CH211", "CH213", "CH221", "CH223", "CH252", "CH352", "CH353"], 0))
major_mandatory.append((["CH241", "CH344"], 1))
major_mandatory.append((["CH263", "CH361"], 1))
if "CH" in self.department.double_major:
major_mandatory.append((["CH211", "CH213", "CH221", "CH223", "CH252", "CH352", "CH353"], 0))
major_mandatory.append((["CH241", "CH344"], 1))
major_mandatory.append((["CH263", "CH361"], 1))
# Check whether each requirement has been satisfied
for course in courses:
if "@" + str(course.id) + "@" in self.replaced:
course = Course.query.filter_by(code=course.replaceables()).first()
for requirement in list(major_mandatory):
# a research course (code containing "490") can fill any "490" slot in a requirement
if ("490" in course.code) and any("490" in code for code in requirement[0]):
requirement[0].remove(next(code for code in requirement[0] if "490" in code))
if course.code in requirement[0]:
requirement[0].remove(course.code)
if len(requirement[0]) <= int(requirement[1]):
major_mandatory.remove(requirement)
return general_mandatory, major_mandatory
def remaining_course_alert(self):
"""
returns alerts for remaining courses.
the list of return values consists of tuples that include
1) the number of candidate courses,
2) the number of those courses that must still be taken, and
3) the Course objects that can satisfy the requirement.
if there are no courses left to be taken, returns the 'Success' string.
"""
if not self.valid_department():
return None
general_mandatory, major_mandatory = self.remaining_courses()
if len(general_mandatory) + len(major_mandatory) == 0:
return "Success"
alerts, others = [], []
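# Each alert is a tuple of (number of candidate courses, number that must still be taken, list of Course objects).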
for course in general_mandatory:
if len(course) < 3:
course1 = Course.query.filter_by(code=course[0]).first()
course2 = Course.query.filter_by(code=course[1]).first()
alerts.append((2, 1, [course1, course2]))
else:
others.append(Course.query.filter_by(code=course).first())
if len(others) > 0:
alerts.append((len(others), len(others), others))
for code in major_mandatory:
courses = []
for course in code[0]:
courses.append(Course.query.filter_by(code=course).first())
alerts.append((len(code[0]), len(code[0]) - code[1], courses))
return alerts
def remaining_credit_alert(self):
happy, exciting, leadership, physical, mg, mb, me, rs, mj, om, hm = self.remaining_credits()
alerts = {"즐거운 대학생활": 1 if happy != 0 else None, "신나는 대학생활": 1 if exciting != 0 else None,
"인성/리더십": leadership if leadership != 0 else None, "체육": physical if physical != 0 else None,
"교양필수": mg if mg != 0 else None, "기초필수": mb if mb != 0 else None, "기초선택": me if me != 0 else None,
"연구": rs if rs != 0 else None, "전공 교과목": mj[1] if mj[1] != 0 else None,
"전공필수": mj[0] if mj[0] != 0 else None, "타학과 전공 교과목": om[1] if om[1] != 0 else None,
"타학과 전공필수": om[0] if om[0] != 0 else None, "인문사회선택": hm if hm != 0 else None}
return alerts
def check_credit_requirements(self):
"""
checks whether the credit requirement for graduation is fulfilled.
Returns
-------
check_credit_requirements: bool
True if there are no remaining credit requirements
"""
return self.render_remaining_total_credits() == 0
def check_course_requirements(self):
"""
checks whether the course requirement for graduation is fulfilled.
Returns
-------
check_course_requirements: bool
True if there are no remaining course requirements
"""
if self.department.is_individually_designed == 1:
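# An individually designed major must include courses from at least two departments other than the user's own.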
other_majors = []
for course in self.courses:
if "@" + str(course.id) + "@" in self.individual:
department = course.extract_department(replace=False)
if department not in other_majors:
other_majors.append(department)
if len(other_majors) < 2:
return False
return len(self.remaining_courses()[0] + self.remaining_courses()[1]) == 0
def graduate_percent(self):
"""
renders the graduation progress percentage so far.
this percentage is not perfectly accurate, but is implemented for visual feedback :)
Returns
-------
percentage: float
percentage achieved for graduation.
"""
try:
remaining = self.render_remaining_total_credits()
required = self.required_credits()[7]
percent = round((1 - remaining / max(required, remaining)) * 100, 1)
except TypeError:
return 0
try:
if not self.check_course_requirements():
percent -= 1
except TypeError:
pass
return max(0, percent)
def render_replaceables(self):
"""
renders replaceable courses that can be chosen for requirement calculation.
Returns
-------
render: list
list of tuples of course codes and code for replaceable courses.
"""
if not self.valid_department():
return None
courses = self.courses
render = []
for course in courses:
if course.extract_department(replace=True) in self.department.major + self.department.double_major:
replaceable = Course.query.filter_by(code=course.replaceables()).first()
render.append((course, replaceable))
elif (course.extract_department(replace=True) == "CE") and ("CE" in self.department.minor):
replaceable = Course.query.filter_by(code=course.replaceables()).first()
render.append((course, replaceable))
return render
def render_recognizables(self):
"""
renders recognizable courses that can be chosen for requirement calculation.
Returns
-------
render: list
list of tuples of course codes and codes for recognizable courses.
"""
if not self.valid_department():
return None
dict_departments = {"MAS": "수리과학과", "PH": "물리학과", "CH": "화학과", "BS": "생명과학과",
"ME": "기계공학과", "AE": "항공우주공학과", "EE": "전기및전자공학부", "CE": "건설및환경공학과학과",
"CS": "전산학부", "BiS": "바이오및뇌공학과", "CBE": "생명화학공학과", "ID": "산업디자인학과",
"IE": "산업및시스템공학과", "MS": "신소재공학과", "NQE": "원자력및양자공학과",
"MSB": "기술경영학부", "TS": "융합인재학부"}
courses = self.courses
render = []
for course in courses:
if "@" + str(course.id) + "@" in self.replaced:
continue
availables = []
recognizables = course.recognizables()
if recognizables == "None":
continue
for recognizable in recognizables:
if recognizable in self.department.major + self.department.double_major:
if (self.department.major in course.code) and (recognizable == self.department.major):
pass
else:
availables.append(dict_departments[recognizable])
if availables:
render.append((course, availables))
return render
def render_doubly_recognizables(self):
"""
renders doubly recognizable courses that can be chosen for requirement calculation.
Returns
-------
render: list
list of tuples of course codes and codes for doubly recognizable courses.
"""
if not self.valid_department():
return None
dict_departments = {"MAS": "수리과학과", "PH": "물리학과", "CH": "화학과", "BS": "생명과학과",
"ME": "기계공학과", "AE": "항공우주공학과", "EE": "전기및전자공학부", "CE": "건설및환경공학과학과",
"CS": "전산학부", "BiS": "바이오및뇌공학과", "CBE": "생명화학공학과", "ID": "산업디자인학과",
"IE": "산업및시스템공학과", "MS": "신소재공학과", "NQE": "원자력및양자공학과",
"MSB": "기술경영학부", "TS": "융합인재학부"}
courses = self.courses
render = []
for course in courses:
availables = []
if "@" + str(course.id) + "@" in self.replaced:
continue
recognizables = course.recognizables()
if recognizables == "None":
continue
course_code = course.extract_department(replace=False)
if course_code not in recognizables:
recognizables.append(course_code)
more_than_two = 0
ce_in_minor = "CE" if "CE" in self.department.minor else ""
for recognizable in recognizables:
if recognizable in self.department.major + self.department.double_major + ce_in_minor:
more_than_two += 1
availables.append(dict_departments[recognizable])
if more_than_two >= 2:
render.append((course, availables))
return render
def render_individual(self):
if not self.valid_department():
return None
render = []
courses = self.courses
for course in courses:
if "@" + str(course.id) + "@" in self.replaced:
if "@" + str(course.id) + "@" in self.individual:
self.individual = self.individual.replace("@" + str(course.id) + "@", "")
db.session.commit()
continue
if "전공" in course.subject_type:
if self.department.major not in course.code:
render.append(course)
return render
@login.user_loader
def load_user(id):
return User.query.get(int(id))
class Department(db.Model):
"""
db.Model class that stores the department choices of each user.
contains the major department, minor/double_major departments, etc.
Parameters
----------
id, userid: db.Column
major: db.Column
string that stores the code for major department
is_advanced_major: db.Column
binary integer where 1 for advanced majors and 0 if not
is_individually_designed: db.Column
binary integer where 1 if chosen individually designed major and 0 otherwise.
double_major: db.Column
string that contains the list of chosen double majors
minor: db.Column
string that contains the list of chosen minors
"""
id = db.Column(db.Integer, primary_key=True)
userid = db.Column(db.Integer, db.ForeignKey('user.id'))
major = db.Column(db.String(64))
is_advanced_major = db.Column(db.Integer)
is_individually_designed = db.Column(db.Integer)
double_major = db.Column(db.String(64))
minor = db.Column(db.String(64))
def major_name_kr(self):
"""
renders the department name in Korean.
Returns
-------
major, double_major, minor: str
names in Korean.
"""
dict_departments = {"MAS": "수리과학과", "PH": "물리학과", "CH": "화학과", "BS": "생명과학과",
"ME": "기계공학과", "AE": "항공우주공학과", "EE": "전기및전자공학부", "CE": "건설및환경공학과학과",
"CS": "전산학부", "BiS": "바이오및뇌공학과", "CBE": "생명화학공학과", "ID": "산업디자인학과",
"IE": "산업및시스템공학과", "@MS@": "신소재공학과", "NQE": "원자력및양자공학과",
"MSB": "기술경영학부", "TS": "융합인재학부"}
major = '(선택 정보 없음)'
double = []
minor = []
for key in dict_departments:
if key == self.major:
major = dict_departments[key]
elif key in self.double_major:
double.append(dict_departments[key])
elif key in self.minor:
minor.append(dict_departments[key])
return major, double, minor
def validate_major(self):
"""
validates major choices to avoid raising errors.
prevents the same department from being chosen as both a minor and a double major.
"""
if self.major is None or self.major == "":
return
dict_departments = {"MAS": "수리과학과", "PH": "물리학과", "CH": "화학과", "BS": "생명과학과",
"ME": "기계공학과", "AE": "항공우주공학과", "EE": "전기및전자공학부", "CE": "건설및환경공학과학과",
"CS": "전산학부", "BiS": "바이오및뇌공학과", "CBE": "생명화학공학과", "ID": "산업디자인학과",
"IE": "산업및시스템공학과", "MS": "신소재공학과", "NQE": "원자력및양자공학과",
"MSB": "기술경영학부", "TS": "융합인재학부"}
for key in dict_departments:
if key in self.minor:
if key in self.double_major:
self.minor.replace(key, "")
db.session.commit()
class Course(db.Model):
"""
db.Model class that stores the courses that can be taken by each user.
a relationship table is defined with users so that each user can query courses that he/she has taken
or can query for each course the users who have taken it
Parameters
----------
id: db.Column
course_id: db.Column
course id that is identical to the CAIS system (e.g. 09.113)
department: db.Column
department that opens the course (e.g. 수리과학과)
subject_type: db.Column
type of the subject (e.g. 기초필수)
code: db.Column
course code of the subject (e.g. MAS101)
name: db.Column
name of the course
credit: db.Column
credit of the course. includes 'AU' at the end if AU credit.
"""
id = db.Column(db.Integer, primary_key=True)
# userid = db.Column(db.Integer, db.ForeignKey('user.id'))
course_id = db.Column(db.String(8))
department = db.Column(db.String(32))
subject_type = db.Column(db.String(32))
code = db.Column(db.String(32))
name = db.Column(db.String(200))
credit = db.Column(db.String(8))
def recognizables(self):
"""
renders departments that can recognize the course
Returns
-------
recognizables: list
"""
if self.code in ["ME203", "ME301", "ME312", "ME330", "ME351", "ME420",
"IE363", "CH211", "CH241"]:
return ["CE"]
if self.code in ["CH223", "CH325", "BiS335"]:
return ["BS"]
if self.code in ["CH221", "CH263"]:
return ["CE", "BS"]
if self.code in ["PH212", "PH221", "PH301", "MAE200", "MAE221", "MAE230", "MAE231",
"IE331", "IE341", "IE342", "EE202", "EE204", "PH231", "EE321", "CS206", "CS300"]:
return ["MAS"]
if self.code in ["ME221"]:
return ["CE", "MAS"]
if self.code in ["ME231"]:
return ["CE", "MAS", "IE"]
if self.code in ["CBE471", "EE381"]:
return ["CE", "IE"]
if self.code in ["BiS438"]:
return ["BS", "IE"]
if self.code in ["CE206", "CE350", "ME200", "ME205", "ME303", "ME208", "ME370", "ME453",
"ME460", "MSB230", "PH221", "BiS200", "BiS252", "BiS321", "BiS351", "BiS352", "BiS377",
"BiS437", "BiS470", "ID213", "ID216", "ID301", "ID303", "ID304", "ID307", "ID308",
"ID309", "ID310", "ID403", "ID407", "BS223", "BS357", "CBE260", "CBE362", "CBE483",
"MAS212", "MAS241", "MAS242", "MAS270", "MAS275", "MAS311", "MAS365", "MAS475", "MAS476",
"MS481", "MS635", "NQE201", "NQE202", "NQE272", "NQE281", "EE201", "EE202", "EE204", "EE303",
"EE304", "EE206", "EE305", "EE312", "EE321", "EE324", "EE342", "EE372", "EE411",
"EE414", "EE421", "CS204", "CS211", "CS230", "CS310", "CS320", "CS322", "CS341", "CS350",
"CS370", "CS376", "CS380", "CS402", "CS440", "CS470", "MAE230"]:
return ["IE"]
return "None"
def replaceables(self):
"""
renders the course code that can replace this course
Returns
-------
replaceable: str
"""
if self.code == "ME221":
return "AE210"
if self.code == "ME231":
return "AE230"
if self.code == "ME311":
return "AE311"
if self.code == "ME301":
return "AE370"
if self.code == "MAS275":
return "CS204"
if self.code == "EE312":
return "CS311"
if self.code == "CH221":
return "CBE203"
if self.code == "BS209":
return "CBE260"
if self.code == "CH213":
return "CBE303"
if self.code == "BiS438":
return "CBE362"
if self.code == "CH211":
return "CBE404"
if self.code == "BiS622":
return "CBE567"
if self.code == "MAE633":
return "CBE653"
if self.code == "MS654":
return "CBE712"
if self.code == "BS760":
return "CBE861"
if self.code == "IE362":
return "IE260"
if self.code == "IE442":
return "IE343"
if self.code == "IE641":
return "IE541"
if self.code == "CS206":
return "IE260"
return "None"
def extract_department(self, replace=False):
"""
extracts the department code prefix from the course code
Returns
-------
code: str
"""
if not replace:
code = self.code
if replace:
code = self.replaceables()
try:
for i in range(10):
code = code.replace(str(i), "")
except NameError:
return None
return code
|
the-stack_106_19368
|
from django.shortcuts import redirect
from django.utils.cache import patch_vary_headers
from amo.helpers import urlparams
from amo.urlresolvers import set_url_prefix
from mkt.constants.carriers import CARRIER_MAP
from . import set_carrier
class CarrierURLMiddleware(object):
"""
Supports pseudo-URL prefixes that define a custom carrier store.
For example, if you browse the Marketplace at /telefonica/ then this
middleware will
1. strip off the telefonica part so all other URLs work as expected;
2. allow you to access 'telefonica' from mkt.carriers.get_carrier(); and
3. set a prefix so that reverse('whatever') returns /telefonica/whatever.
See bug 769421
"""
def process_request(self, request):
carrier = stored_carrier = None
set_url_prefix(None)
set_carrier(None)
# If I have a cookie use that carrier.
remembered = request.COOKIES.get('carrier')
if remembered in CARRIER_MAP:
carrier = stored_carrier = remembered
choice = request.REQUEST.get('carrier')
if choice in CARRIER_MAP:
carrier = choice
elif 'carrier' in request.GET:
# We are clearing the carrier.
carrier = None
# Update the cookie if the value has changed.
if carrier != stored_carrier:
request.set_cookie('carrier', carrier)
set_carrier(carrier)
def process_response(self, request, response):
if request.REQUEST.get('vary') != '0':
patch_vary_headers(response, ['Accept-Language', 'Cookie'])
return response
|
the-stack_106_19370
|
from django.http import Http404
from django.views.decorators.cache import cache_page
from django.utils.decorators import method_decorator
from rest_framework.response import Response
from rest_framework.generics import ListAPIView
from rest_framework.views import APIView
from . import filters
from . import models
from . import serializers
from .settings import CACHE_PAGE_TIMEOUT
import json
DEFAULT_COUNTRY_CODE = 'CHN'
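# ISO 3166-1 alpha-3 code for China, used when a request does not specify a countryCode.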
class LatestStatisticsView(APIView):
"""最新统计信息"""
def get_object(self):
inst = models.Statistics.objects.order_by('-id').first()
if inst is None:
raise Http404
return inst
@method_decorator(cache_page(
CACHE_PAGE_TIMEOUT, key_prefix='statistics-lastest'))
def get(self, request):
obj = self.get_object()
result = {}
for field in models.Statistics._meta.fields:
name = field.attname
value = getattr(obj, name)
if name not in models.Statistics.JSON_FIELDS:
result[name] = value
continue
try:
value = json.loads(value)
except ValueError:
value = None
result[name] = value
serializer = serializers.LatestStatisticsSerializer(result)
return Response(serializer.data)
class StatisticsListView(ListAPIView):
"""统计信息列表"""
serializer_class = serializers.StatisticsSerializer
def get_queryset(self):
result = []
qs = models.Statistics.objects.all().order_by('-modifyTime')
values_fields = (
'globalStatistics', 'domesticStatistics',
'internationalStatistics', 'modifyTime', 'createTime')
for item in qs.values_list(*values_fields):
item = dict(zip(values_fields, item))
statistics = {}
for name, value in item.items():
if name not in models.Statistics.JSON_FIELDS:
statistics[name] = value
continue
try:
value = json.loads(value)
except ValueError:
value = None
statistics[name] = value
result.append(statistics)
return result
@method_decorator(cache_page(
CACHE_PAGE_TIMEOUT, key_prefix='statistics-list'))
def dispatch(self, *args, **kwargs):
return super(StatisticsListView, self).dispatch(*args, **kwargs)
class CountryListView(ListAPIView):
serializer_class = serializers.CountrySerializer
filter_class = filters.CountryFilter
def get_queryset(self):
return models.Country.objects.all().order_by(
'continents', 'countryCode')
@method_decorator(cache_page(
CACHE_PAGE_TIMEOUT, key_prefix='country-list'))
def dispatch(self, *args, **kwargs):
return super(CountryListView, self).dispatch(*args, **kwargs)
class CountryListDailyView(ListAPIView):
serializer_class = serializers.CountryDailySerializer
filter_class = filters.CountryFilter
def get_queryset(self):
return models.Country.objects.all().order_by(
'continents', 'countryCode')
def list(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.get_queryset())
result = []
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
for item in serializer.data:
dailyData = json.loads(item['dailyData'])
result.extend(dailyData)
return self.get_paginated_response(result)
serializer = self.get_serializer(queryset, many=True)
for item in serializer.data:
dailyData = json.loads(item['dailyData'])
result.extend(dailyData)
return Response(result)
@method_decorator(cache_page(
CACHE_PAGE_TIMEOUT, key_prefix='country-list-daily'))
def dispatch(self, *args, **kwargs):
return super(CountryListDailyView, self).dispatch(*args, **kwargs)
class CountryRetrieveView(APIView):
def get_object(self, countryCode):
country = models.Country.objects.filter(
countryCode=countryCode).first()
if country is None:
raise Http404
return country
@method_decorator(cache_page(
CACHE_PAGE_TIMEOUT, key_prefix='country-detail'))
def get(self, request, countryCode):
country = self.get_object(countryCode)
serializer = serializers.CountrySerializer(country)
return Response(serializer.data)
class CountryDailyView(APIView):
def get_object(self, countryCode):
country = models.Country.objects.filter(
countryCode=countryCode).first()
if country is None:
raise Http404
return country
@method_decorator(cache_page(
CACHE_PAGE_TIMEOUT, key_prefix='country-daily-list'))
def get(self, request, countryCode):
country = self.get_object(countryCode)
result = country.dailyData
result = json.loads(result)
return Response(result)
class ProvinceListView(ListAPIView):
"""省列表"""
serializer_class = serializers.ProvinceSerializer
filter_class = filters.ProvinceFilter
def get_queryset(self):
countryCode = self.kwargs['countryCode']
if not countryCode:
countryCode = DEFAULT_COUNTRY_CODE
return models.Province.objects.filter(
countryCode=countryCode).order_by('provinceCode')
@method_decorator(cache_page(
CACHE_PAGE_TIMEOUT, key_prefix='province-list'))
def dispatch(self, *args, **kwargs):
return super(ProvinceListView, self).dispatch(*args, **kwargs)
class ProvinceDailyView(APIView):
"""省按天返回列表"""
def get_object(self, countryCode, provinceCode):
province = models.Province.objects.filter(
countryCode=countryCode, provinceCode=provinceCode).first()
if province is None:
raise Http404
return province
@method_decorator(cache_page(
CACHE_PAGE_TIMEOUT, key_prefix='province-daily-list'))
def get(self, request, countryCode, provinceCode):
if countryCode is None:
countryCode = DEFAULT_COUNTRY_CODE
province = self.get_object(countryCode, provinceCode)
result = province.dailyData
result = json.loads(result)
return Response(result)
class ProvinceDailyByNameView(APIView):
"""省按天返回列表"""
def get_object(self, countryCode, provinceName):
province = models.Province.objects.filter(
countryCode=countryCode, provinceName=provinceName).first()
if province is None:
raise Http404
return province
@method_decorator(cache_page(
CACHE_PAGE_TIMEOUT, key_prefix='province-daily-list-by-name'))
def get(self, request, countryCode, provinceName):
if countryCode is None:
countryCode = DEFAULT_COUNTRY_CODE
province = self.get_object(countryCode, provinceName)
result = province.dailyData
result = json.loads(result)
return Response(result)
class ProvinceListDailyView(ListAPIView):
serializer_class = serializers.ProvinceDailySerializer
filter_class = filters.ProvinceFilter
def get_queryset(self):
countryCode = self.kwargs['countryCode']
if not countryCode:
countryCode = DEFAULT_COUNTRY_CODE
return models.Province.objects.filter(
countryCode=countryCode).order_by('provinceCode')
def list(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.get_queryset())
result = []
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
for item in serializer.data:
dailyData = json.loads(item['dailyData'])
result.extend(dailyData)
return self.get_paginated_response(result)
serializer = self.get_serializer(queryset, many=True)
for item in serializer.data:
dailyData = json.loads(item['dailyData'])
result.extend(dailyData)
return Response(result)
@method_decorator(cache_page(
CACHE_PAGE_TIMEOUT, key_prefix='province-list-daily'))
def dispatch(self, *args, **kwargs):
return super(ProvinceListDailyView, self).dispatch(*args, **kwargs)
class ProvinceRetrieveView(APIView):
"""通过省编码获取数据"""
def get_object(self, countryCode, provinceCode):
province = models.Province.objects.filter(
countryCode=countryCode, provinceCode=provinceCode).first()
if province is None:
raise Http404
return province
@method_decorator(cache_page(
CACHE_PAGE_TIMEOUT, key_prefix='province-detail'))
def get(self, request, countryCode, provinceCode):
if countryCode is None:
countryCode = DEFAULT_COUNTRY_CODE
province = self.get_object(countryCode, provinceCode)
serializer = serializers.ProvinceSerializer(province)
return Response(serializer.data)
class ProvinceRetrieveByNameView(APIView):
"""通过省名获取数据"""
def get_object(self, countryCode, provinceName):
province = models.Province.objects.filter(
countryCode=countryCode, provinceName=provinceName).first()
if province is None:
raise Http404
return province
@method_decorator(cache_page(
CACHE_PAGE_TIMEOUT, key_prefix='province-detail-by-name'))
def get(self, request, countryCode, provinceName=None):
if countryCode is None:
countryCode = DEFAULT_COUNTRY_CODE
province = self.get_object(countryCode, provinceName)
serializer = serializers.ProvinceSerializer(province)
return Response(serializer.data)
class CityListView(ListAPIView):
serializer_class = serializers.CitySerializer
filter_class = filters.CityFilter
def get_queryset(self):
countryCode = self.kwargs['countryCode']
if not countryCode:
countryCode = DEFAULT_COUNTRY_CODE
return models.City.objects.filter(
countryCode=countryCode).order_by('provinceCode', 'cityName')
@method_decorator(cache_page(
CACHE_PAGE_TIMEOUT, key_prefix='city-list'))
def dispatch(self, *args, **kwargs):
return super(CityListView, self).dispatch(*args, **kwargs)
class CityRetrieveByNameView(APIView):
def get_object(self, countryCode, cityName):
city = models.City.objects.filter(
countryCode=countryCode, cityName=cityName).first()
if city is None:
raise Http404
return city
@method_decorator(cache_page(
CACHE_PAGE_TIMEOUT, key_prefix='city-detail-by-name'))
def get(self, request, countryCode, cityName):
if countryCode is None:
countryCode = DEFAULT_COUNTRY_CODE
city = self.get_object(countryCode, cityName)
serializer = serializers.CitySerializer(city)
return Response(serializer.data)
|
the-stack_106_19371
|
import numpy as np
import cv2
import json
import sys
shape='n/a'
imgPath="C:\\xampp\\htdocs\\projektmunka\\python\\haromszog.png"
img = cv2.imread(imgPath, -1)
alpha = img[:,:,3]
img = ~alpha
thresh = 100
ret,thresh_img = cv2.threshold(img, thresh, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(thresh_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
img_contours = np.zeros(img.shape)
img=cv2.drawContours(img_contours, contours, -1, (0,255,0), 3)
for contour in contours:
approx = cv2.approxPolyDP(contour, 0.01* cv2.arcLength(contour, True), True)
cv2.drawContours(img, [approx], 0, (0, 0, 0), 5)
x = approx.ravel()[0]
y = approx.ravel()[1] - 5
if len(approx) == 3:
shape="Triangle"
elif len(approx) == 4:
x1 ,y1, w, h = cv2.boundingRect(approx)
aspectRatio = float(w)/h
if aspectRatio >= 0.95 and aspectRatio <= 1.05:
shape="Square"
else:
shape="Rectangle"
elif len(approx) == 5:
shape="Pentagon"
elif len(approx) == 6:
shape="Hexagon"
elif len(approx) == 7:
shape="Optagon"
elif len(approx) == 8:
shape="Octagon"
elif len(approx) == 10:
shape="Star"
else:
shape="Circle"
print(shape)
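# --- Hedged extension (not part of the original script) ---
# x and y computed inside the loop above are currently unused; they are the
# usual anchor point for drawing a text label next to a contour. This sketch
# labels the last contour that was processed and writes the annotated image
# out; the output filename is an assumption.
if contours:
    cv2.putText(img, shape, (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
    cv2.imwrite("labeled_shapes.png", img.astype(np.uint8))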
|
the-stack_106_19373
|
from django.conf import settings
from django.db.models import Model
from wagtail import hooks
from wagtail.models import Locale
class SimpleTranslation(Model):
"""
SimpleTranslation, dummy model to create the `submit_translation` permission.
We need this model to be concrete or the following management commands will misbehave:
- `remove_stale_contenttypes`, will drop the perm
- `dump_data`, will complain about the missing table
"""
class Meta:
default_permissions = []
permissions = [
("submit_translation", "Can submit translations"),
]
@hooks.register("after_create_page")
def after_create_page(request, page):
"""Creates page aliases in other locales when a page is created.
Whenever a page is created under a specific locale, this signal handler
creates an alias page for that page under the other locales.
e.g. When an editor creates the page "blog/my-blog-post" under the English
tree, this signal handler creates an alias of that page called
"blog/my-blog-post" under the other locales' trees.
"""
if getattr(settings, "WAGTAILSIMPLETRANSLATION_SYNC_PAGE_TREE", False):
# Check if the source tree needs to be synchronised into any other trees
# Create aliases in all those locales
for locale in Locale.objects.exclude(pk=page.locale_id):
if not page.has_translation(locale):
page.copy_for_translation(locale, copy_parents=True, alias=True)
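# --- Hedged usage note (not part of the original module) ---
# The hook above is a no-op unless the setting it checks is enabled; a minimal
# settings.py fragment illustrating that switch:
#
#     WAGTAILSIMPLETRANSLATION_SYNC_PAGE_TREE = True
#
# With the flag on, creating "blog/my-blog-post" in one locale calls
# page.copy_for_translation(locale, copy_parents=True, alias=True) for every
# other Locale, producing alias pages in those locales' trees.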
|
the-stack_106_19375
|
import asyncio
import json
from multiprocessing import Queue
from typing import Awaitable, Dict, List
import websockets
from liualgotrader.common import market_data
from liualgotrader.common.tlog import tlog
from ..common import config
from .streaming_base import StreamingBase, WSConnectState
NY = "America/New_York"
class AlpacaStreaming(StreamingBase):
END_POINT = "wss://data.alpaca.markets/stream"
def __init__(self, key: str, secret: str, queues: List[Queue]):
self.key = key
self.secret = secret
self.state: WSConnectState = WSConnectState.NOT_CONNECTED
self.websocket: websockets.client.WebSocketClientProtocol
self.consumer_task: asyncio.Task
self.stream_map: Dict = {}
super().__init__(queues)
async def connect(self) -> bool:
"""Connect web-socket and authenticate, update internal state"""
try:
self.websocket = await websockets.client.connect(self.END_POINT)
self.state = WSConnectState.CONNECTED
except websockets.WebSocketException as wse:
tlog(f"Exception when connecting to Alpaca WS {wse}")
self.state = WSConnectState.NOT_CONNECTED
return False
auth_payload = {
"action": "authenticate",
"data": {"key_id": self.key, "secret_key": self.secret},
}
await self.websocket.send(json.dumps(auth_payload))
_greeting = await self.websocket.recv()
if isinstance(_greeting, bytes):
_greeting = _greeting.decode("utf-8")
msg = json.loads(_greeting)
if msg.get("data", {}).get("status") != "authorized":
tlog(f"Invalid Alpaca API credentials, Failed to authenticate: {msg}")
raise ValueError(
f"Invalid Alpaca API credentials, Failed to authenticate: {msg}"
)
self.state = WSConnectState.AUTHENTICATED
self.consumer_task = asyncio.create_task(
self._consumer(), name="alpaca-streaming-consumer-task"
)
tlog("Successfully connected to Alpaca web-socket")
return True
async def close(self) -> None:
"""Close open websocket, if open"""
if self.state not in (
WSConnectState.AUTHENTICATED,
WSConnectState.CONNECTED,
):
raise ValueError("can't close a non-open connection")
try:
await self.websocket.close()
except websockets.WebSocketException as wse:
tlog(f"failed to close web-socket w exception {wse}")
self.state = WSConnectState.NOT_CONNECTED
async def subscribe(self, symbol: str, handler: Awaitable) -> bool:
if self.state != WSConnectState.AUTHENTICATED:
raise ValueError(
f"{symbol} web-socket not ready for listening, make sure connect passed successfully"
)
_subscribe_payload = {
"action": "listen",
"data": {"streams": [f"alpacadatav1/AM.{symbol}"]},
}
await self.websocket.send(json.dumps(_subscribe_payload))
q_id = int(
list(market_data.minute_history.keys()).index(symbol)
/ config.num_consumer_processes_ratio
)
self.stream_map[symbol] = (handler, q_id)
return True
async def unsubscribe(self, symbol: str) -> bool:
if self.state != WSConnectState.AUTHENTICATED:
raise ValueError(
f"{symbol} web-socket not ready for listening, make sure connect passed successfully"
)
_subscribe_payload = {
"action": "unlisten",
"data": {"streams": [f"alpacadatav1/AM.{symbol}"]},
}
await self.websocket.send(json.dumps(_subscribe_payload))
self.stream_map.pop(symbol, None)
        return True
async def _reconnect(self) -> None:
"""automatically reconnect socket, and re-subscribe, internal"""
tlog(f"{self.consumer_task.get_name()} reconnecting")
await self.close()
if await self.connect():
_dict = self.stream_map.copy()
self.stream_map.clear()
            for symbol in _dict:
                # stored values are (handler, queue_id) tuples; re-subscribe with the handler only
                await self.subscribe(symbol, _dict[symbol][0])
else:
tlog(
f"{self.consumer_task.get_name()} failed reconnect check log for reason"
)
async def _consumer(self) -> None:
"""Main tread loop for consuming incoming messages, internal only """
tlog(f"{self.consumer_task.get_name()} starting")
try:
while True:
_msg = await self.websocket.recv()
if isinstance(_msg, bytes):
_msg = _msg.decode("utf-8")
msg = json.loads(_msg)
stream = msg.get("stream")
if stream != "listening":
try:
_func, _q_id = self.stream_map.get(stream[3:], None)
if _func:
await _func(stream, msg["data"], self.queues[_q_id])
else:
tlog(
f"{self.consumer_task.get_name()} received {_msg} to an unknown stream {stream}"
)
except Exception as e:
tlog(
f"{self.consumer_task.get_name()} exception {e.__class__.__qualname__}:{e}"
)
except websockets.WebSocketException as wse:
tlog(f"{self.consumer_task.get_name()} received WebSocketException {wse}")
await self._reconnect()
except asyncio.CancelledError:
tlog(f"{self.consumer_task.get_name()} cancelled")
tlog(f"{self.consumer_task.get_name()} completed")
@classmethod
async def minutes_handler(cls, symbol: str, data: Dict, queue: Queue) -> None:
if data["ev"] != "AM":
tlog(
f"AlpacaStreaming.minutes_handler() got invalid event data: {symbol}:{data}"
)
return
if symbol[3:] != data["T"]:
tlog(
f"AlpacaStreaming.minutes_handler() symbol does not match data payload {symbol}:{data}"
)
return
try:
data["EV"] = "AM"
data["open"] = data["o"]
data["high"] = data["h"]
data["low"] = data["l"]
data["close"] = data["c"]
data["volume"] = data["v"]
data["vwap"] = data["vw"]
data["average"] = data["a"]
queue.put(json.dumps(data))
except Exception as e:
tlog(
f"Exception in handle_minute_bar(): exception of type {type(e).__name__} with args {e.args}"
)
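# --- Hedged usage sketch (not part of the original module) ---
# Minimal illustration of the intended call sequence: connect, subscribe a
# symbol to the minute-bar handler, let data flow, then tear down. The key,
# secret and symbol are placeholders, and the sketch assumes
# market_data.minute_history already contains the symbol, since subscribe()
# derives the queue index from its position in that dict.
async def _example_usage() -> None:
    queues: List[Queue] = [Queue()]
    streamer = AlpacaStreaming(key="<key-id>", secret="<secret-key>", queues=queues)
    if await streamer.connect():
        await streamer.subscribe("AAPL", AlpacaStreaming.minutes_handler)
        await asyncio.sleep(60)  # minute bars arrive on queues[0] via minutes_handler
        await streamer.unsubscribe("AAPL")
        await streamer.close()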
|
the-stack_106_19378
|
class Wire:
def __init__( self, nm, layer, direction, *, clg, spg):
self.nm = nm
self.layer = layer
self.direction = direction
assert direction in ['v','h']
self.clg = clg
self.spg = spg
def segment( self, netName, pinName, center, bIdx, eIdx, *, bS=None, eS=None):
if bS is None: bS=self.spg
if eS is None: eS=self.spg
(c,(w,clr)) = self.clg.value( center)
c0 = c - w//2
c1 = c + w//2
bPhys = bS.value(bIdx)[0]
ePhys = eS.value(eIdx)[0]
if self.direction == 'h':
rect = [ bPhys, c0, ePhys, c1]
else:
rect = [ c0, bPhys, c1, ePhys]
data = { 'netName' : netName, 'layer' : self.layer, 'rect' : rect}
if pinName is not None:
data['pin'] = pinName
if clr is not None:
data['color'] = clr
return data
class Region:
def __init__( self, nm, layer, *, h_grid, v_grid):
self.nm = nm
self.layer = layer
self.h_grid = h_grid
self.v_grid = v_grid
def physical_x( self, grid_x):
return self.v_grid.value( grid_x)[0]
def physical_y( self, grid_y):
return self.h_grid.value( grid_y)[0]
def segment( self, netName, pinName, grid_x0, grid_y0, grid_x1, grid_y1):
rect = [self.physical_x(grid_x0), self.physical_y(grid_y0),
self.physical_x(grid_x1), self.physical_y(grid_y1)]
data = { 'netName' : netName, 'layer' : self.layer, 'rect' : rect}
if pinName is not None:
data['pin'] = pinName
return data
class Via:
def __init__( self, nm, layer, *, h_clg, v_clg, h_ext=1, v_ext=1):
self.nm = nm
self.layer = layer
self.h_clg = h_clg
self.v_clg = v_clg
self.h_ext = h_ext
self.v_ext = v_ext
def physical_xs( self, p):
(c,(w,_)) = self.v_clg.value( p)
return (c-w//2,c+w//2)
def physical_ys( self, p):
(c,(w,_)) = self.h_clg.value( p)
return (c-w//2,c+w//2)
def segment( self, netName, pinName, grid_cx, grid_cy):
(x0,x1) = self.physical_xs( grid_cx)
(y0,y1) = self.physical_ys( grid_cy)
rect = [ x0, y0, x1, y1]
data = { 'netName' : netName, 'layer' : self.layer, 'rect' : rect}
if pinName is not None:
data['pin'] = pinName
return data
def center_to_metal_edge(self, direction):
assert direction in ('h', 'v')
return getattr(self, f'{direction}_ext')
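# --- Hedged usage sketch (not part of the original module) ---
# Wire and Via expect grid objects exposing value(idx) -> (center, (width, color));
# the _UniformGrid stub below only illustrates that contract and is not the real
# grid class used by callers. Layer names, pitches and widths are made up.
class _UniformGrid:
    def __init__(self, pitch, width=0, color=None):
        self.pitch, self.width, self.color = pitch, width, color

    def value(self, idx):
        return (idx * self.pitch, (self.width, self.color))


def _example_usage():
    m2 = Wire('m2', 'M2', 'h', clg=_UniformGrid(84, width=32), spg=_UniformGrid(80))
    # Horizontal M2 segment for net 'a' on track 3, spanning stop-grid points 0..5.
    return m2.segment('a', None, 3, 0, 5)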
|
the-stack_106_19381
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import heapq
import tensorflow as tf
from zoo import init_nncontext
from zoo.tfpark import TFOptimizer, TFDataset
from bigdl.optim.optimizer import *
import numpy as np
import sys
from bigdl.dataset import mnist
from bigdl.dataset.transformer import *
sys.path.append("/tmp/models/slim") # add the slim library
from nets import lenet
slim = tf.contrib.slim
def main(max_epoch, data_num):
sc = init_nncontext()
# get data, pre-process and create TFDataset
def get_data_rdd(dataset):
(images_data, labels_data) = mnist.read_data_sets("/tmp/mnist", dataset)
image_rdd = sc.parallelize(images_data[:data_num])
labels_rdd = sc.parallelize(labels_data[:data_num])
rdd = image_rdd.zip(labels_rdd) \
.map(lambda rec_tuple: [normalizer(rec_tuple[0], mnist.TRAIN_MEAN, mnist.TRAIN_STD),
np.array(rec_tuple[1])])
return rdd
training_rdd = get_data_rdd("train")
testing_rdd = get_data_rdd("test")
dataset = TFDataset.from_rdd(training_rdd,
names=["features", "labels"],
shapes=[[28, 28, 1], []],
types=[tf.float32, tf.int32],
batch_size=280,
val_rdd=testing_rdd
)
# construct the model from TFDataset
images, labels = dataset.tensors
with slim.arg_scope(lenet.lenet_arg_scope()):
logits, end_points = lenet.lenet(images, num_classes=10, is_training=True)
loss = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(logits=logits, labels=labels))
    # create an optimizer
optimizer = TFOptimizer(loss, Adam(1e-3),
val_outputs=[logits],
val_labels=[labels],
val_method=Top1Accuracy(), model_dir="/tmp/lenet/")
# kick off training
optimizer.optimize(end_trigger=MaxEpoch(max_epoch))
saver = tf.train.Saver()
saver.save(optimizer.sess, "/tmp/lenet/model")
if __name__ == '__main__':
max_epoch = 5
data_num = 60000
if len(sys.argv) > 1:
max_epoch = int(sys.argv[1])
data_num = int(sys.argv[2])
main(max_epoch, data_num)
|
the-stack_106_19382
|
import sys
from os.path import abspath, dirname, join
import airsim
import numpy as np
import os
import tempfile
import pprint
from time import sleep
import random
import re
import torch
import pickle
import time
from problems.flocking import FlockingProblem
from utils import *
import argparse
from network_agg import Network as Network_Agg
from network_yolo import Network as Network_Feat
parser = argparse.ArgumentParser("trace")
parser.add_argument('--model_feat_path', type=str, default='None', help='path to save the model')
parser.add_argument('--model_agg_path', type=str, default='None', help='path to save the model')
args = parser.parse_args()
def main():
## load model
model_feat = Network_Feat()
params = torch.load(args.model_feat_path)['state_dict']
model_feat.load_state_dict(params)
model_feat = torch.nn.DataParallel(model_feat).cuda()
model_feat.eval()
model_agg = Network_Agg()
params = torch.load(args.model_agg_path)['state_dict']
model_agg.load_state_dict(params)
model_agg = torch.nn.DataParallel(model_agg).cuda()
model_agg.eval()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
img_size = 416
model_def = '/home/grads/t/tkhu/PyTorch-YOLOv3-tkhu/config/yolov3-custom.cfg'
weights_path = '/home/grads/t/tkhu/PyTorch-YOLOv3-tkhu/checkpoints/yolov3_ckpt_99.pth'
model_yolo = Darknet(model_def, img_size=img_size).to(device)
model_yolo.load_state_dict(torch.load(weights_path))
problem = pickle.load(open('problem.pkl', 'rb'))
problem.comm_radius = 1.5
# parse settings file with drone names and home locations
fname = '/home/grads/t/tkhu/Documents/AirSim/settings.json'
names, home = parse_settings(fname)
n_agents = len(names)
problem.n_nodes = n_agents
#####################
# use centralized controller
problem.centralized_controller = True
# rescale locations and velocities to avoid changing potential function
scale = 6
init_scale = 6
# duration of velocity commands
duration = 0.01 # duration of each control action used for training
true_dt = 1.0 / 7.5 # average of actual measurements
# make the problem easier because of slow control loop frequency
problem.v_max = 3
print(problem.v_max)
problem.v_max = problem.v_max * 0.075
problem.v_bias = problem.v_bias * 0.3
# connect to the AirSim simulator
client = airsim.MultirotorClient()
client.confirmConnection()
display_msg(client, 'Initializing...')
measure_deltat = True
#z = -40
z = np.random.uniform(low=-43, high=-37, size=(n_agents,))
len_exp = 100
for nx in range(5 , 15):
# airsim setup and takeoff
# client.simPause(False)
setup_drones(n_agents, names, client)
# get drone locations and mean locations
mean_x = 2.0
mean_y = 0.0
cost = 0
################################################################
# option 1: two flocks colliding
#x0, v0 = twoflocks(n_agents)
#initial_v_dt = 8.0 # good for twoflocks()
# initial_v_dt = 2.0 # better for the rest of the cases
# option 2: two circles with inwards velocities
# x0, v0 = circle(N)
# option 3: tight grid of agents
# x0 = grid(N)
# option 4: random initialization as in training
#states = benchmark[nx,:,:]
states = problem.initialize()
trace = {}
trace['init_state'] = states
trace['z'] = z
        x0 = states[:,0:2]  # positions; velocities are columns 2:4 below
v0 = states[:,2:4]
initial_v_dt = 2.0
######################################################################
# scale positions and velocities
x0 = x0 * init_scale
if v0 is not None:
v0 = v0 * init_scale
display_msg(client, 'Moving to new positions...')
send_loc_commands(client, n_agents, x0, home, mean_x, mean_y, z, names)
if v0 is not None:
send_velocity_commands(client, n_agents, v0, z, initial_v_dt, names)
display_msg(client, 'Flocking...')
history_imgs = np.zeros((n_agents, 3, 128 ))
if isinstance(problem, FlockingProblem):
x_agg3 = np.zeros((problem.n_nodes, problem.nx * problem.filter_len, problem.n_pools))
else:
x_agg3 = np.zeros((problem.n_agents * problem.nx, problem.episode_len))
cost = 0
states = np.zeros((n_agents, 4, len_exp))
costs = np.zeros((1, len_exp))
for t in range(0, len_exp):
xt1 = getStates(n_agents, names, client, home) / scale # get drone locations and velocities
ut1_gt = problem.controller(xt1)
states[:,:, t] = xt1
image = get_imgs(client, n_agents, names, model_yolo, device)
if t == 0:
history_imgs[:, 0, : ] = image[:, 0, : ]
history_imgs[:, 1, : ] = image[:, 0, : ]
history_imgs[:, 2, : ] = image[:, 0, : ]
else:
history_imgs = np.concatenate((image, history_imgs), axis=1)
history_imgs = np.delete(history_imgs, [3], axis=1)
new_state = get_feats(client, n_agents, names, model_feat, history_imgs[:, :, :])
new_state = np.clip(new_state, -30, 30)
a_net = problem.get_connectivity(xt1)
new_agg = problem.get_comms(problem.get_features(x_agg3[:, :, 0]), a_net)
new_agg = problem.pooling[0](new_agg, axis=1)
new_agg = new_agg.reshape((new_agg.shape[0], new_agg.shape[-1]))
new_feat = np.concatenate((new_state, new_agg), axis=1)
x_agg3[:, :, 0] = np.clip(new_feat, -100, 100)
x_agg3_t = np.clip(x_agg3.reshape((problem.n_nodes, problem.filter_len * problem.nx * problem.n_pools)), -100, 100)
record_images(client, n_agents, xt1, ut1_gt, names, nx, t, x_agg3_t, new_state)
#x_agg3_t = x_agg3.reshape((problem.n_nodes, problem.filter_len * problem.nx * problem.n_pools))
x_agg3_t = torch.from_numpy(x_agg3_t).float().cuda()
ut1 = model_agg(x_agg3_t).data.cpu().numpy().reshape(problem.n_nodes, problem.nu)
if t == 0:
init_cost = problem.instant_cost(xt1, ut1)
current_cost = problem.instant_cost(xt1, ut1) * problem.dt
costs[:,t] = current_cost
cost = cost + current_cost
new_vel = (ut1 * true_dt + xt1[:, 2:4]) * scale
# random pertubt z for visibility
z = np.random.uniform(low=-43, high=-37, size=(n_agents,))
send_velocity_commands(client, n_agents, new_vel, z, duration, names)
print('current time step is ' + str(t) + ' , current cost is ' + str(current_cost))
trace['states'] = states
trace['costs'] = costs
f = open('./trace/gt_' + str(nx) + '_' + str(t)+'.pkl',"wb")
pickle.dump(trace,f)
f.close()
print('final_cost = ' + str(cost))
client.reset()
trace['states'] = states
trace['costs'] = costs
f = open('./trace/gt' + str(nx) + '.pkl',"wb")
pickle.dump(trace,f)
f.close()
if __name__ == '__main__':
main()
|
the-stack_106_19384
|
import numpy as np
import matplotlib.pyplot as plt
from custom_poling.utils.pmf import pmf
class Crystal:
""" A class for a poled crystal.
Attr:
domain_width
number_domains
z0
length = domain_width * number_domains
domain_walls
domain_middles
"""
def __init__(self, domain_width, number_domains, z0=0):
""" Initialize the Crystal class.
Params:
domain_width
number_domains
"""
self.domain_width = domain_width
self.number_domains = number_domains
self.z0 = z0
self.length = self.number_domains * self.domain_width
self.domain_walls = np.arange(z0, z0 + (self.number_domains + 1) * self.domain_width, self.domain_width)
self.domain_middles = (self.domain_walls + self.domain_width/2)[0:-1]
def compute_pmf(self, domain_configuration, k_array):
"""Returns the phasematching function (PMF) as a function of k for a given domain_configuration.
Args:
domain_configuration (list of int): elements of list must be +1 or -1
k_array (array of floats)
Returns:
PMF as an array of floats
"""
self.domain_configuration = domain_configuration
self.k_array = k_array
crystal_pmf = pmf(self.domain_walls, self.domain_configuration, self.k_array)
return crystal_pmf
def plot_domains(self,domain_configuration,n_max=None,show=True,save_as=False,fix_ticks=False):
x_axis = self.domain_walls
y_axis = np.concatenate(([domain_configuration[0]],domain_configuration))
if n_max != None and n_max < len(x_axis):
x_axis = x_axis[0:n_max]
y_axis = y_axis[0:n_max]
plt.step(x_axis,y_axis)
plt.xlabel('z')
plt.ylabel('g(z)')
plt.ylim([-1.2, 1.2])
if fix_ticks==True:
plt.xticks(rotation=45)
if type(save_as)==str:
plt.savefig(save_as)
plt.close()
print("Saved figure as: " + save_as)
if show==False:
plt.close()
if show:
plt.show()
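# --- Hedged usage sketch (not part of the original module) ---
# Shows the intended call sequence: build a crystal, choose a +1/-1 domain
# configuration, and evaluate the phasematching function on a k grid. The
# domain width, domain count and k range are arbitrary illustration values.
def _example_usage():
    crystal = Crystal(domain_width=1.0, number_domains=20)
    domain_configuration = [(-1) ** n for n in range(crystal.number_domains)]  # periodic poling
    k_array = np.linspace(-np.pi, np.pi, 201)
    return crystal.compute_pmf(domain_configuration, k_array)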
|
the-stack_106_19385
|
"""
Copyright 2017 Inmanta
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contact: [email protected]
"""
import argparse
from typing import Callable, Dict, List, Optional
FunctionType = Callable[[argparse.Namespace], None]
ParserConfigType = Callable[[argparse.ArgumentParser], None]
class CLIException(Exception):
def __init__(self, *args: str, exitcode: int) -> None:
self.exitcode = exitcode
super(CLIException, self).__init__(*args)
class ShowUsageException(Exception):
"""
Raise this exception to show the usage message of the given level
"""
class Commander(object):
"""
This class handles commands
"""
__command_functions: Dict[str, Dict[str, object]] = {}
@classmethod
def add(
cls,
name: str,
function: FunctionType,
help_msg: str,
parser_config: Optional[ParserConfigType],
require_project: bool = False,
aliases: List[str] = [],
) -> None:
"""
        Add a new command function
"""
if name in cls.__command_functions:
raise Exception("Command %s already registered" % name)
cls.__command_functions[name] = {
"function": function,
"help": help_msg,
"parser_config": parser_config,
"require_project": require_project,
"aliases": aliases,
}
config = None
@classmethod
def reset(cls) -> None:
"""
        Reset the registered commands
"""
cls.__command_functions = {}
@classmethod
def commands(cls) -> Dict[str, Dict[str, object]]:
"""
Return a list of commands
"""
return cls.__command_functions
class command(object): # noqa: N801
"""
    A decorator that registers a command function
"""
def __init__(
self,
name: str,
help_msg: str,
parser_config: Optional[ParserConfigType] = None,
require_project: bool = False,
aliases: List[str] = [],
) -> None:
self.name = name
self.help = help_msg
self.require_project = require_project
self.parser_config = parser_config
self.aliases = aliases
def __call__(self, function: FunctionType) -> FunctionType:
"""
The wrapping
"""
Commander.add(self.name, function, self.help, self.parser_config, self.require_project, self.aliases)
return function
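# --- Hedged usage sketch (not part of the original module) ---
def _example_usage() -> None:
    """
    Registers a throwaway sub-command through the decorator above when called.
    The command name, option and handler body are illustrative only.
    """

    def parser_config(parser: argparse.ArgumentParser) -> None:
        parser.add_argument("--name", default="world")

    @command("example", "Print a greeting", parser_config=parser_config)
    def example_cmd(options: argparse.Namespace) -> None:
        print(f"hello {options.name}")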
|
the-stack_106_19386
|
from unittest.mock import Mock
from django.test import RequestFactory, TestCase
from .middleware import WiretapMiddleware
from .models import Message, Tap
class WiretapTestCase(TestCase):
def setUp(self):
self.request_factory = RequestFactory()
self.mock = Mock()
self.wiretap_middleware = WiretapMiddleware(self.mock)
def test_initialization(self):
self.assertEqual(self.wiretap_middleware.get_response, self.mock)
def test_no_taps(self):
self.assertEqual(Tap.objects.count(), 0)
self.wiretap_middleware(self.request_factory.get("/"))
self.assertEqual(Message.objects.count(), 0)
def test_tap_match(self):
Tap.objects.create(path="/test", is_active=True)
self.mock.side_effect = [
Mock(
name="response",
items=dict().items,
status_code=200,
reason_phrase="OK",
content=b"",
)
]
self.wiretap_middleware(self.request_factory.get("/test"))
self.assertEqual(Message.objects.count(), 1)
def test_tap_mismatch(self):
Tap.objects.create(path="/test", is_active=True)
self.wiretap_middleware(self.request_factory.get("/real"))
self.assertEqual(Message.objects.count(), 0)
def test_tap_not_active(self):
Tap.objects.create(path="/test", is_active=False)
self.wiretap_middleware(self.request_factory.get("/test"))
self.assertEqual(Message.objects.count(), 0)
|
the-stack_106_19387
|
import sys, os.path
import math
dir_nodo = (os.path.abspath(os.path.join(os.path.dirname(__file__), '..','..')) + '\\EXPRESION\\EXPRESION\\')
sys.path.append(dir_nodo)
ent_nodo = (os.path.abspath(os.path.join(os.path.dirname(__file__), '..','..')) + '\\ENTORNO\\')
sys.path.append(ent_nodo)
from Expresion import Expresion
from Tipo import Data_Type
from Tipo_Expresion import Type_Expresion
class Function_Substr(Expresion):
def __init__(self, nombreNodo, fila, columna, valor):
Expresion.__init__(self, nombreNodo, fila, columna, valor)
def execute(self, enviroment):
exp = self.hijos[0]
exp2 = self.hijos[1]
exp3 = self.hijos[2]
valueExp = exp.execute(enviroment)
valueExp2 = exp2.execute(enviroment)
valueExp3 = exp3.execute(enviroment)
if exp.tipo.data_type == Data_Type.character :
if exp2.tipo.data_type == Data_Type.numeric and exp3.tipo.data_type == Data_Type.numeric :
try :
rango = slice(valueExp2-1, valueExp3)
self.tipo = Type_Expresion(Data_Type.character)
self.valorExpresion = valueExp[rango]
return self.valorExpresion
except :
self.tipo = Type_Expresion(Data_Type.error)
self.valorExpresion = None
return self.valorExpresion
else :
self.tipo = Type_Expresion(Data_Type.error)
self.valorExpresion = None
return self.valorExpresion
else :
self.tipo = Type_Expresion(Data_Type.error)
self.valorExpresion = None
return self.valorExpresion
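# --- Hedged note (not part of the original module) ---
# The slice built in execute() treats the second argument as a 1-based start
# position and the third as an inclusive end position, e.g.
#     "abcdef"[slice(2 - 1, 4)] == "bcd"
# If the grammar instead expects SUBSTRING(str, start, length) semantics, the
# slice would need to be slice(start - 1, start - 1 + length); that is an
# assumption about intent, not something the surrounding code confirms.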
|
the-stack_106_19389
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
""" Userbot module for having some fun with people. """
import time
import datetime
from telethon import events
import io
import os
import urllib
from urllib.request import urlopen
import requests
from bs4 import BeautifulSoup
import re
from re import sub
from cowpy import cow
from asyncio import sleep
from collections import deque
from random import choice, getrandbits, randint
from telethon.tl.types import MessageMediaPhoto
from PIL import Image
from telethon.tl.functions.account import UpdateNotifySettingsRequest
from userbot import bot, CMD_HELP
from userbot.events import register
from userbot.modules.admin import get_user_from_event
# ================= CONSTANT =================
METOOSTR = [
"Me too thanks",
"Haha yes, me too",
"Same lol",
"Me irl",
"Same here",
"Haha yes",
"Me rn",
]
ZALG_LIST = [[
"̖",
" ̗",
" ̘",
" ̙",
" ̜",
" ̝",
" ̞",
" ̟",
" ̠",
" ̤",
" ̥",
" ̦",
" ̩",
" ̪",
" ̫",
" ̬",
" ̭",
" ̮",
" ̯",
" ̰",
" ̱",
" ̲",
" ̳",
" ̹",
" ̺",
" ̻",
" ̼",
" ͅ",
" ͇",
" ͈",
" ͉",
" ͍",
" ͎",
" ͓",
" ͔",
" ͕",
" ͖",
" ͙",
" ͚",
" ",
],
[
" ̍",
" ̎",
" ̄",
" ̅",
" ̿",
" ̑",
" ̆",
" ̐",
" ͒",
" ͗",
" ͑",
" ̇",
" ̈",
" ̊",
" ͂",
" ̓",
" ̈́",
" ͊",
" ͋",
" ͌",
" ̃",
" ̂",
" ̌",
" ͐",
" ́",
" ̋",
" ̏",
" ̽",
" ̉",
" ͣ",
" ͤ",
" ͥ",
" ͦ",
" ͧ",
" ͨ",
" ͩ",
" ͪ",
" ͫ",
" ͬ",
" ͭ",
" ͮ",
" ͯ",
" ̾",
" ͛",
" ͆",
" ̚",
],
[
" ̕",
" ̛",
" ̀",
" ́",
" ͘",
" ̡",
" ̢",
" ̧",
" ̨",
" ̴",
" ̵",
" ̶",
" ͜",
" ͝",
" ͞",
" ͟",
" ͠",
" ͢",
" ̸",
" ̷",
" ͡",
]]
EMOJIS = [
"😂",
"😂",
"👌",
"✌",
"💞",
"👍",
"👌",
"💯",
"🎶",
"👀",
"😂",
"👓",
"👏",
"👐",
"🍕",
"💥",
"🍴",
"💦",
"💦",
"🍑",
"🍆",
"😩",
"😏",
"👉👌",
"👀",
"👅",
"😩",
"🚰",
]
INSULT_STRINGS = [
"Owww ... Such a stupid idiot.",
"Don't drink and type.",
"I think you should go home or better a mental asylum.",
"Command not found. Just like your brain.",
"Do you realize you are making a fool of yourself? Apparently not.",
"You can type better than that.",
"Bot rule 544 section 9 prevents me from replying to stupid humans like you.",
"Sorry, we do not sell brains.",
"Believe me you are not normal.",
"I bet your brain feels as good as new, seeing that you never use it.",
"If I wanted to kill myself I'd climb your ego and jump to your IQ.",
"Zombies eat brains... you're safe.",
"You didn't evolve from apes, they evolved from you.",
"Come back and talk to me when your I.Q. exceeds your age.",
"I'm not saying you're stupid, I'm just saying you've got bad luck when it comes to thinking.",
"What language are you speaking? Cause it sounds like bullshit.",
"Stupidity is not a crime so you are free to go.",
"You are proof that evolution CAN go in reverse.",
"I would ask you how old you are but I know you can't count that high.",
"As an outsider, what do you think of the human race?",
"Brains aren't everything. In your case they're nothing.",
"Ordinarily people live and learn. You just live.",
"I don't know what makes you so stupid, but it really works.",
"Keep talking, someday you'll say something intelligent! (I doubt it though)",
"Shock me, say something intelligent.",
"Your IQ's lower than your shoe size.",
"Alas! Your neurotransmitters are no more working.",
"Are you crazy you fool.",
"Everyone has the right to be stupid but you are abusing the privilege.",
"I'm sorry I hurt your feelings when I called you stupid. I thought you already knew that.",
"You should try tasting cyanide.",
"Your enzymes are meant to digest rat poison.",
"You should try sleeping forever.",
"Pick up a gun and shoot yourself.",
"You could make a world record by jumping from a plane without parachute.",
"Stop talking BS and jump in front of a running bullet train.",
"Try bathing with Hydrochloric Acid instead of water.",
"Try this: if you hold your breath underwater for an hour, you can then hold it forever.",
"Go Green! Stop inhaling Oxygen.",
"God was searching for you. You should leave to meet him.",
"give your 100%. Now, go donate blood.",
"Try jumping from a hundred story building but you can do it only once.",
"You should donate your brain seeing that you never used it.",
"Volunteer for target in an firing range.",
"Head shots are fun. Get yourself one.",
"You should try swimming with great white sharks.",
"You should paint yourself red and run in a bull marathon.",
"You can stay underwater for the rest of your life without coming back up.",
"How about you stop breathing for like 1 day? That'll be great.",
"Try provoking a tiger while you both are in a cage.",
"Have you tried shooting yourself as high as 100m using a canon.",
"You should try holding TNT in your mouth and igniting it.",
"Try playing catch and throw with RDX its fun.",
"I heard phogine is poisonous but i guess you wont mind inhaling it for fun.",
"Launch yourself into outer space while forgetting oxygen on Earth.",
"You should try playing snake and ladders, with real snakes and no ladders.",
"Dance naked on a couple of HT wires.",
"Active Volcano is the best swimming pool for you.",
"You should try hot bath in a volcano.",
"Try to spend one day in a coffin and it will be yours forever.",
"Hit Uranium with a slow moving neutron in your presence. It will be a worthwhile experience.",
"You can be the first person to step on sun. Have a try.",
]
UWUS = [
"(・`ω´・)",
";;w;;",
"owo",
"UwU",
">w<",
"^w^",
r"\(^o\) (/o^)/",
"( ^ _ ^)∠☆",
"(ô_ô)",
"~:o",
";-;",
"(*^*)",
"(>_",
"(♥_♥)",
"*(^O^)*",
"((+_+))",
]
IWIS = [
"┐(´д`)┌",
"┐(´~`)┌",
"┐(´ー`)┌",
"┐( ̄ヘ ̄)┌",
"╮(╯∀╰)╭",
"╮(╯_╰)╭",
"┐(´д`)┌",
"┐(´∀`)┌",
"ʅ(́◡◝)ʃ",
"┐(゚~゚)┌",
"┐('д')┌",
"┐(‘~`;)┌",
"ヘ(´-`;)ヘ",
"┐( -“-)┌",
"ʅ(´◔౪◔)ʃ",
"ヽ(゜~゜o)ノ",
"ヽ(~~~ )ノ",
"┐(~ー~;)┌",
"┐(-。ー;)┌",
r"¯\_(ツ)_/¯",
r"¯\_(⊙_ʖ⊙)_/¯",
r"¯\_༼ ಥ ‿ ಥ ༽_/¯",
"乁( ⁰͡ Ĺ̯ ⁰͡ ) ㄏ",
]
FACEREACTS = [
"ʘ‿ʘ",
"ヾ(-_- )ゞ",
"(っ˘ڡ˘ς)",
"(´ж`ς)",
"( ಠ ʖ̯ ಠ)",
"(° ͜ʖ͡°)╭∩╮",
"(ᵟຶ︵ ᵟຶ)",
"(งツ)ว",
"ʚ(•`",
"(っ▀¯▀)つ",
"(◠﹏◠)",
"( ͡ಠ ʖ̯ ͡ಠ)",
"( ఠ ͟ʖ ఠ)",
"(∩`-´)⊃━☆゚.*・。゚",
"(⊃。•́‿•̀。)⊃",
"(._.)",
"{•̃_•̃}",
"(ᵔᴥᵔ)",
"♨_♨",
"⥀.⥀",
"ح˚௰˚づ ",
"(҂◡_◡)",
"ƪ(ړײ)ƪ",
"(っ•́。•́)♪♬",
"◖ᵔᴥᵔ◗ ♪ ♫ ",
"(☞゚ヮ゚)☞",
"[¬º-°]¬",
"(Ծ‸ Ծ)",
"(•̀ᴗ•́)و ̑̑",
"ヾ(´〇`)ノ♪♪♪",
"(ง'̀-'́)ง",
"ლ(•́•́ლ)",
"ʕ •́؈•̀ ₎",
"♪♪ ヽ(ˇ∀ˇ )ゞ",
"щ(゚Д゚щ)",
"( ˇ෴ˇ )",
"눈_눈",
"(๑•́ ₃ •̀๑) ",
"( ˘ ³˘)♥ ",
"ԅ(≖‿≖ԅ)",
"♥‿♥",
"◔_◔",
"⁽⁽ଘ( ˊᵕˋ )ଓ⁾⁾",
"乁( ◔ ౪◔)「 ┑( ̄Д  ̄)┍",
"( ఠൠఠ )ノ",
"٩(๏_๏)۶",
"┌(ㆆ㉨ㆆ)ʃ",
"ఠ_ఠ",
"(づ。◕‿‿◕。)づ",
"(ノಠ ∩ಠ)ノ彡( \\o°o)\\",
"“ヽ(´▽`)ノ”",
"༼ ༎ຶ ෴ ༎ຶ༽",
"。゚( ゚இ‸இ゚)゚。",
"(づ ̄ ³ ̄)づ",
"(⊙.☉)7",
"ᕕ( ᐛ )ᕗ",
"t(-_-t)",
"(ಥ⌣ಥ)",
"ヽ༼ ಠ益ಠ ༽ノ",
"༼∵༽ ༼⍨༽ ༼⍢༽ ༼⍤༽",
"ミ●﹏☉ミ",
"(⊙_◎)",
"¿ⓧ_ⓧﮌ",
"ಠ_ಠ",
"(´・_・`)",
"ᕦ(ò_óˇ)ᕤ",
"⊙﹏⊙",
"(╯°□°)╯︵ ┻━┻",
r"¯\_(⊙︿⊙)_/¯",
"٩◔̯◔۶",
"°‿‿°",
"ᕙ(⇀‸↼‶)ᕗ",
"⊂(◉‿◉)つ",
"V•ᴥ•V",
"q(❂‿❂)p",
"ಥ_ಥ",
"ฅ^•ﻌ•^ฅ",
"ಥ﹏ಥ",
"( ^_^)o自自o(^_^ )",
"ಠ‿ಠ",
"ヽ(´▽`)/",
"ᵒᴥᵒ#",
"( ͡° ͜ʖ ͡°)",
"┬─┬ ノ( ゜-゜ノ)",
"ヽ(´ー`)ノ",
"☜(⌒▽⌒)☞",
"ε=ε=ε=┌(;*´Д`)ノ",
"(╬ ಠ益ಠ)",
"┬─┬⃰͡ (ᵔᵕᵔ͜ )",
"┻━┻ ︵ヽ(`Д´)ノ︵ ┻━┻",
r"¯\_(ツ)_/¯",
"ʕᵔᴥᵔʔ",
"(`・ω・´)",
"ʕ•ᴥ•ʔ",
"ლ(`ー´ლ)",
"ʕʘ̅͜ʘ̅ʔ",
"( ゚Д゚)",
r"¯\(°_o)/¯",
"(。◕‿◕。)",
]
RUNS_STR = [
"Runs to Thanos..",
"Runs far, far away from earth..",
"Running faster than Bolt coz i'mma userbot !!",
"Runs to Marie..",
"This Group is too cancerous to deal with.",
"Cya bois",
"Kys",
"I go away",
"I am just walking off, coz me is too fat.",
"I Fugged off!",
"Will run for chocolate.",
"I run because I really like food.",
"Running...\nbecause dieting is not an option.",
"Wicked fast runnah",
"If you wanna catch me, you got to be fast...\nIf you wanna stay with me, you got to be good...\nBut if you wanna pass me...\nYou've got to be kidding.",
"Anyone can run a hundred meters, it's the next forty-two thousand and two hundred that count.",
"Why are all these people following me?",
"Are the kids still chasing me?",
"Running a marathon...there's an app for that.",
]
CHASE_STR = [
"Where do you think you're going?",
"Huh? what? did they get away?",
"ZZzzZZzz... Huh? what? oh, just them again, nevermind.",
"Get back here!",
"Not so fast...",
"Look out for the wall!",
"Don't leave me alone with them!!",
"You run, you die.",
"Jokes on you, I'm everywhere",
"You're gonna regret that...",
"You could also try /kickme, I hear that's fun.",
"Go bother someone else, no-one here cares.",
"You can run, but you can't hide.",
"Is that all you've got?",
"I'm behind you...",
"You've got company!",
"We can do this the easy way, or the hard way.",
"You just don't get it, do you?",
"Yeah, you better run!",
"Please, remind me how much I care?",
"I'd run faster if I were you.",
"That's definitely the droid we're looking for.",
"May the odds be ever in your favour.",
"Famous last words.",
"And they disappeared forever, never to be seen again.",
"\"Oh, look at me! I'm so cool, I can run from a bot!\" - this person",
"Yeah yeah, just tap /kickme already.",
"Here, take this ring and head to Mordor while you're at it.",
"Legend has it, they're still running...",
"Unlike Harry Potter, your parents can't protect you from me.",
"Fear leads to anger. Anger leads to hate. Hate leads to suffering. If you keep running in fear, you might "
"be the next Vader.",
"Multiple calculations later, I have decided my interest in your shenanigans is exactly 0.",
"Legend has it, they're still running.",
"Keep it up, not sure we want you here anyway.",
"You're a wiza- Oh. Wait. You're not Harry, keep moving.",
"NO RUNNING IN THE HALLWAYS!",
"Hasta la vista, baby.",
"Who let the dogs out?",
"It's funny, because no one cares.",
"Ah, what a waste. I liked that one.",
"Frankly, my dear, I don't give a damn.",
"My milkshake brings all the boys to yard... So run faster!",
"You can't HANDLE the truth!",
"A long time ago, in a galaxy far far away... Someone would've cared about that. Not anymore though.",
"Hey, look at them! They're running from the inevitable banhammer... Cute.",
"Han shot first. So will I.",
"What are you running after, a white rabbit?",
"As The Doctor would say... RUN!",
]
HELLOSTR = [
"Hi !",
"‘Ello, gov'nor!",
"What’s crackin’?",
"‘Sup, homeslice?",
"Howdy, howdy ,howdy!",
"Hello, who's there, I'm talking.",
"You know who this is.",
"Yo!",
"Whaddup.",
"Greetings and salutations!",
"Hello, sunshine!",
"Hey, howdy, hi!",
"What’s kickin’, little chicken?",
"Peek-a-boo!",
"Howdy-doody!",
"Hey there, freshman!",
"I come in peace!",
"Ahoy, matey!",
"Hiya!",
]
SHGS = [
"┐(´д`)┌",
"┐(´~`)┌",
"┐(´ー`)┌",
"┐( ̄ヘ ̄)┌",
"╮(╯∀╰)╭",
"╮(╯_╰)╭",
"┐(´д`)┌",
"┐(´∀`)┌",
"ʅ(́◡◝)ʃ",
"┐(゚~゚)┌",
"┐('д')┌",
"┐(‘~`;)┌",
"ヘ(´-`;)ヘ",
"┐( -“-)┌",
"ʅ(´◔౪◔)ʃ",
"ヽ(゜~゜o)ノ",
"ヽ(~~~ )ノ",
"┐(~ー~;)┌",
"┐(-。ー;)┌",
r"¯\_(ツ)_/¯",
r"¯\_(⊙_ʖ⊙)_/¯",
r"¯\_༼ ಥ ‿ ಥ ༽_/¯",
"乁( ⁰͡ Ĺ̯ ⁰͡ ) ㄏ",
]
CRI = [
"أ‿أ",
"╥﹏╥",
"(;﹏;)",
"(ToT)",
"(┳Д┳)",
"(ಥ﹏ಥ)",
"(;へ:)",
"(T_T)",
"(πーπ)",
"(T▽T)",
"(⋟﹏⋞)",
"(iДi)",
"(´Д⊂ヽ",
"(;Д;)",
"(>﹏<)",
"(TдT)",
"(つ﹏⊂)",
"༼☯﹏☯༽",
"(ノ﹏ヽ)",
"(ノAヽ)",
"(╥_╥)",
"(T⌓T)",
"(༎ຶ⌑༎ຶ)",
"(☍﹏⁰)。",
"(ಥ_ʖಥ)",
"(つд⊂)",
"(≖͞_≖̥)",
"(இ﹏இ`。)",
"༼ಢ_ಢ༽",
"༼ ༎ຶ ෴ ༎ຶ༽",
]
SLAP_TEMPLATES_EN = [
"{hits} {victim} with a {item}.",
"{hits} {victim} in the face with a {item}.",
"{hits} {victim} around a bit with a {item}.",
"`{throws} a {item} at {victim}.`",
"grabs a {item} and {throws} it at {victim}'s face.",
"{hits} a {item} at {victim}.", "{throws} a few {item} at {victim}.",
"grabs a {item} and {throws} it in {victim}'s face.",
"launches a {item} in {victim}'s general direction.",
"sits on {victim}'s face while slamming a {item} {where}.",
"starts slapping {victim} silly with a {item}.",
"pins {victim} down and repeatedly {hits} them with a {item}.",
"grabs up a {item} and {hits} {victim} with it.",
"starts slapping {victim} silly with a {item}.",
"holds {victim} down and repeatedly {hits} them with a {item}.",
"prods {victim} with a {item}.",
"picks up a {item} and {hits} {victim} with it.",
"`ties {victim} to a chair and {throws} a {item} at them.`",
"{hits} {victim} {where} with a {item}.",
"ties {victim} to a pole and whips them {where} with a {item}."
"gave a friendly push to help {victim} learn to swim in lava.",
"sent {victim} to /dev/null.", "sent {victim} down the memory hole.",
"beheaded {victim}.", "threw {victim} off a building.",
"replaced all of {victim}'s music with Nickelback.",
"spammed {victim}'s email.", "made {victim} a knuckle sandwich.",
"slapped {victim} with pure nothing.",
"hit {victim} with a small, interstellar spaceship.",
"quickscoped {victim}.", "put {victim} in check-mate.",
"RSA-encrypted {victim} and deleted the private key.",
"put {victim} in the friendzone.",
"slaps {victim} with a DMCA takedown request!"
]
ITEMS_EN = [
"cast iron skillet",
"large trout",
"baseball bat",
"cricket bat",
"wooden cane",
"nail",
"printer",
"shovel",
"pair of trousers",
"CRT monitor",
"diamond sword",
"baguette",
"physics textbook",
"toaster",
"portrait of Richard Stallman",
"television",
"mau5head",
"five ton truck",
"roll of duct tape",
"book",
"laptop",
"old television",
"sack of rocks",
"rainbow trout",
"cobblestone block",
"lava bucket",
"rubber chicken",
"spiked bat",
"gold block",
"fire extinguisher",
"heavy rock",
"chunk of dirt",
"beehive",
"piece of rotten meat",
"bear",
"ton of bricks",
]
THROW_EN = [
"throws",
"flings",
"chucks",
"hurls",
]
HIT_EN = [
"hits",
"whacks",
"slaps",
"smacks",
"bashes",
]
WHERE_EN = ["in the chest", "on the head", "on the butt", "on the crotch"]
# ID translation by @yincen
SLAP_TEMPLATES_ID = [
"{hits} {victim} dengan {item}.",
"{throws} sebuah {item} kepada {victim}.",
"mengambil {item} dan {hits} {victim} .",
"Mengambil Sebuah {item} dan {hits} {victim} Dengan itu.",
"Menjatuhkan {victim} Ke Lava.",
"Mengirimkan {victim} ke Kawah.",
"Membuang {victim} Ke Laut.",
"Mengeluarkan {victim} Dari Bumi.",
"Melempar {victim} Ke luar angkasa.",
"Menaruh {victim} di Pluto.",
"Melemparkan sebuah {item} ke {victim}.",
"Melemparkan {item} kepada {victim}.",
"Menampar {victim} menggunakan {item}.",
"Membuang {victim} Ke udara.",
"Menghapus {victim} Dari Daftar Teman.",
"Melemparkan {item} {where} {victim}.",
"Meletakan {item} {where} {victim}.",
"Menyerang {victim} menggunakan {anime}.",
"Mengehack Seluruh akun {victim}"
]
ITEMS_ID = [
"Tabung Gas",
"Televisi 42 In",
"Raket",
"Raket Nyamuk",
"Kaca",
"Buku",
"Ringgis",
"Telur",
"Jarum",
"Monitor Tabung",
"Obeng",
"Almunium",
"Emas",
"Printer",
"Speaker",
"Gas Lpg",
"Tangki Bensin",
"Tandon Air",
"Bola Boling",
"Laptop",
"Hardisk Rusak",
"Wajan Panas",
"Virus Corona",
"Meja Kantor",
"Meja Arsip",
"Lemari",
"Ember Besi",
"Besi Beton",
"Timah Panas",
"Harimau",
"Batu Krikil",
"Makanan Basi",
"Pesawat AirBus",
"Roket Nasa",
"Satelit Nasa",
"Matahari",
"Meteor",
"Berkas Kantor",
"Beton panas",
"Cermin",
"Batu Giok",
"Botol",
"Nezuko",
"Kaset Pita",
"Tiang Jemuran",
"Pisau Lipat",
"Bongkahan Es ",
"Asteroid",
]
THROW_ID = [
"Melempar",
"Melemparkan",
]
HIT_ID = [
"Memukul",
"melemparkan",
"Memukuli",
]
WHERE_ID = ["di pipi", "di kepala", "di bokong", "di badan"]
SLAP_TEMPLATES_Jutsu = [
"Menyerang {victim} Menggunakan {hits}.",
"Menyerang {victim} Menggunakan {item}.",
"Melemparkan {throws} kepada {victim} .",
"Melemparkan {throws} {where} {victim}."
]
ITEMS_Jutsu = [
"KAA MEE HAA MEE HAA",
"Chibaku Tensei",
]
THROW_Jutsu = [
"Futon Rasen Shuriken",
"Shuriken",
]
HIT_Jutsu = [
"Rasengan",
"Chidori",
]
GAMBAR_TITIT = """
😘😘
😘😘😘
😘😘😘
😘😘😘
😘😘😘
😘😘😘
😘😘😘
😘😘😘
😘😘😘
😘😘😘
😘😘😘😘
😘😘😘😘😘😘
😘😘😘 😘😘😘
😘😘 😘😘
"""
WHERE_Jutsu = ["Di Pipi", "Di Kepala", "Di Bokong", "Di Badan ,Di Pantat"]
normiefont = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u',
'v', 'w', 'x', 'y', 'z']
weebyfont = ['卂', '乃', '匚', '刀', '乇', '下', '厶', '卄', '工', '丁', '长', '乚', '从', '𠘨', '口', '尸', '㔿', '尺', '丂', '丅', '凵',
'リ', '山', '乂', '丫', '乙']
# ===========================================
@register(outgoing=True, pattern=r"^\.(\w+)say (.*)")
async def univsaye(cowmsg):
""" For .cowsay module, userbot wrapper for cow which says things. """
arg = cowmsg.pattern_match.group(1).lower()
text = cowmsg.pattern_match.group(2)
if arg == "cow":
arg = "default"
if arg not in cow.COWACTERS:
return
cheese = cow.get_cow(arg)
cheese = cheese()
await cowmsg.edit(f"`{cheese.milk(text).replace('`', '´')}`")
@register(outgoing=True, pattern=r"^\.coinflip (.*)")
async def coin(event):
r = choice(["heads", "tails"])
input_str = event.pattern_match.group(1)
if input_str:
input_str = input_str.lower()
if r == "heads":
if input_str == "heads":
await event.edit(
"The coin landed on: **Heads**.\nYou were correct.")
elif input_str == "tails":
await event.edit(
"The coin landed on: **Heads**.\nYou weren't correct, try again ..."
)
else:
await event.edit("The coin landed on: **Heads**.")
elif r == "tails":
if input_str == "tails":
await event.edit(
"The coin landed on: **Tails**.\nYou were correct.")
elif input_str == "heads":
await event.edit(
"The coin landed on: **Tails**.\nYou weren't correct, try again ..."
)
else:
await event.edit("The coin landed on: **Tails**.")
@register(pattern="^\.slap(?: |$)(.*)", outgoing=True)
async def who(event):
""" slaps a user, or get slapped if not a reply. """
replied_user = await get_user_from_event(event)
if replied_user:
replied_user = replied_user[0]
else:
return
caption = await slap(replied_user, event)
try:
await event.edit(caption)
except BaseException:
await event.edit(
"`Can't slap this person, need to fetch some sticks and stones !!`"
)
async def slap(replied_user, event):
""" Construct a funny slap sentence !! """
user_id = replied_user.id
first_name = replied_user.first_name
username = replied_user.username
if username:
slapped = "@{}".format(username)
else:
slapped = f"[{first_name}](tg://user?id={user_id})"
slap_str = event.pattern_match.group(1)
if slap_str == "en":
temp = choice(SLAP_TEMPLATES_EN)
item = choice(ITEMS_EN)
hit = choice(HIT_EN)
throw = choice(THROW_EN)
where = choice(WHERE_EN)
elif slap_str == "id":
temp = choice(SLAP_TEMPLATES_ID)
item = choice(ITEMS_ID)
hit = choice(HIT_ID)
throw = choice(THROW_ID)
where = choice(WHERE_ID)
elif slap_str == "jutsu":
temp = choice(SLAP_TEMPLATES_Jutsu)
item = choice(ITEMS_Jutsu)
hit = choice(HIT_Jutsu)
throw = choice(THROW_Jutsu)
where = choice(WHERE_Jutsu)
else:
temp = choice(SLAP_TEMPLATES_EN)
item = choice(ITEMS_EN)
hit = choice(HIT_EN)
throw = choice(THROW_EN)
where = choice(WHERE_EN)
caption = "..." + temp.format(
victim=slapped, item=item, hits=hit, throws=throw, where=where)
return caption
@register(outgoing=True, pattern="^\.boobs(?: |$)(.*)")
async def boobs(e):
await e.edit("`Finding some big boobs...`")
await sleep(3)
await e.edit("`Sending some big boobs...`")
nsfw = requests.get('http://api.oboobs.ru/noise/1').json()[0]["preview"]
urllib.request.urlretrieve("http://media.oboobs.ru/{}".format(nsfw), "*.jpg")
os.rename('*.jpg', 'boobs.jpg')
await e.client.send_file(e.chat_id, "boobs.jpg")
os.remove("boobs.jpg")
await e.delete()
@register(outgoing=True, pattern="^\.butts(?: |$)(.*)")
async def butts(e):
await e.edit("`Finding some beautiful butts...`")
await sleep(3)
await e.edit("`Sending some beautiful butts...`")
nsfw = requests.get('http://api.obutts.ru/noise/1').json()[0]["preview"]
urllib.request.urlretrieve("http://media.obutts.ru/{}".format(nsfw), "*.jpg")
os.rename('*.jpg', 'butts.jpg')
await e.client.send_file(e.chat_id, "butts.jpg")
os.remove("butts.jpg")
await e.delete()
@register(outgoing=True, pattern="^\.(yes|no|maybe|decide)$")
async def decide(event):
decision = event.pattern_match.group(1).lower()
message_id = event.reply_to_msg_id if event.reply_to_msg_id else None
if decision != "decide":
r = requests.get(f"https://yesno.wtf/api?force={decision}").json()
else:
r = requests.get(f"https://yesno.wtf/api").json()
await event.delete()
await event.client.send_message(event.chat_id,
str(r["answer"]).upper(),
reply_to=message_id,
file=r["image"])
@register(outgoing=True, pattern="^\.fp$")
async def facepalm(e):
""" Facepalm 🤦♂ """
await e.edit("🤦♂")
@register(outgoing=True, pattern="^\.cry$")
async def cry(e):
""" y u du dis, i cry everytime !! """
await e.edit(choice(CRI))
@register(outgoing=True, pattern="^\.insult$")
async def insult(e):
""" I make you cry !! """
await e.edit(choice(INSULT_STRINGS))
@register(outgoing=True, pattern="^\.cp(?: |$)(.*)")
async def copypasta(cp_e):
""" Copypasta the famous meme """
textx = await cp_e.get_reply_message()
message = cp_e.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
return await cp_e.edit("`😂🅱️IvE👐sOME👅text👅for✌️Me👌tO👐MAkE👀iT💞funNy!💦`")
reply_text = choice(EMOJIS)
# choose a random character in the message to be substituted with 🅱️
b_char = choice(message).lower()
for owo in message:
if owo == " ":
reply_text += choice(EMOJIS)
elif owo in EMOJIS:
reply_text += owo
reply_text += choice(EMOJIS)
elif owo.lower() == b_char:
reply_text += "🅱️"
else:
if bool(getrandbits(1)):
reply_text += owo.upper()
else:
reply_text += owo.lower()
reply_text += choice(EMOJIS)
await cp_e.edit(reply_text)
@register(outgoing=True, pattern="^\.vapor(?: |$)(.*)")
async def vapor(vpr):
""" Vaporize everything! """
reply_text = list()
textx = await vpr.get_reply_message()
message = vpr.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
return await vpr.edit("`Give some text for vapor!`")
for charac in message:
if 0x21 <= ord(charac) <= 0x7F:
reply_text.append(chr(ord(charac) + 0xFEE0))
elif ord(charac) == 0x20:
reply_text.append(chr(0x3000))
else:
reply_text.append(charac)
await vpr.edit("".join(reply_text))
@register(outgoing=True, pattern="^\.str(?: |$)(.*)")
async def stretch(stret):
""" Stretch it."""
textx = await stret.get_reply_message()
message = stret.text
message = stret.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
return await stret.edit("`GiiiiiiiB sooooooomeeeeeee teeeeeeext!`")
count = randint(3, 10)
reply_text = sub(r"([aeiouAEIOUaeiouAEIOUаеиоуюяыэё])", (r"\1" * count),
message)
await stret.edit(reply_text)
@register(outgoing=True, pattern="^\.zal(?: |$)(.*)")
async def zal(zgfy):
""" Invoke the feeling of chaos. """
reply_text = list()
textx = await zgfy.get_reply_message()
message = zgfy.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
return await zgfy.edit(
"`gͫ ̆ i̛ ̺ v͇̆ ȅͅ a̢ͦ s̴̪ c̸̢ ä̸ rͩͣ y͖͞ t̨͚ é̠ x̢͖ t͔͛`"
)
for charac in message:
if not charac.isalpha():
reply_text.append(charac)
continue
for _ in range(0, 3):
rand = randint(0, 2)
if rand == 0:
charac = charac.strip() + \
choice(ZALG_LIST[0]).strip()
elif rand == 1:
charac = charac.strip() + \
choice(ZALG_LIST[1]).strip()
else:
charac = charac.strip() + \
choice(ZALG_LIST[2]).strip()
reply_text.append(charac)
await zgfy.edit("".join(reply_text))
@register(outgoing=True, pattern="^\.hi$")
async def hoi(hello):
""" Greet everyone! """
await hello.edit(choice(HELLOSTR))
@register(outgoing=True, pattern="^\.owo(?: |$)(.*)")
async def faces(owo):
""" UwU """
textx = await owo.get_reply_message()
message = owo.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
return await owo.edit("` UwU no text given! `")
reply_text = sub(r"(r|l)", "w", message)
reply_text = sub(r"(R|L)", "W", reply_text)
reply_text = sub(r"n([aeiou])", r"ny\1", reply_text)
reply_text = sub(r"N([aeiouAEIOU])", r"Ny\1", reply_text)
reply_text = sub(r"\!+", " " + choice(UWUS), reply_text)
reply_text = reply_text.replace("ove", "uv")
reply_text += " " + choice(UWUS)
await owo.edit(reply_text)
@register(outgoing=True, pattern="^\.react$")
async def react_meme(react):
""" Make your userbot react to everything. """
await react.edit(choice(FACEREACTS))
@register(outgoing=True, pattern="^\.shg$")
async def shrugger(shg):
r""" ¯\_(ツ)_/¯ """
await shg.edit(choice(SHGS))
@register(outgoing=True, pattern="^\.chase$")
async def police(chase):
""" Run boi run, i'm gonna catch you !! """
await chase.edit(choice(CHASE_STR))
@register(outgoing=True, pattern="^\.run$")
async def runner_lol(run):
""" Run, run, RUNNN! """
await run.edit(choice(RUNS_STR))
@register(outgoing=True, pattern="^\.metoo$")
async def metoo(hahayes):
""" Haha yes """
await hahayes.edit(choice(METOOSTR))
@register(outgoing=True, pattern="^\.oem$")
async def oem(e):
t = "Oem"
for j in range(16):
t = t[:-1] + "em"
await e.edit(t)
@register(outgoing=True, pattern="^\.Oem$")
async def Oem(e):
t = "Oem"
for j in range(16):
t = t[:-1] + "em"
await e.edit(t)
@register(outgoing=True, pattern="^\.10iq$")
async def iqless(e):
await e.edit("♿")
@register(outgoing=True, pattern="^\.moon$")
async def moon(event):
deq = deque(list("🌗🌘🌑🌒🌓🌔🌕🌖"))
try:
for x in range(32):
await sleep(0.1)
await event.edit("".join(deq))
deq.rotate(1)
except BaseException:
return
@register(outgoing=True, pattern="^\.clock$")
async def clock(event):
deq = deque(list("🕙🕘🕗🕖🕕🕔🕓🕒🕑🕐🕛"))
try:
for x in range(32):
await sleep(0.1)
await event.edit("".join(deq))
deq.rotate(1)
except BaseException:
return
@register(outgoing=True, pattern="^.rain$")
async def rain(event):
deq = deque(list("☀️🌤⛅️🌥☁️🌧⛈"))
try:
for x in range(32):
await sleep(0.1)
await event.edit("".join(deq))
deq.rotate(1)
except BaseException:
return
@register(outgoing=True, pattern="^.love$")
async def love(event):
deq = deque(list("❤️🧡💛💚💙💜🖤💕💞💓💗💖💘💝"))
try:
for x in range(32):
await sleep(0.1)
await event.edit("".join(deq))
deq.rotate(1)
except BaseException:
return
@register(outgoing=True, pattern="^.earth$")
async def earth(event):
deq = deque(list("🌏🌍🌎🌎🌍🌏🌍🌎"))
try:
for x in range(32):
await sleep(0.1)
await event.edit("".join(deq))
deq.rotate(1)
except BaseException:
return
@register(outgoing=True, pattern="^\.mock(?: |$)(.*)")
async def spongemocktext(mock):
""" Do it and find the real fun. """
reply_text = list()
textx = await mock.get_reply_message()
message = mock.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
return await mock.edit("`gIvE sOMEtHInG tO MoCk!`")
for charac in message:
if charac.isalpha() and randint(0, 1):
to_app = charac.upper() if charac.islower() else charac.lower()
reply_text.append(to_app)
else:
reply_text.append(charac)
await mock.edit("".join(reply_text))
@register(outgoing=True, pattern="^\.weeb(?: |$)(.*)")
async def weebify(e):
args = e.pattern_match.group(1)
if not args:
get = await e.get_reply_message()
args = get.text
if not args:
await e.edit("`What I am Supposed to Weebify U Dumb`")
return
string = ' '.join(args).lower()
for normiecharacter in string:
if normiecharacter in normiefont:
weebycharacter = weebyfont[normiefont.index(normiecharacter)]
string = string.replace(normiecharacter, weebycharacter)
await e.edit(string)
@register(outgoing=True, pattern="^\.clap(?: |$)(.*)")
async def claptext(memereview):
""" Praise people! """
textx = await memereview.get_reply_message()
message = memereview.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
return await memereview.edit("`Hah, I don't clap pointlessly!`")
reply_text = "👏 "
reply_text += message.replace(" ", " 👏 ")
reply_text += " 👏"
await memereview.edit(reply_text)
@register(outgoing=True, pattern="^\.bt$")
async def bluetext(bt_e):
""" Believe me, you will find this useful. """
if await bt_e.get_reply_message() and bt_e.is_group:
await bt_e.edit(
"/BLUETEXT /MUST /CLICK.\n"
"/ARE /YOU /A /STUPID /ANIMAL /WHICH /IS /ATTRACTED /TO /COLOURS?")
@register(outgoing=True, pattern=r"^\.f (.*)")
async def payf(event):
paytext = event.pattern_match.group(1)
pay = "{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}".format(
paytext * 8, paytext * 8, paytext * 2, paytext * 2, paytext * 2,
paytext * 6, paytext * 6, paytext * 2, paytext * 2, paytext * 2,
paytext * 2, paytext * 2)
await event.edit(pay)
@register(outgoing=True, pattern="^\.lfy (.*)")
async def let_me_google_that_for_you(lmgtfy_q):
textx = await lmgtfy_q.get_reply_message()
qry = lmgtfy_q.pattern_match.group(1)
if qry:
query = str(qry)
elif textx:
query = textx
query = query.message
query_encoded = query.replace(" ", "+")
lfy_url = f"http://lmgtfy.com/?s=g&iie=1&q={query_encoded}"
payload = {'format': 'json', 'url': lfy_url}
r = requests.get('http://is.gd/create.php', params=payload)
await lmgtfy_q.edit("Here you are, help yourself."
f"\n[{query}]({r.json()['shorturl']})")
@register(outgoing=True, pattern="^\.sayhi$")
async def sayhi(e):
await e.edit(
"\n💰💰💰💰💰💰💰💰💰💰💰💰"
"\n💰🔷💰💰💰🔷💰💰🔷🔷🔷💰"
"\n💰🔷💰💰💰🔷💰💰💰🔷💰💰"
"\n💰🔷💰💰💰🔷💰💰💰🔷💰💰"
"\n💰🔷🔷🔷🔷🔷💰💰💰🔷💰💰"
"\n💰🔷💰💰💰🔷💰💰💰🔷💰💰"
"\n💰🔷💰💰💰🔷💰💰💰🔷💰💰"
"\n💰🔷💰💰💰🔷💰💰🔷🔷🔷💰"
"\n💰💰💰💰💰💰💰💰💰💰💰💰")
@register(pattern=r".scam(?: |$)(.*)", outgoing=True)
async def scam(event):
""" Just a small command to fake chat actions for fun !! """
options = [
'typing', 'contact', 'game', 'location', 'voice', 'round', 'video',
'photo', 'document', 'cancel'
]
input_str = event.pattern_match.group(1)
args = input_str.split()
if len(args) == 0: # Let bot decide action and time
scam_action = choice(options)
scam_time = randint(30, 60)
elif len(args) == 1: # User decides time/action, bot decides the other.
        try:
            # a purely numeric argument is a duration; anything else is an action name
            scam_time = int(args[0])
            scam_action = choice(options)
        except ValueError:
            scam_action = str(args[0]).lower()
            scam_time = randint(30, 60)
elif len(args) == 2: # User decides both action and time
scam_action = str(args[0]).lower()
scam_time = int(args[1])
else:
await event.edit("`Invalid Syntax !!`")
return
try:
if (scam_time > 0):
await event.delete()
async with event.client.action(event.chat_id, scam_action):
await sleep(scam_time)
except BaseException:
return
@register(pattern=r".type(?: |$)(.*)", outgoing=True)
async def typewriter(typew):
""" Just a small command to make your keyboard become a typewriter! """
textx = await typew.get_reply_message()
message = typew.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
return await typew.edit("`Give a text to type!`")
sleep_time = 0.03
typing_symbol = "|"
old_text = ""
await typew.edit(typing_symbol)
await sleep(sleep_time)
for character in message:
old_text = old_text + "" + character
typing_text = old_text + "" + typing_symbol
await typew.edit(typing_text)
await sleep(sleep_time)
await typew.edit(old_text)
await sleep(sleep_time)
@register(outgoing=True, pattern="^\.leave$")
async def leave(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`You must Leaving dis Group kek!`")
@register(outgoing=True, pattern="^\.fail$")
async def fail(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄ `"
"`\n████▌▄▌▄▐▐▌█████ `"
"`\n████▌▄▌▄▐▐▌▀████ `"
"`\n▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀ `")
@register(outgoing=True, pattern="^\.lol$")
async def lol(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n╱┏┓╱╱╱╭━━━╮┏┓╱╱╱╱ `"
"`\n╱┃┃╱╱╱┃╭━╮┃┃┃╱╱╱╱ `"
"`\n╱┃┗━━┓┃╰━╯┃┃┗━━┓╱ `"
"`\n╱┗━━━┛╰━━━╯┗━━━┛╱ `")
@register(outgoing=True, pattern="^\.lool$")
async def lool(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n╭╭━━━╮╮┈┈┈┈┈┈┈┈┈┈\n┈┃╭━━╯┈┈┈┈▕╲▂▂╱▏┈\n┈┃┃╱▔▔▔▔▔▔▔▏╱▋▋╮┈`"
"`\n┈┃╰▏┃╱╭╮┃╱╱▏╱╱▆┃┈\n┈╰━▏┗━╰╯┗━╱╱╱╰┻┫┈\n┈┈┈▏┏┳━━━━▏┏┳━━╯┈`"
"`\n┈┈┈▏┃┃┈┈┈┈▏┃┃┈┈┈┈ `")
@register(outgoing=True, pattern="^\.stfu$")
async def stfu(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n██████████████████████████████`"
"`\n██▀▀▀▀████▀▀▀▀████▀▀▀▀▀███▀▀██▀▀█`"
"`\n█──────██──────██───────██──██──█`"
"`\n█──██▄▄████──████──███▄▄██──██──█`"
"`\n█▄────▀████──████────█████──██──█`"
"`\n█▀▀██──████──████──███████──██──█`"
"`\n█──────████──████──███████──────█`"
"`\n██▄▄▄▄█████▄▄████▄▄████████▄▄▄▄██`"
"`\n█████████████████████████████████`")
@register(outgoing=True, pattern="^\.gtfo$")
async def gtfo(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n███████████████████████████████ `"
"`\n█▀▀▀▀▀▀▀█▀▀▀▀▀▀█▀▀▀▀▀▀▀█▀▀▀▀▀▀█ `"
"`\n█───────█──────█───────█──────█ `"
"`\n█──███──███──███──███▄▄█──██──█ `"
"`\n█──███▄▄███──███─────███──██──█ `"
"`\n█──██───███──███──██████──██──█ `"
"`\n█──▀▀▀──███──███──██████──────█ `"
"`\n█▄▄▄▄▄▄▄███▄▄███▄▄██████▄▄▄▄▄▄█ `"
"`\n███████████████████████████████ `")
@register(outgoing=True, pattern="^\.nih$")
async def nih(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n(\_/)`"
"`\n(●_●)`"
"`\n />🌹 *ini buat kamu`"
"`\n `"
"`\n(\_/)`"
"`\n(●_●)`"
"`\n🌹<\ *tapi boong`")
@register(outgoing=True, pattern="^\.fag$")
async def fag(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n█████████`"
"`\n█▄█████▄█`"
"`\n█▼▼▼▼▼`"
"`\n█ STFU FAGGOT'S`"
"`\n█▲▲▲▲▲`"
"`\n█████████`"
"`\n ██ ██`")
@register(outgoing=True, pattern="^\.taco$")
async def taco(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("\n{\__/}"
"\n(●_●)"
"\n( >🌮 Want a taco?")
@register(outgoing=True, pattern="^\.paw$")
async def paw(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`(=ↀωↀ=)")
@register(outgoing=True, pattern="^\.tf$")
async def tf(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("(̿▀̿ ̿Ĺ̯̿̿▀̿ ̿)̄ ")
@register(outgoing=True, pattern="^\.gey$")
async def gey(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n┈┈┈╭━━━━━╮┈┈┈┈┈\n┈┈┈┃┊┊┊┊┊┃┈┈┈┈┈`"
"`\n┈┈┈┃┊┊╭━╮┻╮┈┈┈┈\n┈┈┈╱╲┊┃▋┃▋┃┈┈┈┈\n┈┈╭┻┊┊╰━┻━╮┈┈┈┈`"
"`\n┈┈╰┳┊╭━━━┳╯┈┈┈┈\n┈┈┈┃┊┃╰━━┫┈NIGGA U GEY`"
"\n┈┈┈┈┈┈┏━┓┈┈┈┈┈┈")
@register(outgoing=True, pattern="^\.gay$")
async def gay(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n┈┈┈╭━━━━━╮┈┈┈┈┈\n┈┈┈┃┊┊┊┊┊┃┈┈┈┈┈`"
"`\n┈┈┈┃┊┊╭━╮┻╮┈┈┈┈\n┈┈┈╱╲┊┃▋┃▋┃┈┈┈┈\n┈┈╭┻┊┊╰━┻━╮┈┈┈┈`"
"`\n┈┈╰┳┊╭━━━┳╯┈┈┈┈\n┈┈┈┃┊┃╰━━┫┈BAPAQ U GAY`"
"\n┈┈┈┈┈┈┏━┓┈┈┈┈┈┈")
@register(outgoing=True, pattern="^\.bot$")
async def bot(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("` \n ╲╲╭━━━━╮ \n╭╮┃▆┈┈▆┃╭╮ \n┃╰┫▽▽▽┣╯┃ \n╰━┫△△△┣━╯`"
"`\n╲╲┃┈┈┈┈┃ \n╲╲┃┈┏┓┈┃ `")
@register(outgoing=True, pattern="^\.hey$")
async def hey(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("\n┈┈┈╱▔▔▔▔╲┈╭━━━━━\n┈┈▕▂▂▂▂▂▂▏┃HEY!┊😀`"
"`\n┈┈▕▔▇▔▔┳▔▏╰┳╮HEY!┊\n┈┈▕╭━╰╯━╮▏━╯╰━━━\n╱▔▔▏▅▅▅▅▕▔▔╲┈┈┈┈`"
"`\n▏┈┈╲▂▂▂▂╱┈┈┈▏┈┈┈`")
@register(outgoing=True, pattern="^\.nou$")
async def nou(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n┈╭╮╭╮\n┈┃┃┃┃\n╭┻┗┻┗╮`"
"`\n┃┈▋┈▋┃\n┃┈╭▋━╮━╮\n┃┈┈╭╰╯╰╯╮`"
"`\n┫┈┈ NoU\n┃┈╰╰━━━━╯`"
"`\n┗━━┻━┛`")
@register(outgoing=True, pattern="^\.iwi(?: |$)(.*)")
async def faces(siwis):
""" IwI """
textx = await siwis.get_reply_message()
message = siwis.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
await siwis.edit("` IwI no text given! `")
return
reply_text = sub(r"(a|i|u|e|o)", "i", message)
reply_text = sub(r"(A|I|U|E|O)", "I", reply_text)
reply_text = sub(r"\!+", " " + choice(IWIS), reply_text)
reply_text += " " + choice(IWIS)
await siwis.edit(reply_text)
@register(outgoing=True, pattern="^.koc$")
async def koc(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("8✊===D")
await e.edit("8=✊==D")
await e.edit("8==✊=D")
await e.edit("8===✊D")
await e.edit("8==✊=D")
await e.edit("8=✊==D")
await e.edit("8✊===D")
await e.edit("8=✊==D")
await e.edit("8==✊=D")
await e.edit("8===✊D")
await e.edit("8==✊=D")
await e.edit("8=✊==D")
await e.edit("8✊===D")
await e.edit("8=✊==D")
await e.edit("8==✊=D")
await e.edit("8===✊D")
await e.edit("8===✊D💦")
await e.edit("8====D💦💦")
await e.edit("crooottssssssss")
await e.edit(choice(FACEREACTS))
@register(outgoing=True, pattern="^\.shg$")
async def shrugger(shg):
r""" ¯\_(ツ)_/¯ """
await shg.edit(choice(SHGS))
@register(outgoing=True, pattern=r"^\.(?:penis|dick)\s?(.)?")
async def emoji_penis(e):
emoji = e.pattern_match.group(1)
titid = GAMBAR_TITIT
if emoji:
titid = titid.replace('🤡', emoji)
await e.edit(titid)
CMD_HELP.update({
"memes":
">`.cowsay`"
"\nUsage: cow which says things."
"\n\n>`.cp`"
"\nUsage: Copypasta the famous meme"
"\n\n>`.vapor`"
"\nUsage: Vaporize everything!"
"\n\n>`.str`"
"\nUsage: Stretch it."
"\n\n>`.10iq`"
"\nUsage: You retard !!"
"\n\n>`.zal`"
"\nUsage: Invoke the feeling of chaos."
"\n\n>`Oem`"
"\nUsage: Oeeeem"
"\n\n>`.fp`"
"\nUsage: Facepalm :P"
"\n\n>`.moon`"
"\nUsage: kensar moon animation."
"\n\n>`.clock`"
"\nUsage: kensar clock animation."
"\n\n>`.hi`"
"\nUsage: Greet everyone!"
"\n\n>`.coinflip <heads/tails>`"
"\nUsage: Flip a coin !!"
"\n\n>`.owo`"
"\nUsage: UwU"
"\n\n>`.react`"
"\nUsage: Make your userbot react to everything."
"\n\n>`.slap` <id/jutsu>"
"\nUsage: reply to slap them with random objects !!"
"\n\n>`.cry`"
"\nUsage: y u du dis, i cri."
"\n\n>`.shg`"
"\nUsage: Shrug at it !!"
"\n\n>`.run`"
"\nUsage: Let Me Run, run, RUNNN!"
"\n\n>`.chase`"
"\nUsage: You better start running"
"\n\n>`.metoo`"
"\nUsage: Haha yes"
"\n\n>`.mock`"
"\nUsage: Do it and find the real fun."
"\n\n>`.clap`"
"\nUsage: Praise people!"
"\n\n>`.boobs`"
"\nUsage: Get b00bs imej"
"\n\n>`.butts`"
"\nUsage: Get 🅱️utts imej"
"\n\n>`.f <emoji/character>`"
"\nUsage: Pay Respects."
"\n\n>`.bt`"
"\nUsage: Believe me, you will find this useful."
"\n\n>`.weeb`"
"\nUsage: To Weeb-ify your text."
"\n\n>`.type`"
"\nUsage: Just a small command to make your keyboard become a typewriter!"
"\n\n>`.lfy <query>`"
"\nUsage: Let me Google that for you real quick !!"
"\n\n>`.decide [Alternates: (.yes, .no, .maybe)]`"
"\nUsage: Make a quick decision."
"\n\n>`.scam <action> <time>`"
"\n[Available Actions: (typing, contact, game, location, voice, round, video, photo, document, cancel)]"
"\nUsage: Create fake chat actions, for fun. (Default action: typing)"
"\n\nAnd many more"
"`\n>.nou ; .bot ; .gey ; .gey ; .tf ; .paw ; .taco ; .nih ;`"
"`\n>.fag ; .gtfo ; .stfu ; .lol ; .lool ; .fail ; .leave`"
"`\n>.iwi ; .sayhi` ; .koc` ; .earth` ; .love` ; .rain` ; .penis`"
"\n\n\nThanks to 🅱️ottom🅱️ext🅱️ot (@NotAMemeBot) for some of these."
})
|
the-stack_106_19390
|
# -*- coding: utf-8 -*-
# (C) Copyright Ji Liu and Luciano Bello 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
from os import path
from qiskit import QuantumCircuit
QASMDIR = path.dirname(path.abspath(__file__))
def circuits():
for n_qubits in range(2, 15, 2):
filename = "VQE_" + str(n_qubits) + "node.qasm"
qc = QuantumCircuit.from_qasm_file(path.join(QASMDIR, filename))
yield qc
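if __name__ == "__main__":
    # Illustrative usage sketch (added; not part of the original module).
    # Assumes the VQE_<n>node.qasm files are present alongside this script.
    for circuit in circuits():
        print(f"{circuit.num_qubits} qubits, depth {circuit.depth()}")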
|
the-stack_106_19392
|
import datetime
import os
import tempfile
from io import StringIO
from wsgiref.util import FileWrapper
from django import forms
from django.conf.urls import url
from django.contrib import admin
from django.contrib.admin import BooleanFieldListFilter
from django.contrib.admin.views.main import ChangeList
from django.contrib.auth.admin import GroupAdmin, UserAdmin
from django.contrib.auth.models import Group, User
from django.core.exceptions import ValidationError
from django.core.files.storage import FileSystemStorage
from django.core.mail import EmailMessage
from django.db import models
from django.forms.models import BaseModelFormSet
from django.http import HttpResponse, StreamingHttpResponse
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from .forms import MediaActionForm
from .models import (
Actor, AdminOrderedAdminMethod, AdminOrderedCallable, AdminOrderedField,
AdminOrderedModelMethod, Album, Answer, Answer2, Article, BarAccount, Book,
Bookmark, Category, Chapter, ChapterXtra1, Child, ChildOfReferer, Choice,
City, Collector, Color, Color2, ComplexSortedPerson, CoverLetter,
CustomArticle, CyclicOne, CyclicTwo, DependentChild, DooHickey, EmptyModel,
EmptyModelHidden, EmptyModelMixin, EmptyModelVisible, ExplicitlyProvidedPK,
ExternalSubscriber, Fabric, FancyDoodad, FieldOverridePost,
FilteredManager, FooAccount, FoodDelivery, FunkyTag, Gadget, Gallery,
GenRelReference, Grommet, ImplicitlyGeneratedPK, Ingredient,
InlineReference, InlineReferer, Inquisition, Language, Link,
MainPrepopulated, ModelWithStringPrimaryKey, NotReferenced, OldSubscriber,
OtherStory, Paper, Parent, ParentWithDependentChildren, ParentWithUUIDPK,
Person, Persona, Picture, Pizza, Plot, PlotDetails, PlotProxy,
PluggableSearchPerson, Podcast, Post, PrePopulatedPost,
PrePopulatedPostLargeSlug, PrePopulatedSubPost, Promo, Question,
ReadablePizza, ReadOnlyPizza, Recipe, Recommendation, Recommender,
ReferencedByGenRel, ReferencedByInline, ReferencedByParent,
RelatedPrepopulated, RelatedWithUUIDPKModel, Report, Reservation,
Restaurant, RowLevelChangePermissionModel, Section, ShortMessage, Simple,
Sketch, State, Story, StumpJoke, Subscriber, SuperVillain, Telegram, Thing,
Topping, UnchangeableObject, UndeletableObject, UnorderedObject,
UserMessenger, Villain, Vodcast, Whatsit, Widget, Worker, WorkHour,
)
def callable_year(dt_value):
try:
return dt_value.year
except AttributeError:
return None
callable_year.admin_order_field = 'date'
class ArticleInline(admin.TabularInline):
model = Article
fk_name = 'section'
prepopulated_fields = {
'title': ('content',)
}
fieldsets = (
('Some fields', {
'classes': ('collapse',),
'fields': ('title', 'content')
}),
('Some other fields', {
'classes': ('wide',),
'fields': ('date', 'section')
})
)
class ChapterInline(admin.TabularInline):
model = Chapter
class ChapterXtra1Admin(admin.ModelAdmin):
list_filter = (
'chap',
'chap__title',
'chap__book',
'chap__book__name',
'chap__book__promo',
'chap__book__promo__name',
'guest_author__promo__book',
)
class ArticleForm(forms.ModelForm):
extra_form_field = forms.BooleanField(required=False)
class Meta:
fields = '__all__'
model = Article
class ArticleAdmin(admin.ModelAdmin):
list_display = (
'content', 'date', callable_year, 'model_year', 'modeladmin_year',
'model_year_reversed', 'section', lambda obj: obj.title,
'order_by_expression',
)
list_editable = ('section',)
list_filter = ('date', 'section')
autocomplete_fields = ('section',)
view_on_site = False
form = ArticleForm
fieldsets = (
('Some fields', {
'classes': ('collapse',),
'fields': ('title', 'content', 'extra_form_field'),
}),
('Some other fields', {
'classes': ('wide',),
'fields': ('date', 'section', 'sub_section')
})
)
def order_by_expression(self, obj):
return obj.model_year
# This ordering isn't particularly useful but shows that expressions can
# be used for admin_order_field.
order_by_expression.admin_order_field = models.F('date') + datetime.timedelta(days=3)
def changelist_view(self, request):
return super().changelist_view(request, extra_context={'extra_var': 'Hello!'})
def modeladmin_year(self, obj):
return obj.date.year
modeladmin_year.admin_order_field = 'date'
modeladmin_year.short_description = None
def delete_model(self, request, obj):
EmailMessage(
'Greetings from a deleted object',
'I hereby inform you that some user deleted me',
'[email protected]',
['[email protected]']
).send()
return super().delete_model(request, obj)
def save_model(self, request, obj, form, change=True):
EmailMessage(
'Greetings from a created object',
'I hereby inform you that some user created me',
'[email protected]',
['[email protected]']
).send()
return super().save_model(request, obj, form, change)
class ArticleAdmin2(admin.ModelAdmin):
def has_module_permission(self, request):
return False
class RowLevelChangePermissionModelAdmin(admin.ModelAdmin):
def has_change_permission(self, request, obj=None):
""" Only allow changing objects with even id number """
return request.user.is_staff and (obj is not None) and (obj.id % 2 == 0)
def has_view_permission(self, request, obj=None):
"""Only allow viewing objects if id is a multiple of 3."""
return request.user.is_staff and obj is not None and obj.id % 3 == 0
class CustomArticleAdmin(admin.ModelAdmin):
"""
Tests various hooks for using custom templates and contexts.
"""
change_list_template = 'custom_admin/change_list.html'
change_form_template = 'custom_admin/change_form.html'
add_form_template = 'custom_admin/add_form.html'
object_history_template = 'custom_admin/object_history.html'
delete_confirmation_template = 'custom_admin/delete_confirmation.html'
delete_selected_confirmation_template = 'custom_admin/delete_selected_confirmation.html'
popup_response_template = 'custom_admin/popup_response.html'
def changelist_view(self, request):
return super().changelist_view(request, extra_context={'extra_var': 'Hello!'})
class ThingAdmin(admin.ModelAdmin):
list_filter = ('color', 'color__warm', 'color__value', 'pub_date')
class InquisitionAdmin(admin.ModelAdmin):
list_display = ('leader', 'country', 'expected', 'sketch')
def sketch(self, obj):
# A method with the same name as a reverse accessor.
return 'list-display-sketch'
class SketchAdmin(admin.ModelAdmin):
raw_id_fields = ('inquisition', 'defendant0', 'defendant1')
class FabricAdmin(admin.ModelAdmin):
list_display = ('surface',)
list_filter = ('surface',)
class BasePersonModelFormSet(BaseModelFormSet):
def clean(self):
for person_dict in self.cleaned_data:
person = person_dict.get('id')
alive = person_dict.get('alive')
if person and alive and person.name == "Grace Hopper":
raise forms.ValidationError("Grace is not a Zombie")
class PersonAdmin(admin.ModelAdmin):
list_display = ('name', 'gender', 'alive')
list_editable = ('gender', 'alive')
list_filter = ('gender',)
search_fields = ('^name',)
save_as = True
def get_changelist_formset(self, request, **kwargs):
return super().get_changelist_formset(request, formset=BasePersonModelFormSet, **kwargs)
def get_queryset(self, request):
# Order by a field that isn't in list display, to be able to test
# whether ordering is preserved.
return super().get_queryset(request).order_by('age')
class FooAccountAdmin(admin.StackedInline):
model = FooAccount
extra = 1
class BarAccountAdmin(admin.StackedInline):
model = BarAccount
extra = 1
class PersonaAdmin(admin.ModelAdmin):
inlines = (
FooAccountAdmin,
BarAccountAdmin
)
class SubscriberAdmin(admin.ModelAdmin):
actions = ['mail_admin']
action_form = MediaActionForm
def delete_queryset(self, request, queryset):
SubscriberAdmin.overridden = True
super().delete_queryset(request, queryset)
def mail_admin(self, request, selected):
EmailMessage(
'Greetings from a ModelAdmin action',
'This is the test email from an admin action',
'[email protected]',
['[email protected]']
).send()
def external_mail(modeladmin, request, selected):
EmailMessage(
'Greetings from a function action',
'This is the test email from a function action',
'[email protected]',
['[email protected]']
).send()
external_mail.short_description = 'External mail (Another awesome action)'
def redirect_to(modeladmin, request, selected):
from django.http import HttpResponseRedirect
return HttpResponseRedirect('/some-where-else/')
redirect_to.short_description = 'Redirect to (Awesome action)'
def download(modeladmin, request, selected):
buf = StringIO('This is the content of the file')
return StreamingHttpResponse(FileWrapper(buf))
download.short_description = 'Download subscription'
def no_perm(modeladmin, request, selected):
return HttpResponse(content='No permission to perform this action', status=403)
no_perm.short_description = 'No permission to run'
class ExternalSubscriberAdmin(admin.ModelAdmin):
actions = [redirect_to, external_mail, download, no_perm]
class PodcastAdmin(admin.ModelAdmin):
list_display = ('name', 'release_date')
list_editable = ('release_date',)
date_hierarchy = 'release_date'
ordering = ('name',)
class VodcastAdmin(admin.ModelAdmin):
list_display = ('name', 'released')
list_editable = ('released',)
ordering = ('name',)
class ChildInline(admin.StackedInline):
model = Child
class ParentAdmin(admin.ModelAdmin):
model = Parent
inlines = [ChildInline]
save_as = True
list_display = ('id', 'name',)
list_display_links = ('id',)
list_editable = ('name',)
def save_related(self, request, form, formsets, change):
super().save_related(request, form, formsets, change)
first_name, last_name = form.instance.name.split()
for child in form.instance.child_set.all():
if len(child.name.split()) < 2:
child.name = child.name + ' ' + last_name
child.save()
class EmptyModelAdmin(admin.ModelAdmin):
def get_queryset(self, request):
return super().get_queryset(request).filter(pk__gt=1)
class OldSubscriberAdmin(admin.ModelAdmin):
actions = None
temp_storage = FileSystemStorage(tempfile.mkdtemp())
UPLOAD_TO = os.path.join(temp_storage.location, 'test_upload')
class PictureInline(admin.TabularInline):
model = Picture
extra = 1
class GalleryAdmin(admin.ModelAdmin):
inlines = [PictureInline]
class PictureAdmin(admin.ModelAdmin):
pass
class LanguageAdmin(admin.ModelAdmin):
list_display = ['iso', 'shortlist', 'english_name', 'name']
list_editable = ['shortlist']
class RecommendationAdmin(admin.ModelAdmin):
show_full_result_count = False
search_fields = ('=titletranslation__text', '=the_recommender__titletranslation__text',)
class WidgetInline(admin.StackedInline):
model = Widget
class DooHickeyInline(admin.StackedInline):
model = DooHickey
class GrommetInline(admin.StackedInline):
model = Grommet
class WhatsitInline(admin.StackedInline):
model = Whatsit
class FancyDoodadInline(admin.StackedInline):
model = FancyDoodad
class CategoryAdmin(admin.ModelAdmin):
list_display = ('id', 'collector', 'order')
list_editable = ('order',)
class CategoryInline(admin.StackedInline):
model = Category
class CollectorAdmin(admin.ModelAdmin):
inlines = [
WidgetInline, DooHickeyInline, GrommetInline, WhatsitInline,
FancyDoodadInline, CategoryInline
]
class LinkInline(admin.TabularInline):
model = Link
extra = 1
readonly_fields = ("posted", "multiline", "readonly_link_content")
def multiline(self, instance):
return "InlineMultiline\ntest\nstring"
class SubPostInline(admin.TabularInline):
model = PrePopulatedSubPost
prepopulated_fields = {
'subslug': ('subtitle',)
}
def get_readonly_fields(self, request, obj=None):
if obj and obj.published:
return ('subslug',)
return self.readonly_fields
def get_prepopulated_fields(self, request, obj=None):
if obj and obj.published:
return {}
return self.prepopulated_fields
class PrePopulatedPostAdmin(admin.ModelAdmin):
list_display = ['title', 'slug']
prepopulated_fields = {
'slug': ('title',)
}
inlines = [SubPostInline]
def get_readonly_fields(self, request, obj=None):
if obj and obj.published:
return ('slug',)
return self.readonly_fields
def get_prepopulated_fields(self, request, obj=None):
if obj and obj.published:
return {}
return self.prepopulated_fields
class PostAdmin(admin.ModelAdmin):
list_display = ['title', 'public']
readonly_fields = (
'posted', 'awesomeness_level', 'coolness', 'value',
'multiline', 'multiline_html', lambda obj: "foo",
'readonly_content',
)
inlines = [
LinkInline
]
def coolness(self, instance):
if instance.pk:
return "%d amount of cool." % instance.pk
else:
return "Unknown coolness."
def value(self, instance):
return 1000
value.short_description = 'Value in $US'
def multiline(self, instance):
return "Multiline\ntest\nstring"
def multiline_html(self, instance):
return mark_safe("Multiline<br>\nhtml<br>\ncontent")
class FieldOverridePostForm(forms.ModelForm):
model = FieldOverridePost
class Meta:
help_texts = {
'posted': 'Overridden help text for the date',
}
labels = {
'public': 'Overridden public label',
}
class FieldOverridePostAdmin(PostAdmin):
form = FieldOverridePostForm
class CustomChangeList(ChangeList):
def get_queryset(self, request):
return self.root_queryset.order_by('pk').filter(pk=9999) # Doesn't exist
class GadgetAdmin(admin.ModelAdmin):
def get_changelist(self, request, **kwargs):
return CustomChangeList
class ToppingAdmin(admin.ModelAdmin):
readonly_fields = ('pizzas',)
class PizzaAdmin(admin.ModelAdmin):
readonly_fields = ('toppings',)
class StudentAdmin(admin.ModelAdmin):
search_fields = ('name',)
class ReadOnlyPizzaAdmin(admin.ModelAdmin):
readonly_fields = ('name', 'toppings')
def has_add_permission(self, request):
return False
def has_change_permission(self, request, obj=None):
return True
def has_delete_permission(self, request, obj=None):
return True
class WorkHourAdmin(admin.ModelAdmin):
list_display = ('datum', 'employee')
list_filter = ('employee',)
class FoodDeliveryAdmin(admin.ModelAdmin):
list_display = ('reference', 'driver', 'restaurant')
list_editable = ('driver', 'restaurant')
class CoverLetterAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom get_queryset() method that uses defer(), to test
verbose_name display in messages shown after adding/editing CoverLetter
instances. Note that the CoverLetter model defines a __str__ method.
For testing fix for ticket #14529.
"""
def get_queryset(self, request):
return super().get_queryset(request).defer('date_written')
class PaperAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom get_queryset() method that uses only(), to test
verbose_name display in messages shown after adding/editing Paper
instances.
For testing fix for ticket #14529.
"""
def get_queryset(self, request):
return super().get_queryset(request).only('title')
class ShortMessageAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom get_queryset() method that uses defer(), to test
verbose_name display in messages shown after adding/editing ShortMessage
instances.
For testing fix for ticket #14529.
"""
def get_queryset(self, request):
return super().get_queryset(request).defer('timestamp')
class TelegramAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom get_queryset() method that uses only(), to test
verbose_name display in messages shown after adding/editing Telegram
instances. Note that the Telegram model defines a __str__ method.
For testing fix for ticket #14529.
"""
def get_queryset(self, request):
return super().get_queryset(request).only('title')
class StoryForm(forms.ModelForm):
class Meta:
widgets = {'title': forms.HiddenInput}
class StoryAdmin(admin.ModelAdmin):
list_display = ('id', 'title', 'content')
list_display_links = ('title',) # 'id' not in list_display_links
list_editable = ('content',)
form = StoryForm
ordering = ['-id']
class OtherStoryAdmin(admin.ModelAdmin):
list_display = ('id', 'title', 'content')
list_display_links = ('title', 'id') # 'id' in list_display_links
list_editable = ('content',)
ordering = ['-id']
class ComplexSortedPersonAdmin(admin.ModelAdmin):
list_display = ('name', 'age', 'is_employee', 'colored_name')
ordering = ('name',)
def colored_name(self, obj):
return format_html('<span style="color: #ff00ff;">{}</span>', obj.name)
colored_name.admin_order_field = 'name'
class PluggableSearchPersonAdmin(admin.ModelAdmin):
list_display = ('name', 'age')
search_fields = ('name',)
def get_search_results(self, request, queryset, search_term):
queryset, use_distinct = super().get_search_results(request, queryset, search_term)
try:
search_term_as_int = int(search_term)
except ValueError:
pass
else:
queryset |= self.model.objects.filter(age=search_term_as_int)
return queryset, use_distinct
class AlbumAdmin(admin.ModelAdmin):
list_filter = ['title']
class QuestionAdmin(admin.ModelAdmin):
ordering = ['-posted']
search_fields = ['question']
autocomplete_fields = ['related_questions']
class AnswerAdmin(admin.ModelAdmin):
autocomplete_fields = ['question']
class PrePopulatedPostLargeSlugAdmin(admin.ModelAdmin):
prepopulated_fields = {
'slug': ('title',)
}
class AdminOrderedFieldAdmin(admin.ModelAdmin):
ordering = ('order',)
list_display = ('stuff', 'order')
class AdminOrderedModelMethodAdmin(admin.ModelAdmin):
ordering = ('order',)
list_display = ('stuff', 'some_order')
class AdminOrderedAdminMethodAdmin(admin.ModelAdmin):
def some_admin_order(self, obj):
return obj.order
some_admin_order.admin_order_field = 'order'
ordering = ('order',)
list_display = ('stuff', 'some_admin_order')
def admin_ordered_callable(obj):
return obj.order
admin_ordered_callable.admin_order_field = 'order'
class AdminOrderedCallableAdmin(admin.ModelAdmin):
ordering = ('order',)
list_display = ('stuff', admin_ordered_callable)
class ReportAdmin(admin.ModelAdmin):
def extra(self, request):
return HttpResponse()
def get_urls(self):
# Corner case: Don't call parent implementation
return [url(r'^extra/$', self.extra, name='cable_extra')]
class CustomTemplateBooleanFieldListFilter(BooleanFieldListFilter):
template = 'custom_filter_template.html'
class CustomTemplateFilterColorAdmin(admin.ModelAdmin):
list_filter = (('warm', CustomTemplateBooleanFieldListFilter),)
# For Selenium Prepopulated tests -------------------------------------
class RelatedPrepopulatedInline1(admin.StackedInline):
fieldsets = (
(None, {
'fields': (
('fk', 'm2m'),
('pubdate', 'status'),
('name', 'slug1', 'slug2',),
),
}),
)
formfield_overrides = {models.CharField: {'strip': False}}
model = RelatedPrepopulated
extra = 1
autocomplete_fields = ['fk', 'm2m']
prepopulated_fields = {'slug1': ['name', 'pubdate'],
'slug2': ['status', 'name']}
class RelatedPrepopulatedInline2(admin.TabularInline):
model = RelatedPrepopulated
extra = 1
autocomplete_fields = ['fk', 'm2m']
prepopulated_fields = {'slug1': ['name', 'pubdate'],
'slug2': ['status', 'name']}
class RelatedPrepopulatedInline3(admin.TabularInline):
model = RelatedPrepopulated
extra = 0
autocomplete_fields = ['fk', 'm2m']
class MainPrepopulatedAdmin(admin.ModelAdmin):
inlines = [RelatedPrepopulatedInline1, RelatedPrepopulatedInline2, RelatedPrepopulatedInline3]
fieldsets = (
(None, {
'fields': (('pubdate', 'status'), ('name', 'slug1', 'slug2', 'slug3'))
}),
)
formfield_overrides = {models.CharField: {'strip': False}}
prepopulated_fields = {
'slug1': ['name', 'pubdate'],
'slug2': ['status', 'name'],
'slug3': ['name'],
}
class UnorderedObjectAdmin(admin.ModelAdmin):
list_display = ['id', 'name']
list_display_links = ['id']
list_editable = ['name']
list_per_page = 2
class UndeletableObjectAdmin(admin.ModelAdmin):
def change_view(self, *args, **kwargs):
kwargs['extra_context'] = {'show_delete': False}
return super().change_view(*args, **kwargs)
class UnchangeableObjectAdmin(admin.ModelAdmin):
def get_urls(self):
# Disable change_view, but leave other urls untouched
urlpatterns = super().get_urls()
return [p for p in urlpatterns if p.name and not p.name.endswith("_change")]
def callable_on_unknown(obj):
return obj.unknown
class AttributeErrorRaisingAdmin(admin.ModelAdmin):
list_display = [callable_on_unknown]
class CustomManagerAdmin(admin.ModelAdmin):
def get_queryset(self, request):
return FilteredManager.objects
class MessageTestingAdmin(admin.ModelAdmin):
actions = ["message_debug", "message_info", "message_success",
"message_warning", "message_error", "message_extra_tags"]
def message_debug(self, request, selected):
self.message_user(request, "Test debug", level="debug")
def message_info(self, request, selected):
self.message_user(request, "Test info", level="info")
def message_success(self, request, selected):
self.message_user(request, "Test success", level="success")
def message_warning(self, request, selected):
self.message_user(request, "Test warning", level="warning")
def message_error(self, request, selected):
self.message_user(request, "Test error", level="error")
def message_extra_tags(self, request, selected):
self.message_user(request, "Test tags", extra_tags="extra_tag")
class ChoiceList(admin.ModelAdmin):
list_display = ['choice']
readonly_fields = ['choice']
fields = ['choice']
class DependentChildAdminForm(forms.ModelForm):
"""
Issue #20522
Form to test child dependency on parent object's validation
"""
def clean(self):
parent = self.cleaned_data.get('parent')
if parent.family_name and parent.family_name != self.cleaned_data.get('family_name'):
raise ValidationError("Children must share a family name with their parents " +
"in this contrived test case")
return super().clean()
class DependentChildInline(admin.TabularInline):
model = DependentChild
form = DependentChildAdminForm
class ParentWithDependentChildrenAdmin(admin.ModelAdmin):
inlines = [DependentChildInline]
# Tests for ticket 11277 ----------------------------------
class FormWithoutHiddenField(forms.ModelForm):
first = forms.CharField()
second = forms.CharField()
class FormWithoutVisibleField(forms.ModelForm):
first = forms.CharField(widget=forms.HiddenInput)
second = forms.CharField(widget=forms.HiddenInput)
class FormWithVisibleAndHiddenField(forms.ModelForm):
first = forms.CharField(widget=forms.HiddenInput)
second = forms.CharField()
class EmptyModelVisibleAdmin(admin.ModelAdmin):
form = FormWithoutHiddenField
fieldsets = (
(None, {
'fields': (('first', 'second'),),
}),
)
class EmptyModelHiddenAdmin(admin.ModelAdmin):
form = FormWithoutVisibleField
fieldsets = EmptyModelVisibleAdmin.fieldsets
class EmptyModelMixinAdmin(admin.ModelAdmin):
form = FormWithVisibleAndHiddenField
fieldsets = EmptyModelVisibleAdmin.fieldsets
class CityInlineAdmin(admin.TabularInline):
model = City
view_on_site = False
class StateAdmin(admin.ModelAdmin):
inlines = [CityInlineAdmin]
class RestaurantInlineAdmin(admin.TabularInline):
model = Restaurant
view_on_site = True
class CityAdmin(admin.ModelAdmin):
inlines = [RestaurantInlineAdmin]
view_on_site = True
class WorkerAdmin(admin.ModelAdmin):
def view_on_site(self, obj):
return '/worker/%s/%s/' % (obj.surname, obj.name)
class WorkerInlineAdmin(admin.TabularInline):
model = Worker
def view_on_site(self, obj):
return '/worker_inline/%s/%s/' % (obj.surname, obj.name)
class RestaurantAdmin(admin.ModelAdmin):
inlines = [WorkerInlineAdmin]
view_on_site = False
def get_changeform_initial_data(self, request):
return {'name': 'overridden_value'}
class FunkyTagAdmin(admin.ModelAdmin):
list_display = ('name', 'content_object')
class InlineReferenceInline(admin.TabularInline):
model = InlineReference
class InlineRefererAdmin(admin.ModelAdmin):
inlines = [InlineReferenceInline]
class PlotReadonlyAdmin(admin.ModelAdmin):
readonly_fields = ('plotdetails',)
class GetFormsetsArgumentCheckingAdmin(admin.ModelAdmin):
fields = ['name']
def add_view(self, request, *args, **kwargs):
request.is_add_view = True
return super().add_view(request, *args, **kwargs)
def change_view(self, request, *args, **kwargs):
request.is_add_view = False
return super().change_view(request, *args, **kwargs)
def get_formsets_with_inlines(self, request, obj=None):
if request.is_add_view and obj is not None:
raise Exception("'obj' passed to get_formsets_with_inlines wasn't None during add_view")
if not request.is_add_view and obj is None:
raise Exception("'obj' passed to get_formsets_with_inlines was None during change_view")
return super().get_formsets_with_inlines(request, obj)
site = admin.AdminSite(name="admin")
site.site_url = '/my-site-url/'
site.register(Article, ArticleAdmin)
site.register(CustomArticle, CustomArticleAdmin)
site.register(
Section, save_as=True, inlines=[ArticleInline],
readonly_fields=['name_property'], search_fields=['name'],
)
site.register(ModelWithStringPrimaryKey)
site.register(Color)
site.register(Thing, ThingAdmin)
site.register(Actor)
site.register(Inquisition, InquisitionAdmin)
site.register(Sketch, SketchAdmin)
site.register(Person, PersonAdmin)
site.register(Persona, PersonaAdmin)
site.register(Subscriber, SubscriberAdmin)
site.register(ExternalSubscriber, ExternalSubscriberAdmin)
site.register(OldSubscriber, OldSubscriberAdmin)
site.register(Podcast, PodcastAdmin)
site.register(Vodcast, VodcastAdmin)
site.register(Parent, ParentAdmin)
site.register(EmptyModel, EmptyModelAdmin)
site.register(Fabric, FabricAdmin)
site.register(Gallery, GalleryAdmin)
site.register(Picture, PictureAdmin)
site.register(Language, LanguageAdmin)
site.register(Recommendation, RecommendationAdmin)
site.register(Recommender)
site.register(Collector, CollectorAdmin)
site.register(Category, CategoryAdmin)
site.register(Post, PostAdmin)
site.register(FieldOverridePost, FieldOverridePostAdmin)
site.register(Gadget, GadgetAdmin)
site.register(Villain)
site.register(SuperVillain)
site.register(Plot)
site.register(PlotDetails)
site.register(PlotProxy, PlotReadonlyAdmin)
site.register(Bookmark)
site.register(CyclicOne)
site.register(CyclicTwo)
site.register(WorkHour, WorkHourAdmin)
site.register(Reservation)
site.register(FoodDelivery, FoodDeliveryAdmin)
site.register(RowLevelChangePermissionModel, RowLevelChangePermissionModelAdmin)
site.register(Paper, PaperAdmin)
site.register(CoverLetter, CoverLetterAdmin)
site.register(ShortMessage, ShortMessageAdmin)
site.register(Telegram, TelegramAdmin)
site.register(Story, StoryAdmin)
site.register(OtherStory, OtherStoryAdmin)
site.register(Report, ReportAdmin)
site.register(MainPrepopulated, MainPrepopulatedAdmin)
site.register(UnorderedObject, UnorderedObjectAdmin)
site.register(UndeletableObject, UndeletableObjectAdmin)
site.register(UnchangeableObject, UnchangeableObjectAdmin)
site.register(State, StateAdmin)
site.register(City, CityAdmin)
site.register(Restaurant, RestaurantAdmin)
site.register(Worker, WorkerAdmin)
site.register(FunkyTag, FunkyTagAdmin)
site.register(ReferencedByParent)
site.register(ChildOfReferer)
site.register(ReferencedByInline)
site.register(InlineReferer, InlineRefererAdmin)
site.register(ReferencedByGenRel)
site.register(GenRelReference)
site.register(ParentWithUUIDPK)
site.register(RelatedPrepopulated, search_fields=['name'])
site.register(RelatedWithUUIDPKModel)
# We intentionally register Promo and ChapterXtra1 but not Chapter nor ChapterXtra2.
# That way we cover all four cases:
# related ForeignKey object registered in admin
# related ForeignKey object not registered in admin
# related OneToOne object registered in admin
# related OneToOne object not registered in admin
# when deleting Book so as exercise all four paths through
# contrib.admin.utils's get_deleted_objects function.
site.register(Book, inlines=[ChapterInline])
site.register(Promo)
site.register(ChapterXtra1, ChapterXtra1Admin)
site.register(Pizza, PizzaAdmin)
site.register(ReadOnlyPizza, ReadOnlyPizzaAdmin)
site.register(ReadablePizza)
site.register(Topping, ToppingAdmin)
site.register(Album, AlbumAdmin)
site.register(Question, QuestionAdmin)
site.register(Answer, AnswerAdmin, date_hierarchy='question__posted')
site.register(Answer2, date_hierarchy='question__expires')
site.register(PrePopulatedPost, PrePopulatedPostAdmin)
site.register(ComplexSortedPerson, ComplexSortedPersonAdmin)
site.register(FilteredManager, CustomManagerAdmin)
site.register(PluggableSearchPerson, PluggableSearchPersonAdmin)
site.register(PrePopulatedPostLargeSlug, PrePopulatedPostLargeSlugAdmin)
site.register(AdminOrderedField, AdminOrderedFieldAdmin)
site.register(AdminOrderedModelMethod, AdminOrderedModelMethodAdmin)
site.register(AdminOrderedAdminMethod, AdminOrderedAdminMethodAdmin)
site.register(AdminOrderedCallable, AdminOrderedCallableAdmin)
site.register(Color2, CustomTemplateFilterColorAdmin)
site.register(Simple, AttributeErrorRaisingAdmin)
site.register(UserMessenger, MessageTestingAdmin)
site.register(Choice, ChoiceList)
site.register(ParentWithDependentChildren, ParentWithDependentChildrenAdmin)
site.register(EmptyModelHidden, EmptyModelHiddenAdmin)
site.register(EmptyModelVisible, EmptyModelVisibleAdmin)
site.register(EmptyModelMixin, EmptyModelMixinAdmin)
site.register(StumpJoke)
site.register(Recipe)
site.register(Ingredient)
site.register(NotReferenced)
site.register(ExplicitlyProvidedPK, GetFormsetsArgumentCheckingAdmin)
site.register(ImplicitlyGeneratedPK, GetFormsetsArgumentCheckingAdmin)
# Register core models we need in our tests
site.register(User, UserAdmin)
site.register(Group, GroupAdmin)
# Used to test URL namespaces
site2 = admin.AdminSite(name="namespaced_admin")
site2.register(User, UserAdmin)
site2.register(Group, GroupAdmin)
site2.register(ParentWithUUIDPK)
site2.register(
RelatedWithUUIDPKModel,
list_display=['pk', 'parent'],
list_editable=['parent'],
raw_id_fields=['parent'],
)
site2.register(Person, save_as_continue=False)
site7 = admin.AdminSite(name="admin7")
site7.register(Article, ArticleAdmin2)
site7.register(Section)
# Used to test ModelAdmin.sortable_by and get_sortable_by().
class ArticleAdmin6(admin.ModelAdmin):
list_display = (
'content', 'date', callable_year, 'model_year', 'modeladmin_year',
'model_year_reversed', 'section',
)
sortable_by = ('date', callable_year)
def modeladmin_year(self, obj):
return obj.date.year
modeladmin_year.admin_order_field = 'date'
class ActorAdmin6(admin.ModelAdmin):
list_display = ('name', 'age')
sortable_by = ('name',)
def get_sortable_by(self, request):
return ('age',)
class ChapterAdmin6(admin.ModelAdmin):
list_display = ('title', 'book')
sortable_by = ()
class ColorAdmin6(admin.ModelAdmin):
list_display = ('value',)
def get_sortable_by(self, request):
return ()
site6 = admin.AdminSite(name='admin6')
site6.register(Article, ArticleAdmin6)
site6.register(Actor, ActorAdmin6)
site6.register(Chapter, ChapterAdmin6)
site6.register(Color, ColorAdmin6)
class ArticleAdmin9(admin.ModelAdmin):
def has_change_permission(self, request, obj=None):
# Simulate that the user can't change a specific object.
return obj is None
site9 = admin.AdminSite(name='admin9')
site9.register(Article, ArticleAdmin9)
class ArticleAdmin10(admin.ModelAdmin):
def has_change_permission(self, request, obj=None):
return False
site10 = admin.AdminSite(name='admin10')
site10.register(Article, ArticleAdmin10)
|
the-stack_106_19393
|
import numpy as np
import tensorflow as tf
from tfumap.umap import compute_cross_entropy
from pynndescent import NNDescent
from scipy.sparse import csr_matrix
from sklearn.utils import check_random_state, check_array
from umap.umap_ import fuzzy_simplicial_set, discrete_metric_simplicial_set_intersection
from scipy import optimize
from functools import partial
random_state = check_random_state(None)
def build_fuzzy_simplicial_set(X, y=None, n_neighbors=15):
"""
Build nearest neighbor graph, then fuzzy simplicial set
Parameters
----------
    X : np.ndarray
        Data matrix of shape (n_samples, n_features).
    y : np.ndarray, optional
        Discrete labels; when given, the graph is intersected with the label
        metric (supervised UMAP), by default None
    n_neighbors : int, optional
        Number of nearest neighbors used to build the graph, by default 15
"""
n_trees = 5 + int(round((X.shape[0]) ** 0.5 / 20.0))
n_iters = max(5, int(round(np.log2(X.shape[0]))))
# get nearest neighbors
nnd = NNDescent(
X,
n_neighbors=n_neighbors,
metric="euclidean",
n_trees=n_trees,
n_iters=n_iters,
max_candidates=60,
)
# get indices and distances
knn_indices, knn_dists = nnd.neighbor_graph
random_state = check_random_state(None)
# build graph
umap_graph, sigmas, rhos = fuzzy_simplicial_set(
X=X,
n_neighbors=n_neighbors,
metric="euclidean",
random_state=random_state,
knn_indices=knn_indices,
knn_dists=knn_dists,
)
if y is not None:
# set far_dist based on the assumption that target_weight == 1
far_dist = 1.0e12
y_ = check_array(y, ensure_2d=False)
umap_graph = discrete_metric_simplicial_set_intersection(
umap_graph, y_, far_dist=far_dist
)
return umap_graph
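# Illustrative usage sketch (added; not part of the original module). The
# returned object is a scipy.sparse matrix whose nonzero entries are the fuzzy
# edge weights; shapes and variable names below are assumptions:
#   X_demo = np.random.random((256, 16)).astype("float32")
#   graph = build_fuzzy_simplicial_set(X_demo, n_neighbors=15)
#   coo = graph.tocoo()
#   head, tail, weight = coo.row, coo.col, coo.data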
def convert_distance_to_probability(distances, a, b):
""" convert distance representation into probability,
as a function of a, b params
"""
return 1.0 / (1.0 + a * distances ** (2 * b))
def find_a_b(min_dist=0.1):
""" determine optimal params a, b to such that distances less than
min_dist have a probability of zero
"""
# input distances
x = np.linspace(0, 3, 300)
# optimal output (if close enough, don't try to make closer)
y = np.exp(-x + min_dist) * (x > min_dist) + (x < min_dist)
    # fit a, b with scipy.optimize so the curve matches the target for this min_dist
(a, b), _ = optimize.curve_fit(f=convert_distance_to_probability, xdata=x, ydata=y)
# a and b parameters for computing probability in low-d
a = tf.constant(a, dtype=tf.float32,)
b = tf.constant(b, dtype=tf.float32,)
return a, b
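# Note (added for clarity; not in the original): with these parameters the
# low-dimensional membership strength is 1 / (1 + a * d ** (2 * b)), so the
# probability crosses 0.5 at d = a ** (-1 / (2 * b)).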
def batch_data(x, batch_size=100):
n_batch = int(np.ceil((len(x) / batch_size)))
return [x[batch_size * i : batch_size * (i + 1)] for i in range(n_batch)]
def compute_classifier_loss(X, y, encoder, classifier, sparse_ce):
""" compute the cross entropy loss for classification
"""
d = classifier(encoder(X))
return sparse_ce(y, d)
def compute_classifier_loss_batch(X, y, encoder, classifier, sparse_ce):
""" compute the cross entropy loss for classification
"""
X_batch = batch_data(X)
pred = tf.stack([classifier(encoder(i)) for i in X_batch])
return sparse_ce(y, pred)
def compute_umap_loss(
batch_to,
batch_from,
embedder,
encoder,
_a,
_b,
negative_sample_rate=5,
repulsion_strength=1,
):
"""
compute the cross entropy loss for learning embeddings
Parameters
----------
    batch_to : tf.int or tf.float32
        Either X or the index locations of the embeddings for vertices (to)
    batch_from : tf.int or tf.float32
        Either X or the index locations of the embeddings for vertices (from)
    Returns
    -------
    attraction_loss : tf.float
        attraction term of the UMAP cross entropy
    repellant_loss : tf.float
        repulsion term of the UMAP cross entropy
    ce_loss : tf.float
        total cross entropy loss for UMAP
"""
# encode
embedding_to = embedder(encoder(batch_to))
embedding_from = embedder(encoder(batch_from))
# get negative samples
embedding_neg_to = tf.repeat(embedding_to, negative_sample_rate, axis=0)
repeat_neg = tf.repeat(embedding_from, negative_sample_rate, axis=0)
embedding_neg_from = tf.gather(
repeat_neg, tf.random.shuffle(tf.range(tf.shape(repeat_neg)[0]))
)
# distances between samples
distance_embedding = tf.concat(
[
tf.norm(embedding_to - embedding_from, axis=1),
tf.norm(embedding_neg_to - embedding_neg_from, axis=1),
],
axis=0,
)
    # convert distances to probabilities
probabilities_distance = convert_distance_to_probability(distance_embedding, _a, _b)
# treat positive samples as p=1, and negative samples as p=0
probabilities_graph = tf.concat(
[tf.ones(embedding_to.shape[0]), tf.zeros(embedding_neg_to.shape[0])], axis=0,
)
# cross entropy loss
(attraction_loss, repellant_loss, ce_loss) = compute_cross_entropy(
probabilities_graph,
probabilities_distance,
repulsion_strength=repulsion_strength,
)
return (
attraction_loss,
repellant_loss,
ce_loss,
)
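# Illustrative call sketch (added for clarity; not part of the original module).
# `encoder` and `embedder` are assumed to be Keras models mapping
# data -> latent -> embedding, and the two batches hold the data rows at each
# graph edge's endpoints:
#   attraction, repulsion, ce = compute_umap_loss(
#       X[edge_to_batch], X[edge_from_batch], embedder, encoder, _a, _b
#   )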
def batch_epoch_edges(edges_to, edges_from, batch_size):
""" permutes and batches edges for epoch
"""
# compute the number of batches in one epoch
n_batches = int(len(edges_to) / batch_size)
# permute list of edges
permutation_mask = np.random.permutation(len(edges_to))[: n_batches * batch_size]
to_all = tf.reshape(tf.gather(edges_to, permutation_mask), (n_batches, batch_size))
from_all = tf.reshape(
tf.gather(edges_from, permutation_mask), (n_batches, batch_size)
)
# return a tensorflow dataset of one epoch's worth of batches
return tf.data.Dataset.from_tensor_slices((to_all, from_all))
def create_edge_iterator(
head, tail, weight, batch_size, max_sample_repeats_per_epoch=25
):
""" create an iterator for edges
"""
# set the maximum number of times each edge should be repeated per epoch
epochs_per_sample = np.clip(
(weight / np.max(weight)) * max_sample_repeats_per_epoch,
1,
max_sample_repeats_per_epoch,
).astype("int")
edges_to_exp, edges_from_exp = (
np.array([np.repeat(head, epochs_per_sample.astype("int"))]),
np.array([np.repeat(tail, epochs_per_sample.astype("int"))]),
)
edge_iter = tf.data.Dataset.from_tensor_slices((edges_to_exp, edges_from_exp))
edge_iter = edge_iter.repeat()
# edge_iter = edge_iter.map(batch_epoch_edges)
edge_iter = edge_iter.map(partial(batch_epoch_edges, batch_size=batch_size))
edge_iter = edge_iter.prefetch(buffer_size=10)
return iter(edge_iter), np.shape(edges_to_exp)[1]
def create_classification_iterator(X_labeled, y_labeled, batch_size):
"""
Creates a tensorflow iterator for classification data (X, y)
"""
#
# create labeled data iterator
labeled_data = tf.data.Dataset.from_tensor_slices((X_labeled, y_labeled))
labeled_data = labeled_data.repeat()
labeled_data = labeled_data.shuffle(np.min([10000, len(y_labeled)]))
labeled_data = labeled_data.batch(batch_size)
labeled_data = labeled_data.prefetch(buffer_size=1)
return iter(labeled_data)
def create_validation_iterator(valid_X, valid_Y, batch_size, repeat=False):
""" Create an iterator that returns validation X and Y
"""
data_valid = tf.data.Dataset.from_tensor_slices((valid_X, valid_Y))
data_valid = data_valid.cache()
if repeat:
data_valid = data_valid.repeat()
data_valid = data_valid.batch(batch_size)
data_valid = data_valid.prefetch(buffer_size=1)
return data_valid, len(valid_X)
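if __name__ == "__main__":
    # Minimal smoke-test sketch (added for illustration; not in the original
    # module): fit the (a, b) curve parameters for the default min_dist and
    # evaluate the resulting low-dimensional membership curve at a few distances.
    _a, _b = find_a_b(min_dist=0.1)
    demo_distances = tf.constant([0.05, 0.5, 1.0, 2.0], dtype=tf.float32)
    print("a, b:", float(_a), float(_b))
    print("probabilities:", convert_distance_to_probability(demo_distances, _a, _b).numpy())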
|
the-stack_106_19397
|
import csv
import requests
from bs4 import BeautifulSoup
import re
'''
Created on 28 Sep 2013
@author: rob dobson
'''
class StockSymbolList():
def getStocksFromCSV(self):
self.stockList = []
with open('ukstocks.csv', 'r') as csvfile:
stkReader = csv.reader(csvfile)
for row in stkReader:
self.stockList.append(row[0:2])
self.stockList = sorted(self.stockList)
def getStocksFromWeb(self):
self.stockList = []
r = None
for getStocksAttempt in range(3):
try:
r = requests.get('http://www.lse.co.uk/index-constituents.asp?index=idx:asx')
break
except Exception as excp:
print("Faield to get FTSE list from LSE, attempt", getStocksAttempt)
if r is None:
return
soup = BeautifulSoup(r.text, "html.parser")
for x in soup.find_all('a', attrs={'class':"linkTabs"}):
#print (x.text)
            mtch = re.match(r"(.+?)\((.+?)\)", x.text)
            if (mtch is not None and mtch.lastindex == 2):
#print (mtch.group(1), mtch.group(2))
# Append .L to make it work with Yahoo
coName = mtch.group(1)
symb = mtch.group(2) + ".L" if (mtch.group(2)[-1]!='.') else mtch.group(2) + "L"
self.stockList.append([coName,symb])
else:
print("Failed Match", x.text)
def getNumStocks(self):
return len(self.stockList)
def getStockList(self):
return self.stockList
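if __name__ == "__main__":
    # Illustrative sketch (added; not part of the original module): fetch the
    # FTSE constituents from the web and print a few of the parsed symbols.
    symbols = StockSymbolList()
    symbols.getStocksFromWeb()
    print(symbols.getNumStocks(), "symbols loaded")
    for co_name, ticker in symbols.getStockList()[:5]:
        print(ticker, "-", co_name)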
|
the-stack_106_19398
|
# -*- coding: utf-8 -*-
"""
Created on Sun May 5 22:56:49 2019
@author: MiaoLi
"""
import time
import idea1_stimuliGeneration_a0428
# https://morvanzhou.github.io/tutorials/python-basic/multiprocessing/5-pool/
import multiprocessing as mp
from functools import partial
from tqdm import tqdm
# =============================================================================
# parameter before running
# =============================================================================
crowding_cons = 0 # 0: no crowding; 1: crowding; 2: reference
if not crowding_cons == 2:
ellipse_ka = 0.25
ellipse_kb = 0.1
if crowding_cons == 2:
ellipse_ka = 0.158
ellipse_kb = 0.158
drawEllipseFig = False
# drawEllipseFig = True
newWindowSize = 0.3
# newWindowSize = 0.4
# newWindowSize = 0.5
# newWindowSize = 0.6
# newWindowSize = 0.7
runN = 2 # run times
# =============================================================================
# run with pool
# =============================================================================
start = time.time()
multiParaFunc = partial(idea1_stimuliGeneration_a0428.runStimuliGeneration, crowding_cons, newWindowSize, drawEllipseFig, ellipse_ka, ellipse_kb)
# def runStimuliGeneration(crowding_cons, newWindowSize, visualization = False, ka = 0.25, kb = 0.1,loop_number=1):
if __name__ == '__main__':
# tqdm task time Bar: https://github.com/tqdm/tqdm/issues/484
pool = mp.Pool()
# pool.map(multiParaFunc, range(0,runN))
# https://www.zhihu.com/question/52188800
for _ in tqdm(pool.imap_unordered(multiParaFunc, range(0,runN)), total=runN):#range(1,50) runs 49 times from 1 to 49
pass
# multicore()
pool.close()
pool.join()
end = time.time()
runtime = round((end-start)*0.0167,2)
print('This lovely code runs', runtime, 'minutes')
# =============================================================================
# call os to run
# =============================================================================
# import os
# loop_number=1
# start = time.time()
# while(loop_number <= 10):
# os.system('python xxx.py' + ' ' + str(loop_number))
# # os.system('start /min python XXX.py' + ' ' + str(loop_number))
# loop_number += 1
# end = time.time()
# print('time', str(end-start))
|
the-stack_106_19399
|
from __future__ import annotations
from datetime import timedelta
import operator
from typing import (
TYPE_CHECKING,
Any,
Callable,
Literal,
Sequence,
)
import numpy as np
from pandas._libs import algos as libalgos
from pandas._libs.arrays import NDArrayBacked
from pandas._libs.tslibs import (
BaseOffset,
NaT,
NaTType,
Timedelta,
delta_to_nanoseconds,
dt64arr_to_periodarr as c_dt64arr_to_periodarr,
iNaT,
parsing,
period as libperiod,
to_offset,
)
from pandas._libs.tslibs.dtypes import FreqGroup
from pandas._libs.tslibs.fields import isleapyear_arr
from pandas._libs.tslibs.offsets import (
Tick,
delta_to_tick,
)
from pandas._libs.tslibs.period import (
DIFFERENT_FREQ,
IncompatibleFrequency,
Period,
get_period_field_arr,
period_asfreq_arr,
)
from pandas._typing import (
AnyArrayLike,
Dtype,
NpDtype,
npt,
)
from pandas.util._decorators import (
cache_readonly,
doc,
)
from pandas.core.dtypes.common import (
TD64NS_DTYPE,
ensure_object,
is_datetime64_any_dtype,
is_datetime64_dtype,
is_dtype_equal,
is_float_dtype,
is_integer_dtype,
is_period_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import PeriodDtype
from pandas.core.dtypes.generic import (
ABCIndex,
ABCPeriodIndex,
ABCSeries,
ABCTimedeltaArray,
)
from pandas.core.dtypes.missing import (
isna,
notna,
)
import pandas.core.algorithms as algos
from pandas.core.arrays import datetimelike as dtl
from pandas.core.arrays.base import ExtensionArray
import pandas.core.common as com
if TYPE_CHECKING:
from pandas._typing import (
NumpySorter,
NumpyValueArrayLike,
)
from pandas.core.arrays import DatetimeArray
_shared_doc_kwargs = {
"klass": "PeriodArray",
}
def _field_accessor(name: str, docstring=None):
def f(self):
base = self.freq._period_dtype_code
result = get_period_field_arr(name, self.asi8, base)
return result
f.__name__ = name
f.__doc__ = docstring
return property(f)
class PeriodArray(dtl.DatelikeOps):
"""
Pandas ExtensionArray for storing Period data.
Users should use :func:`~pandas.period_array` to create new instances.
Alternatively, :func:`~pandas.array` can be used to create new instances
from a sequence of Period scalars.
Parameters
----------
values : Union[PeriodArray, Series[period], ndarray[int], PeriodIndex]
The data to store. These should be arrays that can be directly
converted to ordinals without inference or copy (PeriodArray,
ndarray[int64]), or a box around such an array (Series[period],
PeriodIndex).
dtype : PeriodDtype, optional
A PeriodDtype instance from which to extract a `freq`. If both
`freq` and `dtype` are specified, then the frequencies must match.
freq : str or DateOffset
The `freq` to use for the array. Mostly applicable when `values`
is an ndarray of integers, when `freq` is required. When `values`
is a PeriodArray (or box around), it's checked that ``values.freq``
matches `freq`.
copy : bool, default False
Whether to copy the ordinals before storing.
Attributes
----------
None
Methods
-------
None
See Also
--------
Period: Represents a period of time.
PeriodIndex : Immutable Index for period data.
period_range: Create a fixed-frequency PeriodArray.
array: Construct a pandas array.
Notes
-----
There are two components to a PeriodArray
- ordinals : integer ndarray
- freq : pd.tseries.offsets.Offset
The values are physically stored as a 1-D ndarray of integers. These are
called "ordinals" and represent some kind of offset from a base.
The `freq` indicates the span covered by each element of the array.
All elements in the PeriodArray have the same `freq`.
"""
# array priority higher than numpy scalars
__array_priority__ = 1000
_typ = "periodarray" # ABCPeriodArray
_scalar_type = Period
_recognized_scalars = (Period,)
_is_recognized_dtype = is_period_dtype
_infer_matches = ("period",)
# Names others delegate to us
_other_ops: list[str] = []
_bool_ops: list[str] = ["is_leap_year"]
_object_ops: list[str] = ["start_time", "end_time", "freq"]
_field_ops: list[str] = [
"year",
"month",
"day",
"hour",
"minute",
"second",
"weekofyear",
"weekday",
"week",
"dayofweek",
"day_of_week",
"dayofyear",
"day_of_year",
"quarter",
"qyear",
"days_in_month",
"daysinmonth",
]
_datetimelike_ops: list[str] = _field_ops + _object_ops + _bool_ops
_datetimelike_methods: list[str] = ["strftime", "to_timestamp", "asfreq"]
_dtype: PeriodDtype
# --------------------------------------------------------------------
# Constructors
def __init__(
self, values, dtype: Dtype | None = None, freq=None, copy: bool = False
):
freq = validate_dtype_freq(dtype, freq)
if freq is not None:
freq = Period._maybe_convert_freq(freq)
if isinstance(values, ABCSeries):
values = values._values
if not isinstance(values, type(self)):
raise TypeError("Incorrect dtype")
elif isinstance(values, ABCPeriodIndex):
values = values._values
if isinstance(values, type(self)):
if freq is not None and freq != values.freq:
raise raise_on_incompatible(values, freq)
values, freq = values._ndarray, values.freq
values = np.array(values, dtype="int64", copy=copy)
if freq is None:
raise ValueError("freq is not specified and cannot be inferred")
NDArrayBacked.__init__(self, values, PeriodDtype(freq))
# error: Signature of "_simple_new" incompatible with supertype "NDArrayBacked"
@classmethod
def _simple_new( # type: ignore[override]
cls,
values: np.ndarray,
freq: BaseOffset | None = None,
dtype: Dtype | None = None,
) -> PeriodArray:
# alias for PeriodArray.__init__
assertion_msg = "Should be numpy array of type i8"
assert isinstance(values, np.ndarray) and values.dtype == "i8", assertion_msg
return cls(values, freq=freq, dtype=dtype)
@classmethod
def _from_sequence(
cls: type[PeriodArray],
scalars: Sequence[Period | None] | AnyArrayLike,
*,
dtype: Dtype | None = None,
copy: bool = False,
) -> PeriodArray:
if dtype and isinstance(dtype, PeriodDtype):
freq = dtype.freq
else:
freq = None
if isinstance(scalars, cls):
validate_dtype_freq(scalars.dtype, freq)
if copy:
scalars = scalars.copy()
return scalars
periods = np.asarray(scalars, dtype=object)
freq = freq or libperiod.extract_freq(periods)
ordinals = libperiod.extract_ordinals(periods, freq)
return cls(ordinals, freq=freq)
@classmethod
def _from_sequence_of_strings(
cls, strings, *, dtype: Dtype | None = None, copy: bool = False
) -> PeriodArray:
return cls._from_sequence(strings, dtype=dtype, copy=copy)
@classmethod
def _from_datetime64(cls, data, freq, tz=None) -> PeriodArray:
"""
Construct a PeriodArray from a datetime64 array
Parameters
----------
data : ndarray[datetime64[ns], datetime64[ns, tz]]
freq : str or Tick
tz : tzinfo, optional
Returns
-------
PeriodArray[freq]
"""
data, freq = dt64arr_to_periodarr(data, freq, tz)
return cls(data, freq=freq)
@classmethod
def _generate_range(cls, start, end, periods, freq, fields):
periods = dtl.validate_periods(periods)
if freq is not None:
freq = Period._maybe_convert_freq(freq)
field_count = len(fields)
if start is not None or end is not None:
if field_count > 0:
raise ValueError(
"Can either instantiate from fields or endpoints, but not both"
)
subarr, freq = _get_ordinal_range(start, end, periods, freq)
elif field_count > 0:
subarr, freq = _range_from_fields(freq=freq, **fields)
else:
raise ValueError("Not enough parameters to construct Period range")
return subarr, freq
# -----------------------------------------------------------------
# DatetimeLike Interface
# error: Argument 1 of "_unbox_scalar" is incompatible with supertype
# "DatetimeLikeArrayMixin"; supertype defines the argument type as
# "Union[Union[Period, Any, Timedelta], NaTType]"
def _unbox_scalar( # type: ignore[override]
self,
value: Period | NaTType,
setitem: bool = False,
) -> np.int64:
if value is NaT:
# error: Item "Period" of "Union[Period, NaTType]" has no attribute "value"
return np.int64(value.value) # type: ignore[union-attr]
elif isinstance(value, self._scalar_type):
self._check_compatible_with(value, setitem=setitem)
return np.int64(value.ordinal)
else:
raise ValueError(f"'value' should be a Period. Got '{value}' instead.")
def _scalar_from_string(self, value: str) -> Period:
return Period(value, freq=self.freq)
def _check_compatible_with(self, other, setitem: bool = False):
if other is NaT:
return
self._require_matching_freq(other)
# --------------------------------------------------------------------
# Data / Attributes
@cache_readonly
def dtype(self) -> PeriodDtype:
return self._dtype
# error: Read-only property cannot override read-write property
@property # type: ignore[misc]
def freq(self) -> BaseOffset:
"""
Return the frequency object for this PeriodArray.
"""
return self.dtype.freq
def __array__(self, dtype: NpDtype | None = None) -> np.ndarray:
if dtype == "i8":
return self.asi8
elif dtype == bool:
return ~self._isnan
# This will raise TypeError for non-object dtypes
return np.array(list(self), dtype=object)
def __arrow_array__(self, type=None):
"""
Convert myself into a pyarrow Array.
"""
import pyarrow
from pandas.core.arrays._arrow_utils import ArrowPeriodType
if type is not None:
if pyarrow.types.is_integer(type):
return pyarrow.array(self._ndarray, mask=self.isna(), type=type)
elif isinstance(type, ArrowPeriodType):
# ensure we have the same freq
if self.freqstr != type.freq:
raise TypeError(
"Not supported to convert PeriodArray to array with different "
f"'freq' ({self.freqstr} vs {type.freq})"
)
else:
raise TypeError(
f"Not supported to convert PeriodArray to '{type}' type"
)
period_type = ArrowPeriodType(self.freqstr)
storage_array = pyarrow.array(self._ndarray, mask=self.isna(), type="int64")
return pyarrow.ExtensionArray.from_storage(period_type, storage_array)
# --------------------------------------------------------------------
# Vectorized analogues of Period properties
year = _field_accessor(
"year",
"""
The year of the period.
""",
)
month = _field_accessor(
"month",
"""
The month as January=1, December=12.
""",
)
day = _field_accessor(
"day",
"""
The day of the period.
""",
)
hour = _field_accessor(
"hour",
"""
The hour of the period.
""",
)
minute = _field_accessor(
"minute",
"""
The minute of the period.
""",
)
second = _field_accessor(
"second",
"""
The second of the period.
""",
)
weekofyear = _field_accessor(
"week",
"""
The week ordinal of the year.
""",
)
week = weekofyear
day_of_week = _field_accessor(
"day_of_week",
"""
The day of the week with Monday=0, Sunday=6.
""",
)
dayofweek = day_of_week
weekday = dayofweek
dayofyear = day_of_year = _field_accessor(
"day_of_year",
"""
The ordinal day of the year.
""",
)
quarter = _field_accessor(
"quarter",
"""
The quarter of the date.
""",
)
qyear = _field_accessor("qyear")
days_in_month = _field_accessor(
"days_in_month",
"""
The number of days in the month.
""",
)
daysinmonth = days_in_month
@property
def is_leap_year(self) -> np.ndarray:
"""
Logical indicating if the date belongs to a leap year.
"""
return isleapyear_arr(np.asarray(self.year))
def to_timestamp(self, freq=None, how: str = "start") -> DatetimeArray:
"""
Cast to DatetimeArray/Index.
Parameters
----------
freq : str or DateOffset, optional
Target frequency. The default is 'D' for week or longer,
'S' otherwise.
how : {'s', 'e', 'start', 'end'}
Whether to use the start or end of the time period being converted.
Returns
-------
DatetimeArray/Index
"""
from pandas.core.arrays import DatetimeArray
how = libperiod.validate_end_alias(how)
end = how == "E"
if end:
if freq == "B" or self.freq == "B":
# roll forward to ensure we land on B date
adjust = Timedelta(1, "D") - Timedelta(1, "ns")
return self.to_timestamp(how="start") + adjust
else:
adjust = Timedelta(1, "ns")
return (self + self.freq).to_timestamp(how="start") - adjust
if freq is None:
freq = self._get_to_timestamp_base()
base = freq
else:
freq = Period._maybe_convert_freq(freq)
base = freq._period_dtype_code
new_parr = self.asfreq(freq, how=how)
new_data = libperiod.periodarr_to_dt64arr(new_parr.asi8, base)
dta = DatetimeArray(new_data)
if self.freq.name == "B":
# See if we can retain BDay instead of Day in cases where
# len(self) is too small for infer_freq to distinguish between them
diffs = libalgos.unique_deltas(self.asi8)
if len(diffs) == 1:
diff = diffs[0]
if diff == self.freq.n:
dta._freq = self.freq
elif diff == 1:
dta._freq = self.freq.base
# TODO: other cases?
return dta
else:
return dta._with_freq("infer")
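# Hedged usage sketch, not part of the original source: a public-API view of what
# to_timestamp does, assuming pandas is importable as pd.
# >>> import pandas as pd
# >>> parr = pd.period_range("2020-01", periods=3, freq="M").array
# >>> parr.to_timestamp(how="start")   # first instant of each period
# >>> parr.to_timestamp(how="end")     # last nanosecond of each period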
# --------------------------------------------------------------------
def _time_shift(self, periods: int, freq=None) -> PeriodArray:
"""
Shift each value by `periods`.
Note this is different from ExtensionArray.shift, which
shifts the *position* of each element, padding the end with
missing values.
Parameters
----------
periods : int
Number of periods to shift by.
freq : pandas.DateOffset, pandas.Timedelta, or str
Frequency increment to shift by.
"""
if freq is not None:
raise TypeError(
"`freq` argument is not supported for "
f"{type(self).__name__}._time_shift"
)
values = self.asi8 + periods * self.freq.n
if self._hasna:
values[self._isnan] = iNaT
return type(self)(values, freq=self.freq)
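# Hedged illustration, not part of the original source: the public analogue of
# _time_shift is integer addition on period-dtype data, which shifts values
# (not positions), assuming pandas is importable as pd.
# >>> import pandas as pd
# >>> idx = pd.period_range("2020-01", periods=3, freq="M")
# >>> idx + 2   # PeriodIndex(['2020-03', '2020-04', '2020-05'], dtype='period[M]')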
def _box_func(self, x) -> Period | NaTType:
return Period._from_ordinal(ordinal=x, freq=self.freq)
@doc(**_shared_doc_kwargs, other="PeriodIndex", other_name="PeriodIndex")
def asfreq(self, freq=None, how: str = "E") -> PeriodArray:
"""
Convert the {klass} to the specified frequency `freq`.
Equivalent to applying :meth:`pandas.Period.asfreq` with the given arguments
to each :class:`~pandas.Period` in this {klass}.
Parameters
----------
freq : str
A frequency.
how : str {{'E', 'S'}}, default 'E'
Whether the elements should be aligned to the end
or start within the period.
* 'E', 'END', or 'FINISH' for end,
* 'S', 'START', or 'BEGIN' for start.
January 31st ('END') vs. January 1st ('START') for example.
Returns
-------
{klass}
The transformed {klass} with the new frequency.
See Also
--------
{other}.asfreq: Convert each Period in a {other_name} to the given frequency.
Period.asfreq : Convert a :class:`~pandas.Period` object to the given frequency.
Examples
--------
>>> pidx = pd.period_range('2010-01-01', '2015-01-01', freq='A')
>>> pidx
PeriodIndex(['2010', '2011', '2012', '2013', '2014', '2015'],
dtype='period[A-DEC]')
>>> pidx.asfreq('M')
PeriodIndex(['2010-12', '2011-12', '2012-12', '2013-12', '2014-12',
'2015-12'], dtype='period[M]')
>>> pidx.asfreq('M', how='S')
PeriodIndex(['2010-01', '2011-01', '2012-01', '2013-01', '2014-01',
'2015-01'], dtype='period[M]')
"""
how = libperiod.validate_end_alias(how)
freq = Period._maybe_convert_freq(freq)
base1 = self.freq._period_dtype_code
base2 = freq._period_dtype_code
asi8 = self.asi8
# self.freq.n can't be negative or 0
end = how == "E"
if end:
ordinal = asi8 + self.freq.n - 1
else:
ordinal = asi8
new_data = period_asfreq_arr(ordinal, base1, base2, end)
if self._hasna:
new_data[self._isnan] = iNaT
return type(self)(new_data, freq=freq)
# ------------------------------------------------------------------
# Rendering Methods
def _formatter(self, boxed: bool = False):
if boxed:
return str
return "'{}'".format
@dtl.ravel_compat
def _format_native_types(
self, *, na_rep="NaT", date_format=None, **kwargs
) -> np.ndarray:
"""
actually format my specific types
"""
values = self.astype(object)
if date_format:
formatter = lambda dt: dt.strftime(date_format)
else:
formatter = lambda dt: str(dt)
if self._hasna:
mask = self._isnan
values[mask] = na_rep
imask = ~mask
values[imask] = np.array([formatter(dt) for dt in values[imask]])
else:
values = np.array([formatter(dt) for dt in values])
return values
# ------------------------------------------------------------------
def astype(self, dtype, copy: bool = True):
# We handle Period[T] -> Period[U]
# Our parent handles everything else.
dtype = pandas_dtype(dtype)
if is_dtype_equal(dtype, self._dtype):
if not copy:
return self
else:
return self.copy()
if is_period_dtype(dtype):
return self.asfreq(dtype.freq)
if is_datetime64_any_dtype(dtype):
# GH#45038 match PeriodIndex behavior.
tz = getattr(dtype, "tz", None)
return self.to_timestamp().tz_localize(tz)
return super().astype(dtype, copy=copy)
def searchsorted(
self,
value: NumpyValueArrayLike | ExtensionArray,
side: Literal["left", "right"] = "left",
sorter: NumpySorter = None,
) -> npt.NDArray[np.intp] | np.intp:
npvalue = self._validate_searchsorted_value(value).view("M8[ns]")
# Cast to M8 to get datetime-like NaT placement
m8arr = self._ndarray.view("M8[ns]")
return m8arr.searchsorted(npvalue, side=side, sorter=sorter)
def fillna(self, value=None, method=None, limit=None) -> PeriodArray:
if method is not None:
# view as dt64 so we get treated as timelike in core.missing
dta = self.view("M8[ns]")
result = dta.fillna(value=value, method=method, limit=limit)
# error: Incompatible return value type (got "Union[ExtensionArray,
# ndarray[Any, Any]]", expected "PeriodArray")
return result.view(self.dtype) # type: ignore[return-value]
return super().fillna(value=value, method=method, limit=limit)
# ------------------------------------------------------------------
# Arithmetic Methods
def _sub_datelike(self, other):
assert other is not NaT
return NotImplemented
def _sub_period(self, other):
# If the operation is well-defined, we return an object-Index
# of DateOffsets. Null entries are filled with pd.NaT
self._check_compatible_with(other)
asi8 = self.asi8
new_data = asi8 - other.ordinal
new_data = np.array([self.freq * x for x in new_data])
if self._hasna:
new_data[self._isnan] = NaT
return new_data
def _sub_period_array(self, other):
"""
Subtract a Period Array/Index from self. This is only valid if self
is itself a Period Array/Index, raises otherwise. Both objects must
have the same frequency.
Parameters
----------
other : PeriodIndex or PeriodArray
Returns
-------
result : np.ndarray[object]
Array of DateOffset objects; nulls represented by NaT.
"""
self._require_matching_freq(other)
new_values = algos.checked_add_with_arr(
self.asi8, -other.asi8, arr_mask=self._isnan, b_mask=other._isnan
)
new_values = np.array([self.freq.base * x for x in new_values])
if self._hasna or other._hasna:
mask = self._isnan | other._isnan
new_values[mask] = NaT
return new_values
def _addsub_int_array(
self, other: np.ndarray, op: Callable[[Any, Any], Any]
) -> PeriodArray:
"""
Add or subtract array of integers; equivalent to applying
`_time_shift` pointwise.
Parameters
----------
other : np.ndarray[integer-dtype]
op : {operator.add, operator.sub}
Returns
-------
result : PeriodArray
"""
assert op in [operator.add, operator.sub]
if op is operator.sub:
other = -other
res_values = algos.checked_add_with_arr(self.asi8, other, arr_mask=self._isnan)
res_values = res_values.view("i8")
np.putmask(res_values, self._isnan, iNaT)
return type(self)(res_values, freq=self.freq)
def _add_offset(self, other: BaseOffset):
assert not isinstance(other, Tick)
self._require_matching_freq(other, base=True)
# Note: when calling parent class's _add_timedeltalike_scalar,
# it will call delta_to_nanoseconds(delta). Because delta here
# is an integer, delta_to_nanoseconds will return it unchanged.
result = super()._add_timedeltalike_scalar(other.n)
return type(self)(result, freq=self.freq)
def _add_timedeltalike_scalar(self, other):
"""
Parameters
----------
other : timedelta, Tick, np.timedelta64
Returns
-------
PeriodArray
"""
if not isinstance(self.freq, Tick):
# We cannot add timedelta-like to non-tick PeriodArray
raise raise_on_incompatible(self, other)
if notna(other):
# special handling for np.timedelta64("NaT"), avoid calling
# _check_timedeltalike_freq_compat as that would raise TypeError
other = self._check_timedeltalike_freq_compat(other)
# Note: when calling parent class's _add_timedeltalike_scalar,
# it will call delta_to_nanoseconds(delta). Because delta here
# is an integer, delta_to_nanoseconds will return it unchanged.
return super()._add_timedeltalike_scalar(other)
def _add_timedelta_arraylike(self, other):
"""
Parameters
----------
other : TimedeltaArray or ndarray[timedelta64]
Returns
-------
result : ndarray[int64]
"""
if not isinstance(self.freq, Tick):
# We cannot add timedelta-like to non-tick PeriodArray
raise TypeError(
f"Cannot add or subtract timedelta64[ns] dtype from {self.dtype}"
)
if not np.all(isna(other)):
delta = self._check_timedeltalike_freq_compat(other)
else:
# all-NaT TimedeltaIndex is equivalent to a single scalar td64 NaT
return self + np.timedelta64("NaT")
ordinals = self._addsub_int_array(delta, operator.add).asi8
return type(self)(ordinals, dtype=self.dtype)
def _check_timedeltalike_freq_compat(self, other):
"""
Arithmetic operations with timedelta-like scalars or array `other`
are only valid if `other` is an integer multiple of `self.freq`.
If the operation is valid, find that integer multiple. Otherwise,
raise because the operation is invalid.
Parameters
----------
other : timedelta, np.timedelta64, Tick,
ndarray[timedelta64], TimedeltaArray, TimedeltaIndex
Returns
-------
multiple : int or ndarray[int64]
Raises
------
IncompatibleFrequency
"""
assert isinstance(self.freq, Tick) # checked by calling function
base_nanos = self.freq.base.nanos
if isinstance(other, (timedelta, np.timedelta64, Tick)):
nanos = delta_to_nanoseconds(other)
elif isinstance(other, np.ndarray):
# numpy timedelta64 array; all entries must be compatible
assert other.dtype.kind == "m"
if other.dtype != TD64NS_DTYPE:
# i.e. non-nano unit
# TODO: disallow unit-less timedelta64
other = other.astype(TD64NS_DTYPE)
nanos = other.view("i8")
else:
# TimedeltaArray/Index
nanos = other.asi8
if np.all(nanos % base_nanos == 0):
# nanos being added is an integer multiple of the
# base-frequency to self.freq
delta = nanos // base_nanos
# delta is the integer (or integer-array) number of periods
# that will be added to self.
return delta
raise raise_on_incompatible(self, other)
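# Hedged illustration, not part of the original source: seen through the public
# API, timedelta arithmetic only succeeds when the delta is a whole multiple of a
# Tick-based freq, assuming pandas is importable as pd.
# >>> import pandas as pd
# >>> hourly = pd.period_range("2020-01-01", periods=3, freq="H")
# >>> hourly + pd.Timedelta(hours=2)     # ok: exactly two periods
# >>> hourly + pd.Timedelta(minutes=90)  # raises IncompatibleFrequency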
# ------------------------------------------------------------------
# TODO: See if we can re-share this with Period
def _get_to_timestamp_base(self) -> int:
"""
Return frequency code group used for base of to_timestamp against
frequency code.
Return the day freq code for frequencies longer than a day.
Return the second freq code for frequencies between hour and second.
Returns
-------
int
"""
base = self._dtype._dtype_code
if base < FreqGroup.FR_BUS.value:
return FreqGroup.FR_DAY.value
elif FreqGroup.FR_HR.value <= base <= FreqGroup.FR_SEC.value:
return FreqGroup.FR_SEC.value
return base
@property
def start_time(self) -> DatetimeArray:
return self.to_timestamp(how="start")
@property
def end_time(self) -> DatetimeArray:
return self.to_timestamp(how="end")
def _require_matching_freq(self, other, base: bool = False) -> None:
# See also arrays.period.raise_on_incompatible
if isinstance(other, BaseOffset):
other_freq = other
else:
other_freq = other.freq
if base:
condition = self.freq.base != other_freq.base
else:
condition = self.freq != other_freq
if condition:
msg = DIFFERENT_FREQ.format(
cls=type(self).__name__,
own_freq=self.freqstr,
other_freq=other_freq.freqstr,
)
raise IncompatibleFrequency(msg)
def raise_on_incompatible(left, right):
"""
Helper function to render a consistent error message when raising
IncompatibleFrequency.
Parameters
----------
left : PeriodArray
right : None, DateOffset, Period, ndarray, or timedelta-like
Returns
-------
IncompatibleFrequency
Exception to be raised by the caller.
"""
# GH#24283 error message format depends on whether right is scalar
if isinstance(right, (np.ndarray, ABCTimedeltaArray)) or right is None:
other_freq = None
elif isinstance(right, (ABCPeriodIndex, PeriodArray, Period, BaseOffset)):
other_freq = right.freqstr
else:
other_freq = delta_to_tick(Timedelta(right)).freqstr
msg = DIFFERENT_FREQ.format(
cls=type(left).__name__, own_freq=left.freqstr, other_freq=other_freq
)
return IncompatibleFrequency(msg)
# -------------------------------------------------------------------
# Constructor Helpers
def period_array(
data: Sequence[Period | str | None] | AnyArrayLike,
freq: str | Tick | None = None,
copy: bool = False,
) -> PeriodArray:
"""
Construct a new PeriodArray from a sequence of Period scalars.
Parameters
----------
data : Sequence of Period objects
A sequence of Period objects. These are required to all have
the same ``freq``. Missing values can be indicated by ``None``
or ``pandas.NaT``.
freq : str, Tick, or Offset
The frequency of every element of the array. This can be specified
to avoid inferring the `freq` from `data`.
copy : bool, default False
Whether to ensure a copy of the data is made.
Returns
-------
PeriodArray
See Also
--------
PeriodArray
pandas.PeriodIndex
Examples
--------
>>> period_array([pd.Period('2017', freq='A'),
... pd.Period('2018', freq='A')])
<PeriodArray>
['2017', '2018']
Length: 2, dtype: period[A-DEC]
>>> period_array([pd.Period('2017', freq='A'),
... pd.Period('2018', freq='A'),
... pd.NaT])
<PeriodArray>
['2017', '2018', 'NaT']
Length: 3, dtype: period[A-DEC]
Integers that look like years are handled
>>> period_array([2000, 2001, 2002], freq='D')
<PeriodArray>
['2000-01-01', '2001-01-01', '2002-01-01']
Length: 3, dtype: period[D]
Datetime-like strings may also be passed
>>> period_array(['2000-Q1', '2000-Q2', '2000-Q3', '2000-Q4'], freq='Q')
<PeriodArray>
['2000Q1', '2000Q2', '2000Q3', '2000Q4']
Length: 4, dtype: period[Q-DEC]
"""
data_dtype = getattr(data, "dtype", None)
if is_datetime64_dtype(data_dtype):
return PeriodArray._from_datetime64(data, freq)
if is_period_dtype(data_dtype):
return PeriodArray(data, freq=freq)
# other iterable of some kind
if not isinstance(data, (np.ndarray, list, tuple, ABCSeries)):
data = list(data)
arrdata = np.asarray(data)
dtype: PeriodDtype | None
if freq:
dtype = PeriodDtype(freq)
else:
dtype = None
if is_float_dtype(arrdata) and len(arrdata) > 0:
raise TypeError("PeriodIndex does not allow floating point in construction")
if is_integer_dtype(arrdata.dtype):
arr = arrdata.astype(np.int64, copy=False)
ordinals = libperiod.from_ordinals(arr, freq)
return PeriodArray(ordinals, dtype=dtype)
data = ensure_object(arrdata)
return PeriodArray._from_sequence(data, dtype=dtype)
def validate_dtype_freq(dtype, freq):
"""
If both a dtype and a freq are available, ensure they match. If only
dtype is available, extract the implied freq.
Parameters
----------
dtype : dtype
freq : DateOffset or None
Returns
-------
freq : DateOffset
Raises
------
ValueError : non-period dtype
IncompatibleFrequency : mismatch between dtype and freq
"""
if freq is not None:
freq = to_offset(freq)
if dtype is not None:
dtype = pandas_dtype(dtype)
if not is_period_dtype(dtype):
raise ValueError("dtype must be PeriodDtype")
if freq is None:
freq = dtype.freq
elif freq != dtype.freq:
raise IncompatibleFrequency("specified freq and dtype are different")
return freq
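# Hedged examples, not part of the original source, using this module's own names:
# >>> validate_dtype_freq(PeriodDtype("M"), None)   # returns the dtype's freq
# >>> validate_dtype_freq(None, "D")                # returns to_offset("D")
# >>> validate_dtype_freq(PeriodDtype("M"), "D")    # raises IncompatibleFrequency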
def dt64arr_to_periodarr(data, freq, tz=None):
"""
Convert a datetime-like array to an array of Period ordinals.
Parameters
----------
data : Union[Series[datetime64[ns]], DatetimeIndex, ndarray[datetime64ns]]
freq : Optional[Union[str, Tick]]
Must match the `freq` on the `data` if `data` is a DatetimeIndex
or Series.
tz : Optional[tzinfo]
Returns
-------
ordinals : ndarray[int64]
freq : Tick
The frequency extracted from the Series or DatetimeIndex if that's
used.
"""
if data.dtype != np.dtype("M8[ns]"):
raise ValueError(f"Wrong dtype: {data.dtype}")
if freq is None:
if isinstance(data, ABCIndex):
data, freq = data._values, data.freq
elif isinstance(data, ABCSeries):
data, freq = data._values, data.dt.freq
elif isinstance(data, (ABCIndex, ABCSeries)):
data = data._values
freq = Period._maybe_convert_freq(freq)
base = freq._period_dtype_code
return c_dt64arr_to_periodarr(data.view("i8"), base, tz), freq
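# Hedged sketch, not part of the original source: converting a plain
# datetime64[ns] ndarray to monthly ordinals with this helper, assuming numpy as np.
# >>> import numpy as np
# >>> stamps = np.array(["2020-01-15", "2020-02-15"], dtype="M8[ns]")
# >>> ordinals, freq = dt64arr_to_periodarr(stamps, "M")
# >>> PeriodArray(ordinals, freq=freq)   # roughly: <PeriodArray> ['2020-01', '2020-02']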
def _get_ordinal_range(start, end, periods, freq, mult=1):
if com.count_not_none(start, end, periods) != 2:
raise ValueError(
"Of the three parameters: start, end, and periods, "
"exactly two must be specified"
)
if freq is not None:
freq = to_offset(freq)
mult = freq.n
if start is not None:
start = Period(start, freq)
if end is not None:
end = Period(end, freq)
is_start_per = isinstance(start, Period)
is_end_per = isinstance(end, Period)
if is_start_per and is_end_per and start.freq != end.freq:
raise ValueError("start and end must have same freq")
if start is NaT or end is NaT:
raise ValueError("start and end must not be NaT")
if freq is None:
if is_start_per:
freq = start.freq
elif is_end_per:
freq = end.freq
else: # pragma: no cover
raise ValueError("Could not infer freq from start/end")
if periods is not None:
periods = periods * mult
if start is None:
data = np.arange(
end.ordinal - periods + mult, end.ordinal + 1, mult, dtype=np.int64
)
else:
data = np.arange(
start.ordinal, start.ordinal + periods, mult, dtype=np.int64
)
else:
data = np.arange(start.ordinal, end.ordinal + 1, mult, dtype=np.int64)
return data, freq
def _range_from_fields(
year=None,
month=None,
quarter=None,
day=None,
hour=None,
minute=None,
second=None,
freq=None,
) -> tuple[np.ndarray, BaseOffset]:
if hour is None:
hour = 0
if minute is None:
minute = 0
if second is None:
second = 0
if day is None:
day = 1
ordinals = []
if quarter is not None:
if freq is None:
freq = to_offset("Q")
base = FreqGroup.FR_QTR.value
else:
freq = to_offset(freq)
base = libperiod.freq_to_dtype_code(freq)
if base != FreqGroup.FR_QTR.value:
raise AssertionError("base must equal FR_QTR")
freqstr = freq.freqstr
year, quarter = _make_field_arrays(year, quarter)
for y, q in zip(year, quarter):
y, m = parsing.quarter_to_myear(y, q, freqstr)
val = libperiod.period_ordinal(y, m, 1, 1, 1, 1, 0, 0, base)
ordinals.append(val)
else:
freq = to_offset(freq)
base = libperiod.freq_to_dtype_code(freq)
arrays = _make_field_arrays(year, month, day, hour, minute, second)
for y, mth, d, h, mn, s in zip(*arrays):
ordinals.append(libperiod.period_ordinal(y, mth, d, h, mn, s, 0, 0, base))
return np.array(ordinals, dtype=np.int64), freq
def _make_field_arrays(*fields) -> list[np.ndarray]:
length = None
for x in fields:
if isinstance(x, (list, np.ndarray, ABCSeries)):
if length is not None and len(x) != length:
raise ValueError("Mismatched Period array lengths")
elif length is None:
length = len(x)
# error: Argument 2 to "repeat" has incompatible type "Optional[int]"; expected
# "Union[Union[int, integer[Any]], Union[bool, bool_], ndarray, Sequence[Union[int,
# integer[Any]]], Sequence[Union[bool, bool_]], Sequence[Sequence[Any]]]"
return [
np.asarray(x)
if isinstance(x, (np.ndarray, list, ABCSeries))
else np.repeat(x, length) # type: ignore[arg-type]
for x in fields
]
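# Hedged note, not part of the original source: _range_from_fields and
# _make_field_arrays back field-based construction, where scalar fields are
# broadcast against list fields. A public-API example, assuming pandas as pd:
# >>> import pandas as pd
# >>> pd.PeriodIndex(year=[2020, 2021], quarter=[1, 3], freq="Q")
# PeriodIndex(['2020Q1', '2021Q3'], dtype='period[Q-DEC]')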
|
the-stack_106_19400
|
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the rawtransaction RPCs.
Test the following RPCs:
- createrawtransaction
- signrawtransactionwithwallet
- sendrawtransaction
- decoderawtransaction
- getrawtransaction
"""
from collections import OrderedDict
from decimal import Decimal
from io import BytesIO
from test_framework.messages import CTransaction, ToHex
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, bytes_to_hex_str, connect_nodes_bi, hex_str_to_bytes
class multidict(dict):
"""Dictionary that allows duplicate keys.
Constructed with a list of (key, value) tuples. When dumped by the json module,
will output invalid json with repeated keys, e.g.:
>>> json.dumps(multidict([(1,2),(1,2)]))
'{"1": 2, "1": 2}'
Used to test calls to rpc methods with repeated keys in the json object."""
def __init__(self, x):
dict.__init__(self, x)
self.x = x
def items(self):
return self.x
# Create one-input, one-output, no-fee transaction:
class RawTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [["-addresstype=legacy", "-txindex"], ["-addresstype=legacy", "-txindex"], ["-addresstype=legacy", "-txindex"]]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
super().setup_network()
connect_nodes_bi(self.nodes, 0, 2)
def run_test(self):
self.log.info('prepare some coins for multiple *rawtransaction commands')
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(101)
self.sync_all()
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),5.0)
self.sync_all()
self.nodes[0].generate(5)
self.sync_all()
self.log.info('Test getrawtransaction on genesis block coinbase returns an error')
block = self.nodes[0].getblock(self.nodes[0].getblockhash(0))
assert_raises_rpc_error(-5, "The genesis block coinbase is not considered an ordinary transaction", self.nodes[0].getrawtransaction, block['merkleroot'])
self.log.info('Check parameter types and required parameters of createrawtransaction')
# Test `createrawtransaction` required parameters
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction)
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [])
# Test `createrawtransaction` invalid extra parameters
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [], {}, 0, False, 'foo')
# Test `createrawtransaction` invalid `inputs`
txid = '1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000'
assert_raises_rpc_error(-3, "Expected type array", self.nodes[0].createrawtransaction, 'foo', {})
assert_raises_rpc_error(-1, "JSON value is not an object as expected", self.nodes[0].createrawtransaction, ['foo'], {})
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].createrawtransaction, [{}], {})
assert_raises_rpc_error(-8, "txid must be of length 64 (not 3, for 'foo')", self.nodes[0].createrawtransaction, [{'txid': 'foo'}], {})
assert_raises_rpc_error(-8, "txid must be hexadecimal string (not 'ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844')", self.nodes[0].createrawtransaction, [{'txid': 'ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844'}], {})
assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid}], {})
assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 'foo'}], {})
assert_raises_rpc_error(-8, "Invalid parameter, vout must be positive", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': -1}], {})
assert_raises_rpc_error(-8, "Invalid parameter, sequence number is out of range", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 0, 'sequence': -1}], {})
# Test `createrawtransaction` invalid `outputs`
address = self.nodes[0].getnewaddress()
address2 = self.nodes[0].getnewaddress()
assert_raises_rpc_error(-1, "JSON value is not an array as expected", self.nodes[0].createrawtransaction, [], 'foo')
self.nodes[0].createrawtransaction(inputs=[], outputs={}) # Should not throw for backwards compatibility
self.nodes[0].createrawtransaction(inputs=[], outputs=[])
assert_raises_rpc_error(-8, "Data must be hexadecimal string", self.nodes[0].createrawtransaction, [], {'data': 'foo'})
assert_raises_rpc_error(-5, "Invalid BitcoinMeta address", self.nodes[0].createrawtransaction, [], {'foo': 0})
assert_raises_rpc_error(-3, "Invalid amount", self.nodes[0].createrawtransaction, [], {address: 'foo'})
assert_raises_rpc_error(-3, "Amount out of range", self.nodes[0].createrawtransaction, [], {address: -1})
assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: %s" % address, self.nodes[0].createrawtransaction, [], multidict([(address, 1), (address, 1)]))
assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: %s" % address, self.nodes[0].createrawtransaction, [], [{address: 1}, {address: 1}])
assert_raises_rpc_error(-8, "Invalid parameter, duplicate key: data", self.nodes[0].createrawtransaction, [], [{"data": 'aa'}, {"data": "bb"}])
assert_raises_rpc_error(-8, "Invalid parameter, duplicate key: data", self.nodes[0].createrawtransaction, [], multidict([("data", 'aa'), ("data", "bb")]))
assert_raises_rpc_error(-8, "Invalid parameter, key-value pair must contain exactly one key", self.nodes[0].createrawtransaction, [], [{'a': 1, 'b': 2}])
assert_raises_rpc_error(-8, "Invalid parameter, key-value pair not an object as expected", self.nodes[0].createrawtransaction, [], [['key-value pair1'], ['2']])
# Test `createrawtransaction` invalid `locktime`
assert_raises_rpc_error(-3, "Expected type number", self.nodes[0].createrawtransaction, [], {}, 'foo')
assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, -1)
assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, 4294967296)
# Test `createrawtransaction` invalid `replaceable`
assert_raises_rpc_error(-3, "Expected type bool", self.nodes[0].createrawtransaction, [], {}, 0, 'foo')
self.log.info('Check that createrawtransaction accepts an array and object as outputs')
tx = CTransaction()
# One output
tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs={address: 99}))))
assert_equal(len(tx.vout), 1)
assert_equal(
bytes_to_hex_str(tx.serialize()),
self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}]),
)
# Two outputs
tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=OrderedDict([(address, 99), (address2, 99)])))))
assert_equal(len(tx.vout), 2)
assert_equal(
bytes_to_hex_str(tx.serialize()),
self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}, {address2: 99}]),
)
# Multiple mixed outputs
tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=multidict([(address, 99), (address2, 99), ('data', '99')])))))
assert_equal(len(tx.vout), 3)
assert_equal(
bytes_to_hex_str(tx.serialize()),
self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}, {address2: 99}, {'data': '99'}]),
)
for type in ["bech32", "p2sh-segwit", "legacy"]:
addr = self.nodes[0].getnewaddress("", type)
addrinfo = self.nodes[0].getaddressinfo(addr)
pubkey = addrinfo["scriptPubKey"]
self.log.info('sendrawtransaction with missing prevtx info (%s)' %(type))
# Test `signrawtransactionwithwallet` invalid `prevtxs`
inputs = [ {'txid' : txid, 'vout' : 3, 'sequence' : 1000}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
prevtx = dict(txid=txid, scriptPubKey=pubkey, vout=3, amount=1)
succ = self.nodes[0].signrawtransactionwithwallet(rawtx, [prevtx])
assert succ["complete"]
if type == "legacy":
del prevtx["amount"]
succ = self.nodes[0].signrawtransactionwithwallet(rawtx, [prevtx])
assert succ["complete"]
if type != "legacy":
assert_raises_rpc_error(-3, "Missing amount", self.nodes[0].signrawtransactionwithwallet, rawtx, [
{
"txid": txid,
"scriptPubKey": pubkey,
"vout": 3,
}
])
assert_raises_rpc_error(-3, "Missing vout", self.nodes[0].signrawtransactionwithwallet, rawtx, [
{
"txid": txid,
"scriptPubKey": pubkey,
"amount": 1,
}
])
assert_raises_rpc_error(-3, "Missing txid", self.nodes[0].signrawtransactionwithwallet, rawtx, [
{
"scriptPubKey": pubkey,
"vout": 3,
"amount": 1,
}
])
assert_raises_rpc_error(-3, "Missing scriptPubKey", self.nodes[0].signrawtransactionwithwallet, rawtx, [
{
"txid": txid,
"vout": 3,
"amount": 1
}
])
#########################################
# sendrawtransaction with missing input #
#########################################
self.log.info('sendrawtransaction with missing input')
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1}] #won't exists
outputs = { self.nodes[0].getnewaddress() : 4.998 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
rawtx = self.nodes[2].signrawtransactionwithwallet(rawtx)
# This will raise an exception since there are missing inputs
assert_raises_rpc_error(-25, "Missing inputs", self.nodes[2].sendrawtransaction, rawtx['hex'])
#####################################
# getrawtransaction with block hash #
#####################################
# make a tx by sending then generate 2 blocks; block1 has the tx in it
tx = self.nodes[2].sendtoaddress(self.nodes[1].getnewaddress(), 1)
block1, block2 = self.nodes[2].generate(2)
self.sync_all()
# We should be able to get the raw transaction by providing the correct block
gottx = self.nodes[0].getrawtransaction(tx, True, block1)
assert_equal(gottx['txid'], tx)
assert_equal(gottx['in_active_chain'], True)
# We should not have the 'in_active_chain' flag when we don't provide a block
gottx = self.nodes[0].getrawtransaction(tx, True)
assert_equal(gottx['txid'], tx)
assert 'in_active_chain' not in gottx
# We should not get the tx if we provide an unrelated block
assert_raises_rpc_error(-5, "No such transaction found", self.nodes[0].getrawtransaction, tx, True, block2)
# An invalid block hash should raise the correct errors
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].getrawtransaction, tx, True, True)
assert_raises_rpc_error(-8, "parameter 3 must be of length 64 (not 6, for 'foobar')", self.nodes[0].getrawtransaction, tx, True, "foobar")
assert_raises_rpc_error(-8, "parameter 3 must be of length 64 (not 8, for 'abcd1234')", self.nodes[0].getrawtransaction, tx, True, "abcd1234")
assert_raises_rpc_error(-8, "parameter 3 must be hexadecimal string (not 'ZZZ0000000000000000000000000000000000000000000000000000000000000')", self.nodes[0].getrawtransaction, tx, True, "ZZZ0000000000000000000000000000000000000000000000000000000000000")
assert_raises_rpc_error(-5, "Block hash not found", self.nodes[0].getrawtransaction, tx, True, "0000000000000000000000000000000000000000000000000000000000000000")
# Undo the blocks and check in_active_chain
self.nodes[0].invalidateblock(block1)
gottx = self.nodes[0].getrawtransaction(txid=tx, verbose=True, blockhash=block1)
assert_equal(gottx['in_active_chain'], False)
self.nodes[0].reconsiderblock(block1)
assert_equal(self.nodes[0].getbestblockhash(), block2)
#########################
# RAW TX MULTISIG TESTS #
#########################
# 2of2 test
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
# Tests for createmultisig and addmultisigaddress
assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 1, ["01020304"])
self.nodes[0].createmultisig(2, [addr1Obj['pubkey'], addr2Obj['pubkey']]) # createmultisig can only take public keys
assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 2, [addr1Obj['pubkey'], addr1]) # addmultisigaddress can take both pubkeys and addresses so long as they are in the wallet, which is tested here.
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr1])['address']
#use balance deltas instead of absolute values
bal = self.nodes[2].getbalance()
# send 1.2 BTC to msig adr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), bal+Decimal('1.20000000')) #node2 has both keys of the 2of2 ms addr., tx should affect the balance
# 2of3 test from different nodes
bal = self.nodes[2].getbalance()
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr3 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
addr3Obj = self.nodes[2].getaddressinfo(addr3)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey']])['address']
txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
decTx = self.nodes[0].gettransaction(txId)
rawTx = self.nodes[0].decoderawtransaction(decTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
#THIS IS AN INCOMPLETE FEATURE
#NODE2 HAS TWO OF THREE KEYS AND THE FUNDS SHOULD BE SPENDABLE AND COUNT AT BALANCE CALCULATION
assert_equal(self.nodes[2].getbalance(), bal) #for now, assume the funds of a 2of3 multisig tx are not marked as spendable
txDetails = self.nodes[0].gettransaction(txId, True)
rawTx = self.nodes[0].decoderawtransaction(txDetails['hex'])
vout = False
for outpoint in rawTx['vout']:
if outpoint['value'] == Decimal('2.20000000'):
vout = outpoint
break
bal = self.nodes[0].getbalance()
inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "amount" : vout['value']}]
outputs = { self.nodes[0].getnewaddress() : 2.19 }
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxPartialSigned = self.nodes[1].signrawtransactionwithwallet(rawTx, inputs)
assert_equal(rawTxPartialSigned['complete'], False) #node1 only has one key, can't comp. sign the tx
rawTxSigned = self.nodes[2].signrawtransactionwithwallet(rawTx, inputs)
assert_equal(rawTxSigned['complete'], True) #node2 can sign the tx compl., own two of three keys
self.nodes[2].sendrawtransaction(rawTxSigned['hex'])
rawTx = self.nodes[0].decoderawtransaction(rawTxSigned['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
# 2of2 test for combining transactions
bal = self.nodes[2].getbalance()
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
mSigObjValid = self.nodes[2].getaddressinfo(mSigObj)
txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
decTx = self.nodes[0].gettransaction(txId)
rawTx2 = self.nodes[0].decoderawtransaction(decTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), bal) # the funds of a 2of2 multisig tx should not be marked as spendable
txDetails = self.nodes[0].gettransaction(txId, True)
rawTx2 = self.nodes[0].decoderawtransaction(txDetails['hex'])
vout = False
for outpoint in rawTx2['vout']:
if outpoint['value'] == Decimal('2.20000000'):
vout = outpoint
break
bal = self.nodes[0].getbalance()
inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "redeemScript" : mSigObjValid['hex'], "amount" : vout['value']}]
outputs = { self.nodes[0].getnewaddress() : 2.19 }
rawTx2 = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxPartialSigned1 = self.nodes[1].signrawtransactionwithwallet(rawTx2, inputs)
self.log.debug(rawTxPartialSigned1)
assert_equal(rawTxPartialSigned1['complete'], False) #node1 only has one key, can't comp. sign the tx
rawTxPartialSigned2 = self.nodes[2].signrawtransactionwithwallet(rawTx2, inputs)
self.log.debug(rawTxPartialSigned2)
assert_equal(rawTxPartialSigned2['complete'], False) #node2 only has one key, can't comp. sign the tx
rawTxComb = self.nodes[2].combinerawtransaction([rawTxPartialSigned1['hex'], rawTxPartialSigned2['hex']])
self.log.debug(rawTxComb)
self.nodes[2].sendrawtransaction(rawTxComb)
rawTx2 = self.nodes[0].decoderawtransaction(rawTxComb)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
# decoderawtransaction tests
# witness transaction
encrawtx = "010000000001010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f50500000000000102616100000000"
decrawtx = self.nodes[0].decoderawtransaction(encrawtx, True) # decode as witness transaction
assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
assert_raises_rpc_error(-22, 'TX decode failed', self.nodes[0].decoderawtransaction, encrawtx, False) # force decode as non-witness transaction
# non-witness transaction
encrawtx = "01000000010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f505000000000000000000"
decrawtx = self.nodes[0].decoderawtransaction(encrawtx, False) # decode as non-witness transaction
assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
# getrawtransaction tests
# 1. valid parameters - only supply txid
txHash = rawTx["hash"]
assert_equal(self.nodes[0].getrawtransaction(txHash), rawTxSigned['hex'])
# 2. valid parameters - supply txid and 0 for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, 0), rawTxSigned['hex'])
# 3. valid parameters - supply txid and False for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, False), rawTxSigned['hex'])
# 4. valid parameters - supply txid and 1 for verbose.
# We only check the "hex" field of the output so we don't need to update this test every time the output format changes.
assert_equal(self.nodes[0].getrawtransaction(txHash, 1)["hex"], rawTxSigned['hex'])
# 5. valid parameters - supply txid and True for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, True)["hex"], rawTxSigned['hex'])
# 6. invalid parameters - supply txid and string "Flase"
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, "Flase")
# 7. invalid parameters - supply txid and empty array
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, [])
# 8. invalid parameters - supply txid and empty dict
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, {})
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 1000}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx= self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 1000)
# 9. invalid parameters - sequence number out of range
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : -1}]
outputs = { self.nodes[0].getnewaddress() : 1 }
assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)
# 10. invalid parameters - sequence number out of range
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967296}]
outputs = { self.nodes[0].getnewaddress() : 1 }
assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967294}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx= self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 4294967294)
####################################
# TRANSACTION VERSION NUMBER TESTS #
####################################
# Test the minimum transaction version number that fits in a signed 32-bit integer.
tx = CTransaction()
tx.nVersion = -0x80000000
rawtx = ToHex(tx)
decrawtx = self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['version'], -0x80000000)
# Test the maximum transaction version number that fits in a signed 32-bit integer.
tx = CTransaction()
tx.nVersion = 0x7fffffff
rawtx = ToHex(tx)
decrawtx = self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['version'], 0x7fffffff)
if __name__ == '__main__':
RawTransactionsTest().main()
|
the-stack_106_19401
|
# -*- coding: utf-8 -*-
from branca.element import CssLink, Element, Figure, JavascriptLink
from branca.utilities import none_max, none_min
from folium.map import Layer
from jinja2 import Template
class HeatMapWithTime(Layer):
"""
Create a HeatMapWithTime layer
Parameters
----------
data: list of list of points of the form [lat, lng] or [lat, lng, weight]
The points you want to plot. The outer list corresponds to the various time
steps in sequential order. (weight is in (0, 1] range and defaults to 1 if
not specified for a point)
index: Index giving the label (or timestamp) of the elements of data. Should have
the same length as data, or is replaced by a simple count if not specified.
name : string, default None
The name of the Layer, as it will appear in LayerControls.
radius: default 15.
The radius used around points for the heatmap.
min_opacity: default 0
The minimum opacity for the heatmap.
max_opacity: default 0.6
The maximum opacity for the heatmap.
scale_radius: default False
Scale the radius of the points based on the zoom level.
gradient: dict, default None
Match point density values to colors. Color can be a name ('red'),
RGB values ('rgb(255,0,0)') or a hex number ('#FF0000').
use_local_extrema: default False
Defines whether the heatmap uses a global extrema set found from the input data
OR a local extrema (the maximum and minimum of the currently displayed view).
auto_play: default False
Automatically play the animation across time.
display_index: default True
Display the index (usually time) in the time control.
index_steps: default 1
Steps to take in the index dimension between animation steps.
min_speed: default 0.1
Minimum fps speed for animation.
max_speed: default 10
Maximum fps speed for animation.
speed_step: default 0.1
Step between different fps speeds on the speed slider.
position: default 'bottomleft'
Position string for the time slider. Format: 'bottom/top'+'left/right'.
overlay : bool, default True
Adds the layer as an optional overlay (True) or the base layer (False).
control : bool, default True
Whether the Layer will be included in LayerControls.
show: bool, default True
Whether the layer will be shown on opening (only for overlays).
"""
_template = Template(u"""
{% macro script(this, kwargs) %}
var times = {{this.times}};
{{this._parent.get_name()}}.timeDimension = L.timeDimension(
{times : times, currentTime: new Date(1)}
);
var {{this._control_name}} = new L.Control.TimeDimensionCustom({{this.index}}, {
autoPlay: {{this.auto_play}},
backwardButton: {{this.backward_button}},
displayDate: {{this.display_index}},
forwardButton: {{this.forward_button}},
limitMinimumRange: {{this.limit_minimum_range}},
limitSliders: {{this.limit_sliders}},
loopButton: {{this.loop_button}},
maxSpeed: {{this.max_speed}},
minSpeed: {{this.min_speed}},
playButton: {{this.play_button}},
playReverseButton: {{this.play_reverse_button}},
position: "{{this.position}}",
speedSlider: {{this.speed_slider}},
speedStep: {{this.speed_step}},
styleNS: "{{this.style_NS}}",
timeSlider: {{this.time_slider}},
timeSliderDrapUpdate: {{this.time_slider_drap_update}},
timeSteps: {{this.index_steps}}
})
.addTo({{this._parent.get_name()}});
var {{this.get_name()}} = new TDHeatmap({{this.data}},
{heatmapOptions: {
radius: {{this.radius}},
minOpacity: {{this.min_opacity}},
maxOpacity: {{this.max_opacity}},
scaleRadius: {{this.scale_radius}},
useLocalExtrema: {{this.use_local_extrema}},
defaultWeight: 1,
{% if this.gradient %}gradient: {{ this.gradient }}{% endif %}
}
})
.addTo({{this._parent.get_name()}});
{% endmacro %}
""")
def __init__(self, data, index=None, name=None, radius=15, min_opacity=0,
max_opacity=0.6, scale_radius=False, gradient=None,
use_local_extrema=False, auto_play=False,
display_index=True, index_steps=1, min_speed=0.1,
max_speed=10, speed_step=0.1, position='bottomleft',
overlay=True, control=True, show=True):
super(HeatMapWithTime, self).__init__(name=name, overlay=overlay,
control=control, show=show)
self._name = 'HeatMap'
self._control_name = self.get_name() + 'Control'
# Input data.
self.data = data
self.index = index if index is not None else [str(i) for i in
range(1, len(data)+1)]
if len(self.data) != len(self.index):
raise ValueError('Input data and index are not of compatible lengths.') # noqa
self.times = list(range(1, len(data)+1))
# Heatmap settings.
self.radius = radius
self.min_opacity = min_opacity
self.max_opacity = max_opacity
self.scale_radius = 'true' if scale_radius else 'false'
self.use_local_extrema = 'true' if use_local_extrema else 'false'
self.gradient = gradient
# Time dimension settings.
self.auto_play = 'true' if auto_play else 'false'
self.display_index = 'true' if display_index else 'false'
self.min_speed = min_speed
self.max_speed = max_speed
self.position = position
self.speed_step = speed_step
self.index_steps = index_steps
# Hard coded defaults for simplicity.
self.backward_button = 'true'
self.forward_button = 'true'
self.limit_sliders = 'true'
self.limit_minimum_range = 5
self.loop_button = 'true'
self.speed_slider = 'true'
self.time_slider = 'true'
self.play_button = 'true'
self.play_reverse_button = 'true'
self.time_slider_drap_update = 'false'
self.style_NS = 'leaflet-control-timecontrol'
def render(self, **kwargs):
super(HeatMapWithTime, self).render(**kwargs)
figure = self.get_root()
assert isinstance(figure, Figure), ('You cannot render this Element '
'if it is not in a Figure.')
figure.header.add_child(
JavascriptLink('https://rawgit.com/socib/Leaflet.TimeDimension/master/dist/leaflet.timedimension.min.js'), # noqa
name='leaflet.timedimension.min.js')
figure.header.add_child(
JavascriptLink(
'https://rawgit.com/python-visualization/folium/master/folium/templates/pa7_hm.min.js'), # noqa
name='heatmap.min.js')
figure.header.add_child(
JavascriptLink('https://rawgit.com/pa7/heatmap.js/develop/plugins/leaflet-heatmap/leaflet-heatmap.js'), # noqa
name='leaflet-heatmap.js')
figure.header.add_child(
CssLink('http://apps.socib.es/Leaflet.TimeDimension/dist/leaflet.timedimension.control.min.css'), # noqa
name='leaflet.timedimension.control.min.css')
figure.header.add_child(
Element(
"""
<script>
var TDHeatmap = L.TimeDimension.Layer.extend({
initialize: function(data, options) {
var heatmapCfg = {
radius: 15,
maxOpacity: 1.,
scaleRadius: false,
useLocalExtrema: false,
latField: 'lat',
lngField: 'lng',
valueField: 'count',
defaultWeight : 1,
};
heatmapCfg = $.extend({}, heatmapCfg, options.heatmapOptions || {});
var layer = new HeatmapOverlay(heatmapCfg);
L.TimeDimension.Layer.prototype.initialize.call(this, layer, options);
this._currentLoadedTime = 0;
this._currentTimeData = {
data: []
};
this.data= data;
this.defaultWeight = heatmapCfg.defaultWeight || 1;
},
onAdd: function(map) {
L.TimeDimension.Layer.prototype.onAdd.call(this, map);
map.addLayer(this._baseLayer);
if (this._timeDimension) {
this._getDataForTime(this._timeDimension.getCurrentTime());
}
},
_onNewTimeLoading: function(ev) {
this._getDataForTime(ev.time);
return;
},
isReady: function(time) {
return (this._currentLoadedTime == time);
},
_update: function() {
this._baseLayer.setData(this._currentTimeData);
return true;
},
_getDataForTime: function(time) {
delete this._currentTimeData.data;
this._currentTimeData.data = [];
var data = this.data[time-1];
for (var i = 0; i < data.length; i++) {
this._currentTimeData.data.push({
lat: data[i][0],
lng: data[i][1],
count: data[i].length>2 ? data[i][2] : this.defaultWeight
});
}
this._currentLoadedTime = time;
if (this._timeDimension && time == this._timeDimension.getCurrentTime() && !this._timeDimension.isLoading()) {
this._update();
}
this.fire('timeload', {
time: time
});
}
});
L.Control.TimeDimensionCustom = L.Control.TimeDimension.extend({
initialize: function(index, options) {
var playerOptions = {
buffer: 1,
minBufferReady: -1
};
options.playerOptions = $.extend({}, playerOptions, options.playerOptions || {});
L.Control.TimeDimension.prototype.initialize.call(this, options);
this.index = index;
},
_getDisplayDateFormat: function(date){
return this.index[date.getTime()-1];
}
});
</script>
""", # noqa
template_name='timeControlScript'
)
)
def _get_self_bounds(self):
"""
Computes the bounds of the object itself (not including its children)
in the form [[lat_min, lon_min], [lat_max, lon_max]].
"""
bounds = [[None, None], [None, None]]
for point in self.data:
bounds = [
[
none_min(bounds[0][0], point[0]),
none_min(bounds[0][1], point[1]),
],
[
none_max(bounds[1][0], point[0]),
none_max(bounds[1][1], point[1]),
],
]
return bounds
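# Hedged usage sketch, not part of the original source; assumes folium with this
# plugin installed, and the coordinates, labels, and output filename below are
# purely illustrative.
# import folium
# from folium.plugins import HeatMapWithTime
# m = folium.Map(location=[48.0, 5.0], zoom_start=6)
# # one list of [lat, lng(, weight)] points per time step
# data = [[[48.0, 5.0, 1.0]], [[48.1, 5.1, 0.5]], [[48.2, 5.2, 0.8]]]
# HeatMapWithTime(data, index=["day 1", "day 2", "day 3"], auto_play=True).add_to(m)
# m.save("heatmap_with_time.html")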
|
the-stack_106_19402
|
import os
import angr
import angrop # pylint: disable=unused-import
BIN_DIR = os.path.join(os.path.dirname(__file__), "..", "..", "binaries")
def test_arm_conditional():
"""
Currently, we don't model conditional execution in arm. So we don't allow
conditional execution in arm at this moment.
"""
proj = angr.Project(os.path.join(BIN_DIR, "tests", "armel", "helloworld"))
rop = proj.analyses.ROP(rebase=False)
rop.find_gadgets_single_threaded(show_progress=False)
cond_gadget_addrs = [0x10368, 0x1036c, 0x10370, 0x10380, 0x10384, 0x1038c, 0x1039c,
0x103a0, 0x103b8, 0x103bc, 0x103c4, 0x104e8, 0x104ec]
assert all(x.addr not in cond_gadget_addrs for x in rop._gadgets)
def test_jump_gadget():
"""
Ensure it finds gadgets ending with jumps
Ensure angrop can use jump gadgets to build ROP chains
"""
proj = angr.Project(os.path.join(BIN_DIR, "tests", "mipsel", "fauxware"))
rop = proj.analyses.ROP(rebase=False)
rop.find_gadgets_single_threaded(show_progress=False)
jump_gadgets = [x for x in rop._gadgets if x.gadget_type == "jump"]
assert len(jump_gadgets) > 0
jump_regs = [x.jump_reg for x in jump_gadgets]
assert 't9' in jump_regs
assert 'ra' in jump_regs
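# Hedged sketch, not part of the original source: once gadgets are collected,
# angrop can assemble chains from them. The binary path and register name below
# are illustrative assumptions, not taken from this test file.
# proj = angr.Project(os.path.join(BIN_DIR, "tests", "i386", "bronze_ropchain"))
# rop = proj.analyses.ROP(rebase=False)
# rop.find_gadgets_single_threaded(show_progress=False)
# chain = rop.set_regs(eax=0x1337)   # build a chain that ends with eax == 0x1337
# print(chain.payload_str())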
def run_all():
functions = globals()
all_functions = dict(filter((lambda kv: kv[0].startswith('test_')), functions.items()))
for f in sorted(all_functions.keys()):
if hasattr(all_functions[f], '__call__'):
all_functions[f]()
if __name__ == "__main__":
import sys
import logging
logging.getLogger("angrop.rop").setLevel(logging.DEBUG)
if len(sys.argv) > 1:
globals()['test_' + sys.argv[1]]()
else:
run_all()
|
the-stack_106_19403
|
a,b=input().split()
a,b=int(a),int(b)
c=a-b
c=str(c)
if(len(c)==1):
if(int(c)!=9):
print(int(c)+1)
else:
print(int(c)-1)
else:
if(int(c[0])!=9):
print(str(int(c[0])+1),end="")
else:
print(str(int(c[0])-1),end="")
print(c[1:])
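# Hedged explanatory note, not part of the original source: the script reads two
# integers, computes c = a - b, and prints c with its leading digit increased by
# one (or decreased to 8 when that digit is 9), i.e. a deliberately "almost
# correct" difference. Illustrative run (hypothetical input/output):
#   input : 5 2
#   output: 4    # true difference is 3; leading digit 3 becomes 4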
|
the-stack_106_19404
|
from asyncio import sleep
from json import loads
from json.decoder import JSONDecodeError
from os import environ
from sys import setrecursionlimit
import spotify_token as st
from requests import get
from telethon.tl.functions.account import UpdateProfileRequest
from sample_config import Config
from uniborg.util import admin_cmd
# =================== CONSTANT ===================
SPO_BIO_ENABLED = "```Spotify Current Music to Bio enabled.```"
SPO_BIO_DISABLED = "```Spotify Current Music to Bio disabled. Bio is default now.```"
SPO_BIO_RUNNING = "```Spotify Current Music to Bio already running.```"
SPO_BIO_CONFIG_ERROR = "```Error.```"
ERROR_MSG = "```Module halted, Unexpected error.```"
sp_dc = Config.SPOTIFY_DC
sp_key = Config.SPOTIFY_KEY
ARTIST = 0
SONG = 0
BIOPREFIX = "Now Playing.."
SPOTIFYCHECK = False
RUNNING = False
OLDEXCEPT = False
PARSE = False
# ================================================
async def get_spotify_token():
sptoken = st.start_session(sp_dc, sp_key)
access_token = sptoken[0]
environ["spftoken"] = access_token
async def update_spotify_info():
global ARTIST
global SONG
global PARSE
global SPOTIFYCHECK
global RUNNING
global OLDEXCEPT
oldartist = ""
oldsong = ""
while SPOTIFYCHECK:
try:
RUNNING = True
spftoken = environ.get("spftoken", None)
hed = {"Authorization": "Bearer " + spftoken}
url = "https://api.spotify.com/v1/me/player/currently-playing"
response = get(url, headers=hed)
data = loads(response.content)
artist = data["item"]["album"]["artists"][0]["name"]
song = data["item"]["name"]
OLDEXCEPT = False
oldsong = environ.get("oldsong", None)
if song != oldsong and artist != oldartist:
oldartist = artist
environ["oldsong"] = song
spobio = BIOPREFIX + " 🎧: " + artist + " - " + song
await borg(UpdateProfileRequest(about=spobio))
environ["errorcheck"] = "0"
except KeyError:
errorcheck = environ.get("errorcheck", None)
if errorcheck == 0:
await update_token()
elif errorcheck == 1:
SPOTIFYCHECK = False
await borg(UpdateProfileRequest(about=Config.DEFAULT_BIO))
print(ERROR_MSG)
if Config.LOGGER:
await borg.send_message(Config.PM_LOGGR_BOT_API_ID, ERROR_MSG)
except JSONDecodeError:
OLDEXCEPT = True
await sleep(6)
await borg(UpdateProfileRequest(about=Config.DEFAULT_BIO))
except TypeError:
await dirtyfix()
SPOTIFYCHECK = False
await sleep(2)
await dirtyfix()
RUNNING = False
async def update_token():
sptoken = st.start_session(sp_dc, sp_key)
access_token = sptoken[0]
environ["spftoken"] = access_token
environ["errorcheck"] = "1"
await update_spotify_info()
async def dirtyfix():
global SPOTIFYCHECK
SPOTIFYCHECK = True
await sleep(4)
await update_spotify_info()
@borg.on(admin_cmd(pattern="enablespotify ?(.*)")) # pylint:disable=E0602
async def set_biostgraph(setstbio):
setrecursionlimit(700000)
if not SPOTIFYCHECK:
environ["errorcheck"] = "0"
await setstbio.edit(SPO_BIO_ENABLED)
await get_spotify_token()
await dirtyfix()
else:
await setstbio.edit(SPO_BIO_RUNNING)
@borg.on(admin_cmd(pattern="disablespotify ?(.*)")) # pylint:disable=E0602
async def set_biodgraph(setdbio):
global SPOTIFYCHECK
global RUNNING
SPOTIFYCHECK = False
RUNNING = False
await borg(UpdateProfileRequest(about=Config.DEFAULT_BIO))
await setdbio.edit(SPO_BIO_DISABLED)
|
the-stack_106_19406
|
from mcpi.minecraft import Minecraft
mc = Minecraft.create()
from flask import Flask
app = Flask(__name__)
@app.route("/")
def showName():
pos = mc.player.getTilePos()
return "Player position: x: "+str(pos.x)+" y: "+str(pos.y) +" z: "+str(pos.z)
if __name__ == '__main__':
app.run()
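# Hedged usage sketch, not part of the original source: with a Minecraft Pi /
# RaspberryJuice server reachable by mcpi, run this script and query Flask's
# default endpoint; the position values shown are illustrative, not real output.
#   $ curl http://127.0.0.1:5000/
#   Player position: x: 10 y: 64 z: -3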
|
the-stack_106_19407
|
"""
plottr/apps/inspectr.py -- tool for browsing qcodes data.
This module provides a GUI tool for browsing qcodes .db files.
You can drag/drop .db files into the inspectr window, then browse through
datasets by date. The inspectr itself shows some elementary information
about each dataset and you can launch a plotting window that allows visualizing
the data in it.
Note that this tool is essentially only visualizing some basic structure of the
runs contained in the database. It does not do any handling or loading of
data. It relies on the public qcodes API to get its information.
"""
import os
import time
import sys
import argparse
import logging
from typing import Optional, Sequence, List, Dict, Iterable, Union, cast, Tuple
from typing_extensions import TypedDict
from numpy import rint
import pandas
from plottr import QtCore, QtWidgets, Signal, Slot, QtGui, Flowchart
from .. import log as plottrlog
from ..data.qcodes_dataset import (get_runs_from_db_as_dataframe,
get_ds_structure, load_dataset_from)
from plottr.gui.widgets import MonitorIntervalInput, FormLayoutWrapper, dictToTreeWidgetItems
from .autoplot import autoplotQcodesDataset, QCAutoPlotMainWindow
__author__ = 'Wolfgang Pfaff'
__license__ = 'MIT'
def logger() -> logging.Logger:
logger = plottrlog.getLogger('plottr.apps.inspectr')
return logger
### Database inspector tool
class DateList(QtWidgets.QListWidget):
"""Displays a list of dates for which there are runs in the database."""
datesSelected = Signal(list)
fileDropped = Signal(str)
def __init__(self, parent: Optional[QtWidgets.QWidget] = None):
super().__init__(parent)
self.setAcceptDrops(True)
self.setDefaultDropAction(QtCore.Qt.CopyAction)
self.setSelectionMode(QtWidgets.QListView.ExtendedSelection)
self.itemSelectionChanged.connect(self.sendSelectedDates)
@Slot(list)
def updateDates(self, dates: Sequence[str]) -> None:
for d in dates:
if len(self.findItems(d, QtCore.Qt.MatchExactly)) == 0:
self.insertItem(0, d)
i = 0
while i < self.count():
if self.item(i).text() not in dates:
item = self.takeItem(i)
del item
else:
i += 1
if i >= self.count():
break
self.sortItems(QtCore.Qt.DescendingOrder)
@Slot()
def sendSelectedDates(self) -> None:
selection = [item.text() for item in self.selectedItems()]
self.datesSelected.emit(selection)
### Drag/drop handling
def dragEnterEvent(self, event: QtGui.QDragEnterEvent) -> None:
if event.mimeData().hasUrls():
urls = event.mimeData().urls()
if len(urls) == 1:
url = urls[0]
if url.isLocalFile():
event.accept()
else:
event.ignore()
else:
event.ignore()
def dropEvent(self, event: QtGui.QDropEvent) -> None:
url = event.mimeData().urls()[0].toLocalFile()
self.fileDropped.emit(url)
def mimeTypes(self) -> List[str]:
return ([
'text/uri-list',
'application/x-qabstractitemmodeldatalist',
])
class SortableTreeWidgetItem(QtWidgets.QTreeWidgetItem):
"""
QTreeWidgetItem with an overridden comparator that sorts numerical values
as numbers instead of sorting them alphabetically.
"""
def __init__(self, strings: Iterable[str]):
super().__init__(strings)
def __lt__(self, other: "SortableTreeWidgetItem") -> bool:
col = self.treeWidget().sortColumn()
text1 = self.text(col)
text2 = other.text(col)
try:
return float(text1) < float(text2)
except ValueError:
return text1 < text2
class RunList(QtWidgets.QTreeWidget):
"""Shows the list of runs for a given date selection."""
cols = ['Run ID', 'Experiment', 'Sample', 'Name', 'Started', 'Completed', 'Records', 'GUID']
runSelected = Signal(int)
runActivated = Signal(int)
def __init__(self, parent: Optional[QtWidgets.QWidget] = None):
super().__init__(parent)
self.setColumnCount(len(self.cols))
self.setHeaderLabels(self.cols)
self.itemSelectionChanged.connect(self.selectRun)
self.itemActivated.connect(self.activateRun)
self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.copy_to_clipboard)
@Slot(QtCore.QPoint)
def copy_to_clipboard(self, position: QtCore.QPoint) -> None:
menu = QtWidgets.QMenu()
copy_icon = self.style().standardIcon(QtWidgets.QStyle.SP_DialogSaveButton)
copy_action = menu.addAction(copy_icon, "Copy")
action = menu.exec_(self.mapToGlobal(position))
if action == copy_action:
model_index = self.indexAt(position)
item = self.itemFromIndex(model_index)
QtWidgets.QApplication.clipboard().setText(item.text(
model_index.column()))
def addRun(self, runId: int, **vals: str) -> None:
lst = [str(runId)]
lst.append(vals.get('experiment', ''))
lst.append(vals.get('sample', ''))
lst.append(vals.get('name', ''))
lst.append(vals.get('started_date', '') + ' ' + vals.get('started_time', ''))
lst.append(vals.get('completed_date', '') + ' ' + vals.get('completed_time', ''))
lst.append(str(vals.get('records', '')))
lst.append(vals.get('guid', ''))
item = SortableTreeWidgetItem(lst)
self.addTopLevelItem(item)
def setRuns(self, selection: Dict[int, Dict[str, str]]) -> None:
self.clear()
# disable sorting before inserting values to avoid performance hit
self.setSortingEnabled(False)
for runId, record in selection.items():
self.addRun(runId, **record)
self.setSortingEnabled(True)
for i in range(len(self.cols)):
self.resizeColumnToContents(i)
def updateRuns(self, selection: Dict[int, Dict[str, str]]) -> None:
run_added = False
for runId, record in selection.items():
item = self.findItems(str(runId), QtCore.Qt.MatchExactly)
if len(item) == 0:
self.setSortingEnabled(False)
self.addRun(runId, **record)
run_added = True
elif len(item) == 1:
completed = record.get('completed_date', '') + ' ' + record.get(
'completed_time', '')
if completed != item[0].text(5):
item[0].setText(5, completed)
num_records = str(record.get('records', ''))
if num_records != item[0].text(6):
item[0].setText(6, num_records)
else:
                raise RuntimeError(f"More than one run found with runId: {runId}")
if run_added:
self.setSortingEnabled(True)
for i in range(len(self.cols)):
self.resizeColumnToContents(i)
@Slot()
def selectRun(self) -> None:
selection = self.selectedItems()
if len(selection) == 0:
return
runId = int(selection[0].text(0))
self.runSelected.emit(runId)
@Slot(QtWidgets.QTreeWidgetItem, int)
def activateRun(self, item: QtWidgets.QTreeWidgetItem, column: int) -> None:
runId = int(item.text(0))
self.runActivated.emit(runId)
class RunInfo(QtWidgets.QTreeWidget):
"""widget that shows some more details on a selected run.
When sending information in form of a dictionary, it will create
a tree view of that dictionary and display that.
"""
def __init__(self, parent: Optional[QtWidgets.QWidget] = None):
super().__init__(parent)
self.setHeaderLabels(['Key', 'Value'])
self.setColumnCount(2)
@Slot(dict)
def setInfo(self, infoDict: Dict[str, Union[dict, str]]) -> None:
self.clear()
items = dictToTreeWidgetItems(infoDict)
for item in items:
self.addTopLevelItem(item)
item.setExpanded(True)
self.expandAll()
for i in range(2):
self.resizeColumnToContents(i)
class LoadDBProcess(QtCore.QObject):
"""
Worker object for getting a qcodes db overview as pandas dataframe.
It's good to have this in a separate thread because it can be a bit slow
for large databases.
"""
dbdfLoaded = Signal(object)
pathSet = Signal()
def setPath(self, path: str) -> None:
self.path = path
self.pathSet.emit()
def loadDB(self) -> None:
dbdf = get_runs_from_db_as_dataframe(self.path)
self.dbdfLoaded.emit(dbdf)
class QCodesDBInspector(QtWidgets.QMainWindow):
"""
Main window of the inspectr tool.
"""
    #: `Signal ()` -- Emitted when there's an update to the internally
#: cached data (the *data base data frame* :)).
dbdfUpdated = Signal()
#: Signal (`dict`) -- emitted to communicate information about a given
#: run to the widget that displays the information
_sendInfo = Signal(dict)
def __init__(self, parent: Optional[QtWidgets.QWidget] = None,
dbPath: Optional[str] = None):
"""Constructor for :class:`QCodesDBInspector`."""
super().__init__(parent)
self._plotWindows: Dict[int, WindowDict] = {}
self.filepath = dbPath
self.dbdf = None
self.monitor = QtCore.QTimer()
# flag for determining what has been loaded so far.
# * None: nothing opened yet.
# * -1: empty DS open.
# * any value > 0: run ID from the most recent loading.
self.latestRunId = None
self.setWindowTitle('Plottr | QCoDeS dataset inspectr')
### GUI elements
# Main Selection widgets
self.dateList = DateList()
self._selected_dates: Tuple[str, ...] = ()
self.runList = RunList()
self.runInfo = RunInfo()
rightSplitter = QtWidgets.QSplitter(QtCore.Qt.Vertical)
rightSplitter.addWidget(self.runList)
rightSplitter.addWidget(self.runInfo)
rightSplitter.setSizes([400, 200])
splitter = QtWidgets.QSplitter()
splitter.addWidget(self.dateList)
splitter.addWidget(rightSplitter)
splitter.setSizes([100, 500])
self.setCentralWidget(splitter)
# status bar
self.status = QtWidgets.QStatusBar()
self.setStatusBar(self.status)
# toolbar
self.toolbar = self.addToolBar('Data monitoring')
# toolbar item: monitor interval
self.monitorInput = MonitorIntervalInput()
self.monitorInput.setToolTip('Set to 0 for disabling')
self.monitorInput.intervalChanged.connect(self.setMonitorInterval)
self.toolbar.addWidget(self.monitorInput)
self.toolbar.addSeparator()
# toolbar item: auto-launch plotting
self.autoLaunchPlots = FormLayoutWrapper([
('Auto-plot new', QtWidgets.QCheckBox())
])
tt = "If checked, and automatic refresh is running, "
tt += " launch plotting window for new datasets automatically."
self.autoLaunchPlots.setToolTip(tt)
self.toolbar.addWidget(self.autoLaunchPlots)
# menu bar
menu = self.menuBar()
fileMenu = menu.addMenu('&File')
# action: load db file
loadAction = QtWidgets.QAction('&Load', self)
loadAction.setShortcut('Ctrl+L')
loadAction.triggered.connect(self.loadDB)
fileMenu.addAction(loadAction)
# action: updates from the db file
refreshAction = QtWidgets.QAction('&Refresh', self)
refreshAction.setShortcut('R')
refreshAction.triggered.connect(self.refreshDB)
fileMenu.addAction(refreshAction)
# sizing
scaledSize = 640 * rint(self.logicalDpiX() / 96.0)
self.resize(scaledSize, scaledSize)
### Thread workers
# DB loading. can be slow, so nice to have in a thread.
self.loadDBProcess = LoadDBProcess()
self.loadDBThread = QtCore.QThread()
self.loadDBProcess.moveToThread(self.loadDBThread)
self.loadDBProcess.pathSet.connect(self.loadDBThread.start)
self.loadDBProcess.dbdfLoaded.connect(self.DBLoaded)
self.loadDBProcess.dbdfLoaded.connect(self.loadDBThread.quit)
self.loadDBThread.started.connect(self.loadDBProcess.loadDB)
### connect signals/slots
self.dbdfUpdated.connect(self.updateDates)
self.dbdfUpdated.connect(self.showDBPath)
self.dateList.datesSelected.connect(self.setDateSelection)
self.dateList.fileDropped.connect(self.loadFullDB)
self.runList.runSelected.connect(self.setRunSelection)
self.runList.runActivated.connect(self.plotRun)
self._sendInfo.connect(self.runInfo.setInfo)
self.monitor.timeout.connect(self.monitorTriggered)
if self.filepath is not None:
self.loadFullDB(self.filepath)
def closeEvent(self, event: QtGui.QCloseEvent) -> None:
"""
        When closing the inspectr window, do some housekeeping:
* stop the monitor, if running
* close all plot windows
"""
if self.monitor.isActive():
self.monitor.stop()
for runId, info in self._plotWindows.items():
info['window'].close()
@Slot()
def showDBPath(self) -> None:
tstamp = time.strftime("%Y-%m-%d %H:%M:%S")
assert self.filepath is not None
path = os.path.abspath(self.filepath)
self.status.showMessage(f"{path} (loaded: {tstamp})")
### loading the DB and populating the widgets
@Slot()
def loadDB(self) -> None:
"""
Open a file dialog that allows selecting a .db file for loading.
If a file is selected, opens the db.
"""
if self.filepath is not None:
curdir = os.path.split(self.filepath)[0]
else:
curdir = os.getcwd()
path, _fltr = QtWidgets.QFileDialog.getOpenFileName(
self,
'Open qcodes .db file',
curdir,
'qcodes .db files (*.db);;all files (*.*)',
)
if path:
logger().info(f"Opening: {path}")
self.loadFullDB(path=path)
def loadFullDB(self, path: Optional[str] = None) -> None:
if path is not None and path != self.filepath:
self.filepath = path
# makes sure we treat a newly loaded file fresh and not as a
# refreshed one.
self.latestRunId = None
if self.filepath is not None:
if not self.loadDBThread.isRunning():
self.loadDBProcess.setPath(self.filepath)
def DBLoaded(self, dbdf: pandas.DataFrame) -> None:
if dbdf.equals(self.dbdf):
logger().debug('DB reloaded with no changes. Skipping update')
return None
self.dbdf = dbdf
self.dbdfUpdated.emit()
self.dateList.sendSelectedDates()
logger().debug('DB reloaded')
if self.latestRunId is not None:
idxs = self.dbdf.index.values
newIdxs = idxs[idxs > self.latestRunId]
if self.monitor.isActive() and self.autoLaunchPlots.elements['Auto-plot new'].isChecked():
for idx in newIdxs:
self.plotRun(idx)
self._plotWindows[idx]['window'].setMonitorInterval(
self.monitorInput.spin.value()
)
@Slot()
def updateDates(self) -> None:
assert self.dbdf is not None
if self.dbdf.size > 0:
dates = list(self.dbdf.groupby('started_date').indices.keys())
self.dateList.updateDates(dates)
### reloading the db
@Slot()
def refreshDB(self) -> None:
if self.filepath is not None:
if self.dbdf is not None and self.dbdf.size > 0:
self.latestRunId = self.dbdf.index.values.max()
else:
self.latestRunId = -1
self.loadFullDB()
@Slot(float)
def setMonitorInterval(self, val: float) -> None:
self.monitor.stop()
if val > 0:
self.monitor.start(int(val * 1000))
self.monitorInput.spin.setValue(val)
@Slot()
def monitorTriggered(self) -> None:
logger().debug('Refreshing DB')
self.refreshDB()
### handling user selections
@Slot(list)
def setDateSelection(self, dates: Sequence[str]) -> None:
if len(dates) > 0:
assert self.dbdf is not None
selection = self.dbdf.loc[self.dbdf['started_date'].isin(dates)].sort_index(ascending=False)
old_dates = self._selected_dates
if not all(date in old_dates for date in dates):
self.runList.setRuns(selection.to_dict(orient='index'))
else:
self.runList.updateRuns(selection.to_dict(orient='index'))
self._selected_dates = tuple(dates)
else:
self._selected_dates = ()
self.runList.clear()
@Slot(int)
def setRunSelection(self, runId: int) -> None:
assert self.filepath is not None
ds = load_dataset_from(self.filepath, runId)
snap = None
if hasattr(ds, 'snapshot'):
snap = ds.snapshot
structure = cast(Dict[str, dict], get_ds_structure(ds))
# cast away typed dict so we can pop a key
for k, v in structure.items():
v.pop('values')
contentInfo = {'Data structure': structure,
'QCoDeS Snapshot': snap}
self._sendInfo.emit(contentInfo)
@Slot(int)
def plotRun(self, runId: int) -> None:
assert self.filepath is not None
fc, win = autoplotQcodesDataset(pathAndId=(self.filepath, runId))
self._plotWindows[runId] = {
'flowchart': fc,
'window': win,
}
win.showTime()
class WindowDict(TypedDict):
flowchart: Flowchart
window: QCAutoPlotMainWindow
def inspectr(dbPath: Optional[str] = None) -> QCodesDBInspector:
win = QCodesDBInspector(dbPath=dbPath)
return win
def main(dbPath: Optional[str]) -> None:
app = QtWidgets.QApplication([])
plottrlog.enableStreamHandler(True)
win = inspectr(dbPath=dbPath)
win.show()
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
appinstance = QtWidgets.QApplication.instance()
assert appinstance is not None
appinstance.exec_()
def script() -> None:
parser = argparse.ArgumentParser(description='inspectr -- sifting through qcodes data.')
parser.add_argument('--dbpath', help='path to qcodes .db file',
default=None)
args = parser.parse_args()
main(args.dbpath)
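# The original module ends without a __main__ guard (packaging may rely on an entry
# point instead); a minimal sketch for running it directly could look like this.
if __name__ == '__main__':
    script()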
|
the-stack_106_19411
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Utility for plotting peaks in a certain region.
"""
import os
import sys
import math
import argparse
import itertools
import cmder
import inflect
import pandas as pd
import pyBigWig
from seqflow import Flow, task, logger
import matplotlib.pylab as plt
import seaborn as sns
from matplotlib import rcParams
import matplotlib.colors
rcParams['font.sans-serif'] = ['Times New Roman', 'Tahoma', 'DejaVu Sans', 'Lucida Grande', 'Verdana']
plt.switch_backend('agg')
parser = argparse.ArgumentParser(description=__doc__, prog='peak-plot')
parser.add_argument('--peak_beds', nargs='+', help="Space separated peak bed files.")
parser.add_argument('--pos_bigwigs', nargs='+', help="Path to BigWig files contains data on positive strand.")
parser.add_argument('--neg_bigwigs', nargs='+', help="Path to BigWig files contains data on negative strand.")
parser.add_argument('--outdir', type=str, help="Path to output directory, default: current work directory.")
parser.add_argument('--bases', type=int, help="Number of bases need to expand from peak center, default: 50.",
default=50)
parser.add_argument('--peak_bed_ids', nargs='+', help="Optional space separated short IDs (e.g., S1, S2, ...) for "
"peak bed files, default: basename of each peak bed file "
"without .bed extension.")
parser.add_argument('--bigwig_ids', nargs='+', help="Optional space separated short IDs (e.g., S1, S2, ...) for "
"BigWig files, default: basename of each BigWig file "
"without .bw extension.")
parser.add_argument('--cleanup', action='store_true', help='Clean up temporary files and links after complete.')
parser.add_argument('--dry_run', action='store_true',
help='Print out steps and inputs/outputs of each step without actually running the pipeline.')
parser.add_argument('--debug', action='store_true', help='Invoke debug mode (only for develop purpose).')
if len(sys.argv) == 1:
parser.print_help(sys.stderr)
sys.exit(1)
args = parser.parse_args()
NEED_TO_DELETE = []
def right_replace(s, src, tar):
if s.endswith(src):
return f'{s[:-len(src)]}{tar}'
return s
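# Illustrative behaviour of right_replace (file names here are hypothetical):
#   right_replace('peaks.union.bed', '.union.bed', '.merged.bed') -> 'peaks.merged.bed'
#   right_replace('peaks.txt', '.union.bed', '.merged.bed')       -> 'peaks.txt' (unchanged)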
def validate_paths():
def files_exist(files, tag):
if not files:
logger.error(f'No {tag} were provided, aborted.')
sys.exit(1)
engine, paths = inflect.engine(), []
for i, file in enumerate(files, start=1):
if os.path.exists(file):
if not os.path.isfile(file):
logger.error(f'The {engine.ordinal(i)} file in {tag} "{file}" is not a file.')
sys.exit(1)
else:
paths.append(os.path.abspath(file))
else:
logger.error(f'The {engine.ordinal(i)} file in {tag} "{file}" does not exist.')
sys.exit(1)
return paths
def link_file(file, link):
if not os.path.exists(link):
os.symlink(file, link)
NEED_TO_DELETE.append(link)
return link
peak_beds = files_exist(args.peak_beds, 'Peak beds')
pos_bigwigs = files_exist(args.pos_bigwigs, 'BigWig positive')
neg_bigwigs = files_exist(args.neg_bigwigs, 'BigWig negative')
outdir = args.outdir or os.getcwd()
if os.path.exists(outdir):
if not os.path.isdir(outdir):
logger.error(f'Outdir "{outdir}" is a file not a directory.')
sys.exit(1)
else:
        logger.info(f'Outdir "{outdir}" does not exist, trying to create it ...')
        os.mkdir(outdir)
        logger.info(f'Successfully created Outdir "{outdir}".')
beds, positive_bigwigs, negative_bigwigs = {}, {}, {}
peak_bed_ids = args.peak_bed_ids if args.peak_bed_ids else [''] * len(peak_beds)
for i, (peak_bed, basename) in enumerate(zip(peak_beds, peak_bed_ids), start=1):
basename = basename if basename else right_replace(os.path.basename(peak_bed), '.bed', '')
peak_bed_link = link_file(peak_bed, os.path.join(outdir, f'{basename}.peak.bed'))
beds[basename] = peak_bed_link
bigwig_ids = args.bigwig_ids if args.bigwig_ids else [''] * len(pos_bigwigs)
if len(pos_bigwigs) == len(neg_bigwigs) == len(bigwig_ids):
for i, (bw_pos, bw_neg, basename) in enumerate(zip(pos_bigwigs, neg_bigwigs, bigwig_ids), start=1):
basename = basename if basename else right_replace(os.path.basename(bw_pos), '.bw', '')
bw_pos_link = link_file(bw_pos, os.path.join(outdir, f'{basename}.pos.bw'))
bw_neg_link = link_file(bw_neg, os.path.join(outdir, f'{basename}.neg.bw'))
positive_bigwigs[basename] = bw_pos_link
negative_bigwigs[basename] = bw_neg_link
return beds, positive_bigwigs, negative_bigwigs, outdir, args.bases
beds, positive_bigwigs, negative_bigwigs, outdir, bases = validate_paths()
names = list(positive_bigwigs.keys())
POS_HANDLERS = {k: pyBigWig.open(v) for k, v in positive_bigwigs.items()}
NEG_HANDLERS = {k: pyBigWig.open(v) for k, v in negative_bigwigs.items()}
class Peak:
def __init__(self, chrom, start, end, strand):
self.chrom = chrom
self.start = start
self.end = end
self.strand = strand
self.peak = f'{chrom}:{start}-{end}:{strand}'
@task(inputs=[], outputs=os.path.join(outdir, 'peaks.union.bed'), kind='create')
def concatenate_bed(inputs, bed):
cmder.run(f'cat {" ".join([v for v in beds.values()])} > {bed}', msg='Concatenating peak bed files ...')
return bed
@task(inputs=concatenate_bed, outputs=lambda i: right_replace(i, '.union.bed', '.merged.bed'))
def merge_bed(bed, out):
tmp = right_replace(bed, '.union.bed', '.tmp.bed')
cmder.run(f'sort -k1,1 -k2,2n {bed} > {tmp}', msg=f'Sorting {bed} ...')
# Not output strand ?
cmder.run(f'bedtools merge -i {tmp} -s -c 6 -o distinct > {out}', msg=f'Merging peaks in {tmp} ...')
os.unlink(tmp)
return out
def get_density(chrom, start, end, handler):
density = handler.values(chrom, start, end + 1)
return density
@task(inputs=merge_bed, outputs=os.path.join(outdir, 'peaks.density.matrix.tsv'))
def create_matrix(bed, tsv):
bed = right_replace(tsv, '.density.matrix.tsv', '.merged.bed')
df = pd.read_csv(bed, sep='\t', header=None, names=['chrom', 'start', 'end', 'strand'])
dd, headers = [], ['Peak', 'Base']
for name in names:
headers.extend([f'{name}:{i}' for i in list(range(-args.bases, 1)) + list(range(1, args.bases + 1))])
for i, row in enumerate(df.itertuples()):
densities = []
peak = Peak(row.chrom, row.start, row.end, row.strand)
dx = {k: get_density(row.chrom, row.start, row.end, POS_HANDLERS[k] if row.strand == '+' else NEG_HANDLERS[k])
for k in names}
dx = pd.DataFrame(dx)
center = dx.abs().max(axis=1).idxmax() + row.start
left, right = center - bases, center + bases
densities.extend([peak.peak, f'{left}-{right}'])
dx = {k: get_density(row.chrom, left - 1, right - 1, POS_HANDLERS[k] if row.strand == '+' else NEG_HANDLERS[k])
for k in names}
maximum = pd.DataFrame(dx).abs().max().max()
for k in sorted(names):
densities.extend([abs(v) / maximum for v in dx[k]])
# densities.extend([abs(v) for v in dx[k]])
if args.debug and i == 100:
break
dd.append(densities)
dd = pd.DataFrame(dd, columns=headers)
dd = dd.fillna(0)
dd = dd[dd.sum(axis=1) > 0]
dd.to_csv(tsv, index=False, sep='\t', float_format='%.4f')
@task(inputs=create_matrix, outputs=lambda i: i.replace('.density.matrix.tsv', '.heatmap.png'))
def plot_peak(tsv, png):
df = pd.read_csv(tsv, sep='\t')
# print(df)
df = df.drop(columns=['Peak', 'Base'])
# df = df.drop(columns=[0, 1])
g = sns.clustermap(df, col_cluster=False, figsize=(12, 8), cmap='Greens', cbar_pos=(0.21, 0.9, 0.78, 0.03),
xticklabels=False, yticklabels=False, cbar_kws={"orientation": "horizontal"})
g.ax_row_dendrogram.set_visible(False)
g.ax_heatmap.set_xticks(list(range(51, len(names) * 100, 101)))
g.ax_heatmap.set_xticklabels([name.replace('QKI_', '').replace('.merged.rmDup.r2.sorted.bam.p.sort', '')
for name in names], rotation=90)
g.savefig(png, dpi=300)
# g.savefig(right_replace(png, '.png', '.pdf'))
# g.savefig(right_replace(png, '.png', '.svg'))
def main():
try:
flow = Flow('PeakPlot', description=__doc__.strip())
flow.run(dry=args.dry_run)
if NEED_TO_DELETE and args.cleanup:
logger.info('Cleaning up ...')
            for file in NEED_TO_DELETE:
cmder.run(f'rm {file}')
logger.info('Cleaning up complete.')
finally:
for k, v in POS_HANDLERS.items():
v.close()
for k, v in NEG_HANDLERS.items():
v.close()
if __name__ == '__main__':
main()
|
the-stack_106_19413
|
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from core.models import Ingredient, Recipe
from rest_framework.test import APIClient
from django.contrib.auth import get_user_model
from recipe.serializers import IngredientSerializer
INGREDIENTS_URL = reverse('recipe:ingredient-list')
class PublicIngredientsApiTests(TestCase):
"""Test the publicly available ingredients API"""
def setUp(self):
self.client = APIClient()
def test_login_required(self):
"""Test that the login is required to access the endpoint"""
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateIngredientsApiTests(TestCase):
"""Test the authorized user ingredients API"""
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'[email protected]',
'testpass'
)
self.client.force_authenticate(self.user)
def test_retrieve_ingredient_list(self):
"""Test retrieving a list of ingredients"""
Ingredient.objects.create(user=self.user, name='kale')
Ingredient.objects.create(user=self.user, name='salt')
res = self.client.get(INGREDIENTS_URL)
ingredients = Ingredient.objects.all().order_by('-name')
serializer = IngredientSerializer(ingredients, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_ingredients_limited_to_user(self):
"""Test that only ingredients for authenticated user are returned"""
user2 = get_user_model().objects.create_user(
'[email protected]',
'testpass'
)
Ingredient.objects.create(user=user2, name='Vinegar')
ingredient = Ingredient.objects.create(user=self.user, name='tumeric')
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], ingredient.name)
def test_create_ingredient_successful(self):
"""Test creating a new ingredient"""
payload = {'name': 'Fish'}
self.client.post(INGREDIENTS_URL, payload)
exists = Ingredient.objects.filter(
user=self.user,
name=payload['name']
).exists()
self.assertTrue(exists)
def test_create_ingredient_invalid(self):
"""Test creating a new ingredient with invalid payload"""
payload = {'name': ''}
res = self.client.post(INGREDIENTS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_ingredients_assigned_to_recipes(self):
"""Test filtering ingredients by those assigned to recipes"""
ingredient1 = Ingredient.objects.create(
user=self.user, name='Apples'
)
ingredient2 = Ingredient.objects.create(
user=self.user, name='Turkey'
)
recipe = Recipe.objects.create(
title='Apple crumble',
time_minutes=5,
price=10.00,
user=self.user
)
recipe.ingredients.add(ingredient1)
res = self.client.get(INGREDIENTS_URL, {'assigned_only': 1})
serializer1 = IngredientSerializer(ingredient1)
serializer2 = IngredientSerializer(ingredient2)
self.assertIn(serializer1.data, res.data)
self.assertNotIn(serializer2.data, res.data)
def test_retrieve_ingredient_assigned_unique(self):
"""Test filtering ingredients by assigned returns unique items"""
ingredient = Ingredient.objects.create(user=self.user, name='Eggs')
Ingredient.objects.create(user=self.user, name='Cheese')
recipe1 = Recipe.objects.create(
title='Eggs benedict',
time_minutes=30,
price=12.00,
user=self.user
)
recipe1.ingredients.add(ingredient)
recipe2 = Recipe.objects.create(
title='Green eggs on toast',
time_minutes=20,
price=5.00,
user=self.user
)
recipe2.ingredients.add(ingredient)
res = self.client.get(INGREDIENTS_URL, {'assigned_only': 1})
self.assertEqual(len(res.data), 1)
|
the-stack_106_19414
|
import numpy as np
from gym_wmgds import Env, spaces
from gym_wmgds.utils import seeding
def categorical_sample(prob_n, np_random):
"""
Sample from categorical distribution
Each row specifies class probabilities
"""
prob_n = np.asarray(prob_n)
csprob_n = np.cumsum(prob_n)
return (csprob_n > np_random.rand()).argmax()
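# Worked example (illustrative): for prob_n = [0.2, 0.3, 0.5], csprob_n is
# [0.2, 0.5, 1.0]; a uniform draw of 0.6 gives (csprob_n > 0.6) == [False, False, True],
# so argmax() returns index 2. Classes are therefore drawn with their given probabilities.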
class DiscreteEnv(Env):
"""
Has the following members
- nS: number of states
- nA: number of actions
- P: transitions (*)
- isd: initial state distribution (**)
(*) dictionary dict of dicts of lists, where
P[s][a] == [(probability, nextstate, reward, done), ...]
(**) list or array of length nS
"""
def __init__(self, nS, nA, P, isd):
self.P = P
self.isd = isd
self.lastaction = None # for rendering
self.nS = nS
self.nA = nA
self.action_space = spaces.Discrete(self.nA)
self.observation_space = spaces.Discrete(self.nS)
self.seed()
self.reset()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def reset(self):
self.s = categorical_sample(self.isd, self.np_random)
self.lastaction = None
return self.s
def step(self, a):
transitions = self.P[self.s][a]
i = categorical_sample([t[0] for t in transitions], self.np_random)
        p, s, r, d = transitions[i]
self.s = s
self.lastaction = a
return (s, r, d, {"prob" : p})
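
# A minimal usage sketch (not part of the original module): a toy two-state MDP
# illustrating the P[s][a] == [(probability, nextstate, reward, done), ...] convention
# from the class docstring. The state/action layout below is hypothetical.
if __name__ == "__main__":
    nS, nA = 2, 2
    P = {
        s: {
            0: [(1.0, s, 0.0, False)],       # action 0: stay put, no reward
            1: [(1.0, 1 - s, 1.0, s == 1)],  # action 1: switch state, reward 1
        }
        for s in range(nS)
    }
    isd = [1.0, 0.0]  # always start in state 0
    env = DiscreteEnv(nS, nA, P, isd)
    print(env.reset())   # initial observation, always 0 here
    print(env.step(1))   # e.g. (1, 1.0, False, {'prob': 1.0})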
|
the-stack_106_19421
|
class ReadFile:
"""ReadFile is a class for reading the lines from a given txt file.
"""
def __init__(self):
"""Keyword argumnets:
file_name -- path to the file wished to be read
"""
try:
print("Started creation of object type of ReadFile succesfully.")
except Exception as exc:
print("Error in creation of object type of ReadFile.")
print(exc)
finally:
print("Ended creation of object type of ReadFile succesfully.\n")
def read_txt_lines(self, file_name):
try:
print("Started succesfully ReadFile: read_txt_lines()")
url_file = open(file_name, "r")
lines = url_file.readlines()
url_file.close()
return lines
except Exception as exc:
print("Error in ReadFile: read_txt_lines()!")
print(exc)
finally:
print("Ended succesfully ReadFile: read_txt_lines()\n")
def read_from_couple(self, prename):
try:
print("Started succesfully ReadFile: read_from_dict()")
save_dir = prename + "/"
file_path = prename + ".txt"
return save_dir, file_path
except Exception as exc:
print("Error in ReadFile: read_from_dict()!")
print(exc)
finally:
print("Ended succesfully ReadFile: read_from_dict()\n")
def create_couples(self, urls_path):
try:
print("Started succesfully ReadFile: create_dict()")
lines = self.read_txt_lines(urls_path)
couples_list = list()
for line in lines:
words = line.split(',')
if words[1].endswith('\n'): words[1] = words[1].rstrip()
couples_list.append(words)
return couples_list
except Exception as exc:
print("Error in ReadFile: create_dict()!")
print(exc)
finally:
print("Ended succesfully ReadFile: create_dict()\n")
|
the-stack_106_19422
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Distributed under terms of the MIT license.
from .extractor import Extractor
from ..vocab import Vocab
import jieba
import re
import numpy as np
UNK_IDX = 0
EOS_IDX = 2
class WordEmbedExtractor(Extractor):
def __init__(self):
Extractor.__init__(self, name="WordEmbedExtractor")
self.max_clen = 300
self.max_wlen = 100
self.feat_names = ['label', 'sid', 's1_word', 's2_word', 's1_word_rvs', 's2_word_rvs', 's1_wlen', 's2_wlen',\
's1_char', 's2_char', 's1_char_rvs', 's2_char_rvs', 's1_clen', 's2_clen']
self.feat_lens = [1, 1, self.max_wlen, self.max_wlen, self.max_wlen, self.max_wlen, 1, 1,\
self.max_clen, self.max_clen, self.max_clen, self.max_clen, 1, 1]
self.feat_levels = ['p', 'p', 'w', 'w', 'w', 'w', 's', 's', 'c', 'c', 'c', 'c', 's', 's']
def extract(self, data, char_vocab, word_vocab, mode='train'):
s1_word = []
s2_word = []
s1_word_rvs = []
s2_word_rvs = []
s1_char = []
s2_char = []
s1_char_rvs = []
s2_char_rvs = []
s1_wlen = []
s2_wlen = []
s1_clen = []
s2_clen = []
sid = []
label = []
for ins in data:
if mode == 'train':
label.append([ins['label']])
sid.append([ins['sid']])
s1_wlen.append([len(ins['s1_word'])])
s2_wlen.append([len(ins['s2_word'])])
s1_word.append(np.pad(word_vocab.toi(ins['s1_word']), (0, self.max_wlen - len(ins['s1_word'])),\
'constant', constant_values=(EOS_IDX, EOS_IDX)))
s2_word.append(np.pad(word_vocab.toi(ins['s2_word']), (0, self.max_wlen - len(ins['s2_word'])),\
'constant', constant_values=(EOS_IDX, EOS_IDX)))
s1_word_rvs.append(np.pad(word_vocab.toi(ins['s1_word'])[::-1], (0, self.max_wlen - len(ins['s1_word'])),\
'constant', constant_values=(EOS_IDX, EOS_IDX)))
s2_word_rvs.append(np.pad(word_vocab.toi(ins['s2_word'])[::-1], (0, self.max_wlen - len(ins['s2_word'])),\
'constant', constant_values=(EOS_IDX, EOS_IDX)))
s1_clen.append([len(ins['s1_char'])])
s2_clen.append([len(ins['s2_char'])])
s1_char.append(np.pad(char_vocab.toi(ins['s1_char']), (0, self.max_clen - len(ins['s1_char'])),\
'constant', constant_values=(EOS_IDX, EOS_IDX)))
s2_char.append(np.pad(char_vocab.toi(ins['s2_char']), (0, self.max_clen - len(ins['s2_char'])),\
'constant', constant_values=(EOS_IDX, EOS_IDX)))
s1_char_rvs.append(np.pad(char_vocab.toi(ins['s1_char'])[::-1], (0, self.max_clen - len(ins['s1_char'])),\
'constant', constant_values=(EOS_IDX, EOS_IDX)))
s2_char_rvs.append(np.pad(char_vocab.toi(ins['s2_char'])[::-1], (0, self.max_clen - len(ins['s2_char'])),\
'constant', constant_values=(EOS_IDX, EOS_IDX)))
if mode == 'train':
feats = np.concatenate((label, sid, s1_word, s2_word, s1_word_rvs, s2_word_rvs, s1_wlen, s2_wlen, s1_char, s2_char, s1_char_rvs, s2_char_rvs, s1_clen, s2_clen), axis=1)
else:
feats = np.concatenate((sid, sid, s1_word, s2_word, s1_word_rvs, s2_word_rvs, s1_wlen, s2_wlen, s1_char, s2_char, s1_char_rvs, s2_char_rvs, s1_clen, s2_clen), axis=1)
return feats
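
# Padding example (illustrative): with max_wlen = 5 and word indices [7, 3],
# np.pad([7, 3], (0, 3), 'constant', constant_values=(2, 2)) gives [7, 3, 2, 2, 2],
# i.e. sequences are right-padded with EOS_IDX (index 2) up to the fixed length.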
|
the-stack_106_19424
|
# Auto-Discovery of Content Files
# write the navigation links into base.html
#read files in the content directory
import glob
all_html_files = glob.glob("content/*.html")
import os

pages = []
template = open("template/base.html").read()
navbar_output = ""
# create the auto-generated list of pages:
for html_file in all_html_files:
    file_name = os.path.basename(html_file)
    name_only, extension = os.path.splitext(file_name)
pages.append({
"filename": "content/" + file_name,
"title":"" + name_only + "",
"output": "docs/" + file_name,
})
# create the auto-generated navigation bar:
add_navbar = '<li class="nav-item"> ' + '<a class="nav-link js-scroll-trigger" href="' + file_name + '#' + name_only + '">' + name_only + '</a> ' + '</li>'
navbar_output = navbar_output + '\n' + add_navbar
# write the auto-generated navbar in the base template:
template = template.replace("{{navbar}}", navbar_output)
#combine the base template with the content and title:
for page in pages:
filename = open(page['filename']).read()
combined_file = template.replace("{{content}}", filename)
combined_file = combined_file.replace("{{title}}", page['title'])
open(page['output'], 'w+').write(combined_file)
#---------------------------------------
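# Example of the generated markup (file names are hypothetical): for content/about.html
# the navbar entry becomes
#   <li class="nav-item"> <a class="nav-link js-scroll-trigger" href="about.html#about">about</a> </li>
# and docs/about.html is written from base.html with {{content}} and {{title}} filled in.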
|
the-stack_106_19425
|
"""
Given a sorted array and a target value, return the index if the target is found.
If not, return the index where it would be if it were inserted in order.
You may assume no duplicates in the array.
@author: Lisong Guo <[email protected]>
@date: July 30, 2018
"""
class Solution:
def _searchInsert(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
return the position to insert a new element into the sorted list.
Note: the solution can be implemented in a recursive way.
"""
size = len(nums)
if (size == 0):
# empty input list, early exit
return 0
mid_index = int(size/2)
if (nums[mid_index] == target):
return mid_index
elif (nums[mid_index] > target):
            return self._searchInsert(nums[0:mid_index], target)
        else:
            return mid_index + 1 + self._searchInsert(nums[mid_index+1:], target)
def searchInsert(self, nums, target):
return self.tail_recursive_searchInsert(0, nums, target)
def tail_recursive_searchInsert(self, start, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
return the position to insert a new element into the sorted list.
Note: the solution can be implemented in a recursive way.
"""
size = len(nums)
if (size == 0):
# empty input list, early exit
return start
mid_index = int(size/2)
if (nums[mid_index] == target):
return start + mid_index
elif (nums[mid_index] > target):
return self.tail_recursive_searchInsert(
start, nums[0:mid_index], target)
else:
return self.tail_recursive_searchInsert(
start+mid_index+1, nums[mid_index+1:], target)
def verify(case_name, test_input, test_target):
"""
utility function for unit testing
"""
solution = Solution()
print(case_name, test_input, ' target:', test_target)
assert(test_target == solution.searchInsert(*test_input))
if __name__ == "__main__":
solution = Solution()
test_case_1_input = ([1,3,5,6], 5)
test_case_1_target = 2
verify('test case 1:', test_case_1_input, test_case_1_target)
test_case_2_input = ([], 1)
test_case_2_target = 0
verify('test case 2 (empty list):', test_case_2_input, test_case_2_target)
test_case_3_input = ([1,3,5,6], 2)
test_case_3_target = 1
verify("test case 3:", test_case_3_input, test_case_3_target)
test_case_4_input = ([1,3,5,6], 7)
test_case_4_target = 4
verify("test case 4:", test_case_4_input, test_case_4_target)
test_case_5_input = ([1,3,5,6], 0)
test_case_5_target = 0
verify("test case 5:", test_case_5_input, test_case_5_target)
|
the-stack_106_19427
|
# Copyright 2020 Mike Iacovacci
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lib.classes import *
from colorama import Fore, Style
from io import BytesIO
from os import geteuid, listdir, mkdir, path, rename, remove
from pickle import dump, load, PickleError
from prompt_toolkit import prompt
from prompt_toolkit.completion import FuzzyCompleter, WordCompleter
from prompt_toolkit.styles import Style as ptkStyle
from re import split
from shutil import rmtree
from sys import argv
from requests import get, RequestException
from yaml import safe_load_all, parser, scanner
from zipfile import BadZipFile, LargeZipFile, ZipFile
def able_to_merge(current_tool, tool_id, tools):
""" SUMMARY: determines if merging YAML data from 2 files is possible without data loss
        INPUT: 1) a tool's parsed YAML data (list of two dicts), 2) a tool ID value (int), and 3) a list of AxiomTool objects
OUTPUT: True or False """
if tool_id < 0:
return False
match = tools[tool_id]
temp_list = []
new_list = []
if current_tool[0]["ptf_module"] != match.ptf_module:
return False
if current_tool[0]["description"] != match.description:
return False
action_count = 0
while action_count < match.action_list.__len__():
temp_list.append(match.action_list[action_count].name)
action_count += 1
command_count = 0
while command_count < match.command_list.__len__():
temp_list.append(match.command_list[command_count].name)
command_count += 1
new_count = 0
while new_count < current_tool[1]["commands"].__len__():
new_list.append(list(current_tool[1]["commands"][new_count].keys())[0])
new_count += 1
for command_name in new_list:
if command_name in temp_list:
return False
return True
def axiom_help():
""" SUMMARY: displays helpful CLI usage details with examples
INPUT: none
OUTPUT: none, only prints to the screen """
print("\n" + "Standard usage: ./axiom [MODE] [TOOL] [NUM]"
"\n" + "" +
"\n" + " ./axiom show nmap" +
"\n" + " ./axiom show mimikatz 1" +
"\n" + " ./axiom build powershell 4" +
"\n" + " ./axiom run hashcat 3" +
"\n" + "" +
"\n" + "Configuration management: ./axiom [MODE] [URL]" +
"\n" + "" +
"\n" + " ./axiom new" +
"\n" + " ./axiom reload" +
"\n" + " ./axiom init" +
"\n" + " ./axiom init https://example.com/config.yml" +
"\n")
def axiom_prompt(tool_list, tool_names, tools):
""" SUMMARY: main interactive prompt loop of the program, handles multiple tool selection loops
INPUT: 1) list of two-item tuples (name, platform), 2) set of tool names, and 3) list of AxiomTool objects
OUTPUT: exit value (int) to be immediately passed to exit() in __main__ """
exit_code = 1
while exit_code > 0:
exit_code = tool_selection_prompt(tool_list, tool_names, tools)
return exit_code
def branch(settings, tool_list, tools):
""" SUMMARY: changes program flow based on user-supplied settings
INPUT: 1) a three-item dictionary, 2) a de-duplicated list of tuples, and 3) list of AxiomTool objects
OUTPUT: no return value, may exit the entire program """
if settings.get("mode") in [None, "reload", "init"]:
return
if settings.get("mode") == "new":
new_generate_command()
if settings.get("num") == -1:
print_error("ERROR: Invalid command ID")
exit(1)
text = settings.get("tool")
tool_id = disambiguate_tool_name(text, tool_list, tools)
if tool_id < 0:
print_error("ERROR: Invalid tool")
exit(1)
tool = tools[tool_id]
if settings.get("mode") == "show":
if settings.get("num") is None:
tool.show()
print()
exit(0)
elif int(settings.get("num") - 1) not in range(tool.combined_list.__len__()):
print_error("ERROR: Invalid command specified")
exit(1)
else:
number = int(settings.get("num") - 1)
command_type, id_value = tool.resolve_command(number)
if command_type == "action":
tool.action_list[id_value].show()
print()
elif command_type == "command":
tool.command_list[id_value].show()
print()
exit(0)
if settings.get("mode") == "run":
if settings.get("num") is None:
print_error("ERROR: No command specified")
exit(1)
elif int(settings.get("num") - 1) not in range(tool.combined_list.__len__()):
print_error("ERROR: Invalid command specified")
exit(1)
else:
number = int(settings.get("num") - 1)
command_type, id_value = tool.resolve_command(number)
if command_type == "action":
if tool.action_list[id_value].execution_type in ["standalone", "autonomous", "NX"]:
tool.action_list[id_value].run(tool)
else:
print_error("ERROR: Selected action must be executed via interactive AXIOM prompt")
exit(1)
elif command_type == "command":
if tool.command_list[id_value].execution_type in ["standalone", "autonomous", "NX"]:
tool.command_list[id_value].run(tool)
else:
print_error("ERROR: Selected command must be executed via interactive AXIOM prompt")
exit(1)
exit(0)
if settings.get("mode") == "build":
if settings.get("num") is None:
print_error("ERROR: No command specified")
exit(1)
elif int(settings.get("num") - 1) not in range(tool.combined_list.__len__()):
print_error("ERROR: Invalid command specified")
exit(1)
else:
number = int(settings.get("num") - 1)
command_type, id_value = tool.resolve_command(number)
if command_type == "action":
tool.action_list[id_value].cli_print()
elif command_type == "command":
tool.command_list[id_value].cli_print()
exit(0)
def command_selection_prompt(tool):
""" SUMMARY: prompts user to select a listed command/action for the current tool and calls the execution function
INPUT: an AxiomTool object
OUTPUT: none """
while True:
tool.show()
number = prompt('\n[AXIOM] Select command: ')
if number == "back":
return
if number == "exit" or number == "quit":
print("Exiting...")
exit(0)
if number == "":
continue
try:
number = int(number)
number -= 1
except (ValueError, TypeError):
number = -1
if number not in range(tool.combined_list.__len__()):
print_error("\nERROR: Invalid command specified")
else:
command_type, id_value = tool.resolve_command(number)
if command_type == "action":
confirmed = tool.action_list[id_value].confirm_and_execute(tool)
elif command_type == "command":
confirmed = tool.command_list[id_value].confirm_and_execute(tool)
else:
confirmed = False
if confirmed:
dispatch.continue_trigger.wait(timeout=None)
dispatch.continue_trigger.clear()
print()
input("[AXIOM] Press ENTER to continue ")
def create_missing_folder(folder):
""" SUMMARY: checks if specified folder exists, creates it if it does not exist
INPUT: a string specifying a folder on the filesystem
OUTPUT: none, creates necessary folder if it does not exist """
if path.exists(folder):
return
else:
try:
mkdir(folder)
except OSError:
print_error(str("ERROR: Cannot create folder \"" + folder + "\""))
exit(1)
def delete_and_recreate_folder(folder):
""" SUMMARY: deletes specified folder, if it exists, and (re)creates it on the filesystem
INPUT: a string specifying a folder on the filesystem
OUTPUT: none, deletes files from the filesystem and/or creates necessary folders """
if path.exists(folder):
try:
rmtree(folder)
except OSError:
print_error(str("ERROR: Cannot delete folder \"" + folder + "\""))
exit(1)
create_missing_folder(folder)
def disambiguate_tool_name(text, tool_list, tools):
""" SUMMARY: finds the user-intended tool ID for multi-platform tool names prompting the user as needed
INPUT: 1) supplied tool name (str), 2) de-duplicated list of tuples, and 3) list of AxiomTool objects
OUTPUT: tool ID value (int) or -1 if invalid number of platforms or no matching tool found """
platform_list = []
for x in tool_list:
if x[0] == text:
platform_list.append(x[1])
platform_list = sorted(platform_list, key=str.casefold)
potential_tool = []
if platform_list.__len__() == 0:
return -1
elif platform_list.__len__() == 1:
potential_tool.append(text)
potential_tool.append(platform_list[0])
else:
selection = 0
while selection == 0:
print("\nPlatforms\n")
i = 0
while i < platform_list.__len__():
print(" " + str(i + 1) + "\t" + platform_list[i])
i += 1
platform = prompt('\n[AXIOM] Select platform: ')
try:
number = int(platform)
except (ValueError, TypeError):
number = 0
if number > 0:
if number <= platform_list.__len__():
potential_tool.append(text)
potential_tool.append(platform_list[number - 1])
selection = 1
return resolve_tool_id(potential_tool, tools)
def download_and_extract_zip(zip_url, extracted_folder, destination_folder, human_name):
""" SUMMARY: prepares the filesystem, downloads a ZIP file, and extracts it to a folder with a specified name
INPUT: ZIP file URL, temporary folder name, destination folder name, and human-friendly name (all strings)
OUTPUT: no return values, modifies the filesystem """
if path.exists(destination_folder):
try:
rmtree(destination_folder)
except OSError:
print_error(str("ERROR: Cannot prepare extraction location \"" + destination_folder + "\""))
exit(1)
print("Downloading " + human_name + "...")
try:
request = get(zip_url)
except RequestException:
print_error(str("ERROR: Cannot download \"" + human_name + "\" from " + zip_url))
exit(1)
else:
if request.status_code == 200:
try:
zipfile = ZipFile(BytesIO(request.content))
zipfile.extractall(".")
rename(extracted_folder, destination_folder)
except (BadZipFile, LargeZipFile, OSError):
print_error(str("ERROR: Cannot extract \"" + extracted_folder + "\""))
exit(1)
else:
print_error(str("ERROR: Failed to download \"" + human_name + "\""))
exit(1)
def get_args():
""" SUMMARY: processes command-line arguments to modify overall program execution flow
INPUT: none, checks argv for arguments supplied via CLI
OUTPUT: three-item dictionary containing the mode type, tool name, and command/action number """
if argv.__len__() < 2:
return {"mode": None, "tool": None, "num": None}
elif argv.__len__() > 4:
axiom_help()
exit(1)
elif argv.__len__() == 2:
if argv[1] == "init":
return {"mode": "init", "tool": None, "num": None}
if argv[1] == "reload":
return {"mode": "reload", "tool": None, "num": None}
if argv[1] in ["n", "ne", "new", "-n", "--new"]:
return {"mode": "new", "tool": None, "num": None}
else:
axiom_help()
exit(1)
    elif argv.__len__() in (3, 4):
if argv[1] == "init":
return {"mode": "init", "tool": str(argv[2]), "num": None}
if argv[1] in ["s", "sh", "sho", "show", "-s", "--show"]:
if argv.__len__() == 3:
return {"mode": "show", "tool": str(argv[2]), "num": None}
if argv.__len__() == 4:
try:
number = int(argv[3])
except (ValueError, TypeError):
number = -1
return {"mode": "show", "tool": str(argv[2]), "num": number}
if argv[1] in ["r", "ru", "run", "-r", "--run"]:
if argv.__len__() == 3:
return {"mode": "run", "tool": str(argv[2]), "num": None}
if argv.__len__() == 4:
try:
number = int(argv[3])
except (ValueError, TypeError):
number = -1
return {"mode": "run", "tool": str(argv[2]), "num": number}
if argv[1] in ["b", "bu", "bui", "buil", "build", "-b", "--build"]:
if argv.__len__() == 3:
return {"mode": "build", "tool": str(argv[2]), "num": None}
if argv.__len__() == 4:
try:
number = int(argv[3])
except (ValueError, TypeError):
number = -1
return {"mode": "build", "tool": str(argv[2]), "num": number}
else:
axiom_help()
exit(1)
else:
axiom_help()
exit(1)
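# Illustrative mapping (values are hypothetical): "./axiom run nmap 2" yields
# {"mode": "run", "tool": "nmap", "num": 2}, while "./axiom reload" yields
# {"mode": "reload", "tool": None, "num": None}.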
def get_input_types(input_types_list, text):
""" SUMMARY: parses placeholder text to determine the type of input required for command/action execution
INPUT: 1) list of all possible input types (strings), and 2) the command text (list or str)
OUTPUT: a list of strings """
if isinstance(text, list):
temporary_string = ""
line_count = 0
while line_count < text.__len__():
temporary_string += text[line_count]
line_count += 1
text = temporary_string
used_input_types = []
end = text.__len__()
indices = [i for i in range(end) if text.startswith("{", i)]
hit_count = 0
while hit_count < indices.__len__():
min_beginning = indices[hit_count]
if min_beginning + 10 > end:
max_ending = end
else:
max_ending = min_beginning + 10
target = text[min_beginning:max_ending]
for entry in input_types_list:
if str(entry + "}") in target:
used_input_types.append(entry)
break
hit_count += 1
return used_input_types
def get_tool_names(tool_list):
""" SUMMARY: creates a list (set) of unique tool names for searching, auto-suggestion, etc.
INPUT: a list of two-item tuple (tool, platform)
OUTPUT: a de-duplicated list (set) of tool names """
tool_names = []
for x in tool_list:
tool_names.append(x[0])
return set(tool_names)
def initialize(settings):
""" SUMMARY: installs PTF + toolkits, optionally downloads/loads user-supplied config file (overwriting existing)
INPUT: three-item settings dictionary
OUTPUT: no return values, modifies the filesystem and global config variable """
print("Initializing...")
if isinstance(settings.get("tool"), str):
config_yaml_url = str(settings.get("tool"))
print("Downloading configuration file...")
try:
request = get(config_yaml_url)
except RequestException:
print_error(str("ERROR: Cannot download configuration file from " + config_yaml_url))
exit(1)
else:
if request.status_code == 200:
try:
remove(config.axiom.config_file)
with open(config.axiom.config_file, 'wb') as config_file:
config_file.write(request.content)
except OSError:
print_error("ERROR: Cannot replace existing configuration file")
exit(1)
else:
config.axiom = config.AxiomConfig(config.axiom.config_file)
setup_ptf()
setup_toolkits()
else:
print_error("ERROR: Configuration file download failure")
exit(1)
elif settings.get("tool") is None:
setup_ptf()
setup_toolkits()
else:
print_error("ERROR: Invalid configuration file URL")
exit(1)
def load_commands(yam, inputs_pattern, input_types_list):
""" SUMMARY: creates all command and action objects for a given tool file's YAML data
INPUT: 1) a list of 2 dicts from the source YAML file, 2) a regex pattern (str), and 3) a list of strings
OUTPUT: a two-item tuple of 1) a list of AxiomCommand objects and 2) a list of AxiomAction objects """
total = yam[1]['commands'].__len__()
command_list = []
action_list = []
tool_string = ""
i = 0
while i < total:
current_cmd = yam[1]['commands'][i]
name = str(list(current_cmd.keys())[0])
for x in command_list:
if x.name == name:
tool_string = str(yam[0]["name"] + " (" + yam[0]["os"] + ") ")
print_error(str("ERROR: " + tool_string + "contains non-unique command name \"" + name + "\""))
exit(1)
for y in action_list:
if y.name == name:
tool_string = str(yam[0]["name"] + " (" + yam[0]["os"] + ") ")
print_error(str("ERROR: " + tool_string + "contains non-unique action name \"" + name + "\""))
exit(1)
prompt_type = str(list(list(current_cmd.values())[0][0].values())[0][0])
execution_type = str(list(list(current_cmd.values())[0][0].values())[0][1])
text = list(list(current_cmd.values())[0][1].values())[0]
note = str(list(list(current_cmd.values())[0][4].values())[0])
raw_output_list = list(list(current_cmd.values())[0][3].values())[0]
output_list = None
if raw_output_list:
output_list = load_outputs(raw_output_list, tool_string)
raw_input_list = list(list(current_cmd.values())[0][2].values())[0]
if raw_input_list:
tokens, input_list = load_text_and_inputs(text, inputs_pattern, input_types_list, raw_input_list)
command_list.append(AxiomCommand(name, prompt_type, execution_type, tokens, output_list, note, input_list))
else:
action_list.append(AxiomAction(name, prompt_type, execution_type, text, output_list, note))
i += 1
return command_list, action_list
def load_inventory():
""" SUMMARY: instantiates the runtime toolkits that organize all tools and their commands/actions
INPUT: none
OUTPUT: a list of AxiomToolkit objects """
loadable_inventory_file = str(config.axiom.binary_folder + "/inventory.axiom")
if path.exists(loadable_inventory_file):
try:
with open(loadable_inventory_file, 'rb') as inventory_dump:
toolkits = load(inventory_dump)
except (OSError, PickleError):
print_error(str("ERROR: Failed to load inventory binary file " + loadable_inventory_file))
exit(1)
else:
return toolkits
folders = []
if path.exists(config.axiom.inventory_folder):
folders = listdir(config.axiom.inventory_folder)
else:
print_error(str("ERROR: Inventory folder " + config.axiom.inventory_folder + " not found"))
exit(1)
toolkits = []
for i in range(folders.__len__()):
kit_name = folders[i]
kit_folder = str(config.axiom.inventory_folder + "/" + kit_name)
tool_list = []
for filename in listdir(kit_folder):
current_file = str(kit_folder + "/" + filename)
if current_file.endswith(".yml"):
try:
with open(current_file, 'r') as tool_file:
tool_yaml = list(safe_load_all(tool_file))[0]
tool_name = tool_yaml["name"]
tool_platform = tool_yaml["os"]
tool_list.append((tool_name, tool_platform))
except (OSError, parser.ParserError, scanner.ScannerError):
print_error(str("ERROR: Failed to load " + current_file))
exit(1)
tool_list = set(tool_list)
toolkits.append(AxiomToolkit(kit_name, kit_folder, tool_list))
try:
with open(loadable_inventory_file, 'wb') as inventory:
dump(toolkits, inventory)
except (OSError, PickleError):
print_error(str("ERROR: Failed to save inventory binary file " + loadable_inventory_file))
exit(1)
return toolkits
def load_outputs(raw_output_list, tool):
""" SUMMARY: retrieves a list of values representing each command/action output
INPUT: 1) a list of outputs (str) taken directly from a YAML file 2) the target tool (str)
OUTPUT: a list of two-item tuples """
output_list = []
output_count = 0
try:
while output_count < raw_output_list.__len__():
current_output = raw_output_list[output_count]
if isinstance(current_output, dict):
if list(current_output)[0] == "FILE":
if list(list(current_output.values())[0].keys())[0] == "input":
output_list.append(("F_INPUT", int(list(list(current_output.values())[0].values())[0])))
elif list(list(current_output.values())[0].keys())[0] == "string":
output_list.append(("F_STRING", str(list(list(current_output.values())[0].values())[0])))
elif list(list(current_output.values())[0].keys())[0] == "prefix":
input_number = int(list(list(current_output.values())[0].values())[0][0])
if isinstance(list(list(current_output.values())[0].values())[0][1], str): # single extension
extension_string = str(list(list(current_output.values())[0].values())[0][1])
output_list.append(("F_PREFIX", (input_number, extension_string)))
elif isinstance(list(list(current_output.values())[0].values())[0][1], list): # >1 extensions
prefix_count = 0
while prefix_count <= list(list(current_output.values())[0].values())[0][0]:
extension_string = str(list(list(
current_output.values())[0].values())[0][1][prefix_count])
output_list.append(("F_PREFIX", (input_number, extension_string)))
prefix_count += 1
elif list(current_output)[0] == "PROMPT":
output_list.append(("PROMPT", str(list(current_output.values())[0])))
else:
output_list.append(str(current_output))
output_count += 1
except (AttributeError, IndexError, KeyError, TypeError, ValueError):
print_error(str("ERROR: Invalid outputs defined for " + tool))
exit(1)
return output_list
def load_text_and_inputs(text, inputs_pattern, input_types_list, raw_input_list):
""" SUMMARY: retrieves executable command/action text (tokens) and the inputs required at execution
INPUT: 1) command text (str), 2) regex pattern (str), 3) list of types (list), and 4) list of inputs (list)
OUTPUT: a two-item tuple containing 1) a list of strings and 2) a list of 2-item or 3-item tuples """
used_input_types = get_input_types(input_types_list, text)
tokens = []
if isinstance(text, str):
tokens = list(split(inputs_pattern, text))
elif isinstance(text, list):
line_count = 0
while line_count < text.__len__():
current_line = text[line_count]
tokens.append(list(split(inputs_pattern, current_line)))
line_count += 1
input_list = []
input_count = 0
while input_count < raw_input_list.__len__():
current_input = raw_input_list[input_count]
current_type = used_input_types[input_count]
if isinstance(current_input, str):
input_list.append(tuple((current_input, current_type)))
elif isinstance(current_input, dict):
current_name = list(current_input.keys())[0]
current_options = list(current_input.values())[0]
input_list.append(tuple((current_name, current_type, current_options)))
input_count += 1
return tokens, input_list
def load_tool_list(inventory):
""" SUMMARY: creates a de-duplicated list of all tools present in all toolkits
INPUT: a list of AxiomToolkit objects
OUTPUT: a list of two-item tuples (tool, platform) """
loadable_list_file = str(config.axiom.binary_folder + '/tool_list.axiom')
if path.exists(loadable_list_file):
try:
with open(loadable_list_file, 'rb') as list_dump:
loaded_list = load(list_dump)
except (OSError, PickleError):
print_error(str("ERROR: Failed to load tool list binary file " + loadable_list_file))
exit(1)
else:
return loaded_list
master_tool_list = []
for i in range(inventory.__len__()):
for x in inventory[i].tool_name_list:
master_tool_list.append(x)
master_tool_list = set(master_tool_list)
try:
with open(loadable_list_file, 'wb') as tool_list:
dump(list(master_tool_list), tool_list)
except (OSError, PickleError):
print_error(str("ERROR: Failed to save tool list binary file " + loadable_list_file))
exit(1)
return list(master_tool_list)
def load_tools(inventory, unloaded_tools):
""" SUMMARY: imports all tool data from all YAML files from all inventory folders
INPUT: 1) a list of AxiomToolkit objects, and 2) a list of two-item tuples (tool, platform)
OUTPUT: a list of AxiomTool objects """
loadable_tools_file = str(config.axiom.binary_folder + "/tools.axiom")
if path.exists(loadable_tools_file):
try:
with open(loadable_tools_file, 'rb') as tools_dump:
loaded_tools = load(tools_dump)
except (OSError, PickleError):
print_error(str("ERROR: Failed to load tools binary file " + loadable_tools_file))
exit(1)
else:
return loaded_tools
tools = []
for i in range(len(inventory)):
folder = inventory[i].location
for filename in listdir(folder):
current_file = str(folder + "/" + filename)
if current_file.endswith(".yml"):
try:
with open(current_file, 'r') as tool_file:
tool = list(safe_load_all(tool_file))
current_tool = (tool[0]["name"], tool[0]["os"])
if current_tool in unloaded_tools:
command_list, action_list = load_commands(tool, config.axiom.inputs_pattern,
config.axiom.input_types_list)
tools.append(AxiomTool(tool[0]["name"], tool[0]["os"], tool[0]["ptf_module"],
tool[0]["description"], action_list, command_list))
unloaded_tools.remove(current_tool)
else:
tool_id = resolve_tool_id(current_tool, tools)
if able_to_merge(tool, tool_id, tools):
if merge(tool, tool_id, tools, config.axiom.inputs_pattern,
config.axiom.input_types_list):
continue
else:
print_error(str("ERROR: Merge failure for " + str(tool[0]["name"]) + " from " +
str(current_file)))
exit(1)
else:
print_error(str("ERROR: Unable to merge " + str(tool[0]["name"]) + " from " +
str(current_file)))
exit(1)
except (AttributeError, IndexError, KeyError, OSError, TypeError, ValueError):
print_error(str("ERROR: Failed to load " + current_file))
exit(1)
for item in tools:
item.initialize_combined_list()
try:
with open(loadable_tools_file, 'wb') as axiom:
dump(tools, axiom)
except (OSError, PickleError):
print_error(str("ERROR: Failed to save tools binary file " + loadable_tools_file))
exit(1)
return tools
def merge(tool, tool_id, tools, inputs_pattern, input_types_list):
""" SUMMARY: merges new commands/actions into existing AxiomTool objects
INPUT: 1) list of two dictionaries 2) tool ID value (int) 3) list of AxiomTool objects
4) regex pattern (str) and 5) list of strings
OUTPUT: Returns True after completing merge procedure """
command_list, action_list = load_commands(tool, inputs_pattern, input_types_list)
action_count = 0
command_count = 0
if action_list.__len__() > 0:
while action_count < action_list.__len__():
tools[tool_id].action_list.append(action_list[action_count])
action_count += 1
if command_list.__len__() > 0:
while command_count < command_list.__len__():
tools[tool_id].command_list.append(command_list[command_count])
command_count += 1
return True
def new_generate_command():
""" SUMMARY: prompts user with data entry questions and prints a complete and valid YAML snippet to the screen
INPUT: none
OUTPUT: none, prints to the screen and exits """
name = prompt("[AXIOM] Enter command name: ")
prompt_selection = new_get_prompt_selection()
execution_type = new_get_execution_type(prompt_selection)
text = new_get_text()
inputs = new_get_inputs(text)
outputs = new_get_outputs(execution_type, text)
note = prompt("[AXIOM] Enter command note: ")
name = new_get_escaped_text(name)
note = new_get_escaped_text(note)
new_print_finalized_command_text(name, prompt_selection, execution_type, text, inputs, outputs, note)
exit(0)
def new_get_escaped_text(text):
""" SUMMARY: replaces any backslash and double-quote characters with backslash-escaped character sequences
INPUT: command text line(s) (list or str)
OUTPUT: returns backslash-escaped command text (list or str) """
if isinstance(text, list):
new_list = []
for line in range(text.__len__()):
new_list.append(text[line].replace("\\", "\\\\").replace("\"", "\\\""))
return new_list
else:
return text.replace("\\", "\\\\").replace("\"", "\\\"")
def new_get_execution_type(prompt_selection):
""" SUMMARY: prompts user to enter the command execution type
INPUT: the current command's prompt type (str)
OUTPUT: returns the execution type name (str) """
if prompt_selection == "other":
return "NX"
print("\nExecution Types\n")
print(" 1\tstandalone")
print(" 2\tautonomous")
print(" 3\tinteractive")
print(" 4\tNX")
number = prompt("\n[AXIOM] Select an option: ")
try:
number = int(number)
if number == 1:
return "standalone"
elif number == 2:
return "autonomous"
elif number == 3:
return "interactive"
elif number == 4:
return "NX"
else:
print_error("ERROR: Invalid execution type selection")
exit(1)
except (ValueError, TypeError):
print_error("ERROR: Invalid execution type selection")
exit(1)
def new_get_inputs(text):
""" SUMMARY: prompts user to enter input descriptions and related data
INPUT: the command text (list or str)
OUTPUT: returns the inputs text line (str) """
inputs = "["
used_input_types = get_input_types(config.axiom.input_types_list, text)
input_count = used_input_types.__len__()
for i in range(input_count):
description = prompt(str("[AXIOM] Enter name for input " +
str("(" + str(i + 1) + "/" + str(input_count) + ")") +
" {" + used_input_types[i] + "}: "))
description = new_get_escaped_text(description)
if used_input_types[i] in ["INTMENU", "STRMENU"]:
option_count = prompt(str("[AXIOM] Enter number of \"" + description + "\" options: "))
try:
option_count = int(option_count)
if option_count <= 0:
print_error("ERROR: Invalid number of options")
exit(1)
option_text = "["
except (ValueError, TypeError):
print_error("ERROR: Invalid number of options")
exit(1)
if used_input_types[i] == "INTMENU":
for x in range(option_count):
single_option = prompt(str("[AXIOM] Enter \"" + description + "\" option (" +
str(x + 1) + "/" + str(option_count) + ") {INT}: "))
try:
single_option = int(single_option)
except (ValueError, TypeError):
print_error("ERROR: Invalid integer option")
exit(1)
option_text = str(option_text + str(single_option) + ",")
option_text = str(option_text[:-1] + "]")
elif used_input_types[i] == "STRMENU":
for x in range(option_count):
single_option = prompt(str("[AXIOM] Enter \"" + description + "\" option (" +
str(x + 1) + "/" + str(option_count) + ") {STR}: "))
single_option = new_get_escaped_text(single_option)
option_text = str(option_text + "\"" + str(single_option) + "\"" + ",")
option_text = str(option_text[:-1] + "]")
inputs = str(inputs + "{\"" + description + "\":" + option_text + "},")
else:
inputs = str(inputs + "\"" + description + "\",")
if inputs == "[":
return "null"
else:
inputs = str(inputs[:-1] + "]")
return inputs
def new_get_output_details(input_count, current_output_index, output_count):
""" SUMMARY: prompts user to select output type and enter type-specific details
INPUT: 1) number of total inputs (int) 2) current output number (int) 3) total number of outputs (int)
OUTPUT: returns the outputs text line (str) """
print(str("[AXIOM] Select output type for remaining output (" +
str(current_output_index + 1) + "/" + str(output_count) + "): "))
print("\nOutput Types\n")
print(" 1\tFile (input)\tfilename is entirely user-controlled command input")
print(" 2\tFile (prefix)\tfilename prefix is command input, file extension(s) hardcoded")
print(" 3\tFile (string)\tfilename is entirely hardcoded")
print(" 4\tSTDERR\t\tstandard error")
number = prompt("\n[AXIOM] Select an option: ")
try:
number = int(number)
if number == 1:
if input_count <= 0:
print_error("ERROR: Output type requires at least one command input")
exit(1)
input_number = prompt("[AXIOM] Enter the corresponding input number: ")
input_number = int(input_number)
if int(input_number - 1) in range(input_count):
return str("{\"FILE\":{\"input\":" + str(input_number) + "}}")
else:
print_error("ERROR: Invalid input number")
exit(1)
elif number == 2:
if input_count <= 0:
print_error("ERROR: Output type requires at least one command input")
exit(1)
extensions = ""
entry = prompt("[AXIOM] Enter the corresponding input number: ")
entry = int(entry)
if int(entry - 1) in range(input_count):
extension_count = prompt("[AXIOM] Enter number of file extensions: ")
extension_count = int(extension_count)
if extension_count > 0:
for e in range(extension_count):
current_ext = prompt("[AXIOM] Enter file extension (" +
str(e + 1) + "/" + str(extension_count) + "): ")
current_ext = new_get_escaped_text(current_ext)
extensions = str(extensions + "\"" + current_ext + "\",")
extensions = extensions[:-1]
return str("{\"FILE\":{\"prefix\":[" + str(entry) + "," + extensions + "]}}")
else:
print_error("ERROR: Invalid number of file extensions")
exit(1)
else:
print_error("ERROR: Invalid input number")
exit(1)
elif number == 3:
filename = prompt("[AXIOM] Enter the output filename: ")
filename = new_get_escaped_text(filename)
return str("{\"FILE\":{\"string\":\"" + str(filename) + "\"}}")
elif number == 4:
return "\"STDERR\""
else:
print_error("ERROR: Invalid output type selection")
exit(1)
except (ValueError, TypeError):
print_error("ERROR: Invalid number entered")
exit(1)
def new_get_outputs(execution_type, text):
""" SUMMARY: prompts user to enter output data
INPUT: 1) command execution type name (str) 2) command text (list or str)
OUTPUT: returns completed outputs text line (str) """
input_count = get_input_types(config.axiom.input_types_list, text).__len__()
outputs = "["
answer = prompt("[AXIOM] Does command output to STDOUT? [Y/n] ")
if answer not in ["Y", "y", "Yes", "yes"]:
pass
else:
outputs = "[\"STDOUT\","
if execution_type == "interactive":
print("[AXIOM] Select prompt type emitted by interactive command: ")
prompt_type = new_get_prompt_selection()
if outputs == "[\"STDOUT\",":
outputs = str("[\"STDOUT\"," + "{\"PROMPT\":\"" + prompt_type + "\"},")
else:
outputs = str("[{\"PROMPT\":\"" + prompt_type + "\"},")
output_count = prompt("[AXIOM] Enter number of remaining outputs: ")
try:
output_count = int(output_count)
except (ValueError, TypeError):
print_error("ERROR: Invalid number of outputs")
exit(1)
if output_count <= 0:
if outputs == "[":
return "null"
else:
return str(outputs[:-1] + "]")
for y in range(output_count):
outputs = str(outputs + new_get_output_details(input_count, y, output_count) + ",")
return str(outputs[:-1] + "]")
def new_get_prompt_selection():
""" SUMMARY: prompts user to select command prompt type
INPUT: none, gets input from user
OUTPUT: returns a prompt name from the global config (str) """
print("\nPrompts\n")
for i in range(config.axiom.prompts.__len__()):
print(" " + str(i + 1) + "\t" + str(config.axiom.prompts[i][0]))
number = prompt("\n[AXIOM] Select an option: ")
try:
number = int(number)
if int(number - 1) in range(config.axiom.prompts.__len__()):
return config.axiom.prompts[number - 1][0]
else:
print_error("ERROR: Invalid prompt selection")
exit(1)
except (ValueError, TypeError):
print_error("ERROR: Invalid prompt selection")
exit(1)
def new_get_text():
""" SUMMARY: prompts user for number of command text lines and the line contents
INPUT: none, gets input from the user
OUTPUT: returns completed and sanitized command text (list or str) """
line_count = prompt("[AXIOM] Enter number of text input lines: ")
try:
line_count = int(line_count)
except (ValueError, TypeError):
print_error("ERROR: Invalid number of lines")
exit(1)
if line_count <= 0:
print_error("ERROR: Invalid number of lines")
exit(1)
elif line_count == 1:
text = prompt("[AXIOM] Enter command text: ")
else:
text = []
for i in range(line_count):
text.append(prompt(str("[AXIOM] Enter command text (line " + str(i + 1) + "): ")))
return new_get_escaped_text(text)
def new_print_finalized_command_text(name, prompt_selection, execution_type, text, inputs, outputs, note):
""" SUMMARY: prints newly-generated YAML text to the screen
INPUT: seven variables generated by related functions, all are strings but "text" can also be a list
OUTPUT: none, only prints to the screen """
print()
print(" - \"" + name + "\":")
print(" - type: [\"" + prompt_selection + "\",\"" + execution_type + "\"]")
if isinstance(text, str):
print(" - text: \"" + text + "\"")
else:
print(" - text:")
for i in range(text.__len__()):
print(" - \"" + text[i] + "\"")
print(" - input: " + inputs + "")
print(" - output: " + outputs + "")
print(" - note: \"" + note + "\"")
print()
def print_banner(banner_file):
""" SUMMARY: displays ASCII art from file and other introductory info
INPUT: filename (str) of text file on filesystem
OUTPUT: none, only prints to screen """
try:
with open(banner_file, 'r') as file:
data = file.readlines()
for line in data:
print(Fore.RED + line.replace('\n', ''))
print(Style.RESET_ALL, end='')
except OSError:
print_error(str("ERROR: Unable to access banner file " + banner_file))
exit(1)
else:
print(" C9EE FD5E 15DA 9C02 1B0C 603C A397 0118 D56B 2E35 ")
print()
print(" Created by Mike Iacovacci https://payl0ad.run ")
print()
print()
def print_stats(inventory, tool_list, tools):
""" SUMMARY: displays counts of loaded tools, commands/actions, and toolkits
        INPUT: 1) list of AxiomToolkit objects 2) de-duplicated list of tuples 3) list of AxiomTool objects
OUTPUT: none, only prints to the screen """
action_count = 0
command_count = 0
for tool in tools:
current_actions = tool.action_list.__len__()
current_commands = tool.command_list.__len__()
action_count = action_count + current_actions
command_count = command_count + current_commands
combined_count = str(action_count + command_count)
tool_count = str(tool_list.__len__())
toolkit_count = str(inventory.__len__())
print("\n" + "Loaded " +
combined_count + " commands for " +
tool_count + " unique tools from " +
toolkit_count + " toolkits."
"\n")
def reload():
""" SUMMARY: deletes and recreates binary folder causing all YAML tool files to be deserialized again
INPUT: none
OUTPUT: none """
print("Reloading...")
delete_and_recreate_folder(config.axiom.binary_folder)
def resolve_tool_id(potential_tool, tools):
""" SUMMARY: searches for a tool's ID number using a user-supplied tool name string
INPUT: 1) a tool name (str), and 2) a list of AxiomTool objects
OUTPUT: a tool ID value (int) or -1 if no match is found """
tool_id = 0
while tool_id < tools.__len__():
if tools[tool_id].name == potential_tool[0] and tools[tool_id].platform == potential_tool[1]:
return tool_id
tool_id += 1
return -1
def set_user_expectations(settings):
""" SUMMARY: prints a message so the user expects to wait while the YAML is deserialized
INPUT: three-item settings dictionary
OUTPUT: no return value, only prints to the screen conditionally """
if path.exists(str(config.axiom.binary_folder + "/inventory.axiom")) and \
path.exists(str(config.axiom.binary_folder + "/tool_list.axiom")) and \
path.exists(str(config.axiom.binary_folder + "/tools.axiom")) or \
settings.get("mode") in ["init", "reload"]:
return
else:
print("Initializing...")
def setup_folders(settings):
""" SUMMARY: initializes folders for history/binary files and installs PTF if missing
INPUT: three-item settings dictionary
OUTPUT: none, causes filesystem modifications within user-defined locations """
set_user_expectations(settings)
create_missing_folder(config.axiom.history_folder)
create_missing_folder(config.axiom.binary_folder)
if not path.exists(config.axiom.ptf_folder):
setup_ptf()
if not path.exists(config.axiom.inventory_folder):
setup_toolkits()
def setup_ptf():
""" SUMMARY: deletes existing PTF folder and downloads/installs the latest version from GitHub master branch
INPUT: none
OUTPUT: no return values, modifies the filesystem """
download_and_extract_zip("https://github.com/trustedsec/ptf/archive/master.zip",
"ptf-master",
config.axiom.ptf_folder,
"The PenTesters Framework (PTF)")
def setup_toolkits():
""" SUMMARY: deletes existing inventory folder, downloads all listed toolkits, and reloads the binary data
INPUT: none
OUTPUT: no return values, modifies the filesystem """
delete_and_recreate_folder(config.axiom.inventory_folder)
for toolkit in config.axiom.toolkits:
download_and_extract_zip(toolkit[2],
toolkit[1],
str(config.axiom.inventory_folder + "/" + toolkit[0]),
toolkit[0])
reload()
def tool_selection_prompt(tool_list, tool_names, tools):
""" SUMMARY: prompts user to select a tool, provides a fuzzy word completer interface
INPUT: 1) list of two-item tuples (name, platform), 2) set of tool names, and 3) list of AxiomTool objects
OUTPUT: exit value (int) """
tool_names = FuzzyCompleter(WordCompleter(tool_names))
completer_style = ptkStyle.from_dict({
"completion-menu": "bg:#111111",
"scrollbar.background": "bg:#111111",
"scrollbar.button": "bg:#999999",
"completion-menu.completion.current": "nobold bg:ansired",
"completion-menu.completion fuzzymatch.outside": "nobold fg:#AAAAAA",
"completion-menu.completion fuzzymatch.inside": "nobold fg:ansired",
"completion-menu.completion fuzzymatch.inside.character": "nobold nounderline fg:ansired",
"completion-menu.completion.current fuzzymatch.outside": "nobold fg:#AAAAAA",
"completion-menu.completion.current fuzzymatch.inside": "nobold fg:#AAAAAA",
"completion-menu.completion.current fuzzymatch.inside.character": "nobold nounderline fg:#AAAAAA"})
while True:
text = prompt('[AXIOM] Enter tool: ', completer=tool_names, complete_while_typing=True, style=completer_style)
if text == "exit" or text == "quit":
return 0
if text == "":
continue
tool_id = disambiguate_tool_name(text, tool_list, tools)
if tool_id < 0:
print_error("ERROR: Invalid tool name")
else:
tool = tools[tool_id]
command_selection_prompt(tool)
return 1
def validate_privileges(mode):
""" SUMMARY: confirms effective root privilege level if writing to the filesystem or spawning a subprocess
INPUT: program mode type (str)
OUTPUT: none """
if mode not in ["show", "new"]:
if geteuid() != 0:
print_error("ERROR: AXIOM requires root privileges")
exit(1)
|
the-stack_106_19428
|
import pygame
import pygame.gfxdraw
class PrimitiveManager(pygame.sprite.Sprite):
"""A primitive manager for each platoon
"""
def __init__(self, map_instance, platoon_id, vehicle_type):
pygame.sprite.Sprite.__init__(self)
self.platoon_id = platoon_id
self.vehicle_type = vehicle_type
self.map = map_instance
self.key = self.vehicle_type + '_p_' + str(self.platoon_id)
return None
def get_centroid(self):
return self.state['centroid_pos']
def get_vehicles(self, states):
self.state = states[self.vehicle_type][self.key]
vehicles = self.state['vehicles']
return vehicles
def convert_to_pixel(self, points):
pos = [int(points[1] / .2 + 420), int(points[0] / .2 + 340)]
return pos
def update(self, states):
self.vehicles = self.get_vehicles(states)
# Execute only if the number of vehicles are > 0
# if self.state['n_vehicles'] > 0:
if self.vehicle_type == 'uav':
color = (0, 153, 76)
for vehicle in self.vehicles:
pos = self.convert_to_pixel(vehicle.current_pos)
# Draw a circle
pygame.gfxdraw.filled_circle(self.map.env_surface, pos[0],
pos[1], 3, color)
pygame.gfxdraw.aacircle(self.map.env_surface, pos[0], pos[1],
3, color)
else:
color = (0, 0, 255)
for vehicle in self.vehicles:
pos = self.convert_to_pixel(vehicle.current_pos)
# Draw a rectangle
pygame.draw.rect(self.map.env_surface, color,
pygame.Rect(pos, [5, 5]))
# Draw a black circle around the platoons if it is selected
if self.state['selected']:
color = (0, 0, 0)
cent_pos = self.convert_to_pixel(self.get_centroid())
pygame.draw.circle(self.map.env_surface, color, cent_pos, 25, 3)
self.map.surface.blit(self.map.env_surface, self.map.position)
return None
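# Hypothetical usage sketch (not part of the original module). It assumes a map object
# exposing `env_surface`, `surface`, and `position`, plus a nested states dict indexed as
# states[vehicle_type]['<vehicle_type>_p_<platoon_id>'] with 'vehicles', 'centroid_pos'
# and 'selected' entries.
#
#   manager = PrimitiveManager(map_instance, platoon_id=1, vehicle_type='uav')
#   manager.update(states)  # draws every vehicle in platoon 'uav_p_1' onto the map surface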
|
the-stack_106_19429
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Paddle-Lite light python api demo
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from paddlelite.lite import *
import numpy as np
# Command arguments
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_dir", default="", type=str, help="Non-combined Model dir path")
parser.add_argument(
"--input_shape",
default=[1, 3, 224, 224],
nargs='+',
type=int,
required=False,
    help="Model input shape, eg: 1 3 224 224. Default: 1 3 224 224")
parser.add_argument(
"--backend",
default="",
type=str,
help="To use a particular backend for execution. Should be one of: arm|opencl|x86|x86_opencl|metal"
)
parser.add_argument(
"--image_path", default="", type=str, help="The path of test image file")
parser.add_argument(
"--label_path", default="", type=str, help="The path of label file")
parser.add_argument(
"--print_results",
type=bool,
default=False,
help="Print results. Default: False")
def RunModel(args):
# 1. Set config information
config = MobileConfig()
config.set_model_from_file(args.model_dir)
if args.backend.upper() in ["OPENCL", "X86_OPENCL"]:
bin_path = "./"
bin_name = "lite_opencl_kernel.bin"
config.set_opencl_binary_path_name(bin_path, bin_name)
'''
opencl tune option:
CL_TUNE_NONE
CL_TUNE_RAPID
CL_TUNE_NORMAL
CL_TUNE_EXHAUSTIVE
'''
tuned_path = "./"
tuned_name = "lite_opencl_tuned.bin"
config.set_opencl_tune(CLTuneMode.CL_TUNE_NORMAL, tuned_path,
tuned_name, 4)
'''
opencl precision option:
CL_PRECISION_AUTO, first fp16 if valid, default
CL_PRECISION_FP32, force fp32
CL_PRECISION_FP16, force fp16
'''
config.set_opencl_precision(CLPrecisionType.CL_PRECISION_AUTO)
elif args.backend.upper() in ["METAL"]:
# set metallib path
import paddlelite, os
module_path = os.path.dirname(paddlelite.__file__)
config.set_metal_lib_path(module_path + "/libs/lite.metallib")
config.set_metal_use_mps(True)
# 2. Create paddle predictor
predictor = create_paddle_predictor(config)
# 3. Set input data
input_tensor = predictor.get_input(0)
c, h, w = args.input_shape[1], args.input_shape[2], args.input_shape[3]
read_image = len(args.image_path) != 0 and len(args.label_path) != 0
if read_image == True:
import cv2
with open(args.label_path, "r") as f:
label_list = f.readlines()
image_mean = [0.485, 0.456, 0.406]
image_std = [0.229, 0.224, 0.225]
image_data = cv2.imread(args.image_path)
image_data = cv2.resize(image_data, (h, w))
image_data = cv2.cvtColor(image_data, cv2.COLOR_BGR2RGB)
image_data = image_data.transpose((2, 0, 1)) / 255.0
image_data = (image_data - np.array(image_mean).reshape(
(3, 1, 1))) / np.array(image_std).reshape((3, 1, 1))
image_data = image_data.reshape([1, c, h, w]).astype('float32')
input_tensor.from_numpy(image_data)
else:
input_tensor.from_numpy(np.ones((1, c, h, w)).astype("float32"))
# 4. Run model
predictor.run()
# 5. Get output data
output_tensor = predictor.get_output(0)
output_data = output_tensor.numpy()
if args.print_results == True:
print("result data:\n{}".format(output_data))
print("mean:{:.6e}, std:{:.6e}, min:{:.6e}, max:{:.6e}".format(
np.mean(output_data),
np.std(output_data), np.min(output_data), np.max(output_data)))
# 6. Post-process
if read_image == True:
output_data = output_data.flatten()
class_id = np.argmax(output_data)
class_name = label_list[class_id]
score = output_data[class_id]
print("class_name: {} score: {}".format(class_name, score))
if __name__ == '__main__':
args = parser.parse_args()
RunModel(args)
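# Example invocation (illustrative file names only; requires an optimized Paddle-Lite model file):
#   python demo.py --model_dir ./mobilenet_v1_opt.nb --backend arm \
#       --image_path ./test.jpg --label_path ./labels.txt --print_results True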
|
the-stack_106_19431
|
# -*- coding: utf-8 -*-
# Copyright (c) 2006-2014 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2014-2018 Claudiu Popa <[email protected]>
# Copyright (c) 2014 Google, Inc.
# Copyright (c) 2015-2017 Ceridwen <[email protected]>
# Copyright (c) 2015 Florian Bruhin <[email protected]>
# Copyright (c) 2015 Radosław Ganczarek <[email protected]>
# Copyright (c) 2016 Moises Lopez <[email protected]>
# Copyright (c) 2017 Hugo <[email protected]>
# Copyright (c) 2017 Łukasz Rogalski <[email protected]>
# Copyright (c) 2017 Calen Pennington <[email protected]>
# Copyright (c) 2018 Ashley Whetter <[email protected]>
# Copyright (c) 2018 Bryce Guinta <[email protected]>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
"""astroid packaging information"""
distname = 'astroid'
modname = 'astroid'
version = '2.0.4'
numversion = tuple(int(elem) for elem in version.split('.') if elem.isdigit())
extras_require = {}
install_requires = [
'lazy_object_proxy',
'six',
'wrapt',
'typing;python_version<"3.5"',
'typed_ast;python_version<"3.7" and implementation_name== "cpython"'
]
# pylint: disable=redefined-builtin; why license is a builtin anyway?
license = 'LGPL'
author = 'Python Code Quality Authority'
author_email = '[email protected]'
mailinglist = "mailto://%s" % author_email
web = 'https://github.com/PyCQA/astroid'
description = "An abstract syntax tree for Python with inference support."
classifiers = ["Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Software Development :: Quality Assurance",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
]
|
the-stack_106_19432
|
import re
import math
import numpy as np
from matplotlib import pyplot as plt
class EM:
X = []
Y = []
F = ""
def __init__(self, func, size, initialX, initialY, finalX):
self.f = func
self.h = size
self.x0 = initialX
self.y0 = initialY
self.xf = finalX
def __del__(self):
class_name=self.__class__.__name__
def Func(self):
self.f = re.sub("(x)+", "*x", self.f)
self.f = re.sub("(y)+", "*y", self.f)
self.f = re.sub("(\()+", "*(", self.f)
self.f = re.sub("(asin\*)+", "*math.asin", self.f)
self.f = re.sub("(acos\*)+", "*math.acos", self.f)
self.f = re.sub("(atan\*)+", "*math.atan", self.f)
self.f = re.sub("(sin\*)+", "*math.sin", self.f) # sin(x)
self.f = re.sub("(cos\*)+", "*math.cos", self.f)
self.f = re.sub("(tan\*)+", "*math.tan", self.f)
self.f = re.sub("(cot\*)+", "*1/math.tan", self.f)
self.f = re.sub("(sec\*)+", "*1/math.cos", self.f)
self.f = re.sub("(csc\*)+", "*1/math.sin", self.f)
# self.f = re.sub("(acot\*)+", "*math.acot", self.f)
# self.f = re.sub("(asec\*)+", "*math.asec", self.f)
# self.f = re.sub("(acsc\*)+", "*math.acsc", self.f)
if "e^" not in self.f:
self.f = re.sub("(e)+", "*math.e", self.f)
self.f = re.sub("(pi)+", "*math.pi", self.f)
self.f = re.sub("(log\*)+", "*math.log", self.f) # log(x, base)
self.f = re.sub("(e\^\*)+", "*math.exp", self.f) # e^(x)
self.f = re.sub("(ln\*)+", "*math.log", self.f) # ln(x) #log.e(x)
self.f = re.sub("(lg\*)+", "*math.log10", self.f) # log(x) #log.10(x)
self.f = re.sub("(sqrt\*)+", "*math.sqrt", self.f)
self.f = re.sub("(\^\*)+", "**", self.f) # x^() #if cube root(1/3)
self.f = re.sub("(abs\*)+", "*math.fabs", self.f) # abs() ||
self.f = re.sub("(/\*)+", "/", self.f)
self.f = re.sub("(\(\*)+", "(", self.f)
self.f = re.sub("(\+\*)+", "+", self.f)
self.f = re.sub("(\-\*)+", "-", self.f)
if self.f.find("*") == 0: # remove if 1st is *
self.f = self.f[1::]
return self.f
def Error(self):
EM.F = self.Func()
x=1
y=1
try:
eval(EM.F)
try:
self.EMcalc()
return False
except:
return True
except:
return True
def FuncError(self):
EM.F = self.Func()
x=1
y=1
try:
eval(EM.F)
return False
except:
return True
def EMcalc(self):
x = self.x0
y = self.y0
n = int((self.xf-self.x0)/self.h)
EM.X.clear()
EM.Y.clear()
EM.X.append(x)
EM.Y.append(y)
for i in range(n):
y = y + self.h * eval(EM.F)
x = x + self.h
EM.X.append(round(x, 2))
EM.Y.append(y)
def xValue(self):
return self.MatBrk(np.array([EM.X]).reshape(len(EM.X),1))
def yValue(self):
return self.MatBrk(np.array(np.round([EM.Y], decimals=5)).reshape(len(EM.Y), 1))
def MatBrk(self, arr):
return str(arr).replace('[[', ' ').replace(']]', '').replace('[', '').replace(']', '')
def Graph(self):
plt.close('all')
plt.rcParams['axes.facecolor'] = 'black'
fig = plt.figure()
# fig.patch.set_facecolor('xkcd:black')
fig.canvas.set_window_title("Euler's Method Graph")
plt.tick_params(direction='out', length=5, width=1, colors='r', grid_color='r', grid_alpha=0.8, labelcolor='r')
plt.plot(EM.X, EM.Y, 'bo')
plt.grid(True,color='red')
plt.xlabel("X-label")
plt.ylabel("Y-label")
plt.title("Approximate Solution with Euler's Method")
plt.show()
def CloseGraph(self):
        return plt.close('all')
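# Hypothetical usage sketch (not part of the original module); the equation, step size and
# bounds below are illustrative only:
#
#   em = EM("x+y", 0.1, 0, 1, 2)   # dy/dx = x + y, h = 0.1, y(0) = 1, solve up to x = 2
#   if not em.Error():             # parses the function string and runs EMcalc()
#       print(em.xValue())         # column of x values
#       print(em.yValue())         # column of approximated y values
#       em.Graph()                 # plots the approximate solution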
|
the-stack_106_19435
|
import paystacklib
from paystacklib.base.baseapi import BaseApi
from paystacklib.util.utils import clean_params
class Page(BaseApi):
object_type = '/page'
def __init__(
self, secret_key=None,
uri=paystacklib.api_base + object_type, method=None,
headers=None, params=None):
BaseApi.__init__(self, secret_key, uri, method, headers, params)
@classmethod
def create(
cls, name, description=None, amount= None, slug=None,
metadata=None, redirect_url=None, custom_fields=None):
params = clean_params(locals())
uri = paystacklib.api_base + cls.object_type
return cls(uri=uri, method='post', params=params).execute()
@classmethod
def list(cls, perPage=None, page=None):
params = clean_params(locals())
uri = paystacklib.api_base + cls.object_type
return cls(uri=uri, method='get', params=params).execute()
@classmethod
def fetch(cls, id_or_slug):
uri = paystacklib.api_base + \
'{0}/{1}'.format(cls.object_type, str(id_or_slug))
return cls(uri=uri, method='get').execute()
@classmethod
def update(
cls, id_or_slug, name=None, description=None, amount=None,
active=None):
params = clean_params(locals())
uri = paystacklib.api_base + \
'{0}/{1}'.format(cls.object_type, str(id_or_slug))
return cls(uri=uri, method='put', params=params).execute()
@classmethod
def check_slug_availability(cls, slug):
uri = paystacklib.api_base + \
            '{0}/{1}'.format(cls.object_type, slug)
return cls(uri=uri, method='get').execute()
@classmethod
    def add_products(cls, page_id, products):
"""
products is an array of product ids
"""
params = clean_params(locals())
del params['page_id']
uri = paystacklib.api_base + \
'{0}/{1}'.format(cls.object_type, str(page_id)) + '/product'
return cls(uri=uri, method='post', params=params).execute()
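# Hypothetical usage sketch (not part of the original module); it assumes the secret key and
# api_base have been configured on the paystacklib package before any request is executed:
#
#   page = Page.create(name="Donations", description="Support us", amount=500000)
#   pages = Page.list(perPage=20, page=1)
#   Page.update("donations-slug", amount=750000)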
|
the-stack_106_19438
|
"""."""
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
plt.style.use('fivethirtyeight')
data = pd.read_csv('data/data_3.csv')
ids = data['Responder_id']
ages = data['Age']
bins = np.arange(10, 110, step=10)
plt.hist(ages, bins=bins, edgecolor='black', log=True)
median_age = int(data['Age'].median())
color = '#fc4f30'
plt.axvline(median_age, color=color, label='Age Median', linewidth=2)
plt.legend()
plt.title('Ages of Respondents')
plt.xticks(bins)
plt.xlabel('Ages')
plt.ylabel('Total Respondents')
plt.tight_layout()
plt.show()
|
the-stack_106_19439
|
# -*- coding: utf-8 -*-
# Copyright 2018 IBM.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import unittest
from parameterized import parameterized
from qiskit import QuantumRegister
from test.common import QiskitAquaTestCase
from qiskit.aqua.components.reciprocals.lookup_rotation import LookupRotation
class TestLookupRotation(QiskitAquaTestCase):
"""Lookup Rotation tests."""
#def setUp(self):
@parameterized.expand([[3, 330], [5, 1478], [7, 6592],
[9, 11484], [11, 17652]])
def test_lookup_rotation(self, reg_size, gate_cnt):
self.log.debug('Testing Lookup Rotation with positive eigenvalues')
a = QuantumRegister(reg_size, name='a')
lrot = LookupRotation(negative_evals=False)
lrot_circuit = lrot.construct_circuit('', a)
circuit_cnt = lrot_circuit.data.__len__()
assert(circuit_cnt == gate_cnt)
self.log.debug('Lookup rotation register size: {}'.format(reg_size))
self.log.debug('Lookup rotation gate count: {}'.format(circuit_cnt))
@parameterized.expand([[3, 185], [5, 761], [7, 4425],
[9, 9033], [11, 14329]])
def test_lookup_rotation_neg(self, reg_size, gate_cnt):
self.log.debug('Testing Lookup Rotation with support for negative '
'eigenvalues')
a = QuantumRegister(reg_size, name='a')
lrot = LookupRotation(negative_evals=True)
lrot_circuit = lrot.construct_circuit('', a)
circuit_cnt = lrot_circuit.data.__len__()
assert(circuit_cnt == gate_cnt)
self.log.debug('Lookup rotation register size: {}'.format(reg_size))
self.log.debug('Lookup rotation gate count: {}'.format(circuit_cnt))
if __name__ == '__main__':
unittest.main()
|
the-stack_106_19440
|
# coding=utf8
# Copyright 2018-2025 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class DescribeVpcRequest(JDCloudRequest):
"""
    Query VPC details
"""
def __init__(self, parameters, header=None, version="v1"):
super(DescribeVpcRequest, self).__init__(
'/regions/{regionId}/vpcs/{vpcId}', 'GET', header, version)
self.parameters = parameters
class DescribeVpcParameters(object):
def __init__(self, regionId, vpcId, ):
"""
:param regionId: Region ID
:param vpcId: Vpc ID
"""
self.regionId = regionId
self.vpcId = vpcId
|
the-stack_106_19441
|
#
# Copyright 2018 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from task import Task
from datetime import datetime
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, TimeoutError, failure
from voltha.extensions.omci.omci_defs import ReasonCodes
from voltha.extensions.omci.omci_frame import OmciFrame, OmciGet
class IntervalDataTaskFailure(Exception):
pass
class IntervalDataTask(Task):
"""
OpenOMCI Performance Interval Get Request
"""
task_priority = Task.DEFAULT_PRIORITY
name = "Interval Data Task"
max_payload = 29
def __init__(self, omci_agent, device_id, class_id, entity_id,
max_get_response_payload=max_payload):
"""
Class initialization
:param omci_agent: (OmciAdapterAgent) OMCI Adapter agent
:param device_id: (str) ONU Device ID
:param class_id: (int) ME Class ID
:param entity_id: (int) ME entity ID
:param max_get_response_payload: (int) Maximum number of octets in a
single GET response frame
"""
super(IntervalDataTask, self).__init__(IntervalDataTask.name,
omci_agent,
device_id,
priority=IntervalDataTask.task_priority,
exclusive=False)
self._local_deferred = None
self._class_id = class_id
self._entity_id = entity_id
me_map = self.omci_agent.get_device(self.device_id).me_map
if self._class_id not in me_map:
            msg = "The requested ME Class ({}) does not exist in the ONU's ME Map".format(self._class_id)
self.log.warn('unknown-pm-me', msg=msg)
raise IntervalDataTaskFailure(msg)
self._entity = me_map[self._class_id]
self._counter_attributes = self.get_counter_attributes_names_and_size()
self._max_payload = max_get_response_payload
def cancel_deferred(self):
super(IntervalDataTask, self).cancel_deferred()
d, self._local_deferred = self._local_deferred, None
try:
if d is not None and not d.called:
d.cancel()
except:
pass
def start(self):
"""
Start the tasks
"""
super(IntervalDataTask, self).start()
self._local_deferred = reactor.callLater(0, self.perform_get_interval)
def stop(self):
"""
Shutdown the tasks
"""
self.log.debug('stopping')
self.cancel_deferred()
super(IntervalDataTask, self).stop()
def get_counter_attributes_names_and_size(self):
"""
Get all of the counter attributes names and the amount of storage they take
:return: (dict) Attribute name -> length
"""
return {name: self._entity.attributes[attr_index].field.sz
for name, attr_index in self._entity.attribute_name_to_index_map.items()
if self._entity.attributes[attr_index].is_counter}
@inlineCallbacks
def perform_get_interval(self):
"""
Sync the time
"""
self.log.info('perform-get-interval', class_id=self._class_id,
entity_id=self._entity_id)
device = self.omci_agent.get_device(self.device_id)
attr_names = self._counter_attributes.keys()
final_results = {
'class_id': self._class_id,
'entity_id': self._entity_id,
'me_name': self._entity.__name__, # Mostly for debugging...
'interval_utc_time': None,
# Counters added here as they are retrieved
}
last_end_time = None
while len(attr_names) > 0:
# Get as many attributes that will fit. Always include the 1 octet
# Interval End Time Attribute and 2 octets for the Entity ID
remaining_payload = self._max_payload - 3
attributes = list()
for name in attr_names:
if self._counter_attributes[name] > remaining_payload:
break
attributes.append(name)
remaining_payload -= self._counter_attributes[name]
attr_names = attr_names[len(attributes):]
attributes.append('interval_end_time')
frame = OmciFrame(
transaction_id=None,
message_type=OmciGet.message_id,
omci_message=OmciGet(
entity_class=self._class_id,
entity_id=self._entity_id,
attributes_mask=self._entity.mask_for(*attributes)
)
)
self.log.debug('interval-get-request', class_id=self._class_id,
entity_id=self._entity_id)
try:
results = yield device.omci_cc.send(frame)
omci_msg = results.fields['omci_message'].fields
status = omci_msg['success_code']
end_time = omci_msg['data'].get('interval_end_time')
self.log.debug('interval-get-results', class_id=self._class_id,
entity_id=self._entity_id, status=status,
end_time=end_time)
if status != ReasonCodes.Success:
raise IntervalDataTaskFailure('Unexpected Response Status: {}, Class ID: {}'.
format(status, self._class_id))
if last_end_time is None:
last_end_time = end_time
elif end_time != last_end_time:
msg = 'Interval End Time Changed during retrieval from {} to {}'\
.format(last_end_time, end_time)
self.log.info('interval-roll-over', msg=msg, class_id=self._class_id)
raise IntervalDataTaskFailure(msg)
final_results['interval_utc_time'] = datetime.utcnow()
for attribute in attributes:
final_results[attribute] = omci_msg['data'].get(attribute)
except TimeoutError as e:
self.log.warn('interval-get-timeout', e=e, class_id=self._class_id)
self.deferred.errback(failure.Failure(e))
except Exception as e:
self.log.exception('interval-get-failure', e=e, class_id=self._class_id)
self.deferred.errback(failure.Failure(e))
# Successful if here
self.deferred.callback(final_results)
|
the-stack_106_19443
|
from discord.ext import commands
from discord import utils
from datetime import datetime as d
import typing
from botmodules import converters
class Converters(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.color = 0xffffff
@commands.command(
        brief="Convert Morse code",
        description="Convert Morse code",
        aliases=["mors","morsecode"],
        help="Use /morse <text> to receive the text in Morse code or the other way around",
usage=""
)
async def morse(self, ctx):
message = ctx.getargs()
if message.replace("-","").replace("_","").replace(".","").replace(" ","") == "":
text = converters.morse_decrypt(message.replace("_","-"))
morse = message
else:
text = message
morse = converters.morse_encrypt(message)
await ctx.sendEmbed(title="Morsecode", color=self.color, fields=[("Morsecode", morse.replace(" "," | ")),("Text", text)])
def setup(bot):
bot.add_cog(Converters(bot))
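# Illustrative behaviour (assumed from the branching above, not from botmodules.converters):
# "/morse SOS" replies with the Morse encoding of "SOS", while a message containing only
# dots, dashes/underscores and spaces (e.g. "/morse ... --- ...") is decoded back to text.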
|
the-stack_106_19444
|
# Copyright (c) 2020 Rik079, Worthy Alpaca, Zibadian, Micro-T. All rights reserved.
__version__ = "Alpha"
# Discord login Token
token = ""
# Path to modules folder
modulepath = "./modules"
# AWS credentials
aws_id = ''
aws_secret = ''
aws_region = 'us-west-2'
# Staff
# ------------------------
# Admins
adminids = []
# Tech guys
botadminids = []
|
the-stack_106_19446
|
# -- coding: utf-8 --
'''
Script for comparing our Bayesian preference learning approach with the results from Habernal 2016.
Steps in this test:
1. Load word embeddings for the original text data that were used in the NN approach in Habernal 2016. -- done, but
only using averages to combine them.
2. Load feature data that was used in the SVM-based approach in Habernal 2016.
3. Load the crowdsourced data. -- done.
4. Copy a similar testing setup to Habernal 2016 (training/test split?) and run the Bayesian approach (during testing,
we can set aside some held-out data). -- done, results saved to file with no metrics computed yet except acc.
5. Print some simple metrics that are comparable to those used in Habernal 2016.
Thoughts:
1. NN takes into account sequence of word embeddings; here we need to use a combined embedding for whole text to avoid
a 300x300 dimensional input space.
2. So our method can only learn which elements of the embedding are important, but cannot learn from patterns in the
sequence, unless we can find a way to encode those.
3. However, the SVM-based approach also did well. Which method is better, NN or SVM, and by how much?
4. We should be able to improve on the SVM-based approach.
5. The advantages of our method: ranking with sparse data; personalised predictions to the individual annotators;
uncertainty estimates for active learning and decision-making confidence thresholds.
Created on 20 Mar 2017
@author: simpson
'''
import logging
from scipy.stats.stats import kendalltau
from sklearn.metrics import log_loss
from sklearn.svm.classes import NuSVR, SVC
logging.basicConfig(level=logging.DEBUG)
import sys
import os
sys.path.append("./python")
sys.path.append("./python/analysis")
sys.path.append("./python/models")
sys.path.append("./python/analysis/habernal_comparison")
svm_python_path = '~/libsvm-3.22/python'
# sys.path.append(os.path.expanduser("~/data/personalised_argumentation/embeddings/skip-thoughts"))
# sys.path.append(os.path.expanduser("~/data/personalised_argumentation/embeddings/Siamese-CBOW/siamese-cbow"))
sys.path.append(os.path.expanduser(svm_python_path))
import pickle
import time
from gp_pref_learning import GPPrefLearning, pref_likelihood
from gp_classifier_svi import GPClassifierSVI
from gp_classifier_vb import compute_median_lengthscales
from sklearn.svm import SVR
from embeddings import load_embeddings, load_siamese_cbow_embeddings, load_skipthoughts_embeddings, get_mean_embeddings
from data_loader import data_root_dir, load_train_test_data, load_ling_features
import numpy as np
import pandas as pd
ndebug_features = 10
verbose = False
# Lengthscale initialisation -------------------------------------------------------------------------------------------
# use the median heuristic to find a reasonable initial length-scale. This is the median of the distances.
# First, grab a sample of points because N^2 could be too large.
def compute_lengthscale_heuristic(feature_type, embeddings_type, embeddings, ling_feat_spmatrix, docids, folds,
index_to_word_map):
# get the embedding values for the test data -- need to find embeddings of the whole piece of text
if feature_type == 'both' or feature_type == 'embeddings' or feature_type == 'debug':
docidxs = []
doc_tok_seqs = []
doctexts = []
for f in folds:
doc_tok_seqs.append(folds.get(f)["test"][0])
doc_tok_seqs.append(folds.get(f)["test"][1])
testids = np.array([ids_pair.split('_') for ids_pair in folds.get(f)["test"][3]])
docidxs.append(get_docidxs_from_ids(docids, testids[:, 0]))
docidxs.append(get_docidxs_from_ids(docids, testids[:, 1]))
doctexts.append(folds.get(f)["test"][5])
doctexts.append(folds.get(f)["test"][6])
X, _, utexts = get_doc_token_seqs(docidxs, doc_tok_seqs, doctexts)
if embeddings_type == 'word_mean':
items_feat = get_mean_embeddings(embeddings, X)
elif embeddings_type == 'skipthoughts':
global skipthoughts
import skipthoughts
items_feat = skipthoughts.encode(embeddings, utexts)
elif embeddings_type == 'siamese-cbow':
items_feat = np.array([embeddings.getAggregate(index_to_word_map[Xi]) for Xi in X])
else:
logging.info("invalid embeddings type! %s" % embeddings_type)
if feature_type == 'both' or feature_type == 'debug':
items_feat = np.concatenate((items_feat, ling_feat_spmatrix.toarray()), axis=1)
if feature_type == 'ling':
items_feat = ling_feat_spmatrix.toarray()
if feature_type == 'debug':
items_feat = items_feat[:, :ndebug_features]
starttime = time.time()
#for f in range(items_feat.shape[1]):
ls_initial = compute_median_lengthscales(items_feat, N_max=3000)
endtime = time.time()
logging.info('@@@ Selected initial lengthscales in %f seconds' % (endtime - starttime))
return ls_initial
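# Minimal sketch of the median heuristic referred to above (an assumption about what
# compute_median_lengthscales does internally; the real implementation may subsample and
# vectorise differently):
#
#   def median_lengthscales_sketch(feats, N_max=3000):
#       idxs = np.random.choice(feats.shape[0], min(N_max, feats.shape[0]), replace=False)
#       sample = feats[idxs]
#       # per-feature median of absolute pairwise differences
#       return np.array([np.median(np.abs(sample[:, None, d] - sample[None, :, d]))
#                        for d in range(sample.shape[1])])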
def get_doc_token_seqs(ids, X_list, texts=None):
'''
ids -- list of document IDs
X_list -- list of lists of word indices for each argument corresponding to the ids
texts -- list of texts corresponding to the ids
returns
X -- list of lists of word indices for each argument corresponding to the uids
uids -- list of unique document IDs
utexts -- unique texts corresponding to the uids
'''
# X_train_a1 and a1_train both have one entry per observation. We want to replace them with a list of
# unique arguments, and the indexes into that list. First, get the unique argument ids from trainids and testids:
if hasattr(ids[0], '__len__'):
allids = np.concatenate(ids)
else:
allids = ids
uids, uidxs = np.unique(allids, return_index=True)
# get the word index vectors corresponding to the unique arguments
X = np.empty(np.max(uids) + 1, dtype=object)
if texts is not None:
utexts = np.zeros(np.max(uids) + 1, dtype=object)
utexts[:] = ''
start = 0
fin = 0
for i in range(len(X)):
if i not in uids:
X[i] = []
for i in range(len(X_list)):
fin += len(X_list[i])
idxs = (uidxs>=start) & (uidxs<fin)
# keep the original IDs to try to make life easier. This means the IDs become indexes into X
X[uids[idxs]] = np.array(X_list[i])[uidxs[idxs] - start]
if texts is not None:
utexts[uids[idxs]] = np.array(texts[i])[uidxs[idxs] - start]
start += len(X_list[i])
if texts is not None:
utexts = [utext for utext in utexts]
return X, uids, utexts
else:
return X, uids
def get_docidxs_from_ids(all_docids, ids_to_map):
return np.array([np.argwhere(docid == all_docids)[0][0] for docid in ids_to_map])
def get_fold_data(folds, fold, docids):
#X_train_a1, X_train_a2 are lists of lists of word indexes
X_train_a1, X_train_a2, prefs_train, ids_train, person_train, tr_a1, tr_a2 = folds.get(fold)["training"]
X_test_a1, X_test_a2, prefs_test, ids_test, person_test, test_a1, test_a2 = folds.get(fold)["test"]
#a1_train, a2_train are lists of argument ids
trainids = np.array([ids_pair.split('_') for ids_pair in ids_train])
if docids is None:
docids = np.arange(np.unique(trainids).size)
a1_train = get_docidxs_from_ids(docids, trainids[:, 0])
a2_train = get_docidxs_from_ids(docids, trainids[:, 1])
testids = np.array([ids_pair.split('_') for ids_pair in ids_test])
a1_test = get_docidxs_from_ids(docids, testids[:, 0])
a2_test = get_docidxs_from_ids(docids, testids[:, 1])
X, uids, utexts = get_doc_token_seqs((a1_train, a2_train, a1_test, a2_test),
[X_train_a1, X_train_a2, X_test_a1, X_test_a2], (tr_a1, tr_a2, test_a1, test_a2))
print(("Training instances ", len(X_train_a1), " training labels ", len(prefs_train)))
print(("Test instances ", len(X_test_a1), " test labels ", len(prefs_test)))
prefs_train = np.array(prefs_train)
prefs_test = np.array(prefs_test)
person_train = np.array(person_train)
person_test = np.array(person_test)
personIDs = np.concatenate((person_train, person_test))
upersonIDs, personIdxs = np.unique(personIDs, return_inverse=True)
person_train = personIdxs[:len(person_train)]
person_test = personIdxs[len(person_train):]
return a1_train, a2_train, prefs_train, person_train, a1_test, a2_test, prefs_test, person_test, \
X, uids, utexts, upersonIDs
def get_noisy_fold_data(folds, folds_test, fold, docids, tr_pair_subset=None):
a1_train, a2_train, prefs_train, person_train, _, _, _, _, X, \
uids, utexts, upersonIDs = get_fold_data(folds, fold, docids)
a1_agg, a2_agg, gold_train, person_agg, a1_test, a2_test, prefs_test, person_test, _, \
_, _, _ = get_fold_data(folds_test, fold, docids)
# now subsample the training data
N = len(a1_train)
if tr_pair_subset is not None:
Nsub = N * tr_pair_subset
subidxs = np.random.choice(N, Nsub, replace=False)
a1_train = a1_train[subidxs]
a2_train = a2_train[subidxs]
prefs_train = prefs_train[subidxs]
person_train = person_train[subidxs]
gold_train = gold_train[subidxs]
else:
Nsub = N
return a1_train, a2_train, prefs_train, person_train, a1_test, a2_test, prefs_test, person_test, \
X, uids, utexts, upersonIDs, a1_agg, a2_agg, gold_train, person_agg
def get_fold_regression_data(folds_regression, fold, docids):
if folds_regression is not None:
_, scores_rank_train, argids_rank_train, person_rank_train, _ = folds_regression.get(fold)["training"] # blank argument is turkIDs_rank_test
item_idx_ranktrain = np.array([np.argwhere(trainid==docids)[0][0] for trainid in argids_rank_train])
scores_rank_train = np.array(scores_rank_train)
argids_rank_train = np.array(argids_rank_train)
_, scores_rank_test, argids_rank_test, person_rank_test, _ = folds_regression.get(fold)["test"] # blank argument is turkIDs_rank_test
item_idx_ranktest = np.array([np.argwhere(testid==docids)[0][0] for testid in argids_rank_test])
scores_rank_test = np.array(scores_rank_test)
argids_rank_test = np.array(argids_rank_test)
else:
item_idx_ranktrain = None
scores_rank_train = None
argids_rank_train = None
person_rank_train = None
item_idx_ranktest = None
scores_rank_test = None
argids_rank_test = None
person_rank_test = None
return item_idx_ranktrain, scores_rank_train, argids_rank_train, person_rank_train,\
item_idx_ranktest, scores_rank_test, argids_rank_test, person_rank_test
def subsample_tr_data(subsample_amount, a1_train, a2_train):
item_subsample_ids = []
nselected = 0
while nselected < subsample_amount:
idx = np.random.choice(len(a1_train), 1)
if a1_train[idx] not in item_subsample_ids:
item_subsample_ids.append(a1_train[idx])
if a2_train[idx] not in item_subsample_ids:
item_subsample_ids.append(a2_train[idx])
nselected = len(item_subsample_ids)
pair_subsample_idxs = np.argwhere(np.in1d(a1_train, item_subsample_ids) & np.in1d(a2_train, item_subsample_ids)).flatten()
# pair_subsample_idxs = np.random.choice(len(a1_train), subsample_amount, replace=False)
return pair_subsample_idxs
class TestRunner:
def __init__(self, current_expt_output_dir, datasets, feature_types, embeddings_types, methods,
dataset_increment, expt_tag='habernal'):
self.folds = None
self.initial_pair_subset = {}
self.default_ls_values = {}
self.expt_output_dir = current_expt_output_dir
self.expt_tag = expt_tag
self.datasets = datasets
self.feature_types = feature_types
self.embeddings_types = embeddings_types
self.methods = methods
self.dataset_increment = dataset_increment
self.vscales = [] # record the latent factor scales
def load_features(self, feature_type, embeddings_type, a1_train, a2_train, uids, utexts=None):
'''
Load all the features specified by the type into an items_feat object. Remove any features where the values are all
zeroes.
'''
# get the embedding values for the test data -- need to find embeddings of the whole piece of text
if feature_type == 'both' or feature_type == 'embeddings' or feature_type=='debug':
logging.info("Converting texts to mean embeddings (we could use a better sentence embedding?)...")
if embeddings_type == 'word_mean':
items_feat = get_mean_embeddings(self.embeddings, self.X)
elif embeddings_type == 'skipthoughts':
global skipthoughts
import skipthoughts
items_feat = skipthoughts.encode(self.embeddings, utexts)
elif embeddings_type == 'siamese-cbow':
items_feat = np.array([self.embeddings.getAggregate(self.index_to_word_map[Xi]) for Xi in self.X])
else:
logging.info("invalid embeddings type! %s" % embeddings_type)
logging.info("...embeddings loaded.")
# trim away any features not in the training data because we can't learn from them
valid_feats = np.sum((items_feat[a1_train] != 0) + (items_feat[a2_train] != 0), axis=0) > 0
items_feat = items_feat[:, valid_feats]
self.ling_items_feat = None # will get overwritten if we load the linguistic features further down.
self.embeddings_items_feat = items_feat
elif feature_type == 'ling':
items_feat = np.zeros((self.X.shape[0], 0))
valid_feats = np.zeros(0)
self.embeddings_items_feat = None
if feature_type == 'both' or feature_type == 'ling' or feature_type == 'debug':
logging.info("Obtaining linguistic features for argument texts.")
# trim the features that are not used in training
valid_feats_ling = np.sum( (self.ling_feat_spmatrix[a1_train, :] != 0) +
(self.ling_feat_spmatrix[a2_train, :] != 0), axis=0) > 0
valid_feats_ling = np.array(valid_feats_ling).flatten()
self.ling_items_feat = self.ling_feat_spmatrix[uids, :][:, valid_feats_ling].toarray()
items_feat = np.concatenate((items_feat, self.ling_items_feat), axis=1)
logging.info("...loaded all linguistic features for training and test data.")
valid_feats = np.concatenate((valid_feats, valid_feats_ling))
print('Found %i features.' % items_feat.shape[1])
if feature_type=='debug':
items_feat = items_feat[:, :ndebug_features] #use only n features for faster debugging
self.ling_items_feat = items_feat
self.embeddings_items_feat = items_feat
valid_feats = valid_feats[:ndebug_features]
self.items_feat = items_feat
self.ndims = self.items_feat.shape[1]
self.valid_feats = valid_feats.astype(bool)
# Methods for running the prediction methods -----------------------------------------------------------------------
def run_gppl(self):
if 'additive' in self.method:
kernel_combination = '+'
else:
kernel_combination = '*'
if 'shrunk' in self.method:
ls_initial = self.ls_initial / float(len(self.ls_initial))
else:
ls_initial = self.ls_initial
if 'weaksprior' in self.method:
shape_s0 = 2.0
rate_s0 = 200.0
elif 'lowsprior' in self.method:
shape_s0 = 1.0
rate_s0 = 1.0
elif 'weakersprior' in self.method:
shape_s0 = 2.0
rate_s0 = 2000.0
else:
shape_s0 = 200.0
rate_s0 = 20000.0
if '_M' in self.method:
validx = self.method.find('_M') + 2
M = int(self.method[validx:].split('_')[0])
else:
M = 500
if '_SS' in self.method:
validx = self.method.find('_SS') + 3
SS = int(self.method[validx:].split('_')[0])
else:
SS = 200
if self.model is None:
if M == 0:
use_svi = False
else:
use_svi = True
self.model = GPPrefLearning(ninput_features=self.ndims, ls_initial=ls_initial, verbose=self.verbose,
shape_s0=shape_s0, rate_s0=rate_s0, rate_ls = 1.0 / np.mean(ls_initial), use_svi=use_svi,
ninducing=M, max_update_size=SS, kernel_combination=kernel_combination, forgetting_rate=0.9,
delay=1.0)
self.model.max_iter_VB = 200
new_items_feat = self.items_feat # pass only when initialising
else:
new_items_feat = None
        print("no. features: %i" % self.items_feat.shape[1])
self.model.fit(self.a1_train, self.a2_train, new_items_feat, np.array(self.prefs_train, dtype=float)-1,
optimize=self.optimize_hyper, input_type='zero-centered')
proba = self.model.predict(None, self.a1_test, self.a2_test, reuse_output_kernel=True, return_var=False)
if self.a1_unseen is not None and len(self.a1_unseen):
tr_proba, _ = self.model.predict(None, self.a1_unseen, self.a2_unseen, reuse_output_kernel=True)
else:
tr_proba = None
if self.a_rank_test is not None:
predicted_f, _ = self.model.predict_f(None, self.a_rank_test)
else:
predicted_f = None
if self.a_rank_train is not None:
tr_f, _ = self.model.predict_f(None, self.a_rank_train)
else:
tr_f = None
return proba, predicted_f, tr_proba, tr_f
# model, _, a1_train, a2_train, self.prefs_train, items_feat, _, _, self.a1_test, self.a2_test,
# self.a1_unseen, self.a2_unseen, ls_initial, verbose, _, self.a_rank_test=None, _, _, _
def run_gpsvm(self):
if self.model is None:
self.model = GPPrefLearning(ninput_features=1, ls_initial=[1000], verbose=self.verbose, shape_s0 = 1.0,
rate_s0 = 1.0, rate_ls = 1.0 / np.mean(self.ls_initial), use_svi=False, kernel_func='diagonal')
self.model.max_iter_VB = 10
# never use optimize with diagonal kernel
self.model.fit(self.a1_train, self.a2_train, np.arange(self.items_feat.shape[0])[:, np.newaxis],
np.array(self.prefs_train, dtype=float)-1, optimize=False, input_type='zero-centered')
train_idxs = np.unique([self.a1_train, self.a2_train])
train_feats = self.items_feat[train_idxs]
f, _ = self.model.predict_f(train_idxs[:, np.newaxis])
svm = SVR()
svm.fit(train_feats, f)
test_f = svm.predict(self.items_feat)
# apply the preference likelihood from GP method
proba = pref_likelihood(test_f, v=self.a1_test, u=self.a2_test, return_g_f=False)
if self.a_rank_test is not None:
predicted_f = svm.predict(self.items_feat[self.a_rank_test])
else:
predicted_f = None
if self.a1_unseen is not None and len(self.a1_unseen):
tr_proba = pref_likelihood(test_f, v=self.a1_unseen, u=self.a2_unseen, return_g_f=False)
else:
tr_proba = None
return proba, predicted_f, tr_proba
def run_gpc(self):
if 'additive' in self.method:
kernel_combination = '+'
else:
kernel_combination = '*'
if 'weaksprior' in self.method:
shape_s0 = 2.0
rate_s0 = 200.0
        elif 'lowsprior' in self.method:
shape_s0 = 1.0
rate_s0 = 1.0
else:
shape_s0 = 200.0
rate_s0 = 20000.0
# twice as many features means the lengthscale heuristic is * 2
if self.model is None:
ls_initial = np.concatenate((self.ls_initial * 2.0, self.ls_initial * 2.0))
self.model = GPClassifierSVI(ninput_features=self.ndims, ls_initial=ls_initial, verbose=self.verbose,
shape_s0=shape_s0, rate_s0=rate_s0, rate_ls=1.0 / np.mean(self.ls_initial),
use_svi=True, ninducing=500, max_update_size=200, kernel_combination=kernel_combination)
self.model.max_iter_VB = 500
# with the argument order swapped around and data replicated:
gpc_feats = np.empty(shape=(len(self.a1_train)*2, self.items_feat.shape[1]*2))
gpc_feats[:len(self.a1_train), :self.items_feat.shape[1]] = self.items_feat[self.a1_train]
gpc_feats[len(self.a1_train):, :self.items_feat.shape[1]] = self.items_feat[self.a2_train]
gpc_feats[:len(self.a1_train), self.items_feat.shape[1]:] = self.items_feat[self.a2_train]
gpc_feats[len(self.a1_train):, self.items_feat.shape[1]:] = self.items_feat[self.a1_train]
gpc_labels = np.concatenate((np.array(self.prefs_train, dtype=float) * 0.5,
1 - np.array(self.prefs_train, dtype=float) * 0.5))
self.model.fit(np.arange(len(self.a1_train)), gpc_labels, optimize=self.optimize_hyper, features=gpc_feats)
proba, _ = self.model.predict(np.concatenate((self.items_feat[self.a1_test], self.items_feat[self.a2_test]), axis=1))
if self.a_rank_test is not None:
predicted_f = np.zeros(len(self.a_rank_test)) # can't easily rank with this method
else:
predicted_f = None
if self.a1_unseen is not None and len(self.a1_unseen):
tr_proba, _ = self.model.predict(np.concatenate((self.items_feat[self.a1_unseen], self.items_feat[self.a2_unseen]), axis=1))
else:
tr_proba = None
return proba, predicted_f, tr_proba
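
    @staticmethod
    def _example_gpc_pair_construction(items_feat, a1, a2, prefs):
        # Illustrative sketch only (added for clarity; not called by the pipeline):
        # run_gpc above recasts each preference pair as two classification rows,
        # one per argument order, so the classifier sees a symmetric dataset.
        # The 0/1/2 preference coding used in this script maps to labels 0/0.5/1
        # for the original order and 1/0.5/0 for the swapped order.
        import numpy as np
        ndims = items_feat.shape[1]
        n = len(a1)
        feats = np.empty((2 * n, 2 * ndims))
        feats[:n, :ndims] = items_feat[a1]   # original order: (a1, a2)
        feats[:n, ndims:] = items_feat[a2]
        feats[n:, :ndims] = items_feat[a2]   # swapped order: (a2, a1)
        feats[n:, ndims:] = items_feat[a1]
        labels = np.concatenate((np.array(prefs, dtype=float) * 0.5,
                                 1 - np.array(prefs, dtype=float) * 0.5))
        return feats, labels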
def run_svm(self, feature_type):
prefs_train_fl = np.array(self.prefs_train, dtype=float)
# cannot train on uncertain labels
idxs = (prefs_train_fl == 0) | (prefs_train_fl == 2)
prefs_train_fl = prefs_train_fl[idxs]
svc_labels = np.concatenate((prefs_train_fl * 0.5, 1 - prefs_train_fl * 0.5))
svc = SVC(probability=True)
trainfeats = np.concatenate((
np.concatenate((
self.items_feat[self.a1_train[idxs]],
self.items_feat[self.a2_train[idxs]]), axis=1),
np.concatenate((
self.items_feat[self.a1_train[idxs]],
self.items_feat[self.a2_train[idxs]]), axis=1)),
axis=0)
print("SVM classifier: no. features: %i" % trainfeats.shape[1])
print("SVM classifier: no. pairs: %i" % trainfeats.shape[0])
svc.fit(trainfeats, svc_labels.astype(int))
proba = svc.predict_proba(np.concatenate((self.items_feat[self.a1_train], self.items_feat[self.a2_train]), axis=1))
maxdiff = np.max(proba) - np.min(proba)
if maxdiff == 0:
maxdiff = 1
proba = (proba - np.min(proba)) / maxdiff # sometimes the probability estimates are too
        # squashed together... may be a bug in later versions of sklearn
# libSVM flips the labels if the first one it sees is positive
if svc_labels[0] == 1:
proba = 1 - np.array(proba)
if self.a_rank_test is not None:
svr = NuSVR()
svr.fit(self.items_feat[self.a_rank_train], self.scores_rank_train)
predicted_f = svr.predict(self.items_feat[self.a_rank_test])
logging.debug('Predictions from SVM regression: %s ' % predicted_f)
else:
predicted_f = None
if self.a1_unseen is not None and len(self.a1_unseen):
tr_proba = svc.predict_proba(np.concatenate((self.items_feat[self.a1_unseen], self.items_feat[self.a2_unseen]), axis=1))[:, 0]
# libSVM flips the labels if the first one it sees is positive
if svc_labels[0] == 1:
tr_proba = 1 - np.array(tr_proba)
tr_proba = tr_proba[:, None]
else:
tr_proba = None
return proba[:, None], predicted_f, tr_proba
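
    @staticmethod
    def _example_positive_class_proba(svc, pair_feats):
        # Illustrative alternative (an assumption, not part of the original method):
        # instead of guessing whether libSVM flipped the label order, sklearn
        # exposes the column order via svc.classes_, so the probability of class 1
        # can be looked up directly once svc.fit(...) has been called.
        import numpy as np
        proba = svc.predict_proba(pair_feats)
        col = int(np.flatnonzero(svc.classes_ == 1)[0])
        return proba[:, col]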
def run_bilstm(self, feature_type):
import os
os.environ['KERAS_BACKEND'] = 'theano'
from keras.preprocessing import sequence
from keras.models import Graph
from keras.layers.core import Dense, Dropout
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM
# Include document-level features in a simple manner using one hidden layer, which is then combined with the outputs of
# the LSTM layers, as in "Boosting Information Extraction Systems with Character-level Neural Networks and Free Noisy
# Supervision". This is equivalent to an MLP with one hidden layer combined with the LSTM.
if feature_type == 'ling' or feature_type == 'both' or feature_type == 'debug':
use_doc_level_features = True
n_doc_level_feats = self.ling_items_feat.shape[1]
else:
use_doc_level_features = False
np.random.seed(1337) # for reproducibility
max_len = 300 # cut texts after this number of words (among top max_features most common words)
batch_size = 32
        nb_epoch = 5  # keep the number of epochs small to limit over-fitting
print(len(self.folds.get(self.fold)["training"]))
X_train1 = self.X[self.a1_train]
X_train2 = self.X[self.a2_train]
y_train = self.prefs_train.tolist()
X_train = []
for i, row1 in enumerate(X_train1):
row1 = row1 + X_train2[i]
X_train.append(row1)
X_test1, X_test2, _, _, _, _, _ = self.folds.get(self.fold)["test"]
X_test = []
for i, row1 in enumerate(X_test1):
row1 = row1 + X_test2[i]
X_test.append(row1)
print("Pad sequences (samples x time)")
X_train = sequence.pad_sequences(X_train, maxlen=max_len)
X_test = sequence.pad_sequences(X_test, maxlen=max_len)
print(('X_train shape:', X_train.shape))
print(('X_test shape:', X_test.shape))
y_train = np.array(y_train) / 2.0
print(('y_train values: ', np.unique(y_train)))
print('Training data sizes:')
print((X_train.shape))
print((y_train.shape))
if use_doc_level_features:
pair_doc_feats_tr = np.concatenate((self.ling_items_feat[self.a1_train, :],
self.ling_items_feat[self.a2_train, :]), axis=1)
print((pair_doc_feats_tr.shape))
print(n_doc_level_feats * 2)
pair_doc_feats_test = np.concatenate((self.ling_items_feat[self.a1_test, :],
self.ling_items_feat[self.a2_test, :]), axis=1)
print('Build model...')
if self.model is None:
self.model = Graph()
self.model.add_input(name='input', input_shape=(max_len,), dtype='int')
self.model.add_node(Embedding(self.embeddings.shape[0], self.embeddings.shape[1], input_length=max_len,
weights=[self.embeddings]), name='embedding', input='input')
self.model.add_node(LSTM(64), name='forward', input='embedding')
self.model.add_node(LSTM(64, go_backwards=True), name='backward', input='embedding')
self.model.add_node(Dropout(0.5), name='dropout', inputs=['forward', 'backward'])
if use_doc_level_features:
self.model.add_input(name='docfeatures', input_shape=(n_doc_level_feats*2,), dtype='float')
self.model.add_node(Dense(64, activation='relu'), name='docfeatures_hiddenlayer', input='docfeatures')
self.model.add_node(Dropout(0.5), name='dropout_docfeatures', input='docfeatures_hiddenlayer')
self.model.add_node(Dense(1, activation='sigmoid'), name='sigmoid',
inputs=['dropout_docfeatures', 'dropout'])
else:
self.model.add_node(Dense(1, activation='sigmoid'), name='sigmoid', input='dropout')
self.model.add_output(name='output', input='sigmoid')
# try using different optimizers and different optimizer configs
self.model.compile('adam', {'output': 'binary_crossentropy'})
print('Train...')
if use_doc_level_features:
self.model.fit({'input': X_train, 'docfeatures' : pair_doc_feats_tr, 'output': y_train},
batch_size=batch_size, nb_epoch=nb_epoch)
else:
self.model.fit({'input': X_train, 'output': y_train}, batch_size=batch_size, nb_epoch=nb_epoch)
print('Prediction')
if use_doc_level_features:
model_predict = self.model.predict({'input': X_test, 'docfeatures': pair_doc_feats_test}, batch_size=batch_size)
else:
model_predict = self.model.predict({'input': X_test}, batch_size=batch_size)
proba = np.array(model_predict['output'])
#proba = np.zeros(len(prefs_test))
if self.a_rank_test is not None:
X_train = self.X[self.a_rank_train]
X_test = self.X[self.a_rank_test]
print((len(X_train), 'train sequences'))
print((len(X_test), 'test sequences'))
print("Pad sequences (samples x time)")
X_train = sequence.pad_sequences(X_train, maxlen=max_len)
X_test = sequence.pad_sequences(X_test, maxlen=max_len)
print(('X_train shape:', X_train.shape))
print(('X_test shape:', X_test.shape))
print('Build model...')
rank_model = Graph()
        rank_model.add_input(name='input', input_shape=(max_len,), dtype='int')
rank_model.add_node(Embedding(self.embeddings.shape[0], self.embeddings.shape[1], input_length=max_len,
weights=[self.embeddings]), name='embedding', input='input')
rank_model.add_node(LSTM(64), name='forward', input='embedding')
rank_model.add_node(LSTM(64, go_backwards=True), name='backward', input='embedding')
rank_model.add_node(Dropout(0.5), name='dropout', inputs=['forward', 'backward'])
# match output layer for regression better
if use_doc_level_features:
rank_model.add_input(name='docfeatures', input_shape=(n_doc_level_feats,), dtype='float')
rank_model.add_node(Dense(64, activation='relu'), name='docfeatures_hiddenlayer', input='docfeatures')
rank_model.add_node(Dropout(0.5), name='dropout_docfeatures', input='docfeatures_hiddenlayer')
rank_model.add_node(Dense(1, activation='linear', init='uniform'), name='output_layer',
inputs=['dropout_docfeatures', 'dropout'])
else:
rank_model.add_node(Dense(1, activation='linear', init='uniform'), name='output_layer', input='dropout')
rank_model.add_output(name='output', input='output_layer')
# use mean absolute error loss
rank_model.compile('adam', {'output': 'mean_absolute_error'})
print('Train...')
if use_doc_level_features:
rank_model.fit({'input': X_train, 'docfeatures' : self.ling_items_feat[self.a_rank_train, :], 'output':
self.scores_rank_train}, batch_size=batch_size, nb_epoch=nb_epoch)
else:
rank_model.fit({'input': X_train, 'output': self.scores_rank_train}, batch_size=batch_size, nb_epoch=nb_epoch)
print('Prediction')
if use_doc_level_features:
model_predict = rank_model.predict({'input': X_test, 'docfeatures': self.ling_items_feat[self.a_rank_test, :]},
batch_size=batch_size)
else:
model_predict = rank_model.predict({'input': X_test}, batch_size=batch_size)
predicted_f = np.asarray(model_predict['output']).flatten()
print(('Unique regression predictions: ', np.unique(predicted_f)))
else:
predicted_f = None
if self.a1_unseen is not None and len(self.a1_unseen):
X_test = []
X_test1 = self.X[self.a1_unseen]
X_test2 = self.X[self.a2_unseen]
for i, row1 in enumerate(X_test1):
row1 = row1 + X_test2[i]
X_test.append(row1)
X_test = sequence.pad_sequences(X_test, maxlen=max_len)
print('Prediction on unseen data...')
if use_doc_level_features:
pair_doc_feats_unseen = np.concatenate((self.ling_items_feat[self.a1_unseen, :],
self.ling_items_feat[self.a2_unseen, :]), axis=1)
model_predict = self.model.predict({'input': X_test, 'docfeatures': pair_doc_feats_unseen}, batch_size=batch_size)
else:
model_predict = self.model.predict({'input': X_test}, batch_size=batch_size)
tr_proba = np.array(model_predict['output'])
else:
tr_proba = None
return proba, predicted_f, tr_proba
def _choose_method_fun(self, feature_type):
if 'SinglePrefGP' in self.method:
method_runner_fun = self.run_gppl
elif 'GP+SVM' in self.method:
method_runner_fun = self.run_gpsvm
elif 'SingleGPC' in self.method:
method_runner_fun = self.run_gpc
elif 'SVM' in self.method:
method_runner_fun = lambda: self.run_svm(feature_type)
elif 'BI-LSTM' in self.method:
if feature_type == 'ling':
logging.error("BI-LSTM is not set up to run without using embeddings. Will switch to feature type=both...")
feature_type = 'both'
method_runner_fun = lambda: self.run_bilstm(feature_type)
return method_runner_fun
def _set_resultsfile(self, feature_type, embeddings_type, data_inc):
# To run the active learning tests, call this function with dataset_increment << 1.0.
output_data_dir = os.path.join(data_root_dir, 'outputdata/')
if not os.path.isdir(output_data_dir):
os.mkdir(output_data_dir)
output_data_dir = os.path.join(output_data_dir, self.expt_output_dir)
if not os.path.isdir(output_data_dir):
os.mkdir(output_data_dir)
# Select output paths for CSV files and final results
output_filename_template = output_data_dir + '/%s' % self.expt_tag
output_filename_template += '_%s_%s_%s_%s_di%.2f'
results_stem = output_filename_template % (self.dataset, self.method, feature_type, embeddings_type, data_inc)
resultsfile = results_stem + '_test.pkl' # the old results format with everything in one file
# modelfile = results_stem + '_model_fold%i.pkl'
logging.info('The output file for the results will be: %s' % resultsfile)
if not os.path.isdir(results_stem):
os.mkdir(results_stem)
return resultsfile, results_stem
def _load_dataset(self, dataset):
self.folds, self.folds_test, self.folds_r, self.word_index_to_embeddings_map, self.word_to_indices_map, \
self.index_to_word_map = load_train_test_data(dataset)
self.ling_feat_spmatrix, self.docids = load_ling_features(dataset)
self.dataset = dataset
def _init_ls(self, feature_type, embeddings_type, ls_factor=1):
if self.dataset in self.default_ls_values and feature_type in self.default_ls_values[self.dataset] and \
embeddings_type in self.default_ls_values[self.dataset][feature_type]:
self.default_ls = self.default_ls_values[self.dataset][feature_type][embeddings_type]
elif 'GP' in self.method:
ls_file = './data/ls_%s_%s_%s.csv' % (self.dataset, feature_type, embeddings_type)
if os.path.exists(ls_file):
self.default_ls = np.genfromtxt(ls_file)
else:
self.default_ls = compute_lengthscale_heuristic(feature_type, embeddings_type, self.embeddings,
self.ling_feat_spmatrix, self.docids, self.folds, self.index_to_word_map)
np.savetxt(ls_file, self.default_ls)
self.default_ls *= ls_factor
# self.default_ls /= float(len(self.default_ls)) # long lengthscale does strange things with training dataset
if self.dataset not in self.default_ls_values:
self.default_ls_values[self.dataset] = {}
if feature_type not in self.default_ls_values[self.dataset]:
self.default_ls_values[self.dataset][feature_type] = {}
self.default_ls_values[self.dataset][feature_type][embeddings_type] = self.default_ls
else:
self.default_ls = []
def _set_embeddings(self, embeddings_type):
if 'word_mean' == embeddings_type and not hasattr(self, 'word_embeddings'):
self.word_embeddings = load_embeddings(self.word_index_to_embeddings_map)
elif 'word_mean' != embeddings_type:
self.word_embeddings = None
if 'skipthoughts' == embeddings_type and not hasattr(self, 'skipthoughts_model'):
self.skipthoughts_model = load_skipthoughts_embeddings(self.word_to_indices_map)
elif 'skipthoughts' != embeddings_type:
self.skipthoughts_model = None
if 'siamese-cbow' == embeddings_type and not hasattr(self, 'siamese_cbow_embeddings'):
self.siamese_cbow_embeddings = load_siamese_cbow_embeddings(self.word_to_indices_map)
elif 'siamese-cbow' != embeddings_type:
self.siamese_cbow_embeddings = None
if embeddings_type == 'word_mean':
self.embeddings = self.word_embeddings
elif embeddings_type == 'skipthoughts':
self.embeddings = self.skipthoughts_model
elif embeddings_type == 'siamese-cbow':
self.embeddings = self.siamese_cbow_embeddings
else:
self.embeddings = None
def _reload_partial_result(self, resultsfile):
if not os.path.isfile(resultsfile):
all_proba = {}
all_predictions = {}
all_f = {}
all_tr_proba = {}
all_tr_f = {}
all_target_prefs = {}
all_target_rankscores = {}
final_ls = {}
times = {}
else:
with open(resultsfile, 'rb') as fh:
all_proba, all_predictions, all_f, all_target_prefs, all_target_rankscores, _, times, final_ls, \
all_tr_proba, all_tr_f, _ = pickle.load(fh, encoding='latin1')
if all_tr_proba is None:
all_tr_proba = {}
if all_tr_f is None:
all_tr_f = {}
return all_proba, all_predictions, all_f, all_target_prefs, all_target_rankscores, times, final_ls, \
all_tr_proba, all_tr_f
def run_test(self, feature_type, embeddings_type=None, dataset_increment=0, subsample_amount=0,
min_no_folds=0, max_no_folds=32, npairs=0, test_on_all_training_pairs=False, ls_factor=1):
logging.info("**** Running method %s with features %s, embeddings %s, on dataset %s ****" % (self.method,
feature_type, embeddings_type, self.dataset) )
self._set_embeddings(embeddings_type)
self._init_ls(feature_type, embeddings_type, ls_factor)
resultsfile, results_stem = self._set_resultsfile(feature_type, embeddings_type, dataset_increment)
self.results_stem = results_stem
all_proba, all_predictions, all_f, all_target_prefs, all_target_rankscores, times, final_ls, all_tr_proba, \
all_tr_f = self._reload_partial_result(resultsfile)
np.random.seed(121) # allows us to get the same initialisation for all methods/feature types/embeddings
if os.path.isfile(results_stem + '/foldorder.txt'):
fold_keys = np.genfromtxt(os.path.expanduser(results_stem + '/foldorder.txt'), dtype=str)
else:
fold_keys = list(self.folds.keys())
fold_keys = np.sort(fold_keys)
for foldidx, self.fold in enumerate(fold_keys):
self.foldidx = foldidx
if foldidx in all_proba and dataset_increment==0:
print(("Skipping fold %i, %s" % (foldidx, self.fold)))
continue
if foldidx >= max_no_folds or foldidx < min_no_folds:
print(("Already completed maximum no. folds or this fold is below the minimum specified."
" Skipping fold %i, %s" % (foldidx, self.fold)))
continue
foldresultsfile = results_stem + '/fold%i.pkl' % foldidx
if foldidx not in all_proba and os.path.isfile(foldresultsfile):
if dataset_increment == 0:
print(("Skipping fold %i, %s" % (foldidx, self.fold)))
continue
with open(foldresultsfile, 'rb') as fh:
all_proba[foldidx], all_predictions[foldidx], all_f[foldidx], all_target_prefs[foldidx],\
all_target_rankscores[foldidx], _, times[foldidx], final_ls[foldidx], all_tr_proba[foldidx], _, _ = \
pickle.load(fh, encoding='latin1')
# Get data for this fold -----------------------------------------------------------------------------------
print(("Fold name ", self.fold))
a1_train, a2_train, prefs_train, person_train, a1_test, a2_test, prefs_test, person_test,\
self.X, uids, utexts, upersonIDs, a1_agg, a2_agg, gold_train, person_agg \
= get_noisy_fold_data(self.folds, self.folds_test, self.fold, self.docids)
# ranking folds
a_rank_train, scores_rank_train, _, person_rank_train, a_rank_test, scores_rank_test, _, \
person_rank_test = get_fold_regression_data(self.folds_r, self.fold, self.docids)
# rand_items = np.random.choice(1052, 200, replace=False)
# tridxs = np.in1d(a1_train, rand_items) & np.in1d(a2_train, rand_items)
# a1_train = a1_train[tridxs]
# a2_train = a2_train[tridxs]
# prefs_train = prefs_train[tridxs]
# person_train = person_train[tridxs]
# convert the ranking person IDs to the idxs
if person_rank_train is not None and len(person_rank_train):
person_rank_train = np.array([np.argwhere(upersonIDs == p.strip())[0][0] if p.strip() in upersonIDs else -1
for p in person_rank_train])
a_rank_train = a_rank_train[person_rank_train != -1]
scores_rank_train = scores_rank_train[person_rank_train != -1]
person_rank_train = person_rank_train[person_rank_train != -1]
if person_rank_test is not None and len(person_rank_test):
# If running personalized predictions, ensure we only test on workers that were seen in training because
# we have no worker features to predict preferences of new workers.
person_rank_test = np.array([np.argwhere(upersonIDs == p.strip())[0][0] if p.strip() in upersonIDs else -1
for p in person_rank_test])
valid_test_idxs = (person_rank_test != -1) & np.in1d(person_rank_test, person_rank_train)
a_rank_test = a_rank_test[valid_test_idxs]
scores_rank_test = scores_rank_test[valid_test_idxs]
person_rank_test = person_rank_test[valid_test_idxs]
if len(person_test) and len(np.unique(person_test)) > 1:
# If running personalized predictions, ensure we only test on workers that were seen in training because
# we have no worker features to predict preferences of new workers.
valid_test_idxs = np.in1d(person_test, person_train)
a1_test = a1_test[valid_test_idxs]
a2_test = a2_test[valid_test_idxs]
person_test = person_test[valid_test_idxs]
prefs_test = prefs_test[valid_test_idxs]
self.prefs_test = prefs_test
self.load_features(feature_type, embeddings_type, a1_train, a2_train, uids, utexts)
#items_feat = items_feat[:, :ndebug_features]
# Subsample training data --------------------------------------------------------------------------------------
if npairs == 0:
npairs_f = len(a1_train)
else:
npairs_f = npairs
nseen_so_far = 0
if dataset_increment != 0:
if foldidx in all_proba and all_proba[foldidx].shape[1] >= float(npairs_f) / dataset_increment:
print(("Skipping fold %i, %s" % (foldidx, self.fold)))
continue
nnew_pairs = dataset_increment
else:
nnew_pairs = npairs_f
# choose the initial dataset
if self.fold in self.initial_pair_subset:
pair_subset = self.initial_pair_subset[self.fold]
elif dataset_increment != 0:
pair_subset = np.random.choice(len(a1_train), nnew_pairs, replace=False)
elif subsample_amount > 0:
pair_subset = subsample_tr_data(subsample_amount, a1_train, a2_train)
else:
pair_subset = np.arange(npairs_f)
# save so we can reuse for another method
self.initial_pair_subset[self.fold] = pair_subset
self.verbose = verbose
self.optimize_hyper = ('noOpt' not in self.method)
# with open(modelfile % foldidx, 'r') as fh:
# model = pickle.load(fh)
# items_feat_test = None
self.model = None # initial value
if len(self.default_ls) > 1:
self.ls_initial = self.default_ls[self.valid_feats]
else:
self.ls_initial = self.default_ls
if '_oneLS' in self.method:
self.ls_initial = np.median(self.ls_initial)
logging.info("Selecting a single LS for all features: %f" % self.ls_initial)
logging.info("Starting test with method %s..." % (self.method))
starttime = time.time()
unseen_subset = np.ones(len(a1_train), dtype=bool)
# Run the chosen method with active learning simulation if required---------------------------------------------
while nseen_so_far < npairs_f:
logging.info('****** Fitting model with %i pairs in fold %i, %s ******' % (len(pair_subset), foldidx, self.fold))
# get the indexes of data points that are not yet seen
if not test_on_all_training_pairs:
unseen_subset[pair_subset] = False
if dataset_increment == 0: # no active learning, don't need to evaluate the unseen data points
unseen_subset[:] = False
# set the current dataset
self.a1_train = a1_train[pair_subset]
self.a2_train = a2_train[pair_subset]
self.prefs_train = prefs_train[pair_subset]
self.person_train = person_train[pair_subset]
self.a1_test = a1_test
self.a2_test = a2_test
self.person_test = person_test
                self.a1_unseen = a1_agg  # previously indexed by unseen_subset; changed so we test the aggregated labels after combining the crowd
                self.a2_unseen = a2_agg  # previously indexed by unseen_subset
                self.person_unseen = person_agg  # previously indexed by unseen_subset
self.a_rank_train = a_rank_train
self.scores_rank_train = scores_rank_train
self.person_rank_train = person_rank_train
self.a_rank_test = a_rank_test
self.person_rank_test = person_rank_test
if self.a_rank_test is not None and len(self.person_rank_test) == 0:
self.person_rank_test = np.zeros(len(self.a_rank_test)) # if no person IDs, make sure we default to 0
# run the method with the current data subset
method_runner_fun = self._choose_method_fun(feature_type)
proba, predicted_f, tr_proba, tr_f = method_runner_fun()
endtime = time.time()
# make it the right shape
proba = np.array(proba)
if proba.ndim == 2 and proba.shape[1] > 1:
proba = proba[:, 1:2]
elif proba.ndim == 1:
proba = proba[:, None]
predictions = np.round(proba)
if predicted_f is not None:
predicted_f = np.array(predicted_f)
if predicted_f.ndim == 3:
predicted_f = predicted_f[0]
if predicted_f.ndim == 1:
predicted_f = predicted_f[:, None]
if tr_proba is not None:
tr_proba = np.array(tr_proba)
if tr_proba.ndim == 2 and tr_proba.shape[1] > 1:
tr_proba = tr_proba[:, 1:2]
elif tr_proba.ndim == 1:
tr_proba = tr_proba[:, None]
if tr_f is not None:
tr_f = np.array(tr_f)
if tr_f.ndim == 1:
tr_f = tr_f[:, None]
# get more data
nseen_so_far += nnew_pairs
nnew_pairs = dataset_increment # int(np.floor(dataset_increment * npairs_f))
if nseen_so_far >= npairs_f:
# the last iteration possible
nnew_pairs = npairs_f - nseen_so_far
nseen_so_far = npairs_f
else:
# don't do this if we have already seen all the data
# use predictions at available training points
tr_proba = np.array(tr_proba)
uncertainty = tr_proba * np.log(tr_proba) + (1-tr_proba) * np.log(1-tr_proba) # -ve shannon entropy
ranked_pair_idxs = np.argsort(uncertainty.flatten())
new_pair_subset = ranked_pair_idxs[:nnew_pairs] # active learning (uncertainty sampling) step
new_pair_subset = np.argwhere(unseen_subset)[new_pair_subset].flatten()
pair_subset = np.concatenate((pair_subset, new_pair_subset))
# if tr_proba is not None:
# tr_proba_complete = prefs_train.flatten()[:, np.newaxis] / 2.0
# tr_proba_complete[unseen_subset] = tr_proba
# tr_proba = tr_proba_complete
logging.info("@@@ Completed running fold %i with method %s, features %s, %i data so far, in %f seconds." % (
foldidx, self.method, feature_type, nseen_so_far, endtime-starttime) )
if predictions.size == prefs_test.size:
acc = np.sum(prefs_test[prefs_test != 1] == 2 * predictions.flatten()[prefs_test != 1]
) / float(np.sum(prefs_test != 1))
CEE = log_loss(prefs_test[prefs_test != 1] == 2, proba[prefs_test != 1])
print("Accuracy for fold = %f" % acc)
print('CEE = %f' % CEE)
tau = 0
tau_40 = 0
if predicted_f is not None and predicted_f.size == scores_rank_test.size:
                    # print out the Kendall's tau correlation
                    if person_rank_test is not None and len(person_rank_test):
                        tau = []
                        tau_40 = []
for upeep in np.unique(person_rank_test):
idxs = person_rank_test == upeep
if np.sum(idxs) < 2 or len(scores_rank_test[idxs]) < 2:
continue
tau_p, _ = kendalltau(scores_rank_test[idxs], predicted_f.flatten()[idxs])
if np.isnan(tau_p):
continue
tau.append(tau_p)
if np.sum(person_rank_train == upeep) >= 40:
tau_40.append(tau_p)
tau = np.mean(tau)
tau_40 = np.mean(tau_40)
else:
tau, _ = kendalltau(scores_rank_test, predicted_f.flatten())
tau_40 = tau
print("Kendall's tau for fold = %f" % tau)
print("For worker IDs with at least 40 annotations: Kendall's tau for fold = %f" % tau_40)
if tr_proba is not None:
tr_acc = np.sum(gold_train[gold_train != 1] == 2 * np.round(tr_proba).flatten()[gold_train != 1]
) / float(np.sum(gold_train != 1))
tr_cee = log_loss(gold_train[gold_train != 1]==2, tr_proba[gold_train != 1])
print("Unseen data in the training fold, accuracy for fold = %f" % tr_acc )
else:
tr_acc = 0
tr_cee = 0
tr_tau = 0
tr_tau_40 = 0
                if tr_f is not None:
                    tr_tau = []
                    tr_tau_40 = []
for upeep in np.unique(person_rank_train):
idxs = person_rank_train == upeep
if np.sum(idxs) < 2:
continue
tau_p, _ = kendalltau(scores_rank_train[idxs], tr_f.flatten()[idxs])
tr_tau.append(tau_p)
if np.sum(person_rank_train == upeep) >= 40:
tr_tau_40.append(tau_p)
tr_tau = np.mean(tr_tau)
tr_tau_40 = np.mean(tr_tau_40)
print("(training set) Kendall's tau for fold = %f" % tr_tau)
print("(training set) For worker IDs with at least 40 annotations: Kendall's tau for fold = %f" % tr_tau_40)
else:
tr_tau = 0
# Save the data for later analysis ----------------------------------------------------------------------------
if hasattr(self.model, 'ls'):
final_ls[foldidx] = self.model.ls
else:
final_ls[foldidx] = [0]
# Outputs from the tested method
if foldidx not in all_proba:
all_proba[foldidx] = proba
all_predictions[foldidx] = predictions
all_f[foldidx] = predicted_f
all_tr_proba[foldidx] = tr_proba
all_tr_f[foldidx] = tr_f
else:
all_proba[foldidx] = np.concatenate((all_proba[foldidx], proba), axis=1)
all_predictions[foldidx] = np.concatenate((all_predictions[foldidx], predictions), axis=1)
if predicted_f is not None:
all_f[foldidx] = np.concatenate((all_f[foldidx], predicted_f), axis=1)
if tr_proba is not None:
all_tr_proba[foldidx] = np.concatenate((all_tr_proba[foldidx], tr_proba), axis=1)
if tr_f is not None:
all_tr_f[foldidx] = np.concatenate((all_tr_f[foldidx], tr_f), axis=1)
# Save the ground truth
all_target_prefs[foldidx] = prefs_test
if self.folds_r is not None:
all_target_rankscores[foldidx] = scores_rank_test
else:
all_target_rankscores[foldidx] = None
# Save the time taken
times[foldidx] = endtime-starttime
results = (all_proba[foldidx], all_predictions[foldidx], all_f[foldidx], all_target_prefs[foldidx],
all_target_rankscores[foldidx], self.ls_initial, times[foldidx], final_ls[foldidx],
all_tr_proba[foldidx], all_tr_f[foldidx], len(self.a1_train))
with open(foldresultsfile, 'wb') as fh:
pickle.dump(results, fh)
pair_prob_file = results_stem + '/pair_probs_%i.csv' % foldidx
pd.DataFrame(proba).to_csv(pair_prob_file)
if not os.path.isfile(results_stem + "/foldorder.txt"):
np.savetxt(results_stem + "/foldorder.txt", fold_keys[:, None], fmt="%s")
results_file = os.path.join(self.results_stem, 'metrics.csv')
try:
metrics = pd.read_csv(results_file).values.tolist()
except:
metrics = []
metrics.append([acc, CEE, tau, tau_40, tr_acc, tr_cee, tr_tau, tr_tau_40])
metric_names = ['acc', 'CEE', 'tau', 'tau_40', 'tr_acc', 'tr_CEE', 'tr_tau', 'tr_tau_40']
pd.DataFrame(np.array(metrics), columns=metric_names).to_csv(results_file, index=False)
# with open(modelfile % foldidx, 'w') as fh:
# pickle.dump(model, fh)
del self.model # release the memory before we try to do another iteration
results_file = os.path.join(self.results_stem, 'metrics.csv')
try:
metrics = pd.read_csv(results_file).values.tolist()
metric_means = np.mean(metrics, axis=0)
print(metric_means)
metrics.append(metric_means)
pd.DataFrame(np.array(metrics), columns=metric_names).to_csv(results_file, index=False)
except:
print('no metrics file found')
def run_test_set(self, subsample_tr=0, min_no_folds=0, max_no_folds=32,
npairs=0, test_on_train=False, ls_factor=1):
# keep these variables around in case we are restarting the script with different method settings and same data.
for dataset in self.datasets:
self.initial_pair_subset = {} # reset this when we use a different dataset
for self.method in self.methods:
if self.folds is None or self.dataset != dataset:
self._load_dataset(dataset) # reload only if we use a new dataset
if (self.dataset == 'UKPConvArgAll' or self.dataset == 'UKPConvArgStrict' or self.dataset == 'UKPConvArgCrowd_evalAll') \
and ('IndPref' in self.method or 'Personalised' in self.method):
logging.warning('Skipping method %s on dataset %s because there are no separate worker IDs.'
% (self.method, self.dataset))
continue
for feature_type in self.feature_types:
if feature_type == 'embeddings' or feature_type == 'both' or feature_type=='debug':
embeddings_to_use = self.embeddings_types
else:
embeddings_to_use = ['']
for embeddings_type in embeddings_to_use:
self.run_test(feature_type, embeddings_type, dataset_increment=self.dataset_increment,
subsample_amount=subsample_tr, min_no_folds=min_no_folds, max_no_folds=max_no_folds,
npairs=npairs, test_on_all_training_pairs=test_on_train, ls_factor=ls_factor)
logging.info("**** Completed: method %s with features %s, embeddings %s ****" % (self.method, feature_type,
embeddings_type) )
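
# Illustrative sketch (hypothetical helper, added for clarity; not called by the
# experiment code): the active-learning step in run_test scores each unseen pair
# by the negative Shannon entropy of its predicted preference probability and
# picks the most uncertain pairs first, mirroring the argsort over `uncertainty`
# above (with clipping added here to avoid log(0)).
def _example_uncertainty_sampling(tr_proba, nnew_pairs):
    p = np.clip(np.asarray(tr_proba, dtype=float).flatten(), 1e-9, 1 - 1e-9)
    neg_entropy = p * np.log(p) + (1 - p) * np.log(1 - p)  # most uncertain = most negative
    return np.argsort(neg_entropy)[:nnew_pairs]  # indexes of the most uncertain pairs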
if __name__ == '__main__':
dataset_increment = 0
# datasets = ['UKPConvArgCrowdSample_evalMACE']
# methods = ['dummy']
# feature_types = ['both']
# embeddings_types = ['word_mean']
#
# runner = TestRunner('personalised', datasets, feature_types, embeddings_types, methods,
# dataset_increment)
# runner.run_test_set(min_no_folds=0, max_no_folds=32)
datasets = ['UKPConvArgStrict']
methods = ['SinglePrefGP_weaksprior_noOpt']
feature_types = ['both']
embeddings_types = ['word_mean']
runner = TestRunner('crowdsourcing_argumentation_expts', datasets, feature_types, embeddings_types, methods,
dataset_increment)
runner.run_test_set(min_no_folds=0, max_no_folds=32)
|
the-stack_106_19448
|
#!/usr/bin/env pytest-3
# -*- coding: utf-8 -*-
__author__ = "Marc-Olivier Buob"
__maintainer__ = "Marc-Olivier Buob"
__email__ = "[email protected]"
__copyright__ = "Copyright (C) 2020, Nokia"
__license__ = "BSD-3"
from pybgl.ipynb import in_ipynb, ipynb_display_graph
from pybgl.nfa import (
Nfa, accepts, add_edge, initials, finals, delta, set_final
)
from pybgl.thompson_compile_nfa import (
DEFAULT_ALPHABET,
alternation, bracket, concatenation, literal,
one_or_more,
parse_bracket, parse_escaped, parse_repetition,
repetition, repetition_range,
thompson_compile_nfa,
zero_or_one, zero_or_more,
)
def nfa_to_triple(nfa) -> tuple:
q0s = set(initials(nfa))
assert len(q0s) == 1
q0 = q0s.pop()
fs = set(finals(nfa))
assert len(fs) == 1
f = fs.pop()
return (nfa, q0, f)
def make_nfa1() -> Nfa:
(nfa, q0, f) = literal("x")
return nfa
def make_nfa2() -> Nfa:
g = Nfa(2)
add_edge(0, 0, "a", g)
add_edge(0, 1, "b", g)
set_final(1, g)
return g
def test_literal():
(nfa, q0, f) = literal("a")
assert set(initials(nfa)) == {0}
assert set(finals(nfa)) == {1}
assert delta(0, "a", nfa) == {1}
def test_concatenation():
(nfa1, q01, f1) = nfa_to_triple(make_nfa1())
(nfa2, q02, f2) = nfa_to_triple(make_nfa2())
(nfa, q0, f) = concatenation(nfa1, q01, f1, nfa2, q02, f2)
assert accepts("xaab", nfa) == True
assert accepts("x", nfa) == False
assert accepts("aab", nfa) == False
def test_alternation():
(nfa1, q01, f1) = nfa_to_triple(make_nfa1())
(nfa2, q02, f2) = nfa_to_triple(make_nfa2())
(nfa, q0, f) = alternation(nfa1, q01, f1, nfa2, q02, f2)
assert accepts("xaab", nfa) == False
assert accepts("x", nfa) == True
assert accepts("aab", nfa) == True
def test_zero_or_one():
(nfa, q0, f) = nfa_to_triple(make_nfa1())
(nfa, q0, f) = zero_or_one(nfa, q0, f)
assert accepts("", nfa) == True
assert accepts("x", nfa) == True
assert accepts("xx", nfa) == False
assert accepts("a", nfa) == False
def test_zero_or_more():
(nfa, q0, f) = nfa_to_triple(make_nfa1())
(nfa, q0, f) = zero_or_more(nfa, q0, f)
assert accepts("", nfa) == True
assert accepts("x", nfa) == True
assert accepts("xx", nfa) == True
assert accepts("a", nfa) == False
def test_one_or_more():
(nfa, q0, f) = nfa_to_triple(make_nfa1())
(nfa, q0, f) = one_or_more(nfa, q0, f)
assert accepts("", nfa) == False
assert accepts("x", nfa) == True
assert accepts("xx", nfa) == True
assert accepts("a", nfa) == False
def test_parse_repetition():
for (m, n) in [(0, 1), (0, None), (1, None), (3, 3), (2, 4), (2, None)]:
for fmt in ["{%s,%s}", "{ %s , %s }"]:
s = "{%s, %s}" % (
m if m is not None else "",
n if n is not None else ""
)
assert parse_repetition(s) == (m, n)
def test_repetition():
(nfa, q0, f) = nfa_to_triple(make_nfa1())
m = 4
(nfa, q0, f) = repetition(nfa, q0, f, m)
words = ["x" * i for i in range(10)]
# Exactly m repetition
for (i, w) in enumerate(words):
assert accepts(w, nfa) == (i == m)
def test_repetition_range():
a = "a"
for (m, n) in [(3, 5), (0, 3), (3, 3), (3, None)]:
(nfa, q0, f) = literal(a)
(nfa, q0, f) = repetition_range(nfa, q0, f, m, n)
for i in range(10):
expected = (m <= i) and (n is None or i <= n)
w = a * i
obtained = accepts(w, nfa)
assert obtained == expected, f"(m, n) = {(m, n)} i = {i} w = {w}"
def test_parse_bracket():
map_input_expected = {
"[a-z]" : [
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'
],
"[a-zA-Z]" : [
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'
],
"[a-e0-9P-T]" : [
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'P', 'Q', 'R',
'S', 'T', 'a', 'b', 'c', 'd', 'e'
],
"[^a-zA-Z]" : [
'\t', '\n', '\x0b', '\x0c', '\r', ' ', '!', '"', '#', '$', '%',
'&', "'", '(', ')', '*', '+', ',', '-', '.', '/', '0', '1', '2',
'3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '=', '>', '?',
'@', '[', '\\', ']', '^', '_', '`', '{', '|', '}', '~'
],
}
for (s, expected) in map_input_expected.items():
assert sorted(parse_bracket(s)) == expected
def test_bracket():
s = "[X-Z03a-e]"
chars = parse_bracket(s)
(nfa, q0, f) = bracket(chars)
for a in "XYZ03abcde":
assert accepts(a, nfa) == True
for a in "ABC12456789fghi":
assert accepts(a, nfa) == False
def test_parse_bracket_escaped():
s = r"[\s]"
assert parse_bracket(s) == {' ', '\t'}
def test_parse_escaped():
assert parse_escaped(r"\.") == {"."}
assert parse_escaped(r"\|") == {"|"}
assert parse_escaped(r"\?") == {"?"}
assert parse_escaped(r"\*") == {"*"}
assert parse_escaped(r"\+") == {"+"}
assert parse_escaped(r"\(") == {"("}
assert parse_escaped(r"\)") == {")"}
assert parse_escaped(r"\[") == {"["}
assert parse_escaped(r"\]") == {"]"}
assert parse_escaped(r"\{") == {"{"}
assert parse_escaped(r"\}") == {"}"}
assert parse_escaped(r"\w") == parse_bracket("[a-zA-Z0-9]")
assert parse_escaped(r"\W") == parse_bracket("[^a-zA-Z0-9]")
assert parse_escaped(r"\d") == parse_bracket("[0-9]")
assert parse_escaped(r"\D") == parse_bracket("[^0-9]")
assert parse_escaped(r"\a") == {"\a"}
assert parse_escaped(r"\b") == {"\b"}
assert parse_escaped(r"\f") == {"\f"}
assert parse_escaped(r"\n") == {"\n"}
assert parse_escaped(r"\r") == {"\r"}
assert parse_escaped(r"\t") == {"\t"}
assert parse_escaped(r"\v") == {"\v"}
def test_escaped_operator():
(nfa, q0, f) = thompson_compile_nfa("a\\?b")
assert accepts("a?b", nfa) == True
assert accepts("ab", nfa) == False
assert accepts("b", nfa) == False
(nfa, q0, f) = thompson_compile_nfa("a?b")
assert accepts("a?b", nfa) == False
assert accepts("ab", nfa) == True
assert accepts("b", nfa) == True
for regexp in r"\|", r"\.", r"\*", r"\+", r"\(", r"\)", r"\{", r"\}", r"\[", r"\]":
(nfa, q0, f) = thompson_compile_nfa(regexp)
assert accepts(regexp.replace("\\", ""), nfa)
regexp = r"\|\.\*\+\(\)\{\}\[\]"
(nfa, q0, f) = thompson_compile_nfa(regexp)
accepts(regexp.replace("\\", ""), nfa)
def test_escaped_classes():
whole_alphabet = DEFAULT_ALPHABET
escaped_classes = [r"\d", r"\w", r"\s", r"\D", r"\W", r"\S"]
map_escape_allowed = {
r : set(parse_escaped(r, whole_alphabet))
for r in escaped_classes
}
for regexp in [r"\d", r"\w", r"\s", r"\D", r"\W", r"\S"]:
allowed = map_escape_allowed[regexp.lower()]
if regexp.lower() != regexp:
allowed = set(whole_alphabet) - allowed
(nfa, q0, f) = thompson_compile_nfa(regexp, whole_alphabet)
for a in whole_alphabet:
assert accepts(a, nfa) == (a in allowed), \
f"regexp = {regexp} a = '{a}' ({ord(a)}) allowed = '{allowed}' obtained = {accepts(a, nfa)} expected = {a in allowed}"
def test_class_s():
for r in (r"\s+", r"[\s]+"):
print(r)
(nfa, q0, f) = thompson_compile_nfa(r)
assert nfa.accepts(" ")
assert nfa.accepts(" ")
assert nfa.accepts("\t\t")
assert nfa.accepts(" \t \t ")
def test_thompson_compile_nfa():
(nfa, q0, f) = thompson_compile_nfa("(a?b)*?c+d")
if in_ipynb():
ipynb_display_graph(nfa)
w = "babbbababcccccd"
assert accepts(w, nfa) == True
def test_thompson_compile_nfa_bracket_repetitions():
(nfa, q0, f) = thompson_compile_nfa("[x-z]{1,3}")
if in_ipynb():
ipynb_display_graph(nfa)
for w in ["x", "y", "xx", "xy", "zy", "xxx", "yyy", "zzz", "xyz", "zyx"]:
assert accepts(w, nfa) == True
for w in ["", "xxxx", "aaa"]:
assert accepts(w, nfa) == False
(nfa, q0, f) = thompson_compile_nfa("x{3}")
assert accepts("xxx", nfa)
def test_thompson_compile_nfa_escaped_operators():
regexp = r"\|\.\*\+\(\)\{\}\[\]aa"
(nfa, q0, f) = thompson_compile_nfa(regexp)
accepts(regexp.replace("\\", ""), nfa)
if in_ipynb():
ipynb_display_graph(nfa)
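
def test_thompson_compile_nfa_alternation_example():
    # Added as an illustrative example; it assumes the "|" alternation operator
    # composes with "+" and "?" inside a group as the tests above suggest.
    (nfa, q0, f) = thompson_compile_nfa("(a|b)+c?")
    for w in ["a", "b", "ab", "ba", "abc", "bbac"]:
        assert accepts(w, nfa) == True
    for w in ["", "c", "abcc", "d"]:
        assert accepts(w, nfa) == False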
|
the-stack_106_19449
|
import os
from typing import Dict
from typing import List
from typing import cast
from cleo.helpers import argument
from cleo.helpers import option
from poetry.console.application import Application
from poetry.console.commands.init import InitCommand
from poetry.console.commands.update import UpdateCommand
class PluginAddCommand(InitCommand):
name = "plugin add"
description = "Adds new plugins."
arguments = [
argument("plugins", "The names of the plugins to install.", multiple=True),
]
options = [
option(
"dry-run",
None,
"Output the operations but do not execute anything (implicitly enables --verbose).",
)
]
help = """
The <c1>plugin add</c1> command installs Poetry plugins globally.
It works similarly to the <c1>add</c1> command:
If you do not specify a version constraint, poetry will choose a suitable one based on the available package versions.
You can specify a package in the following forms:
- A single name (<b>requests</b>)
- A name and a constraint (<b>requests@^2.23.0</b>)
- A git url (<b>git+https://github.com/python-poetry/poetry.git</b>)
- A git url with a revision (<b>git+https://github.com/python-poetry/poetry.git#develop</b>)
- A git SSH url (<b>git+ssh://github.com/python-poetry/poetry.git</b>)
- A git SSH url with a revision (<b>git+ssh://github.com/python-poetry/poetry.git#develop</b>)
- A file path (<b>../my-package/my-package.whl</b>)
- A directory (<b>../my-package/</b>)
- A url (<b>https://example.com/packages/my-package-0.1.0.tar.gz</b>)\
"""
def handle(self) -> int:
from pathlib import Path
import tomlkit
from cleo.io.inputs.string_input import StringInput
from cleo.io.io import IO
from poetry.core.pyproject.toml import PyProjectTOML
from poetry.core.semver.helpers import parse_constraint
from poetry.factory import Factory
from poetry.packages.project_package import ProjectPackage
from poetry.repositories.installed_repository import InstalledRepository
from poetry.utils.env import EnvManager
plugins = self.argument("plugins")
# Plugins should be installed in the system env to be globally available
system_env = EnvManager.get_system_env(naive=True)
env_dir = Path(os.getenv("POETRY_HOME") or system_env.path)
# We check for the plugins existence first.
if env_dir.joinpath("pyproject.toml").exists():
pyproject = tomlkit.loads(
env_dir.joinpath("pyproject.toml").read_text(encoding="utf-8")
)
poetry_content = pyproject["tool"]["poetry"]
existing_packages = self.get_existing_packages_from_input(
plugins, poetry_content, "dependencies"
)
if existing_packages:
self.notify_about_existing_packages(existing_packages)
plugins = [plugin for plugin in plugins if plugin not in existing_packages]
if not plugins:
return 0
plugins = self._determine_requirements(plugins)
# We retrieve the packages installed in the system environment.
# We assume that this environment will be a self contained virtual environment
# built by the official installer or by pipx.
# If not, it might lead to side effects since other installed packages
# might not be required by Poetry but still taken into account when resolving dependencies.
installed_repository = InstalledRepository.load(
system_env, with_dependencies=True
)
root_package = None
for package in installed_repository.packages:
if package.name == "poetry":
root_package = ProjectPackage(package.name, package.version)
for dependency in package.requires:
root_package.add_dependency(dependency)
break
root_package.python_versions = ".".join(
str(v) for v in system_env.version_info[:3]
)
# We create a `pyproject.toml` file based on all the information
# we have about the current environment.
if not env_dir.joinpath("pyproject.toml").exists():
Factory.create_pyproject_from_package(root_package, env_dir)
# We add the plugins to the dependencies section of the previously
# created `pyproject.toml` file
pyproject = PyProjectTOML(env_dir.joinpath("pyproject.toml"))
poetry_content = pyproject.poetry_config
poetry_dependency_section = poetry_content["dependencies"]
plugin_names = []
for plugin in plugins:
if "version" in plugin:
# Validate version constraint
parse_constraint(plugin["version"])
constraint = tomlkit.inline_table()
for name, value in plugin.items():
if name == "name":
continue
constraint[name] = value
if len(constraint) == 1 and "version" in constraint:
constraint = constraint["version"]
poetry_dependency_section[plugin["name"]] = constraint
plugin_names.append(plugin["name"])
pyproject.save()
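        # Illustrative example (values are hypothetical): a plain requirement such
        # as {"name": "poetry-plugin-export", "version": "^1.0"} collapses to
        # poetry-plugin-export = "^1.0" in pyproject.toml, while a git requirement
        # such as {"name": "my-plugin", "git": "https://github.com/org/my-plugin.git"}
        # keeps the inline-table form my-plugin = {git = "https://github.com/org/my-plugin.git"}.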
# From this point forward, all the logic will be deferred to
# the update command, by using the previously created `pyproject.toml`
# file.
application = cast(Application, self.application)
update_command: UpdateCommand = cast(UpdateCommand, application.find("update"))
# We won't go through the event dispatching done by the application
# so we need to configure the command manually
update_command.set_poetry(Factory().create_poetry(env_dir))
update_command.set_env(system_env)
application._configure_installer(update_command, self._io)
argv = ["update"] + plugin_names
if self.option("dry-run"):
argv.append("--dry-run")
return update_command.run(
IO(
StringInput(" ".join(argv)),
self._io.output,
self._io.error_output,
)
)
def get_existing_packages_from_input(
self, packages: List[str], poetry_content: Dict, target_section: str
) -> List[str]:
existing_packages = []
for name in packages:
for key in poetry_content[target_section]:
if key.lower() == name.lower():
existing_packages.append(name)
return existing_packages
def notify_about_existing_packages(self, existing_packages: List[str]) -> None:
self.line(
"The following plugins are already present in the "
"<c2>pyproject.toml</c2> file and will be skipped:\n"
)
for name in existing_packages:
self.line(f" • <c1>{name}</c1>")
self.line(
"\nIf you want to update it to the latest compatible version, "
"you can use `<c2>poetry plugin update package</c2>`.\n"
"If you prefer to upgrade it to the latest available version, "
"you can use `<c2>poetry plugin add package@latest</c2>`.\n"
)
|
the-stack_106_19450
|
from django.shortcuts import render,redirect
from django.http import JsonResponse,HttpResponse
from django.contrib.auth import login
from django.conf import settings
from django.views import View
from QQLoginTool.QQtool import OAuthQQ
from oauth.models import OAuthQQUser
from django_redis import get_redis_connection
from users.models import User
from itsdangerous import TimedJSONWebSignatureSerializer as TJS
class QQLoginView(View):
def get(self, request):
        # get the redirect target to use after a successful login
next = request.GET.get('next')
if next is None:
next = '/'
        # 1. create the QQ OAuth helper object
qq = OAuthQQ(client_id=settings.QQ_CLIENT_ID, client_secret=settings.QQ_CLIENT_SECRET,
redirect_uri=settings.QQ_REDIRECT_URI, state=next)
        # 2. generate the QQ login redirect URL
login_url = qq.get_qq_url()
        # 3. return the login URL
return JsonResponse({'login_url': login_url})
class QQCallBackView(View):
def get(self,request):
        # get the user's authorization code and state
code=request.GET.get('code')
state=request.GET.get('state')
if code is None or state is None:
            return JsonResponse({'error': 'missing parameters'}, status=400)
        # create the QQ OAuth helper object
        qq = OAuthQQ(client_id=settings.QQ_CLIENT_ID, client_secret=settings.QQ_CLIENT_SECRET,
                     redirect_uri=settings.QQ_REDIRECT_URI, state=state)
        # exchange the authorization code for an access_token
try:
access_token=qq.get_access_token(code)
open_id=qq.get_open_id(access_token)
            # fetch the openid associated with this access_token
except:
            return JsonResponse({'error': 'network error'}, status=400)
        # check whether this QQ account is already bound to a site user
try:
            # check the database for an existing binding for this QQ openid
qq_user=OAuthQQUser.objects.get(openid=open_id)
except:
tjs = TJS(settings.SECRET_KEY, 300)
openid = tjs.dumps({'openid': open_id}).decode()
return render(request, 'oauth_callback.html', {'token': openid})
login(request,qq_user.user)
        # no exception was raised, so a bound QQ user was found
        # write the username into a cookie so the page can display it
response=redirect(state)
response.set_cookie('username',qq_user.user.username,60*60*2)
return response
def post(self,request):
        # bind the QQ account to a site user
data=request.POST
mobile=data.get('mobile')
password=data.get('pwd')
sms_code=data.get('sms_code')
openid=data.get('access_token')
        # validate the submitted data
        # check the SMS verification code
client=get_redis_connection('verfycode')
real_sms_code=client.get('sms_code_%s'%mobile)
if real_sms_code is None:
            return render(request, 'oauth_callback.html', {'errmsg': 'the SMS verification code has expired'})
        if sms_code != real_sms_code.decode():
            return render(request, 'oauth_callback.html', {'errmsg': 'incorrect SMS verification code'})
        # 3. bind the accounts
try:
user=User.objects.get(mobile=mobile)
            # check that the existing user's password is correct
if not user.check_password(password):
                return render(request, 'oauth_callback.html', {'errmsg': 'incorrect password'})
except:
            # this mobile number is not registered yet, so create a new site user
user=User.objects.create_user(username=mobile,mobile=mobile,password=password)
tjs = TJS(settings.SECRET_KEY, 300)
try:
data = tjs.loads(openid)
except:
            return render(request, 'oauth_callback.html', {'errmsg': 'invalid or expired openid'})
openid = data.get('openid')
OAuthQQUser.objects.create(openid=openid, user=user)
return redirect('/')
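
# Illustrative sketch (added for clarity; not used by the views above): the openid
# is signed with a short-lived itsdangerous serializer before being rendered into
# the binding form, then verified when the form is posted back. The secret below
# is a placeholder; the views use settings.SECRET_KEY and a 300-second expiry.
def _example_openid_roundtrip(open_id, secret_key='placeholder-secret'):
    tjs = TJS(secret_key, 300)
    token = tjs.dumps({'openid': open_id}).decode()  # signed token, expires after 300s
    data = tjs.loads(token)  # raises an exception if tampered with or expired
    return data.get('openid') == open_id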
|
the-stack_106_19451
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_xpath,
compat_urlparse,
)
from ..utils import (
ExtractorError,
find_xpath_attr,
fix_xml_ampersands,
float_or_none,
HEADRequest,
RegexNotFoundError,
sanitized_Request,
strip_or_none,
timeconvert,
try_get,
unescapeHTML,
update_url_query,
url_basename,
get_domain,
xpath_text,
)
def _media_xml_tag(tag):
return '{http://search.yahoo.com/mrss/}%s' % tag
class MTVServicesInfoExtractor(InfoExtractor):
_MOBILE_TEMPLATE = None
_LANG = None
@staticmethod
def _id_from_uri(uri):
return uri.split(':')[-1]
@staticmethod
def _remove_template_parameter(url):
# Remove the templates, like &device={device}
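        # For example (illustrative values, not from the original source):
        #   'http://feed.example.com/path?uri=mgid:foo&device={device}&ep={ep}'
        # becomes
        #   'http://feed.example.com/path?uri=mgid:foo'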
return re.sub(r'&[^=]*?={.*?}(?=(&|$))', '', url)
def _get_feed_url(self, uri, url=None):
return self._FEED_URL
def _get_thumbnail_url(self, uri, itemdoc):
search_path = '%s/%s' % (_media_xml_tag('group'), _media_xml_tag('thumbnail'))
thumb_node = itemdoc.find(search_path)
if thumb_node is None:
return None
return thumb_node.get('url') or thumb_node.text or None
def _extract_mobile_video_formats(self, mtvn_id):
webpage_url = self._MOBILE_TEMPLATE % mtvn_id
req = sanitized_Request(webpage_url)
# Otherwise we get a webpage that would execute some javascript
req.add_header('User-Agent', 'curl/7')
webpage = self._download_webpage(req, mtvn_id,
'Downloading mobile page')
metrics_url = unescapeHTML(self._search_regex(r'<a href="(http://metrics.+?)"', webpage, 'url'))
req = HEADRequest(metrics_url)
response = self._request_webpage(req, mtvn_id, 'Resolving url')
url = response.geturl()
# Transform the url to get the best quality:
url = re.sub(r'.+pxE=mp4', 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=0+_pxK=18639+_pxE=mp4', url, 1)
return [{'url': url, 'ext': 'mp4'}]
def _extract_video_formats(self, mdoc, mtvn_id, video_id):
if re.match(r'.*/(error_country_block\.swf|geoblock\.mp4|copyright_error\.flv(?:\?geo\b.+?)?)$', mdoc.find('.//src').text) is not None:
if mtvn_id is not None and self._MOBILE_TEMPLATE is not None:
self.to_screen('The normal version is not available from your '
'country, trying with the mobile version')
return self._extract_mobile_video_formats(mtvn_id)
raise ExtractorError('This video is not available from your country.',
expected=True)
formats = []
for rendition in mdoc.findall('.//rendition'):
if rendition.get('method') == 'hls':
hls_url = rendition.find('./src').text
formats.extend(self._extract_m3u8_formats(
hls_url, video_id, ext='mp4', entry_protocol='m3u8_native',
m3u8_id='hls', fatal=False))
else:
# fms
try:
_, _, ext = rendition.attrib['type'].partition('/')
rtmp_video_url = rendition.find('./src').text
if 'error_not_available.swf' in rtmp_video_url:
raise ExtractorError(
'%s said: video is not available' % self.IE_NAME,
expected=True)
if rtmp_video_url.endswith('siteunavail.png'):
continue
formats.extend([{
'ext': 'flv' if rtmp_video_url.startswith('rtmp') else ext,
'url': rtmp_video_url,
'format_id': '-'.join(filter(None, [
'rtmp' if rtmp_video_url.startswith('rtmp') else None,
rendition.get('bitrate')])),
'width': int(rendition.get('width')),
'height': int(rendition.get('height')),
}])
except (KeyError, TypeError):
raise ExtractorError('Invalid rendition field.')
if formats:
self._sort_formats(formats)
return formats
def _extract_subtitles(self, mdoc, mtvn_id):
subtitles = {}
for transcript in mdoc.findall('.//transcript'):
if transcript.get('kind') != 'captions':
continue
lang = transcript.get('srclang')
for typographic in transcript.findall('./typographic'):
sub_src = typographic.get('src')
if not sub_src:
continue
ext = typographic.get('format')
if ext == 'cea-608':
ext = 'scc'
subtitles.setdefault(lang, []).append({
'url': compat_str(sub_src),
'ext': ext
})
return subtitles
def _get_video_info(self, itemdoc, use_hls=True):
uri = itemdoc.find('guid').text
video_id = self._id_from_uri(uri)
self.report_extraction(video_id)
content_el = itemdoc.find('%s/%s' % (_media_xml_tag('group'), _media_xml_tag('content')))
mediagen_url = self._remove_template_parameter(content_el.attrib['url'])
mediagen_url = mediagen_url.replace('device={device}', '')
if 'acceptMethods' not in mediagen_url:
mediagen_url += '&' if '?' in mediagen_url else '?'
mediagen_url += 'acceptMethods='
mediagen_url += 'hls' if use_hls else 'fms'
mediagen_doc = self._download_xml(
mediagen_url, video_id, 'Downloading video urls', fatal=False)
if mediagen_doc is False:
return None
item = mediagen_doc.find('./video/item')
if item is not None and item.get('type') == 'text':
message = '%s returned error: ' % self.IE_NAME
if item.get('code') is not None:
message += '%s - ' % item.get('code')
message += item.text
raise ExtractorError(message, expected=True)
description = strip_or_none(xpath_text(itemdoc, 'description'))
timestamp = timeconvert(xpath_text(itemdoc, 'pubDate'))
title_el = None
if title_el is None:
title_el = find_xpath_attr(
itemdoc, './/{http://search.yahoo.com/mrss/}category',
'scheme', 'urn:mtvn:video_title')
if title_el is None:
title_el = itemdoc.find(compat_xpath('.//{http://search.yahoo.com/mrss/}title'))
if title_el is None:
title_el = itemdoc.find(compat_xpath('.//title'))
if title_el.text is None:
title_el = None
title = title_el.text
if title is None:
raise ExtractorError('Could not find video title')
title = title.strip()
# This a short id that's used in the webpage urls
mtvn_id = None
mtvn_id_node = find_xpath_attr(itemdoc, './/{http://search.yahoo.com/mrss/}category',
'scheme', 'urn:mtvn:id')
if mtvn_id_node is not None:
mtvn_id = mtvn_id_node.text
formats = self._extract_video_formats(mediagen_doc, mtvn_id, video_id)
# Some parts of complete video may be missing (e.g. missing Act 3 in
# http://www.southpark.de/alle-episoden/s14e01-sexual-healing)
if not formats:
return None
self._sort_formats(formats)
return {
'title': title,
'formats': formats,
'subtitles': self._extract_subtitles(mediagen_doc, mtvn_id),
'id': video_id,
'thumbnail': self._get_thumbnail_url(uri, itemdoc),
'description': description,
'duration': float_or_none(content_el.attrib.get('duration')),
'timestamp': timestamp,
}
def _get_feed_query(self, uri):
data = {'uri': uri}
if self._LANG:
data['lang'] = self._LANG
return data
def _get_videos_info(self, uri, use_hls=True, url=None):
video_id = self._id_from_uri(uri)
feed_url = self._get_feed_url(uri, url)
info_url = update_url_query(feed_url, self._get_feed_query(uri))
return self._get_videos_info_from_url(info_url, video_id, use_hls)
def _get_videos_info_from_url(self, url, video_id, use_hls=True):
idoc = self._download_xml(
url, video_id,
'Downloading info', transform_source=fix_xml_ampersands)
title = xpath_text(idoc, './channel/title')
description = xpath_text(idoc, './channel/description')
entries = []
for item in idoc.findall('.//item'):
info = self._get_video_info(item, use_hls)
if info:
entries.append(info)
return self.playlist_result(
entries, playlist_title=title, playlist_description=description)
def _extract_triforce_mgid(self, webpage, data_zone=None, video_id=None):
triforce_feed = self._parse_json(self._search_regex(
r'triforceManifestFeed\s*=\s*({.+?})\s*;\s*\n', webpage,
'triforce feed', default='{}'), video_id, fatal=False)
data_zone = self._search_regex(
r'data-zone=(["\'])(?P<zone>.+?_lc_promo.*?)\1', webpage,
'data zone', default=data_zone, group='zone')
feed_url = try_get(
triforce_feed, lambda x: x['manifest']['zones'][data_zone]['feed'],
compat_str)
if not feed_url:
return
feed = self._download_json(feed_url, video_id, fatal=False)
if not feed:
return
return try_get(feed, lambda x: x['result']['data']['id'], compat_str)
def _extract_new_triforce_mgid(self, webpage, url='', video_id=None):
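        # Fetch the triforce v8 manifest for the page URL (following a single
        # redirect when the manifest requests one) and build an mgid from the
        # itemId reported in the manifest.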
if url == '':
return
domain = get_domain(url)
if domain is None:
raise ExtractorError(
'[%s] could not get domain' % self.IE_NAME,
expected=True)
url = url.replace("https://", "http://")
enc_url = compat_urlparse.quote(url, safe='')
_TRIFORCE_V8_TEMPLATE = 'https://%s/feeds/triforce/manifest/v8?url=%s'
triforce_manifest_url = _TRIFORCE_V8_TEMPLATE % (domain, enc_url)
manifest = self._download_json(triforce_manifest_url, video_id, fatal=False)
if manifest:
if manifest.get('manifest').get('type') == 'redirect':
self.to_screen('Found a redirect. Downloading manifest from new location')
new_loc = manifest.get('manifest').get('newLocation')
new_loc = new_loc.replace("https://", "http://")
enc_new_loc = compat_urlparse.quote(new_loc, safe='')
triforce_manifest_new_loc = _TRIFORCE_V8_TEMPLATE % (domain, enc_new_loc)
manifest = self._download_json(triforce_manifest_new_loc, video_id, fatal=False)
item_id = try_get(manifest, lambda x: x['manifest']['reporting']['itemId'], compat_str)
if not item_id:
self.to_screen('No id found!')
return
# 'episode' can be anything. 'content' is used often as well
_MGID_TEMPLATE = 'mgid:arc:episode:%s:%s'
mgid = _MGID_TEMPLATE % (domain, item_id)
return mgid
def _extract_mgid(self, webpage, url, title=None, data_zone=None):
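        # Try several sources in turn: the og:video URL basename, the
        # window.__DATA__ JSON blob, data-mgid / swfobject markup, the
        # sm4:video:embed meta tag and, finally, the triforce manifests.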
try:
# the url can be http://media.mtvnservices.com/fb/{mgid}.swf
# or http://media.mtvnservices.com/{mgid}
og_url = self._og_search_video_url(webpage)
mgid = url_basename(og_url)
if mgid.endswith('.swf'):
mgid = mgid[:-4]
except RegexNotFoundError:
mgid = None
if not title:
title = url_basename(url)
try:
window_data = self._parse_json(self._search_regex(
r'(?s)window.__DATA__ = (?P<json>{.+});', webpage,
'JSON Window Data', default=None, fatal=False, group='json'), title, fatal=False)
main_container = None
for i in range(len(window_data['children'])):
if window_data['children'][i]['type'] == 'MainContainer':
main_container = window_data['children'][i]
mgid = main_container['children'][0]['props']['media']['video']['config']['uri']
except (KeyError, IndexError, TypeError):
pass
if mgid is None or ':' not in mgid:
mgid = self._search_regex(
[r'data-mgid="(.*?)"', r'swfobject\.embedSWF\(".*?(mgid:.*?)"'],
webpage, 'mgid', default=None)
if not mgid:
sm4_embed = self._html_search_meta(
'sm4:video:embed', webpage, 'sm4 embed', default='')
mgid = self._search_regex(
r'embed/(mgid:.+?)["\'&?/]', sm4_embed, 'mgid', default=None)
if not mgid:
mgid = self._extract_new_triforce_mgid(webpage, url)
if not mgid:
mgid = self._extract_triforce_mgid(webpage, data_zone)
return mgid
def _real_extract(self, url):
title = url_basename(url)
webpage = self._download_webpage(url, title)
mgid = self._extract_mgid(webpage, url, title=title)
videos_info = self._get_videos_info(mgid, url=url)
return videos_info
class MTVServicesEmbeddedIE(MTVServicesInfoExtractor):
IE_NAME = 'mtvservices:embedded'
_VALID_URL = r'https?://media\.mtvnservices\.com/embed/(?P<mgid>.+?)(\?|/|$)'
_TEST = {
# From http://www.thewrap.com/peter-dinklage-sums-up-game-of-thrones-in-45-seconds-video/
'url': 'http://media.mtvnservices.com/embed/mgid:uma:video:mtv.com:1043906/cp~vid%3D1043906%26uri%3Dmgid%3Auma%3Avideo%3Amtv.com%3A1043906',
'md5': 'cb349b21a7897164cede95bd7bf3fbb9',
'info_dict': {
'id': '1043906',
'ext': 'mp4',
'title': 'Peter Dinklage Sums Up \'Game Of Thrones\' In 45 Seconds',
'description': '"Sexy sexy sexy, stabby stabby stabby, beautiful language," says Peter Dinklage as he tries summarizing "Game of Thrones" in under a minute.',
'timestamp': 1400126400,
'upload_date': '20140515',
},
}
@staticmethod
def _extract_url(webpage):
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//media.mtvnservices.com/embed/.+?)\1', webpage)
if mobj:
return mobj.group('url')
def _get_feed_url(self, uri):
video_id = self._id_from_uri(uri)
config = self._download_json(
'http://media.mtvnservices.com/pmt/e1/access/index.html?uri=%s&configtype=edge' % uri, video_id)
return self._remove_template_parameter(config['feedWithQueryParams'])
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
mgid = mobj.group('mgid')
return self._get_videos_info(mgid)
class MTVIE(MTVServicesInfoExtractor):
IE_NAME = 'mtv'
_VALID_URL = r'https?://(?:www\.)?mtv\.com/(?:video-clips|(?:full-)?episodes)/(?P<id>[^/?#.]+)'
_FEED_URL = 'http://www.mtv.com/feeds/mrss/'
_TESTS = [{
'url': 'http://www.mtv.com/video-clips/vl8qof/unlocking-the-truth-trailer',
'md5': '1edbcdf1e7628e414a8c5dcebca3d32b',
'info_dict': {
'id': '5e14040d-18a4-47c4-a582-43ff602de88e',
'ext': 'mp4',
'title': 'Unlocking The Truth|July 18, 2016|1|101|Trailer',
'description': '"Unlocking the Truth" premieres August 17th at 11/10c.',
'timestamp': 1468846800,
'upload_date': '20160718',
},
}, {
'url': 'http://www.mtv.com/full-episodes/94tujl/unlocking-the-truth-gates-of-hell-season-1-ep-101',
'only_matching': True,
}, {
'url': 'http://www.mtv.com/episodes/g8xu7q/teen-mom-2-breaking-the-wall-season-7-ep-713',
'only_matching': True,
}]
class MTVJapanIE(MTVServicesInfoExtractor):
IE_NAME = 'mtvjapan'
_VALID_URL = r'https?://(?:www\.)?mtvjapan\.com/videos/(?P<id>[0-9a-z]+)'
_TEST = {
'url': 'http://www.mtvjapan.com/videos/prayht/fresh-info-cadillac-escalade',
'info_dict': {
'id': 'bc01da03-6fe5-4284-8880-f291f4e368f5',
'ext': 'mp4',
'title': '【Fresh Info】Cadillac ESCALADE Sport Edition',
},
'params': {
'skip_download': True,
},
}
_GEO_COUNTRIES = ['JP']
_FEED_URL = 'http://feeds.mtvnservices.com/od/feed/intl-mrss-player-feed'
def _get_feed_query(self, uri):
return {
'arcEp': 'mtvjapan.com',
'mgid': uri,
}
class MTVVideoIE(MTVServicesInfoExtractor):
IE_NAME = 'mtv:video'
_VALID_URL = r'''(?x)^https?://
(?:(?:www\.)?mtv\.com/videos/.+?/(?P<videoid>[0-9]+)/[^/]+$|
m\.mtv\.com/videos/video\.rbml\?.*?id=(?P<mgid>[^&]+))'''
_FEED_URL = 'http://www.mtv.com/player/embed/AS3/rss/'
_TESTS = [
{
'url': 'http://www.mtv.com/videos/misc/853555/ours-vh1-storytellers.jhtml',
'md5': '850f3f143316b1e71fa56a4edfd6e0f8',
'info_dict': {
'id': '853555',
'ext': 'mp4',
'title': 'Taylor Swift - "Ours (VH1 Storytellers)"',
'description': 'Album: Taylor Swift performs "Ours" for VH1 Storytellers at Harvey Mudd College.',
'timestamp': 1352610000,
'upload_date': '20121111',
},
},
]
def _get_thumbnail_url(self, uri, itemdoc):
return 'http://mtv.mtvnimages.com/uri/' + uri
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('videoid')
uri = mobj.groupdict().get('mgid')
if uri is None:
webpage = self._download_webpage(url, video_id)
# Some videos come from Vevo.com
m_vevo = re.search(
r'(?s)isVevoVideo = true;.*?vevoVideoId = "(.*?)";', webpage)
if m_vevo:
vevo_id = m_vevo.group(1)
self.to_screen('Vevo video detected: %s' % vevo_id)
return self.url_result('vevo:%s' % vevo_id, ie='Vevo')
uri = self._html_search_regex(r'/uri/(.*?)\?', webpage, 'uri')
return self._get_videos_info(uri)
class MTVDEIE(MTVServicesInfoExtractor):
IE_NAME = 'mtv.de'
_VALID_URL = r'https?://(?:www\.)?mtv\.de/(?:musik/videoclips|folgen|news)/(?P<id>[0-9a-z]+)'
_TESTS = [{
'url': 'http://www.mtv.de/musik/videoclips/2gpnv7/Traum',
'info_dict': {
'id': 'd5d472bc-f5b7-11e5-bffd-a4badb20dab5',
'ext': 'mp4',
'title': 'Traum',
'description': 'Traum',
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'Blocked at Travis CI',
}, {
# mediagen URL without query (e.g. http://videos.mtvnn.com/mediagen/e865da714c166d18d6f80893195fcb97)
'url': 'http://www.mtv.de/folgen/6b1ylu/teen-mom-2-enthuellungen-S5-F1',
'info_dict': {
'id': '1e5a878b-31c5-11e7-a442-0e40cf2fc285',
'ext': 'mp4',
'title': 'Teen Mom 2',
'description': 'md5:dc65e357ef7e1085ed53e9e9d83146a7',
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'Blocked at Travis CI',
}, {
'url': 'http://www.mtv.de/news/glolix/77491-mtv-movies-spotlight--pixels--teil-3',
'info_dict': {
'id': 'local_playlist-4e760566473c4c8c5344',
'ext': 'mp4',
'title': 'Article_mtv-movies-spotlight-pixels-teil-3_short-clips_part1',
'description': 'MTV Movies Supercut',
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'Das Video kann zur Zeit nicht abgespielt werden.',
}]
_GEO_COUNTRIES = ['DE']
_FEED_URL = 'http://feeds.mtvnservices.com/od/feed/intl-mrss-player-feed'
def _get_feed_query(self, uri):
return {
'arcEp': 'mtv.de',
'mgid': uri,
}
|
the-stack_106_19452
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2016, Silvio Peroni <[email protected]>
#
# Permission to use, copy, modify, and/or distribute this software for any purpose
# with or without fee is hereby granted, provided that the above copyright notice
# and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT,
# OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
# DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
# SOFTWARE.
import unittest
from rdflib import Literal, RDF
from oc_ocdm.graph.graph_entity import GraphEntity
from oc_ocdm.graph.graph_set import GraphSet
class TestDiscourseElement(unittest.TestCase):
resp_agent = 'http://resp_agent.test/'
@classmethod
def setUpClass(cls) -> None:
cls.graph_set = GraphSet("http://test/", "./info_dir/", "", False)
def setUp(self):
self.rp = self.graph_set.add_rp(self.resp_agent)
self.pl = self.graph_set.add_pl(self.resp_agent)
self.de1 = self.graph_set.add_de(self.resp_agent)
self.de2 = self.graph_set.add_de(self.resp_agent)
def test_has_title(self):
title = "DiscourseElement"
result = self.de1.has_title(title)
self.assertIsNone(result)
triple = self.de1.res, GraphEntity.iri_title, Literal(title)
self.assertIn(triple, self.de1.g)
def test_contains_discourse_element(self):
result = self.de1.contains_discourse_element(self.de2)
self.assertIsNone(result)
triple = self.de1.res, GraphEntity.iri_contains_de, self.de2.res
self.assertIn(triple, self.de1.g)
def test_has_next_de(self):
result = self.de1.has_next_de(self.de2)
self.assertIsNone(result)
triple = self.de1.res, GraphEntity.iri_has_next, self.de2.res
self.assertIn(triple, self.de1.g)
def test_is_context_of_rp(self):
result = self.de1.is_context_of_rp(self.rp)
self.assertIsNone(result)
triple = self.de1.res, GraphEntity.iri_is_context_of, self.rp.res
self.assertIn(triple, self.de1.g)
def test_is_context_of_pl(self):
result = self.de1.is_context_of_pl(self.pl)
self.assertIsNone(result)
triple = self.de1.res, GraphEntity.iri_is_context_of, self.pl.res
self.assertIn(triple, self.de1.g)
def test_has_content(self):
content = "Content"
result = self.de1.has_content(content)
self.assertIsNone(result)
triple = self.de1.res, GraphEntity.iri_has_content, Literal(content)
self.assertIn(triple, self.de1.g)
def test_create_section(self):
result = self.de1.create_section()
self.assertIsNone(result)
triple = self.de1.res, RDF.type, GraphEntity.iri_section
self.assertIn(triple, self.de1.g)
def test_create_section_title(self):
result = self.de1.create_section_title()
self.assertIsNone(result)
triple = self.de1.res, RDF.type, GraphEntity.iri_section_title
self.assertIn(triple, self.de1.g)
def test_create_paragraph(self):
result = self.de1.create_paragraph()
self.assertIsNone(result)
triple = self.de1.res, RDF.type, GraphEntity.iri_paragraph
self.assertIn(triple, self.de1.g)
def test_create_sentence(self):
result = self.de1.create_sentence()
self.assertIsNone(result)
triple = self.de1.res, RDF.type, GraphEntity.iri_sentence
self.assertIn(triple, self.de1.g)
def test_create_text_chunk(self):
result = self.de1.create_text_chunk()
self.assertIsNone(result)
triple = self.de1.res, RDF.type, GraphEntity.iri_text_chunk
self.assertIn(triple, self.de1.g)
def test_create_table(self):
result = self.de1.create_table()
self.assertIsNone(result)
triple = self.de1.res, RDF.type, GraphEntity.iri_table
self.assertIn(triple, self.de1.g)
def test_create_footnote(self):
result = self.de1.create_footnote()
self.assertIsNone(result)
triple = self.de1.res, RDF.type, GraphEntity.iri_footnote
self.assertIn(triple, self.de1.g)
def test_create_caption(self):
result = self.de1.create_caption()
self.assertIsNone(result)
triple = self.de1.res, RDF.type, GraphEntity.iri_caption
self.assertIn(triple, self.de1.g)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_19453
|
#!/usr/bin/env python
import pandas as pd
import os
import tensorflow as tf
from tensorflow import keras  # assumed provider of `keras` used for the callbacks below
from utils import load_dataset, data_augment
from config import *
from networks import *
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
import math
train_data = pd.read_csv(train_path)
train_data.head()
X_data = train_data["image_id"].apply(lambda id: os.path.join(images_path, id + '.jpg'))
y_data = train_data.loc[:, 'healthy':'scab']
X_train, X_test, y_train, y_test = train_test_split(X_data, y_data, test_size=0.20, random_state=42)
train_dataset = (
tf.data.Dataset
.from_tensor_slices((X_train, y_train))
.map(load_dataset)
.map(data_augment)
.repeat()
.shuffle(256)
.batch(batch_size)
)
test_dataset = (
tf.data.Dataset
.from_tensor_slices(X_test)
.map(load_dataset)
.batch(batch_size)
)
classes = y_train.shape[1]
model = my_model(input_shape, classes)
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
model.summary()
if not os.path.exists(models_path):
os.makedirs(models_path)
model.fit(train_dataset,
epochs=epochs,
steps_per_epoch=X_train.shape[0]//batch_size*2,
callbacks=[
keras.callbacks.EarlyStopping(monitor="loss", min_delta=0, patience=7, verbose=0, mode="min"),
keras.callbacks.ModelCheckpoint(
filepath=os.path.join(models_path, 'model_best.hdf5'),
save_weights_only=False,
monitor='loss',
mode='min',
save_best_only=True
)
]
)
steps = math.ceil(X_test.shape[0]/batch_size)
predicted = model.predict(test_dataset, verbose=1, steps=steps)
score = roc_auc_score(y_test, predicted)
print("roc auc score:", score)
model.save(os.path.join(models_path, 'model.hdf5'))
|
the-stack_106_19454
|
import pandas as pd
import mol_sim
def test_input_data():
'''Tests input_data function in mol_sim.py'''
input_df = pd.read_csv('playground_df_cleaned_kegg_with_smiles.csv')
test_df = mol_sim.input_data(input_df)
assert isinstance(test_df, pd.DataFrame) == True, """TypeError,
function should return a pandas dataframe"""
#assert
return '1/1 tests successful'
def test_fingerprint_products():
'''Tests fingerprint_products function in mol_sim.py'''
input_df = pd.read_csv('playground_df_cleaned_kegg_with_smiles.csv')
test_df = mol_sim.input_data(input_df)
assert isinstance(mol_sim.fingerprint_products(test_df), pd.DataFrame) == True, """TypeError,
function should return a pandas dataframe"""
#assert
return '1/1 tests successful'
def test_split_by_enzyme():
'''Tests split_by_enzyme function in mol_sim.py'''
input_df = pd.read_csv('playground_df_cleaned_kegg_with_smiles.csv')
test_df = mol_sim.fingerprint_products(mol_sim.input_data(input_df))
    assert isinstance(mol_sim.split_by_enzyme(test_df), list) == True, """TypeError,
    function should return a list"""
#assert
return '1/1 tests successful'
def test_sim_i_j():
'''Tests sim_i_j function in mol_sim.py'''
input_df = pd.read_csv('playground_df_cleaned_kegg_with_smiles.csv')
test_df = mol_sim.fingerprint_products(mol_sim.input_data(input_df))
A = test_df.iloc[0]
#B = test_df.iloc[1]
#C = test_df.iloc[2]
assert mol_sim.sim_i_j(A, A) == 1, "Self correlation is broken"
#assert mol_sim.sim_i_j(A, B) == -1, "Standard correlation is broken"
#assert mol_sim.sim_i_j(A, C) == 0, "Standard correlation is broken"
return '1/1 tests successful'
def test_sim_i_all():
'''Test sim_i_all functionin mol_sim.py'''
input_df = pd.read_csv('playground_df_cleaned_kegg_with_smiles.csv')
test_df = mol_sim.fingerprint_products(mol_sim.input_data(input_df))
metric = pd.DataFrame()
assert metric.empty == True, """ShapeError, input metric dataframe
should be initialized as empty"""
for index, row in test_df.iterrows():
assert mol_sim.sim_i_all(test_df, index, row, metric) == None, """OutputError, function
shouldn't return anything"""
assert metric[index].all() >= 0 and metric[index].all() <= 1.0, """ValueError,
metric should be between 0 and 1"""
return "3/3 Tests successful"
def test_sim_metric():
'''Test sim_i_all functionin mol_sim.py'''
input_df = pd.read_csv('playground_df_cleaned_kegg_with_smiles.csv')
test_df = mol_sim.fingerprint_products(mol_sim.input_data(input_df))
assert isinstance(mol_sim.sim_metric(test_df), pd.DataFrame) == True, """TypeError,
function should return a dataframe"""
assert mol_sim.sim_metric(test_df).isnull().values.any() == False, """ValueError,
function-generated dataframe should not contain null values"""
#assert test_df.size == mol_sim.sim_metric(test_df).size, """ShapeError,
#function-generated dataframe should be the same size as input dataframe"""
return "2/2 Tests successful"
def test_calculate_dist():
df = pd.read_csv('playground_df_cleaned_kegg_with_smiles.csv')
test_df = mol_sim.calculate_dist(df)
assert isinstance(test_df, pd.DataFrame) == True, """TypeError,
function should return a dataframe"""
#assert len(test_df.columns) == 3+len(df.columns), """ShapeError,
#function should add 3 columns to dataframe"""
return "1/1 Tests successful"
|
the-stack_106_19457
|
#!/usr/bin/python3
import os
import sys
import matplotlib.pyplot as plt
import cv2
import numpy as np
#image = cv2.imread("/Users/sam/code/Hackathon/InnerOuter/0-0/12.250-CF716-U02822-35625-2017-11-17-NA_B1.JPG")
#image = cv2.imread("/Users/sam/code/Hackathon/InnerOuter/0-0/DSC01277.JPG")
def get_circles(filename, circle_folder):
image = cv2.imread(filename,0)
#gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Create mask
height,width = image.shape
mask = np.zeros((height,width), np.uint8)
gray = cv2.GaussianBlur(image,(5,5),0)
gray = cv2.medianBlur(gray,5)
gimage = cv2.adaptiveThreshold(gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,3.5)
circles = cv2.HoughCircles(gray,cv2.HOUGH_GRADIENT,1,145,param1=150,param2=22 ,minRadius=50,maxRadius=150)
#circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1.5, 450)
# circles = np.uint16(np.around(circles))
    if circles is not None:
for _,i in enumerate(circles[0,:]):
mask = np.zeros((height,width), np.uint8)
# draw the outer circle
# .circle(image,(i[0],i[1]),i[2],(0,255,0),2)
# draw the center of the circle
# cv2.circle(image,(i[0],i[1]),2,(0,0,255),3)
#Code below this point deals with cropping out found circles and writing the cropped circle
cv2.circle(mask,(i[0],i[1]),i[2],(255,255,255),thickness=-1)
# Copy that image using that mask
masked_data = cv2.bitwise_and(image, image, mask=mask)
# Apply Threshold
__,thresh = cv2.threshold(mask,1,255,cv2.THRESH_BINARY)
# Find Contour
            # findContours returns (image, contours, hierarchy) on OpenCV 3.x and
            # (contours, hierarchy) on 4.x; [-2] selects the contour list in both.
            contours = cv2.findContours(thresh,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)[-2]
x,y,w,h = cv2.boundingRect(contours[0])
# Crop masked_data
crop = masked_data[y:y+h,x:x+w]
#Finaly write circle to file (Probably need to change path or except arguments here)
#print("%s/circle-%s.jpg" % (circle_folder,_))
cv2.imwrite("%s/circle-%s.jpg" % (circle_folder,_), crop)
#Used to add text to image. Maybe useful method to note which circles are which
#font = cv2.FONT_HERSHEY_SIMPLEX
#cv2.putText(image,'OpenCV',(10,500), font, 4,(255,255,255),2,cv2.LINE_AA)
#cv2.imshow('detected circles',image)
#Used to show cropped images
#cv2.imshow('Cropped Eye',crop)
cv2.waitKey(0)
cv2.destroyAllWindows()
return True
else:
return False
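# Illustrative usage (this module defines no CLI entry point; the paths below
# are examples only):
#     get_circles('/path/to/eye_photo.jpg', '/path/to/output_circles')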
|
the-stack_106_19461
|
"""
Tests to verify text fields are rendered correctly.
"""
import os
from django.test.html import parse_html
from tbxforms.tbxforms.helper import FormHelper
from tbxforms.tbxforms.layout import (
Field,
Layout,
Size,
)
from tests.forms import (
CheckboxesForm,
TextInputForm,
)
from tests.utils import (
TEST_DIR,
parse_contents,
parse_form,
render_template,
)
RESULT_DIR = os.path.join(TEST_DIR, "helpers", "results")
def test_default_label_size():
"""Verify a default label size can set for fields."""
form = TextInputForm()
form.helper = FormHelper(form)
form.helper.label_size = Size.MEDIUM
assert parse_form(form) == parse_contents(RESULT_DIR, "label_size.html")
def test_override_default_label_size():
"""Verify a default label size can be overridden on the field."""
form = TextInputForm()
form.helper = FormHelper()
form.helper.label_size = Size.MEDIUM
form.helper.layout = Layout(Field.text("name", label_size=Size.LARGE))
assert parse_form(form) == parse_contents(
RESULT_DIR, "override_label_size.html"
)
def test_default_legend_size():
"""Verify a default legend size can set for fields."""
form = CheckboxesForm()
form.helper = FormHelper(form)
form.helper.legend_size = Size.MEDIUM
assert parse_form(form) == parse_contents(RESULT_DIR, "legend_size.html")
def test_override_default_legend_size():
"""Verify a default legend size can be overridden on the field."""
form = CheckboxesForm()
form.helper = FormHelper()
form.helper.legend_size = Size.MEDIUM
form.helper.layout = Layout(
Field.checkboxes("method", legend_size=Size.LARGE)
)
assert parse_form(form) == parse_contents(
RESULT_DIR, "override_legend_size.html"
)
|
the-stack_106_19463
|
import logging
import reversion
from django.conf import settings
from django.db import transaction
from django.db.models import (
Count,
ExpressionWrapper,
F,
OuterRef,
PositiveSmallIntegerField,
Q,
Subquery,
)
from django.db.models.aggregates import Sum
from django.http import HttpResponse
from django.http.response import JsonResponse
from django.shortcuts import get_object_or_404
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.csrf import csrf_exempt
from django_filters.rest_framework import DjangoFilterBackend
from django_fsm import TransitionNotAllowed
from rest_framework import exceptions as rf_exceptions
from rest_framework import mixins
from rest_framework import permissions as rf_permissions
from rest_framework import status, views
from rest_framework import viewsets as rf_viewsets
from rest_framework.decorators import action
from rest_framework.exceptions import PermissionDenied, ValidationError
from rest_framework.generics import ListAPIView
from rest_framework.response import Response
from waldur_core.core import models as core_models
from waldur_core.core import permissions as core_permissions
from waldur_core.core import utils as core_utils
from waldur_core.core import validators as core_validators
from waldur_core.core import views as core_views
from waldur_core.core.mixins import EagerLoadMixin
from waldur_core.core.utils import is_uuid_like, month_start, order_with_nulls
from waldur_core.structure import filters as structure_filters
from waldur_core.structure import models as structure_models
from waldur_core.structure import permissions as structure_permissions
from waldur_core.structure import serializers as structure_serializers
from waldur_core.structure import utils as structure_utils
from waldur_core.structure import views as structure_views
from waldur_core.structure.exceptions import ServiceBackendError
from waldur_core.structure.permissions import _has_owner_access
from waldur_core.structure.registry import get_resource_type
from waldur_core.structure.serializers import (
ProjectUserSerializer,
get_resource_serializer_class,
)
from waldur_core.structure.signals import resource_imported
from waldur_mastermind.invoices import models as invoice_models
from waldur_mastermind.marketplace import callbacks
from waldur_mastermind.marketplace.utils import validate_attributes
from waldur_pid import models as pid_models
from . import filters, log, models, permissions, plugins, serializers, tasks, utils
logger = logging.getLogger(__name__)
class BaseMarketplaceView(core_views.ActionsViewSet):
lookup_field = 'uuid'
filter_backends = (DjangoFilterBackend,)
update_permissions = partial_update_permissions = destroy_permissions = [
structure_permissions.is_owner
]
class PublicViewsetMixin:
def get_permissions(self):
if settings.WALDUR_MARKETPLACE[
'ANONYMOUS_USER_CAN_VIEW_OFFERINGS'
] and self.action in ['list', 'retrieve']:
return [rf_permissions.AllowAny()]
else:
return super(PublicViewsetMixin, self).get_permissions()
class ServiceProviderViewSet(PublicViewsetMixin, BaseMarketplaceView):
queryset = models.ServiceProvider.objects.all().order_by('customer__name')
serializer_class = serializers.ServiceProviderSerializer
filterset_class = filters.ServiceProviderFilter
api_secret_code_permissions = (
projects_permissions
) = (
project_permissions_permissions
) = keys_permissions = users_permissions = set_offerings_username_permissions = [
structure_permissions.is_owner
]
@action(detail=True, methods=['GET', 'POST'])
def api_secret_code(self, request, uuid=None):
"""On GET request - return service provider api_secret_code.
On POST - generate new service provider api_secret_code.
"""
service_provider = self.get_object()
if request.method == 'GET':
return Response(
{'api_secret_code': service_provider.api_secret_code},
status=status.HTTP_200_OK,
)
else:
service_provider.generate_api_secret_code()
service_provider.save()
return Response(
{
'detail': _('Api secret code updated.'),
'api_secret_code': service_provider.api_secret_code,
},
status=status.HTTP_200_OK,
)
def get_customer_project_ids(self):
service_provider = self.get_object()
offering_ids = models.Offering.objects.filter(
shared=True, customer=service_provider.customer
).values_list('id', flat=True)
project_ids = (
models.Resource.objects.filter(offering_id__in=offering_ids)
.exclude(state=models.Resource.States.TERMINATED)
.values_list('project_id', flat=True)
)
return project_ids
@action(detail=True, methods=['GET'])
def projects(self, request, uuid=None):
project_ids = self.get_customer_project_ids()
projects = structure_models.Project.objects.filter(id__in=project_ids)
page = self.paginate_queryset(projects)
serializer = structure_serializers.ProjectSerializer(
page, many=True, context=self.get_serializer_context()
)
return self.get_paginated_response(serializer.data)
@action(detail=True, methods=['GET'])
def project_permissions(self, request, uuid=None):
project_ids = self.get_customer_project_ids()
permissions = structure_models.ProjectPermission.objects.filter(
project_id__in=project_ids, is_active=True
)
page = self.paginate_queryset(permissions)
serializer = structure_serializers.ProjectPermissionLogSerializer(
page, many=True, context=self.get_serializer_context()
)
return self.get_paginated_response(serializer.data)
@action(detail=True, methods=['GET'])
def keys(self, request, uuid=None):
project_ids = self.get_customer_project_ids()
user_ids = structure_models.ProjectPermission.objects.filter(
project_id__in=project_ids, is_active=True
).values_list('user_id', flat=True)
keys = core_models.SshPublicKey.objects.filter(user_id__in=user_ids)
page = self.paginate_queryset(keys)
serializer = structure_serializers.SshKeySerializer(
page, many=True, context=self.get_serializer_context()
)
return self.get_paginated_response(serializer.data)
@action(detail=True, methods=['GET'])
def users(self, request, uuid=None):
project_ids = self.get_customer_project_ids()
user_ids = structure_models.ProjectPermission.objects.filter(
project_id__in=project_ids, is_active=True
).values_list('user_id', flat=True)
users = core_models.User.objects.filter(id__in=user_ids)
page = self.paginate_queryset(users)
serializer = structure_serializers.UserSerializer(
page, many=True, context=self.get_serializer_context()
)
return self.get_paginated_response(serializer.data)
def check_related_resources(request, view, obj=None):
if obj and obj.has_active_offerings:
raise rf_exceptions.ValidationError(
_('Service provider has active offerings. Please archive them first.')
)
destroy_permissions = [structure_permissions.is_owner, check_related_resources]
@action(detail=True, methods=['POST'])
def set_offerings_username(self, request, uuid=None):
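        # For every non-terminated resource reachable by the given user in this
        # provider's offerings, create or update an OfferingUser record with the
        # supplied username.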
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
user_uuid = serializer.validated_data['user_uuid']
username = serializer.validated_data['username']
try:
user = core_models.User.objects.get(uuid=user_uuid)
except core_models.User.DoesNotExist:
validation_message = f'A user with the uuid [{user_uuid}] is not found.'
raise rf_exceptions.ValidationError(_(validation_message))
user_projects_ids = structure_models.ProjectPermission.objects.filter(
user=user, is_active=True,
).values_list('project_id', flat=True)
offering_ids = (
models.Resource.objects.exclude(state=models.Resource.States.TERMINATED)
.filter(
project_id__in=user_projects_ids,
offering__customer=self.get_object().customer,
)
.values_list('offering_id', flat=True)
)
for offering_id in offering_ids:
models.OfferingUser.objects.update_or_create(
user=user, offering_id=offering_id, defaults={'username': username}
)
return Response(
{'detail': _('Offering users have been set.'),},
status=status.HTTP_201_CREATED,
)
set_offerings_username_serializer_class = serializers.SetOfferingsUsernameSerializer
class CategoryViewSet(PublicViewsetMixin, EagerLoadMixin, core_views.ActionsViewSet):
queryset = models.Category.objects.all()
serializer_class = serializers.CategorySerializer
lookup_field = 'uuid'
filter_backends = (DjangoFilterBackend,)
filterset_class = filters.CategoryFilter
create_permissions = (
update_permissions
) = partial_update_permissions = destroy_permissions = [
structure_permissions.is_staff
]
def can_update_offering(request, view, obj=None):
offering = obj
if not offering:
return
if offering.state == models.Offering.States.DRAFT:
if offering.has_user(request.user) or _has_owner_access(
request.user, offering.customer
):
return
else:
raise rf_exceptions.PermissionDenied()
else:
structure_permissions.is_staff(request, view)
def validate_offering_update(offering):
if offering.state == models.Offering.States.ARCHIVED:
raise rf_exceptions.ValidationError(
_('It is not possible to update archived offering.')
)
class OfferingViewSet(
core_views.CreateReversionMixin,
core_views.UpdateReversionMixin,
PublicViewsetMixin,
BaseMarketplaceView,
):
"""
This viewset enables uniform implementation of resource import.
Consider the following example:
importable_resources_backend_method = 'get_tenants_for_import'
import_resource_executor = executors.TenantImportExecutor
    It is expected that importable_resources_backend_method returns a list of dicts, each of which
    contains two mandatory fields: name and backend_id, and one optional field called extra.
    This optional field should be a list of dicts, each of which contains two mandatory fields: name and value.
Note that there are only 3 mandatory parameters:
* importable_resources_backend_method
* importable_resources_serializer_class
* import_resource_serializer_class
"""
queryset = models.Offering.objects.all()
serializer_class = serializers.OfferingDetailsSerializer
create_serializer_class = serializers.OfferingCreateSerializer
update_serializer_class = (
partial_update_serializer_class
) = serializers.OfferingUpdateSerializer
filterset_class = filters.OfferingFilter
filter_backends = (
DjangoFilterBackend,
filters.OfferingCustomersFilterBackend,
filters.OfferingImportableFilterBackend,
filters.ExternalOfferingFilterBackend,
)
def get_queryset(self):
queryset = super(OfferingViewSet, self).get_queryset()
if self.request.user.is_anonymous:
return queryset.filter(
state__in=[
models.Offering.States.ACTIVE,
models.Offering.States.PAUSED,
],
shared=True,
)
return queryset
@action(detail=True, methods=['post'])
def activate(self, request, uuid=None):
return self._update_state('activate')
@action(detail=True, methods=['post'])
def draft(self, request, uuid=None):
return self._update_state('draft')
@action(detail=True, methods=['post'])
def pause(self, request, uuid=None):
return self._update_state('pause', request)
pause_serializer_class = serializers.OfferingPauseSerializer
@action(detail=True, methods=['post'])
def unpause(self, request, uuid=None):
return self._update_state('unpause', request)
@action(detail=True, methods=['post'])
def archive(self, request, uuid=None):
return self._update_state('archive')
def _update_state(self, action, request=None):
offering = self.get_object()
try:
getattr(offering, action)()
except TransitionNotAllowed:
raise rf_exceptions.ValidationError(_('Offering state is invalid.'))
with reversion.create_revision():
if request:
serializer = self.get_serializer(
offering, data=request.data, partial=True
)
serializer.is_valid(raise_exception=True)
offering = serializer.save()
offering.save(update_fields=['state'])
reversion.set_user(self.request.user)
reversion.set_comment(
f'Offering state has been updated using method {action}'
)
return Response(
{
'detail': _('Offering state updated.'),
'state': offering.get_state_display(),
},
status=status.HTTP_200_OK,
)
pause_permissions = unpause_permissions = archive_permissions = [
permissions.user_is_owner_or_service_manager,
]
activate_permissions = [structure_permissions.is_staff]
activate_validators = pause_validators = archive_validators = destroy_validators = [
structure_utils.check_customer_blocked
]
update_permissions = partial_update_permissions = [can_update_offering]
update_validators = partial_update_validators = [
validate_offering_update,
structure_utils.check_customer_blocked,
]
def perform_create(self, serializer):
customer = serializer.validated_data['customer']
structure_utils.check_customer_blocked(customer)
super(OfferingViewSet, self).perform_create(serializer)
@action(detail=True, methods=['get'])
def importable_resources(self, request, uuid=None):
offering = self.get_object()
method = plugins.manager.get_importable_resources_backend_method(offering.type)
if not method:
raise rf_exceptions.ValidationError(
'Current offering plugin does not support resource import'
)
backend = offering.scope.get_backend()
resources = getattr(backend, method)()
page = self.paginate_queryset(resources)
return self.get_paginated_response(page)
importable_resources_permissions = [permissions.user_can_list_importable_resources]
import_resource_permissions = [permissions.user_can_list_importable_resources]
import_resource_serializer_class = serializers.ImportResourceSerializer
@action(detail=True, methods=['post'])
def import_resource(self, request, uuid=None):
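        # Validate the request, guard against duplicate imports, delegate the
        # actual import to the plugin backend method registered for this offering
        # type, then emit resource_imported and, if the plugin provides one,
        # schedule its import executor.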
import_resource_serializer = self.get_serializer(data=request.data)
import_resource_serializer.is_valid(raise_exception=True)
plan = import_resource_serializer.validated_data.get('plan', None)
project = import_resource_serializer.validated_data['project']
backend_id = import_resource_serializer.validated_data['backend_id']
offering = self.get_object()
backend = offering.scope.get_backend()
method = plugins.manager.import_resource_backend_method(offering.type)
if not method:
raise rf_exceptions.ValidationError(
'Current offering plugin does not support resource import'
)
resource_model = plugins.manager.get_resource_model(offering.type)
if resource_model.objects.filter(
service_settings=offering.scope, backend_id=backend_id
).exists():
raise rf_exceptions.ValidationError(
_('Resource has been imported already.')
)
try:
resource = getattr(backend, method)(backend_id=backend_id, project=project)
except ServiceBackendError as e:
raise rf_exceptions.ValidationError(str(e))
else:
resource_imported.send(
sender=resource.__class__,
instance=resource,
plan=plan,
offering=offering,
)
import_resource_executor = plugins.manager.get_import_resource_executor(
offering.type
)
if import_resource_executor:
transaction.on_commit(lambda: import_resource_executor.execute(resource))
marketplace_resource = models.Resource.objects.get(scope=resource)
resource_serializer = serializers.ResourceSerializer(
marketplace_resource, context=self.get_serializer_context()
)
return Response(data=resource_serializer.data, status=status.HTTP_201_CREATED)
@action(detail=True, methods=['post'])
def update_attributes(self, request, uuid=None):
offering = self.get_object()
if not isinstance(request.data, dict):
raise rf_exceptions.ValidationError('Dictionary is expected.')
validate_attributes(request.data, offering.category)
offering.attributes = request.data
with reversion.create_revision():
offering.save(update_fields=['attributes'])
reversion.set_user(self.request.user)
reversion.set_comment('Offering attributes have been updated via REST API')
return Response(status=status.HTTP_200_OK)
update_attributes_permissions = [permissions.user_is_owner_or_service_manager]
update_attributes_validators = [validate_offering_update]
@action(detail=True, methods=['post'])
def update_thumbnail(self, request, uuid=None):
offering = self.get_object()
serializer = serializers.OfferingThumbnailSerializer(
instance=offering, data=request.data
)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(status=status.HTTP_200_OK)
update_thumbnail_permissions = [permissions.user_can_update_thumbnail]
@action(detail=True, methods=['post'])
def delete_thumbnail(self, request, uuid=None):
offering = self.get_object()
offering.thumbnail = None
offering.save()
return Response(status=status.HTTP_204_NO_CONTENT)
delete_thumbnail_permissions = update_thumbnail_permissions
@action(detail=True)
def customers(self, request, uuid):
offering = self.get_object()
active_customers = utils.get_active_customers(request, self)
customer_queryset = utils.get_offering_customers(offering, active_customers)
serializer_class = structure_serializers.CustomerSerializer
serializer = serializer_class(
instance=customer_queryset, many=True, context=self.get_serializer_context()
)
page = self.paginate_queryset(serializer.data)
return self.get_paginated_response(page)
customers_permissions = [structure_permissions.is_owner]
def get_stats(self, get_queryset, serializer, serializer_context=None):
offering = self.get_object()
active_customers = utils.get_active_customers(self.request, self)
start, end = utils.get_start_and_end_dates_from_request(self.request)
invoice_items = invoice_models.InvoiceItem.objects.filter(
details__offering_uuid=offering.uuid.hex,
invoice__customer__in=active_customers,
invoice__created__gte=start,
invoice__created__lte=end,
)
queryset = get_queryset(invoice_items)
serializer = serializer(
instance=queryset, many=True, context=serializer_context
)
page = self.paginate_queryset(serializer.data)
return self.get_paginated_response(page)
@action(detail=True)
def costs(self, *args, **kwargs):
return self.get_stats(
utils.get_offering_costs, serializers.OfferingCostSerializer
)
costs_permissions = [structure_permissions.is_owner]
@action(detail=True)
def component_stats(self, *args, **kwargs):
offering = self.get_object()
offering_components_map = {
component.type: component for component in offering.components.all()
}
def get_offering_component_stats(invoice_items):
return (
invoice_items.filter(
details__offering_component_type__in=offering_components_map.keys()
)
.values(
'details__offering_component_type',
'invoice__year',
'invoice__month',
)
.order_by(
'details__offering_component_type',
'invoice__year',
'invoice__month',
)
.annotate(total_quantity=Sum('quantity'))
)
serializer_context = {
'offering_components_map': offering_components_map,
}
return self.get_stats(
get_offering_component_stats,
serializers.OfferingComponentStatSerializer,
serializer_context,
)
component_stats_permissions = [structure_permissions.is_owner]
@action(detail=True, methods=['post'])
def update_divisions(self, request, uuid):
offering = self.get_object()
serializer = serializers.DivisionsSerializer(
instance=offering, context={'request': request}, data=request.data
)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(status=status.HTTP_200_OK)
update_divisions_permissions = [structure_permissions.is_owner]
@action(detail=True, methods=['post'])
def delete_divisions(self, request, uuid=None):
offering = self.get_object()
offering.divisions.clear()
return Response(status=status.HTTP_204_NO_CONTENT)
delete_divisions_permissions = update_divisions_permissions
@action(detail=False)
def groups(self, *args, **kwargs):
OFFERING_LIMIT = 4
qs = self.get_queryset()
customer_ids = self.paginate_queryset(
qs.order_by('customer__name')
.values_list('customer_id', flat=True)
.distinct()
)
customers = {
customer.id: customer
for customer in structure_models.Customer.objects.filter(
id__in=customer_ids
)
}
return Response(
data=[
{
'customer_name': customers[customer_id].name,
'customer_uuid': customers[customer_id].uuid.hex,
'offerings': [
{
'offering_name': offering.name,
'offering_uuid': offering.uuid.hex,
}
for offering in qs.filter(customer_id=customer_id)[
:OFFERING_LIMIT
]
],
}
for customer_id in customer_ids
]
)
class OfferingReferralsViewSet(PublicViewsetMixin, rf_viewsets.ReadOnlyModelViewSet):
queryset = pid_models.DataciteReferral.objects.all()
serializer_class = serializers.OfferingReferralSerializer
lookup_field = 'uuid'
filter_backends = (
filters.OfferingReferralScopeFilterBackend,
structure_filters.GenericRoleFilter,
DjangoFilterBackend,
)
filterset_class = filters.OfferingReferralFilter
class OfferingPermissionViewSet(structure_views.BasePermissionViewSet):
queryset = models.OfferingPermission.objects.filter(is_active=True).order_by(
'-created'
)
serializer_class = serializers.OfferingPermissionSerializer
filter_backends = (
structure_filters.GenericRoleFilter,
DjangoFilterBackend,
)
filterset_class = filters.OfferingPermissionFilter
scope_field = 'offering'
class OfferingPermissionLogViewSet(
mixins.RetrieveModelMixin, mixins.ListModelMixin, rf_viewsets.GenericViewSet
):
queryset = models.OfferingPermission.objects.filter(is_active=None).order_by(
'offering__name'
)
serializer_class = serializers.OfferingPermissionLogSerializer
filter_backends = (
structure_filters.GenericRoleFilter,
DjangoFilterBackend,
)
filterset_class = filters.OfferingPermissionFilter
class PlanUsageReporter:
"""
    This class provides aggregate counts of how many plans of a
    certain type are used for each offering.
"""
def __init__(self, view, request):
self.view = view
self.request = request
def get_report(self):
plans = models.Plan.objects.exclude(offering__billable=False)
query = self.parse_query()
if query:
plans = self.apply_filters(query, plans)
resources = self.get_subquery()
remaining = ExpressionWrapper(
F('limit') - F('usage'), output_field=PositiveSmallIntegerField()
)
plans = plans.annotate(
usage=Subquery(resources[:1]), limit=F('max_amount')
).annotate(remaining=remaining)
plans = self.apply_ordering(plans)
return self.serialize(plans)
def parse_query(self):
if self.request.query_params:
serializer = serializers.PlanUsageRequestSerializer(
data=self.request.query_params
)
serializer.is_valid(raise_exception=True)
return serializer.validated_data
return None
def get_subquery(self):
# Aggregate
resources = (
models.Resource.objects.filter(plan_id=OuterRef('pk'))
.exclude(state=models.Resource.States.TERMINATED)
.annotate(count=Count('*'))
.values_list('count', flat=True)
)
# Workaround for Django bug:
# https://code.djangoproject.com/ticket/28296
        # It allows removing the extra GROUP BY clause from the subquery.
resources.query.group_by = []
return resources
def apply_filters(self, query, plans):
if query.get('offering_uuid'):
plans = plans.filter(offering__uuid=query.get('offering_uuid'))
if query.get('customer_provider_uuid'):
plans = plans.filter(
offering__customer__uuid=query.get('customer_provider_uuid')
)
return plans
def apply_ordering(self, plans):
param = (
self.request.query_params and self.request.query_params.get('o') or '-usage'
)
return order_with_nulls(plans, param)
def serialize(self, plans):
page = self.view.paginate_queryset(plans)
serializer = serializers.PlanUsageResponseSerializer(page, many=True)
return self.view.get_paginated_response(serializer.data)
def validate_plan_update(plan):
if models.Resource.objects.filter(plan=plan).exists():
raise rf_exceptions.ValidationError(
_('It is not possible to update plan because it is used by resources.')
)
def validate_plan_archive(plan):
if plan.archived:
raise rf_exceptions.ValidationError(_('Plan is already archived.'))
class PlanViewSet(core_views.UpdateReversionMixin, BaseMarketplaceView):
queryset = models.Plan.objects.all()
serializer_class = serializers.PlanDetailsSerializer
filterset_class = filters.PlanFilter
disabled_actions = ['destroy']
update_validators = partial_update_validators = [validate_plan_update]
archive_permissions = [structure_permissions.is_owner]
archive_validators = [validate_plan_archive]
@action(detail=True, methods=['post'])
def archive(self, request, uuid=None):
plan = self.get_object()
with reversion.create_revision():
plan.archived = True
plan.save(update_fields=['archived'])
reversion.set_user(self.request.user)
reversion.set_comment('Plan has been archived.')
return Response(
{'detail': _('Plan has been archived.')}, status=status.HTTP_200_OK
)
@action(detail=False)
def usage_stats(self, request):
return PlanUsageReporter(self, request).get_report()
class PlanComponentViewSet(PublicViewsetMixin, rf_viewsets.ReadOnlyModelViewSet):
queryset = models.PlanComponent.objects.filter()
serializer_class = serializers.PlanComponentSerializer
filterset_class = filters.PlanComponentFilter
lookup_field = 'uuid'
def get_queryset(self):
queryset = super(PlanComponentViewSet, self).get_queryset()
if self.request.user.is_anonymous:
            return queryset.filter(plan__offering__shared=True,)
        return queryset
class ScreenshotViewSet(
core_views.CreateReversionMixin,
core_views.UpdateReversionMixin,
BaseMarketplaceView,
):
queryset = models.Screenshot.objects.all().order_by('offering__name')
serializer_class = serializers.ScreenshotSerializer
filterset_class = filters.ScreenshotFilter
class OrderViewSet(BaseMarketplaceView):
queryset = models.Order.objects.all()
serializer_class = serializers.OrderSerializer
filter_backends = (DjangoFilterBackend,)
filterset_class = filters.OrderFilter
destroy_validators = partial_update_validators = [
structure_utils.check_customer_blocked
]
def get_queryset(self):
"""
Orders are available to both service provider and service consumer.
"""
if self.request.user.is_staff or self.request.user.is_support:
return self.queryset
return self.queryset.filter(
Q(
project__permissions__user=self.request.user,
project__permissions__is_active=True,
)
| Q(
project__customer__permissions__user=self.request.user,
project__customer__permissions__is_active=True,
)
| Q(
items__offering__customer__permissions__user=self.request.user,
items__offering__customer__permissions__is_active=True,
)
).distinct()
@action(detail=True, methods=['post'])
def approve(self, request, uuid=None):
tasks.approve_order(self.get_object(), request.user)
return Response(
{'detail': _('Order has been approved.')}, status=status.HTTP_200_OK
)
approve_validators = [
core_validators.StateValidator(models.Order.States.REQUESTED_FOR_APPROVAL),
structure_utils.check_customer_blocked,
structure_utils.check_project_end_date,
]
approve_permissions = [permissions.user_can_approve_order_permission]
@action(detail=True, methods=['post'])
def reject(self, request, uuid=None):
order = self.get_object()
order.reject()
order.save(update_fields=['state'])
return Response(
{'detail': _('Order has been rejected.')}, status=status.HTTP_200_OK
)
reject_validators = [
core_validators.StateValidator(models.Order.States.REQUESTED_FOR_APPROVAL),
structure_utils.check_customer_blocked,
]
reject_permissions = [permissions.user_can_reject_order]
@action(detail=True)
def pdf(self, request, uuid=None):
order = self.get_object()
file = utils.create_order_pdf(order)
file_response = HttpResponse(file, content_type='application/pdf')
filename = order.get_filename()
file_response[
'Content-Disposition'
] = 'attachment; filename="{filename}"'.format(filename=filename)
return file_response
def perform_create(self, serializer):
project = serializer.validated_data['project']
structure_utils.check_customer_blocked(project)
structure_utils.check_project_end_date(project)
super(OrderViewSet, self).perform_create(serializer)
class PluginViewSet(views.APIView):
def get(self, request):
offering_types = plugins.manager.get_offering_types()
payload = []
for offering_type in offering_types:
components = [
dict(
type=component.type,
name=component.name,
measured_unit=component.measured_unit,
billing_type=component.billing_type,
)
for component in plugins.manager.get_components(offering_type)
]
payload.append(
dict(
offering_type=offering_type,
components=components,
available_limits=plugins.manager.get_available_limits(
offering_type
),
)
)
return Response(payload, status=status.HTTP_200_OK)
class OrderItemViewSet(BaseMarketplaceView):
queryset = models.OrderItem.objects.all()
filter_backends = (DjangoFilterBackend,)
serializer_class = serializers.OrderItemDetailsSerializer
filterset_class = filters.OrderItemFilter
def order_items_destroy_validator(order_item):
if not order_item:
return
if order_item.order.state != models.Order.States.REQUESTED_FOR_APPROVAL:
raise rf_exceptions.PermissionDenied()
destroy_validators = [order_items_destroy_validator]
destroy_permissions = terminate_permissions = [
structure_permissions.is_administrator
]
def get_queryset(self):
"""
OrderItems are available to both service provider and service consumer.
"""
if self.request.user.is_staff or self.request.user.is_support:
return self.queryset
return self.queryset.filter(
Q(
order__project__permissions__user=self.request.user,
order__project__permissions__is_active=True,
)
| Q(
order__project__customer__permissions__user=self.request.user,
order__project__customer__permissions__is_active=True,
)
| Q(
offering__customer__permissions__user=self.request.user,
offering__customer__permissions__is_active=True,
)
).distinct()
approve_permissions = [permissions.can_approve_order_item]
reject_permissions = [permissions.can_reject_order_item]
# Approve action is enabled for service provider, and
# reject action is enabled for both provider and consumer.
    # A pending order item for a remote offering is executed after it is approved by the service provider.
@action(detail=True, methods=['post'])
def reject(self, request, uuid=None):
order_item = self.get_object()
if order_item.state == models.OrderItem.States.EXECUTING:
if not order_item.resource:
raise ValidationError('Order item does not have a resource.')
callbacks.sync_order_item_state(
order_item, models.OrderItem.States.TERMINATED
)
elif order_item.state == models.OrderItem.States.PENDING:
order_item.reviewed_at = timezone.now()
order_item.reviewed_by = request.user
order_item.set_state_terminated()
order_item.save()
else:
raise ValidationError('Order item is not in executing or pending state.')
return Response(status=status.HTTP_200_OK)
@action(detail=True, methods=['post'])
def approve(self, request, uuid=None):
order_item = self.get_object()
if order_item.state == models.OrderItem.States.EXECUTING:
if not order_item.resource:
raise ValidationError('Order item does not have a resource.')
callbacks.sync_order_item_state(order_item, models.OrderItem.States.DONE)
elif order_item.state == models.OrderItem.States.PENDING:
order_item.reviewed_at = timezone.now()
order_item.reviewed_by = request.user
order_item.set_state_executing()
order_item.save()
transaction.on_commit(
lambda: tasks.process_order_item.delay(
core_utils.serialize_instance(order_item),
core_utils.serialize_instance(request.user),
)
)
else:
raise ValidationError('Order item is not in executing or pending state.')
return Response(status=status.HTTP_200_OK)
@action(detail=True, methods=['post'])
def terminate(self, request, uuid=None):
order_item = self.get_object()
if not plugins.manager.can_terminate_order_item(order_item.offering.type):
return Response(
{
'details': 'Order item could not be terminated because it is not supported by plugin.'
},
status=status.HTTP_400_BAD_REQUEST,
)
try:
# It is expected that plugin schedules Celery task to call backend
# and then switches order item to terminated state.
order_item.set_state_terminating()
order_item.save(update_fields=['state'])
except TransitionNotAllowed:
return Response(
{
'details': 'Order item could not be terminated because it has been already processed.'
},
status=status.HTTP_400_BAD_REQUEST,
)
return Response(
{'details': 'Order item termination has been scheduled.'},
status=status.HTTP_202_ACCEPTED,
)
class CartItemViewSet(core_views.ActionsViewSet):
queryset = models.CartItem.objects.all()
lookup_field = 'uuid'
serializer_class = serializers.CartItemSerializer
filter_backends = (DjangoFilterBackend,)
filterset_class = filters.CartItemFilter
def get_queryset(self):
return self.queryset.filter(user=self.request.user)
@action(detail=False, methods=['post'])
def submit(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
order = serializer.save()
order_serializer = serializers.OrderSerializer(
instance=order, context=self.get_serializer_context()
)
return Response(order_serializer.data, status=status.HTTP_201_CREATED)
submit_serializer_class = serializers.CartSubmitSerializer
class ResourceViewSet(core_views.ActionsViewSet):
queryset = models.Resource.objects.all()
filter_backends = (DjangoFilterBackend, filters.ResourceScopeFilterBackend)
filterset_class = filters.ResourceFilter
lookup_field = 'uuid'
serializer_class = serializers.ResourceSerializer
disabled_actions = ['create', 'destroy']
update_serializer_class = (
partial_update_serializer_class
) = serializers.ResourceUpdateSerializer
def get_queryset(self):
"""
Resources are available to both service provider and service consumer.
"""
if self.request.user.is_staff or self.request.user.is_support:
return self.queryset
return self.queryset.filter(
Q(
project__permissions__user=self.request.user,
project__permissions__is_active=True,
)
| Q(
project__customer__permissions__user=self.request.user,
project__customer__permissions__is_active=True,
)
| Q(
offering__customer__permissions__user=self.request.user,
offering__customer__permissions__is_active=True,
)
).distinct()
@action(detail=True, methods=['get'])
def details(self, request, uuid=None):
resource = self.get_object()
if not resource.scope:
return Response(status=status.HTTP_404_NOT_FOUND)
resource_type = get_resource_type(resource.scope)
serializer_class = get_resource_serializer_class(resource_type)
if not serializer_class:
            return Response(status=status.HTTP_204_NO_CONTENT)
serializer = serializer_class(
instance=resource.scope, context=self.get_serializer_context()
)
return Response(serializer.data, status=status.HTTP_200_OK)
@action(detail=True, methods=['post'])
def terminate(self, request, uuid=None):
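        # Termination is asynchronous: a TERMINATE order item is wrapped in a new
        # order and the caller receives that order's UUID.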
resource = self.get_object()
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
attributes = serializer.validated_data.get('attributes', {})
with transaction.atomic():
order_item = models.OrderItem(
resource=resource,
offering=resource.offering,
type=models.OrderItem.Types.TERMINATE,
attributes=attributes,
)
try:
project = resource.project
except structure_models.Project.DoesNotExist:
project = structure_models.Project.all_objects.get(
pk=resource.project_id
)
order = serializers.create_order(
project=project,
user=self.request.user,
items=[order_item],
request=request,
)
return Response({'order_uuid': order.uuid.hex}, status=status.HTTP_200_OK)
terminate_serializer_class = serializers.ResourceTerminateSerializer
terminate_permissions = [permissions.user_can_terminate_resource]
terminate_validators = [
core_validators.StateValidator(
models.Resource.States.OK, models.Resource.States.ERRED
),
utils.check_customer_blocked_for_terminating,
]
@action(detail=True, methods=['post'])
def switch_plan(self, request, uuid=None):
resource = self.get_object()
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
plan = serializer.validated_data['plan']
with transaction.atomic():
order_item = models.OrderItem(
resource=resource,
offering=resource.offering,
old_plan=resource.plan,
plan=plan,
type=models.OrderItem.Types.UPDATE,
limits=resource.limits or {},
)
order = serializers.create_order(
project=resource.project,
user=self.request.user,
items=[order_item],
request=request,
)
return Response({'order_uuid': order.uuid.hex}, status=status.HTTP_200_OK)
switch_plan_serializer_class = serializers.ResourceSwitchPlanSerializer
@action(detail=True, methods=['post'])
def update_limits(self, request, uuid=None):
resource = self.get_object()
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
limits = serializer.validated_data['limits']
with transaction.atomic():
order_item = models.OrderItem(
resource=resource,
offering=resource.offering,
plan=resource.plan,
type=models.OrderItem.Types.UPDATE,
limits=limits,
attributes={'old_limits': resource.limits},
)
order = serializers.create_order(
project=resource.project,
user=self.request.user,
items=[order_item],
request=request,
)
return Response({'order_uuid': order.uuid.hex}, status=status.HTTP_200_OK)
update_limits_serializer_class = serializers.ResourceUpdateLimitsSerializer
switch_plan_permissions = update_limits_permissions = [
structure_permissions.is_administrator
]
switch_plan_validators = update_limits_validators = [
core_validators.StateValidator(models.Resource.States.OK),
structure_utils.check_customer_blocked,
]
@action(detail=True, methods=['get'])
def plan_periods(self, request, uuid=None):
resource = self.get_object()
qs = models.ResourcePlanPeriod.objects.filter(resource=resource)
qs = qs.filter(Q(end=None) | Q(end__gte=month_start(timezone.now())))
serializer = serializers.ResourcePlanPeriodSerializer(qs, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
@action(detail=True, methods=['post'])
def move_resource(self, request, uuid=None):
resource = self.get_object()
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
project = serializer.validated_data['project']
try:
utils.move_resource(resource, project)
except utils.MoveResourceException as exception:
error_message = str(exception)
return JsonResponse({'error_message': error_message}, status=409)
serialized_resource = serializers.ResourceSerializer(
resource, context=self.get_serializer_context()
)
return Response(serialized_resource.data, status=status.HTTP_200_OK)
move_resource_serializer_class = serializers.MoveResourceSerializer
move_resource_permissions = [structure_permissions.is_staff]
@action(detail=True, methods=['post'])
def set_backend_id(self, request, uuid=None):
resource = self.get_object()
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
new_backend_id = serializer.validated_data['backend_id']
old_backend_id = resource.backend_id
resource.backend_id = serializer.validated_data['backend_id']
resource.save()
logger.info(
'%s has changed backend_id from %s to %s',
request.user.full_name,
old_backend_id,
new_backend_id,
)
return Response(
{'status': _('Resource backend_id has been changed.')},
status=status.HTTP_200_OK,
)
set_backend_id_permissions = [permissions.user_is_owner_or_service_manager]
set_backend_id_serializer_class = serializers.ResourceBackendIDSerializer
@action(detail=True, methods=['post'])
def submit_report(self, request, uuid=None):
resource = self.get_object()
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
resource.report = serializer.validated_data['report']
resource.save(update_fields=['report'])
return Response({'status': _('Report is submitted')}, status=status.HTTP_200_OK)
submit_report_permissions = [
permissions.user_is_service_provider_owner_or_service_provider_manager
]
submit_report_serializer_class = serializers.ResourceReportSerializer
def _set_end_date(self, request, is_staff_action):
resource = self.get_object()
serializer = serializers.ResourceEndDateByProviderSerializer(
data=request.data, instance=resource, context={'request': request}
)
serializer.is_valid(raise_exception=True)
serializer.save()
transaction.on_commit(
lambda: tasks.notify_about_resource_termination.delay(
resource.uuid.hex, request.user.uuid.hex, is_staff_action
)
)
if not is_staff_action:
log.log_marketplace_resource_end_date_has_been_updated_by_provider(
resource, request.user
)
else:
log.log_marketplace_resource_end_date_has_been_updated_by_staff(
resource, request.user
)
return Response(status=status.HTTP_200_OK)
@action(detail=True, methods=['post'])
def set_end_date_by_provider(self, request, uuid=None):
return self._set_end_date(request, False)
set_end_date_by_provider_permissions = [
permissions.user_can_set_end_date_by_provider
]
@action(detail=True, methods=['post'])
def set_end_date_by_staff(self, request, uuid=None):
return self._set_end_date(request, True)
set_end_date_by_staff_permissions = [structure_permissions.is_staff]
# Service provider endpoint only
@action(detail=True, methods=['get'])
def team(self, request, uuid=None):
resource = self.get_object()
project = resource.project
return Response(
ProjectUserSerializer(
instance=project.get_users(),
many=True,
context={'project': project, 'request': request},
).data,
status=status.HTTP_200_OK,
)
team_permissions = [
permissions.user_is_service_provider_owner_or_service_provider_manager
]
class ProjectChoicesViewSet(ListAPIView):
def get_project(self):
project_uuid = self.kwargs['project_uuid']
if not is_uuid_like(project_uuid):
return Response(
status=status.HTTP_400_BAD_REQUEST, data='Project UUID is invalid.'
)
return get_object_or_404(structure_models.Project, uuid=project_uuid)
def get_category(self):
category_uuid = self.kwargs['category_uuid']
if not is_uuid_like(category_uuid):
return Response(
status=status.HTTP_400_BAD_REQUEST, data='Category UUID is invalid.'
)
return get_object_or_404(models.Category, uuid=category_uuid)
class ResourceOfferingsViewSet(ProjectChoicesViewSet):
serializer_class = serializers.ResourceOfferingSerializer
def get_queryset(self):
project = self.get_project()
category = self.get_category()
offerings = (
models.Resource.objects.filter(project=project, offering__category=category)
.exclude(state=models.Resource.States.TERMINATED)
.values_list('offering_id', flat=True)
)
return models.Offering.objects.filter(pk__in=offerings)
class CategoryComponentUsageViewSet(core_views.ReadOnlyActionsViewSet):
queryset = models.CategoryComponentUsage.objects.all().order_by(
'-date', 'component__type'
)
filter_backends = (
DjangoFilterBackend,
filters.CategoryComponentUsageScopeFilterBackend,
)
filterset_class = filters.CategoryComponentUsageFilter
serializer_class = serializers.CategoryComponentUsageSerializer
class ComponentUsageViewSet(core_views.ReadOnlyActionsViewSet):
queryset = models.ComponentUsage.objects.all().order_by('-date', 'component__type')
filter_backends = (structure_filters.GenericRoleFilter, DjangoFilterBackend)
filterset_class = filters.ComponentUsageFilter
serializer_class = serializers.ComponentUsageSerializer
@action(detail=False, methods=['post'])
def set_usage(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
resource = serializer.validated_data['plan_period'].resource
if not _has_owner_access(
request.user, resource.offering.customer
) and not resource.offering.has_user(request.user):
raise PermissionDenied(
_(
'Only staff, service provider owner and service manager are allowed '
'to submit usage data for marketplace resource.'
)
)
serializer.save()
return Response(status=status.HTTP_201_CREATED)
set_usage_serializer_class = serializers.ComponentUsageCreateSerializer
class MarketplaceAPIViewSet(rf_viewsets.ViewSet):
"""
TODO: Move this viewset to ComponentUsageViewSet.
"""
permission_classes = ()
serializer_class = serializers.ServiceProviderSignatureSerializer
def get_validated_data(self, request):
serializer = self.serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
data = serializer.validated_data['data']
dry_run = serializer.validated_data['dry_run']
if self.action == 'set_usage':
data_serializer = serializers.ComponentUsageCreateSerializer(data=data)
data_serializer.is_valid(raise_exception=True)
if not dry_run:
data_serializer.save()
return serializer.validated_data, dry_run
@action(detail=False, methods=['post'])
@csrf_exempt
def check_signature(self, request, *args, **kwargs):
self.get_validated_data(request)
return Response(status=status.HTTP_200_OK)
@action(detail=False, methods=['post'])
@csrf_exempt
def set_usage(self, request, *args, **kwargs):
self.get_validated_data(request)
return Response(status=status.HTTP_201_CREATED)
class OfferingFileViewSet(core_views.ActionsViewSet):
queryset = models.OfferingFile.objects.all().order_by('name')
filterset_class = filters.OfferingFileFilter
filter_backends = [DjangoFilterBackend]
serializer_class = serializers.OfferingFileSerializer
lookup_field = 'uuid'
disabled_actions = ['update', 'partial_update']
def check_create_permissions(request, view, obj=None):
serializer = view.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
user = request.user
offering = serializer.validated_data['offering']
if user.is_staff or (
offering.customer
and offering.customer.has_user(user, structure_models.CustomerRole.OWNER)
):
return
raise rf_exceptions.PermissionDenied()
create_permissions = [check_create_permissions]
destroy_permissions = [structure_permissions.is_owner]
class OfferingUsersViewSet(
mixins.RetrieveModelMixin,
mixins.ListModelMixin,
mixins.CreateModelMixin,
rf_viewsets.GenericViewSet,
):
queryset = models.OfferingUser.objects.all()
serializer_class = serializers.OfferingUserSerializer
lookup_field = 'uuid'
filter_backends = (DjangoFilterBackend,)
filterset_class = filters.OfferingUserFilter
def get_queryset(self):
queryset = super(OfferingUsersViewSet, self).get_queryset()
if self.request.user.is_staff or self.request.user.is_support:
return queryset
queryset = queryset.filter(
Q(user=self.request.user)
| Q(
offering__customer__permissions__user=self.request.user,
offering__customer__permissions__is_active=True,
)
)
return queryset
class CustomerStatsViewSet(rf_viewsets.ViewSet):
permission_classes = [rf_permissions.IsAuthenticated, core_permissions.IsSupport]
@action(detail=False, methods=['get'])
def organization_project_count(self, request, *args, **kwargs):
data = structure_models.Project.objects.values(
'customer__abbreviation', 'customer__name', 'customer__uuid'
).annotate(count=Count('customer__uuid'))
serializer = serializers.CustomerStatsSerializer(data, many=True)
return Response(status=status.HTTP_200_OK, data=serializer.data)
@action(detail=False, methods=['get'])
def organization_resource_count(self, request, *args, **kwargs):
data = (
models.Resource.objects.filter(state=models.Resource.States.OK)
.values(
'project__customer__abbreviation',
'project__customer__name',
'project__customer__uuid',
)
.annotate(count=Count('project__customer__uuid'))
)
serializer = serializers.CustomerStatsSerializer(data, many=True)
return Response(status=status.HTTP_200_OK, data=serializer.data)
@action(detail=False, methods=['get'])
def customer_member_count(self, request, *args, **kwargs):
data = (
structure_models.CustomerPermission.objects.filter(is_active=True)
.values('customer__abbreviation', 'customer__name', 'customer__uuid')
.annotate(count=Count('customer__uuid'))
)
serializer = serializers.CustomerStatsSerializer(data, many=True)
return Response(status=status.HTTP_200_OK, data=serializer.data)
@action(detail=False, methods=['get'])
def project_member_count(self, request, *args, **kwargs):
data = (
structure_models.ProjectPermission.objects.filter(is_active=True)
.values(
'project__customer__abbreviation',
'project__customer__name',
'project__customer__uuid',
)
.annotate(count=Count('project__customer__uuid'))
)
serializer = serializers.CustomerStatsSerializer(data, many=True)
return Response(status=status.HTTP_200_OK, data=serializer.data)
for view in (structure_views.ProjectCountersView, structure_views.CustomerCountersView):
def inject_resources_counter(scope):
counters = models.AggregateResourceCount.objects.filter(scope=scope).only(
'count', 'category'
)
return {
'marketplace_category_{}'.format(counter.category.uuid): counter.count
for counter in counters
}
view.register_dynamic_counter(inject_resources_counter)
|
the-stack_106_19464
|
from itertools import chain
from cms.exceptions import NoHomeFound
from cms.utils import get_language_from_request, get_template_from_request
from cms.utils.moderator import get_cmsplugin_queryset, get_page_queryset
from cms.plugin_rendering import render_plugins, render_placeholder, render_placeholder_toolbar
from cms.plugins.utils import get_plugins
from cms.models import Page, Placeholder
from django import template
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.cache import cache
from django.core.mail import mail_managers
from django.template.defaultfilters import title
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from django.forms.widgets import Media
register = template.Library()
def get_site_id(site):
if site:
if isinstance(site, Site):
site_id = site.id
elif isinstance(site, int):
site_id = site
else:
site_id = settings.SITE_ID
else:
site_id = settings.SITE_ID
return site_id
def has_permission(page, request):
return page.has_change_permission(request)
register.filter(has_permission)
def _get_cache_key(name, page_lookup, lang, site_id):
if isinstance(page_lookup, Page):
page_key = str(page_lookup.pk)
else:
page_key = str(page_lookup)
return name+'__page_lookup:'+page_key+'_site:'+str(site_id)+'_lang:'+str(lang)
def _get_page_by_untyped_arg(page_lookup, request, site_id):
"""
The `page_lookup` argument can be of any of the following types:
- Integer: interpreted as `pk` of the desired page
- String: interpreted as `reverse_id` of the desired page
- `dict`: a dictionary containing keyword arguments to find the desired page
(for instance: `{'pk': 1}`)
- `Page`: you can also pass a Page object directly, in which case there will be no database lookup.
- `None`: the current page will be used
"""
if page_lookup is None:
return request.current_page
if isinstance(page_lookup, Page):
return page_lookup
if isinstance(page_lookup, basestring):
page_lookup = {'reverse_id': page_lookup}
elif isinstance(page_lookup, (int, long)):
page_lookup = {'pk': page_lookup}
elif not isinstance(page_lookup, dict):
raise TypeError('The page_lookup argument can be either a Dictionary, Integer, Page, or String.')
page_lookup.update({'site': site_id})
try:
return get_page_queryset(request).get(**page_lookup)
except Page.DoesNotExist:
site = Site.objects.get_current()
subject = _('Page not found on %(domain)s') % {'domain':site.domain}
body = _("A template tag couldn't find the page with lookup arguments `%(page_lookup)s\n`. "
"The URL of the request was: http://%(host)s%(path)s") \
% {'page_lookup': repr(page_lookup), 'host': site.domain, 'path': request.path}
if settings.DEBUG:
raise Page.DoesNotExist(body)
else:
mail_managers(subject, body, fail_silently=True)
return None
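# Illustrative calls (arguments are made up) covering the lookup forms accepted above:
#   _get_page_by_untyped_arg(42, request, site_id)                      # pk
#   _get_page_by_untyped_arg('faq', request, site_id)                   # reverse_id
#   _get_page_by_untyped_arg({'reverse_id': 'faq'}, request, site_id)   # dict of filters
#   _get_page_by_untyped_arg(page, request, site_id)                    # Page instance, no DB lookup
#   _get_page_by_untyped_arg(None, request, site_id)                    # current page from the request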
def page_url(context, page_lookup, lang=None, site=None):
"""
Show the url of a page with a reverse id in the right language
This is mostly used if you want to have a static link in a template to a page
"""
site_id = get_site_id(site)
request = context.get('request', False)
if not request:
return {'content': ''}
if request.current_page == "dummy":
return {'content': ''}
if lang is None:
lang = get_language_from_request(request)
cache_key = _get_cache_key('page_url', page_lookup, lang, site_id)+'_type:absolute_url'
url = cache.get(cache_key)
if not url:
page = _get_page_by_untyped_arg(page_lookup, request, site_id)
if page:
url = page.get_absolute_url(language=lang)
cache.set(cache_key, url, settings.CMS_CONTENT_CACHE_DURATION)
if url:
return {'content': url}
return {'content': ''}
page_url = register.inclusion_tag('cms/content.html', takes_context=True)(page_url)
def page_id_url(context, reverse_id, lang=None, site=None):
return page_url(context, reverse_id, lang, site)
page_id_url = register.inclusion_tag('cms/content.html', takes_context=True)(page_id_url)
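# Illustrative template usage (page names are made up; assumes this module is
# loaded as a template tag library):
#   {% page_url "faq" %}          lookup by reverse_id on the current site
#   {% page_url "faq" "de" %}     same lookup with an explicit language
#   {% page_id_url "faq" %}       same as page_url, taking a reverse_id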
def do_placeholder(parser, token):
error_string = '%r tag requires at least 1 and accepts at most 2 arguments'
nodelist_or = None
inherit = False
try:
# split_contents() knows not to split quoted strings.
bits = token.split_contents()
# if the `placeholderor` tag was used, look for closing tag, and pass the enclosed nodes
# to PlaceholderNode below
if bits[-1].lower() == 'or':
bits.pop()
nodelist_or = parser.parse(('endplaceholder',))
parser.delete_first_token()
elif bits[-1].lower() == 'inherit':
bits.pop()
inherit = True
else:
bit = bits[-1]
if bit[0] == bit[-1] and bit[0] in ('"', "'"):
bit = bit[1:-1]
if bit.isdigit():
import warnings
warnings.warn("The width parameter for the placeholder tag is deprecated.", DeprecationWarning)
except ValueError:
raise template.TemplateSyntaxError(error_string % bits[0])
if len(bits) == 2:
#tag_name, name
return PlaceholderNode(bits[1], nodelist_or=nodelist_or, inherit=inherit)
elif len(bits) == 3:
#tag_name, name, width
return PlaceholderNode(bits[1], bits[2], nodelist_or=nodelist_or, inherit=inherit)
else:
raise template.TemplateSyntaxError(error_string % bits[0])
class PlaceholderNode(template.Node):
"""This template node is used to output page content and
is also used in the admin to dynamically generate input fields.
eg: {% placeholder "placeholder_name" %}
{% placeholder "sidebar" inherit %}
{% placeholder "footer" inherit or %}
<a href="/about/">About us</a>
{% endplaceholder %}
Keyword arguments:
name -- the name of the placeholder
width -- additional width attribute (integer) which gets added to the plugin context
(deprecated, use `{% with 320 as width %}{% placeholder "foo"}{% endwith %}`)
inherit -- optional argument which if given will result in inheriting
the content of the placeholder with the same name on parent pages
or -- optional argument which if given will make the template tag a block
tag whose content is shown if the placeholder is empty
"""
def __init__(self, name, width=None, nodelist_or=None, inherit=False):
self.name = "".join(name.lower().split('"'))
if width:
self.width_var = template.Variable(width)
self.nodelist_or = nodelist_or
self.inherit = inherit
def __repr__(self):
return "<Placeholder Node: %s>" % self.name
def render(self, context):
if not 'request' in context:
return ''
request = context['request']
width_var = getattr(self, 'width_var', None)
if width_var:
try:
width = int(width_var.resolve(context))
context.update({'width': width})
except (template.VariableDoesNotExist, ValueError):
pass
page = request.current_page
if not page or page == 'dummy':
return ''
try:
placeholder = page.placeholders.get(slot=self.name)
except Placeholder.DoesNotExist:
from cms.utils.plugins import get_placeholders
placeholders = get_placeholders(page.get_template())
found = None
for slot in placeholders:
new, created = page.placeholders.get_or_create(slot=slot)
if slot == self.name:
found = new
placeholder = found
if not found:
if settings.DEBUG:
raise Placeholder.DoesNotExist("No placeholder '%s' found for page '%s'" % (self.name, page.pk))
else:
return "<!-- ERROR:cms.utils.plugins.get_placeholders:%s -->" % self.name
content = self.get_content(request, page, context)
if not content:
if self.nodelist_or:
content = self.nodelist_or.render(context)
if self.edit_mode(placeholder, context):
return render_placeholder_toolbar(placeholder, context, content)
return content
return content
def edit_mode(self, placeholder, context):
from cms.utils.placeholder import get_page_from_placeholder_if_exists
request = context['request']
page = get_page_from_placeholder_if_exists(placeholder)
if ("edit" in request.GET or request.session.get("cms_edit", False)) and \
'cms.middleware.toolbar.ToolbarMiddleware' in settings.MIDDLEWARE_CLASSES and \
request.user.is_staff and request.user.is_authenticated() and \
(not page or page.has_change_permission(request)):
return True
return False
def get_content(self, request, page, context):
from cms.utils.plugins import get_placeholders
pages = [page]
if self.inherit:
pages = chain([page], page.get_cached_ancestors(ascending=True))
for page in pages:
template = get_template_from_request(request, page)
placeholder = page.placeholders.filter(slot__in=get_placeholders(template)).get(slot=self.name)
if not get_plugins(request, placeholder):
continue
request.placeholder_media += placeholder.get_media(request, context)
content = render_placeholder(placeholder, context)
if content:
return content
return ''
register.tag('placeholder', do_placeholder)
def do_page_attribute(parser, token):
    error_string = '%r tag requires one argument' % token.contents.split()[0]
try:
# split_contents() knows not to split quoted strings.
bits = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError(error_string)
if len(bits) >= 2:
# tag_name, name
# tag_name, name, page_lookup
page_lookup = len(bits) == 3 and bits[2] or None
return PageAttributeNode(bits[1], page_lookup)
else:
raise template.TemplateSyntaxError(error_string)
class PageAttributeNode(template.Node):
"""This template node is used to output attribute from a page such
as its title or slug.
Synopsis
{% page_attribute "field-name" %}
{% page_attribute "field-name" page_lookup %}
Example
{# Output current page's page_title attribute: #}
{% page_attribute "page_title" %}
{# Output page_title attribute of the page with reverse_id "the_page": #}
{% page_attribute "page_title" "the_page" %}
{# Output slug attribute of the page with pk 10: #}
{% page_attribute "slug" 10 %}
Keyword arguments:
field-name -- the name of the field to output. Use one of:
- title
- menu_title
- page_title
- slug
- meta_description
- meta_keywords
page_lookup -- lookup argument for Page, if omitted field-name of current page is returned.
See _get_page_by_untyped_arg() for detailed information on the allowed types and their interpretation
for the page_lookup argument.
"""
def __init__(self, name, page_lookup=None):
self.name_var = template.Variable(name)
self.page_lookup = None
self.valid_attributes = ["title", "slug", "meta_description", "meta_keywords", "page_title", "menu_title"]
if page_lookup:
self.page_lookup_var = template.Variable(page_lookup)
def render(self, context):
if not 'request' in context:
return ''
var_name = self.name_var.var.lower()
if var_name in self.valid_attributes:
# Variable name without quotes works, but is deprecated
self.name = var_name
else:
self.name = self.name_var.resolve(context)
lang = get_language_from_request(context['request'])
page_lookup_var = getattr(self, 'page_lookup_var', None)
if page_lookup_var:
page_lookup = page_lookup_var.resolve(context)
else:
page_lookup = None
page = _get_page_by_untyped_arg(page_lookup, context['request'], get_site_id(None))
if page == "dummy":
return ''
if page and self.name in self.valid_attributes:
f = getattr(page, "get_"+self.name)
return f(language=lang, fallback=True)
return ''
def __repr__(self):
return "<PageAttribute Node: %s>" % self.name
register.tag('page_attribute', do_page_attribute)
def clean_admin_list_filter(cl, spec):
"""
used in admin to display only these users that have actually edited a page and not everybody
"""
choices = sorted(list(spec.choices(cl)), key=lambda k: k['query_string'])
query_string = None
unique_choices = []
for choice in choices:
if choice['query_string'] != query_string:
unique_choices.append(choice)
query_string = choice['query_string']
return {'title': spec.title(), 'choices' : unique_choices}
clean_admin_list_filter = register.inclusion_tag('admin/filter.html')(clean_admin_list_filter)
def _show_placeholder_for_page(context, placeholder_name, page_lookup, lang=None,
site=None, cache_result=True):
"""
Shows the content of a page with a placeholder name and given lookup arguments in the given language.
This is useful if you want to have some more or less static content that is shared among many pages,
such as a footer.
See _get_page_by_untyped_arg() for detailed information on the allowed types and their interpretation
for the page_lookup argument.
"""
request = context.get('request', False)
site_id = get_site_id(site)
if not request:
return {'content': ''}
if lang is None:
lang = get_language_from_request(request)
content = None
if cache_result:
cache_key = _get_cache_key('_show_placeholder_for_page', page_lookup, lang, site_id)+'_placeholder:'+placeholder_name
content = cache.get(cache_key)
if not content:
page = _get_page_by_untyped_arg(page_lookup, request, site_id)
if not page:
return {'content': ''}
placeholder = page.placeholders.get(slot=placeholder_name)
plugins = get_cmsplugin_queryset(request).filter(placeholder=placeholder, language=lang, placeholder__slot__iexact=placeholder_name, parent__isnull=True).order_by('position').select_related()
c = render_plugins(plugins, context, placeholder)
content = "".join(c)
if cache_result:
cache.set(cache_key, content, settings.CMS_CONTENT_CACHE_DURATION)
if content:
return {'content': mark_safe(content)}
return {'content': ''}
def show_placeholder_by_id(context, placeholder_name, reverse_id, lang=None, site=None):
"""
Show the content of a specific placeholder, from a page found by reverse id, in the given language.
This templatetag is deprecated, replace with `show_placeholder`.
"""
return _show_placeholder_for_page(context, placeholder_name, {'reverse_id': reverse_id}, lang=lang, site=site)
show_placeholder_by_id = register.inclusion_tag('cms/content.html', takes_context=True)(show_placeholder_by_id)
def show_uncached_placeholder_by_id(context, placeholder_name, reverse_id, lang=None, site=None):
"""
Show the uncached content of a specific placeholder, from a page found by reverse id, in the given language.
This templatetag is deprecated, replace with `show_uncached_placeholder`.
"""
return _show_placeholder_for_page(context, placeholder_name, {'reverse_id': reverse_id},
lang=lang, site=site, cache_result=False)
show_uncached_placeholder_by_id = register.inclusion_tag('cms/content.html', takes_context=True)(show_uncached_placeholder_by_id)
def show_placeholder(context, placeholder_name, page_lookup, lang=None, site=None):
"""
Show the content of a specific placeholder, from a page found by pk|reverse_id|dict
or passed to the function, in the given language.
"""
return _show_placeholder_for_page(context, placeholder_name, page_lookup, lang=lang, site=site)
show_placeholder_for_page = register.inclusion_tag('cms/content.html', takes_context=True)(show_placeholder)
def show_uncached_placeholder(context, placeholder_name, page_lookup, lang=None, site=None):
"""
Show the uncached content of a specific placeholder, from a page found by pk|reverse_id|dict
or passed to the function, in the given language.
"""
return _show_placeholder_for_page(context, placeholder_name, page_lookup, lang=lang, site=site, cache_result=False)
show_uncached_placeholder_for_page = register.inclusion_tag('cms/content.html', takes_context=True)(show_uncached_placeholder)
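# Illustrative template usage (placeholder and page names are made up):
#   {% show_placeholder "footer" "footer-page" %}             lookup by reverse_id
#   {% show_placeholder "footer" 10 %}                         lookup by pk
#   {% show_uncached_placeholder "footer" "footer-page" %}     same, bypassing the cache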
def do_plugins_media(parser, token):
args = token.split_contents()
if len(args) > 2:
        raise template.TemplateSyntaxError(
            "Invalid syntax. Expected '{%% %s [page_lookup] %%}'" % args[0])
elif len(args) == 2:
page_lookup = args[1]
else:
page_lookup = None
return PluginsMediaNode(page_lookup)
class PluginsMediaNode(template.Node):
"""
This template node is used to output media for plugins.
eg: {% plugins_media %}
You can also pass the object a page_lookup arg if you want to output media tags for a specific
page other than the current page.
eg: {% plugins_media "gallery" %}
"""
def __init__(self, page_lookup=None):
if page_lookup:
self.page_lookup_var = template.Variable(page_lookup)
def render(self, context):
from cms.plugins.utils import get_plugins_media
if not 'request' in context:
return ''
request = context['request']
plugins_media = None
page_lookup_var = getattr(self, 'page_lookup_var', None)
if page_lookup_var:
page_lookup = page_lookup_var.resolve(context)
page = _get_page_by_untyped_arg(page_lookup, request, get_site_id(None))
plugins_media = get_plugins_media(request, page)
else:
page = request.current_page
if page == "dummy":
return ''
# make sure the plugin cache is filled
            plugins_media = get_plugins_media(request, request._current_page_cache)
if plugins_media:
return plugins_media.render()
else:
return u''
def __repr__(self):
return "<PluginsMediaNode Node: %s>" % getattr(self, 'name', '')
register.tag('plugins_media', do_plugins_media)
|
the-stack_106_19465
|
from Echo import *
from Broadcast import *
from RestPersistBroadcast import *
from RestPersistSendToGroup import *
from RestPersistSendToUser import *
from RestSendToGroup import *
from RestSendToUser import *
from RestBroadcast import *
from SendToClient import *
from SendToGroup import *
from StreamingEcho import *
from FrequentJoinLeaveGroup import *
import argparse
from Util.SettingsHelper import *
from Util.Common import *
def parse_arguments():
arg_type = ArgType()
kind_type = KindType()
scenario_type = ScenarioType()
parser = argparse.ArgumentParser(description='Generate benchmark configuration')
# required
parser.add_argument('-u',
'--unit',
type=int,
required=True,
help="Azure SignalR service unit.")
parser.add_argument('-S',
'--scenario',
required=True,
choices=[scenario_type.echo,
scenario_type.broadcast,
scenario_type.rest_persist_broadcast,
scenario_type.rest_persist_send_to_group,
scenario_type.rest_persist_send_to_user,
scenario_type.rest_broadcast,
scenario_type.rest_send_to_group,
scenario_type.rest_send_to_user,
scenario_type.send_to_client,
scenario_type.send_to_group,
scenario_type.streaming_echo,
scenario_type.frequent_join_leave_group],
help="Scenario, choose from <{}>|<{}>|<{}>|<{}>|<{}>|<{}>|<{}>|<{}>|<{}>|<{}>|<{}>|<{}>"
.format(scenario_type.echo,
scenario_type.broadcast,
scenario_type.rest_persist_broadcast,
scenario_type.rest_persist_send_to_group,
scenario_type.rest_persist_send_to_user,
scenario_type.rest_broadcast,
scenario_type.rest_send_to_group,
scenario_type.rest_send_to_user,
scenario_type.send_to_client,
scenario_type.send_to_group,
scenario_type.streaming_echo,
scenario_type.frequent_join_leave_group))
parser.add_argument('-p',
'--protocol',
required=False,
default=arg_type.protocol_json,
choices=[arg_type.protocol_json,
arg_type.protocol_messagepack],
help="SignalR Hub protocol, choose from <{}>|<{}>"
.format(arg_type.protocol_json,
arg_type.protocol_messagepack))
parser.add_argument('-t',
'--transport',
required=False,
default=arg_type.transport_websockets,
choices=[arg_type.transport_websockets,
arg_type.transport_long_polling,
arg_type.transport_server_sent_event],
help="SignalR connection transport type, choose from: <{}>|<{}>|<{}>".format(
arg_type.transport_websockets, arg_type.transport_long_polling,
arg_type.transport_server_sent_event))
parser.add_argument('-U',
'--url',
required=True,
help="App server Url or connection string (only for REST API test)")
parser.add_argument('-m',
'--use_max_connection',
action='store_true',
help="Flag indicates using max connection or not. Set true to apply 1.5x on normal connections")
# todo: add default value
parser.add_argument('-g', '--group_type', type=str, choices=[arg_type.group_tiny, arg_type.group_small,
arg_type.group_big], default=arg_type.group_tiny,
help="Group type, choose from <{}>|<{}>|<{}>".format(arg_type.group_tiny, arg_type.group_small,
arg_type.group_big))
# todo: set default value
parser.add_argument('-ms', '--message_size', type=int, default=2*1024, help="Message size")
# todo: set default value
parser.add_argument('-M', '--module', help='Plugin name', default='Plugin.Microsoft.Azure.SignalR.Benchmark.\
SignalRBenchmarkPlugin, Plugin.Microsoft.Azure.SignalR.Benchmark')
# todo: set default value
parser.add_argument('-s', '--settings', type=str, default='settings.yaml', help='Settings from different unit')
parser.add_argument('-d', '--duration', type=int, default=240, help='Duration to run (second)')
parser.add_argument('-i', '--interval', type=int, default=1000, help='Interval for message sending (millisecond)')
parser.add_argument('-c', '--config_save_path', default='config.yaml',
help='Path of output benchmark configuration')
# args for conditional stop
parser.add_argument('-cc', '--criteria_max_fail_connection_amount', type=int, default=100, help='Criteria for max \
failed connection amount')
parser.add_argument('-cp', '--criteria_max_fail_connection_percentage', type=float, default=0.01, help='Criteria \
for max failed connection percentage')
parser.add_argument('-cs', '--criteria_max_fail_sending_percentage', type=float, default=0.01, help='Criteria \
for max failed sending percentage')
# args for statistics collector
parser.add_argument('-so', '--statistics_output_path', default='counters.txt',
help='Path to counters which record the statistics while running benchmark')
parser.add_argument('-si', '--statistic_interval', type=int, default=1000, help='Interval for collecting intervals')
parser.add_argument('-w', '--wait_time', type=int, default=15000, help='Waiting time for each epoch')
parser.add_argument('-lm', '--statistic_latency_max', type=int, default=1000, help='Latency max of statistics')
parser.add_argument('-ls', '--statistic_latency_step', type=int, default=100, help='Latency step of statistics')
parser.add_argument('-csp', '--connection_percentile_list', type=str, default='0.5,0.9,0.95,0.99', help='Specify the percentile list for connection stat')
# group config mode
parser.add_argument('-gm', '--group_config_mode', choices=[arg_type.group_config_mode_group,
arg_type.group_config_mode_connection],
default=arg_type.group_config_mode_connection, help='Group configuration mode')
parser.add_argument('-ct', '--connection_type', type=str,
choices=[arg_type.connection_type_core,
arg_type.connection_type_aspnet,
arg_type.connection_type_rest_direct],
default=arg_type.connection_type_core,
help='Specify the connection type: Core, AspNet, or CoreDirect')
# kinds: perf, longrun
parser.add_argument('-k', '--kind', type=str,
choices=[kind_type.perf, kind_type.longrun],
default=kind_type.perf,
help="Specify the kind of benchmark: perf or longrun, default is perf")
# streaming
parser.add_argument('-sic', '--streaming_item_count', type=int, default=2, help='Streaming item count')
parser.add_argument('-sisi', '--streaming_item_send_interval', type=int, default=0, help='Streaming item sending interval')
# args
args = parser.parse_args()
# unit convert from second to millisecond
args.duration = args.duration * 1000
return args
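# Illustrative invocation (values are made up; the concrete -S/-t/-p literals
# come from the ScenarioType/ArgType helpers imported above):
#   python <this script> -u 1 -S <scenario> -U http://localhost:8080/hub \
#       -d 240 -i 1000 -s settings.yaml -c config.yaml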
def main():
args = parse_arguments()
# parse settings
scenario_config_collection = parse_settings(args.settings)
# constant config
constant_config = ConstantConfig(args.module,
args.wait_time,
args.config_save_path,
args.criteria_max_fail_connection_amount,
args.criteria_max_fail_connection_percentage,
args.criteria_max_fail_sending_percentage)
# statistics config
statistics_config = StatisticsConfig(args.statistics_output_path,
args.statistic_interval,
args.statistic_latency_max,
args.statistic_latency_step,
args.connection_percentile_list)
# connection config
connection_config = ConnectionConfig(args.url, args.protocol, args.transport)
# determine settings
scenario_config = determine_scenario_config(scenario_config_collection,
args.unit,
args.scenario,
args.transport,
args.protocol,
args.use_max_connection,
args.message_size,
args.group_type,
args.group_config_mode,
args.streaming_item_count,
args.streaming_item_send_interval)
# basic sending config
sending_config = SendingConfig(args.duration, args.interval, args.message_size)
kind = PERF_KIND
if args.kind == "longrun":
kind = LONGRUN_KIND
lst = [word[0].upper() + word[1:] for word in args.scenario.split()]
func = "".join(lst)
callfunc='''{func_name}(sending_config,
scenario_config,
connection_config,
statistics_config,
constant_config,
args.connection_type,
{kind}).generate_config()'''.format(func_name=func, kind=kind)
eval(callfunc)
if __name__ == "__main__":
main()
|
the-stack_106_19466
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.db.models import Count, F
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.html import escape
from django.utils.safestring import mark_safe
from hc.accounts.models import Profile, Project
@mark_safe
def _format_usage(num_checks, num_channels):
result = ""
if num_checks == 0:
result += "0 checks, "
elif num_checks == 1:
result += "1 check, "
else:
result += f"<strong>{num_checks} checks</strong>, "
if num_channels == 0:
result += "0 channels"
elif num_channels == 1:
result += "1 channel"
else:
result += f"<strong>{num_channels} channels</strong>"
return result
class Fieldset:
name = None
fields = []
@classmethod
def tuple(cls):
return (cls.name, {"fields": cls.fields})
class ProfileFieldset(Fieldset):
name = "User Profile"
fields = (
"email",
"reports_allowed",
"next_report_date",
"nag_period",
"next_nag_date",
"deletion_notice_date",
"token",
"sort",
)
class TeamFieldset(Fieldset):
name = "Team"
fields = (
"team_limit",
"check_limit",
"ping_log_limit",
"sms_limit",
"sms_sent",
"last_sms_date",
)
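# Illustrative only: Fieldset.tuple() builds the (name, options) pair that the
# Django admin expects in `fieldsets`, e.g.
#   ProfileFieldset.tuple() == ("User Profile", {"fields": ("email", "reports_allowed", ...)})
#   TeamFieldset.tuple()    == ("Team", {"fields": ("team_limit", "check_limit", ...)})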
class NumChecksFilter(admin.SimpleListFilter):
title = "Checks"
parameter_name = "num_checks"
def lookups(self, request, model_admin):
return (
("20", "more than 20"),
("50", "more than 50"),
("100", "more than 100"),
("500", "more than 500"),
("1000", "more than 1000"),
)
def queryset(self, request, queryset):
if not self.value():
return
value = int(self.value())
return queryset.filter(num_checks__gt=value)
@admin.register(Profile)
class ProfileAdmin(admin.ModelAdmin):
class Media:
css = {"all": ("css/admin/profiles.css",)}
readonly_fields = ("user", "email")
search_fields = ["id", "user__email"]
list_per_page = 30
list_select_related = ("user",)
list_display = (
"id",
"email",
"date_joined",
"last_active_date",
"projects",
"checks",
"invited",
"sms",
"reports_allowed",
)
list_filter = (
"user__date_joined",
"last_active_date",
"reports_allowed",
"check_limit",
NumChecksFilter,
)
fieldsets = (ProfileFieldset.tuple(), TeamFieldset.tuple())
def get_queryset(self, request):
qs = super(ProfileAdmin, self).get_queryset(request)
qs = qs.prefetch_related("user__project_set")
qs = qs.annotate(num_members=Count("user__project__member", distinct=True))
qs = qs.annotate(num_checks=Count("user__project__check", distinct=True))
qs = qs.annotate(plan=F("user__subscription__plan_name"))
return qs
@mark_safe
def email(self, obj):
s = escape(obj.user.email)
if obj.plan:
return "<span title='%s'>%s</span>" % (obj.plan, s)
return s
def date_joined(self, obj):
return obj.user.date_joined
@mark_safe
def projects(self, obj):
return render_to_string("admin/profile_list_projects.html", {"profile": obj})
def checks(self, obj):
return "%d of %d" % (obj.num_checks, obj.check_limit)
def invited(self, obj):
return "%d of %d" % (obj.num_members, obj.team_limit)
def sms(self, obj):
return "%d of %d" % (obj.sms_sent, obj.sms_limit)
@admin.register(Project)
class ProjectAdmin(admin.ModelAdmin):
readonly_fields = ("code", "owner")
list_select_related = ("owner",)
list_display = ("id", "name_", "users", "usage", "switch")
search_fields = ["id", "name", "owner__email"]
class Media:
css = {"all": ("css/admin/projects.css",)}
def get_queryset(self, request):
qs = super(ProjectAdmin, self).get_queryset(request)
qs = qs.annotate(num_channels=Count("channel", distinct=True))
qs = qs.annotate(num_checks=Count("check", distinct=True))
qs = qs.annotate(num_members=Count("member", distinct=True))
return qs
def name_(self, obj):
if obj.name:
return obj.name
return "Default Project for %s" % obj.owner.email
@mark_safe
def users(self, obj):
if obj.num_members == 0:
return obj.owner.email
else:
return render_to_string("admin/project_list_team.html", {"project": obj})
def email(self, obj):
return obj.owner.email
def usage(self, obj):
return _format_usage(obj.num_checks, obj.num_channels)
@mark_safe
def switch(self, obj):
url = reverse("hc-checks", args=[obj.code])
return "<a href='%s'>Show Checks</a>" % url
class HcUserAdmin(UserAdmin):
actions = ["send_report", "send_nag"]
list_display = (
"id",
"email",
"usage",
"date_joined",
"last_login",
"is_staff",
)
list_display_links = ("id", "email")
list_filter = ("last_login", "date_joined", "is_staff", "is_active")
ordering = ["-id"]
def get_queryset(self, request):
qs = super().get_queryset(request)
qs = qs.annotate(num_checks=Count("project__check", distinct=True))
qs = qs.annotate(num_channels=Count("project__channel", distinct=True))
return qs
@mark_safe
def usage(self, user):
return _format_usage(user.num_checks, user.num_channels)
def send_report(self, request, qs):
for user in qs:
user.profile.send_report()
self.message_user(request, "%d email(s) sent" % qs.count())
def send_nag(self, request, qs):
for user in qs:
user.profile.send_report(nag=True)
self.message_user(request, "%d email(s) sent" % qs.count())
admin.site.unregister(User)
admin.site.register(User, HcUserAdmin)
|
the-stack_106_19468
|
import re
import shutil
import subprocess
import sys
import time
from pathlib import Path
from random import random
from pythonfuzz.main import PythonFuzz
class Result(object):
"""
Use the output from the tool to collect the information about its execution.
Very sensitive to the format of the output as to whether it collects information or not.
"""
    coverage_re = re.compile(r'cov: (\d+)')
    corpus_re = re.compile(r'corp: (\d+)')
    speed_re = re.compile(r'exec/s: (\d+)')
    memory_re = re.compile(r'rss: (\d+\.\d+)')
    count_re = re.compile(r'^#(\d+)')
    count2_re = re.compile(r'^did (\d+) runs, stopping now')
    exception_re = re.compile(r'^Exception: (.*)')
    failfile_re = re.compile(r'^sample written to (.*)')
def __init__(self):
self.coverage = None
self.corpus = None
self.speed = None
self.memory = None
self.count = None
self.time_start = None
self.time_end = None
self.fail_file = None
self.exception = None
self.lines = []
self.rc = None
def record_start(self):
self.time_start = time.time()
def record_end(self):
self.time_end = time.time()
@property
def time_duration(self):
"""
Number of seconds the execution took, or None if not known
"""
if self.time_start and self.time_end:
return self.time_end - self.time_start
if self.time_start:
return time.time() - self.time_start
return None
def process_output(self, line):
match = self.coverage_re.search(line)
if match:
self.coverage = int(match.group(1))
match = self.corpus_re.search(line)
if match:
self.corpus = int(match.group(1))
match = self.speed_re.search(line)
if match:
self.speed = int(match.group(1))
match = self.memory_re.search(line)
if match:
self.memory = float(match.group(1))
match = self.count_re.search(line) or self.count2_re.search(line)
if match:
self.count = int(match.group(1))
match = self.exception_re.search(line)
if match:
self.exception = match.group(1)
match = self.failfile_re.search(line)
if match:
self.fail_file = match.group(1)
self.lines.append(line)
def show(self, show_lines=False, indent=''):
"""
Show the status of this result.
"""
print("{}Executions : {}".format(indent, self.count))
print("{}Corpus : {}".format(indent, self.corpus))
print("{}Coverage : {}".format(indent, self.coverage))
print("{}Final speed : {}/s".format(indent, self.speed))
if self.memory:
print("{}Memory : {:.2f} MB".format(indent, self.memory))
print("{}Runtime : {:.2f} s".format(indent, self.time_duration))
if self.time_duration and self.count:
print("{}Overall speed : {:.2f}/s".format(indent, self.count / self.time_duration))
print("{}Return code : {}".format(indent, self.rc))
if self.exception:
print("{}Exception : {}".format(indent, self.exception))
if self.fail_file:
print("{}Failed filename : {}".format(indent, self.fail_file))
if show_lines or self.rc:
print("{}Lines:".format(indent))
for line in self.lines:
print("{} {}".format(indent, line.strip('\n')))
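# Minimal usage sketch (the sample output line is made up to match the regular
# expressions above):
#   result = Result()
#   result.record_start()
#   result.process_output('#1024 cov: 57 corp: 12 exec/s: 341 rss: 101.5\n')
#   result.record_end()
#   result.show()   # count=1024, coverage=57, speed=341/s, memory=101.50 MB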
def run(cwd, workdir, python='python', log='/dev/null'):
"""
Run the script, capturing the output and processing it.
"""
cmd = [python, '-m', 'rial.main', '--workdir', workdir]
result = Result()
with open(log, 'w') as log_fh:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=cwd)
result.record_start()
for line in proc.stdout:
line = line.decode('utf-8', 'replace')
result.process_output(line)
result.record_end()
proc.wait()
result.rc = proc.returncode
return result
@PythonFuzz
def fuzz(buf):
try:
string = buf.decode("utf-8")
except UnicodeDecodeError:
return
cur_directory = Path(__file__.replace("fuzzing.py", ""))
tmp_directory = cur_directory.joinpath(f"tmp_{random()}")
tmp_directory.mkdir(parents=False, exist_ok=False)
src_directory = tmp_directory.joinpath("src")
src_directory.mkdir(parents=False, exist_ok=False)
with open(src_directory.joinpath("main.rial"), "w") as file:
file.write(string)
try:
result = run(str(cur_directory.joinpath("..")), tmp_directory, sys.executable)
result.show(indent=' ')
except:
raise
finally:
shutil.rmtree(tmp_directory)
if __name__ == '__main__':
fuzz(dirs=[str(Path(__file__.replace("fuzzing.py", "").replace("/rial", "")).joinpath("fuzzing_out")),
str(Path(__file__.replace("fuzzing.py", "").replace("/rial", "")).joinpath("fuzzing_in"))])
|
the-stack_106_19469
|
import discord
import os
import requests
import json
client = discord.Client()
def get_quote():
response = requests.get("https://zenquotes.io/api/random")
json_data = json.loads(response.text)
quote = json_data[0]['q'] + " -" + json_data[0]['a']
return(quote)
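# Illustrative only: the zenquotes.io response parsed above is a JSON list such
# as [{"q": "<quote text>", "a": "<author>", ...}], so json_data[0]['q'] is the
# quote and json_data[0]['a'] the author.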
@client.event
async def on_ready():
print('We have logged in as {0.user}'.format(client))
@client.event
async def on_message(message):
if message.author == client.user:
return
elif message.content.startswith('$inspire'):
quote = get_quote()
await message.channel.send('Some Inspiration:')
await message.channel.send(quote)
elif message.content.startswith('$'):
await message.channel.send('That went over my head!')
elif message.content.startswith('Hey You'):
await message.channel.send('How are you ' + str(message.author) +'?')
client.run(os.getenv('TOKEN'))
|
the-stack_106_19470
|
from string import printable
from typing import (Any, Callable, Generic, Iterable, NoReturn, Tuple, TypeVar,
Union)
from . import (Dict, Immutable, List, aio_trampoline, effect, either, maybe,
trampoline)
try:
from hypothesis.strategies import (
booleans,
builds,
composite,
dictionaries,
floats,
integers,
just,
lists as lists_,
one_of,
recursive,
text,
SearchStrategy
)
except ImportError:
raise ImportError(
'Could not import hypothesis. To use pfun.hypothesis_strategies, '
'install pfun with \n\n\tpip install pfun[test]'
)
A = TypeVar('A')
def _everything(allow_nan: bool = False) -> Tuple[SearchStrategy[int],
SearchStrategy[bool],
SearchStrategy[str],
SearchStrategy[float]]:
return integers(), booleans(), text(), floats(allow_nan=allow_nan)
def anything(allow_nan: bool = False
) -> SearchStrategy[Union[int, bool, str, float]]:
"""
Create a search strategy that produces one of int, bool, str or floats.
Args:
Allow_nan: whether to allow nan values
Return:
Search strategy that produces ints, bools, str or floats
"""
return one_of(*_everything(allow_nan))
CO = TypeVar('CO', covariant=True)
class Unary(Immutable, Generic[CO]):
return_value: CO
def __repr__(self):
return f'lambda _: {repr(self.return_value)}'
def __call__(self, _: object) -> CO:
return self.return_value
def unaries(return_strategy: SearchStrategy[A]
) -> SearchStrategy[Callable[[object], A]]:
"""
Create a search strategy that produces functions of 1 argument
Example:
>>> f = unaries(integers()).example()
>>> f
<function ...>
>>> f(None)
2
Args:
return_strategy: strategy used to draw return values
Return:
Search strategy that produces callables of 1 argument
"""
return builds(Unary, return_strategy)
def maybes(value_strategy: SearchStrategy[A]
) -> SearchStrategy[maybe.Maybe[A]]:
"""
Create a search strategy that produces `pfun.maybe.Maybe` values
Example:
>>> maybes(integers()).example()
Just(1)
Args:
value_strategy: search strategy to draw values from
Return:
search strategy that produces `pfun.maybe.Maybe` values
"""
justs = builds(maybe.Just, value_strategy)
nothings = just(maybe.Nothing())
return one_of(justs, nothings)
def rights(value_strategy: SearchStrategy[A]
) -> SearchStrategy[either.Right[A]]:
"""
Create a search strategy that produces `pfun.either.Right` values
Args:
value_strategy: search strategy to draw values from
Example:
>>> rights(integers()).example()
Right(0)
Return:
search strategy that produces `pfun.either.Right` values
"""
return builds(either.Right, value_strategy)
def lefts(value_strategy: SearchStrategy[A]) -> SearchStrategy[either.Left[A]]:
"""
Create a search strategy that produces `pfun.either.Left` values
Args:
value_strategy: search strategy to draw values from
Example:
>>> lefts(integers()).example()
Left(0)
Return:
search strategy that produces `pfun.either.Left` values
"""
return builds(either.Left, value_strategy)
def eithers(value_strategy: SearchStrategy[A]
) -> SearchStrategy[either.Either[A, A]]:
"""
Create a search strategy that produces `pfun.either.Either` values
Args:
value_strategy: search strategy to draw values from
Example:
>>> s = eithers(integers())
>>> s.example()
Right(0)
>>> s.example()
Left(0)
Return:
search strategy that produces `pfun.either.Either` values
"""
return one_of(lefts(value_strategy), rights(value_strategy))
def nullaries(return_strategy: SearchStrategy[A]
) -> SearchStrategy[Callable[[], A]]:
"""
Create a search strategy that produces functions of 0 arguments
Args:
return_strategy: strategy used to draw return values
Example:
>>> f = unaries(integers()).example()
>>> f
<function ...>
>>> f()
2
Return:
Search strategy that produces callables of 0 arguments
"""
def f(v):
return lambda: v
return builds(f, return_strategy)
def trampolines(value_strategy: SearchStrategy[A]
) -> SearchStrategy[trampoline.Trampoline[A]]:
"""
Create a strategy that produces `pfun.trampoline.Trampoline` instances
Args:
value_strategy: strategy used to draw result values
Example:
>>> trampolines(integers()).example()
Call(thunk=<function ... at 0x1083d2d40>)
Return:
search strategy that produces `pfun.trampoline.Trampoline` instances
"""
dones = builds(trampoline.Done, value_strategy)
@composite
def call(draw):
t = draw(trampolines(value_strategy))
return trampoline.Call(lambda: t)
@composite
def and_then(draw):
t = draw(trampolines(value_strategy))
cont = lambda _: t
return trampoline.AndThen(draw(trampolines(value_strategy)), cont)
return one_of(dones, call(), and_then())
def aio_trampolines(value_strategy: SearchStrategy[A]
) -> SearchStrategy[trampoline.Trampoline[A]]:
"""
Create a strategy that produces `pfun.aio_trampoline.Trampoline` instances
Args:
value_strategy: strategy used to draw result values
Example:
>>> aio_trampolines(integers()).example()
Call(thunk=<function ... at 0x1083d2d40>)
Return:
search strategy that produces \
`pfun.aio_trampoline.Trampoline` instances
"""
dones = builds(aio_trampoline.Done, value_strategy)
@composite
def call(draw):
t = draw(aio_trampolines(value_strategy))
async def f():
return t
return aio_trampoline.Call(f)
@composite
def and_then(draw):
t = draw(aio_trampolines(value_strategy))
cont = lambda _: t
return aio_trampoline.AndThen(
draw(aio_trampolines(value_strategy)), cont
)
return one_of(dones, call(), and_then())
def lists(elements: SearchStrategy[A], min_size=0):
"""
Create a search strategy that produces `pfun.list.List` instances
Args:
elements: strategy used to draw elements of the list
min_size: minimum size of the lists
Example:
>>> lists(integers()).example()
List((0,))
Return:
search strategy that produces `pfun.list.List` instances
"""
return builds(List, lists_(elements, min_size=min_size))
B = TypeVar('B')
def dicts(
keys: SearchStrategy[A],
values: SearchStrategy[B],
min_size: int = 0,
max_size: int = None
) -> SearchStrategy[Dict[A, B]]:
"""
Create a search strategy that produces `pfun.dict.Dict` instances
Args:
keys: search strategy used to draw keys for the Dict instances
values: search strategy used to draw values for the Dict instances
min_size: minimum size of the Dicts
max_size: max size of the Dicts
Example:
>>> dicts(text(), integers()).example()
Dict({'0': 0})
Return:
search strategy that produces `pfun.dict.Dict` instances
"""
return builds(
Dict, dictionaries(keys, values, min_size=min_size, max_size=max_size)
)
TestEffect = effect.Effect[object, Any, A]
class TestException(Exception):
"""
Dummy exception used to avoid catching any exceptions
unintentionally in tests.
"""
def effects(
value_strategy: SearchStrategy[A],
include_errors: bool = False,
max_size: int = 10,
max_leaves: int = 10
) -> SearchStrategy[TestEffect[A]]:
"""
Create a search strategy that produces `pfun.effect.Effect` instances
Args:
value_strategy: search strategy used to draw success values
include_errors: whether to include effects that fail
max_size: max size of effects that produces iterables \
(such as `pfun.effect.gather`)
max_leaves: max number of leaf effects \
(`pfun.effect.success`, `pfun.effect.from_callable` etc) \
to be drawn
Example:
>>> e = effects(integers()).example()
>>> e
success(0)
>>> e.run(None)
0
Return:
search strategy that produces `pfun.effect.Effect` instances
"""
def extend(children: SearchStrategy[TestEffect[A]]
) -> SearchStrategy[TestEffect[A]]:
maps: SearchStrategy[TestEffect[A]] = children.flatmap(
lambda e: unaries(value_strategy).map(lambda f: e.map(f))
)
and_then = children.flatmap(
lambda e: unaries(children).map(lambda f: e.and_then(f))
)
discard_and_then = children.flatmap(
lambda e: children.map(lambda e2: e.discard_and_then(e2))
)
either = children.map(lambda e: e.either())
recover = children.flatmap(
lambda e: children.map(lambda e2: e.recover(lambda _: e2))
)
memoize = children.map(lambda e: e.memoize())
ensure = children.flatmap(
lambda e: children.map(lambda e2: e.ensure(e2))
)
with_repr = children.flatmap(
lambda e: text(printable).map(lambda s: e.with_repr(s))
)
gather: SearchStrategy[TestEffect[Iterable[A]]] = lists_(
children,
            max_size=max_size).map(
effect.gather
)
gather_async: SearchStrategy[TestEffect[Iterable[A]]] = lists_(
children,
max_size=max_size
).map(
effect.gather_async
)
lift = unaries(value_strategy).flatmap(
lambda f: children.map(lambda e: effect.lift(f)(e))
)
lift_io_bound = unaries(value_strategy).flatmap(
lambda f: children.map(lambda e: effect.lift_io_bound(f)(e))
)
lift_cpu_bound = unaries(value_strategy).flatmap(
lambda f: children.map(lambda e: effect.lift_cpu_bound(f)(e))
)
combine = unaries(value_strategy).flatmap(
lambda f: children.map(lambda e: effect.combine(e)(f))
)
combine_io_bound = unaries(value_strategy).flatmap(
lambda f: children.map(lambda e: effect.combine_io_bound(e)(f))
)
combine_cpu_bound = unaries(value_strategy).flatmap(
lambda f: children.map(lambda e: effect.combine_cpu_bound(e)(f))
)
race = children.map(lambda e: e.race(e))
return one_of(
maps,
and_then,
discard_and_then,
either,
recover,
memoize,
ensure,
with_repr,
gather,
gather_async,
lift,
lift_io_bound,
lift_cpu_bound,
combine,
combine_io_bound,
combine_cpu_bound,
race
)
success = builds(effect.success, value_strategy)
depends: SearchStrategy[effect.Effect[Any, NoReturn, Any]
] = builds(effect.depend)
from_callable: SearchStrategy[TestEffect[A]
] = unaries(rights(value_strategy)
).map(effect.from_callable)
from_io_bound_callable: SearchStrategy[TestEffect[A]] = unaries(
rights(value_strategy)
).map(effect.from_io_bound_callable)
from_cpu_bound_callable: SearchStrategy[TestEffect[A]] = unaries(
rights(value_strategy)
).map(effect.from_cpu_bound_callable)
catch: SearchStrategy[TestEffect[A]] = unaries(
value_strategy
).flatmap(
lambda f: value_strategy.map(
lambda a: effect.catch(TestException)(f)(a))
)
catch_io_bound: SearchStrategy[TestEffect[A]] = unaries(
value_strategy
).flatmap(
lambda f: value_strategy.map(
lambda a: effect.catch_io_bound(TestException)(f)(a)
)
)
catch_cpu_bound: SearchStrategy[TestEffect[A]] = unaries(
value_strategy
).flatmap(
lambda f: value_strategy.map(
lambda a: effect.catch_cpu_bound(TestException)(f)(a)
)
)
purify = nullaries(value_strategy).map(lambda f: effect.purify(f)())
base = (
success
| from_callable
| from_io_bound_callable
| from_cpu_bound_callable
| depends
| catch
| catch_io_bound
| catch_cpu_bound
| purify
)
if include_errors:
errors = builds(effect.error, value_strategy)
base = base | errors
return recursive(base, extend, max_leaves=max_leaves)
|
the-stack_106_19471
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
archivebot.py - discussion page archiving bot.
usage:
python pwb.py archivebot [OPTIONS] TEMPLATE_PAGE
Bot examines backlinks (Special:WhatLinksHere) to TEMPLATE_PAGE.
Then goes through all pages (unless a specific page is specified using options)
and archives old discussions. This is done by breaking a page into threads,
then scanning each thread for timestamps. Threads older than a specified
threshold are then moved to another page (the archive), which can be named
either based on the thread's name, or the name can contain a counter which
will be incremented when the archive reaches a certain size.
Transcluded template may contain the following parameters:
{{TEMPLATE_PAGE
|archive =
|algo =
|counter =
|maxarchivesize =
|minthreadsleft =
|minthreadstoarchive =
|archiveheader =
|key =
}}
Meanings of parameters are:
archive Name of the page to which archived threads will be put.
Must be a subpage of the current page. Variables are
supported.
algo Specifies the maximum age of a thread. Must be
in the form old(<delay>) where <delay> specifies
the age in seconds (s), hours (h), days (d),
weeks (w), or years (y) like 24h or 5d. Default is
old(24h).
counter The current value of a counter which could be assigned as
variable. Will be updated by bot. Initial value is 1.
maxarchivesize The maximum archive size before incrementing the counter.
Value can be given with appending letter like K or M
which indicates KByte or MByte. Default value is 200K.
minthreadsleft Minimum number of threads that should be left on a page.
Default value is 5.
minthreadstoarchive The minimum number of threads to archive at once. Default
value is 2.
archiveheader Content that will be put on new archive pages as the
header. This parameter supports the use of variables.
Default value is {{talkarchive}}
key A secret key that (if valid) allows archives not to be
subpages of the page being archived.
Variables below can be used in the value for "archive" in the template above:
%(counter)d the current value of the counter
%(year)d year of the thread being archived
%(isoyear)d ISO year of the thread being archived
%(isoweek)d ISO week number of the thread being archived
%(semester)d semester term of the year of the thread being archived
%(quarter)d quarter of the year of the thread being archived
%(month)d month (as a number 1-12) of the thread being archived
%(monthname)s localized name of the month above
%(monthnameshort)s first three letters of the name above
%(week)d week number of the thread being archived
The ISO calendar starts with the Monday of the week which has at least four
days in the new Gregorian calendar. If January 1st is between Monday and
Thursday (inclusive), the first week of that year started on the Monday of that
week, which is in the year before if January 1st is not a Monday. If it's
between Friday and Sunday (inclusive), the following week is then the first week
of the year. So up to three days are still counted as the year before.
See also:
- http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
- https://docs.python.org/3.4/library/datetime.html#datetime.date.isocalendar
Options (may be omitted):
-help show this help message and exit
-calc:PAGE calculate key for PAGE and exit
-file:FILE load list of pages from FILE
-force override security options
-locale:LOCALE switch to locale LOCALE
-namespace:NS only archive pages from a given namespace
-page:PAGE archive a single PAGE, default ns is a user talk page
-salt:SALT specify salt
"""
#
# (C) Pywikibot team, 2006-2020
#
# Distributed under the terms of the MIT license.
#
import datetime
import locale
import math
import os
import re
import time
from collections import defaultdict, OrderedDict
from hashlib import md5
from math import ceil
from typing import Any, Optional, Pattern
import pywikibot
from pywikibot.date import apply_month_delta
from pywikibot import i18n
from pywikibot.textlib import (extract_sections, findmarker, TimeStripper,
to_local_digits)
from pywikibot.tools import (
deprecated, FrozenDict, issue_deprecation_warning, PYTHON_VERSION,
)
if PYTHON_VERSION >= (3, 9):
List = list
Set = set
Tuple = tuple
else:
from typing import List, Set, Tuple
ShouldArchive = Tuple[str, str]
Size = Tuple[int, str]
ZERO = datetime.timedelta(0)
MW_KEYS = FrozenDict({
's': 'seconds',
'h': 'hours',
'd': 'days',
'w': 'weeks',
'y': 'years',
# 'months' and 'minutes' were removed because confusion outweighs merit
}, 'MW_KEYS is a dict constant')
class ArchiveBotSiteConfigError(pywikibot.Error):
"""There is an error originated by archivebot's on-site configuration."""
class MalformedConfigError(ArchiveBotSiteConfigError):
"""There is an error in the configuration template."""
class MissingConfigError(ArchiveBotSiteConfigError):
"""
The config is missing in the header.
It's in one of the threads or transcluded from another page.
"""
class AlgorithmError(MalformedConfigError):
"""Invalid specification of archiving algorithm."""
class ArchiveSecurityError(ArchiveBotSiteConfigError):
"""
Page title is not a valid archive of page being archived.
The page title is neither a subpage of the page being archived,
nor does it match the key specified in the archive configuration template.
"""
def str2localized_duration(site, string: str) -> str:
"""
Localise a shorthand duration.
Translates a duration written in the shorthand notation (ex. "24h", "7d")
into an expression in the local wiki language ("24 hours", "7 days").
"""
key, duration = checkstr(string)
template = site.mediawiki_message(MW_KEYS[key])
if template:
# replace plural variants
exp = i18n.translate(site.code, template, {'$1': int(duration)})
return exp.replace('$1', to_local_digits(duration, site.code))
else:
return to_local_digits(string, site.code)
def str2time(string: str, timestamp=None) -> datetime.timedelta:
"""
Return a timedelta for a shorthand duration.
@param string: a string defining a time period:
300s - 300 seconds
36h - 36 hours
7d - 7 days
2w - 2 weeks (14 days)
1y - 1 year
@param timestamp: a timestamp to calculate a more accurate duration offset
used by years
@type timestamp: datetime.datetime
@return: the corresponding timedelta object
"""
key, duration = checkstr(string)
if duration.isdigit():
duration = int(duration)
else:
key = ''
if key in ['d', 's', 'h', 'w']: # days, seconds, hours, weeks
return datetime.timedelta(**{MW_KEYS[key]: duration})
if key == 'y': # years
days = math.ceil(duration * 365.25)
duration *= 12
else:
raise MalformedConfigError(
'Unrecognized parameter in template: {0}'.format(string))
if timestamp:
return apply_month_delta(
timestamp.date(), month_delta=duration) - timestamp.date()
else:
return datetime.timedelta(days=days)
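# Illustrative results (added for clarity, not part of the original script):
#   str2time('36h') -> datetime.timedelta(hours=36)
#   str2time('2w')  -> datetime.timedelta(weeks=2)
#   str2time('1y')  -> datetime.timedelta(days=366)  # ceil(365.25) without a timestamp
#   str2time('1y', ts) -> uses apply_month_delta() for a calendar-accurate
#   year relative to the date of ts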
def checkstr(string: str) -> Tuple[str, str]:
"""
Return the key and duration extracted from the string.
@param string: a string defining a time period:
300s - 300 seconds
36h - 36 hours
7d - 7 days
2w - 2 weeks (14 days)
1y - 1 year
@return: key and duration extracted form the string
"""
if string.isdigit():
key = 's'
duration = string
issue_deprecation_warning('Time period without qualifier',
string + key, 1, UserWarning,
since='20161009')
else:
key = string[-1]
duration = string[:-1]
return key, duration
def str2size(string: str) -> Size:
"""
Return a size for a shorthand size.
Accepts a string defining a size:
1337 - 1337 bytes
150K - 150 kilobytes
2M - 2 megabytes
Returns a tuple (size,unit), where size is an integer and unit is
'B' (bytes) or 'T' (threads).
"""
match = re.fullmatch(r'(\d{1,3}(?: \d{3})+|\d+) *([BkKMT]?)', string)
if not match:
raise MalformedConfigError("Couldn't parse size: {}".format(string))
val, unit = (int(match.group(1).replace(' ', '')), match.group(2))
if unit == 'M':
val *= 1024
unit = 'K'
if unit in ('K', 'k'):
val *= 1024
if unit != 'T':
unit = 'B'
return val, unit
def template_title_regex(tpl_page: pywikibot.Page) -> Pattern:
"""
Return a regex that matches to variations of the template title.
It supports the transcluding variant as well as localized namespaces and
case-insensitivity depending on the namespace.
@param tpl_page: The template page
@type tpl_page: pywikibot.page.Page
"""
ns = tpl_page.site.namespaces[tpl_page.namespace()]
marker = '?' if ns.id == 10 else ''
title = tpl_page.title(with_ns=False)
if ns.case != 'case-sensitive':
title = '[{}{}]{}'.format(re.escape(title[0].upper()),
re.escape(title[0].lower()),
re.escape(title[1:]))
else:
title = re.escape(title)
return re.compile(r'(?:(?:%s):)%s%s' % ('|'.join(ns), marker, title))
def calc_md5_hexdigest(txt, salt) -> str:
"""Return md5 hexdigest computed from text and salt."""
s = md5()
s.update(salt.encode('utf-8'))
s.update(b'\n')
s.update(txt.encode('utf8'))
s.update(b'\n')
return s.hexdigest()
class TZoneUTC(datetime.tzinfo):
"""Class building a UTC tzinfo object."""
def utcoffset(self, dt) -> datetime.timedelta:
"""Subclass implementation, return timedelta(0)."""
return ZERO
def tzname(self, dt) -> str:
"""Subclass implementation."""
return 'UTC'
def dst(self, dt) -> datetime.timedelta:
"""Subclass implementation, return timedelta(0)."""
return ZERO
def __repr__(self) -> str:
"""Return a string representation."""
return '{}()'.format(self.__class__.__name__)
class DiscussionThread:
"""
An object representing a discussion thread on a page.
It represents something that is of the form:
== Title of thread ==
Thread content here. ~~~~
:Reply, etc. ~~~~
"""
def __init__(self, title: str, _now=None, timestripper=None) -> None:
"""Initializer."""
if _now is not None:
issue_deprecation_warning(
'Argument "now" in DiscussionThread.__init__()',
warning_class=FutureWarning,
since='20200727')
assert timestripper is not None
self.title = title
self.ts = timestripper
self.code = self.ts.site.code
self.content = ''
self.timestamp = None
def __repr__(self) -> str:
"""Return a string representation."""
return '{}("{}",{} bytes)'.format(self.__class__.__name__, self.title,
len(self.content.encode('utf-8')))
def feed_line(self, line: str) -> None:
"""Add a line to the content and find the newest timestamp."""
if not self.content and not line:
return
self.content += line + '\n'
timestamp = self.ts.timestripper(line)
if not self.timestamp: # first time
self.timestamp = timestamp
if timestamp:
self.timestamp = max(self.timestamp, timestamp)
def size(self) -> int:
"""
Return size of discussion thread.
Note that the result is NOT equal to that of
len(self.to_text()). This method counts bytes, rather than
codepoints (characters). This corresponds to MediaWiki's
definition of page size.
"""
return len(self.title.encode('utf-8')) + len(
self.content.encode('utf-8')) + 12
def to_text(self) -> str:
"""Return wikitext discussion thread."""
return '== {} ==\n\n{}'.format(self.title, self.content)
@deprecated('PageArchiver.should_archive_thread(thread)', since='20200727',
future_warning=True)
def should_be_archived(self, archiver) -> Optional[ShouldArchive]:
"""Check whether thread has to be archived."""
return archiver.should_archive_thread(self)
class DiscussionPage(pywikibot.Page):
"""
A class that represents a single page of discussion threads.
Feed threads to it and run an update() afterwards.
"""
def __init__(self, source, archiver, params=None) -> None:
"""Initializer."""
super().__init__(source)
self.threads = []
self.full = False
self.archiver = archiver
# for testing purposes we allow archiver to be None and we are able
# to create a DiscussionPage in this way:
# >>> import pywikibot as py
# >>> from scripts.archivebot import DiscussionPage
# >>> d = DiscussionPage(py.Page(py.Site(), <talk page name>), None)
if archiver is None:
self.timestripper = TimeStripper(self.site)
else:
self.timestripper = self.archiver.timestripper
self.params = params
try:
self.load_page()
except pywikibot.NoPage:
self.header = archiver.get_attr('archiveheader',
i18n.twtranslate(
self.site.code,
'archivebot-archiveheader'))
if self.params:
self.header = self.header % self.params
def load_page(self) -> None:
"""Load the page to be archived and break it up into threads."""
self.header = ''
self.threads = []
self.archives = {}
self.archived_threads = 0
# Exclude unsupported headings (h1, h3, etc):
# adding the marker will make them ignored by extract_sections()
text = self.get()
marker = findmarker(text)
text = re.sub(r'^((=|={3,})[^=])', marker + r'\1', text, flags=re.M)
# Find threads, avoid archiving categories or interwiki
header, threads, footer = extract_sections(text, self.site)
header = header.replace(marker, '')
if header and footer:
self.header = '\n\n'.join((header.rstrip(), footer, ''))
else:
self.header = header + footer
for thread_heading, thread_content in threads:
cur_thread = DiscussionThread(
thread_heading.strip('= '), timestripper=self.timestripper)
# remove heading line
_, *lines = thread_content.replace(marker, '').splitlines()
for line in lines:
cur_thread.feed_line(line)
self.threads.append(cur_thread)
# This extra info is not desirable when run under the unittest
# framework, which may be run either directly or via setup.py
if pywikibot.calledModuleName() not in ['archivebot_tests', 'setup']:
pywikibot.output('{} thread(s) found on {}'
.format(len(self.threads), self))
def is_full(self, max_archive_size=(250 * 1024, 'B')) -> bool:
"""Check whether archive size exceeded."""
if max_archive_size[1] == 'B':
if self.size() >= max_archive_size[0]:
self.full = True # xxx: this is one-way flag
elif max_archive_size[1] == 'T':
if len(self.threads) >= max_archive_size[0]:
self.full = True
return self.full
def feed_thread(self, thread: DiscussionThread,
max_archive_size=(250 * 1024, 'B')) -> bool:
"""Append a new thread to the archive."""
self.threads.append(thread)
self.archived_threads += 1
return self.is_full(max_archive_size)
def size(self) -> int:
"""
Return size of talk page threads.
Note that this method counts bytes, rather than codepoints
(characters). This corresponds to MediaWiki's definition
of page size.
"""
return len(self.header.encode('utf-8')) + sum(t.size()
for t in self.threads)
def update(self, summary, sort_threads=False) -> None:
"""Recombine threads and save page."""
if sort_threads:
pywikibot.output('Sorting threads...')
self.threads.sort(key=lambda t: t.timestamp)
newtext = re.sub('\n*$', '\n\n', self.header) # Fix trailing newlines
for t in self.threads:
newtext += t.to_text()
if self.full:
summary += ' ' + i18n.twtranslate(self.site.code,
'archivebot-archive-full')
self.text = newtext
self.save(summary)
class PageArchiver:
"""A class that encapsulates all archiving methods."""
algo = 'none'
def __init__(self, page, template, salt, force=False) -> None:
"""Initializer.
@param page: a page object to be archived
@type page: pywikibot.Page
@param template: a template with configuration settings
@type template: pywikibot.Page
@param salt: salt value
@type salt: str
@param force: override security value
@type force: bool
"""
self.attributes = OrderedDict([
('archive', ['', False]),
('algo', ['old(24h)', False]),
('counter', ['1', False]),
('maxarchivesize', ['200K', False]),
])
self.salt = salt
self.force = force
self.site = page.site
self.tpl = template
self.timestripper = TimeStripper(site=self.site)
self.page = DiscussionPage(page, self)
self.load_config()
self.comment_params = {
'from': self.page.title(),
}
self.now = datetime.datetime.utcnow().replace(tzinfo=TZoneUTC())
self.archives = {}
self.archived_threads = 0
self.month_num2orig_names = {}
for n, (long, short) in enumerate(self.site.months_names, start=1):
self.month_num2orig_names[n] = {'long': long, 'short': short}
def get_attr(self, attr, default='') -> Any:
"""Get an archiver attribute."""
return self.attributes.get(attr, [default])[0]
def set_attr(self, attr, value, out=True) -> None:
"""Set an archiver attribute."""
if attr == 'archive':
value = value.replace('_', ' ')
self.attributes[attr] = [value, out]
def saveables(self) -> List[str]:
"""Return a list of saveable attributes."""
return [a for a in self.attributes if self.attributes[a][1]
and a != 'maxage']
def attr2text(self) -> str:
"""Return a template with archiver saveable attributes."""
return '{{%s\n%s\n}}' \
% (self.tpl.title(with_ns=(self.tpl.namespace() != 10)),
'\n'.join('|{} = {}'.format(a, self.get_attr(a))
for a in self.saveables()))
def key_ok(self) -> bool:
"""Return whether key is valid."""
hexdigest = calc_md5_hexdigest(self.page.title(), self.salt)
return self.get_attr('key') == hexdigest
def load_config(self) -> None:
"""Load and validate archiver template."""
pywikibot.output('Looking for: {{%s}} in %s' % (self.tpl.title(),
self.page))
for tpl, params in self.page.raw_extracted_templates:
try: # Check tpl name before comparing; it might be invalid.
tpl_page = pywikibot.Page(self.site, tpl, ns=10)
tpl_page.title()
except pywikibot.Error:
continue
if tpl_page == self.tpl:
for item, value in params.items():
self.set_attr(item.strip(), value.strip())
break
else:
raise MissingConfigError('Missing or malformed template')
if not self.get_attr('algo', ''):
raise MissingConfigError('Missing argument "algo" in template')
if not self.get_attr('archive', ''):
raise MissingConfigError('Missing argument "archive" in template')
def should_archive_thread(self, thread: DiscussionThread
) -> Optional[ShouldArchive]:
"""
Check whether a thread has to be archived.
@return: the archiving reason as a tuple of localization args
"""
# Archived by timestamp
algo = self.get_attr('algo')
re_t = re.fullmatch(r'old\((.*)\)', algo)
if re_t:
if not thread.timestamp:
return None
# TODO: handle unsigned
maxage = str2time(re_t.group(1), thread.timestamp)
if self.now - thread.timestamp > maxage:
duration = str2localized_duration(self.site, re_t.group(1))
return ('duration', duration)
# TODO: handle marked with template
return None
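# Illustrative behaviour (added for clarity, not part of the original script):
# with algo = old(30d) and a thread whose newest timestamp is 45 days old,
# this returns ('duration', <localized "30 days">); newer threads, or threads
# without any timestamp, return None.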
def get_archive_page(self, title: str, params=None) -> DiscussionPage:
"""
Return the page for archiving.
If it doesn't exist yet, create and cache it.
Also check for security violations.
"""
page_title = self.page.title()
archive = pywikibot.Page(self.site, title)
if not (self.force or title.startswith(page_title + '/')
or self.key_ok()):
raise ArchiveSecurityError(
'Archive page {} does not start with page title ({})!'
.format(archive, page_title))
if title not in self.archives:
self.archives[title] = DiscussionPage(archive, self, params)
return self.archives[title]
@deprecated(since='20200727', future_warning=True)
def feed_archive(self, archive: pywikibot.Page, thread: DiscussionThread,
max_archive_size: Size, params=None) -> bool:
"""
Feed the thread to one of the archives.
Also check for security violations.
@return: whether the archive is full
"""
archive_page = self.get_archive_page(
archive.title(with_ns=True), params)
return archive_page.feed_thread(thread, max_archive_size)
def get_params(self, timestamp, counter: int) -> dict:
"""Make params for archiving template."""
lang = self.site.lang
return {
'counter': to_local_digits(counter, lang),
'year': to_local_digits(timestamp.year, lang),
'isoyear': to_local_digits(timestamp.isocalendar()[0], lang),
'isoweek': to_local_digits(timestamp.isocalendar()[1], lang),
'semester': to_local_digits(int(ceil(timestamp.month / 6)), lang),
'quarter': to_local_digits(int(ceil(timestamp.month / 3)), lang),
'month': to_local_digits(timestamp.month, lang),
'monthname': self.month_num2orig_names[timestamp.month]['long'],
'monthnameshort': self.month_num2orig_names[
timestamp.month]['short'],
'week': to_local_digits(
int(time.strftime('%W', timestamp.timetuple())), lang),
}
def analyze_page(self) -> Set[ShouldArchive]:
"""Analyze DiscussionPage."""
max_arch_size = str2size(self.get_attr('maxarchivesize'))
counter = int(self.get_attr('counter', '1'))
pattern = self.get_attr('archive')
keep_threads = []
threads_per_archive = defaultdict(list)
whys = set()
pywikibot.output('Processing {} threads'
.format(len(self.page.threads)))
for i, thread in enumerate(self.page.threads):
# TODO: Make an option so that unstamped (unsigned) posts get
# archived.
why = self.should_archive_thread(thread)
if not why or why[0] != 'duration':
keep_threads.append(i)
continue
params = self.get_params(thread.timestamp, counter)
# this is actually just a dummy key to group the threads by
# "era" regardless of the counter and deal with it later
key = pattern % params
threads_per_archive[key].append((i, thread))
whys.add(why) # xxx: we don't know if we ever archive anything
params = self.get_params(self.now, counter)
aux_params = self.get_params(self.now, counter + 1)
counter_matters = (pattern % params) != (pattern % aux_params)
del params, aux_params
# we need to start with the oldest archive since that is
# the one the saved counter applies to, so sort the groups
# by the oldest timestamp
groups = sorted(threads_per_archive.values(),
key=lambda group: min(t.timestamp for _, t in group))
era_change = False
for group in groups:
# We will reset counter IFF:
# 1. it matters (AND)
# 2. "era" (year, month, etc.) changes (AND)
# 3. there is something to put to the new archive.
for i, thread in group:
threads_left = len(self.page.threads) - self.archived_threads
if threads_left <= int(self.get_attr('minthreadsleft', 5)):
keep_threads.append(i)
continue # Because there are too few threads left.
if era_change:
era_change = False
counter = 1
params = self.get_params(thread.timestamp, counter)
archive = self.get_archive_page(pattern % params, params)
if counter_matters:
while counter > 1 and not archive.exists():
# This may happen when either:
# 1. a previous version of the bot run and reset
# the counter without archiving anything
# (number #3 above)
# 2. era changed between runs.
# Decrease the counter.
# TODO: This can be VERY slow, use preloading
# or binary search.
counter -= 1
params = self.get_params(thread.timestamp, counter)
archive = self.get_archive_page(
pattern % params, params)
while archive.is_full(max_arch_size):
counter += 1
params = self.get_params(thread.timestamp, counter)
archive = self.get_archive_page(
pattern % params, params)
archive.feed_thread(thread, max_arch_size)
self.archived_threads += 1
if counter_matters:
era_change = True
if self.archived_threads:
self.page.threads = [self.page.threads[i]
for i in sorted(keep_threads)]
self.set_attr('counter', str(counter))
return whys
else:
return set()
def run(self) -> None:
"""Process a single DiscussionPage object."""
if not self.page.botMayEdit():
return
whys = self.analyze_page()
mintoarchive = int(self.get_attr('minthreadstoarchive', 2))
if self.archived_threads < mintoarchive:
# We might not want to archive a measly few threads
# (lowers edit frequency)
pywikibot.output('Only {} (< {}) threads are old enough. Skipping'
.format(self.archived_threads, mintoarchive))
return
if whys:
# Search for the marker template
rx = re.compile(r'\{\{%s\s*?\n.*?\n\}\}'
% (template_title_regex(self.tpl).pattern),
re.DOTALL)
if not rx.search(self.page.header):
raise MalformedConfigError(
"Couldn't find the template in the header"
)
pywikibot.output('Archiving {0} thread(s).'
.format(self.archived_threads))
# Save the archives first (so that bugs don't cause a loss of data)
for title, archive in sorted(self.archives.items()):
count = archive.archived_threads
if count == 0:
continue
self.comment_params['count'] = count
comment = i18n.twtranslate(self.site.code,
'archivebot-archive-summary',
self.comment_params)
archive.update(comment)
# Save the page itself
self.page.header = rx.sub(self.attr2text(), self.page.header)
self.comment_params['count'] = self.archived_threads
comma = self.site.mediawiki_message('comma-separator')
self.comment_params['archives'] = comma.join(
a.title(as_link=True) for a in self.archives.values()
if a.archived_threads > 0
)
# Find out the reasons and return them localized
translated_whys = set()
for why, arg in whys:
# Archived by timestamp
if why == 'duration':
translated_whys.add(
i18n.twtranslate(self.site.code,
'archivebot-older-than',
{'duration': arg,
'count': self.archived_threads}))
# TODO: handle unsigned or archived by template
self.comment_params['why'] = comma.join(translated_whys)
comment = i18n.twtranslate(self.site.code,
'archivebot-page-summary',
self.comment_params)
self.page.update(comment)
def main(*args) -> None:
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: str
"""
filename = None
pagename = None
namespace = None
salt = ''
force = False
calc = None
templates = []
local_args = pywikibot.handle_args(args)
for arg in local_args:
option, _, value = arg.partition(':')
if not option.startswith('-'):
templates.append(arg)
continue
option = option[1:]
if option in ('file', 'filename'):
filename = value
elif option == 'locale':
# Required for english month names
locale.setlocale(locale.LC_TIME, value.encode('utf8'))
elif option == 'timezone':
os.environ['TZ'] = value
# Or use the preset value
if hasattr(time, 'tzset'):
time.tzset()
elif option == 'calc':
calc = value
elif option == 'salt':
salt = value
elif option == 'force':
force = True
elif option == 'page':
pagename = value
elif option == 'namespace':
namespace = value
site = pywikibot.Site()
if calc:
if not salt:
pywikibot.bot.suggest_help(missing_parameters=['-salt'])
return
page = pywikibot.Page(site, calc)
if page.exists():
calc = page.title()
else:
pywikibot.output(
'NOTE: the specified page "{0}" does not (yet) exist.'
.format(calc))
pywikibot.output('key = {}'.format(calc_md5_hexdigest(calc, salt)))
return
if not templates:
pywikibot.bot.suggest_help(
additional_text='No template was specified.')
return
for template_name in templates:
pagelist = []
tmpl = pywikibot.Page(site, template_name, ns=10)
if not filename and not pagename:
if namespace is not None:
ns = [str(namespace)]
else:
ns = []
pywikibot.output('Fetching template transclusions...')
pagelist.extend(tmpl.getReferences(only_template_inclusion=True,
follow_redirects=False,
namespaces=ns))
if filename:
for pg in open(filename, 'r').readlines():
pagelist.append(pywikibot.Page(site, pg, ns=10))
if pagename:
pagelist.append(pywikibot.Page(site, pagename, ns=3))
pagelist.sort()
for pg in pagelist:
pywikibot.output('Processing {}'.format(pg))
# Catching exceptions, so that errors in one page do not bail out
# the entire process
try:
archiver = PageArchiver(pg, tmpl, salt, force)
archiver.run()
except ArchiveBotSiteConfigError as e:
# no stack trace for errors originated by pages on-site
pywikibot.error('Missing or malformed template in page {}: {}'
.format(pg, e))
except Exception:
pywikibot.error('Error occurred while processing page {}'
.format(pg))
pywikibot.exception(tb=True)
if __name__ == '__main__':
main()
|
the-stack_106_19472
|
#FLM: Typerig Panel
# ----------------------------------------
# (C) Vassil Kateliev, 2018 (http://www.kateliev.com)
# (C) Karandash Type Foundry (http://www.karandash.eu)
#-----------------------------------------
# www.typerig.com
# No warranties. By using this you agree
# that you use it at your own risk!
# - Dependencies -----------------
#import fontlab as fl6
#import fontgate as fgt
from PythonQt import QtCore
from typerig import QtGui
# -- Internals - Load toolpanels
import Panel
# - Init --------------------------
app_version = '0.46'
app_name = 'TypeRig Panel'
ignorePanel = '__'
# - Style -------------------------
ss_Toolbox_none = """/* EMPTY STYLESHEET */ """
# - Interface -----------------------------
# -- Main Widget --------------------------
class typerig_Panel(QtGui.QDialog):
def __init__(self):
super(typerig_Panel, self).__init__()
#self.setStyleSheet(ss_Toolbox_none)
# - Layers --------------------------
self.chk_ActiveLayer = QtGui.QCheckBox('Active')
self.chk_Masters = QtGui.QCheckBox('Masters')
self.chk_Masks = QtGui.QCheckBox('Masks')
self.chk_Service = QtGui.QCheckBox('Services')
self.chk_ActiveLayer.setCheckState(QtCore.Qt.Checked)
#self.chk_ActiveLayer.setStyleSheet('QCheckBox::indicator:checked {background-color: limegreen; border: 1px Solid limegreen;}')
self.chk_ActiveLayer.stateChanged.connect(self.refreshLayers)
self.chk_Masters.stateChanged.connect(self.refreshLayers)
self.chk_Masks.stateChanged.connect(self.refreshLayers)
self.chk_Service.stateChanged.connect(self.refreshLayers)
self.refreshLayers()
# - Glyphs --------------------------
self.rad_glyph = QtGui.QRadioButton('Glyph')
self.rad_window = QtGui.QRadioButton('Window')
self.rad_selection = QtGui.QRadioButton('Selection')
self.rad_font = QtGui.QRadioButton('Font')
self.rad_glyph.toggled.connect(self.refreshMode)
self.rad_window.toggled.connect(self.refreshMode)
self.rad_selection.toggled.connect(self.refreshMode)
self.rad_font.toggled.connect(self.refreshMode)
self.rad_glyph.setChecked(True)
self.rad_glyph.setEnabled(True)
self.rad_window.setEnabled(True)
self.rad_selection.setEnabled(True)
self.rad_font.setEnabled(False)
self.rad_glyph.setToolTip('Affect current glyph')
self.rad_window.setToolTip('Affect glyphs in active window')
self.rad_selection.setToolTip('Affect selected glyphs')
self.rad_font.setToolTip('Affect the entire font')
# - Fold Button ---------------------
self.btn_fold = QtGui.QPushButton('^')
self.btn_unfold = QtGui.QPushButton('Restore Panel')
self.btn_fold.setFixedHeight(self.chk_ActiveLayer.sizeHint.height()*2.5)
self.btn_fold.setFixedWidth(self.chk_ActiveLayer.sizeHint.height())
self.btn_unfold.setFixedHeight(self.chk_ActiveLayer.sizeHint.height() + 5)
self.btn_fold.setToolTip('Fold Panel')
self.btn_unfold.setToolTip('Unfold Panel')
self.btn_fold.clicked.connect(self.fold)
self.btn_unfold.clicked.connect(self.fold)
self.flag_fold = False
# - Tabs --------------------------
# -- Dynamically load all tabs
self.tabs = QtGui.QTabWidget()
self.tabs.setTabPosition(QtGui.QTabWidget.East)
# --- Load all tabs from this directory as modules. Check __init__.py
# --- <dirName>.modules tabs/modules manifest in list format
for toolName in Panel.modules:
if ignorePanel not in toolName:
self.tabs.addTab(eval('Panel.%s.tool_tab()' %toolName), toolName)
# - Layouts -------------------------------
layoutV = QtGui.QVBoxLayout()
layoutV.setContentsMargins(0,0,0,0)
self.lay_controller = QtGui.QGridLayout()
self.fr_controller = QtGui.QFrame()
self.lay_controller.setContentsMargins(15,5,5,3)
self.lay_controller.setSpacing(5)
# -- Build layouts -------------------------------
self.lay_controller.addWidget(self.chk_ActiveLayer, 0, 0, 1, 1)
self.lay_controller.addWidget(self.chk_Masters, 0, 1, 1, 1)
self.lay_controller.addWidget(self.chk_Masks, 0, 2, 1, 1)
self.lay_controller.addWidget(self.chk_Service, 0, 3, 1, 1)
self.lay_controller.addWidget(self.btn_fold, 0, 4, 2, 1)
self.lay_controller.addWidget(self.rad_glyph, 1, 0, 1, 1)
self.lay_controller.addWidget(self.rad_window, 1, 1, 1, 1)
self.lay_controller.addWidget(self.rad_selection, 1, 2, 1, 1)
self.lay_controller.addWidget(self.rad_font, 1, 3, 1, 1)
layoutV.addWidget(self.btn_unfold)
self.fr_controller.setLayout(self.lay_controller)
layoutV.addWidget(self.fr_controller)
layoutV.addWidget(self.tabs)
self.btn_unfold.hide()
# - Set Widget -------------------------------
self.setLayout(layoutV)
self.setWindowTitle('%s %s' %(app_name, app_version))
self.setGeometry(300, 300, 240, 440)
self.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) # Always on top!!
#self.setMinimumWidth(300)
self.show()
def refreshMode(self):
global pMode
pMode = 0
if self.rad_glyph.isChecked(): pMode = 0
if self.rad_window.isChecked(): pMode = 1
if self.rad_selection.isChecked(): pMode = 2
if self.rad_font.isChecked(): pMode = 3
for toolName in Panel.modules:
exec('Panel.%s.pMode = %s' %(toolName, pMode))
def refreshLayers(self):
global pLayers
pLayers = (self.chk_ActiveLayer.isChecked(), self.chk_Masters.isChecked(), self.chk_Masks.isChecked(), self.chk_Service.isChecked())
for toolName in Panel.modules:
exec('Panel.%s.pLayers = %s' %(toolName, pLayers))
def fold(self):
# - Init
width_all = self.chk_ActiveLayer.sizeHint.width()
height_folded = self.btn_unfold.sizeHint.height()
height_expanded = self.tabs.sizeHint.height() + 40 #Fix this! + 40 Added because Nodes tab breaks
#self.resize(QtCore,Qsize(width_all, height_folded))
# - Do
if not self.flag_fold:
self.tabs.hide()
self.fr_controller.hide()
self.btn_unfold.show()
self.repaint()
self.setFixedHeight(height_folded)
self.flag_fold = True
else:
self.setFixedHeight(height_expanded)
self.tabs.show()
self.fr_controller.show()
self.btn_unfold.hide()
self.repaint()
self.flag_fold = False
# - STYLE OVERRIDE -------------------
# -- Following (uncommented) will override the default OS styling for Qt Widgets on Mac OS.
from platform import system
if system() == 'Darwin':
QtGui.QApplication.setStyle(QtGui.QStyleFactory.create('macintosh')) # Options: Windows, WindowsXP, WindowsVista, Fusion
# - RUN ------------------------------
dialog = typerig_Panel()
|
the-stack_106_19475
|
import os
import shutil
from openpype.hosts import tvpaint
from openpype.lib import (
PreLaunchHook,
get_pype_execute_args
)
import avalon
class TvpaintPrelaunchHook(PreLaunchHook):
"""Launch arguments preparation.
Hook adds the python executable and the script path of the tvpaint
implementation before the tvpaint executable, and adds the last workfile path
to the launch arguments.
Existence of the last workfile is checked. If the workfile does not exist, the
hook tries to copy a templated workfile from a predefined path.
"""
app_groups = ["tvpaint"]
def execute(self):
# Pop tvpaint executable
executable_path = self.launch_context.launch_args.pop(0)
# Pop rest of launch arguments - There should not be other arguments!
remainders = []
while self.launch_context.launch_args:
remainders.append(self.launch_context.launch_args.pop(0))
new_launch_args = get_pype_execute_args(
"run", self.launch_script_path(), executable_path
)
# Add workfile to launch arguments
workfile_path = self.workfile_path()
if workfile_path:
new_launch_args.append(workfile_path)
# How to create new command line
# if platform.system().lower() == "windows":
# new_launch_args = [
# "cmd.exe",
# "/c",
# "Call cmd.exe /k",
# *new_launch_args
# ]
# Append as a whole list as these arguments should not be separated
self.launch_context.launch_args.append(new_launch_args)
if remainders:
self.log.warning((
"There are unexpected launch arguments in TVPaint launch. {}"
).format(str(remainders)))
self.launch_context.launch_args.extend(remainders)
def launch_script_path(self):
avalon_dir = os.path.dirname(os.path.abspath(avalon.__file__))
script_path = os.path.join(
avalon_dir,
"tvpaint",
"launch_script.py"
)
return script_path
def workfile_path(self):
workfile_path = self.data["last_workfile_path"]
# copy workfile from template if none exists on the path
if not os.path.exists(workfile_path):
# TODO add ability to set different template workfile path via
# settings
pype_dir = os.path.dirname(os.path.abspath(tvpaint.__file__))
template_path = os.path.join(
pype_dir, "resources", "template.tvpp"
)
if not os.path.exists(template_path):
self.log.warning(
"Couldn't find workfile template file in {}".format(
template_path
)
)
return
self.log.info(
f"Creating workfile from template: \"{template_path}\""
)
# Copy template workfile to new destination
shutil.copy2(
os.path.normpath(template_path),
os.path.normpath(workfile_path)
)
self.log.info(f"Workfile to open: \"{workfile_path}\"")
return workfile_path
|
the-stack_106_19477
|
#!/usr/bin/env python
import os
import sys
import vtk
from vtk.util.misc import vtkGetDataRoot, vtkGetTempDir
gotWarning = False
gotError = False
def WarningCallback(obj, evt):
global gotWarning
gotWarning = True
VTK_DATA_ROOT = vtkGetDataRoot()
VTK_TEMP_DIR = vtkGetTempDir()
# Image pipeline
image1 = vtk.vtkTIFFReader()
image1.SetFileName(VTK_DATA_ROOT + "/Data/beach.tif")
# "beach.tif" image contains ORIENTATION tag which is
# ORIENTATION_TOPLEFT (row 0 top, col 0 lhs) type. The TIFF
# reader parses this tag and sets the internal TIFF image
# orientation accordingly. To overwrite this orientation with a vtk
# convention of ORIENTATION_BOTLEFT (row 0 bottom, col 0 lhs ), invoke
# SetOrientationType method with parameter value of 4.
image1.SetOrientationType(4)
image1.Update()
filename = VTK_TEMP_DIR + "/" + "pngw1.png"
testKey = "test key"
testValue = "test value"
longKey = "0123456789012345678901234567890123456789"\
"0123456789012345678901234567890123456789"
longKeyValue = "this also prints a warning"
try:
# Can we write to the directory?
channel = open(filename, "wb")
channel.close()
writer = vtk.vtkPNGWriter()
writer.SetInputConnection(image1.GetOutputPort())
writer.SetFileName(filename)
writer.AddText(testKey, testValue);
# this is fine
writer.AddText(testKey, testValue);
observerId = writer.AddObserver(vtk.vtkCommand.WarningEvent, WarningCallback)
# this prints a warning and does not add the text chunk
writer.AddText("", "this prints a warning")
if (not gotWarning):
print("Error: expect warning when adding a text chunk with empty key")
gotError = True
gotWarning = False
# this prints a warning and add a text chunk with a truncated key
writer.AddText(longKey, longKeyValue)
if (not gotWarning):
print("Error: expect warning when adding a text chunk "\
"with key length bigger than 79 characters")
gotError = True
writer.RemoveObserver(observerId)
writer.Write()
reader = vtk.vtkPNGReader()
reader.SetFileName(filename);
reader.Update();
if (reader.GetNumberOfTextChunks() != 3):
print("Error: Expecting three text chunks in the PNG file but got",\
reader.GetNumberOfTextChunks())
gotError = True
beginEnd = [0, 0]
reader.GetTextChunks(testKey,beginEnd)
# the key starting with 0 comes in first.
if (beginEnd[0] != 1 and beginEnd[1] != 3):
print("Error: expect \"%s\" at index 1 and 2 but got "\
"them at positions %d and %d" % (testKey, beginEnd[0], beginEnd[1]))
gotError = True
if (reader.GetTextKey(1) != testKey or reader.GetTextKey(2) != testKey):
print("Error: expecting key \"%s\" at index 1 and 2 but got \"%s\"" % \
(testKey, reader.GetTextKey(1)))
gotError = True
if (reader.GetTextValue(1) != testValue or reader.GetTextValue(2) != testValue):
print("Error: expecting value \"%s\" at index 1 and 2 but got \"%s\"" % \
(testValue, reader.GetTextValue(1)))
gotError = True
if (reader.GetTextKey(0) != longKey[:-1]):
print("Error: expecting value \"%s\" at index but got \"%s\"" % \
(longKey[:-1], reader.GetTextKey(0)))
gotError = True
if (gotError):
sys.exit(1)
else:
sys.exit(0)
except IOError:
print("Error: Unable to test PNG write/read of text chunks.")
sys.exit(1)
|
the-stack_106_19479
|
from asyncio import get_running_loop
from tesoro import REVEALER
from sys import exc_info
import logging
logger = logging.getLogger(__name__)
def kapicorp_labels(req_uid, req_obj):
"returns kapicorp labels dict for req_obj"
labels = {}
try:
for label_key, label_value in req_obj["metadata"]["labels"].items():
if label_key.startswith("tesoro.kapicorp.com"):
labels[label_key] = label_value
except KeyError:
logger.error("request_id=%s Tesoro label not found", req_uid)
return labels
return labels
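# Illustrative behaviour (added for clarity; the label key below is only an
# assumed example): for a request object whose metadata.labels contain
# {"tesoro.kapicorp.com/secrets": "enabled", "app": "web"}, only the
# "tesoro.kapicorp.com/secrets" entry would be returned.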
async def run_blocking(func):
"run blocking funcion in async executor"
loop = get_running_loop()
return await loop.run_in_executor(None, func)
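# Illustrative usage (added for clarity, not part of the original module):
#   revealed = await run_blocking(lambda: kapitan_reveal_json(uid, doc))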
def kapitan_reveal_json(req_uid, json_doc, retries=3):
"return revealed object, total revealed tags (TODO)"
for retry in range(retries):
try:
return REVEALER.reveal_obj(json_doc)
except Exception as e:
exc_type, exc_value, _ = exc_info()
if retry + 1 < retries:
logger.error("message=\"Kapitan reveal failed, retrying\", request_uid=%s, "
"retry=\"%d of %d\", exception_type=%s, error=\"%s\"",
req_uid, retry + 1, retries, exc_type, exc_value)
continue
raise
def setup_logging(level=logging.INFO, kapitan_debug=False):
"setup logging, set kapitan_debug to True for kapitan debug logging (dangerous)"
for name, logger in logging.root.manager.loggerDict.items():
if name.startswith("kapitan."):
logger.disabled = not kapitan_debug
logging.basicConfig(
format="%(asctime)s %(levelname)-8s %(message)s", level=level, datefmt="%Y-%m-%d %H:%M:%S"
)
logging.getLogger("tesoro").setLevel(level)
class KapitanRevealFail(Exception):
pass
|
the-stack_106_19481
|
"""Notification Model"""
from datetime import datetime
from enum import Enum
import requests
from flask import current_app
from pushover_complete import PushoverAPI
from .. import db
class Service(Enum):
Pushover = "Pushover"
LineNotify = "Line Notify"
VALID_ARGS = {
Service.Pushover: [
"device",
"title",
"url",
"url_title",
"image_url",
"priority",
"retry",
"expire",
"callback_url",
"timestamp",
"sound",
"html",
],
Service.LineNotify: [
"image_url",
"stickerPackageId",
"stickerId",
"notificationDisabled",
],
}
class Notification(db.Model):
"""An Object which describe a Notification for a specific user
Variables:
id {str} -- identifier of this notification
initiator {str} -- function or task which fire this notification
user_id {str} -- receiver's username
user {user.User} -- receiver user object
service {str} -- service used to send notification
message {str} -- notification body
kwargs {dict} -- other miscellaneous options for this notification
sent_datetime {datetime.datetime} -- datetime when this object is created
response {dict} -- server response when notification is sent
"""
__tablename__ = "notification"
id = db.Column(db.Integer, primary_key=True)
initiator = db.Column(db.String(16), nullable=False)
username = db.Column(db.String(32), db.ForeignKey("user.username"))
service = db.Column(db.Enum(Service))
message = db.Column(db.Text)
kwargs = db.Column(db.JSON, nullable=False, default={})
sent_timestamp = db.Column(db.DateTime, index=True)
response = db.Column(db.JSON, nullable=False, default={})
user = db.relationship("User", back_populates="notifications")
def __init__(self, initiator, user, service, send=True, **kwargs):
"""An Object which describe a Notification for a specific user
Arguments:
initiator {str} -- function or task which fire this notification
user {user.User} -- receiver user object
service {str or notification.Service} -- service used to send notification
Keyword Arguments:
send {bool} -- Send on initialize (default: {True})
message {str} -- message of Notification
image_url {str} -- URL of the image
"""
self.user = user
self.service = Service(service)
# This step validate that the user has authorized the service
getattr(self.user, self.service.value.lower())
self.initiator = initiator
self.message = kwargs.pop("message", None)
self.kwargs = kwargs
db.session.add(self)
db.session.commit()
current_app.logger.info(f"Notification <{self.id}>: Create")
if send:
self.send()
def __repr__(self):
return f"<Notification <{self.id}>"
@staticmethod
def _clean_up_kwargs(kwargs, service):
invalid_args = {
key: val for key, val in kwargs.items() if key not in VALID_ARGS[service]
}
for key, val in invalid_args.items():
current_app.logger.warning(f"Invalid argument ({key}, {val}) is omitted")
kwargs.pop(key)
return kwargs
def send(self):
"""Trigger Sending with the service assigned
Returns:
dict -- Response from service
Raises:
AttributeError -- Description of why notification is unsentable
"""
if not self.service:
raise AttributeError("Service is not set")
if not self.message:
raise AttributeError("Message is empty")
if self.sent_timestamp:
raise AttributeError("This Notification has already sent")
if self.service is Service.Pushover:
self.response = self._send_with_pushover()
if self.service is Service.LineNotify:
self.response = self._send_with_line_notify().json()
self.sent_timestamp = datetime.utcnow()
db.session.commit()
current_app.logger.info(f"Notification <{self.id}>: Sent")
return self.response
def _send_with_pushover(self):
"""Send Notification with Pushover API
Returns:
dict -- Response from service
"""
kwargs = Notification._clean_up_kwargs(self.kwargs.copy(), self.service)
image_url = kwargs.pop("image_url", None)
if image_url:
kwargs["image"] = requests.get(image_url, stream=True).content
pusher = PushoverAPI(current_app.config["PUSHOVER_TOKEN"])
return pusher.send_message(self.user.pushover, self.message, **kwargs)
def _send_with_line_notify(self):
"""Send Notification with Line Notify API
Returns:
dict -- Response from service
"""
kwargs = Notification._clean_up_kwargs(self.kwargs.copy(), self.service)
kwargs["imageFullsize"] = kwargs.pop("image_url", None)
return self.user.line_notify.post(
"api/notify", data=dict(message=self.message, **kwargs)
)
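# Illustrative usage sketch (added for clarity, not part of the original
# module); assumes an application context and a `user` that has authorized
# Pushover:
#
#   notification = Notification(
#       initiator="daily_report",
#       user=user,
#       service=Service.Pushover,
#       message="Your report is ready",
#       title="Daily report",
#   )
#   # send=True by default, so the Pushover API is called on creation.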
|
the-stack_106_19482
|
# encoding: utf-8
# This file contains commonly used parts of external libraries. The idea is
# to help in removing helpers from being used as a dependency by many files
# but at the same time making it easy to change for example the json lib
# used.
#
# NOTE: This file is specifically created for
# from ckan.common import x, y, z to be allowed
from collections.abc import MutableMapping
import flask
import six
from werkzeug.local import Local, LocalProxy
from flask_babel import (gettext as flask_ugettext,
ngettext as flask_ungettext)
import simplejson as json
import ckan.lib.maintain as maintain
current_app = flask.current_app
@maintain.deprecated('All web requests are served by Flask', since="2.10.0")
def is_flask_request():
u'''
This function is deprecated. All CKAN requests are now served by Flask
'''
return True
def streaming_response(
data, mimetype=u'application/octet-stream', with_context=False):
iter_data = iter(data)
if is_flask_request():
# Removal of context variables for pylon's app is prevented
# inside `pylons_app.py`. It would be better to decide on the fly
# whether we need to preserve context, but it won't affect performance
# in any visible way and we are going to get rid of pylons anyway.
# Flask allows to do this in easy way.
if with_context:
iter_data = flask.stream_with_context(iter_data)
resp = flask.Response(iter_data, mimetype=mimetype)
return resp
def ugettext(*args, **kwargs):
return flask_ugettext(*args, **kwargs)
_ = ugettext
def ungettext(*args, **kwargs):
return flask_ungettext(*args, **kwargs)
class CKANConfig(MutableMapping):
u'''Main CKAN configuration object
This is a dict-like object that also proxies any changes to the
Flask and Pylons configuration objects.
The actual `config` instance in this module is initialized in the
`load_environment` method with the values of the ini file or env vars.
'''
def __init__(self, *args, **kwargs):
self.store = dict()
self.update(dict(*args, **kwargs))
def __getitem__(self, key):
return self.store[key]
def __iter__(self):
return iter(self.store)
def __len__(self):
return len(self.store)
def __repr__(self):
return self.store.__repr__()
def copy(self):
return self.store.copy()
def clear(self):
self.store.clear()
try:
flask.current_app.config.clear()
except RuntimeError:
pass
def __setitem__(self, key, value):
self.store[key] = value
try:
flask.current_app.config[key] = value
except RuntimeError:
pass
def __delitem__(self, key):
del self.store[key]
try:
del flask.current_app.config[key]
except RuntimeError:
pass
def _get_request():
return flask.request
class CKANRequest(LocalProxy):
u'''Common request object
This is just a wrapper around LocalProxy so we can handle some special
cases for backwards compatibility.
LocalProxy will forward to Flask or Pylons own request objects depending
on the output of `_get_request` (which essentially calls
`is_flask_request`) and at the same time provide all objects methods to be
able to interact with them transparently.
'''
@property
def params(self):
u''' Special case as Pylons' request.params is used all over the place.
All new code meant to be run just in Flask (eg views) should always
use request.args
'''
try:
return super(CKANRequest, self).params
except AttributeError:
return self.args
def _get_c():
return flask.g
def _get_session():
return flask.session
local = Local()
# This a proxy to the bounded config object
local(u'config')
# Thread-local safe objects
config = local.config = CKANConfig()
# Proxies to already thread-local safe objects
request = CKANRequest(_get_request)
# Provide a `c` alias for `g` for backwards compatibility
g = c = LocalProxy(_get_c)
session = LocalProxy(_get_session)
truthy = frozenset([u'true', u'yes', u'on', u'y', u't', u'1'])
falsy = frozenset([u'false', u'no', u'off', u'n', u'f', u'0'])
def asbool(obj):
if isinstance(obj, str):
obj = obj.strip().lower()
if obj in truthy:
return True
elif obj in falsy:
return False
else:
raise ValueError(u"String is not true/false: {}".format(obj))
return bool(obj)
def asint(obj):
try:
return int(obj)
except (TypeError, ValueError):
raise ValueError(u"Bad integer value: {}".format(obj))
def aslist(obj, sep=None, strip=True):
if isinstance(obj, str):
lst = obj.split(sep)
if strip:
lst = [v.strip() for v in lst]
return lst
elif isinstance(obj, (list, tuple)):
return obj
elif obj is None:
return []
else:
return [obj]
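# Illustrative results (added for clarity, not part of the original module):
#   asbool(u'Yes')  -> True
#   asbool(u'off')  -> False
#   asint(u'42')    -> 42
#   aslist(u'a, b , c', sep=u',') -> [u'a', u'b', u'c']
#   aslist(None)    -> []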
|
the-stack_106_19483
|
import setuptools
import castero
install_requires = [
'requests',
'grequests',
'cjkwrap',
'beautifulsoup4',
'lxml',
'python-vlc',
'python-mpv'
]
tests_require = [
'pytest',
'coverage',
'codacy-coverage'
]
extras_require = {
'test': tests_require
}
def long_description():
with open("README.md") as readme:
return readme.read()
setuptools.setup(
name=castero.__title__,
version=castero.__version__,
description=castero.__description__,
long_description=long_description(),
long_description_content_type='text/markdown',
keywords=castero.__keywords__,
url=castero.__url__,
author=castero.__author__,
author_email=castero.__author_email__,
license=castero.__license__,
packages=[
'castero', 'castero.perspectives', 'castero.players', 'castero.menus'
],
package_data={
'castero': ['templates/*', 'templates/migrations/*'],
},
python_requires='>=3',
install_requires=install_requires,
tests_require=tests_require,
extras_require=extras_require,
entry_points={'console_scripts': ['castero=castero.__main__:main']},
classifiers=[
'Intended Audience :: End Users/Desktop',
'Environment :: Console :: Curses',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'License :: OSI Approved :: MIT License',
'Topic :: Terminals'
],
)
|
the-stack_106_19486
|
import speech_recognition as sr
import re
import philips_hue as ph
import soundclassify as sc
import pyaudio
import wave
from jesica4 import create_dashboard
from jesica4 import command_light
from jesica4 import command_SoundSystem
from jesica4 import command_Door
from jesica4 import command_detectsound
command_light(True, '#FFC200', 'Turn on the light Jesica to red')
command_SoundSystem('On', 30, 'Can you turn on the speakers to 30%')
command_Door('Open', 'Please open the door')
command_detectsound('dog_bark')
r = sr.Recognizer()
m = sr.Microphone()
stopCommands = ["stop","stop listening"]
callCommand = ["OK Google" , "hey Google" , "hey Alexa" , "Alexa", "hey", "hey Jeffrey","Jeffrey","hey Dennis", 'hey Jessica', 'Jessica']
volume = 50
def Record():
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
RECORD_SECONDS = 5
WAVE_OUTPUT_FILENAME = "output.wav"
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
print("* recording")
frames = []
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
frames.append(data)
print("* done recording")
stream.stop_stream()
stream.close()
p.terminate()
wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
return WAVE_OUTPUT_FILENAME
def processCommand(speech):
for stopCmd in stopCommands:
if stopCmd in speech:
print("stop listening...")
exit()
for cmd in callCommand:
if (cmd in speech):
print("call command specified")
break
else:
return
lights_on = re.compile(r'^(?=.*turn)((?=.*lights)|(?=.*light))(?=.*on).*$', re.I)
lights_off = re.compile(r'^(?=.*turn)((?=.*lights)|(?=.*light))(?=.*off).*$', re.I)
decrease_brightness = re.compile(r'^(?=.*decrease)(?=.*brightness).*$', re.I)
increase_brightness = re.compile(r'^(?=.*increase)(?=.*brightness).*$', re.I)
set_brightness = re.compile(r'^(?=.*set)(?=.*brightness).*$', re.I)
rotate_color = re.compile(r'^(?=.*rotate)((?=.*color)|(?=.*colour)).*$', re.I)
set_color = re.compile(r'^(?=.*set)((?=.*color)|(?=.*colour)).*$', re.I)
play_song = re.compile(r'^(?=.*play)*((?=.*song)|(?=.*something)).*$', re.I)
stop_song = re.compile(r'^(?=.*stop)*((?=.*playing)|(?=.*music)).*$', re.I)
pause_song = re.compile(r'^(?=.*pause)*((?=.*song)|(?=.*music)).*$', re.I)
increase_volume = re.compile(r'^((?=.*increase)(?=.*volume))|((?=.*make)(?=.*louder)).*$', re.I)
decrease_volume = re.compile(r'^((?=.*decrease)(?=.*volume))|((?=.*make)(?=.*softer)).*$', re.I)
open_door = re.compile(r'^(?=.*open)(?=.*door).*$', re.I)
close_door = re.compile(r'^(?=.*close)(?=.*door).*$', re.I)
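# Illustrative phrases (added for clarity; assumed examples) that these
# patterns are meant to match once a call command such as "hey Jessica" is
# present in the speech:
#   "hey Jessica turn the lights on"     -> lights_on
#   "hey Jessica set brightness to 40"   -> set_brightness
#   "hey Jessica make the music louder"  -> increase_volume
#   "hey Jessica open the door"          -> open_door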
if lights_on.match(speech):
ph.turn_on_group('lights')
print("turning lights on")
return
if lights_off.match(speech):
ph.turn_off_group('lights')
print("turning lights off")
return
if decrease_brightness.match(speech):
ph.decrease_brightness_group('lights')
print("decreasing brightness")
return
if increase_brightness.match(speech):
ph.increase_brightness_group('lights')
print("increasing brightness")
return
if set_brightness.match(speech):
temp = re.findall(r'\d+', speech)
percentage = list(map(int, temp))
if percentage[0] > 100:
ph.set_brightness_group('lights', 100)
print("setting brightness to 100%")
elif percentage[0] < 0:
ph.set_brightness_group('lights', 0)
print("setting brightness to 0%")
else:
ph.set_brightness_group('lights', percentage[0])
print("setting brightness to " + str(percentage[0]) + "%")
return
if set_color.match(speech):
temp = speech.split(" ")
color = temp[-1]
ph.set_color('lights', color)
return
if rotate_color.match(speech):
ph.rotate_color()
return
if play_song.match(speech):
print("playing a song")
command_SoundSystem("Playing a song", volume, speech)
return
if stop_song.match(speech):
print("stopping song")
command_SoundSystem("Stopping song", volume, speech)
return
if pause_song.match(speech):
print("pausing a song")
command_SoundSystem("Pausing song", volume, speech)
return
if increase_volume.match(speech):
print("increasing volume")
edit_volume(10)
command_SoundSystem("Increasing volume", volume, speech)
return
if decrease_volume.match(speech):
print("decreasing volume")
edit_volume(-10)
command_SoundSystem("Decreasing volume", volume, speech)
return
if open_door.match(speech):
print("opening door")
command_Door('Open', speech)
return
if close_door.match(speech):
print("closing door")
command_Door('Closed', speech)
return
if("identify" in speech):
#todo
#filtered = output_audio_file(Record())
#print(filtered)
result = sc.classify(Record())
command_detectsound(result)
return
def edit_volume(vol):
global volume
volume += vol
try:
print("A moment of silence, please...")
with m as source: r.adjust_for_ambient_noise(source)
print("Set minimum energy threshold to {}".format(r.energy_threshold))
while True:
print("Say something!")
try :
with m as source: audio = r.listen(source, timeout = 3, phrase_time_limit = 7)
except sr.WaitTimeoutError as e:
print("Timeout!")
print(e)
continue
except Exception as e:
print(e)
print("Got it! Now to recognize it...")
try:
# recognize speech using Google Speech Recognition
value = r.recognize_google(audio)
# we need some special handling here to correctly print unicode characters to standard output
if str is bytes: # this version of Python uses bytes for strings (Python 2)
print(u"You said {}".format(value).encode("utf-8"))
else: # this version of Python uses unicode for strings (Python 3+)
print("You said {}".format(value))
processCommand(value)
except sr.UnknownValueError:
print("Oops! Didn't catch that")
except sr.RequestError as e:
print("Uh oh! Couldn't request results from Google Speech Recognition service; {0}".format(e))
except KeyboardInterrupt:
pass
|