import numpy as np
import streamlit as st
import torch
import torchvision.transforms as T
import urllib
from PIL import Image
from torchvision.utils import make_grid, save_image
from model import RestorationModel
import albumentations as A
from albumentations.pytorch import ToTensorV2
CHECKPOINT = "https://github.com/Git-Nayanjyoti/image-inpainter/releases/download/v-0.0.1/weights_sun_4_jul_2021.ckpt"
CHECKPOINT2 = ""
SIZE = 256
# MODEL_WEIGHTS_DEPLOYMENT_URL = ''
# Constants for sidebar dropdown
SIDEBAR_OPTION_PROJECT_INFO = "Show Project Info"
SIDEBAR_OPTION_ENCODER_DECODER = "Auto Encoder Decoder"
SIDEBAR_OPTION_PIX2PIX = "PIX2PIX GAN"
SIDEBAR_OPTIONS = [SIDEBAR_OPTION_PROJECT_INFO, SIDEBAR_OPTION_ENCODER_DECODER, SIDEBAR_OPTION_PIX2PIX]
preprocess_fn = A.Compose([
A.ToFloat(max_value=255.0),
ToTensorV2(p=1.0)
])
dropout_fn = A.CoarseDropout(max_holes=1, min_holes=1, p=1.0, max_width=50, max_height=50)
resize_fn = A.Resize(SIZE, SIZE, p=1.0)
def make_holes(image):
image = np.array(image)
with st.spinner(" Genrating patches over image ..."):
aug_image = dropout_fn(image=image)['image']
image = aug_image.astype(np.uint8)
image = Image.fromarray(image)
st.markdown("### Image after adding patches")
st.image(image, caption='Image with patches')
return aug_image
def resize_image(uploaded_image):
image = np.array(uploaded_image)
image = resize_fn(image=image)['image']
display_image = image.astype(np.uint8)
display_image = Image.fromarray(display_image)
st.title("Here is the image you've uploaded")
st.image(image, caption='Uploaded Image')
return image
@st.cache()
def load_model():
model = RestorationModel.load_from_checkpoint(CHECKPOINT)
model.eval()
return model
# Inference function - TODO this could probably be improved ...
@torch.no_grad()
def do_predict(image):
image = np.array(image)
with st.spinner("🏃♂️ Getting the latest model weights ..."):
model = load_model()
st.success("🚀 Model Weights Loaded Successfully !")
image = preprocess_fn(image=image)['image']
image = image.unsqueeze(0)
with st.spinner("🏃♂️ Doing the Math 🤓 ..."):
# add batch dimension to image
results = model(image)
st.success("🚀 Predictions Generated !")
results = results.sigmoid()
results = make_grid(results, normalize=True).permute(1, 2, 0).data.cpu().numpy()
results *= 255.0
results = results.astype(np.uint8)
results = A.functional.adjust_brightness_torchvision(results, factor=1.2)
results = A.functional.adjust_contrast_torchvision(results, factor=0.8)
return Image.fromarray(results)
@st.cache(show_spinner=False)
def get_file_content_as_string():
url = 'https://raw.githubusercontent.com/Git-Nayanjyoti/image-inpainter/main/Project_Info.md'
response = urllib.request.urlopen(url)
return response.read().decode("utf-8")
# fmt: off
def main():
title = """
# Image Inpainter
"""
st.markdown(title)
st.write(" --------- ")
st.sidebar.warning("Please upload SINGLE-person images. For best results, also center the person in the image")
st.sidebar.write(" -------- ")
st.sidebar.title("Browse the Following")
app_mode = st.sidebar.selectbox("Please select from the following", SIDEBAR_OPTIONS)
if app_mode == SIDEBAR_OPTION_PROJECT_INFO:
st.sidebar.write(" ------- ")
st.sidebar.success("Project information showing on the right!")
st.write(get_file_content_as_string())
st.write("Our goal is to retrieve images that have faced the wrath of time and got degraded")
st.info("👈Please select a Model to test")
elif app_mode == SIDEBAR_OPTION_ENCODER_DECODER:
st.write("Auto Encoder Decoder")
st.sidebar.success("Try our Auto Encoder Decoder model on the right!")
uploaded_image = st.file_uploader("Upload An Image", type=["jpg", "png", "jpeg"])
if uploaded_image is not None:
image = Image.open(uploaded_image)
image = resize_image(image)
image = make_holes(image)
predictions = do_predict(image)
st.image(predictions, caption='restored image')
else:
st.sidebar.success("Try our PIX2PIX model on the right!")
st.write("pix2pix GAN")
uploaded_image_gan = st.file_uploader("Upload An Image", type=["jpg", "png", "jpeg"])
if uploaded_image_gan is not None:
image_gan = Image.open(uploaded_image_gan)
image_gan = resize_image(image_gan)
image_gan = make_holes(image_gan)
if __name__ == "__main__":
# run the app
main()
about_expander = st.beta_expander("More About Our Project")
about_expander.write("Hi there! If you have any questions about our project, or simply want to check out the source code, please visit our github repo: https://github.com/Git-Nayanjyoti/image-inpainter.git") |
# NHD_CalculateNLCDSummaries.py
#
# Description: Adds new fields to the catchment NLCD tables (cumulative and
# incremental) and computes the total area of Anderson Level I (not II)
# land covers (e.g. classes 21, 22, 23, and 24 are combined into "NLCD2").
#
# Spring 2015
# [email protected]
import sys, os, arcpy
#The VPU attribute geodatabase; this is the product of the NHD_MergeVPUAttributes.py script
vpuGDB = os.path.abspath("..\\Data\\VPUMerge.gdb")
cumNLCD = "CumTotNLCD2011"
incrNLCD = "IncrNLCD2011"
#Allow overwrites
arcpy.env.overwriteOutput = True
arcpy.env.workspace = vpuGDB
#Make a list of fields to add
flds = ["NLCD1","NLCD2","NLCD3","NLCD4","NLCD5","NLCD7","NLCD8","NLCD9"]
#Add fields (Repeat for cumulative and incremental NLCD tables)
for tbl in [cumNLCD, incrNLCD]:
#Add the fields
for fld in flds:
print ("Adding {} field to {}".format(fld,tbl))
arcpy.AddField_management(tbl,fld,"DOUBLE",8,2)
##Cumulative NLCD-------------
#Create the field calculation expressions (remove the "C" when doing incremental...)
exp1c='!NLCD11AC!'
exp2c='!NLCD21AC! + !NLCD22AC! + !NLCD23AC! + !NLCD24AC!'
exp3c='!NLCD31AC!'
exp4c='!NLCD41AC! + !NLCD42AC! + !NLCD43AC!'
exp5c='!NLCD51AC!'
exp7c='!NLCD71AC!'
exp8c='!NLCD81AC! + !NLCD82AC!'
exp9c='!NLCD90AC! + !NLCD95AC!'
# Execute the field calculations
print "Executing calculations"
x = 1 ##Counter
arcpy.CalculateField_management(cumNLCD,"NLCD1",exp1c,"PYTHON"); print "{} of 8".format(x); x +=1
arcpy.CalculateField_management(cumNLCD,"NLCD2",exp2c,"PYTHON"); print "{} of 8".format(x); x +=1
arcpy.CalculateField_management(cumNLCD,"NLCD3",exp2c,"PYTHON"); print "{} of 8".format(x); x +=1
arcpy.CalculateField_management(cumNLCD,"NLCD4",exp3c,"PYTHON"); print "{} of 8".format(x); x +=1
arcpy.CalculateField_management(cumNLCD,"NLCD5",exp4c,"PYTHON"); print "{} of 8".format(x); x +=1
arcpy.CalculateField_management(cumNLCD,"NLCD7",exp7c,"PYTHON"); print "{} of 8".format(x); x +=1
arcpy.CalculateField_management(cumNLCD,"NLCD8",exp8c,"PYTHON"); print "{} of 8".format(x); x +=1
arcpy.CalculateField_management(cumNLCD,"NLCD9",exp9c,"PYTHON"); print "{} of 8".format(x); x +=1
##Incremental NLCD----------------
#Create the field calculation expressions (same as above, but incremental field names lack the trailing "C")
exp1i='!NLCD11A!'
exp2i='!NLCD21A! + !NLCD22A! + !NLCD23A! + !NLCD24A!'
exp3i='!NLCD31A!'
exp4i='!NLCD41A! + !NLCD42A! + !NLCD43A!'
exp5i='!NLCD51A!'
exp7i='!NLCD71A!'
exp8i='!NLCD81A! + !NLCD82A!'
exp9i='!NLCD90A! + !NLCD95A!'
# Execute the field calculations
x = 1 ##Counter
arcpy.CalculateField_management(incrNLCD,"NLCD1",exp1i,"PYTHON"); print "{} of 8".format(x); x +=1
arcpy.CalculateField_management(incrNLCD,"NLCD2",exp2i,"PYTHON"); print "{} of 8".format(x); x +=1
arcpy.CalculateField_management(incrNLCD,"NLCD3",exp2i,"PYTHON"); print "{} of 8".format(x); x +=1
arcpy.CalculateField_management(incrNLCD,"NLCD4",exp3i,"PYTHON"); print "{} of 8".format(x); x +=1
arcpy.CalculateField_management(incrNLCD,"NLCD5",exp4i,"PYTHON"); print "{} of 8".format(x); x +=1
arcpy.CalculateField_management(incrNLCD,"NLCD7",exp7i,"PYTHON"); print "{} of 8".format(x); x +=1
arcpy.CalculateField_management(incrNLCD,"NLCD8",exp8i,"PYTHON"); print "{} of 8".format(x); x +=1
arcpy.CalculateField_management(incrNLCD,"NLCD9",exp9i,"PYTHON"); print "{} of 8".format(x); x +=1
|
name = u'wechatbotexec'
_all = [u'command'] |
from tests.integration.build_invoke.build_invoke_base import BuildInvokeBase
"""
sam build does not support building dotnetcore 2.1 templates inside a container,
so we put them in a separate file and use a dedicated CodeBuild project with a .NET Core 2.1 runtime to build them.
For each template, it will test the following sam commands:
1. sam init
2. sam build --use-container (if self.use_container is False, --use-container will be omitted)
3. (if there are event JSONs) for each event JSON, check that the `sam local invoke` response is valid JSON
"""
class BuildInvoke_dotnetcore2_1_cookiecutter_aws_sam_hello_dotnet(BuildInvokeBase.DotNetCoreExtraRerunBuildInvokeBase):
use_container = False
directory = "dotnetcore2.1/cookiecutter-aws-sam-hello-dotnet"
class BuildInvoke_dotnetcore2_1_cookiecutter_aws_sam_hello_step_functions_sample_app(
BuildInvokeBase.DotNetCoreExtraRerunBuildInvokeBase
):
use_container = False
directory = "dotnetcore2.1/cookiecutter-aws-sam-hello-step-functions-sample-app"
#
# Image templates
#
class BuildInvoke_image_dotnetcore2_1_cookiecutter_aws_sam_hello_dotnet_lambda_image(
BuildInvokeBase.DotNetCoreExtraRerunBuildInvokeBase
):
directory = "dotnetcore2.1-image/cookiecutter-aws-sam-hello-dotnet-lambda-image"
|
import time
import threading
import traceback
import json
import nose
import sys
import linecache
import inspect
import os.path
import queue as queue
import urllib.parse
from io import StringIO
from http.server import HTTPServer, BaseHTTPRequestHandler
import socketserver as socketserver
from mpi4py import MPI
from nose.plugins.capture import Capture
from nose.plugins.skip import Skip, SkipTest
from nose.core import TestProgram
from multiprocessing import Process, Queue
from optparse import OptionParser
from subprocess import call, Popen, PIPE
EDITOR = None
osascript_to_open_xcode = """on run argv
set linenumber to (item 1 of argv) as integer
set filename_string to item 2 of argv
set file_to_open to POSIX file filename_string
tell application "Xcode"
activate
set doc_to_edit to (open file_to_open)
tell doc_to_edit
set its selection to item linenumber of paragraph of it
end tell
end tell
end run"""
def open_file(path, lineno = 1):
global EDITOR
if sys.platform == 'darwin':
program = Popen(
['osascript', '-', str(lineno), os.path.join(os.getcwd(), path) ],
stdin = PIPE, stdout = PIPE, stderr = PIPE)
out, err = program.communicate(osascript_to_open_xcode.encode("utf-8"))
else:
possible_programs = (
['geany', path, '+'+str(lineno)],
['kate', '-u', '--line',str(lineno),path],
['emacs', '+'+str(lineno), path],
['nedit-client','-line', str(lineno), path],
)
for program in possible_programs:
if program[0] == EDITOR:
returncode = call(['which', program[0]])
if returncode == 0:
call(program)
return
for program in possible_programs:
returncode = call(['which', program[0]])
if returncode == 0:
call(program)
return
if EDITOR: call([EDITOR, path])  # avoid calling with EDITOR=None when no known editor was found
class HandleRequest(BaseHTTPRequestHandler):
def do_GET(self):
self.parsed_path = urllib.parse.urlparse(self.path)
path = self.parsed_path.path[1:]
method_name = 'do_' + path
if hasattr(self, method_name):
method = getattr(self,method_name)
string, content_type = method()
else:
if path.endswith(".js"):
string, content_type = self.javascript_file(path)
else:
string, content_type = self.index_file()
# wfile expects bytes under Python 3; encode first so Content-Length is the byte length
if isinstance(string, str): string = string.encode("utf-8")
self.send_response(200)
self.send_header("Content-type", content_type)
self.send_header("Content-Length", str(len(string)))
self.end_headers()
self.wfile.write(string)
def do_long_poll(self):
self.send_response(200)
self.send_header("Content-Type", "text/javascript")
self.send_header("Transfer-Encoding", "chunked")
self.send_header("Cache-Control", "no-cache, no-store")
self.send_header("Pragma", "no-cache")
self.end_headers()
while True:
self.server.tests_finished.wait(10.0)
if self.server.tests_finished.is_set():
self.send_chunk('true')
self.server.tests_finished.clear()
else:
self.send_chunk('false')
self.wfile.write(b'0\r\n\r\n')
self.wfile.flush()
def send_chunk(self, string):
# chunked transfer encoding requires byte counts and byte writes under Python 3
data = string.encode("utf-8") if isinstance(string, str) else string
hex_length = hex(len(data))[2:]
self.wfile.write(('%s\r\n' % hex_length).encode("ascii"))
self.wfile.flush()
self.wfile.write(data)
self.wfile.write(b'\r\n')
self.wfile.flush()
def index_file(self):
base = os.path.split(__file__)[0]
filename = os.path.join(base, "realtime_test.html")
with open(filename, "r") as file:
contents = file.read()
return contents, 'text/html'
def javascript_file(self, path):
base = os.path.split(__file__)[0]
filename = os.path.join(base, path)
if not os.path.exists(filename):
return '', 'text/javascript'
with open(filename, "r") as file:
contents = file.read()
return contents, 'text/javascript'
def log_message(self, format, *args):
pass
#sys.stderr.write("%s - - [%s] %s\n" %
# (self.address_string(),
# self.log_date_time_string(),
# format%args))
def do_stop(self):
thread = threading.Thread(target=self.server.stop)
thread.daemon = True
thread.start()
return 'null', 'text/javascript'
def do_events(self):
new_events = self.server.get_all_events_since_previous_query()
string = json.dumps(new_events)
content_type = 'text/javascript'
return string, content_type
def do_open_file(self):
parameters = urllib.parse.parse_qs(self.parsed_path.query)
path = parameters['path'][0]
lineno = int(parameters['lineno'][0])
open_file(path, lineno)
string = 'null'
content_type = 'text/javascript'
return string, content_type
class WebServer(socketserver.ThreadingMixIn, HTTPServer):
def __init__(self, port, request_handler):
HTTPServer.__init__(self, ('', port), request_handler)
self.daemon_threads = True
self.events_queue = queue.Queue()
def start(self):
self.serve_forever()
def stop(self):
self.shutdown()
def get_all_events_since_previous_query(self):
try:
events = []
while True:
events.append(self.events_queue.get(False))
except queue.Empty:
pass
return events
|
from PySide import QtGui
from functools import partial
from maya.app.general.mayaMixin import MayaQWidgetBaseMixin
import maya.cmds as cmds
_win = None
def show():
global _win
if _win is None:
_win = BindingDialog()
_win.show()
class BindingDialog(MayaQWidgetBaseMixin, QtGui.QDialog):
def __init__(self, parent=None):
super(BindingDialog, self).__init__(parent)
self.resize(600, 200)
self.setWindowTitle('cvWrap Rebind')
vbox = QtGui.QVBoxLayout(self)
label_width = 130
hbox = QtGui.QHBoxLayout()
vbox.addLayout(hbox)
label = QtGui.QLabel('Components to rebind:')
label.setSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
label.setMinimumWidth(label_width)
label.setMaximumWidth(label_width)
hbox.addWidget(label)
self.components_to_rebind = QtGui.QLineEdit()
self.components_to_rebind.textChanged.connect(self.populate_cvwrap_dropdown)
hbox.addWidget(self.components_to_rebind)
button = QtGui.QPushButton('Set Components')
button.released.connect(partial(self.set_selected_text, widget=self.components_to_rebind))
hbox.addWidget(button)
hbox = QtGui.QHBoxLayout()
vbox.addLayout(hbox)
label = QtGui.QLabel('Faces to rebind to:')
label.setSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
label.setMinimumWidth(label_width)
label.setMaximumWidth(label_width)
hbox.addWidget(label)
self.target_faces = QtGui.QLineEdit()
hbox.addWidget(self.target_faces)
button = QtGui.QPushButton('Set Faces')
button.released.connect(partial(self.set_selected_text, widget=self.target_faces))
hbox.addWidget(button)
hbox = QtGui.QHBoxLayout()
vbox.addLayout(hbox)
label = QtGui.QLabel('cvWrap node:')
label.setSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
label.setMinimumWidth(label_width)
label.setMaximumWidth(label_width)
hbox.addWidget(label)
self.cvwrap_combo = QtGui.QComboBox()
hbox.addWidget(self.cvwrap_combo)
hbox = QtGui.QHBoxLayout()
vbox.addLayout(hbox)
label = QtGui.QLabel('Sample radius:')
label.setSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
label.setMinimumWidth(label_width)
label.setMaximumWidth(label_width)
hbox.addWidget(label)
self.sample_radius = QtGui.QDoubleSpinBox()
self.sample_radius.setValue(0.1)
self.sample_radius.setRange(0, 100)
self.sample_radius.setDecimals(2)
self.sample_radius.setSingleStep(.1)
hbox.addWidget(self.sample_radius)
vbox.addStretch()
hbox = QtGui.QHBoxLayout()
vbox.addLayout(hbox)
button = QtGui.QPushButton('Rebind')
button.released.connect(self.rebind)
hbox.addWidget(button)
def set_selected_text(self, widget):
sel = cmds.ls(sl=True)
text = ' '.join(sel)
widget.setText(text)
def populate_cvwrap_dropdown(self, text):
node = text.split()
if not node:
return
node = node[0].split('.')
if not node:
return
node = node[0]
wrap_nodes = [x for x in cmds.listHistory(node, pdo=True) or []
if cmds.nodeType(x) == 'cvWrap']
self.cvwrap_combo.clear()
self.cvwrap_combo.addItems(wrap_nodes)
def rebind(self):
components = self.components_to_rebind.text().split()
faces = self.target_faces.text().split()
wrap_node = self.cvwrap_combo.currentText()
radius = self.sample_radius.value()
# Make sure the faces are actual faces. If they are not, convert to faces.
cmds.select(faces)
cmds.ConvertSelectionToFaces()
faces = cmds.ls(sl=True)
cmds.select(components)
cmds.ConvertSelectionToVertices()
cmds.select(faces, add=True)
cmds.cvWrap(rb=wrap_node, radius=radius)
print('Rebound vertices')
|
# coding: utf-8
from __future__ import unicode_literals
from admitad.items.base import Item
__all__ = [
'BrokenLinks',
'ManageBrokenLinks'
]
class BrokenLinks(Item):
SCOPE = 'broken_links'
URL = Item.prepare_url('broken_links')
SINGLE_URL = Item.prepare_url('broken_links/%(broken_link_id)s')
def get(self, **kwargs):
"""
Args:
website (list of int)
campaign (list of int)
search (str)
reason (int)
date_start (date)
date_end (date)
limit (int)
offset (int)
"""
filtering = {
'filter_by': kwargs,
'available': {
'website': lambda x: Item.sanitize_integer_array(x, 'website', blank=True),
'campaign': lambda x: Item.sanitize_integer_array(x, 'campaign', blank=True),
'search': lambda x: Item.sanitize_string_value(x, 'search', blank=True),
'reason': lambda x: Item.sanitize_integer_value(x, 'reason', blank=True),
'date_start': lambda x: Item.sanitize_date(x, 'date_start', blank=True),
'date_end': lambda x: Item.sanitize_date(x, 'date_end', blank=True),
}
}
return self.transport.get() \
.set_pagination(**kwargs) \
.set_filtering(filtering) \
.request(url=self.URL)
def getOne(self, broken_link_id):
"""
Args:
broken_link_id (int)
"""
request_data = {
'url': self.SINGLE_URL,
'broken_link_id': Item.sanitize_id(broken_link_id)
}
return self.transport.get().request(**request_data)
class ManageBrokenLinks(Item):
SCOPE = 'manage_broken_links'
RESOLVE_URL = Item.prepare_url('broken_links/resolve')
def resolve(self, broken_link_ids):
"""
Args:
broken_link_ids (list of int)
"""
filtering = {
'filter_by': {
'link_id': broken_link_ids
},
'available': {
'link_id': lambda x: Item.sanitize_integer_array(x, 'link_id', blank=True)
}
}
return self.transport.post() \
.set_filtering(filtering) \
.request(url=self.RESOLVE_URL)
|
from flask import Flask, render_template
app = Flask(__name__, template_folder='templates')
@app.route("/")
def index():
return render_template("index.html")
# other
@app.route("/video")
def video():
return render_template("video.html")
# material
@app.route("/material")
def material():
return render_template("material.html")
@app.route("/historia")
def historia():
return render_template("historia.html")
|
import typing as t
from .filters import FILTERS as DEFAULT_FILTERS # noqa: F401
from .tests import TESTS as DEFAULT_TESTS # noqa: F401
from .utils import Cycler
from .utils import generate_lorem_ipsum
from .utils import Joiner
from .utils import Namespace
if t.TYPE_CHECKING:
import typing_extensions as te
# defaults for the parser / lexer
BLOCK_START_STRING = "{%"
BLOCK_END_STRING = "%}"
VARIABLE_START_STRING = "{{"
VARIABLE_END_STRING = "}}"
COMMENT_START_STRING = "{#"
COMMENT_END_STRING = "#}"
LINE_STATEMENT_PREFIX: t.Optional[str] = None
LINE_COMMENT_PREFIX: t.Optional[str] = None
TRIM_BLOCKS = False
LSTRIP_BLOCKS = False
NEWLINE_SEQUENCE: "te.Literal['\\n', '\\r\\n', '\\r']" = "\n"
KEEP_TRAILING_NEWLINE = False
# default filters, tests and namespace
DEFAULT_NAMESPACE = {
"range": range,
"dict": dict,
"lipsum": generate_lorem_ipsum,
"cycler": Cycler,
"joiner": Joiner,
"namespace": Namespace,
}
# default policies
DEFAULT_POLICIES: t.Dict[str, t.Any] = {
"compiler.ascii_str": True,
"urlize.rel": "noopener",
"urlize.target": None,
"urlize.extra_schemes": None,
"truncate.leeway": 5,
"json.dumps_function": None,
"json.dumps_kwargs": {"sort_keys": True},
"ext.i18n.trimmed": False,
}
|
"""
.. module: lemur.users.schemas
:platform: unix
:copyright: (c) 2015 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. moduleauthor:: Kevin Glisson <[email protected]>
"""
from marshmallow import fields
from lemur.common.schema import LemurInputSchema, LemurOutputSchema
from lemur.schemas import AssociatedRoleSchema, AssociatedCertificateSchema, AssociatedAuthoritySchema
class UserInputSchema(LemurInputSchema):
username = fields.String(required=True)
email = fields.Email(required=True)
password = fields.String() # TODO add complexity requirements
active = fields.Boolean()
roles = fields.Nested(AssociatedRoleSchema, many=True, missing=[])
certificates = fields.Nested(AssociatedCertificateSchema, many=True, missing=[])
authorities = fields.Nested(AssociatedAuthoritySchema, many=True, missing=[])
class UserOutputSchema(LemurOutputSchema):
id = fields.Integer()
username = fields.String()
email = fields.Email()
active = fields.Boolean()
profile_picture = fields.String()
user_input_schema = UserInputSchema()
user_output_schema = UserOutputSchema()
users_output_schema = UserOutputSchema(many=True)
class UserNestedOutputSchema(LemurOutputSchema):
__envelope__ = False
id = fields.Integer()
username = fields.String()
email = fields.Email()
active = fields.Boolean()
|
"""Module responsible for Ground Truth data set generation."""
|
"""This file defines django models for editor app."""
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from time import time
from hashids import Hashids
from django.conf import settings
def get_photo_path(instance, filename):
"""Define the upload path for saving the current user's photo to disk."""
user_slug = "{}{}".format(
instance.user.username,
instance.user.id
)
upload_path = "photos/{}/{}".format(user_slug, filename)
return upload_path
def generate_uid():
"""Generate a unique id using a custom salt, alphabet and min length."""
salt = settings.SECRET_KEY
alphabet = settings.UID_ALPHABET
hashids = Hashids(salt=salt, alphabet=alphabet)
unique_id = hashids.encode(int(time() * 1000))
return unique_id
class Photo(models.Model):
"""This model represents photo records uploaded by the current user."""
image = models.ImageField(upload_to=get_photo_path, max_length=255)
name = models.CharField(default=generate_uid, max_length=255)
image_effect = models.CharField(max_length=255, blank=True)
date_created = models.DateTimeField(editable=False, auto_now_add=True)
date_modified = models.DateTimeField(editable=False, auto_now=True)
user = models.ForeignKey(User)
|
import argparse
import glob
import os
import cv2
from yolo import YOLO
ap = argparse.ArgumentParser()
ap.add_argument('-i', '--images', default="images", help='Path to images or image file')
ap.add_argument('-n', '--network', default="normal", help='Network Type: normal / tiny / prn / v4-tiny')
ap.add_argument('-d', '--device', default=0, help='Device to use')
ap.add_argument('-s', '--size', default=416, help='Size for yolo')
ap.add_argument('-c', '--confidence', default=0.25, help='Confidence for yolo')
args = ap.parse_args()
if args.network == "normal":
print("loading yolo...")
yolo = YOLO("models/cross-hands.cfg", "models/cross-hands.weights", ["hand"])
yolo.size = int(args.size)
yolo.confidence = float(args.confidence)
print("extracting tags for each image...")
if args.images.endswith(".txt"):
with open(args.images, "r") as myfile:
lines = myfile.readlines()
files = map(lambda x: os.path.join(os.path.dirname(args.images), x.strip()), lines)
else:
files = sorted(glob.glob("%s/*.jpg" % args.images))
conf_sum = 0
detection_count = 0
for file in files:
print(file)
mat = cv2.imread(file)
width, height, inference_time, results = yolo.inference(mat)
print("%s in %s seconds: %s classes found!" %
(os.path.basename(file), round(inference_time, 2), len(results)))
output = []
cv2.namedWindow('image', cv2.WINDOW_NORMAL)
cv2.resizeWindow('image', 848, 640)
for detection in results:
id, name, confidence, x, y, w, h = detection
cx = x + (w / 2)
cy = y + (h / 2)
conf_sum += confidence
detection_count += 1
# draw a bounding box rectangle and label on the image
color = (255, 0, 255)
cv2.rectangle(mat, (x, y), (x + w, y + h), color, 1)
text = "%s (%s)" % (name, round(confidence, 2))
cv2.putText(mat, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,
0.25, color, 1)
print("%s with %s confidence" % (name, round(confidence, 2)))
print(x,y)
# cv2.imwrite("export.jpg", mat)
# show the output image
cv2.imshow('image', mat)
cv2.waitKey(0)
print("AVG Confidence: %s Count: %s" % (round(conf_sum / detection_count, 2), detection_count))
cv2.destroyAllWindows()
|
#
# PySNMP MIB module CISCO-ENTITY-SENSOR-EXT-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-ENTITY-SENSOR-EXT-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:57:12 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsIntersection, ValueRangeConstraint, ConstraintsUnion, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "ConstraintsUnion", "SingleValueConstraint")
ciscoMgmt, = mibBuilder.importSymbols("CISCO-SMI", "ciscoMgmt")
entPhysicalName, entPhysicalDescr, entPhysicalIndex = mibBuilder.importSymbols("ENTITY-MIB", "entPhysicalName", "entPhysicalDescr", "entPhysicalIndex")
EntitySensorValue, entPhySensorValue, entPhySensorType = mibBuilder.importSymbols("ENTITY-SENSOR-MIB", "EntitySensorValue", "entPhySensorValue", "entPhySensorType")
ModuleCompliance, NotificationGroup, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup", "ObjectGroup")
Unsigned32, ObjectIdentity, NotificationType, IpAddress, ModuleIdentity, Counter32, Integer32, TimeTicks, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, Bits, iso, Gauge32 = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "ObjectIdentity", "NotificationType", "IpAddress", "ModuleIdentity", "Counter32", "Integer32", "TimeTicks", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "Bits", "iso", "Gauge32")
TruthValue, DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "DisplayString", "TextualConvention")
ciscoEntitySensorExtMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 745))
ciscoEntitySensorExtMIB.setRevisions(('2010-06-09 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: ciscoEntitySensorExtMIB.setRevisionsDescriptions(('Initial version of this MIB module.',))
if mibBuilder.loadTexts: ciscoEntitySensorExtMIB.setLastUpdated('201006100000Z')
if mibBuilder.loadTexts: ciscoEntitySensorExtMIB.setOrganization('Cisco Systems, Inc.')
if mibBuilder.loadTexts: ciscoEntitySensorExtMIB.setContactInfo('Postal: Cisco Systems, Inc. 170 West Tasman Drive San Jose, CA 95134-1706 USA Tel: +1 408 526 4000 E-mail: [email protected]')
if mibBuilder.loadTexts: ciscoEntitySensorExtMIB.setDescription('This MIB is extension to ENTITY-SENSOR-MIB(RFC 3433). This MIB also defines the notifications applicable for sensors reported in ENTITY-MIB(RFC 4133).')
ciscoEntitySensorExtMIBNotifs = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 745, 0))
ciscoEntitySensorExtMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 745, 1))
ciscoEntitySensorExtMIBConform = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 745, 2))
class CiscoSensorThresholdSeverity(TextualConvention, Integer32):
description = 'sensor threshold severity. Valid values are: other(1) : a severity other than those listed below. minor(10) : Minor Problem threshold. major(20) : Major Problem threshold. critical(30): Critical problem threshold. A system might shut down the sensor associated FRU automatically if the sensor value reach the critical problem threshold.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 10, 20, 30))
namedValues = NamedValues(("other", 1), ("minor", 10), ("major", 20), ("critical", 30))
class CiscoSensorThresholdRelation(TextualConvention, Integer32):
description = 'sensor threshold relational operator types. valid values are: lessThan(1): if the sensor value is less than the threshold value lessOrEqual(2): if the sensor value is less than or equal to the threshold value greaterThan(3): if the sensor value is greater than the threshold value greaterOrEqual(4): if the sensor value is greater than or equal to the threshold value equalTo(5): if the sensor value is equal to the threshold value notEqualTo(6): if the sensor value is not equal to the threshold value'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))
namedValues = NamedValues(("lessThan", 1), ("lessOrEqual", 2), ("greaterThan", 3), ("greaterOrEqual", 4), ("equalTo", 5), ("notEqualTo", 6))
ceSensorExtThresholdTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 745, 1, 1), )
if mibBuilder.loadTexts: ceSensorExtThresholdTable.setReference('ENTITY-MIB contains definition for entPhysicalTable')
if mibBuilder.loadTexts: ceSensorExtThresholdTable.setStatus('current')
if mibBuilder.loadTexts: ceSensorExtThresholdTable.setDescription('This table lists the threshold severity, relation, and comparison value, for a sensor entity listed in entPhysicalTable.')
ceSensorExtThresholdEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 745, 1, 1, 1), ).setIndexNames((0, "ENTITY-MIB", "entPhysicalIndex"), (0, "CISCO-ENTITY-SENSOR-EXT-MIB", "ceSensorExtThresholdIndex"))
if mibBuilder.loadTexts: ceSensorExtThresholdEntry.setReference('ENTITY-MIB contains definition for entPhysicalClass')
if mibBuilder.loadTexts: ceSensorExtThresholdEntry.setStatus('current')
if mibBuilder.loadTexts: ceSensorExtThresholdEntry.setDescription("An ceSensorExtThresholdTable entry describes the thresholds for a sensor: the threshold severity, the threshold value, the relation, and the evaluation of the threshold. Only entities with entPhysicalClass 'sensor' are listed in this table. For non FRU entities the entries are created by the agent at system startup and entries are never deleted by the agent. For FRU entities the entries are created at system startup if FRU is inserted at system startup, else entries are created when FRU is inserted. Entries are deleted by the agent when FRU is removed.")
ceSensorExtThresholdIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 745, 1, 1, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)))
if mibBuilder.loadTexts: ceSensorExtThresholdIndex.setStatus('current')
if mibBuilder.loadTexts: ceSensorExtThresholdIndex.setDescription('An index that uniquely identifies an entry in the ceSensorExtThresholdTable. This index permits the same sensor to have several different thresholds.')
ceSensorExtThresholdSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 745, 1, 1, 1, 2), CiscoSensorThresholdSeverity()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ceSensorExtThresholdSeverity.setStatus('current')
if mibBuilder.loadTexts: ceSensorExtThresholdSeverity.setDescription('This object specifies the severity of this threshold.')
ceSensorExtThresholdRelation = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 745, 1, 1, 1, 3), CiscoSensorThresholdRelation()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ceSensorExtThresholdRelation.setStatus('current')
if mibBuilder.loadTexts: ceSensorExtThresholdRelation.setDescription("This object specifies the boolean relation between sensor value (entPhySensorValue) and threshold value (ceSensorExtThresholdValue), required to trigger the alarm. in pseudo-code, the evaluation-alarm mechanism is: ... if (evaluate(entPhySensorValue, ceSensorExtThresholdRelation, ceSensorExtThresholdValue)) then if (((ceSensorExtThresholdNotifEnable == enabled) || (ceSensorExtThresholdNotifEnable == transparent)) && (ceSensorExtThresholdNotifGlobalEnable == enabled)) then raise_alarm(sensor's entPhysicalIndex); endif endif ...")
ceSensorExtThresholdValue = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 745, 1, 1, 1, 4), EntitySensorValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ceSensorExtThresholdValue.setReference('ENTITY-SENSOR-MIB contains definitions for entPhysSensorScale and entPhySensorPrecision')
if mibBuilder.loadTexts: ceSensorExtThresholdValue.setStatus('current')
if mibBuilder.loadTexts: ceSensorExtThresholdValue.setDescription('This object specifies the value of the threshold. The value of objects entPhySensorType, entPhysSensorScale and entPhySensorPrecision for this sensor entity defines how ceSensorExtThresholdValue can be displayed or intepreted by the user. entPhySensorValue can be compared with ceSensorExtThresholdValue without taking care of semantics of both objects.')
ceSensorExtThresholdEvaluation = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 745, 1, 1, 1, 5), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ceSensorExtThresholdEvaluation.setReference('ENTITY-SENSOR-MIB contains definition for entPhySensorValueUpdateRate')
if mibBuilder.loadTexts: ceSensorExtThresholdEvaluation.setStatus('current')
if mibBuilder.loadTexts: ceSensorExtThresholdEvaluation.setDescription("This object indicates the result of the most recent evaluation of the threshold. The agent will execute the below 'evaluate' function to generate the notification. 'evaluate' function returns a boolean value. evaluate(entPhySensorValue, ceSensorExtThresholdRelation, ceSensorExtThresholdValue) If evalute function returns true then ceSensorExtThresholdEvaluation is set to 'true' If evaluate function returns false then ceSensorExtThresholdEvaluation is set to 'false'. Thresholds are evaluated at the rate indicated by entPhySensorValueUpdateRate.")
ceSensorExtThresholdNotifEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 745, 1, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2), ("transparent", 3))).clone('transparent')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ceSensorExtThresholdNotifEnable.setStatus('current')
if mibBuilder.loadTexts: ceSensorExtThresholdNotifEnable.setDescription("A control object to activate/deactivate ceSensorExtThresholdNotification. This object should hold any of the below values. enabled(1) - The notification is enabled for this entity disabled(2) - The notification is disabled for this entity transparent(3)- The notification is enabled/disabled based on ceSensorExtThresholdNotifGlobalEnable object This object controls generation of ceSensorExtThresholdNotification for this threshold. An exception to this is, if this object is set to 'transparent' then ceSensorExtThresholdNotification for this threshold is controlled by ceSensorExtThresholdNotifGlobalEnable object. This truth table explains how ceSensorExtThresholdNotifEnable is related with ceSensorExtThresholdNotifGlobalEnable to control the ceSensorExtThresholdNotification for this threshold E = enabled, D = Disabled, T = Transparent local_flag = ceSensorExtThresholdNotifEnable global_flag = ceSensorExtThresholdNotifGlobalEnable local_flag global_flag outcome_per_interface --------------------------------------------- E E E E D D D E D D D D T E E T D D")
ciscoEntSensorExtGlobalObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 745, 1, 2))
ceSensorExtThresholdNotifGlobalEnable = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 745, 1, 2, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ceSensorExtThresholdNotifGlobalEnable.setStatus('current')
if mibBuilder.loadTexts: ceSensorExtThresholdNotifGlobalEnable.setDescription("A control object to activate/deactivate ceSensorExtThresholdNotification. This object should hold any of the below values. enabled(1) - The notification is enabled globally on the device disabled(2)- The notification is disabled globally on the device This object enables the generation of ceSensorExtThresholdNotification globally on the device. If this object value is 'disabled', then no ceSensorExtThresholdNotification will be generated on this device. If this object value is 'enabled', then whether a ceSensorExtThresholdNotification for a threshold will be generated or not depends on the instance value of ceSensorExtThresholdNotifEnable for that threshold.")
ceSensorExtThresholdNotification = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 745, 0, 1)).setObjects(("ENTITY-MIB", "entPhysicalName"), ("ENTITY-MIB", "entPhysicalDescr"), ("ENTITY-SENSOR-MIB", "entPhySensorValue"), ("ENTITY-SENSOR-MIB", "entPhySensorType"), ("CISCO-ENTITY-SENSOR-EXT-MIB", "ceSensorExtThresholdValue"))
if mibBuilder.loadTexts: ceSensorExtThresholdNotification.setStatus('current')
if mibBuilder.loadTexts: ceSensorExtThresholdNotification.setDescription('This notification is generated once each time the sensor value crosses the threshold value specified by ceSensorExtThresholdValue object.')
ciscoEntSensorExtMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 745, 2, 1))
ciscoEntSensorExtMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 745, 2, 2))
ciscoEntSensorExtMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 745, 2, 1, 1)).setObjects(("CISCO-ENTITY-SENSOR-EXT-MIB", "ciscoEntSensorExtThresholdGroup"), ("CISCO-ENTITY-SENSOR-EXT-MIB", "ciscoEntSensorExtNotificationCtrlGroup"), ("CISCO-ENTITY-SENSOR-EXT-MIB", "ciscoEntSensorExtNotificationGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoEntSensorExtMIBCompliance = ciscoEntSensorExtMIBCompliance.setStatus('current')
if mibBuilder.loadTexts: ciscoEntSensorExtMIBCompliance.setDescription('An ENTITY-MIB implementation that adds notification for sensors in the entPhysicalTable must implement this group.')
ciscoEntSensorExtThresholdGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 745, 2, 2, 1)).setObjects(("CISCO-ENTITY-SENSOR-EXT-MIB", "ceSensorExtThresholdSeverity"), ("CISCO-ENTITY-SENSOR-EXT-MIB", "ceSensorExtThresholdRelation"), ("CISCO-ENTITY-SENSOR-EXT-MIB", "ceSensorExtThresholdValue"), ("CISCO-ENTITY-SENSOR-EXT-MIB", "ceSensorExtThresholdEvaluation"), ("CISCO-ENTITY-SENSOR-EXT-MIB", "ceSensorExtThresholdNotifEnable"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoEntSensorExtThresholdGroup = ciscoEntSensorExtThresholdGroup.setStatus('current')
if mibBuilder.loadTexts: ciscoEntSensorExtThresholdGroup.setDescription('The collection of objects which are used to describe and monitor thresholds for sensors.')
ciscoEntSensorExtNotificationGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 9, 9, 745, 2, 2, 2)).setObjects(("CISCO-ENTITY-SENSOR-EXT-MIB", "ceSensorExtThresholdNotification"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoEntSensorExtNotificationGroup = ciscoEntSensorExtNotificationGroup.setStatus('current')
if mibBuilder.loadTexts: ciscoEntSensorExtNotificationGroup.setDescription('The collection of notifications used for monitoring sensor threshold activity.')
ciscoEntSensorExtNotificationCtrlGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 745, 2, 2, 3)).setObjects(("CISCO-ENTITY-SENSOR-EXT-MIB", "ceSensorExtThresholdNotifGlobalEnable"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoEntSensorExtNotificationCtrlGroup = ciscoEntSensorExtNotificationCtrlGroup.setStatus('current')
if mibBuilder.loadTexts: ciscoEntSensorExtNotificationCtrlGroup.setDescription('The collection of objects which provide the global notification control on ceSensorExtThresholdNotification.')
mibBuilder.exportSymbols("CISCO-ENTITY-SENSOR-EXT-MIB", ciscoEntSensorExtMIBCompliance=ciscoEntSensorExtMIBCompliance, ciscoEntSensorExtGlobalObjects=ciscoEntSensorExtGlobalObjects, ceSensorExtThresholdValue=ceSensorExtThresholdValue, ciscoEntitySensorExtMIBNotifs=ciscoEntitySensorExtMIBNotifs, ciscoEntSensorExtNotificationGroup=ciscoEntSensorExtNotificationGroup, ciscoEntSensorExtThresholdGroup=ciscoEntSensorExtThresholdGroup, ciscoEntitySensorExtMIBConform=ciscoEntitySensorExtMIBConform, ciscoEntSensorExtMIBGroups=ciscoEntSensorExtMIBGroups, ceSensorExtThresholdSeverity=ceSensorExtThresholdSeverity, ciscoEntSensorExtMIBCompliances=ciscoEntSensorExtMIBCompliances, ciscoEntitySensorExtMIB=ciscoEntitySensorExtMIB, ceSensorExtThresholdNotification=ceSensorExtThresholdNotification, ceSensorExtThresholdEvaluation=ceSensorExtThresholdEvaluation, ceSensorExtThresholdNotifEnable=ceSensorExtThresholdNotifEnable, ceSensorExtThresholdIndex=ceSensorExtThresholdIndex, ceSensorExtThresholdRelation=ceSensorExtThresholdRelation, CiscoSensorThresholdSeverity=CiscoSensorThresholdSeverity, ciscoEntitySensorExtMIBObjects=ciscoEntitySensorExtMIBObjects, ceSensorExtThresholdTable=ceSensorExtThresholdTable, CiscoSensorThresholdRelation=CiscoSensorThresholdRelation, ceSensorExtThresholdNotifGlobalEnable=ceSensorExtThresholdNotifGlobalEnable, ceSensorExtThresholdEntry=ceSensorExtThresholdEntry, PYSNMP_MODULE_ID=ciscoEntitySensorExtMIB, ciscoEntSensorExtNotificationCtrlGroup=ciscoEntSensorExtNotificationCtrlGroup)
|
# Current issues
# 1. When paging reaches the end there are three cases: a 404 status, a 200 response with no titles, and a 200 response that has titles but still shows the last page
from pyquery import PyQuery as pq
import urllib.error
import sys
def get_article_list(url,page_rule,titles_selector,links_selector,page_second_num,encoding="utf-8"):
# If page_rule is empty, the article list is not paginated
if page_rule:
# Preprocessing
if "<num>" not in page_rule:
raise ValueError("<num>不在翻页网址中")
doc=pq(url,encoding=encoding)
titles=[t.text() for t in doc(titles_selector).items()]
links=[l.attr("href") for l in doc(links_selector).items()]
if not titles:
raise ValueError("未获取到文章标题")
if not links:
raise ValueError("未获取到文章链接")
i=2
last_cache=[]
print("启动循环加载")
try:
while True:
doc=pq(page_rule.replace("<num>",str(i)),encoding=encoding)
this_title=[t.text() for t in doc(titles_selector).items()]
this_link=[l.attr("href") for l in doc(links_selector).items()]
# print(this_title)
if not this_title:
print("未获取到标题")
break
if not this_link:
print("未获取到链接")
break
if this_title==last_cache:
print("两次得到相同标题")
break
last_cache=this_title
titles.extend(this_title)
links.extend(this_link)
i+=1
except urllib.error.HTTPError as e:
print(repr(e))
article_zip=zip(titles,links)
return article_zip
def print_article_list(a_data):
for t,l in a_data:
print(t,l)
if __name__=="__main__":
data={
"url":"https://www.vinoca.org/",
"page_rule":"https://www.vinoca.org/page/<num>/",
"titles_selector":".content h2 a",
"links_selector":".content h2 a",
"page_second_num":"2"
}
try:
print_article_list(get_article_list(**data))
except ValueError as e:
print(repr(e))
except urllib.error.HTTPError as e:
print(repr(e))
except:
print("未知错误:", sys.exc_info()[0])
# try:
# print(pq("http://www.yangxg.com/6"))
# except urllib.error.HTTPError as e:
# print(repr(e))
|
#!/usr/bin/python3.6
import argparse
import os
import re
import sys
import logging
def main():
parser = argparse.ArgumentParser("ya-remove")
parser.add_argument("--input", required=False, default=".", type=str, help="""
directory whose files will be checked for removal. Defaults to the current working directory
""")
parser.add_argument("--regex", required=True, type=str, help="""
Python regex (with capturing groups). Files whose names match this regex will be removed
""")
parser.add_argument("--simulate", action="store_true", help="""
if present we will simply log what we will change without actually change it
""")
parser.add_argument("--verbose", action="store_true", help="""
if present we will log what we change
""")
options = parser.parse_args(sys.argv[1:])
input = options.input
regex = options.regex
simulate = options.simulate
verbose = options.verbose
if verbose:
level = logging.INFO
else:
level = logging.CRITICAL
logging.basicConfig(level=level)
for filename in os.listdir(input):
m = re.search(regex, filename)
if m is None:
logging.info(f"\"{filename}\" not compliant with \"{regex}\"")
continue
# now perform the remove
if not simulate:
logging.info("removing {filename}")
os.remove(filename)
else:
logging.critical(f"we should remove \"{filename}\"")
if __name__ == "__main__":
main()
|
number = int(input())
day = ''
if number == 1:
day = 'Monday'
elif number == 2:
day = 'Tuesday'
elif number == 3:
day = 'Wednesday'
elif number == 4:
day = 'Thursday'
elif number == 5:
day = 'Friday'
elif number == 6:
day = 'Saturday'
elif number == 7:
day = 'Sunday'
else:
day = 'Error'
print(day) |
# Copyright (c) 2016 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, unicode_literals, print_function
import pytest
from thriftrw.idl import Parser
from thriftrw.spec import primitive as prim_spec
from thriftrw.spec.map import MapTypeSpec
from thriftrw.spec.typedef import TypedefTypeSpec
from thriftrw.spec.spec_mapper import type_spec_or_ref
from thriftrw.wire import ttype
from ..util.value import vi32, vmap, vbinary
@pytest.fixture
def parse():
return Parser(start='map_type', silent=True).parse
def test_mapper(parse):
ast = parse('map<string, i32>')
spec = type_spec_or_ref(ast)
assert spec == MapTypeSpec(prim_spec.TextTypeSpec, prim_spec.I32TypeSpec)
def test_link(parse, scope):
ast = parse('map<string, Foo>')
spec = type_spec_or_ref(ast)
scope.add_type_spec(
'Foo', TypedefTypeSpec('Foo', prim_spec.I32TypeSpec), 1
)
spec = spec.link(scope)
assert spec.vspec == prim_spec.I32TypeSpec
value = {u'foo': 1, u'bar': 2}
assert (
spec.to_wire(value) == vmap(
ttype.BINARY, ttype.I32,
(vbinary(b'foo'), vi32(1)),
(vbinary(b'bar'), vi32(2)),
)
) or (
spec.to_wire(value) == vmap(
ttype.BINARY, ttype.I32,
(vbinary(b'bar'), vi32(2)),
(vbinary(b'foo'), vi32(1)),
)
)
assert value == spec.from_wire(spec.to_wire(value))
def test_primitive(parse, scope, loads):
Foo = loads('struct Foo { 1: required string bar }').Foo
scope.add_type_spec('Foo', Foo.type_spec, 1)
spec = type_spec_or_ref(parse('map<string, Foo>')).link(scope)
value = {
'a': Foo('1'),
'b': Foo('2'),
'c': Foo('3'),
}
prim_value = {
'a': {'bar': '1'},
'b': {'bar': '2'},
'c': {'bar': '3'},
}
assert spec.to_primitive(value) == prim_value
assert spec.from_primitive(prim_value) == value
|
import re
import markdown
from django.http import Http404
from django.shortcuts import render, get_object_or_404
from markdown.extensions.toc import TocExtension, slugify
from blog.models import Post, Category, Label
def index(request):
posts = Post.objects.filter(is_delete=False).order_by('-c_time', '-pk')
return render(request, 'blog/index.html', context={'title': '文章列表', 'posts': posts})
def detail(request, pk):
post = get_object_or_404(Post, pk=pk)
if post.is_delete:
raise Http404('No %s matches the given query.' % post.title)
md = markdown.Markdown(extensions=[
'markdown.extensions.extra',
'markdown.extensions.codehilite',
TocExtension(slugify=slugify),
])
post.body = md.convert(post.body)
post.toc = md.toc
m = re.search(r'<div class="toc">\s*<ul>(.*)</ul>\s*</div>', md.toc, re.S)
post.toc = m.group(1) if m is not None else ''
return render(request, 'blog/detail.html', context={'title': post.title, 'post': post})
def archive(request, year, month):
posts = Post.objects.filter(is_delete=False,
c_time__year=year,
c_time__month=month).order_by('-c_time', '-pk')
return render(request, 'blog/index.html', context={'title': f'归档 - {year} 年 {month} 月', 'posts': posts})
def category(request, pk):
cat = get_object_or_404(Category, pk=pk)
posts = Post.objects.filter(is_delete=False,
category=cat).order_by('-c_time', '-pk')
return render(request, 'blog/index.html', context={'title': '分类 - ' + cat.name, 'posts': posts})
def label(request, pk):
lab = get_object_or_404(Label, pk=pk)
posts = Post.objects.filter(is_delete=False,
labels=lab).order_by('-c_time', '-pk')
return render(request, 'blog/index.html', context={'title': '标签 - ' + lab.name, 'posts': posts})
|
from pathlib import Path
from pyhdx import VERSION_STRING
from importlib.metadata import version
from datetime import datetime
import sys
import os
import platform
def write_log(script_name):
log_name = Path(script_name).stem
out_name = log_name + '.log'
lines = [f"Log file for python script: {log_name}.py"]
now = datetime.now()
date = f'{now.strftime("%Y/%m/%d %H:%M:%S")} ({int(now.timestamp())})'
lines.append(f"Executed at: {date}")
lines.append(f"Python version: {sys.version}")
lines.append(f"OS: {platform.system()}, release {platform.release()}")
try:
lines.append(f"Conda env: {os.environ['CONDA_DEFAULT_ENV']}")
except KeyError:
pass
lines.append("")
lines.append(VERSION_STRING)
lines.append("")
lines.append("Dependencies versions:")
packages = ['numpy', 'torch', 'pandas', 'hdxrate', 'scipy', 'symfit', 'scikit-image', 'dask', 'distributed']
for package in packages:
ver = version(package)
line = f"{package}: {ver}"
lines.append(line)
s = '\n'.join(lines)
Path(out_name).write_text(s)
if __name__ == '__main__':
write_log(__file__) |
# =============================================================================
# PROJECT CHRONO - http://projectchrono.org
#
# Copyright (c) 2014 projectchrono.org
# All rights reserved.
#
# Use of this source code is governed by a BSD-style license that can be found
# in the LICENSE file at the top level of the distribution and at
# http://projectchrono.org/license-chrono.txt.
#
# =============================================================================
# Authors: Radu Serban
# =============================================================================
#
# Demonstration of vehicle over SCM deformable terrain
#
# The vehicle reference frame has Z up, X towards the front of the vehicle, and
# Y pointing to the left. All units SI.
#
# =============================================================================
import pychrono as chrono
import pychrono.vehicle as veh
import pychrono.irrlicht as irr
import math as m
#// =============================================================================
class MyDriver (veh.ChDriver):
def __init__(self, vehicle, delay):
veh.ChDriver.__init__(self, vehicle)
self.delay = delay
def Synchronize(self, time):
eff_time = time - self.delay
if (eff_time < 0):
return
if (eff_time > 0.2):
self.SetThrottle(0.7)
else:
self.SetThrottle(3.5 * eff_time)
if (eff_time < 2):
self.SetSteering(0.0)
else:
self.SetSteering(0.6 * m.sin(2.0 * m.pi * (eff_time - 2) / 6))
self.SetBraking(0.0)
def main():
#print("Copyright (c) 2017 projectchrono.org\nChrono version: ", CHRONO_VERSION , "\n\n")
# Create the HMMWV vehicle, set parameters, and initialize
my_hmmwv = veh.HMMWV_Full()
my_hmmwv.SetContactMethod(chrono.ChContactMethod_SMC)
my_hmmwv.SetInitPosition(chrono.ChCoordsysD(chrono.ChVectorD(-5, -2, 0.6), chrono.ChQuaternionD(1, 0, 0, 0)))
my_hmmwv.SetPowertrainType(veh.PowertrainModelType_SHAFTS)
my_hmmwv.SetDriveType(veh.DrivelineType_AWD)
my_hmmwv.SetTireType(veh.TireModelType_RIGID)
my_hmmwv.SetTireStepSize(tire_step_size)
my_hmmwv.Initialize()
my_hmmwv.SetChassisVisualizationType(veh.VisualizationType_NONE)
my_hmmwv.SetSuspensionVisualizationType(veh.VisualizationType_PRIMITIVES)
my_hmmwv.SetSteeringVisualizationType(veh.VisualizationType_PRIMITIVES)
my_hmmwv.SetWheelVisualizationType(veh.VisualizationType_NONE)
my_hmmwv.SetTireVisualizationType(veh.VisualizationType_MESH)
# Create the (custom) driver
driver = MyDriver(my_hmmwv.GetVehicle(), 0.5)
driver.Initialize()
# Create the SCM deformable terrain patch
terrain = veh.SCMDeformableTerrain(my_hmmwv.GetSystem())
terrain.SetSoilParameters(2e6, # Bekker Kphi
0, # Bekker Kc
1.1, # Bekker n exponent
0, # Mohr cohesive limit (Pa)
30, # Mohr friction limit (degrees)
0.01, # Janosi shear coefficient (m)
2e8, # Elastic stiffness (Pa/m), before plastic yield
3e4 # Damping (Pa s/m), proportional to negative vertical speed (optional)
)
# Use automatic refinement of the SCM mesh
terrain.SetAutomaticRefinement(True)
terrain.SetAutomaticRefinementResolution(0.04)
# Optionally, enable moving patch feature (single patch around vehicle chassis)
terrain.AddMovingPatch(my_hmmwv.GetChassisBody(), chrono.ChVectorD(0, 0, 0), 5, 3)
# Set plot type for SCM (false color plotting)
terrain.SetPlotType(veh.SCMDeformableTerrain.PLOT_SINKAGE, 0, 0.1)
# Initialize the SCM terrain, specifying the initial mesh grid
terrain.Initialize(terrainHeight, terrainLength, terrainWidth, divLength, divWidth)
# Create the vehicle Irrlicht interface
app = veh.ChWheeledVehicleIrrApp(my_hmmwv.GetVehicle(), 'HMMWV Deformable Soil Demo', irr.dimension2du(1000,800))
app.SetSkyBox()
app.AddTypicalLights(irr.vector3df(30, -30, 100), irr.vector3df(30, 50, 100), 250, 130)
app.AddTypicalLogo(chrono.GetChronoDataFile('logo_pychrono_alpha.png'))
app.SetChaseCamera(chrono.ChVectorD(0.0, 0.0, 1.75), 6.0, 0.5)
app.SetTimestep(step_size)
app.AssetBindAll()
app.AssetUpdateAll()
# Simulation loop
while (app.GetDevice().run()):
time = my_hmmwv.GetSystem().GetChTime()
# End simulation
if (time >= 4):
break
# Draw scene
app.BeginScene(True, True, irr.SColor(255, 140, 161, 192))
app.DrawAll()
app.EndScene()
# Get driver inputs
driver_inputs = driver.GetInputs()
# Update modules (process inputs from other modules)
driver.Synchronize(time)
terrain.Synchronize(time)
my_hmmwv.Synchronize(time, driver_inputs, terrain)
app.Synchronize("", driver_inputs)
# Advance simulation for one timestep for all modules
driver.Advance(step_size)
terrain.Advance(step_size)
my_hmmwv.Advance(step_size)
app.Advance(step_size)
return 0
# The path to the Chrono data directory containing various assets (meshes, textures, data files)
# is automatically set, relative to the default location of this demo.
# If running from a different directory, you must change the path to the data directory with:
#chrono.SetChronoDataPath('path/to/data')
veh.SetDataPath(chrono.GetChronoDataPath() + 'vehicle/')
# SCM patch dimensions
terrainHeight = 0
terrainLength = 16.0 # size in X direction
terrainWidth = 8.0 # size in Y direction
# SCM mesh divisions (X and Y)
divLength = 128
divWidth = 64
# Simulation step sizes
step_size = 2e-3
tire_step_size = 1e-3
main()
|
class Solution:
def removeDigit(self, number: str, digit: str) -> str:
last_idx = -1
for i in range(len(number)-1):
if number[i] == digit:
last_idx = i
if number[i] < number[i+1]:
return number[:i] + number[i+1:]
if number[-1] == digit:
last_idx = len(number) - 1
return number[:last_idx] + number[last_idx+1:] |
from PyQt5.QtNetwork import QNetworkAccessManager
HTTP_METHOD_TO_QT_OPERATION = {
"HEAD": QNetworkAccessManager.HeadOperation,
"GET": QNetworkAccessManager.GetOperation,
"PUT": QNetworkAccessManager.PutOperation,
"POST": QNetworkAccessManager.PostOperation,
"DELETE": QNetworkAccessManager.DeleteOperation
}
QT_OPERATION_TO_HTTP_METHOD = {
v: k for k, v in HTTP_METHOD_TO_QT_OPERATION.items()
}
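# Quick sanity-check sketch (added illustration, not part of the original module):
# the two tables are inverses of each other, so an HTTP verb round-trips through the Qt enum.
assert all(QT_OPERATION_TO_HTTP_METHOD[op] == method for method, op in HTTP_METHOD_TO_QT_OPERATION.items())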
|
from .context import *
from .frontend import *
from .signals import *
from .stdlib import * |
# Tested with Python 2.1
import re
#
# The simplest, lambda-based implementation
#
def multiple_replace(dict, text):
""" Replace in 'text' all occurences of any key in the given
dictionary by its corresponding value. Returns the new tring."""
# Create a regular expression from the dictionary keys
regex = re.compile("(%s)" % "|".join(map(re.escape, list(dict.keys()))))
# For each match, look-up corresponding value in dictionary
return regex.sub(lambda mo: dict[mo.string[mo.start():mo.end()]], text)
#
# You may combine both the dictionary and search-and-replace
# into a single object using a 'callable' dictionary wrapper
# which can be directly used as a callback object.
#
# In Python 2.2+ you may extend the 'dictionary' built-in class instead
from UserDict import UserDict
class Xlator(UserDict):
""" An all-in-one multiple string substitution class """
def _make_regex(self):
""" Build a regular expression object based on the keys of
the current dictionary """
return re.compile("(%s)" % "|".join(map(re.escape, list(self.keys()))))
def __call__(self, mo):
""" This handler will be invoked for each regex match """
# Count substitutions
self.count += 1 # Look-up string
return self[mo.string[mo.start():mo.end()]]
def xlat(self, text):
""" Translate text, returns the modified text. """
# Reset substitution counter
self.count = 0
# Process text
return self._make_regex().sub(self, text)
#
# Test
#
if __name__ == "__main__":
text = "Larry Wall is the creator of Perl"
dict = {
"Larry Wall" : "Guido van Rossum",
"creator" : "Benevolent Dictator for Life",
"Perl" : "Python",
}
print(multiple_replace(dict, text))
xlat = Xlator(dict)
print(xlat.xlat(text))
print("Changed %d thing(s)" % xlat.count)
|
"""setup.py
Upload to PyPI:
python setup.py sdist
twine upload --repository pypitest dist/qpack-X.X.X.tar.gz
twine upload --repository pypi dist/qpack-X.X.X.tar.gz
"""
import setuptools
from distutils.core import setup, Extension
from qpack import __version__
module = Extension(
'qpack._qpack',
define_macros=[],
include_dirs=['./qpack'],
libraries=[],
sources=['./qpack/_qpack.c'],
extra_compile_args=["--std=c99", "-pedantic"]
)
VERSION = __version__
setup(
name='qpack',
packages=['qpack'],
version=VERSION,
description='QPack (de)serializer',
author='Jeroen van der Heijden',
author_email='[email protected]',
url='https://github.com/transceptor-technology/qpack',
ext_modules=[module],
download_url='https://github.com/transceptor-technology/'
'qpack/tarball/{}'.format(VERSION),
keywords=['serializer', 'deserializer'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Other Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Software Development'
],
)
|
# coding: utf-8
# Standard
import sys
import json
# Third-party
from flask import Flask, request
# Local
from v1api import V1API
VERSION = 0.11
APPNAME = 'BI-Beacon Open Source Server'
state = {}
flask_app = Flask(APPNAME)
def tuple2command(tup):
if len(tup) == 3:
return 'color %d %d %d\n' % tup
else:
return 'pulse_1 %d %d %d %1.2f\n' % tup
assert tuple2command((0, 100, 0)) == 'color 0 100 0\n'
assert tuple2command((1, 2, 3, 1.5)) == 'pulse_1 1 2 3 1.50\n'
@flask_app.route(
'/<channelkey>',
methods=["POST", "GET"]
)
def beacon_api(channelkey):
if request.method == 'GET':
tuple = state[channelkey] if channelkey in state else (0, 255, 0)
resp = tuple2command(tuple)
print("Responding with {resp}".format(resp=resp))
return resp
if request.method == 'POST':
def update_state(channelkey, tuple):
state[channelkey] = tuple
v1api_handler = V1API(update_state)
(json_obj, status) = v1api_handler.handle(ck=channelkey, formdata=request.form)
return json.dumps(json_obj), status
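# Example behaviour (illustrative only): a GET on a channel that has never been
# POSTed to returns the default command "color 0 255 0\n"; once V1API handles a
# POST and calls update_state, subsequent GETs return the stored
# "color r g b\n" or "pulse_1 r g b period\n" command.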
def run():
if len(sys.argv) != 2:
print("Usage: server.py <port>")
print(" port: REST API requests will be available at this port.")
return
port = int(sys.argv[1])
print("BI-Beacon version {version} starting.".format(version=VERSION))
print("Using port: {port}".format(port=port))
flask_app.run(host='0.0.0.0',
port=port,
debug=False,
threaded=True)
print("Server stopped.")
if __name__ == '__main__':
run()
|
from modules import engine
from modules import out
from modules import run
from modules import php
from time import sleep
@engine.prepare_and_clean
def execute():
#make sure we have the php command file online
php.upload_command_file()
#open it with the phpinfo parameter
run.local('open ' + engine.REMOTE_COMMAND_URL + '?phpinfo')
#give the browser some time to open the url before deleting the underlying file
sleep(5)
def help():
out.log("Open the phpinfo output of the remote in your browser.", 'help') |
from g_module import some_awesome_func
def a_func():
return "a func"
from a_module.b_module.b_file import b_func
def some_other_func():
return "other_value" |
import inspect
import re
import sys
from typing import (
Any,
Callable,
Dict,
Generic,
Iterable,
List,
Optional,
Tuple,
TypeVar,
Union,
cast,
overload,
)
from redun.expression import SchedulerExpression, TaskExpression
from redun.hashing import hash_arguments, hash_struct
from redun.namespace import get_current_namespace
from redun.promise import Promise
from redun.utils import get_func_source
from redun.value import Value, get_type_registry
Func = TypeVar("Func", bound=Callable)
Func2 = TypeVar("Func2", bound=Callable)
Result = TypeVar("Result")
def get_task_registry():
"""
Returns the global task registry.
"""
return _task_registry
def undefined_task(fullname: str, *args: Any, **kwargs: Any) -> None:
"""
Default function used for a deserialized Task with unknown definition.
"""
raise ValueError("Task {} is undefined.".format(fullname))
def get_tuple_type_length(tuple_type: Any) -> Optional[int]:
"""
Returns the length of a tuple type if inferrable.
"""
if getattr(tuple_type, "__origin__", None) in (tuple, Tuple):
# Return type is Tuple[ * ].
# __args__ is not available on Tuple type in Python 3.9+ because it was removed from
# _SpecialGenericAlias in:
#
# https://github.com/python/cpython/pull/19984
#
# For more info, see bpo-40397 here:
#
# https://bugs.python.org/issue40397
#
# typing.get_args was added in Python 3.8 so we can use that instead if we detect we are
# running on Python 3.8+
if sys.version_info >= (3, 8):
from typing import get_args
tuple_type_args = get_args(tuple_type)
else:
tuple_type_args = tuple_type.__args__
if Ellipsis in tuple_type_args or len(tuple_type_args) == 0:
# Tuple of unknown length.
return None
if tuple_type_args == ((),):
# Special case for length zero.
return 0
return len(tuple_type_args)
if inspect.isclass(tuple_type) and issubclass(tuple_type, tuple):
# Return type is namedtuple.
fields = getattr(tuple_type, "_fields", None)
if fields:
return len(fields)
return None
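# Illustrative examples (not part of the original module) of the inference above:
#
#   from typing import Tuple
#   get_tuple_type_length(Tuple[int, str])   # -> 2
#   get_tuple_type_length(Tuple[int, ...])   # -> None (unknown length)
#   get_tuple_type_length(SomeNamedTuple)    # -> len(SomeNamedTuple._fields)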
class Task(Value, Generic[Func]):
"""
A redun Task.
Tasks are the basic unit of execution in redun. Tasks are often defined
using the :func:`redun.task.task` decorator:
.. code-block:: python
@task()
def my_task(x: int) -> int:
return x + 1
"""
type_name = "redun.Task"
def __init__(
self,
func: Callable,
name: Optional[str] = None,
namespace: Optional[str] = None,
version: Optional[str] = None,
compat: Optional[List[str]] = None,
script: bool = False,
task_options: Optional[dict] = None,
task_options_update: Optional[dict] = None,
):
self.name = name or func.__name__
self.namespace = namespace or get_current_namespace()
self.func = func
self.source = get_func_source(func)
self.version = version
self.compat = compat or []
self.script = script
self._task_options = task_options or {}
self._task_options_update = task_options_update or {}
self.hash = self._calc_hash()
self._signature: Optional[inspect.Signature] = None
self._validate()
@property
def nout(self) -> Optional[int]:
"""
Determines nout from task options and return type.
The precedence is:
- the task option
- function return type
"""
if self.has_task_option("nout"):
nout = self.get_task_option("nout")
else:
# Infer nout from return type.
return_type = self.func.__annotations__.get("return")
nout = get_tuple_type_length(return_type)
return nout
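    # Illustrative note (not from the original source): a task whose function is
    # annotated `-> Tuple[int, str]` gets nout=2 inferred here, while an explicit
    # `@task(nout=3)` option takes precedence over the annotation.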
T = TypeVar("T")
@overload
def get_task_option(self, option_name: str) -> Optional[Any]:
...
@overload
def get_task_option(self, option_name: str, default: T) -> T:
...
def get_task_option(self, option_name: str, default: Optional[T] = None) -> Optional[T]:
"""
Fetch the requested option, preferring run-time updates over options from task
        construction. Like the dictionary `get` method, returns the default
        instead of raising a `KeyError` on missing keys.
"""
if option_name in self._task_options_update:
return self._task_options_update.get(option_name)
if option_name in self._task_options:
return self._task_options.get(option_name)
return default
def get_task_options(self) -> dict:
"""
Merge and return the task options.
"""
return {
**self._task_options,
**self._task_options_update,
}
def has_task_option(self, option_name: str) -> bool:
"""
Return true if the task has an option with name `option_name`
"""
return option_name in self._task_options_update or option_name in self._task_options
def __repr__(self) -> str:
return "Task(fullname={fullname}, hash={hash})".format(
fullname=self.fullname,
hash=self.hash[:8],
)
def _validate(self) -> None:
config_args = self.get_task_option("config_args")
if config_args:
valid_args = list(self.signature.parameters)
invalid_args = [
config_arg for config_arg in config_args if config_arg not in valid_args
]
if invalid_args:
raise ValueError(
f"Invalid config args: {', '.join(invalid_args)}. Expected all config_args to "
f"be one of the function args/kwargs: {', '.join(valid_args)}"
)
if self.namespace:
if not re.match("^[A-Za-z_][A-Za-z_0-9.]+$", self.namespace):
raise ValueError(
"Task namespace must use only alphanumeric characters, "
"underscore '_', and dot '.'."
)
if not re.match("^[A-Za-z_][A-Za-z_0-9]+$", self.name):
raise ValueError("Task name must use only alphanumeric characters and underscore '_'.")
# Validate nout.
if self.nout is not None:
if not isinstance(self.nout, int):
raise TypeError("nout must be an int")
if self.nout < 0:
raise TypeError("nout must be non-negative")
@property
def fullname(self) -> str:
"""
Returns the fullname of a Task: '{namespace}.{name}'
"""
if self.namespace:
return self.namespace + "." + self.name
else:
return self.name
def _call(self: "Task[Callable[..., Result]]", *args: Any, **kwargs: Any) -> Result:
"""
Returns a lazy Expression of calling the task.
"""
return cast(
Result,
TaskExpression(
self.fullname, args, kwargs, self._task_options_update, length=self.nout
),
)
# Typing strategy: Ideally, we could use more honest types for the arguments
# and return type of `Task.__call__()`, such as `Union[Arg, Expression[Arg]]`
# and `TaskExpression[Result]` respectively. However, dynamically creating
# such a signature has several challenges at the moment, such as requiring a
# mypy plugin and forcing users to wrap every task return type. Therefore, we
# compromise and force cast `Task.__call__()` to have the same signature as
# the wrapped function, `Func`. This allows users to write tasks with very
# natural types, and mypy can catch most type errors. The one trade off is
# that this approach is too permissive about using `TaskExpression[T]`
# wherever `T` is allowed.
# Cast the signature to match the wrapped function.
__call__: Func = cast(Func, _call)
def options(self, **task_options_update: Any) -> "Task[Func]":
"""
Returns a new Task with task_option overrides.
"""
new_task_options_update = {
**self._task_options_update,
**task_options_update,
}
return Task(
self.func,
name=self.name,
namespace=self.namespace,
version=self.version,
compat=self.compat,
script=self.script,
task_options=self._task_options,
task_options_update=new_task_options_update,
)
def _calc_hash(self) -> str:
# TODO: implement for real.
if self.compat:
return self.compat[0]
if self._task_options_update:
task_options_hash = [get_type_registry().get_hash(self._task_options_update)]
else:
task_options_hash = []
if self.version is None:
source = get_func_source(self.func) if self.func else ""
return hash_struct(["Task", self.fullname, "source", source] + task_options_hash)
else:
return hash_struct(
["Task", self.fullname, "version", self.version] + task_options_hash
)
def __getstate__(self) -> dict:
# Note: We specifically don't serialize func. We will use the
# TaskRegistry during deserialization to fetch the latest func for a task.
return {
"name": self.name,
"namespace": self.namespace,
"version": self.version,
"hash": self.hash,
"compat": self.compat,
"script": self.script,
"task_options": self._task_options_update,
}
def __setstate__(self, state) -> None:
self.name = state["name"]
self.namespace = state["namespace"]
self.version = state["version"]
self.hash = state["hash"]
self.compat = state.get("compat", [])
self.script = state.get("script", False)
self._task_options_update = state.get("task_options", {})
# Set func from TaskRegistry.
registry = get_task_registry()
_task = registry.get(self.fullname)
if _task:
self.func = _task.func
self._task_options = _task._task_options
self.source = get_func_source(self.func)
else:
self.func = lambda *args, **kwargs: undefined_task(self.fullname, *args, **kwargs)
self._task_options = {}
self.source = ""
self._signature = None
def is_valid(self) -> bool:
"""
Returns True if the Task Value is still valid (task hash matches registry).
Tasks are first-class Values in redun. They can be cached and fetched
in future executions. When fetching a Task from the cache, the cached
hash might no longer exist in the code base (registered tasks).
"""
return self.hash == self._calc_hash()
def get_hash(self, data: Optional[bytes] = None) -> str:
"""
Returns the Task hash.
"""
return self.hash
# Note: we can't parameterize PartialTask to a more specific type at this
# time, due to the complexity of calculating the remaining parameter signature.
def partial(
self: "Task[Callable[..., Result]]", *args, **kwargs
) -> "PartialTask[Callable[..., Result], Callable[..., Result]]":
"""
Partially apply some arguments to the Task.
"""
return PartialTask(self, args, kwargs)
@property
def signature(self) -> inspect.Signature:
"""
Signature of the function wrapped by the task.
"""
assert self.func
if not self._signature:
self._signature = inspect.signature(self.func)
return self._signature
class SchedulerTask(Task[Func]):
"""
A Task that executes within the scheduler to allow custom evaluation.
"""
def _call(self: "SchedulerTask[Callable[..., Result]]", *args: Any, **kwargs: Any) -> Result:
"""
Returns a lazy Expression of calling the task.
"""
return cast(Result, SchedulerExpression(self.fullname, args, kwargs))
__call__ = cast(Func, _call)
class PartialTask(Task[Func], Generic[Func, Func2]):
"""
A Task with only some arguments partially applied.
The type of this class is parameterized by `Func` and `Func2`, where
`Func2` is the type of the original function and `Func` is the type
of partially applied function. They should match on their return types.
"""
type_name = "redun.PartialTask"
def __init__(self, task: Task[Func2], args: tuple, kwargs: dict):
self.task = task
self.args = tuple(args)
self.kwargs = kwargs
super().__init__(task.func, name=task.name, namespace=task.namespace)
def __repr__(self) -> str:
return (
"PartialTask(fullname={fullname}, hash={hash}, args={args}, kwargs={kwargs})".format(
fullname=self.fullname,
hash=self.hash[:8],
args=repr(self.args),
kwargs=repr(self.kwargs),
)
)
def _call(
self: "PartialTask[Callable[..., Result], Callable[..., Result]]",
*args: Any,
**kwargs: Any,
) -> Result:
# By calling the original task, we ensure that a normal pre-registered
# task will be the one in the CallGraph recording.
return self.task(*self.args, *args, **self.kwargs, **kwargs)
# Cast the signature to match the wrapped function.
__call__: Func = cast(Func, _call)
def __getstate__(self) -> dict:
"""
Returns state for pickling.
"""
state = super().__getstate__()
state.update({"task": self.task.__getstate__(), "args": self.args, "kwargs": self.kwargs})
return state
def __setstate__(self, state: dict) -> None:
"""
Sets state from pickle.
"""
super().__setstate__(state)
self.task = Task.__new__(Task)
self.task.__setstate__(state["task"])
self.args = state["args"]
self.kwargs = state["kwargs"]
def _calc_hash(self) -> str:
return hash_struct(
[
"PartialTask",
self.task._calc_hash(),
hash_arguments(get_type_registry(), self.args, self.kwargs),
]
)
def is_valid(self) -> bool:
return self.task.is_valid()
def options(
self: "PartialTask[Callable[..., Result], Callable[..., Result]]",
**task_options_update: Any,
) -> "Task[Func]":
"""
Returns a new Task with task_option overrides.
"""
return cast(
Task[Func],
self.task.options(**task_options_update).partial(*self.args, **self.kwargs),
)
# Note: we can't parameterize PartialTask to a more specific type at this
# time, due to the complexity of calculating the remaining parameter signature.
def partial(
self: "PartialTask[Callable[..., Result], Callable[..., Result]]", *args, **kwargs
) -> "PartialTask[Callable[..., Result], Callable[..., Result]]":
"""
Partially apply some arguments to the Task.
"""
# Combine new arguments to previously applied arguments.
args2 = self.args + args
kwargs2 = {
**self.kwargs,
**kwargs,
}
return PartialTask(self.task, args2, kwargs2)
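# Illustrative sketch (assumed task names, not part of the original module):
# partial application accumulates positional and keyword arguments.
#
#   @task()
#   def add(x: int, y: int) -> int:
#       return x + y
#
#   add1 = add.partial(1)    # PartialTask fixing x=1
#   expr = add1(2)           # lazy expression equivalent to add(1, 2)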
@overload
def task(
func: Func,
) -> Task[Func]:
...
@overload
def task(
*,
name: Optional[str] = None,
namespace: Optional[str] = None,
version: Optional[str] = None,
compat: Optional[List[str]] = None,
script: bool = False,
**task_options: Any,
) -> Callable[[Func], Task[Func]]:
...
def task(
func: Optional[Func] = None,
*,
name: Optional[str] = None,
namespace: Optional[str] = None,
version: Optional[str] = None,
compat: Optional[List[str]] = None,
script: bool = False,
**task_options: Any,
) -> Union[Task[Func], Callable[[Func], Task[Func]]]:
"""
Decorator to register a function as a redun :class:`Task`.
Parameters
----------
func : Optional[Func]
A python function to register as a redun Task. If not given, a
parameterized decorator is returned.
name : Optional[str]
Name of task (Default: infer from function `func.__name__`)
namespace : Optional[str]
Namespace of task (Default: None)
version : Optional[str]
Optional manual versioning for a task (Default: source code of task is
hashed).
compat : Optional[List[str]]
Optional redun version compatibility. Not currently implemented.
script : bool
If True, this is a script-style task which returns a shell script string.
**task_options
Additional options for configuring a task. These must be serializable; for
example, this is necessary to send the task description over the network to
a remote worker.
"""
def deco(func: Func) -> Task[Func]:
nonlocal namespace
# Determine task namespace.
if not namespace:
namespace = getattr(sys.modules[func.__module__], "redun_namespace", None)
_task: Task[Func] = Task(
func,
name=name,
namespace=namespace,
version=version,
compat=compat,
script=script,
task_options=task_options,
)
get_task_registry().add(_task)
return _task
if func:
# If this decorator is applied directly to a function, decorate it.
return deco(func)
else:
# If a function is not given, just return the parameterized decorator.
return deco
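# Usage sketch (illustrative only): the decorator can be applied bare or with
# options; either way the resulting Task is registered and calls are lazy.
#
#   @task()
#   def double(x: int) -> int:
#       return 2 * x
#
#   @task(name="double_v2", namespace="examples", version="2")
#   def double2(x: int) -> int:
#       return 2 * x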
def scheduler_task(
name: Optional[str] = None, namespace: Optional[str] = None, version: str = "1"
) -> Callable[[Callable[..., Promise[Result]]], SchedulerTask[Callable[..., Result]]]:
"""
Decorator to register a function as a scheduler task.
Unlike usual tasks, scheduler tasks are lower-level tasks that are evaluated
within the :class:`Scheduler` and allow defining custom evaluation semantics.
For example, one can implement `cond()`, `seq()` and `catch()` using
scheduler tasks.
When evaluated, scheduler tasks are called with a reference to the
    :class:`Scheduler` and the parent :class:`Job` as its first two arguments.
    Its remaining arguments are the same as those passed from the user; however,
    they are not evaluated and may contain :class:`Expression`s. It is the
responsibility of the scheduler task to explicitly evaluate arguments
by using `Scheduler.evaluate()` as needed. Overall, this allows the scheduler
task to implement custom evaluation semantics. Lastly, the scheduler task
must return a :class:`Promise` that resolves to the result of the task.
This concept corresponds to fexpr in Lisp:
- https://en.wikipedia.org/wiki/Fexpr
For example, one could implement a lazy if-statement called `cond` using
this scheduler task:
.. code-block:: python
@scheduler_task()
def cond(scheduler, parent_job, pred_expr, then_expr, else_expr):
def then(pred):
if pred:
return scheduler.evaluate(then_expr, parent_job=parent_job)
else:
return scheduler.evaluate(else_expr, parent_job=parent_job)
return scheduler.evaluate(pred_expr, parent_job=parent_job).then(then)
Once defined, the new `cond` expression can be used like this:
.. code-block:: python
@task()
def main():
result = task1()
return cond(result, task2(), task3())
"""
def deco(func: Callable[..., Promise[Result]]) -> SchedulerTask[Callable[..., Result]]:
nonlocal name, namespace
if not namespace:
namespace = getattr(sys.modules[func.__module__], "redun_namespace", None)
_task: SchedulerTask[Callable[..., Result]] = SchedulerTask(
func,
name=name,
namespace=namespace,
version=version,
)
get_task_registry().add(_task)
return _task
return deco
class TaskRegistry:
"""
A registry of currently registered Tasks.
The @task() decorator registers tasks to the current registry.
"""
def __init__(self):
self._tasks: Dict[str, Task] = {}
def add(self, task: Task) -> None:
self._tasks[task.fullname] = task
def get(self, task_name: Optional[str] = None, hash: Optional[str] = None) -> Optional[Task]:
if task_name:
return self._tasks.get(task_name)
elif hash:
for task in self._tasks.values():
if task.hash == hash:
return task
return None
else:
raise ValueError("No task field given.")
def __iter__(self) -> Iterable[Task]:
return iter(self._tasks.values())
# Global singleton task registry.
_task_registry = TaskRegistry()
|
from django.contrib import admin
from supervisor.models import TA, Notification, Example, Comment, News, Flag
admin.site.register(TA)
admin.site.register(Notification)
admin.site.register(Example)
admin.site.register(Comment)
admin.site.register(News)
admin.site.register(Flag) |
from tapis_cli.display import Verbosity
from tapis_cli.search import SearchWebParam
from .mixins import AppIdentifier
from tapis_cli.commands.taccapis import SearchableCommand
from .create import AppsCreate
from . import API_NAME, SERVICE_VERSION
from .models import App
from .formatters import AppsFormatOne
__all__ = ['AppsUpdate']
class AppsUpdate(AppsCreate, AppIdentifier):
"""Update an existing App
"""
def get_parser(self, prog_name):
parser = super(AppsUpdate, self).get_parser(prog_name)
parser = AppIdentifier.extend_parser(self, parser)
return parser
def take_action(self, parsed_args):
parsed_args = self.preprocess_args(parsed_args)
app_id = AppIdentifier.get_identifier(self, parsed_args)
self.requests_client.setup(API_NAME, SERVICE_VERSION)
# Activates usage of argument from WorkingDirectoryArg
self.set_working_directory(parsed_args)
self.handle_file_upload(parsed_args)
headers = self.render_headers(App, parsed_args)
rec = self.tapis_client.apps.update(appId=app_id,
body=self.json_file_contents)
data = []
for key in headers:
val = self.render_value(rec.get(key, None))
data.append(val)
return (tuple(headers), tuple(data))
|
#!/usr/bin/env python3
###############################################################################
# Main entrypoint to the Question Classification project
# Starts training or test given an experiment config
###############################################################################
import os
import hydra
from omegaconf import DictConfig
from question_classification.trainer import Trainer
from question_classification.unsupervised_trainer import UnsupervisedTrainer
@hydra.main(config_path="configs", config_name="defaults.yaml")
def main(config: DictConfig) -> None:
""" Runs the trainer based on the given experiment configuration """
if config.test:
# TODO: clean up current working directory with test=true
experiment_path = os.getcwd().replace("test=true,", "").replace("test=True,", "")
if config.unsupervised:
trainer = UnsupervisedTrainer(config, experiment_path)
else:
trainer = Trainer(config, experiment_path)
summary, report = trainer.test()
print(summary)
print(report)
else:
experiment_path = os.getcwd()
if config.unsupervised:
trainer = UnsupervisedTrainer(config, experiment_path)
else:
trainer = Trainer(config, experiment_path)
trainer.run()
print("Launched training. Press CTRL+C to stop.")
print(f"Logs available at {os.getcwd()}")
if __name__ == "__main__":
main()
|
from ...expressions import (
Expression,
Symbol,
FunctionApplication as Fa,
Constant as C,
)
from ...expression_walker import (
add_match,
ExpressionWalker,
ReplaceSymbolWalker,
)
from .chart_parser import Quote, CODE_QUOTE
from .english_grammar import S, V, NP, VP, PN, DET, N, VAR, SL, LIT
from .exceptions import ParseDatalogPredicateException
from ...logic import (
Implication,
Conjunction,
ExistentialPredicate,
UniversalPredicate,
)
import re
def indent(s, tab=" "):
return "".join(tab + l for l in s.splitlines(keepend=True))
class DRS(Expression):
def __init__(self, referents, expressions):
self.referents = referents
self.expressions = expressions
def __repr__(self):
return (
"DRS <"
+ ", ".join(map(repr, self.referents))
+ "> [\n"
+ "".join(" " + repr(e) + ",\n" for e in self.expressions)
+ "]"
)
class DRSBuilder(ExpressionWalker):
def __init__(self, grammar):
self.grammar = grammar
@add_match(
DRS, lambda drs: any(isinstance(e, DRS) for e in drs.expressions),
)
def join_drs(self, drs):
refs = drs.referents
exps = ()
for e in drs.expressions:
if isinstance(e, DRS):
refs += tuple(r for r in e.referents if r not in refs)
exps += e.expressions
else:
exps += (e,)
return self.walk(DRS(refs, exps))
@add_match(Fa, lambda fa: any(isinstance(e, DRS) for e in fa.args))
def float_drs(self, fa):
args = ()
drs = None
for e in fa.args:
if isinstance(e, DRS) and not drs:
drs = e
args += (drs.expressions[0],)
else:
args += (e,)
exps = (Fa(fa.functor, args),) + tuple(drs.expressions[1:])
return self.walk(DRS(drs.referents, exps))
@add_match(Fa(Fa(NP, ...), (Fa(Fa(PN, ...), ...),)))
def proper_names(self, np):
(pn,) = np.args
(_, _, const) = pn.functor.args
return self.walk(DRS((), (const,)))
@add_match(
Fa(Fa(S, ...), (..., Fa(Fa(VP, ...), (Fa(Fa(V, ...), ...), ...)),),)
)
def predicate(self, s):
(subject, vp) = s.args
(v, object_) = vp.args
exp = Symbol(v.args[0].value)(subject, object_)
return self.walk(DRS((), (exp,)))
@add_match(
Fa(Fa(NP, ...), (Fa(Fa(DET, ...), ...), Fa(Fa(N, ...), ...),)),
lambda np: np.args[0].args[0].value in ["a", "an"],
)
def indefinite_noun_phrase(self, np):
(det, n) = np.args
x = Symbol.fresh()
exp = Symbol(n.args[0].value)(x)
return self.walk(DRS((x,), (x, exp)))
@add_match(Fa(Fa(NP, ...), (Fa(Fa(VAR, ...), ...),)),)
def var_noun_phrase(self, np):
(var,) = np.args
v = Symbol(var.args[0].value)
return self.walk(DRS((v,), (v,)))
@add_match(
Fa(Fa(NP, ...), (Fa(Fa(NP, ...), ...), Fa(Fa(VAR, ...), ...),)),
)
def var_apposition(self, np):
(np, var) = np.args
np_drs = self.walk(np)
y = Symbol(var.args[0].value)
x = np_drs.expressions[0]
rsw = ReplaceSymbolWalker({x: y})
exps = ()
for e in np_drs.expressions:
exps += (rsw.walk(e),)
refs = ()
for r in np_drs.referents:
refs += (rsw.walk(r),)
return self.walk(DRS(refs, exps))
@add_match(
Fa(
Fa(S, ...),
(C("if"), Fa(Fa(S, ...), ...), C("then"), Fa(Fa(S, ...), ...),),
),
)
def conditional(self, s):
(_, ant, _, cons) = s.args
return self.walk(DRS((), (Implication(cons, ant),)))
@add_match(Fa(Fa(S, ...), (Fa(Quote, (C(CODE_QUOTE), ...)),),),)
def quoted_predicate(self, s):
exp = _parse_predicate(s.args[0].args[1].value)
return self.walk(DRS(exp.args, (exp,)))
@add_match(
Implication(DRS, DRS),
lambda impl: (
set(impl.antecedent.referents) & set(impl.consequent.referents)
),
)
def implication(self, impl):
drs_ant = impl.antecedent
drs_con = impl.consequent
drs_con.referents = tuple(
set(drs_con.referents) - set(drs_ant.referents)
)
return self.walk(Implication(drs_con, drs_ant))
@add_match(
Fa(Fa(S, ...), (Fa(Fa(S, ...), ...), C("and"), Fa(Fa(S, ...), ...),),),
)
def simple_and(self, s):
(a, _, b) = s.args
a = self.walk(a)
b = self.walk(b)
return self.walk(DRS((), (a, b,)))
@add_match(
Fa(
Fa(S, ...),
(Fa(Fa(SL, ...), ...), C(","), C("and"), Fa(Fa(S, ...), ...),),
),
)
def comma_and(self, s):
(sl, _, _, s) = s.args
sl = self.walk(sl)
s = self.walk(s)
return self.walk(DRS((), sl + (s,)))
@add_match(
Fa(
Fa(SL, ...),
(Fa(Fa(S, ...), ...),),
),
)
def single_sentence_list(self, sl):
(s,) = sl.args
return (self.walk(s),)
@add_match(
Fa(
Fa(SL, ...),
(Fa(Fa(SL, ...), ...), C(","), Fa(Fa(S, ...), ...)),
),
)
def sentence_list(self, sl):
(sl, _, s) = sl.args
sl = self.walk(sl)
s = self.walk(s)
return sl + (s,)
@add_match(Fa(Fa(NP, ...), (Fa(Fa(LIT, ...), ...),)),)
def lit_noun_phrase(self, np):
(lit,) = np.args
(const,) = lit.functor.args
return self.walk(DRS((), (const,)))
r = re.compile(r"^(\w+)\((\w+(,\s\w+)*)\)$")
def _parse_predicate(string):
# This could totally use the datalog parser
m = r.match(string)
if not m:
raise ParseDatalogPredicateException(
f"Quoted predicate is not valid datalog: {string}"
)
functor = Symbol(m.group(1))
args = map(Symbol, map(str.strip, m.group(2).split(",")))
return functor(*args)
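# For example (illustrative): _parse_predicate("likes(john, mary)") yields the
# application Symbol("likes")(Symbol("john"), Symbol("mary")); any string that
# does not match the `name(arg, ...)` pattern raises
# ParseDatalogPredicateException.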
class DRS2FOL(ExpressionWalker):
@add_match(DRS)
def drs(self, drs):
exp = Conjunction(tuple(map(self.walk, drs.expressions)))
for r in drs.referents:
exp = ExistentialPredicate(r, exp)
return self.walk(exp)
@add_match(Conjunction((...,)))
def unary_conjunction(self, conj):
return self.walk(conj.formulas[0])
@add_match(Implication(DRS, DRS))
def implication(self, impl):
drs_ant = impl.antecedent
drs_con = impl.consequent
ant = Conjunction(tuple(map(self.walk, drs_ant.expressions)))
con = self.walk(drs_con)
exp = Implication(con, ant)
for r in drs_ant.referents:
exp = UniversalPredicate(r, exp)
return self.walk(exp)
|
from setuptools import setup, find_packages
from codecs import open
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='websites_metrics_consumer',
version='0.0.4',
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
install_requires=["psycopg2","confluent_kafka","avro-python3","requests"],
url='https://github.com/antoniodimariano/metrics_consumer',
license='Apache 2.0',
python_requires='>=3.6',
author='Antonio Di Mariano',
author_email='[email protected]',
    description='An application that consumes metrics from Kafka messages and stores the results into a Postgres db',
long_description=long_description,
long_description_content_type="text/markdown",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
]
)
|
# commands is a list of commands; each command is an indexable sequence
# (e.g. a string or tuple) whose element 1 must be one of "L", "U", "W" or "D"
# and whose first five elements are written to the output file.
def filewriter(commands):
    with open("ans", "w") as file:
        file.write("%d\n" % len(commands))
        for i in range(len(commands)):
            if commands[i][1] not in ("L", "U", "W", "D"):
                raise ValueError("Invalid letter in command: " + str(i))
            file.write('%s %s %s %s %s\n' % tuple(commands[i][:5]))
|
import typing
import uvicorn
from starlette.routing import Match
from starlette.types import Scope
from fastapi import FastAPI
from fastapi.routing import APIRoute, APIRouter
class NewRoute(APIRoute):
def matches(self, scope: Scope) -> typing.Tuple[Match, Scope]:
if scope["type"] == "http":
match = self.path_regex.match(scope["path"])
if match:
# print(self.path_regex, self.path_format)
                # TODO: add log forwarding/transmission support
matched_params = match.groupdict()
for key, value in matched_params.items():
matched_params[key] = self.param_convertors[key].convert(
value)
path_params = dict(scope.get("path_params", {}))
path_params.update(matched_params)
child_scope = {
"endpoint": self.endpoint,
"path_params": path_params
}
if self.methods and scope["method"] not in self.methods:
return Match.PARTIAL, child_scope
else:
return Match.FULL, child_scope
return Match.NONE, {}
def scope_headers(self, scope: Scope) -> dict:
if not scope.get("headers"):
return {}
return {key.decode(): value.decode() for key, value in dict(scope["headers"]).items()}
def scope_cookie(self, scope: Scope) -> dict:
headers = self.scope_headers(scope)
cookie_dict = {}
for chunk in headers.get("cookie", "").split(";"):
if "=" in chunk:
key, val = chunk.split("=", 1)
else:
# Assume an empty name per
key, val = "", chunk
key, val = key.strip(), val.strip()
if key or val:
cookie_dict[key] = val
return cookie_dict
r = APIRouter(route_class=NewRoute)
@r.get("/hello/{a}/{b}")
async def root(a: int, b: str):
return {"message": "Hello World"}
@r.get("/abc")
async def abc():
pass
app = FastAPI()
app.include_router(r)
if __name__ == "__main__":
uvicorn.run("demo:app", reload=True)
|
# -*- coding: utf-8 -*-
r"""
Extract your project's __version__ variable
When creating a ``setup.py`` for a new project, do you find yourself always
writing the same block of code for parsing ``__version__`` from your project's
source? Something like this?
::
with open(join(dirname(__file__), 'package_name', '__init__.py')) as fp:
for line in fp:
m = re.search(r'^\s*__version__\s*=\s*([\'"])([^\'"]+)\1\s*$', line)
if m:
version = m.group(2)
break
else:
raise RuntimeError('Unable to find own __version__ string')
setup(
version = version,
...
)
Someone needs to put all that into a reusable package, am I right? Well,
someone did, and this is that package.
Visit <https://github.com/jwodder/read_version> for more information.
"""
from __future__ import print_function
__version__ = '0.1.1'
__author__ = 'John Thorvald Wodder II'
__author_email__ = '[email protected]'
__license__ = 'MIT'
__url__ = 'https://github.com/jwodder/read_version'
import ast
import inspect
import os.path
def read_version(*fpath, **kwargs):
""" ``read_version()`` takes one or more file path components pointing to a
Python source file to parse. The path components will be joined together
with ``os.path.join()``, and then, if the path isn't absolute, the path to
the directory containing the script calling ``read_version()`` will be
prepended to the path. (No more ``join(dirname(__file__), ...)``
boilerplate needed!) ``read_version()`` then parses the given Python file
and searches through the parse tree for any assignments to a variable named
``__version__``, returning the last value assigned.
The ``variable`` keyword argument can be set to the name of a variable
other than ``__version__`` to search for assignments to a different
variable instead.
If no assignments to the variable are found, a ``ValueError`` is raised.
To instead return a default value when this happens, set the ``default``
keyword argument.
"""
if not fpath:
raise ValueError('No filepath passed to read_version()')
fpath = os.path.join(*fpath)
if not os.path.isabs(fpath):
caller_file = inspect.stack()[1][0].f_globals["__file__"]
fpath = os.path.join(os.path.dirname(caller_file), fpath)
with open(fpath, 'rb') as handle:
src = handle.read()
top_level = ast.parse(src)
variable = kwargs.get("variable", "__version__")
try:
result = kwargs["default"]
except KeyError:
pass
for statement in top_level.body:
if isinstance(statement, ast.Assign):
for target in statement.targets:
if isinstance(target, ast.Tuple):
if any(isinstance(t, ast.Name) and t.id == variable
for t in target.elts):
value = ast.literal_eval(statement.value)
for t, v in zip(target.elts, value):
if isinstance(t, ast.Name) and t.id == variable:
result = v
elif isinstance(target, ast.Name) and target.id == variable:
result = ast.literal_eval(statement.value)
try:
return result
except NameError:
raise ValueError(f'No assignment to {variable!r} found in file')
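# Typical setup.py usage (illustrative, assuming a package directory named
# "mypackage" whose __init__.py assigns __version__):
#
#   from read_version import read_version
#
#   setup(
#       name='mypackage',
#       version=read_version('mypackage', '__init__.py'),
#       ...
#   )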
# The “read_version_file” lambda is a simple sugary shortcut,
# which makes the assumption that your version variable is
# to be found in a file called “__version__.py”:
read_version_file = lambda *fpath, **kwargs: read_version(*fpath, '__version__.py', **kwargs)
__all__ = ('read_version', 'read_version_file')
__dir__ = lambda: list(__all__) |
"""TP-Link adapter for WebThings Gateway."""
from gateway_addon import Property
class BusproProperty(Property):
"""TP-Link property type."""
def __init__(self, device, name, description, value):
"""
Initialize the object.
device -- the Device this property belongs to
name -- name of the property
description -- description of the property, as a dictionary
value -- current value of this property
"""
Property.__init__(self, device, name, description)
self.set_cached_value(value)
class OnOffProperty(Property):
"""TP-Link property type."""
def __init__(self, device, name, description, value):
"""
Initialize the object.
device -- the Device this property belongs to
name -- name of the property
description -- description of the property, as a dictionary
value -- current value of this property
"""
Property.__init__(self, device, name, description)
def set_value(self, value):
if value:
self.device.light_device.set_on()
else:
self.device.light_device.set_off()
class NumberProperty(Property):
def __init__(self, device, name, description, value):
"""
Initialize the object.
device -- the Device this property belongs to
name -- name of the property
description -- description of the property, as a dictionary
value -- current value of this property
"""
Property.__init__(self, device, name, description)
def set_value(self, value):
if value:
self.device.set_brightness(value)
class StringProperty(Property):
def __init__(self, device, name, description, value):
"""
Initialize the object.
device -- the Device this property belongs to
name -- name of the property
description -- description of the property, as a dictionary
value -- current value of this property
"""
Property.__init__(self, device, name, description)
def set_value(self, value):
if value:
self.device.set_brightness(value)
|
import os
import json
import sqlite3
import datetime, time
import itertools
from common import util
import queue
import threading
from threading import Thread
import logging
class IDEBenchDriver:
def init(self, options, schema, driver_arg):
self.time_of_latest_request = 0
self.isRunning = False
self.requests = queue.LifoQueue()
self.config = json.load(open(os.path.join(os.path.dirname(__file__),'..','sqlite.config.json')))
def create_connection(self):
sqlite_file = self.config['dbFilename']
conn = sqlite3.connect(sqlite_file)
return conn
    def sqlitefix(self, sql_statement):
        # SQLite has no FLOOR(); rewrite FLOOR as ROUND and subtract 0.5 inside
        # the first rewritten call so it behaves like FLOOR for positive values.
        if "FLOOR" not in sql_statement:
            return sql_statement
        else:
            sql_statement = sql_statement.replace("FLOOR", "ROUND")
            x = sql_statement.find("ROUND")
            y = sql_statement.find(")", x)
            output = sql_statement[:y] + " -0.5 " + sql_statement[y:]
            #print(output, flush=True)
            return output
def execute_vizrequest(self, viz_request, options, schema, result_queue):
viz = viz_request.viz
sql_statement = viz.get_computed_filter_as_sql(schema)
#calculate connection time
connection = self.conn
cursor = connection.cursor()
viz_request.start_time = util.get_current_ms_time()
#print(sql_statement,flush=True,end = '')
cursor.execute(self.sqlitefix(sql_statement))
data = cursor.fetchall()
viz_request.end_time = util.get_current_ms_time()
cursor.close()
results = {}
for row in data:
keys = []
for i, bin_desc in enumerate(viz.binning):
if "width" in bin_desc:
bin_width = bin_desc["width"]
keys.append(str(int(row[i])))
else:
keys.append(str(row[i]))
key = ",".join(keys)
results[key] = row[len(viz.binning):]
viz_request.result = results
result_queue.put(viz_request)
def process_request(self, viz_request, options, schema, result_queue):
self.requests.put((viz_request, options, schema, result_queue))
def process(self):
self.conn = self.create_connection()
while self.isRunning:
try:
request = self.requests.get(timeout=1)
viz_request = request[0]
options = request[1]
schema = request[2]
result_queue = request[3]
# only execute requests that are newer than the last one we processed (drops old/no longer needed queries)
if viz_request.expected_start_time < self.time_of_latest_request:
viz_request.dropped = True
result_queue.put(viz_request)
continue
self.time_of_latest_request = viz_request.expected_start_time
self.execute_vizrequest(viz_request, options, schema, result_queue)
except Exception as e:
# ignore queue-empty exceptions
pass
# close connection when done
self.conn.close()
def workflow_start(self):
self.isRunning = True
thread = Thread(target = self.process)
thread.start()
def workflow_end(self):
self.isRunning = False
|
from typing import Dict, Mapping, Optional, Tuple, Union, Set
from enum import Enum, auto
import os
import warnings
import numpy as np
from numpy.random.mtrand import seed
import torch as th
from ail.common.type_alias import GymEnv
from ail.common.math import normalize
class Buffer:
__slots__ = [
"_capacity",
"_sample_shapes",
"_arrays",
"_stored_keys",
"_n_data",
"_idx",
"device",
"seed",
"rng"
]
"""
A FIFO ring buffer for NumPy arrays of a fixed shape and dtype.
Supports random sampling with replacement.
:param capacity: The number of data samples that can be stored in this buffer.
:param sample_shapes: A dictionary mapping string keys to the shape of each data
samples associated with that key.
:param dtypes: A dictionary mapping string keys to the dtype of each data
of samples associated with that key.
:param device: PyTorch device to which the values will be converted.
"""
def __init__(
self,
capacity: int,
sample_shapes: Mapping[str, Tuple[int, ...]],
dtypes: Mapping[str, np.dtype],
device: Union[th.device, str],
seed: int
):
assert isinstance(capacity, int), "capacity must be integer."
if sample_shapes.keys() != dtypes.keys():
raise KeyError("sample_shape and dtypes keys don't match.")
self._capacity = capacity
self._sample_shapes = {k: tuple(shape) for k, shape in sample_shapes.items()}
# The underlying NumPy arrays (which actually store the data).
self._arrays = {
k: np.zeros((capacity,) + shape, dtype=dtypes[k])
for k, shape in self._sample_shapes.items()
}
self._stored_keys = set(self._sample_shapes.keys())
# An integer in `range(0, self.capacity + 1)`.
# This attribute is the return value of `self.size()`.
self._n_data = 0
# An integer in `range(0, self.capacity)`.
self._idx = 0
self.device = device
self.seed = seed
self.rng = np.random.default_rng(self.seed)
@property
def capacity(self) -> int:
return self._capacity
@property
def sample_shapes(self) -> Mapping[str, Tuple[int, ...]]:
return self._sample_shapes
@property
def stored_keys(self) -> Set[str]:
return self._stored_keys
def size(self) -> int:
"""Returns the number of samples currently stored in the buffer."""
# _ndata: integer in `range(0, self.capacity + 1)`.
assert (
0 <= self._n_data <= self._capacity
), "_ndata: integer in range(0, self.capacity + 1)."
return self._n_data
def full(self) -> bool:
"""Returns True if the buffer is full, False otherwise."""
return self.size() == self._capacity
@classmethod
def from_data(
cls,
data: Dict[str, np.ndarray],
device: Union[th.device, str],
        seed: int = 0,
        capacity: Optional[int] = None,
truncate_ok: bool = False,
) -> "Buffer":
"""
Constructs and return a Buffer containing the provided data.
Shapes and dtypes are automatically inferred.
:param data: A dictionary mapping keys to data arrays. The arrays may differ
in their shape, but should agree in the first axis.
        :param device: PyTorch device to which the values will be converted.
        :param seed: Random seed for the buffer's sampling RNG (default: 0).
:param capacity: The Buffer capacity. If not provided, then this is automatically
set to the size of the data, so that the returned Buffer is at full
capacity.
:param truncate_ok: Whether to error if `capacity` < the number of samples in
`data`. If False, then only store the last `capacity` samples from
`data` when overcapacity.
Examples:
In the follow examples, suppose the arrays in `data` are length-1000.
`Buffer` with same capacity as arrays in `data`::
Buffer.from_data(data)
`Buffer` with larger capacity than arrays in `data`::
Buffer.from_data(data, 10000)
`Buffer with smaller capacity than arrays in `data`. Without
`truncate_ok=True`, `from_data` will error::
Buffer.from_data(data, 5, truncate_ok=True)
"""
data_capacities = [arr.shape[0] for arr in data.values()]
data_capacities = np.unique(data_capacities)
if len(data) == 0:
raise ValueError("No keys in data.")
if len(data_capacities) > 1:
raise ValueError("Keys map to different length values.")
if capacity is None:
capacity = data_capacities[0]
sample_shapes = {k: arr.shape[1:] for k, arr in data.items()}
dtypes = {k: arr.dtype for k, arr in data.items()}
        buf = cls(capacity, sample_shapes, dtypes, device=device, seed=seed)
buf.store(data, truncate_ok=truncate_ok)
return buf
def store(
self,
data: Dict[str, np.ndarray],
truncate_ok: bool = False,
missing_ok: bool = True,
) -> None:
"""
Stores new data samples, replacing old samples with FIFO priority.
:param data: A dictionary mapping keys `k` to arrays with shape
`(n_samples,) + self.sample_shapes[k]`,
where `n_samples` is less than or equal to `self.capacity`.
:param truncate_ok: If False, then error if the length of `transitions` is
greater than `self.capacity`.
Otherwise, store only the final `self.capacity` transitions.
        :param missing_ok: If False, then error when attempting to store only a
            subset of the keys stored in this buffer.
"""
data_keys = set(data.keys())
expected_keys = set(self._sample_shapes.keys())
missing_keys = expected_keys - data_keys
unexpected_keys = data_keys - expected_keys
if missing_keys and not missing_ok:
raise ValueError(f"Missing keys {missing_keys}")
if unexpected_keys:
raise ValueError(f"Unexpected keys {unexpected_keys}")
n_samples = np.unique([arr.shape[0] for arr in data.values()])
if len(n_samples) > 1:
raise ValueError("Keys map to different length values.")
n_samples = n_samples[0]
if n_samples == 0:
raise ValueError("Trying to store empty data.")
if n_samples > self._capacity:
if not truncate_ok:
raise ValueError("Not enough capacity to store data.")
else:
data = {k: data[k][-self._capacity :] for k in data.keys()}
for k in data.keys():
if data[k].shape[1:] != self._sample_shapes[k]:
raise ValueError(f"Wrong data shape for {k}.")
new_idx = self._idx + n_samples
if new_idx > self._capacity:
n_remain = self._capacity - self._idx
# Need to loop around the buffer. Break into two "easy" calls.
self._store_easy({k: data[k][:n_remain] for k in data.keys()}, truncate_ok)
assert self._idx == 0
self._store_easy({k: data[k][n_remain:] for k in data.keys()}, truncate_ok)
else:
self._store_easy(data)
def _store_easy(self, data: Dict[str, np.ndarray], truncate_ok=False) -> None:
"""
Stores new data samples, replacing old samples with FIFO priority.
Requires that `size(data) <= self.capacity - self._idx`,
where `size(data)` is the number of rows in every array in `data.values()`.
Updates `self._idx` to be the insertion point of the next call to `_store_easy` call,
looping back to `self._idx = 0` if necessary.
Also updates `self._n_data`.
:param data: Same as in `self.store`'s docstring, except with the additional
constraint `size(data) <= self.capacity - self._idx`.
:param truncate_ok: If False, then error if the length of `transitions` is
greater than `self.capacity`.
Otherwise, store only the final `self.capacity` transitions.
Note: serve as singe pair store
"""
assert isinstance(data, dict), "data must be a dictionary"
# shape (1, n): 1 is the number of samples, n is the dimension of that sample
n_samples = np.unique([arr.shape[0] for arr in data.values()])
assert len(n_samples) == 1
n_samples = n_samples[0]
assert n_samples <= self._capacity - self._idx
idx_hi = self._idx + n_samples
for k in data.keys():
if not truncate_ok:
if self._n_data + n_samples > self._capacity:
raise ValueError("exceed buffer capacity")
self._arrays[k][self._idx : idx_hi] = data[k]
self._idx = idx_hi % self._capacity
self._n_data = int(min(self._n_data + n_samples, self._capacity))
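    # Illustration of the wrap-around handled in `store` (assumed numbers, not
    # from the original source): with capacity=5 and _idx=3, storing 4 samples
    # writes two at indices 3-4 in one `_store_easy` call and two at indices 0-1
    # in a second call, leaving _idx=2 and _n_data=5.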
def sample(self, n_samples: int) -> Dict[str, th.Tensor]:
"""
Uniformly sample `n_samples` samples from the buffer with replacement.
:param n_samples: The number of samples to randomly sample.
:return: A dictionary of samples (np.ndarray)
with shape `(n_samples) + self.sample_shape`.
"""
# TODO: ERE (https://arxiv.org/pdf/1906.04009.pdf)
assert isinstance(n_samples, int), "n_samples must be int"
assert self.size() != 0, "Buffer is empty"
# Uniform sampling
ind = self.rng.integers(self.size(), size=n_samples)
return self._get_batch_from_index(ind)
# res_idx = []
# res2={}
# for idx, (k, buffer) in enumerate(self._arrays.items()):
# if idx == 0:
# enum = self.rng.choice(list(enumerate(buffer[:self.size()])), size=n_samples, replace=True, p=None, axis=0, shuffle=True)
# # ic(k, enum)
# for i in enum:
# res_idx.append(i[0])
# # ic(res_idx)
# break
# res2 = self._get_batch_from_index(np.asarray(res_idx))
# ic(res_idx)
# ic(res2)
# return res2
def get(
self,
n_samples: Optional[int] = None,
last_n: bool = True,
shuffle: bool = False,
) -> Dict[str, th.Tensor]:
"""
Returns samples in the buffer.
:param: n_samples: The number of samples to return.
By default, return all samples in the buffer, if n_samples is None.
:param last_n: If True, then return the last `n_samples` samples.
:param shuffle: If True, then return the samples in a random order.
return: Tensor Dict
"""
if n_samples is None:
assert self.full(), "Buffer is not full"
            # Obtain all data in the buffer.
if shuffle:
# Same as uniform sampling whole buffer.
return self.sample(n_samples=self._capacity)
else:
# Get all buffer data with order preserved.
return self._get_batch_from_index(batch_idxes=slice(0, self._capacity))
else:
# Obtain a slice of data in buffer
assert isinstance(n_samples, int), "n_samples must be integer."
n_data = self.size()
if n_samples > n_data:
raise ValueError(
f"Cannot get {n_samples} of samples, "
f"which exceeds {n_data} samples currrently store in buffer."
)
if last_n:
# Obtain `last n_samples` data with index in range [n_data - n_samples, n_data)
start, end = (n_data - n_samples), n_data
else:
# Obtain data with index in range [0, n_samples)
start, end = 0, n_samples
batch_idxes = (
np.random.randint(low=start, high=end, size=n_samples)
if shuffle
else slice(start, end)
)
return self._get_batch_from_index(batch_idxes)
def _get_batch_from_index(
self,
batch_idxes: Union[np.ndarray, slice],
) -> Dict[str, th.Tensor]:
"""
Get a batch data based on index.
:param batch_idxes: Index of batch.
:param shuffle: If True, then return the samples in a random order.
"""
assert isinstance(batch_idxes, (slice, np.ndarray))
return {
k: self.to_torch(buffer[batch_idxes]) for k, buffer in self._arrays.items()
}
def to_torch(self, array: np.ndarray, copy: bool = True, **kwargs) -> th.Tensor:
"""
Convert a numpy array to a PyTorch tensor.
Note: it copies the data by default.
:param array:
        :param copy: Whether or not to copy the data
            (may be useful to avoid mutating values by reference)
"""
if copy:
return th.tensor(array, dtype=th.float32, device=self.device, **kwargs)
elif isinstance(array, np.ndarray):
return th.from_numpy(array).float().to(self.device)
else:
return th.as_tensor(array, dtype=th.float32, device=self.device)
@staticmethod
def to_numpy(tensor: th.Tensor) -> np.ndarray:
"""Convert torch tensor to numpy array and send to CPU."""
return tensor.detach().cpu().numpy()
def save(self, save_dir: str) -> None:
"""
Saving the data in buffer as .npz archive to a directory.
see: https://numpy.org/doc/stable/reference/generated/numpy.savez.html#numpy.savez
"""
dir_name = os.path.dirname(save_dir)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
print("Saving buffer _arrays into a .npz archive.")
print(f"data key: {self._arrays.keys()}")
np.savez(
save_dir,
obs=self._arrays["obs"],
acts=self._arrays["acts"],
dones=self._arrays["dones"],
next_obs=self._arrays["next_obs"],
)
class BaseBuffer:
__slots__ = [
"capacity",
"sample_shapes",
"dtypes",
"device",
"_buffer",
"seed"
"abs_counter",
]
"""
Base class that represent a buffer (rollout or replay).
:param capacity: The number of samples that can be stored.
:param device: PyTorch device to which the values will be converted.
:param env: The environment whose action and observation
spaces can be used to determine the data shapes of
the underlying buffers.
Overrides all the following arguments.
:param obs_shape: The shape of the observation space.
:param act_shape: The shape of the action space.
:param obs_dtype: The dtype of the observation space.
:param act_dtype: The dtype of the action space.
"""
def __init__(
self,
capacity: int,
device: Union[th.device, str],
seed: int,
env: Optional[GymEnv] = None,
obs_shape: Optional[Tuple[int, ...]] = None,
act_shape: Optional[Tuple[int, ...]] = None,
obs_dtype: np.dtype = np.float32,
act_dtype: np.dtype = np.float32,
with_reward=True,
):
if isinstance(capacity, float):
self.capacity = int(capacity)
elif isinstance(capacity, int):
self.capacity = capacity
else:
raise ValueError("capacity must be integer number.")
params = [obs_shape, act_shape, obs_dtype, act_dtype]
self.sample_shapes = {}
self.dtypes = {}
if env is not None:
if np.any([x is not None for x in params]):
print("Specified shape and dtype and environment.", flush=True)
print("Shape and dtypes will be refer to env.", flush=True)
self.sample_shapes.update(
{
"obs": tuple(env.observation_space.shape),
"acts": tuple(env.action_space.shape),
"next_obs": tuple(env.observation_space.shape),
"dones": (1,),
}
)
self.dtypes.update(
{
"obs": env.observation_space.dtype,
"acts": env.action_space.dtype,
"next_obs": env.observation_space.dtype,
"dones": np.float32,
}
)
else:
if np.any([x is None for x in params]):
raise ValueError("Shape or dtype missing and no environment specified.")
self.sample_shapes = {
"obs": tuple(obs_shape),
"acts": tuple(act_shape),
"next_obs": tuple(obs_shape),
"dones": (1,),
}
self.dtypes = {
"obs": obs_dtype,
"acts": act_dtype,
"next_obs": obs_dtype,
"dones": np.float32,
}
if with_reward:
self.sample_shapes["rews"] = (1,)
self.dtypes["rews"] = np.float32
self.device = device
self._buffer = None
self.seed = seed
self.abs_counter=0
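    # Construction sketch (illustrative, assumed argument values): shapes and
    # dtypes are taken from `env` when it is given, otherwise they must be
    # supplied explicitly, e.g. via the concrete ReplayBuffer subclass:
    #   ReplayBuffer(capacity=10_000, device="cpu", seed=0, env=my_gym_env)
    #   ReplayBuffer(capacity=10_000, device="cpu", seed=0,
    #                obs_shape=(4,), act_shape=(1,))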
def __repr__(self) -> str:
return f"{self.__class__.__name__}"
def _init_buffer(self) -> None:
"""Initiate Buffer"""
if len(self.sample_shapes) == 0:
raise ValueError("sample shape not define.")
if len(self.dtypes) == 0:
raise ValueError("dtypes not define.")
self.reset()
def reset(self) -> None:
"""Reset equivalent to re-initiate a new Buffer."""
self._buffer = Buffer(
capacity=self.capacity,
sample_shapes=self.sample_shapes,
dtypes=self.dtypes,
device=self.device,
seed=self.seed,
)
def stored_keys(self) -> Set[str]:
return self._buffer.stored_keys
def size(self) -> int:
"""Returns the number of samples stored in the buffer."""
return self._buffer.size()
def full(self) -> bool:
"""Returns whether the buffer is full."""
return self._buffer.full()
def store(
self,
transitions: Dict[str, np.ndarray],
truncate_ok: bool = False,
) -> None:
"""Store obs-act-obs triples and additional info in transitions.
Args:
transitions: Transitions to store.
truncate_ok: If False, then error if the length of `transitions` is
greater than `self.capacity`. Otherwise, store only the final
`self.capacity` transitions.
Raises:
ValueError: The arguments didn't have the same length.
"""
if not isinstance(transitions, dict):
try:
transitions = dict(transitions)
except TypeError:
raise TypeError(
"Prefer transitions to be a dict or a dictionary-like object"
)
keys = set(transitions.keys())
intersect = self._buffer._stored_keys & keys
difference = self._buffer._stored_keys - keys
ignore = keys - self._buffer._stored_keys
if difference:
warnings.warn(f"Unfulfill keys: {difference}.")
if ignore:
warnings.warn(f"Ignore keys: {ignore}.")
# Remove unnecessary fields
trans_dict = {k: transitions[k] for k in intersect}
self._buffer._store_easy(trans_dict, truncate_ok=truncate_ok) # noqa
def store_path(
self, transitions: Dict[str, np.ndarray], truncate_ok: bool = True
) -> None:
"""Store a path of obs-act-obs triples and additional info in transitions.
Args:
transitions: Transitions to store.
truncate_ok: If False, then error if the length of `transitions` is
greater than `self.capacity`. Otherwise, store only the final
`self.capacity` transitions.
Raises:
ValueError: The arguments didn't have the same length.
"""
if not isinstance(transitions, dict):
try:
transitions = dict(transitions)
except TypeError:
raise TypeError(
"Prefer transitions to be a dict or a dictionary-like object."
)
keys = set(transitions.keys())
intersect = self._buffer._stored_keys & keys
difference = self._buffer._stored_keys - keys
ignore = keys - self._buffer._stored_keys
if difference:
warnings.warn(f"Unfulfill keys: {difference}.")
if ignore:
warnings.warn(f"Ignore keys: {ignore}.")
# Remove unnecessary fields
trans_dict = {k: transitions[k] for k in intersect}
self._buffer.store(trans_dict, truncate_ok=truncate_ok)
def sample(self, n_samples: int) -> Dict[str, th.Tensor]:
"""
Sample obs-act-obs triples.
:param n_samples: The number of samples.
:return:A Transitions named tuple containing n_samples transitions.
"""
return self._buffer.sample(n_samples)
def get(
self,
n_samples: Optional[int] = None,
last_n: bool = True,
shuffle: bool = False,
) -> Dict[str, th.Tensor]:
"""
        Obtain a batch of samples of size n_samples (order preserved).
        If n_samples is None (default), return all samples in the buffer.
"""
return self._buffer.get(n_samples, last_n, shuffle)
@classmethod
def from_data(
cls,
transitions: Dict[str, np.ndarray],
device: Union[th.device, str],
seed: int = 0,
capacity: Optional[int] = None,
truncate_ok: bool = False,
with_reward: bool = True,
) -> "BaseBuffer":
"""
Construct and return a ReplayBuffer/RolloutBuffer containing the provided data.
Shapes and dtypes are automatically inferred, and the returned ReplayBuffer is
ready for sampling.
Args:
transitions: Transitions to store.
device: PyTorch device to which the values will be converted.
capacity: The ReplayBuffer capacity. If not provided, then this is
automatically set to the size of the data, so that the returned Buffer
is at full capacity.
truncate_ok: Whether to error if `capacity` < the number of samples in
`data`. If False, then only store the last `capacity` samples from
`data` when overcapacity.
Examples:
            `ReplayBuffer` with the same capacity as the arrays in `data`::
                ReplayBuffer.from_data(data, device)
            `ReplayBuffer` with larger capacity than the arrays in `data`::
                ReplayBuffer.from_data(data, device, capacity=10000)
            `ReplayBuffer` with smaller capacity than the arrays in `data`. Without
            `truncate_ok=True`, `from_data` will error::
                ReplayBuffer.from_data(data, device, capacity=5, truncate_ok=True)
Returns:
A new ReplayBuffer.
"""
obs_shape = transitions["obs"].shape[1:]
act_shape = transitions["acts"].shape[1:]
if capacity is None:
capacity = transitions["obs"].shape[0]
instance = cls(
capacity=capacity,
obs_shape=obs_shape,
act_shape=act_shape,
obs_dtype=transitions["obs"].dtype,
act_dtype=transitions["acts"].dtype,
device=device,
seed=seed,
with_reward=with_reward,
)
instance._init_buffer()
instance.store_path(transitions, truncate_ok=truncate_ok)
return instance
def save(self, save_dir) -> None:
"""Save trainsitions to save_dir."""
self._buffer.save(save_dir)
class BufferTag(Enum):
REPLAY = auto()
ROLLOUT = auto()
class ReplayBuffer(BaseBuffer):
"""Replay Buffer for Transitions."""
def __init__(
self,
capacity: int,
device: Union[th.device, str],
seed: int,
env: Optional[GymEnv] = None,
obs_shape: Optional[Tuple[int, ...]] = None,
act_shape: Optional[Tuple[int, ...]] = None,
obs_dtype: np.dtype = np.float32,
act_dtype: np.dtype = np.float32,
with_reward: bool = True,
extra_shapes: Optional[Dict[str, Tuple[int, ...]]] = None,
extra_dtypes: Optional[Dict[str, np.dtype]] = None,
):
"""
Constructs a ReplayBuffer.
:param capacity: The number of samples that can be stored.
:param device: PyTorch device to which the values will be converted.
:param env: The environment whose action and observation
spaces can be used to determine the data shapes of
the underlying buffers.
Overrides all the following arguments.
:param obs_shape: The shape of the observation space.
:param act_shape: The shape of the action space.
:param obs_dtype: The dtype of the observation space.
:param act_dtype: The dtype of the action space.
"""
super(ReplayBuffer, self).__init__(
capacity,
device,
seed,
env,
obs_shape,
act_shape,
obs_dtype,
act_dtype,
with_reward,
)
if extra_shapes is not None:
if isinstance(extra_shapes, dict):
self.sample_shapes.update(extra_shapes)
else:
raise ValueError("extra_shapes should be Dict[str, Tuple[int, ...]]")
if extra_dtypes is not None:
if isinstance(extra_dtypes, dict):
self.dtypes.update(extra_dtypes)
else:
raise ValueError("extra_dtypes should be Dict[str, np.dtype]")
self._init_buffer()
self._tag = BufferTag.REPLAY
def __repr__(self) -> str:
return f"{self.__class__.__name__} (capacity={self.capacity}, data={self.stored_keys()}, size={self.size()})"
@property
def tag(self) -> BufferTag:
return self._tag
class RolloutBuffer(BaseBuffer):
"""Rollout Buffer for Transitions."""
def __init__(
self,
capacity: int,
device: Union[th.device, str],
seed: int,
env: Optional[GymEnv] = None,
obs_shape: Optional[Tuple[int, ...]] = None,
act_shape: Optional[Tuple[int, ...]] = None,
obs_dtype: np.dtype = np.float32,
act_dtype: np.dtype = np.float32,
with_reward: bool = True,
extra_shapes: Optional[Dict[str, Tuple[int, ...]]] = None,
extra_dtypes: Optional[Dict[str, np.dtype]] = None,
):
"""
        Constructs a RolloutBuffer.
:param capacity: The number of samples that can be stored.
:param device: PyTorch device to which the values will be converted.
:param env: The environment whose action and observation
spaces can be used to determine the data shapes of
the underlying buffers.
Overrides all the following arguments.
:param obs_shape: The shape of the observation space.
:param act_shape: The shape of the action space.
:param obs_dtype: The dtype of the observation space.
:param act_dtype: The dtype of the action space.
"""
super(RolloutBuffer, self).__init__(
capacity,
device,
seed,
env,
obs_shape,
act_shape,
obs_dtype,
act_dtype,
with_reward,
)
if extra_shapes is not None:
if isinstance(extra_shapes, dict):
self.sample_shapes.update(extra_shapes)
else:
raise ValueError("extra_shapes should be Dict[str, Tuple[int, ...]]")
if extra_dtypes is not None:
if isinstance(extra_dtypes, dict):
self.dtypes.update(extra_dtypes)
else:
raise ValueError("extra_dtypes should be Dict[str, np.dtype]")
self._init_buffer()
self._tag = BufferTag.ROLLOUT
def __repr__(self) -> str:
return f"{self.__class__.__name__} (capacity={self.capacity}, data={self.stored_keys()}, size={self.size()})"
@property
def tag(self) -> BufferTag:
return self._tag
class BufferType(Enum):
rollout = RolloutBuffer
replay = ReplayBuffer
rolloutbuffer = RolloutBuffer
replaybuffer = ReplayBuffer
rollout_buffer = RolloutBuffer
replay_buffer = ReplayBuffer
ROLLOUT_BUFFER = RolloutBuffer
REPLAY_BUFFER = ReplayBuffer
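# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# A minimal example of building a ReplayBuffer from pre-collected transitions via
# `from_data` and drawing a batch. The array shapes and the "cpu" device below are
# illustrative assumptions, not values taken from this module.
#
#     import numpy as np
#     transitions = {
#         "obs": np.zeros((128, 4), dtype=np.float32),
#         "acts": np.zeros((128, 2), dtype=np.float32),
#         "next_obs": np.zeros((128, 4), dtype=np.float32),
#         "dones": np.zeros((128, 1), dtype=np.float32),
#         "rews": np.zeros((128, 1), dtype=np.float32),
#     }
#     buffer = ReplayBuffer.from_data(transitions, device="cpu", seed=0)
#     batch = buffer.sample(32)  # dict of torch tensors keyed by "obs", "acts", ...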
|
from functools import partialmethod
class PartialMethod:
def method(self, value, expected, lower: bool = False):
if lower is True:
value = value.lower()
assert value == expected
partial_method = partialmethod(method, expected='value')
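# A brief usage check added for illustration (the original file only defines the class):
# `functools.partialmethod` pre-binds arguments, so `partial_method` behaves like
# `method` with `expected='value'` already supplied.
if __name__ == "__main__":
    obj = PartialMethod()
    obj.partial_method("value")              # assertion passes: "value" == "value"
    obj.partial_method("VALUE", lower=True)  # lowercased first, then compared
    print("partialmethod example assertions passed")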
|
import discord
import os
from discord.ext import commands
import json
import asyncio
import datetime
import traceback
import sys
import pathlib
import time
class Owner(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command()
#------------------------------
# Get info of certain server
#------------------------------
async def serverinfo(self, ctx, guild_id: int = 0):
if self.checkInvos(ctx.guild.id) == 1:
await ctx.message.delete(delay=3)
if self.checkOwner(ctx.message.author.id) == False:
return
if guild_id == 0:
guild: discord.Guild = ctx.message.guild
else:
guild: discord.Guild = self.client.get_guild(guild_id)
self.log(0, f"{ctx.author} requested server info for {guild.name}[{guild.id}]")
embed = discord.Embed(title = f"**Server {guild.name}**", color = discord.Colour.from_rgb(119, 137, 218))
embed.set_thumbnail(url=guild.icon_url_as(format="png"))
embed.add_field(name = "**Owner**", value = f"<@{guild.owner.id}>[`{guild.owner_id}`]", inline = False)
embed.add_field(name = "**ID**", value = f"`{guild.id}`", inline = True)
embed.add_field(name = "**Region**", value = guild.region, inline = True)
if guild.premium_tier != 0:
embed.add_field(name = "**Boost Status**", value = guild.premium_tier, inline = True)
if guild.rules_channel != None:
embed.add_field(name = "**Rules_Channel**", value = guild.rules_channel, inline = True)
embed.add_field(name = "**Members**", value = guild.member_count, inline = True)
embed.add_field(name = "**Roles**", value = len(guild.roles), inline = True)
embed.add_field(name = "**Channels**", value = f"Categories ~ {len(guild.categories)}\nText Channels ~ {len(guild.text_channels)}\nVoice Channels ~ {len(guild.voice_channels)}", inline = True)
if guild.splash != None:
embed.add_field(name = "**Splash URL**", value = guild.splash_url, inline = True)
if guild.banner != None:
embed.add_field(name = "**Banner URL**", value = guild.banner_url, inline = True)
if guild.description != None:
embed.add_field(name = "**Guild Description**", value = guild.description, inline = True)
now = datetime.datetime.now()
embed.set_footer(text = f"{now.strftime('%H:%M')} / {now.strftime('%d/%m/%y')} | Wormhole made with \u2764\ufe0f by Nevalicjus")
await ctx.send(embed = embed)
@commands.command()
#------------------------------
# Get info about all your bot's guilds
#------------------------------
async def allserverinfo(self, ctx):
if self.checkInvos(ctx.guild.id) == 1:
await ctx.message.delete(delay=3)
if self.checkOwner(ctx.message.author.id) == False:
return
self.log(0, f"{ctx.author} requested server info for all guilds")
guildsfp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
counter = 0
with open(f'temp/{guildsfp}.txt', 'a') as f:
start_time = time.time()
for guild in self.client.guilds:
try:
f.write(f'Guild [{counter}]\nName: {guild.name}\nID: {guild.id}\nOwner: {guild.owner.name} [{guild.owner.id}]\nCreation Date: {guild.created_at}\nRegion: {guild.region}\n')
f.write(f'Members: {guild.member_count}\nRoles: {len(guild.roles)}\nInvites: {len(await guild.invites())}\nChannels:\n Categories: {len(guild.categories)}\n Text: {len(guild.text_channels)}\n Voice: {len(guild.voice_channels)}\n')
if guild.premium_tier != 0:
f.write(f'Boost Status: {guild.premium_tier}\n')
if guild.rules_channel != None:
f.write(f'Rules: {guild.rules_channel}\n')
if guild.icon != None:
f.write(f'IconURL: {guild.icon_url}\n')
if guild.splash != None:
f.write(f'SplashURL: {guild.splash_url}\n')
if guild.banner != None:
f.write(f'BannerURL: {guild.banner_url}\n')
if guild.description != None:
f.write(f'Description: {guild.description}\n')
except discord.HTTPException as msg_ex:
if msg_ex.code == 50013 and msg_ex.status == 403:
f.write(f'Guild [{counter}]\nName: {guild.name}\nID: {guild.id}\nOwner: {guild.owner.name} [{guild.owner.id}]\nCreation Date: {guild.created_at}\nRegion: {guild.region}\n')
f.write(f'Guild Permissions Restricted\n')
f.write('\n')
counter += 1
end_time = time.time()
self.log(0, f"{ctx.author} server info report for all guilds elapsed {end_time - start_time} seconds and was saved under {guildsfp}")
await ctx.send(f"Report Generated in {end_time - start_time}s", file = discord.File(fp = f'temp/{guildsfp}.txt', filename = f'temp/{guildsfp}.txt'))
@commands.command()
#------------------------------
# Ping the bot
#------------------------------
async def ping(self, ctx):
if self.checkInvos(ctx.guild.id) == 1:
await ctx.message.delete(delay=3)
if self.checkOwner(ctx.message.author.id) == False:
return
self.log(ctx.guild.id, f"{ctx.message.author} pinged me on {ctx.message.channel}. Latency was equal to {round(self.client.latency * 1000)}ms")
await ctx.send(f'Pong! Latency equals {round(self.client.latency * 1000)}ms')
@commands.command()
#------------------------------
# Leave specified or current guild
#------------------------------
async def leave(self, ctx, guild_id: int = 0):
if self.checkOwner(ctx.message.author.id) == False:
return
if guild_id == 0:
guild = ctx.message.guild
else:
guild = self.client.get_guild(guild_id)
self.log(guild.id, f"Leaving guild due to request by {ctx.message.author}[{ctx.message.author.id}]")
await guild.leave()
@commands.command()
#------------------------------
# Generate file not found error
#------------------------------
    async def err(self, ctx):
if self.checkOwner(ctx.message.author.id) == False:
return
with open('file.txt', 'r') as f:
file = json.load(f)
@commands.command(help="logs x")
#------------------------------
# Add an entry to log
#------------------------------
async def alog(self, ctx, log_entry):
if self.checkOwner(ctx.message.author.id) == False:
return
if self.checkInvos(ctx.guild.id) == 1:
await ctx.message.delete(delay=3)
self.log(0, f"{ctx.author}[{ctx.author.id}]: {log_entry}")
@commands.command(help = "regens not present configs")
#------------------------------
    # Regenerate config files for servers that do not have them
#------------------------------
    async def regenconf(self, ctx, mode: int = 0):
if self.checkOwner(ctx.message.author.id) == False:
return
if self.checkInvos(ctx.guild.id) == 1:
await ctx.message.delete(delay=3)
if mode == 0:
self.log(0, " === REGEN CONF DRY RUN === ")
noconf_guild_ids = []
for guild in self.client.guilds:
#pathcurrconf = Path(f"{Path.cwd()}/configs/{guild.id}")
if pathlib.Path(f"{pathlib.Path.cwd()}/configs/{guild.id}.json").exists() == False:
self.log(0, f"Guild {guild.id} had no configuration present")
noconf_guild_ids.append(guild.id)
if mode == 1:
self.log(0, " === REGEN CONF REGEN MISSING === ")
noconf_guild_ids = []
for guild in self.client.guilds:
#pathcurrconf = Path(f"{Path.cwd()}/configs/{guild.id}")
if pathlib.Path(f"{pathlib.Path.cwd()}/configs/{guild.id}.json").exists() == False:
self.log(0, f"Guild {guild.id} had no configuration present")
noconf_guild_ids.append(guild.id)
try:
with open(f'docs/blank.json', 'r') as f:
config = json.load(f)
except FileNotFoundError:
self.log(0, f"You are missing a blank example config file under docs/blank.json")
try:
for invite in await guild.invites():
config['Invites'][f'{invite.code}'] = {}
config['Invites'][f'{invite.code}']['name'] = "None"
config['Invites'][f'{invite.code}']['roles'] = []
config['Invites'][f'{invite.code}']['uses'] = invite.uses
config['Invites'][f'{invite.code}']['welcome'] = "None"
config['Invites'][f'{invite.code}']['tags'] = {}
except:
pass
with open(f'configs/{guild.id}.json', 'w') as f:
json.dump(config, f, indent = 4)
self.log(0, f"Regenerated configs in mode {mode}. Guilds with no present configurations {noconf_guild_ids}")
#if mode == 2:
# self.log(0, " === REGEN CONF REGEN ALL === ")
# noconf_guild_ids = []
# for guild in client.guilds:
# bots_guild_ids.append(guild.id)
# try:
# with open(f'configs/{guild.id}.json', 'r') as f:
# config = json.load(f)
# except FileNotFoundError:
# self.log(0, f"Guild {guild.id} had no configuration present")
# noconf_guild_ids.append(guild.id)
@commands.command()
#
# Sends guilds stats
#
async def stats(self, ctx):
members = 0
        for guild in self.client.guilds:
members += guild.member_count
#await client.change_presence(status=discord.Status.online, activity=discord.Activity(type=discord.ActivityType.playing, name=f"on {len(client.guilds)} guilds with {members} members"))
await ctx.send(embed = self.constructResponseEmbedBase(f"I'm on {len(self.client.guilds)} with {members} members"))
def log(self, guild_id, log_msg: str):
with open('main-config.json', 'r') as f:
config = json.load(f)
logfile = config['LogFile']
if guild_id == 0:
print(f"[{datetime.datetime.now()}] [\033[1;31mOWNER-UTILITIES\033[0;0m]: " + log_msg)
with open(f'{logfile}', 'a') as f:
f.write(f"[{datetime.datetime.now()}] [OWNER-UTILITIES]: " + log_msg + "\n")
else:
print(f"[{datetime.datetime.now()}] [{guild_id}] [\033[1;31mOWNER-UTILITES\033[0;0m]: " + log_msg)
with open(f'{logfile}', 'a') as f:
f.write(f"[{datetime.datetime.now()}] [{guild_id}] [OWNER-UTILITES]: " + log_msg + "\n")
def checkOwner(self, user_id):
with open(f'main-config.json', 'r') as f:
main_config = json.load(f)
owners = main_config['OwnerUsers']
if user_id in owners:
return True
else:
return False
def checkInvos(self, guild_id):
with open(f'configs/{guild_id}.json', 'r') as f:
config = json.load(f)
delinvos = config['General']['DeleteInvocations']
if delinvos == 1:
return True
else:
return False
def constructResponseEmbedBase(self, desc):
embed = discord.Embed(title = f"**Wormhole**", description = desc, color = discord.Colour.from_rgb(119, 137, 218))
embed.set_thumbnail(url="https://n3v.xyz/icons/wormhole-logo.png")
now = datetime.datetime.now()
embed.set_footer(text = f"{now.strftime('%H:%M')} / {now.strftime('%d/%m/%y')} | Wormhole made with \u2764\ufe0f by Nevalicjus")
return embed
async def serverLog(self, guild_id, type, log_msg):
with open(f'configs/{guild_id}.json', 'r') as f:
config = json.load(f)
log_channel_id = config['General']['ServerLog']
if log_channel_id == 0:
return False
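        # NOTE (added): the three `type in []` checks below are empty placeholders,
        # so `em_color` always falls through to the default colour set right after them.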
if type in []:
em_color = discord.Colour.from_rgb(67, 181, 129)
if type in []:
em_color = discord.Colour.from_rgb(250, 166, 26)
if type in []:
em_color = discord.Colour.from_rgb(240, 71, 71)
em_color = discord.Colour.from_rgb(119, 137, 218)
embed = discord.Embed(title = f"**Wormhole Logging**", color = em_color)
now = datetime.datetime.now()
embed.set_footer(text = f"{now.strftime('%H:%M')} / {now.strftime('%d/%m/%y')} | Wormhole made with \u2764\ufe0f by Nevalicjus")
if type == "":
embed.add_field(name = "logtitle", value = log_msg, inline = False)
embed.add_field(name = "Wormhole Log", value = log_msg, inline = False)
log_channel = self.client.get_channel(log_channel_id)
await log_channel.send(embed = embed)
def setup(client):
client.add_cog(Owner(client))
|
'''
Aim: Determine whether the entered string is palindrome or not.
'''
class Solution:
def __init__(self):
self.stack = []
self.queue = []
return(None)
def pushCharacter(self, char):
self.stack.append(char)
def popCharacter(self):
return(self.stack.pop(-1))
def enqueueCharacter(self, char):
self.queue.append(char)
def dequeueCharacter(self):
return(self.queue.pop(0))
# read the string s
s = input()
# creating the Solution class object
obj = Solution()
l = len(s)
# push/enqueue all the characters of string s to stack
for i in range(l):
obj.pushCharacter(s[i])
obj.enqueueCharacter(s[i])
isPalindrome = True
'''
pop the top character from stack
dequeue the first character from queue
compare both the characters
'''
for i in range(l // 2):
if obj.popCharacter()!=obj.dequeueCharacter():
isPalindrome = False
break
# finally print whether string s is palindrome or not
if isPalindrome:
print("The word, "+s+", is a palindrome.")
else:
print("The word, "+s+", is not a palindrome.")
'''
Sample Test Case:
Input:
level
Output:
The word, level, is a palindrome.
Explanation:
All the characters popped from stack, matched the ones dequeued from queue.
''' |
import argparse
from bbworld import save_pic, load_pic, iterate_board
def _save_iteration(savename, save_board, iteration, max_iterations):
if (savename):
filename = savename[0]
num_digits = len(str(max_iterations))
filename += "_" + str(iteration).zfill(num_digits) + ".png"
save_pic(save_board, filename)
def run(loadname, iterations, savename):
#main(args.loadpic, args.iterate, args.print, args.printonce, ags.savepic)
board = load_pic(loadname[0])
# num_iterations defaults to 1 if it isn't defined
num_iterations = 1
if (iterations):
num_iterations = iterations[0]
# Iterate
    for ii in range(num_iterations):
_save_iteration(savename, board, ii, num_iterations)
board = iterate_board(board)
_save_iteration(savename, board, num_iterations, num_iterations)
def main():
parser = argparse.ArgumentParser(description='A weird RGB cellular automata')
parser.add_argument("loadpic",
help="Load an image of the board from a filename",
type=str, nargs=1)
parser.add_argument("--savepic","-s",
help="save an image to the directory, appended _xxx.png",
type=str, nargs=1)
parser.add_argument("--iterate","-i",
help="iterate n times. Default 1.",
type=int, nargs=1)
args = parser.parse_args()
run(args.loadpic, args.iterate, args.savepic)
if __name__ == '__main__':
main()
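# Example invocation (added for illustration; the script and image names below are placeholders):
#   python bbworld_cli.py board.png --iterate 10 --savepic out/board
# This loads board.png, iterates the automaton 10 times, and (because --savepic is set)
# writes out/board_00.png through out/board_10.png, one image per iteration plus the final state.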
|
# encoding: utf-8
"""
Script to train handwriting model
Change global vars if you want to change how data is loaded or to change major model training params
"""
import os
import json
import time
import torch
import torch.nn.functional as F
import torchvision.models as models
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import torch.optim as optim
from PIL import Image
from model_definition import Model
from read_data import HW_Dataset
import utils
# Global Vars
CLASS_NAMES = [
'1', '2', '3', '4', '5', '6', '7', '8', '9', '0'
]
NUM_CLASSES = len(CLASS_NAMES)
LABELS_DIR = '../phase_1/labels'
IMGS_DIR = '../phase_1/images'
TRAIN_LIST = '../phase_1/train_list.json'
VAL_LIST = '../phase_1/val_list.json'
SAVE_MODEL_PATH = '../models/hw_model.pth'
PRIOR_MODEL_PATH = '../models/hw_model_ref.pth'
TRAIN_BATCH_SIZE = 8
TEST_BATCH_SIZE = 8
LEARNING_RATE = 1e-3
MAX_EPOCHS = 100
BATCH_LOG_INTERVAL = 100
USE_GPU = True
GPU_IDX = 0
USE_PRIOR = False # transfer learning
SAVE_MODEL = True
STOPPING_LOSS = 0.25
# Initialize and Load the Model
device = torch.device('cuda:%s'%GPU_IDX if (USE_GPU and torch.cuda.is_available()) else "cpu")
model = Model(NUM_CLASSES, device).to(device)
if USE_PRIOR:
model.load_state_dict(torch.load(PRIOR_MODEL_PATH))
model.reset_lstm()
model = model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.1, patience=5, threshold=1e-3)
# Define Train and Test Functions
def train(start_time, epoch):
model.train()
normalize = transforms.Normalize(mean=[0.000],
std=[1.000])
train_dataset = HW_Dataset(labels_dir=LABELS_DIR,
imgs_dir=IMGS_DIR,
data_list=TRAIN_LIST,
transform=transforms.Compose([
transforms.ToTensor(),
normalize
]))
train_loader = DataLoader(dataset=train_dataset, batch_size=TRAIN_BATCH_SIZE,
shuffle=False, num_workers=8, pin_memory=True)
epoch_loss = 0
start = time.time()
for batch_idx, (x, y_target) in enumerate(train_loader):
# handle gpu
x = x.to(device)
        # forward propagate
y = model(x)
# encode target for ctc
y_target = utils.encode_words(y_target, CLASS_NAMES)
# pytorch ctc loss
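        # (Added note, assuming the model follows the usual convention for F.ctc_loss:
        #  `y` should be log-probabilities of shape (T, N, C) — time steps, batch size,
        #  classes including the CTC blank — so input_lengths repeats T once per batch item.)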
input_lengths = torch.tensor([y.shape[0]]*y.shape[1], dtype=torch.long).to(device)
target_lengths = torch.tensor([len(l) for l in y_target], dtype=torch.long).to(device)
y_target = torch.cat(y_target)
loss = F.ctc_loss(y, y_target, input_lengths, target_lengths, reduction='mean')
optimizer.zero_grad()
loss.backward()
epoch_loss += loss.item()
optimizer.step()
if batch_idx > 0 and batch_idx % BATCH_LOG_INTERVAL == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tTime per batch: {:.2f} s\tTotal Time: {:.2f} hrs'.format(
epoch, batch_idx * TRAIN_BATCH_SIZE, len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item(), (time.time() - start) / BATCH_LOG_INTERVAL, (time.time()-start_time)/3600))
start = time.time()
print('Avg. Epoch Loss: {:.6f}\n'.format(epoch_loss/len(train_loader)))
scheduler.step(epoch_loss)
def test(print_sample_preds=True):
normalize = transforms.Normalize(mean=[0.000],
std=[1.000])
test_dataset = HW_Dataset(labels_dir=LABELS_DIR,
imgs_dir=IMGS_DIR,
data_list=VAL_LIST,
transform=transforms.Compose([
transforms.ToTensor(),
normalize
]))
test_loader = DataLoader(dataset=test_dataset, batch_size=TEST_BATCH_SIZE,
shuffle=False, num_workers=0, pin_memory=True)
# switch to evaluate mode (deactivate dropout)
model.eval()
test_loss = 0
num_incorrect = 0
total_str_dist = 0
preds = []
targets = []
with torch.no_grad():
for batch_idx, (x, y_target) in enumerate(test_loader):
x = x.to(device)
y = model(x)
y_target = utils.encode_words(y_target, CLASS_NAMES)
# pytorch ctc loss
input_lengths = torch.tensor([y.shape[0]]*y.shape[1], dtype=torch.long).to(device)
target_lengths = torch.tensor([len(l) for l in y_target], dtype=torch.long).to(device)
loss = F.ctc_loss(y, torch.cat(y_target), input_lengths, target_lengths, reduction='mean')
test_loss += loss.item()
preds.extend(utils.decode_output(y.cpu(), CLASS_NAMES))
targets.extend(utils.decode_label_words(y_target, CLASS_NAMES))
if print_sample_preds and batch_idx == 0:
print("Test Prediction")
print(preds)
print("Test Ground Truth")
print(targets, "\n")
num_incorrect += sum([p!=gt for gt,p in zip(targets, preds)]) # WER
total_str_dist += sum([utils.norm_levenshtein_dist(x,y) for x,y in zip(targets, preds)]) # CER
# calculate accuracy stats
WER = 100*(num_incorrect/(len(test_dataset)))
CER = 100*(total_str_dist/(len(test_dataset)))
test_loss = test_loss*TEST_BATCH_SIZE/len(test_dataset)
print('Avg. Test Loss: {:.4f}\tCER: {:.1f}%\tWER: {}/{} ({:.1f}%) \n'.format(
test_loss, CER, num_incorrect, len(test_dataset), WER))
# termination condition
finished_training = test_loss <= STOPPING_LOSS
return finished_training
# Run Train and Test
if __name__ == '__main__':
start_time = time.time()
for epoch in range(MAX_EPOCHS):
train(start_time, epoch)
# has termination condition been met
if test(print_sample_preds=True):
break
if SAVE_MODEL:
utils.save_model(model, SAVE_MODEL_PATH)
|
"""
This module contains the classes that encapsulate the business logics to
collect tweets.
The collectors available are:
* OfficialAPICollector - It collects tweets through the official
Twitter API.
Notes
-----
Web-scraping collectors are planned to bypass the official API
limitations. Feel free to open a pull request.
"""
import tweepy
from django.conf import settings
class OfficialAPICollector:
"""
This class encapsulates the business logic to collect tweets through the
official Twitter API.
Attributes
----------
api : tweepy.API
Connected and authenticated Tweepy interface to interact with the
Twitter API.
Notes
-----
For this collector to work, you must set the following environment
variables to the keys and tokens provided by Twitter:
'TWITTER_CONSUMER_KEY', 'TWITTER_CONSUMER_SECRET', 'TWITTER_ACCESS_TOKEN'
and 'TWITTER_ACCESS_TOKEN_SECRET'. See the docker-compose.yml file and set
those variables in the proper env_file specified there.
Refer to https://developer.twitter.com/en/docs/twitter-api/ and
https://docs.tweepy.org/en/latest/ to know more about the limitations of
this collection method.
"""
def __init__(self):
"""
It sets up the connection to the Twitter API using Tweepy.
"""
auth = tweepy.OAuthHandler(
settings.TWITTER_CONSUMER_KEY, settings.TWITTER_CONSUMER_SECRET
)
auth.set_access_token(
settings.TWITTER_ACCESS_TOKEN, settings.TWITTER_ACCESS_TOKEN_SECRET
)
self.api = tweepy.API(auth, wait_on_rate_limit=True)
def collect(self, search_term, number_of_tweets):
"""
This method performs the actual tweets collection.
Parameters
----------
search_term : str
Term entered by the user in the search box.
number_of_tweets : int
Number of tweets the user requested to collect.
Returns
-------
tweepy.SearchResults
Iterable of tweepy.Status objects containing information about a
tweet.
"""
query = f'{search_term} -filter:retweets'
cursor = tweepy.Cursor(
self.api.search, q=query, tweet_mode='extended',
result_type='recent', include_entities=False
)
return cursor.items(number_of_tweets)
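# --- Hedged usage sketch (added for illustration; the search term and count are placeholders) ---
# collector = OfficialAPICollector()
# for status in collector.collect("python", number_of_tweets=50):
#     print(status.full_text)  # available because the cursor requests tweet_mode='extended'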
|
import numpy as np
x_data = np.float32(np.random.rand(2,100))
y_data = np.dot([0.1, 0.2], x_data) + 0.300
print(y_data) |
#
# Copyright 2015 Horde Software Inc.
#
import sys
from PySide import QtGui, QtCore
# Add the pyflowgraph module to the current environment if it does not already exist
import imp
try:
imp.find_module('pyflowgraph')
found = True
except ImportError:
import os, sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "..")))
from pyflowgraph.graph_view import GraphView
from pyflowgraph.graph_view_widget import GraphViewWidget
from pyflowgraph.node import Node
from pyflowgraph.port import InputPort, OutputPort, IOPort
print GraphView
app = QtGui.QApplication(sys.argv)
widget = GraphViewWidget()
graph = GraphView(parent=widget)
# generate a diamond-shaped graph.
totalCount = 0
def generateNodes(count, offset, depth):
for i in range(count):
node1 = Node(graph, 'node' + str(depth) + str(i))
node1.addPort(InputPort(node1, graph, 'InPort', QtGui.QColor(128, 170, 170, 255), 'MyDataX'))
node1.addPort(OutputPort(node1, graph, 'OutPort', QtGui.QColor(32, 255, 32, 255), 'MyDataX'))
node1.setGraphPos(QtCore.QPointF(offset, i * 80 ))
graph.addNode(node1)
global totalCount
totalCount += 1
if depth < 6:
generateNodes( count * 2, offset+160, depth+1)
for i in range(count):
graph.connectPorts('node' + str(depth) + str(i), 'OutPort', 'node' + str(depth+1) + str(i*2), 'InPort')
graph.connectPorts('node' + str(depth) + str(i), 'OutPort', 'node' + str(depth+1) + str(i*2+1), 'InPort')
elif depth < 12:
generateNodes( int(count / 2), offset+160, depth+1)
for i in range(count/2):
graph.connectPorts('node' + str(depth) + str(i), 'OutPort', 'node' + str(depth+1) + str(int(i)), 'InPort')
generateNodes( 1, 0, 0)
print "totalCount:" + str(totalCount)
widget.setGraphView(graph)
widget.show()
sys.exit(app.exec_()) |
# -*- coding: utf-8 -*-
'''
# Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information.
#
# This file was generated and any changes will be overwritten.
'''
from __future__ import unicode_literals
from ..model.windows_delivery_optimization_mode import WindowsDeliveryOptimizationMode
from ..model.prerelease_features import PrereleaseFeatures
from ..model.automatic_update_mode import AutomaticUpdateMode
from ..model.windows_update_install_schedule_type import WindowsUpdateInstallScheduleType
from ..model.windows_update_type import WindowsUpdateType
from datetime import datetime
from ..one_drive_object_base import OneDriveObjectBase
class WindowsUpdateForBusinessConfiguration(OneDriveObjectBase):
def __init__(self, prop_dict={}):
self._prop_dict = prop_dict
@property
def delivery_optimization_mode(self):
"""
Gets and sets the deliveryOptimizationMode
Returns:
:class:`WindowsDeliveryOptimizationMode<onedrivesdk.model.windows_delivery_optimization_mode.WindowsDeliveryOptimizationMode>`:
The deliveryOptimizationMode
"""
if "deliveryOptimizationMode" in self._prop_dict:
if isinstance(self._prop_dict["deliveryOptimizationMode"], OneDriveObjectBase):
return self._prop_dict["deliveryOptimizationMode"]
else :
self._prop_dict["deliveryOptimizationMode"] = WindowsDeliveryOptimizationMode(self._prop_dict["deliveryOptimizationMode"])
return self._prop_dict["deliveryOptimizationMode"]
return None
@delivery_optimization_mode.setter
def delivery_optimization_mode(self, val):
self._prop_dict["deliveryOptimizationMode"] = val
@property
def prerelease_features(self):
"""
Gets and sets the prereleaseFeatures
Returns:
:class:`PrereleaseFeatures<onedrivesdk.model.prerelease_features.PrereleaseFeatures>`:
The prereleaseFeatures
"""
if "prereleaseFeatures" in self._prop_dict:
if isinstance(self._prop_dict["prereleaseFeatures"], OneDriveObjectBase):
return self._prop_dict["prereleaseFeatures"]
else :
self._prop_dict["prereleaseFeatures"] = PrereleaseFeatures(self._prop_dict["prereleaseFeatures"])
return self._prop_dict["prereleaseFeatures"]
return None
@prerelease_features.setter
def prerelease_features(self, val):
self._prop_dict["prereleaseFeatures"] = val
@property
def automatic_update_mode(self):
"""
Gets and sets the automaticUpdateMode
Returns:
:class:`AutomaticUpdateMode<onedrivesdk.model.automatic_update_mode.AutomaticUpdateMode>`:
The automaticUpdateMode
"""
if "automaticUpdateMode" in self._prop_dict:
if isinstance(self._prop_dict["automaticUpdateMode"], OneDriveObjectBase):
return self._prop_dict["automaticUpdateMode"]
else :
self._prop_dict["automaticUpdateMode"] = AutomaticUpdateMode(self._prop_dict["automaticUpdateMode"])
return self._prop_dict["automaticUpdateMode"]
return None
@automatic_update_mode.setter
def automatic_update_mode(self, val):
self._prop_dict["automaticUpdateMode"] = val
@property
def microsoft_update_service_allowed(self):
"""
Gets and sets the microsoftUpdateServiceAllowed
Returns:
bool:
The microsoftUpdateServiceAllowed
"""
if "microsoftUpdateServiceAllowed" in self._prop_dict:
return self._prop_dict["microsoftUpdateServiceAllowed"]
else:
return None
@microsoft_update_service_allowed.setter
def microsoft_update_service_allowed(self, val):
self._prop_dict["microsoftUpdateServiceAllowed"] = val
@property
def drivers_excluded(self):
"""
Gets and sets the driversExcluded
Returns:
bool:
The driversExcluded
"""
if "driversExcluded" in self._prop_dict:
return self._prop_dict["driversExcluded"]
else:
return None
@drivers_excluded.setter
def drivers_excluded(self, val):
self._prop_dict["driversExcluded"] = val
@property
def installation_schedule(self):
"""
Gets and sets the installationSchedule
Returns:
:class:`WindowsUpdateInstallScheduleType<onedrivesdk.model.windows_update_install_schedule_type.WindowsUpdateInstallScheduleType>`:
The installationSchedule
"""
if "installationSchedule" in self._prop_dict:
if isinstance(self._prop_dict["installationSchedule"], OneDriveObjectBase):
return self._prop_dict["installationSchedule"]
else :
self._prop_dict["installationSchedule"] = WindowsUpdateInstallScheduleType(self._prop_dict["installationSchedule"])
return self._prop_dict["installationSchedule"]
return None
@installation_schedule.setter
def installation_schedule(self, val):
self._prop_dict["installationSchedule"] = val
@property
def quality_updates_deferral_period_in_days(self):
"""
Gets and sets the qualityUpdatesDeferralPeriodInDays
Returns:
int:
The qualityUpdatesDeferralPeriodInDays
"""
if "qualityUpdatesDeferralPeriodInDays" in self._prop_dict:
return self._prop_dict["qualityUpdatesDeferralPeriodInDays"]
else:
return None
@quality_updates_deferral_period_in_days.setter
def quality_updates_deferral_period_in_days(self, val):
self._prop_dict["qualityUpdatesDeferralPeriodInDays"] = val
@property
def feature_updates_deferral_period_in_days(self):
"""
Gets and sets the featureUpdatesDeferralPeriodInDays
Returns:
int:
The featureUpdatesDeferralPeriodInDays
"""
if "featureUpdatesDeferralPeriodInDays" in self._prop_dict:
return self._prop_dict["featureUpdatesDeferralPeriodInDays"]
else:
return None
@feature_updates_deferral_period_in_days.setter
def feature_updates_deferral_period_in_days(self, val):
self._prop_dict["featureUpdatesDeferralPeriodInDays"] = val
@property
def quality_updates_paused(self):
"""
Gets and sets the qualityUpdatesPaused
Returns:
bool:
The qualityUpdatesPaused
"""
if "qualityUpdatesPaused" in self._prop_dict:
return self._prop_dict["qualityUpdatesPaused"]
else:
return None
@quality_updates_paused.setter
def quality_updates_paused(self, val):
self._prop_dict["qualityUpdatesPaused"] = val
@property
def feature_updates_paused(self):
"""
Gets and sets the featureUpdatesPaused
Returns:
bool:
The featureUpdatesPaused
"""
if "featureUpdatesPaused" in self._prop_dict:
return self._prop_dict["featureUpdatesPaused"]
else:
return None
@feature_updates_paused.setter
def feature_updates_paused(self, val):
self._prop_dict["featureUpdatesPaused"] = val
@property
def quality_updates_pause_expiry_date_time(self):
"""
Gets and sets the qualityUpdatesPauseExpiryDateTime
Returns:
datetime:
The qualityUpdatesPauseExpiryDateTime
"""
if "qualityUpdatesPauseExpiryDateTime" in self._prop_dict:
return datetime.strptime(self._prop_dict["qualityUpdatesPauseExpiryDateTime"].replace("Z", ""), "%Y-%m-%dT%H:%M:%S.%f")
else:
return None
@quality_updates_pause_expiry_date_time.setter
def quality_updates_pause_expiry_date_time(self, val):
self._prop_dict["qualityUpdatesPauseExpiryDateTime"] = val.isoformat()+"Z"
@property
def feature_updates_pause_expiry_date_time(self):
"""
Gets and sets the featureUpdatesPauseExpiryDateTime
Returns:
datetime:
The featureUpdatesPauseExpiryDateTime
"""
if "featureUpdatesPauseExpiryDateTime" in self._prop_dict:
return datetime.strptime(self._prop_dict["featureUpdatesPauseExpiryDateTime"].replace("Z", ""), "%Y-%m-%dT%H:%M:%S.%f")
else:
return None
@feature_updates_pause_expiry_date_time.setter
def feature_updates_pause_expiry_date_time(self, val):
self._prop_dict["featureUpdatesPauseExpiryDateTime"] = val.isoformat()+"Z"
@property
def business_ready_updates_only(self):
"""
Gets and sets the businessReadyUpdatesOnly
Returns:
:class:`WindowsUpdateType<onedrivesdk.model.windows_update_type.WindowsUpdateType>`:
The businessReadyUpdatesOnly
"""
if "businessReadyUpdatesOnly" in self._prop_dict:
if isinstance(self._prop_dict["businessReadyUpdatesOnly"], OneDriveObjectBase):
return self._prop_dict["businessReadyUpdatesOnly"]
else :
self._prop_dict["businessReadyUpdatesOnly"] = WindowsUpdateType(self._prop_dict["businessReadyUpdatesOnly"])
return self._prop_dict["businessReadyUpdatesOnly"]
return None
@business_ready_updates_only.setter
def business_ready_updates_only(self, val):
self._prop_dict["businessReadyUpdatesOnly"] = val
|
'''
Name: flowcontrol_if.py
Author: Yang Ze
Created: 2019.04.02
Last Modified:
Version: V1.0
Description: An example for if...elif...else...
'''
# There is no switch statement in Python. You can use an if..elif..else statement to
# do the same thing (and in some cases, use a dictionary to do it quickly)
number = 23
guess = int(input('Enter an integer : '))
if guess == number:
# New block starts here
print('Congratulations, you guessed it.')
print('(but you do not win any prizes!)')
# New block ends here
elif guess < number:
# Another block
print('No, it is a little higher than that')
# You can do whatever you want in a block ...
else:
print('No, it is a little lower than that')
# you must have guessed > number to reach here
print('Done')
# This last statement is always executed,
# after the if statement is executed. |
#!/usr/bin/env python
import argparse
import importlib
import inspect
import struct
import sys
import time
import emuplugin
import disasm
try:
raw_input
except NameError:
raw_input = input
# offsets into DCPU16.memory corresponding to addressing mode codes
SP, PC, O, LIT = 0x1001B, 0x1001C, 0x1001D, 0x1001E
def opcode(code):
"""A decorator for opcodes"""
def decorator(func):
setattr(func, "_is_opcode", True)
setattr(func, "_opcode", code)
return func
return decorator
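# (Added note: methods decorated with @opcode(code) are discovered in DCPU16.__init__
#  via inspect.getmembers and collected into the self.opcodes dispatch table.)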
class DCPU16:
def __init__(self, memory, plugins=[]):
self.plugins = plugins
self.memory = [memory[i] if i < len(memory) else 0 for i in range(0x1001F)]
self.skip = False
self.cycle = 0
self.opcodes = {}
for name, value in inspect.getmembers(self):
if inspect.ismethod(value) and getattr(value, "_is_opcode", False):
self.opcodes[getattr(value, "_opcode")] = value
@opcode(0x01)
def SET(self, a, b):
self.memory[a] = b
self.cycle += 1
@opcode(0x02)
def ADD(self, a, b):
o, r = divmod(self.memory[a] + b, 0x10000)
self.memory[O] = o
self.memory[a] = r
self.cycle += 2
@opcode(0x03)
def SUB(self, a, b):
o, r = divmod(self.memory[a] - b, 0x10000)
self.memory[O] = 0xFFFF if o == -1 else 0x0000
self.memory[a] = r
self.cycle += 2
@opcode(0x04)
def MUL(self, a, b):
o, r = divmod(self.memory[a] * b, 0x10000)
self.memory[a] = r
self.memory[O] = o % 0x10000
self.cycle += 2
@opcode(0x05)
def DIV(self, a, b):
if b == 0x0:
r = 0x0
o = 0x0
else:
            r = self.memory[a] // b % 0x10000
            o = ((self.memory[a] << 16) // b) % 0x10000
self.memory[a] = r
self.memory[O] = o
self.cycle += 3
@opcode(0x06)
def MOD(self, a, b):
if b == 0x0:
r = 0x0
else:
r = self.memory[a] % b
self.memory[a] = r
self.cycle += 3
@opcode(0x07)
def SHL(self, a, b):
o, r = divmod(self.memory[a] << b, 0x10000)
self.memory[a] = r
self.memory[O] = o % 0x10000
self.cycle += 2
@opcode(0x08)
def SHR(self, a, b):
r = self.memory[a] >> b
o = ((self.memory[a] << 16) >> b) % 0x10000
self.memory[a] = r
self.memory[O] = o
self.cycle += 2
@opcode(0x09)
def AND(self, a, b):
self.memory[a] = self.memory[a] & b
self.cycle += 1
@opcode(0x0a)
def BOR(self, a, b):
self.memory[a] = self.memory[a] | b
self.cycle += 1
@opcode(0x0b)
def XOR(self, a, b):
self.memory[a] = self.memory[a] ^ b
self.cycle += 1
@opcode(0x0c)
def IFE(self, a, b):
self.skip = not (self.memory[a] == b)
        self.cycle += 2 + (1 if self.skip else 0)
@opcode(0x0d)
def IFN(self, a, b):
self.skip = not (self.memory[a] != b)
        self.cycle += 2 + (1 if self.skip else 0)
@opcode(0x0e)
def IFG(self, a, b):
self.skip = not (self.memory[a] > b)
        self.cycle += 2 + (1 if self.skip else 0)
@opcode(0x0f)
def IFB(self, a, b):
self.skip = not ((self.memory[a] & b) != 0)
        self.cycle += 2 + (1 if self.skip else 0)
@opcode(0x010)
def JSR(self, a, b):
self.memory[SP] = (self.memory[SP] - 1) % 0x10000
pc = self.memory[PC]
self.memory[self.memory[SP]] = pc
self.memory[PC] = b
self.cycle += 2
def get_operand(self, a, dereference=False):
literal = False
if a < 0x08 or 0x1B <= a <= 0x1D:
arg1 = 0x10000 + a
elif a < 0x10:
arg1 = self.memory[0x10000 + (a % 0x08)]
elif a < 0x18:
next_word = self.memory[self.memory[PC]]
self.memory[PC] += 1
arg1 = next_word + self.memory[0x10000 + (a % 0x10)]
self.cycle += 0 if self.skip else 1
elif a == 0x18:
arg1 = self.memory[SP]
if not self.skip:
self.memory[SP] = (self.memory[SP] + 1) % 0x10000
elif a == 0x19:
arg1 = self.memory[SP]
elif a == 0x1A:
if not self.skip:
self.memory[SP] = (self.memory[SP] - 1) % 0x10000
arg1 = self.memory[SP]
elif a == 0x1E:
arg1 = self.memory[self.memory[PC]]
self.memory[PC] += 1
self.cycle += 0 if self.skip else 1
elif a == 0x1F:
arg1 = self.memory[PC]
self.memory[PC] += 1
self.cycle += 0 if self.skip else 1
else:
literal = True
arg1 = a % 0x20
if not dereference:
self.memory[LIT] = arg1
arg1 = LIT
if dereference and not literal:
arg1 = self.memory[arg1]
return arg1
def run(self, trace=False, show_speed=False):
tick = 0
last_time = time.time()
last_cycle = self.cycle
if trace:
disassembler = disasm.Disassembler(self.memory)
while True:
pc = self.memory[PC]
w = self.memory[pc]
self.memory[PC] += 1
operands, opcode = divmod(w, 16)
b, a = divmod(operands, 64)
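            # (Added note: a DCPU-16 basic instruction word is laid out as bbbbbbaaaaaaoooo,
            #  so the low 4 bits are the opcode, the next 6 bits operand a, the top 6 bits operand b.)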
if trace:
disassembler.offset = pc
print("(%08X) %s" % (self.cycle, disassembler.next_instruction()))
if opcode == 0x00:
if a == 0x00:
break
arg1 = None
opcode = (a << 4) + 0x0
else:
arg1 = self.get_operand(a)
op = self.opcodes[opcode]
arg2 = self.get_operand(b, dereference=True)
if self.skip:
if trace:
print("skipping")
self.skip = False
else:
if 0x01 <= opcode <=0xB: # write to memory
oldval = self.memory[arg1]
op(arg1, arg2)
val = self.memory[arg1]
if oldval != val:
for p in self.plugins:
p.memory_changed(self, arg1, val, oldval)
else:
op(arg1, arg2)
if trace:
self.dump_registers()
self.dump_stack()
tick += 1
if tick >= 100000:
if show_speed:
print("%dkHz" % (int((self.cycle - last_cycle) / (time.time() - last_time)) / 1000))
last_time = time.time()
last_cycle = self.cycle
tick = 0
try:
for p in self.plugins:
p.tick(self)
except SystemExit:
break
def dump_registers(self):
print(" ".join("%s=%04X" % (["A", "B", "C", "X", "Y", "Z", "I", "J"][i],
self.memory[0x10000 + i]) for i in range(8)))
print("PC={0:04X} SP={1:04X} O={2:04X}".format(*[self.memory[i] for i in (PC, SP, O)]))
def dump_stack(self):
if self.memory[SP] == 0x0:
print("Stack: []")
else:
print("Stack: [" + " ".join("%04X" % self.memory[m] for m in range(self.memory[SP], 0x10000)) + "]")
if __name__ == "__main__":
plugins = emuplugin.importPlugins()
parser = argparse.ArgumentParser(description="DCPU-16 emulator")
parser.add_argument("-d", "--debug", action="store_const", const=True, default=False, help="Run emulator in debug mode. This implies '--trace'")
parser.add_argument("-t", "--trace", action="store_const", const=True, default=False, help="Print dump of registers and stack after every step")
parser.add_argument("-s", "--speed", action="store_const", const=True, default=False, help="Print speed the emulator is running at in kHz")
parser.add_argument("object_file", help="File with assembled DCPU binary")
for p in plugins:
for args in p.arguments:
parser.add_argument(*args[0], **args[1])
args = parser.parse_args()
if args.debug:
args.trace = True
program = []
with open(args.object_file, "rb") as f:
word = f.read(2)
while word:
program.append(struct.unpack(">H", word)[0])
word = f.read(2)
plugins_loaded = []
try:
for p in plugins:
p = p(args)
if p.loaded:
print("Started plugin: %s" % p.name)
plugins_loaded.append(p)
dcpu16 = DCPU16(program, plugins_loaded)
dcpu16.run(trace=args.trace, show_speed=args.speed)
except KeyboardInterrupt:
pass
finally:
for p in plugins_loaded:
p.shutdown()
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import numpy as np
import tensorflow as tf
import resnet_model
HEIGHT = 32
WIDTH = 32
DEPTH = 3
NUM_CLASSES = 10
NUM_DATA_BATCHES = 5
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 10000 * NUM_DATA_BATCHES
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 10000
parser = argparse.ArgumentParser()
# Basic model parameters.
parser.add_argument('--data_dir', type=str, default='/tmp/cifar10_data',
help='The path to the CIFAR-10 data directory.')
parser.add_argument('--model_dir', type=str, default='/tmp/cifar10_model',
help='The directory where the model will be stored.')
parser.add_argument('--resnet_size', type=int, default=32,
help='The size of the ResNet model to use.')
parser.add_argument('--train_steps', type=int, default=100000,
help='The number of batches to train.')
parser.add_argument('--steps_per_eval', type=int, default=4000,
help='The number of batches to run in between evaluations.')
parser.add_argument('--batch_size', type=int, default=128,
help='The number of images per batch.')
FLAGS = parser.parse_args()
# Scale the learning rate linearly with the batch size. When the batch size is
# 128, the learning rate should be 0.1.
_INITIAL_LEARNING_RATE = 0.1 * FLAGS.batch_size / 128
_MOMENTUM = 0.9
# We use a weight decay of 0.0002, which performs better than the 0.0001 that
# was originally suggested.
_WEIGHT_DECAY = 2e-4
_BATCHES_PER_EPOCH = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
def record_dataset(filenames):
"""Returns an input pipeline Dataset from `filenames`."""
record_bytes = HEIGHT * WIDTH * DEPTH + 1
return tf.contrib.data.FixedLengthRecordDataset(filenames, record_bytes)
def filenames(mode):
"""Returns a list of filenames based on 'mode'."""
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
assert os.path.exists(data_dir), ('Run cifar10_download_and_extract.py first '
'to download and extract the CIFAR-10 data.')
if mode == tf.estimator.ModeKeys.TRAIN:
return [
os.path.join(data_dir, 'data_batch_%d.bin' % i)
for i in range(1, NUM_DATA_BATCHES + 1)
]
elif mode == tf.estimator.ModeKeys.EVAL:
return [os.path.join(data_dir, 'test_batch.bin')]
else:
raise ValueError('Invalid mode: %s' % mode)
def dataset_parser(value):
"""Parse a CIFAR-10 record from value."""
# Every record consists of a label followed by the image, with a fixed number
# of bytes for each.
label_bytes = 1
image_bytes = HEIGHT * WIDTH * DEPTH
record_bytes = label_bytes + image_bytes
# Convert from a string to a vector of uint8 that is record_bytes long.
raw_record = tf.decode_raw(value, tf.uint8)
# The first byte represents the label, which we convert from uint8 to int32.
label = tf.cast(raw_record[0], tf.int32)
# The remaining bytes after the label represent the image, which we reshape
# from [depth * height * width] to [depth, height, width].
depth_major = tf.reshape(raw_record[label_bytes:record_bytes],
[DEPTH, HEIGHT, WIDTH])
# Convert from [depth, height, width] to [height, width, depth], and cast as
# float32.
image = tf.cast(tf.transpose(depth_major, [1, 2, 0]), tf.float32)
return image, tf.one_hot(label, NUM_CLASSES)
def train_preprocess_fn(image, label):
"""Preprocess a single training image of layout [height, width, depth]."""
# Resize the image to add four extra pixels on each side.
image = tf.image.resize_image_with_crop_or_pad(image, HEIGHT + 8, WIDTH + 8)
# Randomly crop a [HEIGHT, WIDTH] section of the image.
image = tf.random_crop(image, [HEIGHT, WIDTH, DEPTH])
# Randomly flip the image horizontally.
image = tf.image.random_flip_left_right(image)
return image, label
def input_fn(mode, batch_size):
"""Input_fn using the contrib.data input pipeline for CIFAR-10 dataset.
Args:
mode: Standard names for model modes (tf.estimators.ModeKeys).
batch_size: The number of samples per batch of input requested.
"""
dataset = record_dataset(filenames(mode))
# For training repeat forever.
if mode == tf.estimator.ModeKeys.TRAIN:
dataset = dataset.repeat()
dataset = dataset.map(dataset_parser, num_threads=1,
output_buffer_size=2 * batch_size)
# For training, preprocess the image and shuffle.
if mode == tf.estimator.ModeKeys.TRAIN:
dataset = dataset.map(train_preprocess_fn, num_threads=1,
output_buffer_size=2 * batch_size)
# Ensure that the capacity is sufficiently large to provide good random
# shuffling.
buffer_size = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN * 0.4) + 3 * batch_size
dataset = dataset.shuffle(buffer_size=buffer_size)
# Subtract off the mean and divide by the variance of the pixels.
dataset = dataset.map(
lambda image, label: (tf.image.per_image_standardization(image), label),
num_threads=1,
output_buffer_size=2 * batch_size)
# Batch results by up to batch_size, and then fetch the tuple from the
# iterator.
iterator = dataset.batch(batch_size).make_one_shot_iterator()
images, labels = iterator.get_next()
return images, labels
def cifar10_model_fn(features, labels, mode):
"""Model function for CIFAR-10."""
tf.summary.image('images', features, max_outputs=6)
network = resnet_model.cifar10_resnet_v2_generator(
FLAGS.resnet_size, NUM_CLASSES)
inputs = tf.reshape(features, [-1, HEIGHT, WIDTH, DEPTH])
logits = network(inputs, mode == tf.estimator.ModeKeys.TRAIN)
predictions = {
'classes': tf.argmax(logits, axis=1),
'probabilities': tf.nn.softmax(logits, name='softmax_tensor')
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
# Calculate loss, which includes softmax cross entropy and L2 regularization.
cross_entropy = tf.losses.softmax_cross_entropy(
logits=logits, onehot_labels=labels)
# Create a tensor named cross_entropy for logging purposes.
tf.identity(cross_entropy, name='cross_entropy')
tf.summary.scalar('cross_entropy', cross_entropy)
# Add weight decay to the loss.
loss = cross_entropy + _WEIGHT_DECAY * tf.add_n(
[tf.nn.l2_loss(v) for v in tf.trainable_variables()])
if mode == tf.estimator.ModeKeys.TRAIN:
global_step = tf.train.get_or_create_global_step()
# Multiply the learning rate by 0.1 at 100, 150, and 200 epochs.
boundaries = [int(_BATCHES_PER_EPOCH * epoch) for epoch in [100, 150, 200]]
values = [_INITIAL_LEARNING_RATE * decay for decay in [1, 0.1, 0.01, 0.001]]
learning_rate = tf.train.piecewise_constant(
tf.cast(global_step, tf.int32), boundaries, values)
# Create a tensor named learning_rate for logging purposes
tf.identity(learning_rate, name='learning_rate')
tf.summary.scalar('learning_rate', learning_rate)
optimizer = tf.train.MomentumOptimizer(
learning_rate=learning_rate,
momentum=_MOMENTUM)
# Batch norm requires update ops to be added as a dependency to the train_op
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(loss, global_step)
else:
train_op = None
  accuracy = tf.metrics.accuracy(
tf.argmax(labels, axis=1), predictions['classes'])
metrics = {'accuracy': accuracy}
# Create a tensor named train_accuracy for logging purposes
tf.identity(accuracy[1], name='train_accuracy')
tf.summary.scalar('train_accuracy', accuracy[1])
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metric_ops=metrics)
def main(unused_argv):
# Using the Winograd non-fused algorithms provides a small performance boost.
os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
cifar_classifier = tf.estimator.Estimator(
model_fn=cifar10_model_fn, model_dir=FLAGS.model_dir)
for cycle in range(FLAGS.train_steps // FLAGS.steps_per_eval):
tensors_to_log = {
'learning_rate': 'learning_rate',
'cross_entropy': 'cross_entropy',
'train_accuracy': 'train_accuracy'
}
logging_hook = tf.train.LoggingTensorHook(
tensors=tensors_to_log, every_n_iter=100)
cifar_classifier.train(
input_fn=lambda: input_fn(tf.estimator.ModeKeys.TRAIN,
batch_size=FLAGS.batch_size),
steps=FLAGS.steps_per_eval,
hooks=[logging_hook])
# Evaluate the model and print results
eval_results = cifar_classifier.evaluate(
input_fn=lambda: input_fn(tf.estimator.ModeKeys.EVAL,
batch_size=FLAGS.batch_size))
print(eval_results)
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
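# Example invocation (added for illustration; the script name is a placeholder):
#   python cifar10_main.py --data_dir=/tmp/cifar10_data --model_dir=/tmp/cifar10_model --resnet_size=32
# Training alternates `steps_per_eval` training steps with one evaluation pass until
# `train_steps` total steps have been run.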
|
name = "Foundation"
|
"""
Amazon MWS Feeds API
"""
from __future__ import absolute_import
from ..mws import MWS
from .. import utils
from ..decorators import next_token_action
# TODO Add FeedProcessingStatus enumeration
# TODO Add FeedType enumeration
def feed_options_str(feed_options):
"""Convert a FeedOptions dict of values into an appropriate string value.
Amazon docs for VAT upload with details:
https://m.media-amazon.com/images/G/01/B2B/DeveloperGuide/vat_calculation_service__dev_guide_H383rf73k4hsu1TYRH139kk134yzs.pdf
(section 6.4)
Example:
feed_options = {
"shippingid": "283845474",
"totalAmount": 3.25,
"totalvatamount": 1.23,
"invoicenumber": "INT-3431-XJE3",
"documenttype": "CreditNote",
"transactionid": "amzn:crow:429491192ksjfhe39s",
}
print(feed_options_str(feed_options))
>>> "metadata:shippingid=283845474;metadata:totalAmount=3.25;metadata:totalvatamount=1.23;
metadata:invoicenumber=INT-3431-XJE3;metadata:documenttype=CreditNote;
metadata:transactionid=amzn:crow:429491192ksjfhe39s"
"""
if not feed_options:
return None
if not isinstance(feed_options, dict):
raise ValueError("`feed_options` should be a dict or None")
output = []
for key, val in feed_options.items():
outval = val
if outval is True or outval is False:
# Convert literal `True` or `False` to strings `"true"` and `"false"`
outval = str(outval).lower()
output.append(f"metadata:{key}={outval}")
return ";".join(output)
class Feeds(MWS):
"""
Amazon MWS Feeds API
Docs:
http://docs.developer.amazonservices.com/en_US/feeds/Feeds_Overview.html
"""
ACCOUNT_TYPE = "Merchant"
NEXT_TOKEN_OPERATIONS = [
'GetFeedSubmissionList',
]
def submit_feed(self, feed, feed_type, feed_options=None, marketplace_ids=None,
amazon_order_id=None, document_type=None, content_type="text/xml",
purge='false'):
"""
Uploads a feed for processing by Amazon MWS.
`feed` should contain a file object in XML or flat-file format.
Docs:
http://docs.developer.amazonservices.com/en_US/feeds/Feeds_SubmitFeed.html
"""
if isinstance(feed_options, dict):
# Convert dict of options to str value
feed_options = feed_options_str(feed_options)
data = {
'Action': 'SubmitFeed',
'FeedType': feed_type,
'FeedOptions': feed_options,
'PurgeAndReplace': purge,
}
# for feed type _POST_EASYSHIP_DOCUMENTS_
# check http://docs.developer.amazonservices.com/en_IN/easy_ship/EasyShip_HowToGetEasyShipDocs.html
if amazon_order_id:
data.update({'AmazonOrderId': amazon_order_id})
# by default all document pdfs are included
# allowed values: ShippingLabel, Invoice, Warranty
if document_type:
data.update({'DocumentType': document_type})
data.update(utils.enumerate_param('MarketplaceIdList.Id.', marketplace_ids))
md5_hash = utils.calc_md5(feed)
return self.make_request(data, method="POST", body=feed,
extra_headers={'Content-MD5': md5_hash, 'Content-Type': content_type})
@next_token_action('GetFeedSubmissionList')
def get_feed_submission_list(self, feed_ids=None, max_count=None, feed_types=None,
processing_statuses=None, from_date=None, to_date=None,
next_token=None):
"""
Returns a list of all feed submissions submitted between `from_date` and `to_date`.
        If these parameters are omitted, defaults to the previous 90 days.
Pass `next_token` to call "GetFeedSubmissionListByNextToken" instead.
Docs:
http://docs.developer.amazonservices.com/en_US/feeds/Feeds_GetFeedSubmissionList.html
"""
data = {
'Action': 'GetFeedSubmissionList',
'MaxCount': max_count,
'SubmittedFromDate': from_date,
'SubmittedToDate': to_date,
}
        data.update(utils.enumerate_param('FeedSubmissionIdList.Id.', feed_ids))
data.update(utils.enumerate_param('FeedTypeList.Type.', feed_types))
data.update(utils.enumerate_param('FeedProcessingStatusList.Status.', processing_statuses))
return self.make_request(data)
def get_feed_submission_list_by_next_token(self, token):
"""
Alias for `get_feed_submission_list(next_token=token)`
Docs:
http://docs.developer.amazonservices.com/en_US/feeds/Feeds_GetFeedSubmissionListByNextToken.html
"""
return self.get_feed_submission_list(next_token=token)
def get_feed_submission_count(self, feed_types=None, processing_statuses=None, from_date=None, to_date=None):
"""
Returns a count of the feeds submitted between `from_date` and `to_date`.
        If these parameters are omitted, defaults to the previous 90 days.
Docs:
http://docs.developer.amazonservices.com/en_US/feeds/Feeds_GetFeedSubmissionCount.html
"""
data = {
'Action': 'GetFeedSubmissionCount',
'SubmittedFromDate': from_date,
'SubmittedToDate': to_date,
}
data.update(utils.enumerate_param('FeedTypeList.Type.', feed_types))
data.update(utils.enumerate_param('FeedProcessingStatusList.Status.', processing_statuses))
return self.make_request(data)
def cancel_feed_submissions(self, feed_ids=None, feed_types=None, from_date=None, to_date=None):
"""
Cancels one or more feed submissions and returns a count of the feed submissions that were canceled.
Docs:
http://docs.developer.amazonservices.com/en_US/feeds/Feeds_CancelFeedSubmissions.html
"""
data = {
'Action': 'CancelFeedSubmissions',
'SubmittedFromDate': from_date,
'SubmittedToDate': to_date,
}
data.update(utils.enumerate_param('FeedSubmissionIdList.Id.', feed_ids))
data.update(utils.enumerate_param('FeedTypeList.Type.', feed_types))
return self.make_request(data)
def get_feed_submission_result(self, feed_id):
"""
Returns the feed processing report and the Content-MD5 header.
Docs:
http://docs.developer.amazonservices.com/en_US/feeds/Feeds_GetFeedSubmissionResult.html
"""
data = {
'Action': 'GetFeedSubmissionResult',
'FeedSubmissionId': feed_id,
}
return self.make_request(data, rootkey='Message')
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
try:
# Not guaranteed available at setup time
from .spectrum import BaseSpectrum, AbsorptionSpectrum, CDESpectrum
except ImportError:
if not _ASTROPY_SETUP_:
raise
|
from itertools import izip, izip_longest
import numpy as np
from cocoa.core.entity import is_entity
from cocoa.lib import logstats
from cocoa.model.util import EPS
from cocoa.model.evaluate import BaseEvaluator
from preprocess import markers
from graph import Graph
def remove_entities(entity_tokens):
eoe_inds = [i for i, x in enumerate(entity_tokens) if x == markers.EOE]
to_remove = set(eoe_inds)
def find_entities(eoe_ind):
i = eoe_ind - 1
while i >= 0:
if entity_tokens[i] != markers.EOS:
to_remove.add(i)
else:
break
i -= 1
for eoe_ind in eoe_inds:
find_entities(eoe_ind)
return [x for i, x in enumerate(entity_tokens) if i not in to_remove], [x for i, x in enumerate(entity_tokens) if i in to_remove]
def pred_to_token(preds, stop_symbol, remove_symbols, textint_map, remove_entity, num_sents=None):
'''
Convert integer predictions to tokens. Remove PAD and EOS.
preds: (batch_size, max_len)
'''
def find_stop(array, n):
count = 0
for i, a in enumerate(array):
if a == stop_symbol:
count += 1
if count == n:
# +1: include </s>
return i + 1
return None
tokens = []
entities = []
if num_sents is None:
num_sents = [1 for _ in preds]
for pred, n in izip(preds, num_sents):
if remove_entity:
#print 'raw pred:', textint_map.int_to_text(pred, 'target')
entity_tokens, prepended_entities = remove_entities(textint_map.int_to_text([x for x in pred[:find_stop(pred, n)]], 'target'))
#tokens.append([x for x in entity_tokens if not x in (markers.EOS, markers.PAD)])
tokens.append([x for x in entity_tokens if not x in (markers.PAD,)])
entities.append(prepended_entities)
else:
tokens.append(textint_map.int_to_text([x for x in pred[:find_stop(pred, n)] if not x in remove_symbols], 'target'))
return tokens, entities if len(entities) > 0 else None
class Evaluator(BaseEvaluator):
def __init__(self, data, model, splits=('dev',), batch_size=1, verbose=True):
super(Evaluator, self).__init__(data, model, splits, batch_size, verbose)
self.copy = data.copy
self.prepend = data.prepend
def _stop_symbol(self):
return self.vocab.to_ind(markers.EOS)
def _remove_symbols(self):
return map(self.vocab.to_ind, (markers.PAD,))
def _generate_response(self, sess, dialogue_batch, summary_map):
encoder_init_state = None
# Whether we're using knowledge graphs
graphs = dialogue_batch.get('graph', None)
utterances = None
for batch in dialogue_batch['batch_seq']:
targets = batch['targets']
max_len = targets.shape[1] + 10
output_dict = self.model.generate(sess, batch, encoder_init_state, max_len, graphs=graphs, utterances=utterances, vocab=self.vocab, copy=self.copy, textint_map=self.data.textint_map)
preds = output_dict['preds']
true_final_state = output_dict['true_final_state']
if graphs:
encoder_init_state = true_final_state
utterances = output_dict['utterances']
else:
encoder_init_state = true_final_state
if self.copy:
preds = graphs.copy_preds(preds, self.vocab.size)
num_sents = np.sum(targets == self.stop_symbol, axis=1)
pred_tokens, pred_entities = pred_to_token(preds, self.stop_symbol, self.remove_symbols, self.data.textint_map, self.prepend, num_sents)
references = [self._process_target_tokens(tokens) for tokens in batch['decoder_tokens']]
# Metrics
# Sentence bleu: only for verbose print
bleu_scores = self.sentence_bleu_score(pred_tokens, references)
self.update_bleu_stats(summary_map, pred_tokens, references)
self.update_entity_stats(summary_map, pred_tokens, references, 'entity_')
if 'selection_scores' in output_dict:
self.update_selection_stats(summary_map, output_dict['selection_scores'], output_dict['true_checklists'][:, -1, :], 'select_')
if pred_entities is not None:
self.update_entity_stats(summary_map, pred_entities, references, 'prepend_')
if self.verbose:
attn_scores = output_dict.get('attn_scores', None)
probs = output_dict.get('probs', None)
self._print_batch(batch, pred_tokens, references, bleu_scores, graphs, attn_scores, probs)
def get_stats(self, summary_map):
output = super(Evaluator, self).get_stats(summary_map)
output['entity_f1'] = self.get_f1(summary_map, 'entity_')
output['selection_f1'] = self.get_f1(summary_map, 'select_')
output['prepend_f1'] = self.get_f1(summary_map, 'prepend_')
return output
def stats2str(self, stats):
s = [super(Evaluator, self).stats2str(stats)]
for m in ('entity_f1', 'selection_f1', 'prepend_f1'):
s.append('%s=%.4f/%.4f/%.4f' % (m, stats[m][0], stats[m][1],stats[m][2]))
return ' '.join(s)
def update_selection_stats(self, summary_map, scores, targets, prefix=''):
# NOTE: targets are from the ground truth response and may contain new entities.
# Ideally this would not happen as a mentioned entity is either from the agent's
# KB or from partner's mentions (which is added to the graph), so during decoding
# there shouldn't be new entities. However, the lexicon may "create" an entity.
batch_size, num_nodes = scores.shape
targets = targets[:, :num_nodes]
pos_pred = scores > 0
pos_target = targets == 1
tp = np.sum(np.logical_and(pos_pred, pos_target))
logstats.update_summary_map(summary_map, {prefix+'tp': tp, prefix+'pos_pred': np.sum(pos_pred), prefix+'pos_target': np.sum(pos_target)})
def log_dict(self, stats):
d = super(Evaluator, self).log_dict(stats)
precision, recall, f1 = stats['entity_f1']
d.update({'entity_precision': precision, 'entity_recall': recall, 'entity_f1': f1})
return d
def _process_target_tokens(self, tokens):
targets = super(Evaluator, self)._process_target_tokens(tokens)
if self.prepend:
targets, _ = remove_entities(targets)
return targets
def _print_batch(self, batch, preds, targets, bleu_scores, graphs, attn_scores, probs):
'''
inputs are integers; targets and preds are tokens (converted in test_bleu).
'''
encoder_tokens = batch['encoder_tokens']
inputs = batch['encoder_inputs']
decoder_tokens = batch['decoder_tokens']
print '-------------- batch ----------------'
for i, (target, pred, bleu) in enumerate(izip_longest(targets, preds, bleu_scores)):
# Skip padded turns
if len(decoder_tokens[i]) == 0:
continue
print i
if graphs:
graphs.graphs[i].kb.dump()
print 'RAW INPUT:', encoder_tokens[i]
print 'RAW TARGET:', target
print '----------'
print 'INPUT:', self.data.textint_map.int_to_text(inputs[i], 'encoding')
print 'TARGET:', target
print 'PRED:', pred
print 'BLEU:', bleu
if probs is not None:
print 'TOP-K:'
for j, w in enumerate(pred):
print j
topk = np.argsort(probs[j][i])[::-1][:5]
for id_ in topk:
prob = probs[j][i][id_]
if id_ < self.vocab.size:
print self.vocab.to_word(id_), prob
else:
print graphs.graphs[i].nodes.to_word(id_ - self.vocab.size), prob
#if attn_scores is not None:
# print 'ATTENTION:'
# for j, w in enumerate(pred):
# print 'TOKEN', j, w
# sorted_scores = sorted([(node_id, score) for node_id, score in enumerate(attn_scores[j][i])], key=lambda x: x[1], reverse=True)
# for node_id, score in sorted_scores:
# try:
# print node_id, graphs.graphs[i].nodes.to_word(node_id), score
# except KeyError:
# print node_id, 'pad', score
# NOTE: both batch_preds and batch_targets must use canonical entity form: (name, type)
def update_entity_stats(self, summary_map, batch_preds, batch_targets, prefix=''):
def get_entity(x):
return [e for e in x if is_entity(e)]
pos_target = prefix + 'pos_target'
pos_pred = prefix + 'pos_pred'
tp = prefix + 'tp'
for preds, targets in izip(batch_preds, batch_targets):
preds = set(get_entity(preds))
targets = set(get_entity(targets))
# Don't record cases where no entity is presented
if len(targets) > 0:
logstats.update_summary_map(summary_map, {pos_target: len(targets), pos_pred: len(preds)})
logstats.update_summary_map(summary_map, {tp: sum([1 if e in preds else 0 for e in targets])})
class FactEvaluator(object):
'''
Evaluate if a statement is true (approximately) given a KB.
'''
def __init__(self):
keys = ('undecided', 'fact', 'single_fact', 'joint_fact', 'coref', 'correct_single', 'correct_joint', 'correct_joint_ent', 'repeated', 'same_col')
self.summary_map = {}
for k in keys:
logstats.update_summary_map(self.summary_map, {k: 0})
def inc_undecided(self):
logstats.update_summary_map(self.summary_map, {'undecided': 1})
def inc_fact(self):
logstats.update_summary_map(self.summary_map, {'fact': 1})
def inc_coref(self):
logstats.update_summary_map(self.summary_map, {'coref': 1})
def str_to_num(self, token):
if token == 'no':
return 0
elif token == 'one':
return 1
elif token == 'two':
return 2
elif token == '3':
return 3
elif token == 'most':
return 4
elif token == 'all':
return 5
return None
def eval_single(self, kb, span):
#print 'eval_single:', span
logstats.update_summary_map(self.summary_map, {'single_fact': 1})
num, ent = span
ent = ent[1] # take the canonical form
num = self.str_to_num(num)
count = 0
for i, item in enumerate(kb.items):
for entity in self.item_entities(item):
if entity == ent:
count += 1
if num == count:
#print 'correct single'
logstats.update_summary_map(self.summary_map, {'correct_single': 1})
def item_entities(self, item):
attrs = sorted(item.items(), key=lambda x: x[0])
for attr_name, value in attrs:
type_ = Graph.metadata.attribute_types[attr_name]
yield (value.lower(), type_)
def eval_joint(self, kb, span):
#print 'eval_joint:', span
logstats.update_summary_map(self.summary_map, {'joint_fact': 1})
num, ent1, _, ent2 = span
ent1 = ent1[1]
ent2 = ent2[1]
if ent1 == ent2:
#print 'repeated'
logstats.update_summary_map(self.summary_map, {'repeated': 1})
return
# Same type, i.e. in the same column
if ent1[1] == ent2[1]:
#print 'same column'
logstats.update_summary_map(self.summary_map, {'same_col': 1})
return
num = self.str_to_num(num)
count = 0
for i, item in enumerate(kb.items):
entities = [entity for entity in self.item_entities(item)]
if ent1 in entities and ent2 in entities:
count += 1
#print 'correct joint ent'
logstats.update_summary_map(self.summary_map, {'correct_joint_ent': 1})
if count == num:
#print 'correct joint'
logstats.update_summary_map(self.summary_map, {'correct_joint': 1})
def report(self):
num_total_facts = float(self.summary_map['fact']['sum']) + EPS
num_single_facts = float(self.summary_map['single_fact']['sum']) + EPS
num_joint_facts = float(self.summary_map['joint_fact']['sum']) + EPS
result = {
'undecided': self.summary_map['undecided']['sum'] / num_total_facts,
'single_facts': self.summary_map['single_fact']['sum'] / num_total_facts,
'joint_facts': self.summary_map['joint_fact']['sum'] / num_total_facts,
'correct_single': self.summary_map['correct_single']['sum'] / num_single_facts,
'correct_joint': self.summary_map['correct_joint']['sum'] / num_joint_facts,
'correct_ent': self.summary_map['correct_joint_ent']['sum'] / num_joint_facts,
'repeated': self.summary_map['repeated']['sum'] / num_joint_facts,
'same_col': self.summary_map['same_col']['sum'] / num_joint_facts,
'coref': self.summary_map['coref']['sum'] / num_total_facts,
}
return result
def eval(self, kb, utterance):
'''
utterance: a list of tokens and entities represented as a tuple (surface_form, (canonical_form, type))
'''
#print 'eval:', utterance
N = len(utterance)
i = 0
while i < N:
token = utterance[i]
if is_entity(token) and token[1][1] != 'item':
self.inc_fact()
if i+1 < N and utterance[i+1] == 'and':
# number ent1 and ent2
if i-1 < 0 or i+3 > N:
self.inc_undecided()
i += 1
else:
start, end = i-1, i+3
if not is_entity(utterance[i+2]):
self.inc_undecided()
else:
if end + 1 < N and utterance[end:end+2] == ['in', 'those']:
self.inc_coref()
i = end + 2
else:
self.eval_joint(kb, utterance[start:end])
i = end
elif i-1 > 0:
# number ent
start, end = i-1, i+1
if end + 1 < N and utterance[end:end+2] == ['in', 'those']:
self.inc_coref()
i = end + 2
else:
self.eval_single(kb, utterance[start:end])
i = end
else:
self.inc_undecided()
i += 1
else:
i += 1
|
import os
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import random
LOCATION = 'images/'
location_src = LOCATION + 'movie_pics/'
location_squares = LOCATION + 'squares/'
location_squares_error = LOCATION + 'error_squares/'
images = os.listdir(location_src)
n_images = len(images)
print(f"[INFO] loading {n_images} images from {location_src}...")
data = []
labels = []
b = 0
print(os.getcwd())  # was the IPython shell magic "!pwd", which is invalid in a plain Python script
# slice images into 64x64 squares
# TODO: fix row selection, it is not working!
for image in images:
label = image.split('.')[-2]
print(label)
labels.append(label)
img = Image.open(location_src+image)
img = np.array(img)
for x in range(0, img.shape[0], 64):
xvon = x
xbis = x+64
if xvon > img.shape[0]-64:
xvon = img.shape[0]-64
xbis = img.shape[0]
for y in range(0, img.shape[1], 64):
yvon = y
ybis = y+64
if ybis > img.shape[1]:
yvon = img.shape[1]-64
ybis = img.shape[1]
square = img[xvon:xbis, yvon:ybis,:]
Image.fromarray(square).convert("RGB").save(location_squares+label+"_"+str(x)+"_"+str(y)+".png")
for i in range(random.choice([1, 2, 3])):
#Add some hot-pixel errors
square[np.random.randint(low=0,high=64),np.random.randint(low=0,high=64)]= np.random.randint(low=200,high=255)
square[np.random.randint(low=0,high=64),np.random.randint(low=0,high=64)]= np.random.randint(low=0,high=10)
square[np.random.randint(low=0,high=64),np.random.randint(low=0,high=64)]= np.random.randint(low=0,high=10)
Image.fromarray(square).convert("RGB").save(location_squares_error+label+"_"+str(x)+"_"+str(y)+".png")
# stop after 3 images for testing
# b = b+1
# if b > 20:
# break
# TRY ZONE:
img = Image.open(location_squares+'zachariah047_0_192.png')
img = np.array(img)
plt.imshow(img)
|
from django.urls import re_path
from . import consumers
websocket_urlpatterns = [
re_path(r"ws/judge/queue/", consumers.QueueUpdateConsumer.as_asgi()),
re_path(r"ws/judge/scoreboard/", consumers.ScoreboardUpdateConsumer.as_asgi()),
]
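# These patterns are meant to be wired into the project's ASGI application via
# channels' URLRouter, e.g. (hypothetical asgi.py, assuming a standard Channels setup):
#   application = ProtocolTypeRouter({
#       "websocket": AuthMiddlewareStack(URLRouter(websocket_urlpatterns)),
#   })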
|
from input import input_file
file = input_file('input_files/day2_input.txt')
def part1(input_values):
horiz = 0
up = 0
down = 0
for i in input_values:
if 'f' in i:
horiz += int(i[-1])
elif 'n' in i:
down += int(i[-1])
elif 'p' in i:
up += int(i[-1])
depth = down - up
return horiz*depth
def part2(input_values):
fd = 0
aim = 0
depth = 0
for i in input_values:
if 'f' in i:
fd += int(i[-1])
depth += int(i[-1]) * aim
elif 'n' in i:
aim += int(i[-1])
elif 'p' in i:
aim -= int(i[-1])
return fd*depth
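# Worked example with the sample course from the puzzle:
#   ["forward 5", "down 5", "forward 8", "up 3", "down 8", "forward 2"]
# part1: horiz = 15, depth = 10 -> 150; part2: fd = 15, depth = 60 -> 900.
# Note that `int(i[-1])` reads only the last character, which assumes single-digit amounts.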
if __name__ == '__main__':
print(f'Part 1: {part1(file)}\nPart 2: {part2(file)}')
|
"""Implementation of core Haskell rules"""
load(
"@io_tweag_rules_haskell//haskell:providers.bzl",
"C2hsLibraryInfo",
"HaskellInfo",
"HaskellLibraryInfo",
"HaskellPrebuiltPackageInfo",
)
load(":cc.bzl", "cc_interop_info")
load(
":private/actions/link.bzl",
"link_binary",
"link_library_dynamic",
"link_library_static",
)
load(":private/actions/package.bzl", "package")
load(":private/actions/repl.bzl", "build_haskell_repl")
load(":private/actions/runghc.bzl", "build_haskell_runghc")
load(":private/context.bzl", "haskell_context")
load(":private/dependencies.bzl", "gather_dep_info")
load(":private/java.bzl", "java_interop_info")
load(":private/mode.bzl", "is_profiling_enabled")
load(
":private/path_utils.bzl",
"ln",
"match_label",
"parse_pattern",
"target_unique_name",
)
load(":private/pkg_id.bzl", "pkg_id")
load(":private/set.bzl", "set")
load(":private/version_macros.bzl", "generate_version_macros")
load(":providers.bzl", "GhcPluginInfo", "HaskellCoverageInfo")
load("@bazel_skylib//lib:paths.bzl", "paths")
load("@bazel_skylib//lib:collections.bzl", "collections")
load("@bazel_skylib//lib:shell.bzl", "shell")
def _prepare_srcs(srcs):
srcs_files = []
import_dir_map = {}
for src in srcs:
# If it has the "files" attribute, it must be a Target
if hasattr(src, "files"):
if C2hsLibraryInfo in src:
srcs_files += src.files.to_list()
for f in src.files.to_list():
import_dir_map[f] = src[C2hsLibraryInfo].import_dir
else:
srcs_files += src.files.to_list()
# otherwise it's just a file
else:
srcs_files.append(src)
return srcs_files, import_dir_map
def haskell_test_impl(ctx):
return _haskell_binary_common_impl(ctx, is_test = True)
def haskell_binary_impl(ctx):
return _haskell_binary_common_impl(ctx, is_test = False)
def _should_inspect_coverage(ctx, hs, is_test):
return hs.coverage_enabled and is_test
def _coverage_enabled_for_target(coverage_source_patterns, label):
for pat in coverage_source_patterns:
if match_label(pat, label):
return True
return False
# Mix files refer to genfile srcs including their root. Therefore, we
# must condition the src filepaths passed in for coverage to match.
def _condition_coverage_src(hs, src):
if not src.path.startswith(hs.genfiles_dir.path):
return src
""" Genfiles have the genfile directory as part of their path,
so declaring a file with the same path actually makes the new
file double-qualified by the genfile directory.
This is necessary because mix files capture the genfile
path before compilation, and then expect those files to be
qualified by the genfile directory when `hpc report` or
`hpc markup` are used. But, genfiles included as runfiles
are no longer qualified. So, double-qualifying them results in
only one level of qualification as runfiles.
"""
conditioned_src = hs.actions.declare_file(src.path)
hs.actions.run_shell(
inputs = [src],
outputs = [conditioned_src],
arguments = [
src.path,
conditioned_src.path,
],
command = """
mkdir -p $(dirname "$2") && cp "$1" "$2"
""",
)
return conditioned_src
def _haskell_binary_common_impl(ctx, is_test):
hs = haskell_context(ctx)
dep_info = gather_dep_info(ctx, ctx.attr.deps)
plugin_dep_info = gather_dep_info(
ctx,
[dep for plugin in ctx.attr.plugins for dep in plugin[GhcPluginInfo].deps],
)
# Add any interop info for other languages.
cc = cc_interop_info(ctx)
java = java_interop_info(ctx)
with_profiling = is_profiling_enabled(hs)
srcs_files, import_dir_map = _prepare_srcs(ctx.attr.srcs)
inspect_coverage = _should_inspect_coverage(ctx, hs, is_test)
c = hs.toolchain.actions.compile_binary(
hs,
cc,
java,
dep_info,
plugin_dep_info,
srcs = srcs_files,
ls_modules = ctx.executable._ls_modules,
import_dir_map = import_dir_map,
extra_srcs = depset(ctx.files.extra_srcs),
user_compile_flags = ctx.attr.compiler_flags,
dynamic = False if hs.toolchain.is_windows else not ctx.attr.linkstatic,
with_profiling = False,
main_function = ctx.attr.main_function,
version = ctx.attr.version,
inspect_coverage = inspect_coverage,
plugins = ctx.attr.plugins,
)
# gather intermediary code coverage instrumentation data
coverage_data = c.coverage_data
for dep in ctx.attr.deps:
if HaskellCoverageInfo in dep:
coverage_data += dep[HaskellCoverageInfo].coverage_data
c_p = None
if with_profiling:
c_p = hs.toolchain.actions.compile_binary(
hs,
cc,
java,
dep_info,
plugin_dep_info,
srcs = srcs_files,
ls_modules = ctx.executable._ls_modules,
import_dir_map = import_dir_map,
# NOTE We must make the object files compiled without profiling
# available to this step for TH to work, presumably because GHC is
# linked against RTS without profiling.
extra_srcs = depset(transitive = [
depset(ctx.files.extra_srcs),
depset([c.objects_dir]),
]),
user_compile_flags = ctx.attr.compiler_flags,
# NOTE We can't have profiling and dynamic code at the
# same time, see:
# https://ghc.haskell.org/trac/ghc/ticket/15394
dynamic = False,
with_profiling = True,
main_function = ctx.attr.main_function,
version = ctx.attr.version,
plugins = ctx.attr.plugins,
)
(binary, solibs) = link_binary(
hs,
cc,
dep_info,
ctx.files.extra_srcs,
ctx.attr.compiler_flags,
c_p.objects_dir if with_profiling else c.objects_dir,
dynamic = False if hs.toolchain.is_windows else not ctx.attr.linkstatic,
with_profiling = with_profiling,
version = ctx.attr.version,
)
hs_info = HaskellInfo(
package_ids = dep_info.package_ids,
package_databases = dep_info.package_databases,
version_macros = set.empty(),
source_files = c.source_files,
extra_source_files = c.extra_source_files,
import_dirs = c.import_dirs,
static_libraries = dep_info.static_libraries,
static_libraries_prof = dep_info.static_libraries_prof,
dynamic_libraries = dep_info.dynamic_libraries,
interface_dirs = dep_info.interface_dirs,
compile_flags = c.compile_flags,
prebuilt_dependencies = dep_info.prebuilt_dependencies,
cc_dependencies = dep_info.cc_dependencies,
transitive_cc_dependencies = dep_info.transitive_cc_dependencies,
)
cc_info = cc_common.merge_cc_infos(
cc_infos = [dep[CcInfo] for dep in ctx.attr.deps if CcInfo in dep],
)
target_files = depset([binary])
build_haskell_repl(
hs,
ghci_script = ctx.file._ghci_script,
ghci_repl_wrapper = ctx.file._ghci_repl_wrapper,
user_compile_flags = ctx.attr.compiler_flags,
repl_ghci_args = ctx.attr.repl_ghci_args,
output = ctx.outputs.repl,
package_databases = dep_info.package_databases,
version = ctx.attr.version,
hs_info = hs_info,
)
# XXX Temporary backwards compatibility hack. Remove eventually.
# See https://github.com/tweag/rules_haskell/pull/460.
ln(hs, ctx.outputs.repl, ctx.outputs.repl_deprecated)
build_haskell_runghc(
hs,
runghc_wrapper = ctx.file._ghci_repl_wrapper,
extra_args = ctx.attr.runcompile_flags,
user_compile_flags = ctx.attr.compiler_flags,
output = ctx.outputs.runghc,
package_databases = dep_info.package_databases,
version = ctx.attr.version,
hs_info = hs_info,
)
executable = binary
extra_runfiles = []
if inspect_coverage:
binary_path = paths.join(ctx.workspace_name, binary.short_path)
hpc_path = paths.join(ctx.workspace_name, hs.toolchain.tools.hpc.short_path)
tix_file_path = hs.label.name + ".tix"
mix_file_paths = [
paths.join(ctx.workspace_name, datum.mix_file.short_path)
for datum in coverage_data
]
mix_file_paths = collections.uniq(mix_file_paths) # remove duplicates
# find which modules to exclude from coverage analysis, by using the specified source patterns
raw_coverage_source_patterns = ctx.attr.experimental_coverage_source_patterns
coverage_source_patterns = [parse_pattern(ctx, pat) for pat in raw_coverage_source_patterns]
modules_to_exclude = [paths.split_extension(datum.mix_file.basename)[0] for datum in coverage_data if not _coverage_enabled_for_target(coverage_source_patterns, datum.target_label)]
modules_to_exclude = collections.uniq(modules_to_exclude) # remove duplicates
expected_covered_expressions_percentage = ctx.attr.expected_covered_expressions_percentage
expected_uncovered_expression_count = ctx.attr.expected_uncovered_expression_count
strict_coverage_analysis = ctx.attr.strict_coverage_analysis
coverage_report_format = ctx.attr.coverage_report_format
if coverage_report_format != "text" and coverage_report_format != "html":
fail("""haskell_test attribute "coverage_report_format" must be one of "text" or "html".""")
wrapper = hs.actions.declare_file("{}_coverage/coverage_wrapper.sh".format(ctx.label.name))
ctx.actions.expand_template(
template = ctx.file._coverage_wrapper_template,
output = wrapper,
substitutions = {
"{binary_path}": shell.quote(binary_path),
"{hpc_path}": shell.quote(hpc_path),
"{tix_file_path}": shell.quote(tix_file_path),
"{expected_covered_expressions_percentage}": str(expected_covered_expressions_percentage),
"{expected_uncovered_expression_count}": str(expected_uncovered_expression_count),
"{mix_file_paths}": shell.array_literal(mix_file_paths),
"{modules_to_exclude}": shell.array_literal(modules_to_exclude),
"{strict_coverage_analysis}": str(strict_coverage_analysis),
"{coverage_report_format}": shell.quote(ctx.attr.coverage_report_format),
"{package_path}": shell.quote(ctx.label.package),
},
is_executable = True,
)
executable = wrapper
mix_runfiles = [datum.mix_file for datum in coverage_data]
srcs_runfiles = [_condition_coverage_src(hs, datum.src_file) for datum in coverage_data]
extra_runfiles = [
ctx.file._bash_runfiles,
hs.toolchain.tools.hpc,
binary,
] + mix_runfiles + srcs_runfiles
return [
hs_info,
cc_info,
DefaultInfo(
executable = executable,
files = target_files,
runfiles = ctx.runfiles(
files =
solibs +
extra_runfiles,
collect_data = True,
),
),
]
def haskell_library_impl(ctx):
hs = haskell_context(ctx)
dep_info = gather_dep_info(ctx, ctx.attr.deps)
plugin_dep_info = gather_dep_info(
ctx,
[dep for plugin in ctx.attr.plugins for dep in plugin[GhcPluginInfo].deps],
)
version = ctx.attr.version if ctx.attr.version else None
my_pkg_id = pkg_id.new(ctx.label, version)
with_profiling = is_profiling_enabled(hs)
with_shared = False if hs.toolchain.is_windows else not ctx.attr.linkstatic
# Add any interop info for other languages.
cc = cc_interop_info(ctx)
java = java_interop_info(ctx)
srcs_files, import_dir_map = _prepare_srcs(ctx.attr.srcs)
other_modules = ctx.attr.hidden_modules
exposed_modules_reexports = _exposed_modules_reexports(ctx.attr.exports)
c = hs.toolchain.actions.compile_library(
hs,
cc,
java,
dep_info,
plugin_dep_info,
srcs = srcs_files,
ls_modules = ctx.executable._ls_modules,
other_modules = other_modules,
exposed_modules_reexports = exposed_modules_reexports,
import_dir_map = import_dir_map,
extra_srcs = depset(ctx.files.extra_srcs),
user_compile_flags = ctx.attr.compiler_flags,
with_shared = with_shared,
with_profiling = False,
my_pkg_id = my_pkg_id,
plugins = ctx.attr.plugins,
)
c_p = None
if with_profiling:
c_p = hs.toolchain.actions.compile_library(
hs,
cc,
java,
dep_info,
plugin_dep_info,
srcs = srcs_files,
ls_modules = ctx.executable._ls_modules,
other_modules = other_modules,
exposed_modules_reexports = exposed_modules_reexports,
import_dir_map = import_dir_map,
# NOTE We must make the object files compiled without profiling
# available to this step for TH to work, presumably because GHC is
# linked against RTS without profiling.
extra_srcs = depset(transitive = [
depset(ctx.files.extra_srcs),
depset([c.objects_dir]),
]),
user_compile_flags = ctx.attr.compiler_flags,
# NOTE We can't have profiling and dynamic code at the
# same time, see:
# https://ghc.haskell.org/trac/ghc/ticket/15394
with_shared = False,
with_profiling = True,
my_pkg_id = my_pkg_id,
plugins = ctx.attr.plugins,
)
static_library = link_library_static(
hs,
cc,
dep_info,
c.objects_dir,
my_pkg_id,
with_profiling = False,
)
if with_shared:
dynamic_library = link_library_dynamic(
hs,
cc,
dep_info,
depset(ctx.files.extra_srcs),
c.objects_dir,
my_pkg_id,
)
dynamic_libraries = set.insert(
dep_info.dynamic_libraries,
dynamic_library,
)
else:
dynamic_library = None
dynamic_libraries = dep_info.dynamic_libraries
static_library_prof = None
if with_profiling:
static_library_prof = link_library_static(
hs,
cc,
dep_info,
c_p.objects_dir,
my_pkg_id,
with_profiling = True,
)
conf_file, cache_file = package(
hs,
dep_info,
c.interfaces_dir,
c_p.interfaces_dir if c_p != None else None,
static_library,
dynamic_library,
c.exposed_modules_file,
other_modules,
my_pkg_id,
static_library_prof = static_library_prof,
)
static_libraries_prof = dep_info.static_libraries_prof
if static_library_prof != None:
static_libraries_prof = [static_library_prof] + dep_info.static_libraries_prof
interface_dirs = set.union(
dep_info.interface_dirs,
set.singleton(c.interfaces_dir),
)
if c_p != None:
interface_dirs = set.mutable_union(
interface_dirs,
set.singleton(c_p.interfaces_dir),
)
version_macros = set.empty()
if version != None:
version_macros = set.singleton(
generate_version_macros(ctx, hs.name, version),
)
hs_info = HaskellInfo(
package_ids = set.insert(dep_info.package_ids, pkg_id.to_string(my_pkg_id)),
package_databases = set.insert(dep_info.package_databases, cache_file),
version_macros = version_macros,
source_files = c.source_files,
extra_source_files = c.extra_source_files,
import_dirs = c.import_dirs,
# NOTE We have to use lists for static libraries because the order is
# important for the linker. The linker searches for unresolved symbols to the
# left, i.e. you first feed a library which has unresolved symbols and
# then you feed the library which resolves those symbols.
static_libraries = [static_library] + dep_info.static_libraries,
static_libraries_prof = static_libraries_prof,
dynamic_libraries = dynamic_libraries,
interface_dirs = interface_dirs,
compile_flags = c.compile_flags,
prebuilt_dependencies = dep_info.prebuilt_dependencies,
cc_dependencies = dep_info.cc_dependencies,
transitive_cc_dependencies = dep_info.transitive_cc_dependencies,
)
lib_info = HaskellLibraryInfo(
package_id = pkg_id.to_string(my_pkg_id),
version = version,
)
dep_coverage_data = []
for dep in ctx.attr.deps:
if HaskellCoverageInfo in dep:
dep_coverage_data += dep[HaskellCoverageInfo].coverage_data
coverage_info = HaskellCoverageInfo(
coverage_data = dep_coverage_data + c.coverage_data,
)
target_files = depset([file for file in [static_library, dynamic_library] if file])
if hasattr(ctx, "outputs"):
build_haskell_repl(
hs,
ghci_script = ctx.file._ghci_script,
ghci_repl_wrapper = ctx.file._ghci_repl_wrapper,
repl_ghci_args = ctx.attr.repl_ghci_args,
user_compile_flags = ctx.attr.compiler_flags,
output = ctx.outputs.repl,
package_databases = dep_info.package_databases,
version = ctx.attr.version,
hs_info = hs_info,
lib_info = lib_info,
)
# XXX Temporary backwards compatibility hack. Remove eventually.
# See https://github.com/tweag/rules_haskell/pull/460.
ln(hs, ctx.outputs.repl, ctx.outputs.repl_deprecated)
build_haskell_runghc(
hs,
runghc_wrapper = ctx.file._ghci_repl_wrapper,
extra_args = ctx.attr.runcompile_flags,
user_compile_flags = ctx.attr.compiler_flags,
output = ctx.outputs.runghc,
package_databases = dep_info.package_databases,
version = ctx.attr.version,
hs_info = hs_info,
lib_info = lib_info,
)
default_info = None
if hasattr(ctx, "runfiles"):
default_info = DefaultInfo(
files = target_files,
runfiles = ctx.runfiles(collect_data = True),
)
else:
default_info = DefaultInfo(
files = target_files,
)
# Create a CcInfo provider so that CC rules can work with
# a haskell library as if it was a regular CC one.
# XXX Workaround https://github.com/bazelbuild/bazel/issues/6874.
# Should be find_cpp_toolchain() instead.
cc_toolchain = ctx.attr._cc_toolchain[cc_common.CcToolchainInfo]
feature_configuration = cc_common.configure_features(
cc_toolchain = cc_toolchain,
requested_features = ctx.features,
unsupported_features = ctx.disabled_features,
)
library_to_link = cc_common.create_library_to_link(
actions = ctx.actions,
feature_configuration = feature_configuration,
dynamic_library = dynamic_library,
static_library = static_library,
cc_toolchain = cc_toolchain,
)
compilation_context = cc_common.create_compilation_context()
linking_context = cc_common.create_linking_context(
libraries_to_link = [library_to_link],
)
cc_info = cc_common.merge_cc_infos(
cc_infos = [
CcInfo(
compilation_context = compilation_context,
linking_context = linking_context,
),
] + [dep[CcInfo] for dep in ctx.attr.deps if CcInfo in dep],
)
return [
hs_info,
cc_info,
coverage_info,
default_info,
lib_info,
]
def haskell_toolchain_library_impl(ctx):
hs = haskell_context(ctx)
if ctx.attr.package:
package = ctx.attr.package
else:
package = ctx.label.name
id_file = hs.actions.declare_file(target_unique_name(hs, "id"))
hs.actions.run_shell(
inputs = [hs.tools.ghc_pkg],
outputs = [id_file],
command = """
"$1" --simple-output -v1 field "$2" id > "$3"
""",
arguments = [
hs.tools.ghc_pkg.path,
package,
id_file.path,
],
)
version_macros_file = hs.actions.declare_file("{}_version_macros.h".format(hs.name))
hs.actions.run_shell(
inputs = [hs.tools.ghc_pkg, ctx.executable._version_macros],
outputs = [version_macros_file],
command = """
"$1" \\
`"$2" --simple-output -v1 field "$3" name` \\
`"$2" --simple-output -v1 field "$3" version` \\
> "$4"
""",
arguments = [
ctx.executable._version_macros.path,
hs.tools.ghc_pkg.path,
package,
version_macros_file.path,
],
)
prebuilt_package_info = HaskellPrebuiltPackageInfo(
package = package,
id_file = id_file,
version_macros_file = version_macros_file,
)
return [prebuilt_package_info]
def _exposed_modules_reexports(exports):
"""Creates a ghc-pkg-compatible list of reexport declarations.
A ghc-pkg registration file declares reexports as part of the
exposed-modules field in the following format:
exposed-modules: A, B, C from pkg-c:C, D from pkg-d:Original.D
Here, the Original.D module from pkg-d is renamed by virtue of a
different name being used before the "from" keyword.
This function creates a ghc-pkg-compatible list of reexport declarations
(as shown above) from a dictionary mapping package targets to "Cabal-style"
reexported-modules declarations. That is, something like:
{
":pkg-c": "C",
":pkg-d": "Original.D as D",
":pkg-e": "E1, Original.E2 as E2",
}
Args:
exports: a dictionary mapping package targets to "Cabal-style"
reexported-modules declarations.
Returns:
a ghc-pkg-compatible list of reexport declarations.
"""
exposed_reexports = []
for dep, cabal_decls in exports.items():
for cabal_decl in cabal_decls.split(","):
stripped_cabal_decl = cabal_decl.strip()
cabal_decl_parts = stripped_cabal_decl.split(" as ")
original = cabal_decl_parts[0]
if len(cabal_decl_parts) == 2:
reexported = cabal_decl_parts[1]
else:
reexported = cabal_decl_parts[0]
if HaskellPrebuiltPackageInfo in dep:
pkg = dep[HaskellPrebuiltPackageInfo].package
elif HaskellLibraryInfo in dep:
pkg = dep[HaskellLibraryInfo].package_id
exposed_reexport = "{reexported} from {pkg}:{original}".format(
reexported = reexported,
pkg = pkg,
original = original,
)
exposed_reexports.append(exposed_reexport)
return exposed_reexports
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Import Pygments Package
from pygments.lexers.sql import SqlLexer
# Import PromptToolkit Package
from prompt_toolkit import PromptSession
from prompt_toolkit.completion import WordCompleter
from prompt_toolkit.lexers import PygmentsLexer
from prompt_toolkit.styles import Style
import re
# Import Provider Packages
# Import Sys Package
import sys
from prompt_toolkit.validation import Validator
# Importing Custom Logger & Logging Modules
# from core.logger.custom_logger import CustomLogger
# from core.logger.constants.custom_verbose_levels import VERBOSE, FATAL
# from logging import INFO, DEBUG, WARNING
# import logging
gnosis_safe_cli_completer = WordCompleter([
'safe_addr', 'add', 'after', 'all', 'before', 'check', 'current_date',
'current_time', 'current_timestamp', 'default',
'delete','exit', 'quit', 'without'], ignore_case=True)
style = Style.from_dict({
'completion-menu.completion': 'bg:#008888 #ffffff',
'completion-menu.completion.current': 'bg:#00aaaa #000000',
'scrollbar.background': 'bg:#88aaaa',
'scrollbar.button': 'bg:#222222',
})
def eval_function_old(param, param_type):
""" Eval Function (Deprecated)
isOwner 0xe982E462b094850F12AF94d21D470e21bE9D0E9C
:param param:
:param param_type:
:return:
"""
try:
splitted_input = param.split(' ')
except TypeError:
pass
else:
try:
print(splitted_input)
if len(splitted_input[1][2:]) != 40:
print('launch error, address must be a 40-character alphanumeric hash')
else:
re.search('0x[0-9,aA-zZ]{40}', splitted_input[1]).group(0)
except IndexError:
print('there is not enough data to verify current input')
pass
return splitted_input[1]
def validate_byte_byte32_input(param, param_type):
""""""
# bytes32
return
def string_to_byte(data):
""" String To Byte (Hex)
:param data:
:return:
"""
if len(data) > 8:
byte8 = data[:8]
else:
byte8 = data.ljust(8, '0')
return bytes(byte8, 'utf-8')
def string_to_bytes32(data):
""" String To Bytes32 (Hex)
:param data:
:return:
"""
if len(data) > 32:
bytes32 = data[:32]
else:
bytes32 = data.ljust(32, '0')
return bytes(bytes32, 'utf-8')
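# Example: string_to_bytes32('owner') returns b'owner' right-padded with '0' characters
# up to 32 bytes, while inputs longer than 32 characters are truncated to the first 32.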
def validate_address_input(param):
""" Validate Address Input
:param param:
:return:
"""
try:
if '0x' in param:
# `param` is the raw address string here, so strip the '0x' prefix before checking its length
if len(param[2:]) == 40:
re.search('0x[0-9,aA-zZ]{40}', param).group(0)
return True, ''
return False, 'Not a valid address (Does not have 40 alphanumeric values).'
return False, 'Not a valid address (Does not start with 0x).'
except Exception as err:
print(err)
return False, 'Not a valid address (Unable to parse param).'
def validate_integer_input(param, param_type):
""" Validate Integer Input
:param param:
:param param_type:
:return:
"""
# use hex()
# address payable 160
# address 256
if param_type == 'uint8' and param <= 255:
return True, ''
elif param_type == 'uint16' and param <= 65535:
return True, ''
elif param_type == 'uint32' and param <= 4294967295:
return True, ''
elif param_type == 'uint64'and param <= 18446744073709551615:
return True, ''
elif param_type == 'uint128'and param <= 340282366920938463463374607431768211455:
return True, ''
elif param_type == 'uint160'and param <= 1461501637330902918203684832716283019655932542975:
return True, ''
elif param_type == 'uint256'and param <= 115792089237316195423570985008687907853269984665640564039457584007913129639935:
return True, ''
return False, 'Not a valid {0} (Does not fit the current type for the function input)'.format(param_type)
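# Example: validate_integer_input(300, 'uint8') -> (False, '...') because 300 > 255,
# whereas validate_integer_input(200, 'uint8') -> (True, '').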
def is_valid_address(text):
return '0x' in text
validator = Validator.from_callable(
is_valid_address, error_message='Not a valid address (Does not contain an 0x).', move_cursor_to_end=True
)
# Code Reference: https://github.com/prompt-toolkit/python-prompt-toolkit/tree/master/examples/prompts
# todo: Remove crappy code from the current class GnosisConsoleInput
class GnosisConsoleInput:
def run(self, session_completer=gnosis_safe_cli_completer, contract_interface=None, current_contract=None):
""" Gnosis Console Input
:param session_completer:
:param contract_interface:
:param current_contract:
:return:
"""
session = PromptSession(completer=session_completer, style=style)
while True:
try:
# text = session.prompt('(gnosis-safe-cli)> ', validator=validator, validate_while_typing=True)
# text = session.prompt('(gnosis-safe-cli)> ', validator=validator, validate_while_typing=False)
current_function_call = ''
text = session.prompt('(gnosis-safe-cli)> ')
try:
for item in contract_interface:
if contract_interface[item]['function_name'] in text:
current_function_call = contract_interface[item]['function_call']
print('Contract Call to: %s' % current_function_call)
# Todo: remove this piece of rough proof-of-concept code, it is only here to showcase early functionality
splitted_input = len(text.split(' '))
if splitted_input == 1:
print(eval(current_function_call)())
else:
for data in contract_interface[item]['function_input']:
try:
function_schema = contract_interface[item]['function_call_clean']
# Todo: base the eval process on a list of input validations, based on the function_input stored in the current dict for the contract interface
params = '\'' + eval_function_old(text, data) + '\''  # pass the expected input type so the call matches eval_function_old's signature
current_function = function_schema.format(contract_interface[item]['function_name'], params)
print(function_schema.format(contract_interface[item]['function_name'], params))
print(eval(current_function)())
except Exception as err:
print(err)
except Exception as err: # KeyError
print(err)
continue
if text == 'exit':
raise EOFError
elif text == 'quit':
raise EOFError
except KeyboardInterrupt:
continue # Control-C pressed. Try again.
except EOFError:
break # Control-D pressed.
print('GoodBye!')
|
# Copyright 2020 Robin Scheibler
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This script generates figure 3 in the paper
"""
import argparse
import json
import os
import sys
import warnings
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib.ticker import MaxNLocator
from data_loader import load_data
from plot_config import seaborn_config
matplotlib.rc("pdf", fonttype=42)
if __name__ == "__main__":
# parse arguments
parser = argparse.ArgumentParser(
description="Plot the data simulated by separake_near_wall"
)
parser.add_argument(
"-p",
"--pickle",
action="store_true",
help="Read the aggregated data table from a pickle cache",
)
parser.add_argument(
"-s",
"--show",
action="store_true",
help="Display the plots at the end of data analysis",
)
parser.add_argument(
"--pca", action="store_true", help="Plot results for PCA initialization",
)
parser.add_argument(
"dirs",
type=str,
nargs="+",
metavar="DIR",
help="The directory containing the simulation output files.",
)
cli_args = parser.parse_args()
plot_flag = cli_args.show
pickle_flag = cli_args.pickle
df_all_iters, final_val_tbl, conv_tbl, rt60, parameters = load_data(
cli_args.dirs, pickle=pickle_flag
)
# in this script, we only care about the final values
df = final_val_tbl
# Draw the figure
print("Plotting...")
df_melt = df.melt(id_vars=df.columns[:-5], var_name="metric")
# df_melt = df_melt.replace(substitutions)
# Aggregate the convergence curves
df_agg = (
df_melt.groupby(
by=[
"Algorithm",
"Sources",
"Interferers",
"SINR",
"Mics",
"Iteration",
"metric",
]
)
.mean()
.reset_index()
)
if cli_args.pca:
pca_str = " (PCA)"
else:
pca_str = ""
all_algos = [
"IVA-NG" + pca_str,
"FastIVA" + pca_str,
"AuxIVA-IP" + pca_str,
"AuxIVA-ISS" + pca_str,
"AuxIVA-IP2" + pca_str,
"AuxIVA-IPA" + pca_str,
]
seaborn_config(n_colors=len(all_algos), style="whitegrid")
if not os.path.exists("figures"):
os.mkdir("figures")
fig_dir = "figures/{}_{}_{}".format(
parameters["name"], parameters["_date"], parameters["_git_sha"]
)
if not os.path.exists(fig_dir):
os.mkdir(fig_dir)
plt_kwargs = {
"improvements": {
"yticks": [[-10, 0, 10, 20], [-10, 0, 10, 20], [0, 10, 20, 30]],
},
"raw": {
"yticks": [[-20, -10, 0, 10, 20], [-10, 0, 10, 20], [-10, 0, 10, 20, 30]],
},
}
full_width = 6.93 # inches, == 17.6 cm, double column width
half_width = 3.35 # inches, == 8.5 cm, single column width
# Third figure
# Classic # of microphones vs metric (box-plots ?)
the_metrics = {
"improvements": ["\u0394SI-SDR [dB]", "\u0394SI-SIR [dB]"],
"raw": ["SI-SDR [dB]", "SI-SIR [dB]"],
}
# width = aspect * height
aspect = 1.2 # width / height
height = (half_width / 2) / aspect
# height = 1.6
iteration_index = 12
n_interferers = 0
# first we need to pair the algorithm with their maximum number of iterations
for m_name in the_metrics.keys():
metric = the_metrics[m_name]
select = (
# np.logical_or(df_melt["Iteration"] == 100, df_melt["Iteration"] == 2000) &
(df_melt["Interferers"] == n_interferers)
& df_melt.metric.isin(metric)
)
fig = plt.figure()
g = sns.catplot(
data=df_melt[select],
x="Mics",
y="value",
hue="Algorithm",
row="SINR",
col="metric",
col_order=metric,
hue_order=all_algos,
kind="box",
legend=False,
aspect=aspect,
height=height,
linewidth=0.5,
fliersize=0.3,
# whis=np.inf,
sharey="row",
# size=3, aspect=0.65,
margin_titles=True,
)
g.set(clip_on=False)
# remove original titles before adding custom ones
[plt.setp(ax.texts, text="") for ax in g.axes.flat]
g.set_titles(col_template="{col_name}", row_template="SNR {row_name} dB")
g.set_ylabels("Decibels")
g.set_xlabels("# channels")
# remove the white background on the margin titles on the right
for the_ax in g.axes.flat:
plt.setp(the_ax.texts, bbox=dict(alpha=0.0)) # , fontsize="large")
plt.setp(the_ax.title, bbox=dict(alpha=0.0)) # , fontsize="large")
all_artists = []
leg_handles = {}
for r in range(3):
for c, _ in enumerate(metric):
if m_name in plt_kwargs and r < len(plt_kwargs[m_name]["yticks"]):
g.facet_axis(r, c).set_yticks(plt_kwargs[m_name]["yticks"][r])
handles, labels = g.facet_axis(r, c).get_legend_handles_labels()
for lbl, hand in zip(labels, handles):
if lbl not in leg_handles:
if lbl.endswith(" (PCA)"):
lbl = lbl[:-6]
leg_handles[lbl] = hand
sns.despine(offset=10, trim=False, left=True, bottom=True)
g.fig.tight_layout()
left_ax = g.facet_axis(0, 0)
leg = g.fig.legend(
leg_handles.values(),
leg_handles.keys(),
title="",
frameon=False,
framealpha=0.85,
fontsize="xx-small",
loc="upper center",
bbox_to_anchor=[0.5, 1.01],
ncol=len(all_algos),
)
# leg.get_frame().set_linewidth(0.0)
all_artists.append(leg)
g.fig.align_ylabels()
g.fig.subplots_adjust(top=0.90)
"""
for c, lbl in enumerate(metric):
g_ax = g.facet_axis(0, c)
g_ax.set_ylabel(lbl)
"""
for ext in ["pdf", "png"]:
pca_fn_str = "_pca" if cli_args.pca else ""
fig_fn = os.path.join(
fig_dir, f"figure3_{m_name}_interf{n_interferers}{pca_fn_str}.{ext}"
)
plt.savefig(
fig_fn, bbox_extra_artists=all_artists
) # , bbox_inches="tight")
plt.close()
if plot_flag:
plt.show()
|
from .emailutil import *
|
import collections
import functools
import logging
import uuid
from ..collections.attrdict import AttrDict
from ..utils import perf
from .run_state import RunState
log = logging.getLogger(__name__)
class Entity(int):
"""Entity is just and integer ID.
All data related to given Entity is stored in ComponentManagers.
"""
__slots__ = ()
def __new__(cls, entity_id=None):
if entity_id is None:
entity_id = uuid.uuid4().int
return super().__new__(cls, entity_id)
@property
def short_id(self):
hex = '%032x' % self
return hex[:8]
def __str__(self):
return self.short_id
def __repr__(self):
hex = '%032x' % self
return '<Entity id="%s-%s-%s-%s-%s">' % (
hex[:8], hex[8:12], hex[12:16], hex[16:20], hex[20:])
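# Example: Entity() draws a random 128-bit id, so str(entity) prints its first 8 hex
# digits, while Entity(42) wraps an explicit id; both behave as plain ints.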
class Component:
"""Component that holds some data.
Components should provide only setters and getters.
All logic should be handled in Systems!
params - values that are used by constructor and serialization
"""
__slots__ = ()
params = None
@property
def name(self):
return f'{self.__class__.__name__}'
@property
def qualname(self):
return f'{self.__class__.__module__}.{self.name}'
@property
def parameters(self):
parameters = self.params
if parameters is None:
parameters = self.__slots__
return parameters
def __reduce__(self):
data = []
for param in self.parameters:
data.append(getattr(self, param))
return data
def serialize(self):
data = {}
for param in self.parameters:
data[param] = getattr(self, param)
return data
def __repr__(self):
param_values = [(param, getattr(self, param)) for param in self.parameters]
if not param_values:
return f'<{self.name}={super().__repr__()}>'
else:
param_values_txt = ', '.join([f'{param}={value!r}' for param, value in param_values])
return f'<{self.name} {param_values_txt}>'
class JoinIterator:
"""Iterate through values of combined managers.
Only entities / components that are present in all provided managers are returned.
"""
__slots__ = ('ignore', 'managers', )
def __init__(self, ignore, *managers):
self.ignore = ignore
self.managers = managers
def __iter__(self):
if not all(self.managers):
return
entities = set.intersection(*[
manager.entities
for manager in self.managers
# NOTE: No need to intersect with ALL entities
if not manager is self.ignore
])
# TODO: Consider filtering out FlagComponent no need to have element that is always True
for entity in entities:
values = [
manager.get(entity)
for manager in self.managers
]
yield values
class JoinableManager:
"""Entity to value manager that can be used in JoinIterator."""
__slots__ = ('entities', '_values', )
def __init__(self):
# Keeping separate entities set and key-value dict gives slight performance boost.
self.entities = EntitiesSet()
self._values = {} # = {entity: value, }
def __len__(self):
return len(self._values)
def __contains__(self, entity):
return entity in self.entities
def get(self, entity, default=None):
return self._values.get(entity, default)
def __iter__(self):
"""Yield (entity, component) pairs.
If you only want entities - iterate over manager.entities instead
"""
# NOTE: Iterating over values dict directly boosted performance significantly!
yield from self._values.items()
def insert(self, entity, value):
self.entities.add(entity)
self._values[entity] = value
def discard(self, entity):
self.entities.discard(entity)
self._values.pop(entity, None)
def remove(self, *entities):
for entity in entities:
self.discard(entity)
def clear(self):
self.entities.clear()
self._values.clear()
class EntitiesSet(set):
"""Entities container, compatibile with JoinManager interface."""
def get(self, entity):
# NOTE: It's assumed that get() is called inside JoinIterator that already knows if entity is present
return entity
@property
def entities(self):
return self
class ComponentManager(JoinableManager):
__slots__ = ('component_type', )
def __init__(self, component_type):
super().__init__()
self.component_type = component_type
def insert(self, entity, *args, component=None, **kwargs):
if component is not None and not isinstance(component, self.component_type):
raise ValueError('Invalid component type!')
if component is None:
component = self.component_type(*args, **kwargs)
super().insert(entity, component)
return component
def __repr__(self):
return f'<{self.__class__.__name__}({self.component_type.__name__})>'
class System:
INCLUDE_STATES = set()
EXCLUDE_STATES = set()
def __init__(self, ecs):
self.ecs = ecs
@functools.lru_cache
def should_run(self, state):
"""Return True if system should run with given RunState."""
if self.EXCLUDE_STATES and state in self.EXCLUDE_STATES:
return False
if self.INCLUDE_STATES and not state in self.INCLUDE_STATES:
return False
return True
def run(self):
return
def __repr__(self):
return f'<{self.__class__.__name__}>'
class SystemsManager:
def __init__(self):
self.systems = []
self.run_state = RunState.PRE_RUN # TODO: RENDER or WAIT_FOR_INPUT
self.next_run_state = None
def register(self, system):
self.systems.append(system)
def run(self):
# Run all systems that should run with given run_state
systems = []
for system in self.systems:
if not system.should_run(self.run_state):
continue
with perf.Perf(system.run):
system.run()
systems.append(system)
log.debug(f'systems.run({self.run_state.name}): {systems}')
# Change run_state AFTER running all systems
if self.next_run_state:
self.run_state = self.next_run_state
self.next_run_state = None
def __iter__(self):
yield from self.systems
class ResourcesManager(AttrDict):
pass
class ECS:
"""Entity Component System - data storage and processing.
Entity - simple ID for object
Component - data associated with given object, pure data
System - implements all logic of interaction between entities and their components
Resources - resources not associated with any entity
"""
def __init__(self):
self.entities = EntitiesSet()
self._components = {} # {component_type: ComponentManager(component_type), }
self._systems = SystemsManager()
self.resources = ResourcesManager()
@property
def run_state(self):
return self._systems.run_state
@run_state.setter
def run_state(self, run_state):
self._systems.next_run_state = run_state
def set_run_state(self, run_state):
log.warning('set_run_state() -> %s', run_state)
self.run_state = run_state
@property
def next_state(self):
return self._systems.next_run_state
def create(self, *components, entity_id=None):
"""Create Entity with given components."""
entity = Entity(entity_id)
self.entities.add(entity)
for component in components:
if component is None:
continue
component_manager = self.manage(component)
component_manager.insert(entity, component=component)
return entity
def manage(self, component_type):
"""Return ComponentManager for given Component."""
if isinstance(component_type, Component):
component_type = type(component_type)
component_manager = self._components.get(component_type)
if component_manager is None:
component_manager = ComponentManager(component_type)
self._components[component_type] = component_manager
return component_manager
def get_components(self, entity):
"""Return list of all components for given Entity."""
components = []
for component_manager in self._components.values():
component = component_manager.get(entity)
if component:
components.append(component)
return components
def remove(self, *entities):
"""Remove Entity."""
for entity in entities:
for component_manager in self._components.values():
component_manager.discard(entity)
self.entities.discard(entity)
def join(self, *managers):
"""Return iterator over values of multiple managers.
If you only want to iterate for entity, component pairs iterate over manager itself!
"""
yield from JoinIterator(self.entities, *managers)
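# Usage sketch (hypothetical Position/Velocity components):
#   positions = ecs.manage(Position)
#   velocities = ecs.manage(Velocity)
#   for position, velocity in ecs.join(positions, velocities):
#       position.x += velocity.dx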
def register(self, system):
"""Register System."""
self._systems.register(system)
def run_once(self, *args, **kwargs):
"""Run Systems for given RunState."""
with perf.Perf(self._systems.run):
self._systems.run(*args, **kwargs)
def run(self):
# Should be named join() as in Thread.join()
# but join() is already used for joining managers as in database query
while True:
self.run_once()
|
from api.lib.testutils import BaseTestCase
import api.companies.unittest
class TestReadCompany(BaseTestCase):
def test_read_company(self):
resp = self.app.get('/companies/exponential')
assert 'Palo Alto' in resp.data
if __name__ == "__main__":
api.companies.unittest.main()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from NodeGraphQt import QtWidgets, QtCore
from NodeGraphQt.constants import DRAG_DROP_ID
TYPE_NODE = QtWidgets.QTreeWidgetItem.UserType + 1
TYPE_CATEGORY = QtWidgets.QTreeWidgetItem.UserType + 2
class BaseNodeTreeItem(QtWidgets.QTreeWidgetItem):
def __eq__(self, other):
return id(self) == id(other)
class NodeTreeWidget(QtWidgets.QTreeWidget):
def __init__(self, parent=None):
super(NodeTreeWidget, self).__init__(parent)
self.setDragDropMode(QtWidgets.QAbstractItemView.DragOnly)
self.setHeaderHidden(True)
self._factory = None
self._custom_labels = {}
def __repr__(self):
return '<{} object at {}>'.format(self.__class__.__name__, hex(id(self)))
def mimeData(self, items):
node_ids = ','.join(i.toolTip(0) for i in items)
mime_data = super(NodeTreeWidget, self).mimeData(items)
mime_data.setText('<${}>:{}'.format(DRAG_DROP_ID, node_ids))
return mime_data
def _build_tree(self):
"""
Populate the node tree.
"""
self.clear()
categories = set()
node_types = {}
for name, node_ids in self._factory.names.items():
for nid in node_ids:
categories.add('.'.join(nid.split('.')[:-1]))
node_types[nid] = name
category_items = {}
for category in sorted(categories):
if category in self._custom_labels.keys():
label = self._custom_labels[category]
else:
label = '- {}'.format(category)
cat_item = BaseNodeTreeItem(self, [label], type=TYPE_CATEGORY)
cat_item.setFirstColumnSpanned(True)
cat_item.setFlags(QtCore.Qt.ItemIsEnabled)
self.addTopLevelItem(cat_item)
cat_item.setExpanded(True)
category_items[category] = cat_item
for node_id, node_name in node_types.items():
category = '.'.join(node_id.split('.')[:-1])
category_item = category_items[category]
item = BaseNodeTreeItem(category_item, [node_name], type=TYPE_NODE)
item.setToolTip(0, node_id)
category_item.addChild(item)
def set_node_factory(self, factory):
"""
Set current node factory.
Args:
factory (NodeFactory): node factory.
"""
self._factory = factory
def set_category_label(self, category, label):
"""
Set custom display label for a node category.
Args:
category (str): node identifier category eg. "nodeGraphQt.nodes"
label (str): custom display label.
"""
self._custom_labels[category] = label
def update(self):
"""
Update and refresh the node list widget.
"""
self._build_tree()
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: primitive.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='primitive.proto',
package='',
syntax='proto3',
serialized_options=b'Z\010pipeline',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x0fprimitive.proto\"[\n\tPrimitive\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0f\n\x07version\x18\x02 \x01(\t\x12\x13\n\x0bpython_path\x18\x03 \x01(\t\x12\x0c\n\x04name\x18\x04 \x01(\t\x12\x0e\n\x06\x64igest\x18\x05 \x01(\tB\nZ\x08pipelineb\x06proto3'
)
_PRIMITIVE = _descriptor.Descriptor(
name='Primitive',
full_name='Primitive',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='Primitive.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='version', full_name='Primitive.version', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='python_path', full_name='Primitive.python_path', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='Primitive.name', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='digest', full_name='Primitive.digest', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=19,
serialized_end=110,
)
DESCRIPTOR.message_types_by_name['Primitive'] = _PRIMITIVE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Primitive = _reflection.GeneratedProtocolMessageType('Primitive', (_message.Message,), {
'DESCRIPTOR' : _PRIMITIVE,
'__module__' : 'primitive_pb2'
# @@protoc_insertion_point(class_scope:Primitive)
})
_sym_db.RegisterMessage(Primitive)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
import argparse
import asyncio
import os
import socket
import tempfile
from pathlib import Path
from aiohttp import (
ClientSession,
ClientTimeout,
WSMessage,
)
from chia.cmds.init_funcs import create_all_ssl
from chia.protocols.protocol_message_types import ProtocolMessageTypes
from chia.protocols.shared_protocol import Handshake
from chia.protocols import wallet_protocol
from chia.server.outbound_message import Message, make_msg
from chia.server.server import ssl_context_for_client, NodeType
from chia.types.spend_bundle import SpendBundle
from chia.util.ints import uint16, uint8
DNS_INTRODUCER_HOSTNAME = "dns-introducer.chia.net"
def remote_host_ipv4():
r = socket.getaddrinfo(DNS_INTRODUCER_HOSTNAME, 8444)
for _ in set(r):
t = _[4][0]
if _[0] == socket.AddressFamily.AF_INET6:
t = f"[{t}]"
yield t
def make_ssl_path():
# wow, this code sucks, but it's mostly due to the code in the chia module
# not being very flexible
temp_dir = tempfile.TemporaryDirectory()
root_path = Path(temp_dir.name)
ssl_dir = root_path / "config" / "ssl"
os.makedirs(ssl_dir)
create_all_ssl(root_path)
# we have to keep `temp_dir` around because the contents
# are deleted when it's garbage-collected
return temp_dir, root_path
def get_ssl_context():
_temp_dir, root_path = make_ssl_path()
ssl_path = root_path / "config" / "ssl"
ca_path = ssl_path / "ca"
wallet_path = ssl_path / "wallet"
chia_ca_crt_path = ca_path / "chia_ca.crt"
chia_ca_key_path = ca_path / "chia_ca.key"
crt_path = wallet_path / "public_wallet.crt"
key_path = wallet_path / "public_wallet.key"
ssl_context = ssl_context_for_client(
chia_ca_crt_path, chia_ca_key_path, crt_path, key_path
)
# we have to keep `temp_dir` around because the contents
# are deleted when it's garbage-collected
ssl_context.temp_dir = _temp_dir
return ssl_context
async def push_tx(spend_bundle: SpendBundle):
ssl_context = get_ssl_context()
jobs = []
for remote_host in remote_host_ipv4():
job = asyncio.create_task(
push_tx_to_host(ssl_context, spend_bundle, remote_host, 8444)
)
jobs.append(job)
d = {}
while 1:
done, pending = await asyncio.wait(jobs, return_when=asyncio.FIRST_COMPLETED)
for t in done:
try:
rv = t.result()
except Exception as ex:
rv = str(ex)
d[rv] = d.setdefault(rv, 0) + 1
lp = len(pending)
d["pending"] = lp
if lp == 0:
del d["pending"]
s = ", ".join("%s: %d" % (k, v) for k, v in sorted(d.items()))
print(s)
if len(pending) == 0:
break
jobs = list(pending)
async def push_tx_to_host(
ssl_context, spend_bundle: SpendBundle, remote_host, remote_port
):
ws = None
session = None
try:
timeout = ClientTimeout(total=10)
session = ClientSession(timeout=timeout)
url = f"wss://{remote_host}:{remote_port}/ws"
# print(f"trying {url}")
ws = await session.ws_connect(
url,
autoclose=True,
autoping=True,
heartbeat=60,
ssl=ssl_context,
max_msg_size=100 * 1024 * 1024,
)
network_id = "mainnet"
protocol_version = "0.0.33"
chia_full_version_str = "1.0.0.0"
server_port = 1023
node_type = NodeType.WALLET
capabilities = [(1, "1")]
handshake = Handshake(
network_id,
protocol_version,
chia_full_version_str,
uint16(server_port),
uint8(node_type),
capabilities,
)
outbound_handshake = make_msg(ProtocolMessageTypes.handshake, handshake)
await ws.send_bytes(bytes(outbound_handshake))
response: WSMessage = await ws.receive()
# print(response)
data = response.data
full_message_loaded: Message = Message.from_bytes(data)
message_type = ProtocolMessageTypes(full_message_loaded.type).name
# print(message_type)
# print(full_message_loaded)
# breakpoint()
msg = make_msg(
ProtocolMessageTypes.send_transaction,
wallet_protocol.SendTransaction(spend_bundle),
)
await ws.send_bytes(bytes(msg))
rv = "failed"
while 1:
response: WSMessage = await ws.receive()
if response.type == 8: # WSMsgType.CLOSE
v = None
break
if response.type != 2: # WSMsgType.BINARY
v = None
break
# print(response)
data = response.data
full_message_loaded: Message = Message.from_bytes(data)
message_type = ProtocolMessageTypes(full_message_loaded.type).name
# print(message_type)
if str(message_type) == "transaction_ack":
v = wallet_protocol.TransactionAck.from_bytes(full_message_loaded.data)
# breakpoint()
ack_map = {
"ALREADY_INCLUDING_TRANSACTION": "included",
"DOUBLE_SPEND": "double-spend",
"NO_TRANSACTIONS_WHILE_SYNCING": "catching-up",
"ASSERT_SECONDS_RELATIVE_FAILED": "not-valid-yet",
}
msg = ack_map.get(v.error, v.error)
rv = f"ack.{msg}"
break
# print(full_message_loaded)
# print(v)
# breakpoint()
# print(v)
if rv == "ack.3":
print(v)
# breakpoint()
pass
await ws.close()
await session.close()
return rv
except Exception as ex:
if ws is not None:
await ws.close()
# breakpoint()
if session is not None:
await session.close()
exception_map = [
("Cannot connect to host", "no-connection"),
("ConnectionResetError", "reset"),
("TimeoutError", "timeout"),
("ClientConnectorError", "client-error"),
]
msg = repr(ex)
for s, r in exception_map:
if msg.startswith(s):
return r
print(f"unknown `msg`, consider diagnosing and adding code for this case")
print("Dropping into debugger; enter `c` to continue `pushtx`")
breakpoint()
return msg
def show_coins_spent(spend_bundle):
for coin_spend in spend_bundle.coin_spends:
coin = coin_spend.coin
print(f"spending coin id 0x{coin.name().hex()}")
async def async_main(args, parser):
spend_bundle = args.spend_bundle[0]
if args.debug:
spend_bundle.debug()
show_coins_spent(spend_bundle)
if not args.dry_run:
await push_tx(spend_bundle)
def spend_bundle_from_hex(h):
return SpendBundle.from_bytes(bytes.fromhex(h))
def create_parser():
parser = argparse.ArgumentParser(description="Process some integers.")
parser.add_argument(
"spend_bundle",
metavar="SPENDBUNDLE_HEX",
type=spend_bundle_from_hex,
nargs=1,
help="the `SpendBundle` as hex",
)
parser.add_argument(
"-d",
"--debug",
action="store_true",
help="show debug information for spendbundle",
)
parser.add_argument(
"-n",
"--dry-run",
action="store_true",
help="don't actually send `SpendBundle` to the network",
)
return parser
def main():
parser = create_parser()
args = parser.parse_args()
return asyncio.run(async_main(args, parser))
if __name__ == "__main__":
main()
|
from rubygems_utils import RubyGemsTestUtils
class RubyGemsTestrubygems_mixlib_config(RubyGemsTestUtils):
def test_gem_list_rubygems_mixlib_config(self):
self.gem_is_installed("mixlib-config")
|
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"label": _("Masters"),
"icon": "icon-star",
"items": [
{
"type": "doctype",
"name": "Territory",
"description": _(" "),
},
{
"type": "doctype",
"name": "Customer Group",
"description": _(" "),
},
{
"type": "doctype",
"name": "Customer",
"description": _(" "),
},
{
"type": "doctype",
"name": "Supplier Type",
"description": _(" "),
},
{
"type": "doctype",
"name": "Supplier",
"description": _(" "),
},
{
"type": "doctype",
"name": "Item Group",
"description": _(" "),
},
{
"type": "doctype",
"name": "Item",
"description": _(" "),
},
{
"type": "doctype",
"name": "Contact",
"description": _(" "),
},
{
"type": "doctype",
"name": "Address",
"description": _(" "),
},
{
"type": "doctype",
"name": "Price List",
"description": _(" "),
},
{
"type": "doctype",
"name": "Material Price List",
"description": _(" "),
}
]
},
{
"label": _("Documents"),
"icon": "icon-star",
"items": [
{
"type": "doctype",
"name": "Material Request",
"description": _(" "),
},
{
"type": "doctype",
"name": "Purchase Order",
"description": _(" "),
},
{
"type": "doctype",
"name": "Purchase Receipt",
"description": _(" "),
},
{
"type": "doctype",
"name": "Purchase Invoice",
"description": _(" "),
},
{
"type": "doctype",
"name": "Sales Invoice",
"description": _(" "),
},
{
"type": "doctype",
"name": "Delivery Note",
"description": _(" "),
},
{
"type": "doctype",
"name": "Stock Entry",
"description": _(" "),
},
{
"type": "doctype",
"name": "Payment Entry",
"description": _(" "),
},
{
"type": "doctype",
"name": "Journal Entry",
"description": _(" "),
}
]
},
{
"label": _("Village Level Collection Centre"),
"icon": "icon-star",
"items": [
{
"type": "page",
"name": "vlcc-dashboard",
"label": _("Vlcc Dashboard"),
"description": _(" "),
},
{
"type": "doctype",
"name": "Village Level Collection Centre",
"description": _(" "),
},
{
"type": "doctype",
"name": "Farmer",
"description": _(" "),
},
{
"type": "doctype",
"name": "Farmer Milk Collection Record",
"description": _(" "),
},
{
"type": "doctype",
"name": "Shift Ended Confirmation",
"description": _(" "),
},
{
"type": "doctype",
"name": "Veterinary AI Technician",
"description": _(" "),
}
]
},
{
"label": _("Reports"),
"icon": "icon-star",
"items": [
{
"type": "page",
"name": "smarterp_comparison_report",
"label": _("SmartAMCU - SmartERP Comparison Report"),
"description": _(" "),
},
{
"type": "report",
"name": "Milk Bill txt",
"label":_("Milk Bill Txt"),
"doctype": "Vlcc Milk Collection Record",
"is_query_report": True
},
{
"type": "report",
"name": "10 Days STMT",
"label":_("10 Days STMT"),
"doctype": "Vlcc Milk Collection Record",
"is_query_report": True
},
{
"type": "report",
"name": "Truck sheet",
"label":_("Truck sheet"),
"doctype": "Vlcc Milk Collection Record",
"is_query_report": True
},
{
"type": "report",
"name": "Milk Passbook",
"label":_("Milk Passbook"),
"doctype": "Farmer Milk Collection Record",
"is_query_report": True
},
{
"type": "report",
"name": "Local Sales Report",
"label": _("Local Sales Report"),
"doctype": "Sales Invoice",
"is_query_report": True
},
{
"type": "page",
"name": "individual_farmer_milk_report",
"label": _("Individual Farmer Milk Report"),
"description": _(" "),
},
{
"type": "page",
"name": "daily-milk-purchase",
"label": _("Daily Milk Purchase Report"),
"description": _(" "),
},
{
"type": "doctype",
"name": "Dairy Register",
"label": _("Dairy Register"),
"description": _(" "),
},
{
"type": "page",
"name": "mis_report",
"label": _("MIS Report"),
"description": _(" "),
},
{
"type": "report",
"name": "Cattle Feed Sales Report",
"label":_("Cattle Feed Sales Report"),
"doctype": "Sales Invoice",
"is_query_report": True
},
{
"type": "report",
"name": "Cattle Feed Advance Report",
"label":_("Cattle Feed Advance Report"),
"doctype": "Sales Invoice",
"is_query_report": True
},
{
"type": "report",
"name": "CC Report",
"label":_("CC Report"),
"doctype": "Vlcc Milk Collection Record",
"is_query_report": True
},
{
"type": "report",
"name": "Farmer Net Payoff",
"label": _("Farmer Net Payoff"),
"doctype": "Farmer",
"is_query_report": True
},
{
"type": "report",
"name": "Accounts Receivable",
"label": _("Accounts Receivable"),
"doctype": "Sales Invoice",
"is_query_report": True
},
{
"type": "report",
"name": "Accounts Payable",
"label": _("Accounts Payable"),
"doctype": "Purchase Invoice",
"is_query_report": True
},
{
"type": "report",
"name": "Profit and Loss Statement",
"label":_("Profit and Loss Statement"),
"doctype": "GL Entry",
"is_query_report": True
},
{
"type": "report",
"name": "Trial Balance",
"label": _("Trial Balance"),
"doctype": "GL Entry",
"is_query_report": True
},
{
"type": "report",
"name": "Balance Sheet",
"label":_("Balance Sheet"),
"doctype": "GL Entry",
"is_query_report": True
},
{
"type": "report",
"name": "Cash Flow",
"label":_("Cash Flow"),
"doctype": "GL Entry",
"is_query_report": True
},
{
"type": "report",
"name": "General Ledger",
"label":_("General Ledger"),
"doctype": "GL Entry",
"is_query_report": True
},
{
"type": "report",
"name": "Stock Ledger",
"label": _("Stock Ledger"),
"doctype": "Stock Ledger Entry",
"is_query_report": True
},
{
"type": "report",
"name": "Stock Balance",
"label": _("Stock Balance"),
"doctype": "Stock Ledger Entry",
"is_query_report": True
},
{
"type": "report",
"name": "Vlcc Net Pay Off",
"doctype": "Village Level Collection Centre",
"label": _("Vlcc Net Pay Off"),
"is_query_report": True
},
{
"type": "report",
"name": "VLCC Payment Settlement",
"label": _("VLCC Payment Settlement"),
"doctype": "VLCC Payment Cycle",
"is_query_report": True
},
{
"type": "report",
"name": "Purchase Order Detail Report",
"label":_("Purchase Order Detail Report"),
"doctype": "Purchase Order",
"is_query_report": True
},
{
"type": "report",
"name": "Material Indent Detail Report",
"label":_("Material Indent Detail Report"),
"doctype": "Material Request",
"is_query_report": True
},
{
"type": "report",
"is_query_report": False,
"name": "Stock Entry Report",
"doctype": "Stock Entry",
}
]
},
{
"label": _("Loan & Advance"),
"icon": "icon-star",
"items": [
{
"type": "doctype",
"name": "Farmer Loan",
"description": _(" "),
},
{
"type": "doctype",
"name": "Farmer Advance",
"description": _(" "),
},
{
"type": "doctype",
"name": "Vlcc Advance",
"description": _(" "),
},
{
"type": "doctype",
"name": "Vlcc Loan",
"description": _(" "),
},
{
"type": "doctype",
"name": "VLCC Payment Cycle Report",
"description": _(" "),
}
]
},
{
"label": _("Settings"),
"icon": "icon-star",
"items": [
{
"type": "doctype",
"name": "Farmer Settings",
"description": _(" "),
},
{
"type": "doctype",
"name": "VLCC Settings",
"description": _(" "),
},
{
"type": "doctype",
"name": "Dairy Log",
"description": _(" "),
},
{
"type": "doctype",
"name": "Mobile App Log",
"description": _(" "),
},
{
"type": "doctype",
"name": "AgRupay Log",
"description": _(" "),
}
]
},
{
"label": _("Payment Tool"),
"icon": "icon-star",
"items": [
{
"type": "doctype",
"name": "Farmer Payment Cycle",
"description": _(" "),
},
{
"type": "doctype",
"name": "Farmer Payment Cycle Report",
"description": _(" "),
},
{
"type": "report",
"name": "Farmer Payment Settlement",
"label": _("Farmer Payment Settlement"),
"doctype": "Farmer Payment Cycle",
"is_query_report": True
},
{
"type": "doctype",
"name": "VLCC Payment Cycle",
"description": _(" "),
},
{
"type": "report",
"is_query_report": False,
"name": "Payment Settlement Report",
"doctype": "Farmer Payment Log",
}
]
},
{
"label": _("Dairy"),
"icon": "icon-star",
"items": [
{
"type": "page",
"name": "dairy-dashboard",
"label": _("Dairy Dashboard"),
"description": _(" "),
},
{
"type": "doctype",
"name": "Vlcc Milk Collection Record",
"description": _(" "),
}
]
}
]
|
"""
This file implements Batch Normalization as described in the paper:
"Batch Normalization: Accelerating Deep Network Training
by Reducing Internal Covariate Shift"
by Sergey Ioffe, Christian Szegedy
This implementation is useful for inputs NOT coming from convolution layers.
For convolution layers, use nn.SpatialBatchNormalization.
The operation implemented is:
y = ( x - mean(x) ) / standard-deviation(x) * gamma + beta
where gamma and beta are learnable parameters.
The learning of gamma and beta is optional.
Usage:
with learnable parameters: nn.BatchNormalization(N [, eps] [, momentum])
where N = dimensionality of input
without learnable parameters: nn.BatchNormalization(N [, eps] [, momentum], False)
eps is a small value added to the standard-deviation to avoid divide-by-zero.
Defaults to 1e-5
In training time, this layer keeps a running estimate of its computed mean and std.
The running sum is kept with a default momentum of 0.1 (unless overridden).
In test time, this running mean/std is used to normalize.
"""
import torch
from .Module import Module
from .utils import clear
class BatchNormalization(Module):
# expected dimension of input
nDim = 2
def __init__(self, nOutput, eps=1e-5, momentum=0.1, affine=True):
super(BatchNormalization, self).__init__()
assert nOutput != 0
self.affine = affine
self.eps = eps
self.train = True
self.momentum = momentum
self.running_mean = torch.zeros(nOutput)
self.running_var = torch.ones(nOutput)
self.save_mean = None
self.save_std = None
self._input = None
self._gradOutput = None
if self.affine:
self.weight = torch.Tensor(nOutput)
self.bias = torch.Tensor(nOutput)
self.gradWeight = torch.Tensor(nOutput)
self.gradBias = torch.Tensor(nOutput)
self.reset()
else:
self.weight = None
self.bias = None
self.gradWeight = None
self.gradBias = None
def reset(self):
if self.weight is not None:
self.weight.uniform_()
if self.bias is not None:
self.bias.zero_()
self.running_mean.zero_()
self.running_var.fill_(1)
def _checkInputDim(self, input):
if input.dim() != self.nDim:
raise RuntimeError(
'only mini-batch supported ({}D tensor), got {}D tensor instead'.format(self.nDim, input.dim()))
if input.size(1) != self.running_mean.nelement():
raise RuntimeError('got {}-feature tensor, expected {}'.format(input.size(1), self.running_mean.nelement()))
def _makeContiguous(self, input, gradOutput=None):
if not input.is_contiguous():
if self._input is None:
self._input = input.new()
self._input.resize_as_(input).copy_(input)
input = self._input
if gradOutput is not None:
if not gradOutput.is_contiguous():
if self._gradOutput is None:
self._gradOutput = gradOutput.new()
self._gradOutput.resize_as_(gradOutput).copy_(gradOutput)
gradOutput = self._gradOutput
return input, gradOutput
def updateOutput(self, input):
self._checkInputDim(input)
input = self._makeContiguous(input)[0]
self.output.resize_as_(input)
if self.save_mean is None:
self.save_mean = input.new()
self.save_mean.resize_as_(self.running_mean)
if self.save_std is None:
self.save_std = input.new()
self.save_std.resize_as_(self.running_var)
self._backend.BatchNormalization_updateOutput(
self._backend.library_state,
input,
self.output,
self.weight,
self.bias,
self.running_mean,
self.running_var,
self.save_mean,
self.save_std,
self.train,
self.momentum,
self.eps
)
return self.output
def _backward(self, input, gradOutput, scale, gradInput=None, gradWeight=None, gradBias=None):
self._checkInputDim(input)
self._checkInputDim(gradOutput)
        if getattr(self, 'save_mean', None) is None or getattr(self, 'save_std', None) is None:
raise RuntimeError('you have to call updateOutput() at least once before backward()')
input, gradOutput = self._makeContiguous(input, gradOutput)
scale = scale or 1.
if gradInput is not None:
gradInput.resize_as_(gradOutput)
self._backend.BatchNormalization_backward(
self._backend.library_state,
input,
gradOutput,
gradInput,
gradWeight,
gradBias,
self.weight,
self.running_mean,
self.running_var,
self.save_mean,
self.save_std,
self.train,
scale,
self.eps
)
return self.gradInput
def backward(self, input, gradOutput, scale=1.):
return self._backward(input, gradOutput, scale, self.gradInput, self.gradWeight, self.gradBias)
def updateGradInput(self, input, gradOutput):
return self._backward(input, gradOutput, 1., self.gradInput)
def accGradParameters(self, input, gradOutput, scale=1.):
return self._backward(input, gradOutput, scale, None, self.gradWeight, self.gradBias)
def read(self, file, version):
        super(BatchNormalization, self).read(file, version)
if version < 2:
if self.running_std:
self.running_var = self.running_std.pow_(-2).add_(-self.eps)
self.running_std = None
def clearState(self):
# first 5 buffers are not present in the current implementation,
# but we keep them for cleaning old saved models
clear(self, [
'buffer',
'buffer2',
'centered',
'std',
'normalized',
'_input',
'_gradOutput',
'save_mean',
'save_std',
])
return super(BatchNormalization, self).clearState()
|
"""Define util functions for data preparation"""
import re
from typing import List
import nltk
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
nltk.download("wordnet")
nltk.download("stopwords")
nltk.download("punkt")
abbr_dict = {
"what's": "what is",
"what're": "what are",
"who's": "who is",
"who're": "who are",
"where's": "where is",
"where're": "where are",
"when's": "when is",
"when're": "when are",
"how's": "how is",
"how're": "how are",
"i'm": "i am",
"we're": "we are",
"you're": "you are",
"they're": "they are",
"it's": "it is",
"he's": "he is",
"she's": "she is",
"that's": "that is",
"there's": "there is",
"there're": "there are",
"i've": "i have",
"we've": "we have",
"you've": "you have",
"they've": "they have",
"who've": "who have",
"would've": "would have",
"not've": "not have",
"i'll": "i will",
"we'll": "we will",
"you'll": "you will",
"he'll": "he will",
"she'll": "she will",
"it'll": "it will",
"they'll": "they will",
"isn't": "is not",
"wasn't": "was not",
"aren't": "are not",
"weren't": "were not",
"can't": "can not",
"couldn't": "could not",
"don't": "do not",
"didn't": "did not",
"shouldn't": "should not",
"wouldn't": "would not",
"doesn't": "does not",
"haven't": "have not",
"hasn't": "has not",
"hadn't": "had not",
"won't": "will not",
}
def basic_cleaner(tweet: str) -> str:
"""Basic tweet cleanup function.
Args:
tweet (str): Raw tweet.
Returns:
str: Cleaned tweet.
"""
# remove urls
clean_text = re.sub(
r"pic.twitter.com\S+", " ", re.sub(r"(http|https):\S+", " ", tweet)
)
# remove quotations
clean_text = clean_text.replace("\t", " ").replace("\n", " ")
clean_text = re.sub(r'"', "'", clean_text)
# remove punctuations
clean_text = re.sub(
r"(\.|,|:|;|\?|!|\)|\(|\-|\[|\]|\{|\}|\*|\||\<|\>|%|&|/|$|\+|@|#|\$|£|=|\^|~)",
" ",
clean_text,
)
# remove haha and variants
clean_text = re.sub("hh+", "h", clean_text)
clean_text = re.sub("aaa+", "a", clean_text)
clean_text = re.sub(r"(ah){2,}|(ha){2,}", " laugh ", clean_text)
# remove repeated vowels
clean_text = re.sub("[a]{3,}", "aa", clean_text)
clean_text = re.sub("[e]{3,}", "ee", clean_text)
clean_text = re.sub("[i]{3,}", "ii", clean_text)
clean_text = re.sub("[o]{3,}", "oo", clean_text)
clean_text = re.sub("[u]{3,}", "uu", clean_text)
# remove hashtags
clean_text = re.sub(r"#\S+", " ", clean_text)
# remove the @ (mentions)
clean_text = re.sub(r"@\S+", " ", clean_text)
# remove the RT
clean_text = re.sub(r"(RT )", " ", clean_text)
# remove non ascii
tmp = ""
for char in clean_text:
if ord(char) < 128:
tmp += char
clean_text = tmp
# remove redundant whitespaces
clean_text = re.sub(" +", " ", clean_text)
# strip messages
clean_text = clean_text.strip()
return clean_text
def normalize_tweets(tweet: str) -> List[str]:
"""[summary]
Args:
tweet (str): [description]
Returns:
str: [description]
"""
# Fix word lengthening
clean_text = re.compile(r"(.)\1{2,}").sub(r"\1\1", tweet)
clean_text = clean_text.lower()
clean_text = clean_text.strip()
clean_text = re.sub(" +", " ", clean_text)
# Word lemmatization
lemmatizer = WordNetLemmatizer()
clean_text = lemmatizer.lemmatize(clean_text)
# Replace abbreviations with common word
    for abbrev, expansion in abbr_dict.items():
        clean_text = clean_text.replace(abbrev, expansion)
# Remove stopwords
stop_words = set(stopwords.words("english"))
word_tokens = word_tokenize(clean_text)
    filtered_sentence = [w for w in word_tokens if w.lower() not in stop_words]
return filtered_sentence
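if __name__ == "__main__":
    # Quick sanity check (not part of the original module) on a made-up tweet.
    raw = "RT @user I'm loving this!!! hahaha check http://example.com #sunny"
    cleaned = basic_cleaner(raw)
    print(cleaned)
    print(normalize_tweets(cleaned))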
|
not_yet_food = "cous"
food = not_yet_food * 2
print(food)
|
#!/usr/bin/env python
from constructs import Construct
from cdktf import App, TerraformStack
from imports.aws import SnsTopic
class MyStack(TerraformStack):
def __init__(self, scope: Construct, ns: str):
super().__init__(scope, ns)
# define resources here
        SnsTopic(self, 'cdktf-python-topic', display_name='sns-cdktf-python')
app = App()
MyStack(app, "cdktf-sns-python")
app.synth()
|
from typing import List
import pandas as pd
"""
%cd /workspace/twint/app
from app import tools
tools.generate_rss_yahoo_csv(
save_to="./resource/rss_yahoo_us_indicies.csv",
symbol_path="./resource/symbol_indicies.csv")
"""
def generate_rss_yahoo_csv(
save_to="./resource/rss_yahoo_us_stock.csv",
symbol_path=None) -> None:
if symbol_path is None:
from get_all_tickers.get_tickers import get_tickers
symbols = get_tickers()
else:
symbols = pd.read_csv(symbol_path, header=None)[0]
urls = [
f"http://finance.yahoo.com/rss/headline?s={s}" for s in symbols]
df = pd.DataFrame({
"ticker": symbols,
"url": urls,
})
df.to_csv(save_to, index=False)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from instabot_py import InstaBot
bot = InstaBot(
login="",
password="",
like_per_day=7019,
comments_per_day=197,
tag_list=["aviation", "sky", "avgeek", "aviationlovers", "boeing", "aircraft", "pilot", "airplane" ,"plane" ,"fly" ,"travel","flight","airport" ,"aviationphotography", "instagramaviation"
,"airbus" ,"pilotlife", "flying", "instaaviation" ,"aviationgeek", "planespotting"
,"aviationdaily" ,"photography", "instaplane", "instagood", "planes", "love", "bhfyp","jet", "crewlife" ,"photooftheday" ,"clouds", "b", "cabincrew" ,"follow" ,"megaplane", "airline" ,"instagram"
,"lovers" , "landing" ,"pilots", "cessna", "airforce", "takeoff", "beautiful" ,"avporn" ,"planespotter" ,"military", "usa" , "sun", "crew", "boeinglovers", "picoftheday","flightattendant" ,"instatravel"
, "avgeek", "aviation", "aircraft", "airplane", "boeing", "avporn", "instaplane", "pilot", "megaplane", "plane", "instaaviation", "airport", "airbus", "aviationlovers", "planeporn", "planespotting", "flying", "aviationphotography", "boeinglovers","pilotlife"
, "flight", "aviationgeek", "planespotter", "fly", "crewlife","sky", "spotting", "travel", "planes", "instapilot"],
tag_blacklist=['rain', 'thunderstorm'],
user_blacklist={},
max_like_for_one_tag=50,
follow_per_day=402,
follow_time=1 * 30,
unfollow_per_day=402,
unfollow_break_min=5,
unfollow_break_max=20,
log_mod=0,
proxy='',
# List of list of words, each of which will be used to generate comment
# For example: "This shot feels wow!"
comment_list=[["🤞","🙏","✌️"],
['❤️','💜','💚','💙',"🧡","💛"]
],
# Use unwanted_username_list to block usernames containing a string
# Will do partial matches; i.e. 'mozart' will block 'legend_mozart'
# 'free_followers' will be blocked because it contains 'free'
unwanted_username_list=[
"second",
"stuff",
"art",
"project",
"love",
"life",
"food",
"blog",
"free",
"keren",
"graphy",
"indo",
"art",
"shop",
"store",
"sex",
"toko",
"jual",
"online",
"murah",
"jam",
"kaos",
"case",
"baju",
"fashion",
"corp",
"tas",
"butik",
"grosir",
"karpet",
"sosis",
"salon",
"skin",
"care",
"cloth",
"tech",
"rental",
"kamera",
"beauty",
"express",
"kredit",
"collection",
"impor",
"preloved",
"follow",
"follower",
"gain",
".id",
"_id",
"bags",
],
unfollow_whitelist=["example_user_1", "example_user_2"],
# Enable the following to schedule the bot. Uses 24H
# end_at_h = 23, # Hour you want the bot to stop
# end_at_m = 30, # Minute you want the bot stop, in this example 23:30
# start_at_h = 9, # Hour you want the bot to start
# start_at_m = 10, # Minute you want the bot to start, in this example 9:10 (am).
)
bot.mainloop()
|
import os
import datetime
import numpy as np
import scipy
import pandas as pd
import torch
from torch import nn
import criscas
from criscas.utilities import create_directory, get_device, report_available_cuda_devices
from criscas.predict_model_training import *
# base_dir = os.path.abspath('...')
base_dir = "/home/data/bedict_reproduce"
'''Read sample data'''
seq_df = pd.read_csv(os.path.join(base_dir, 'sample_data', 'abemax_sampledata.csv'), header=0)
# create a directory where we dump the predictions of the models
csv_dir = create_directory(os.path.join(base_dir, 'sample_data', 'predictions'))
'''Specify device (i.e. CPU or GPU) to run the models on'''
report_available_cuda_devices()
# instantiate a device using the only one available :P
device = get_device(True, 2)
'''Create a BE-DICT model by specifying the target base editor'''
base_editor = 'ABEmax'
bedict = BEDICT_CriscasModel(base_editor, device)
pred_w_attn_runs_df, proc_df = bedict.predict_from_dataframe(seq_df)
pred_option = 'mean'
pred_w_attn_df = bedict.select_prediction(pred_w_attn_runs_df, pred_option)
pred_w_attn_runs_df.to_csv(os.path.join(csv_dir, 'predictions_allruns.csv'))
pred_w_attn_df.to_csv(os.path.join(csv_dir, f'predictions_predoption_{pred_option}.csv'))
'''Generate attention plots'''
# create a dictionary to specify target sequence and the position we want attention plot for
# we are targeting position 5 in the sequence
seqid_pos_map = {'CTRL_HEKsiteNO1':[5], 'CTRL_HEKsiteNO2':[5]}
pred_option = 'mean'
apply_attn_filter = False
bedict.highlight_attn_per_seq(pred_w_attn_runs_df,
proc_df,
seqid_pos_map=seqid_pos_map,
pred_option=pred_option,
apply_attnscore_filter=apply_attn_filter,
fig_dir=None)
# create a dictionary to specify target sequence and the position I want attention plot for
# we are targeting position 5 in the sequence
seqid_pos_map = {'CTRL_HEKsiteNO1':[5], 'CTRL_HEKsiteNO2':[5]}
pred_option = 'mean'
apply_attn_filter = False
fig_dir = create_directory(os.path.join(base_dir, 'sample_data', 'fig_dir'))
bedict.highlight_attn_per_seq(pred_w_attn_runs_df,
proc_df,
seqid_pos_map=seqid_pos_map,
pred_option=pred_option,
apply_attnscore_filter=apply_attn_filter,
fig_dir=create_directory(os.path.join(fig_dir, pred_option))) |
from django import test, forms
from ginger.forms import FileOrUrlInput
class DummyForm(forms.Form):
file = forms.FileField(widget=FileOrUrlInput, required=False)
class RequiredDummyForm(forms.Form):
file = forms.FileField(widget=FileOrUrlInput, required=True)
class TestFileOrUrlInput(test.SimpleTestCase):
image_url = "http://media-cache-ec0.pinimg.com/236x/cb/99/03/cb9903c463fda9a46f6d79005f29a9be.jpg"
def test_valid(self):
form = DummyForm(data={"file": self.image_url}, files={})
self.assertTrue(form.is_valid())
file_obj = form.cleaned_data["file"]
self.assertEqual(file_obj.url, self.image_url)
def test_contradiction(self):
form = DummyForm(data={"file": self.image_url,
"file-clear": "on"}, files={})
self.assertFalse(form.is_valid()) |
"""
j is the index at which the next number not equal to val is written.
j never gets ahead of the read position, so writing at j cannot overwrite a value that still needs to be checked.
"""
class Solution(object):
def removeElement(self, nums, val):
j = 0
for n in nums:
if n!=val:
nums[j] = n
j += 1
return j |
"""
Usage:
convert_to_excel.py [options]
Options:
-h --help Show this screen
-v --version Show version
-f, --file <file-name> Path to SSA.gov site XML file
"""
import os
import sys
import xmltodict
from docopt import docopt
from excel_helper import Excel
class EarningsData(object):
def __init__(self, filename):
self.data_sets = []
self.data = {}
self.filename = filename
with open(self.filename) as fd:
self.data = xmltodict.parse(fd.read())
file, ext = os.path.splitext(os.path.basename(self.filename))
self.user_name = self.data.get('osss:OnlineSocialSecurityStatementData').get('osss:UserInformation').get(
'osss:Name').replace('.', '').replace(',', '').replace('-', '')
self.build_data_sets()
self.excel_file = Excel(response='{}.xlsx'.format(file),
workbook_name='{}'.format(self.user_name), data_sets=self.data_sets,
timezone_support=False, local_tz=None)
print('Writing file: {}/{}.xlsx'.format(os.getcwd(), file))
def build_data_sets(self):
earnings_record = self.data.get('osss:OnlineSocialSecurityStatementData').get('osss:EarningsRecord').get(
'osss:Earnings')
column_order = ['Year', 'Fica Earnings', 'Medicare Earnings', ]
data = []
for record in earnings_record:
row = {'Year': record.get('@endYear'),
'Fica Earnings': record.get('osss:FicaEarnings'),
'Medicare Earnings': record.get('osss:MedicareEarnings')}
data.append(row)
self.data_sets.append(
            {'sheet_name': 'Earnings History'[:30], 'column_order': column_order,
'data': data})
if __name__ == "__main__":
arguments = docopt(__doc__, version='pyssagov 0.0.1') or {}
filename = arguments.get('--file')
if not filename:
sys.exit('Please supply a file with source data from SSA.gov XML export file')
earnings_data = EarningsData(filename=filename)
earnings_data.excel_file.make()
|
from django.test import TestCase, Client
import http.client
from checkout.models import Order, PickList
from meadery.models import Product
from checkout.checkout import all_in_stock, create_picklist, process_picklist, cancel_picklist
# admin.py
# write tests for processing and cancelling orders from web
# write silly test identifying an order's picklist
# write test for processing and cancelling picklists from web
# write silly test identifying a picklist's order
# checkout.py
# write test for creating and cancelling order
# - bonus points for doing email right
# write silly test trying to cancel a fulfilled picklist
# forms.py
# test clean_phone, which will test strip_non_numbers
# models.py
# printstatus for orders
# orderitems properties
# picklist str and absoluteurl (email!)
# picklistitem properties and absoluteurl (email!)
# checkout_tags.py
# having a real order will be enough I suspect
# views
# showcheckout (get and post, logged in and not)
# receipt?!?
class OrderTestCase(TestCase):
fixtures = ["accounts", "meadery", "inventory", "checkout"]
def setUp(self):
self.order = Order.objects.all()[0]
self.client = Client()
def test_permalink(self):
url = self.order.get_absolute_url()
response = self.client.get(url)
self.assertTrue(response)
# not OK, but FOUND (302, redirect)
self.assertEqual(response.status_code, http.client.FOUND)
# FIXME: check that it redirects the right place
def test_str(self):
self.assertEqual(self.order.__str__(), "Order #" + str(self.order.id))
# FIXME: test show_checkout somehow?
# FIXME: test receipt like category views
# FIXME: test create_order
def test_create_order(self):
# FIXME: learn how to create an order from a request
return True
def test_all_in_stock(self):
# requires order #1 to be valid
checkval = all_in_stock(self.order)
self.assertTrue(checkval)
# requires order #2 to be invalid
# (requests 1 jar of product 2 which is not in stock)
order2 = Order.objects.all()[1]
checkval2 = all_in_stock(order2)
self.assertFalse(checkval2)
def test_create_picklist(self):
# confirm both jars are marked active and available
sip1a = Product.active.get(slug="sip-1-a")
self.assertEqual(sip1a.jar_set.filter(is_active=True).count(), 14)
self.assertEqual(sip1a.jar_set.filter(is_available=True).count(), 14)
# generate valid picklist from order #1
picklist = create_picklist(self.order)
self.assertEqual(sip1a.jar_set.filter(is_available=True).count(), 12)
self.assertEqual(picklist.status, PickList.SUBMITTED)
self.assertEqual(self.order.status, Order.PROCESSED)
# try to generate another picklist from order #1
# (should fail as status should have changed)
picklist_again = create_picklist(self.order)
self.assertEqual(picklist_again, None)
# try to generate picklist from order #2
# (should fail as no product exists)
order2 = Order.objects.all()[1]
picklist2 = create_picklist(order2)
self.assertEqual(picklist2, None)
# FIXME: check process_picklist
def test_process_picklist(self):
# confirm both jars are marked available and active
sip1a = Product.active.get(slug="sip-1-a")
self.assertEqual(sip1a.jar_set.filter(is_active=True).count(), 14)
self.assertEqual(sip1a.jar_set.filter(is_available=True).count(), 14)
# generate valid picklist from order #1
picklist = create_picklist(self.order)
retval = process_picklist(picklist)
self.assertEqual(retval, True)
# check that jars are no longer active
self.assertEqual(sip1a.jar_set.filter(is_active=True).count(), 12)
# check status of picklist
self.assertEqual(picklist.status, PickList.PROCESSED)
# check status of order
self.assertEqual(self.order.status, Order.DELIVERED)
# process it again
# (should fail as status should have changed)
retval2 = process_picklist(picklist)
self.assertEqual(retval2, False)
# FIXME: check cancel_picklist
def test_cancel_picklist(self):
# confirm both jars are marked available and active
sip1a = Product.active.get(slug="sip-1-a")
self.assertEqual(sip1a.jar_set.filter(is_active=True).count(), 14)
self.assertEqual(sip1a.jar_set.filter(is_available=True).count(), 14)
# generate valid picklist from order #1
picklist = create_picklist(self.order)
# confirm jars are unavailable
self.assertEqual(sip1a.jar_set.filter(is_available=True).count(), 12)
retval = cancel_picklist(picklist)
self.assertEqual(retval, True)
# check that jars are available again
self.assertEqual(sip1a.jar_set.filter(is_available=True).count(), 14)
# check status of picklist
self.assertEqual(picklist.status, PickList.CANCELLED)
# check status of order
self.assertEqual(self.order.status, Order.SUBMITTED)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf.urls import include, url
from . import rest_api
from . import auth
from ..views.term import RebuildTermTreeView
from ..views.data_mart import RebuildDataMartTreeView
urlpatterns = (
url(r'^rebuild_term_tree/',
RebuildTermTreeView.as_view(),
name='rebuild_term_tree'
),
url(r'^rebuild_datamart_tree/',
RebuildDataMartTreeView.as_view(),
name='rebuild_datamart_tree'
),
url(r'^api/', include(rest_api)),
url(r'^auth/', include(auth)),
)
|
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from .models import Snack
class SnacksTests(TestCase):
def setUp(self):
self.user = get_user_model().objects.create_user(
username="dina", email="[email protected]", password="pass"
)
self.snack = Snack.objects.create(
title = 'falafel',
            description = 'delicious',
purchaser = self.user
)
def test_snack_representation(self):
self.assertEqual(str(self.snack), "falafel")
def test_snack_content(self):
self.assertEqual(f"{self.snack.title}", "falafel")
self.assertEqual(f"{self.snack.purchaser}", "dina")
self.assertEqual(f"{self.snack.description}", "delecious")
def test_snack_list_view(self):
response = self.client.get(reverse("snack_list"))
self.assertEqual(response.status_code, 200)
self.assertContains(response, "falafel")
self.assertTemplateUsed(response, "snack_list.html")
def test_snack_details_view(self):
expected = 200
response = self.client.get(reverse("snack_detail", args="1"))
no_response = self.client.get("/999/")
self.assertEqual(response.status_code, expected)
self.assertEqual(no_response.status_code, 404)
self.assertContains(response, "falafel")
self.assertTemplateUsed(response, "snack_detail.html")
def test_snack_delete_view(self):
response = self.client.get(reverse("snack_delete", args="1"))
self.assertEqual(response.status_code, 200) |
import threading
import os
import hashlib
DATABASE_DIR = "dbFiles"
os.chdir(DATABASE_DIR)
class Database:
def __init__(self, name = None):
if name:
if self.CreateDatabaseFile(name):
print("DB created successfuly")
elif self.SelectDatabaseFile(name):
print("DB selected successfuly")
else:
print("Couldn't create DB")
else:
print("DB Class instance created")
def CreateDatabaseFile(self, name):
if name + ".df" in os.listdir():
return False
open(name + ".df" , 'w')
self.CurrentDB = name + ".df"
return True
def SelectDatabaseFile(self, name):
if name + ".df" in os.listdir():
self.CurrentDB = name + ".df"
return True
else:
return False
def CreateTable(self , table_name , table_columns ):
table_signature = "{}>".format(table_name)
for column in table_columns:
table_signature += self._find_type(column)
table_signature += " "
table_signature += column
table_signature += ","
table_signature = table_signature[:-1]
table_signature = table_signature.encode("utf-8")
table_signature = hashlib.sha256(table_signature).hexdigest()[:10]
with open(self.CurrentDB , 'a') as DBFile:
pattern = "<" + table_name + " {"
for column in table_columns:
pattern += self._find_type(column) + " " + column + ","
pattern = pattern[:-1] + "}"
pattern += table_signature
pattern += ">"
pattern += "\n"
DBFile.write( pattern )
def _find_type(self,var):
temp = type(var)
if temp == str: return "str"
if temp == int: return "int"
if temp == float: return "float"
return 0
def ReadTables(self):
tbl_list = list()
with open(self.CurrentDB , "r") as dbfile:
temp = dbfile.readlines(1)
while len(temp)!=0:
temp = temp[0].replace("\n","")
if temp[0]=="<":
tbl_name = temp.split(" ")[0][1:]
tbl_sign = temp.split("}")[1][:-1]
columns = temp.split("{")[1].split("}")[0]
columns = columns.split(",")
print(columns)
columns_dict = dict()
for column in columns:
column = column.split(" ")
columns_dict[column[1]] = column[0]
tbl_list.append( (tbl_name , tbl_sign , columns_dict) )
else:
pass
temp = dbfile.readlines(1)
self.TableList = tbl_list
return tbl_list
def ReadData(self, table_name, pattern_dict, limit=0):
data_list = list()
tbl_sign = None
for table in self.TableList:
if table[0]==table_name:
tbl_sign = table[1]
if not(tbl_sign):
return False
with open(self.CurrentDB , "r") as dbfile:
temp = dbfile.readlines(1)
while len(temp)!=0:
temp = temp[0]
if temp[:10]==tbl_sign:
temp = temp.replace("\n","")
temp = temp.split(":")[1]
temp = temp.split(",")
if self._match_with_pattern(temp,pattern_dict):
data_list.append(temp)
temp = dbfile.readlines(1)
return data_list
def _match_with_pattern(self, data, pattern):
return True
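# Rough usage sketch (not part of the original module; assumes the "dbFiles"
# directory exists so the import-time os.chdir above succeeds):
#
#   db = Database("inventory")                # creates or selects inventory.df
#   db.CreateTable("items", ["name", "qty"])  # columns typed via _find_type
#   print(db.ReadTables())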
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import typing as t
from dataclasses import dataclass, field
from hmalib.common.classification_models import Label
from hmalib.common.config import HMAConfig
@dataclass(unsafe_hash=True)
class ActionLabel(Label):
key: str = field(default="Action", init=False)
@dataclass(unsafe_hash=True)
class ThreatExchangeReactionLabel(Label):
key: str = field(default="ThreatExchangeReaction", init=False)
@dataclass
class Action:
action_label: ActionLabel
priority: int
superseded_by: t.List[ActionLabel]
@dataclass
class ActionRule(HMAConfig):
"""
Action rules are config-backed objects that have a set of labels (both
"must have" and "must not have") which, when evaluated against the
classifications of a matching banked piece of content, lead to an action
to take (specified by the rule's action label). By convention each action
rule's name field is also the value field of the rule's action label.
"""
action_label: ActionLabel
must_have_labels: t.Set[Label]
must_not_have_labels: t.Set[Label]
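# Reading the docstring above concretely: a rule named e.g. "EnqueueForReview"
# (hypothetical) carries an ActionLabel with that same value, fires only when a
# match's classifications include every label in must_have_labels, and is
# suppressed when any label in must_not_have_labels is present.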
|
try:
from tornado import speedups
except ImportError:
speedups = None
|
from typing import List
from functools import lru_cache
class Solution:
def maxProduct(self, words: List[str]) -> int:
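        # Each word is encoded as a 26-bit mask (bit i set when chr(97 + i) occurs
        # in the word); two words share no letters exactly when their masks AND to
        # zero, so we take the best length product over non-overlapping mask pairs.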
mask = {}
for word in words:
key = 0
for c in word:
key |= 1 << (ord(c) - 97)
mask[key] = max(mask.get(key, 0), len(word))
return max([mask[x] * mask[y] for x in mask for y in mask if not x & y] or [0])
# TESTS
for words, expected in [
(["abcw", "baz", "foo", "bar", "xtfn", "abcdef"], 16),
(["a", "ab", "abc", "d", "cd", "bcd", "abcd"], 4),
(["a", "aa", "aaa", "aaaa"], 0),
]:
sol = Solution()
actual = sol.maxProduct(words)
print("Maximum product of distinct word lengths in", words, "->", actual)
assert actual == expected
|
#!/usr/bin/env python
# encoding: utf-8
"""
power-of-three.py
Created by Shuailong on 2016-01-10.
https://leetcode.com/problems/power-of-three/.
"""
'''Refer to some discussions. Think about it later. '''
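# The lookup table below is every power of three from 3**0 up to 3**40; it could
# equivalently be generated (sketch) as {3 ** k for k in range(41)}.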
class Solution(object):
def isPowerOfThree(self, n):
"""
:type n: int
:rtype: bool
"""
return n in {1,
3,
9,
27,
81,
243,
729,
2187,
6561,
19683,
59049,
177147,
531441,
1594323,
4782969,
14348907,
43046721,
129140163,
387420489,
1162261467,
3486784401,
10460353203,
31381059609,
94143178827,
282429536481,
847288609443,
2541865828329,
7625597484987,
22876792454961,
68630377364883,
205891132094649,
617673396283947,
1853020188851841,
5559060566555523,
16677181699666569,
50031545098999707,
150094635296999121,
450283905890997363,
1350851717672992089,
4052555153018976267,
12157665459056928801,
}
def main():
n = 243
solution = Solution()
    print(solution.isPowerOfThree(n))
if __name__ == '__main__':
main()
|
import gym.spaces as spaces
from gym import ActionWrapper
class FlattenAction(ActionWrapper):
r"""Action wrapper that flattens the action."""
def __init__(self, env):
super(FlattenAction, self).__init__(env)
self.action_space = spaces.flatten_space(env.action_space)
def action(self, action):
# return spaces.flatten(self.env.action_space, action)
return action |
from os.path import isfile, isdir, join, dirname
from os import listdir, makedirs
import shutil
import numpy as np
from tqdm import tqdm
def transform(h36m_path, target_dir):
from spacepy import pycdf # make sure that this is only imported when actually needed..
assert isdir(h36m_path), h36m_path
if isdir(target_dir):
shutil.rmtree(target_dir)
makedirs(target_dir)
print()
print('Human3.6M path:', h36m_path)
print('write to:', target_dir)
print()
ACTORS = ["S1", 'S5', 'S6', 'S7', 'S8', 'S9', 'S11']
ACTIONS = [
'Directions',
'Discussion',
'Eating',
'Greeting',
'Phoning',
'Posing',
'Purchases',
'Sitting',
'SittingDown',
'Smoking',
'Photo',
'Waiting',
'Walking',
'WalkingDog',
'WalkTogether'
]
for actor in ACTORS:
print('\n[get h36m skeleton] ->', actor)
for action in tqdm(ACTIONS):
for sid in [0, 1]:
# fix labeling... Human3.6M labeling is very messy and we need to fix it...
fixed_action = ''
if action == 'WalkTogether':
fixed_action = 'walkingtogether'
elif action == 'Photo':
fixed_action = 'takingphoto'
else:
fixed_action = action.lower()
if actor == 'S1' and action == 'Photo':
action = 'TakingPhoto'
if actor != 'S1' and action == 'WalkingDog':
action = 'WalkDog'
cdf_dir = join(join(h36m_path, actor), 'MyPoseFeatures')
cdf_dir = join(cdf_dir, 'D3_Positions')
videos = sorted(
[f for f in listdir(cdf_dir) if f.startswith(action)])
if (actor == 'S1' and action == 'Walking') or \
action == 'Sitting':
# separate Walking from WalkingDog OR
# separate Sitting from SittingDown
assert len(videos) == 4
videos = videos[0:2]
assert len(videos) == 2, '# of videos:' + str(len(videos))
a, b = videos
if len(a) > len(b): # ['xxx 9.cdf', 'xxx.cdf']
videos = [b, a]
else:
assert len(a) == len(b)
cdf_file = join(cdf_dir, videos[sid])
assert isfile(cdf_file)
cdf = pycdf.CDF(cdf_file)
joints3d = np.squeeze(cdf['Pose']).reshape((-1, 32, 3)) / 1000
joints3d = joints3d.astype('float32')
fixed_sid = sid + 1
fixed_sid = fixed_sid % 2 + 1 # 2 -> 1 and 1 -> 2
if (actor == 'S8' and fixed_action == 'walkingtogether') or \
(actor == 'S7' and fixed_action == 'walking') or \
(actor == 'S7' and fixed_action == 'waiting') or \
(actor == 'S5' and fixed_action == 'waiting') or \
(actor == 'S7' and fixed_action == 'takingphoto') or \
(actor == 'S6' and fixed_action == 'takingphoto') or \
(actor == 'S5' and fixed_action == 'takingphoto') or \
(actor == 'S11' and fixed_action == 'sittingdown') or \
(actor == 'S9' and fixed_action == 'sittingdown') or \
(actor == 'S8' and fixed_action == 'sittingdown') or \
(actor == 'S7' and fixed_action == 'sittingdown') or \
(actor == 'S5' and fixed_action == 'sittingdown') or \
(actor == 'S6' and fixed_action == 'sitting') or \
(actor == 'S1' and fixed_action == 'sitting') or \
(actor == 'S5' and fixed_action == 'greeting') or \
(actor == 'S6' and fixed_action == 'eating') or \
(actor == 'S11' and fixed_action == 'discussion') or \
(actor == 'S9' and fixed_action == 'discussion') or \
(actor == 'S5' and fixed_action == 'discussion') or \
(actor == 'S5' and fixed_action == 'directions'):
fixed_sid = fixed_sid % 2 + 1 # 2 -> 1 and 1 -> 2
if fixed_action == 'walkdog':
fixed_action = 'walkingdog'
fname = join(target_dir, actor + '_' + fixed_action + '_' + str(fixed_sid) + '.npy')
np.save(fname, joints3d)
|
"""Parking Valet, by Al Sweigart [email protected]
A sliding tile puzzle game to move cars out of the way.
Inspired by Nob Yoshihagara's Rush Hour.
parkingvaletpuzzle.txt generated from puzzles by Michael Fogleman.
More info at https://www.michaelfogleman.com/rush/
This and other games are available at https://nostarch.com/XX
Tags: large, game, puzzle game, board game"""
__version__ = 0
import math, random, sys
# Set up the constants:
EMPTY_SPACE = '.'
WALL = chr(9617) # Character 9617 is '░'
def main():
print("""Parking Valet, by Al Sweigart [email protected]
Original Rush Hour game by Nob Yoshihagara.
Puzzles by Michael Fogleman.
Get the "A" car to the right edge of the board.
Enter moves as <car letter><direction>.
Directions are (L)eft, (R)ight, (U)p, and (D)own.
""")
input('Press Enter to begin...')
puzzle = getRandomPuzzle()
    gameBoard = getBoard(puzzle) # Start a new puzzle board.
while True:
print('\n' * 60) # "Clear" the screen by printing many newlines.
displayBoard(gameBoard)
playerMove = askForPlayerMove(gameBoard)
if playerMove == 'RESET':
gameBoard = getBoard(puzzle) # Restore the original board.
else:
makeMove(gameBoard, playerMove)
if hasWon(gameBoard):
displayBoard(gameBoard)
print()
print('PUZZLE COMPLETE!')
sys.exit()
def getRandomPuzzle():
"""Return a string representing a randomly selected puzzle."""
numberOfPuzzles = 0
puzzleFile = open('parkingvaletpuzzles.txt')
while puzzleFile.readline():
numberOfPuzzles += 1
puzzleFile.close()
randomPuzzleNum = random.randint(1, numberOfPuzzles)
counter = 1
puzzleFile = open('parkingvaletpuzzles.txt')
while True:
if counter == randomPuzzleNum:
return puzzleFile.readline()
else:
puzzleFile.readline()
counter += 1
def getBoard(puzzleAsString):
"""Return a board data structure based on the puzzle string."""
# Set up data structure.
board = {}
# We assume that the puzzles are square shaped:
board['width'] = int(math.sqrt(len(puzzleAsString)))
board['height'] = int(math.sqrt(len(puzzleAsString)))
x = 0
y = 0
for character in puzzleAsString:
if character == 'X':
# Draw walls using the block character instead of x:
character = WALL
board[(x, y)] = character
if x == board['width'] - 1:
y += 1
x = 0
else:
x += 1
return board
def displayBoard(board):
"""Display the board on the screen."""
for y in range(board['height']):
for i in range(3): # We draw 3 rows per board-row.
if i == 0 and y != 0:
# Draw a horizontal dividing line:
for x in range(board['width']):
if (board[(x, y)] != EMPTY_SPACE
and board[(x, y)] == board[(x, y - 1)]):
# Draw car in dividing line:
print(board[(x, y)] * 3 + ' ', end='')
else:
# Draw empty dividing line:
print(' ' * 4, end='')
print()
for x in range(board['width']):
# Draw the board space:
print(board[(x, y)] * 3, end='')
if (x != board['width'] - 1
and board[(x, y)] != EMPTY_SPACE
and board[(x, y)] == board[(x + 1, y)]):
# Draw car in vertical dividing line:
print(board[(x, y)], end='')
else:
# Draw empty vertical dividing line:
print(' ', end='')
print()
def getValidMoves(board):
"""Return a list of valid moves that can be made on the board."""
validMoves = []
for x in range(board['width']):
for y in range(board['height']):
if board[(x, y)] in (EMPTY_SPACE, WALL):
continue # Skip this empty or wall space.
xNotOnLeftEdge = x != 0
xNotOnRightEdge = x != board['width'] - 1
yNotOnTopEdge = y != 0
yNotOnBottomEdge = y != board['height'] - 1
# Check if the car at x, y can move down.
if (yNotOnTopEdge
and board[(x, y)] == board[(x, y - 1)]
and y + 1 < board['height']
and board[(x, y + 1)] == EMPTY_SPACE):
validMoves.append(board[(x, y)] + 'D')
# Check if the car at x, y can move up.
if (yNotOnBottomEdge
and board[(x, y)] == board[(x, y + 1)]
and y - 1 >= 0
and board[(x, y - 1)] == EMPTY_SPACE):
validMoves.append(board[(x, y)] + 'U')
# Check if the car at x, y can move right.
if (xNotOnLeftEdge
and board[(x, y)] == board[(x - 1, y)]
and x + 1 < board['width']
and board[(x + 1, y)] == EMPTY_SPACE):
validMoves.append(board[(x, y)] + 'R')
# Check if the car at x, y can move left.
if (xNotOnRightEdge
and board[(x, y)] == board[(x + 1, y)]
and x - 1 >= 0
and board[(x - 1, y)] == EMPTY_SPACE):
validMoves.append(board[(x, y)] + 'L')
return validMoves
def makeMove(board, move):
"""Carry out a move on the given board."""
validMoves = getValidMoves(board)
if move not in validMoves:
return False
car = move[0]
direction = move[1]
newCarPositions = []
for x in range(board['width']):
for y in range(board['height']):
if board[(x, y)] == car:
board[(x, y)] = '.'
if direction == 'U':
newCarPositions.append((x, y - 1))
elif direction == 'D':
newCarPositions.append((x, y + 1))
elif direction == 'L':
newCarPositions.append((x - 1, y))
elif direction == 'R':
newCarPositions.append((x + 1, y))
for newCarPosition in newCarPositions:
board[newCarPosition] = car
def hasWon(board):
"""Return True if the 'A' car has reached the right edge."""
# The puzzle is solved when the 'A' car reaches the right edge.
for y in range(board['height']):
if board[(board['width'] - 1, y)] == 'A':
return True
return False
def askForPlayerMove(board):
"""Let the player enter the car and direction they want to move."""
validMoves = getValidMoves(board)
while True:
allValidMoves = '", "'.join(validMoves)
print('Moves: "{}", "RESET", or "QUIT".'.format(allValidMoves))
move = input('> ').upper()
if move == 'QUIT':
sys.exit()
if move == 'RESET':
return 'RESET'
if move in validMoves:
return move
# If this program was run (instead of imported), run the game:
if __name__ == '__main__':
main()
|
import logging
from typing import Callable
import numpy as np
# Starting logger
LOGGER = logging.getLogger(__name__)
class EarlyStopping:
"""Early stops the training if validation loss doesn't improve after a given patience."""
def __init__(self, patience: int = 7, delta: float = 1e-6):
"""
Parameters
-----------
patience
How long to wait after last time validation loss improved.
delta
Minimum change in the monitored quantity to qualify as an improvement.
"""
self.patience = patience
self.counter = 0
self.best_score = None
self.early_stop = False
self.val_loss_min = np.Inf
self.delta = delta
def __call__(self, saver: Callable, epoch: int, val_loss: float) -> None:
"""Update the early stopping state.
Parameters
----------
saver
Function to save the current state of the network and the other training parameters
epoch
Current epoch
val_loss
Validation loss
"""
score = -val_loss
if self.best_score is None:
self.best_score = score
self.save_checkpoint(saver, epoch, val_loss)
elif score < self.best_score + self.delta:
self.counter += 1
LOGGER.debug(f'EarlyStopping counter: {self.counter} out of {self.patience}')
if self.counter >= self.patience:
self.early_stop = True
else:
self.best_score = score
self.save_checkpoint(saver, epoch, val_loss)
self.counter = 0
def save_checkpoint(self, saver: Callable[[int, float], None], epoch: int, val_loss: float):
"""Saves model when validation loss decrease.
Parameters
----------
saver
Function to save the current state of the network and the other training parameters
epoch
Current epoch
val_loss
Validation loss
"""
LOGGER.debug(f"Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model")
saver(epoch, val_loss)
self.val_loss_min = val_loss
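# Usage sketch (illustrative only; `train_one_epoch`, `evaluate` and `save_state`
# are hypothetical helpers, not part of this module):
#
#   early_stopping = EarlyStopping(patience=5, delta=1e-4)
#   for epoch in range(max_epochs):
#       train_one_epoch(model, optimizer)
#       val_loss = evaluate(model, val_loader)
#       early_stopping(saver=save_state, epoch=epoch, val_loss=val_loss)
#       if early_stopping.early_stop:
#           LOGGER.info("Early stopping triggered at epoch %d", epoch)
#           break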
|
from recipes.native_typing import simple
def test_wf():
x, y = simple.t1(a=5)
assert x == 7
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin

from .models import Account
# Register your models here.
class AccountAdmin(UserAdmin):
    list_display = ('email', 'date_joined', 'last_login', 'is_admin', 'is_staff', 'profile_pic', 'first_name', 'last_name', 'id', 'show_to_public', 'chat_keys')
    search_fields = ('email', 'first_name', 'last_name')
    readonly_fields = ('date_joined', 'last_login')
    list_filter = ('email', 'first_name', 'last_name')
filter_horizontal = ()
fieldsets = ()
ordering = ('profile_pic', 'email', 'first_name', 'last_name', 'university', 'major', 'school_year', 'date_joined', 'last_login', 'is_admin', 'is_active', 'is_staff', 'is_superuser', "chat_keys")
admin.site.register(Account,AccountAdmin)
|
# import tensorflow as tf
import numpy as np
import tensorflow.keras.backend as K
from tensorflow.keras.layers import Layer
from tensorflow.keras.layers import Conv1D
from tensorflow.keras.regularizers import Regularizer
from tensorflow.keras import initializers
import scipy.interpolate as si
DNA = ["A", "C", "G", "T"]
def normalize_data_format(value):
if value is None:
value = K.image_data_format()
data_format = value.lower()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('The `data_format` argument must be one of '
'"channels_first", "channels_last". Received: ' +
str(value))
return data_format
class GlobalAveragePooling1D_Mask0(Layer):
"""
Global average pooling operation for temporal data.
Masking out 0-padded input.
"""
def __init__(self, data_format='channels_last', **kwargs):
super(GlobalAveragePooling1D_Mask0, self).__init__(**kwargs)
self.data_format = normalize_data_format(data_format)
def compute_output_shape(self, input_shape):
input_shape = input_shape[0]
if self.data_format == 'channels_first':
return (input_shape[0], input_shape[1])
else:
return (input_shape[0], input_shape[2])
def call(self, inputs):
inputs, model_inputs = inputs
steps_axis = 1 if self.data_format == 'channels_last' else 2
mask = K.max(model_inputs, axis=2, keepdims=True)
inputs *= mask
return K.sum(inputs, axis=steps_axis) / K.maximum(
K.sum(mask, axis=steps_axis), K.epsilon())
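# Usage sketch (illustrative only): the layer is called on a list of two tensors,
# the feature map to pool and the original 0-padded one-hot input used to build the mask.
#
#   import tensorflow as tf
#   seq = tf.keras.Input(shape=(None, 4))                       # 0-padded one-hot DNA
#   x = Conv1D(16, 5, padding="same", activation="relu")(seq)
#   pooled = GlobalAveragePooling1D_Mask0()([x, seq])
#   model = tf.keras.Model(seq, pooled)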
class ConvSequence(Conv1D):
VOCAB = DNA
def __init__(self,
filters,
kernel_size,
strides=1,
padding='valid',
dilation_rate=1,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
seq_length=None,
**kwargs):
# override input shape
if seq_length:
kwargs["input_shape"] = (seq_length, len(self.VOCAB))
kwargs.pop("batch_input_shape", None)
super(ConvSequence, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs)
self.seq_length = seq_length
def build(self, input_shape):
if int(input_shape[-1]) != len(self.VOCAB):
raise ValueError("{cls} requires input_shape[-1] == {n}. Given: {s}".
format(cls=self.__class__.__name__, n=len(self.VOCAB), s=input_shape[-1]))
return super(ConvSequence, self).build(input_shape)
def get_config(self):
config = super(ConvSequence, self).get_config()
config["seq_length"] = self.seq_length
return config
class ConvDNA(ConvSequence):
VOCAB = DNA
VOCAB_name = "DNA"
def get_S(n_bases=10, spline_order=3, add_intercept=True):
# mvcv R-code
# S<-diag(object$bs.dim);
# if (m[2]) for (i in 1:m[2]) S <- diff(S)
# object$S <- list(t(S)%*%S) # get penalty
# object$S[[1]] <- (object$S[[1]]+t(object$S[[1]]))/2 # exact symmetry
S = np.identity(n_bases)
m2 = spline_order - 1 # m[2] is the same as m[1] by default
# m2 order differences
for i in range(m2):
S = np.diff(S, axis=0) # same as diff() in R
S = np.dot(S.T, S)
S = (S + S.T) / 2 # exact symmetry
if add_intercept is True:
# S <- cbind(0, rbind(0, S)) # in R
zeros = np.zeros_like(S[:1, :])
S = np.vstack([zeros, S])
zeros = np.zeros_like(S[:, :1])
S = np.hstack([zeros, S])
return S.astype(np.float32)
def get_knots(start, end, n_bases=10, spline_order=3):
"""
    Arguments:
        start: float; start of the knot region
        end: float; end of the knot region
        n_bases: int; number of spline bases
        spline_order: int; spline order
"""
x_range = end - start
start = start - x_range * 0.001
end = end + x_range * 0.001
# mgcv annotation
m = spline_order - 1
nk = n_bases - m # number of interior knots
dknots = (end - start) / (nk - 1)
knots = np.linspace(start=start - dknots * (m + 1),
stop=end + dknots * (m + 1),
num=nk + 2 * m + 2)
return knots.astype(np.float32)
def get_X_spline(x, knots, n_bases=10, spline_order=3, add_intercept=True):
"""
Returns:
np.array of shape [len(x), n_bases + (add_intercept)]
# BSpline formula
https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BSpline.html#scipy.interpolate.BSpline
Fortran code:
https://github.com/scipy/scipy/blob/v0.19.0/scipy/interpolate/fitpack/splev.f
"""
    if len(x.shape) != 1:
        raise ValueError("x has to be 1 dimensional")
tck = [knots, np.zeros(n_bases), spline_order]
X = np.zeros([len(x), n_bases])
for i in range(n_bases):
vec = np.zeros(n_bases)
vec[i] = 1.0
tck[1] = vec
X[:, i] = si.splev(x, tck, der=0)
if add_intercept is True:
ones = np.ones_like(X[:, :1])
X = np.hstack([ones, X])
return X.astype(np.float32)
class BSpline():
"""Class for computing the B-spline funcions b_i(x) and
constructing the penality matrix S.
# Arguments
start: float or int; start of the region
end: float or int; end of the region
n_bases: int; number of spline bases
spline_order: int; spline order
# Methods
- **getS(add_intercept=False)** - Get the penalty matrix S
- Arguments
- **add_intercept**: bool. If true, intercept column is added to the returned matrix.
- Returns
- `np.array`, of shape `(n_bases + add_intercept, n_bases + add_intercept)`
- **predict(x, add_intercept=False)** - For some x, predict the bn(x) for each base
- Arguments
- **x**: np.array; Vector of dimension 1
- **add_intercept**: bool; If True, intercept column is added to the to the final array
- Returns
- `np.array`, of shape `(len(x), n_bases + (add_intercept))`
"""
def __init__(self, start=0, end=1, n_bases=10, spline_order=3):
self.start = start
self.end = end
self.n_bases = n_bases
self.spline_order = spline_order
self.knots = get_knots(self.start, self.end,
self.n_bases, self.spline_order)
self.S = get_S(self.n_bases, self.spline_order, add_intercept=False)
def __repr__(self):
return "BSpline(start={0}, end={1}, n_bases={2}, spline_order={3})".\
format(self.start, self.end, self.n_bases, self.spline_order)
def getS(self, add_intercept=False):
"""Get the penalty matrix S
Returns
np.array, of shape (n_bases + add_intercept, n_bases + add_intercept)
"""
S = self.S
if add_intercept is True:
# S <- cbind(0, rbind(0, S)) # in R
zeros = np.zeros_like(S[:1, :])
S = np.vstack([zeros, S])
zeros = np.zeros_like(S[:, :1])
S = np.hstack([zeros, S])
return S
def predict(self, x, add_intercept=False):
"""For some x, predict the bn(x) for each base
Arguments:
x: np.array; Vector of dimension 1
add_intercept: bool; should we add the intercept to the final array
Returns:
np.array, of shape (len(x), n_bases + (add_intercept))
"""
# sanity check
if x.min() < self.start:
raise Warning("x.min() < self.start")
if x.max() > self.end:
raise Warning("x.max() > self.end")
return get_X_spline(x=x,
knots=self.knots,
n_bases=self.n_bases,
spline_order=self.spline_order,
add_intercept=add_intercept)
def get_config(self):
return {"start": self.start,
"end": self.end,
"n_bases": self.n_bases,
"spline_order": self.spline_order
}
@classmethod
def from_config(cls, config):
return cls(**config)
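# Usage sketch (illustrative only): evaluate 10 cubic B-spline basis functions over
# integer positions and fetch the corresponding smoothness penalty matrix.
#
#   bs = BSpline(start=0, end=99, n_bases=10, spline_order=3)
#   positions = np.arange(100)
#   X = bs.predict(positions)      # shape (100, 10)
#   S = bs.getS()                  # shape (10, 10)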
class GAMRegularizer(Regularizer):
def __init__(self, n_bases=10, spline_order=3, l2_smooth=0., l2=0.):
"""Regularizer for GAM's
# Arguments
n_bases: number of b-spline bases
order: spline order (2 for quadratic, 3 for qubic splines)
l2_smooth: float; Smoothness penalty (penalize w' * S * w)
l2: float; L2 regularization factor - overall weights regularizer
"""
# convert S to numpy-array if it's a list
self.n_bases = n_bases
self.spline_order = spline_order
self.l2_smooth = K.cast_to_floatx(l2_smooth)
self.l2 = K.cast_to_floatx(l2)
# convert to K.constant
self.S = K.constant(
K.cast_to_floatx(
get_S(n_bases, spline_order, add_intercept=False)
))
def __call__(self, x):
# x.shape = (n_bases, n_spline_tracks)
# from conv: (kernel_width=1, n_bases, n_spline_tracks)
from_conv = len(K.int_shape(x)) == 3
if from_conv:
x = K.squeeze(x, 0)
n_spline_tracks = K.cast_to_floatx(K.int_shape(x)[1])
regularization = 0.
if self.l2:
regularization += K.sum(self.l2 * K.square(x)) / n_spline_tracks
if self.l2_smooth:
# https://keras.io/backend/#batch_dot
# equivalent to mean( diag(x' * S * x) )
regularization += self.l2_smooth * \
K.mean(K.batch_dot(x, K.dot(self.S, x), axes=1))
return regularization
def get_config(self):
# convert S to list()
return {'n_bases': self.n_bases,
'spline_order': self.spline_order,
'l2_smooth': float(self.l2_smooth),
'l2': float(self.l2),
}
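# Usage sketch (illustrative only): the penalty is intended for a kernel that stores
# spline coefficients, e.g. a kernel_size=1 Conv1D applied to n_bases spline features.
#
#   spline_conv = Conv1D(filters=1, kernel_size=1, use_bias=False,
#                        kernel_regularizer=GAMRegularizer(n_bases=10, spline_order=3,
#                                                          l2_smooth=1e-3, l2=0.0))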
class SplineWeight1D(Layer):
"""Up- or down-weight positions in the activation array of 1D convolutions:
`x^{out}_{ijk} = x^{in}_{ijk}* (1 + f_S^k(j)) \;,`
where f_S is the spline transformation.
# Arguments
n_bases: int; Number of spline bases used for the positional effect.
l2_smooth: (float) L2 regularization strength for the second
order differences in positional bias' smooth splines. (GAM smoothing regularization)
l2: (float) L2 regularization strength for the spline base coefficients.
use_bias: boolean; should we add a bias to the transition
bias_initializer: bias initializer - from `keras.initializers`
"""
def __name__(self):
return "SplineWeight1D"
def __init__(self,
# spline type
n_bases=10,
spline_degree=3,
share_splines=False,
# regularization
l2_smooth=0,
l2=0,
use_bias=False,
bias_initializer='zeros',
**kwargs):
self.n_bases = n_bases
self.spline_degree = spline_degree
self.share_splines = share_splines
self.l2 = l2
self.l2_smooth = l2_smooth
self.use_bias = use_bias
self.bias_initializer = initializers.get(bias_initializer)
super(SplineWeight1D, self).__init__(**kwargs)
def build(self, input_shape):
# input_shape = (None, steps, filters)
start = 0
end = int(input_shape[1])
filters = int(input_shape[2])
if self.share_splines:
n_spline_tracks = 1
else:
n_spline_tracks = filters
# setup the bspline object
self.bs = BSpline(start, end - 1,
n_bases=self.n_bases,
spline_order=self.spline_degree
)
# create X_spline,
self.positions = np.arange(end)
# shape = (end, self.n_bases)
self.X_spline = self.bs.predict(self.positions, add_intercept=False)
# convert to the right precision and K.constant
self.X_spline_K = K.constant(K.cast_to_floatx(self.X_spline))
# add weights - all set to 0
self.kernel = self.add_weight(shape=(self.n_bases, n_spline_tracks),
initializer='zeros',
name='kernel',
regularizer=GAMRegularizer(self.n_bases, self.spline_degree,
self.l2_smooth, self.l2),
trainable=True)
if self.use_bias:
            self.bias = self.add_weight(shape=(n_spline_tracks, ),
initializer=self.bias_initializer,
name='bias',
regularizer=None)
# Be sure to call this somewhere!
super(SplineWeight1D, self).build(input_shape)
def call(self, x):
spline_track = K.dot(self.X_spline_K, self.kernel)
if self.use_bias:
spline_track = K.bias_add(spline_track, self.bias)
# if self.spline_exp:
# spline_track = K.exp(spline_track)
# else:
spline_track = spline_track + 1
# multiply together the two coefficients
output = spline_track * x
return output
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
'n_bases': self.n_bases,
'spline_degree': self.spline_degree,
'share_splines': self.share_splines,
# 'spline_exp': self.spline_exp,
'l2_smooth': self.l2_smooth,
'l2': self.l2,
'use_bias': self.use_bias,
'bias_initializer': initializers.serialize(self.bias_initializer),
}
base_config = super(SplineWeight1D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def positional_effect(self):
w = self.get_weights()[0]
pos_effect = np.dot(self.X_spline, w)
return {"positional_effect": pos_effect, "positions": self.positions}
|
# Simple pig latin converter (minimal working version: assumes the original intent was
# to move the first letter of the input to the end and add "ay").
print("This program will transform input into pig latin")
beforePig = input("What shalt thou make pig? : ")
print(len(beforePig))
# Strings are immutable, so build a new string instead of calling .append() on one.
afterPig = beforePig[1:] + beforePig[0] + "ay"
print(len(afterPig))
print(afterPig)
input("awaiting input...")
|
from base64 import b64encode
from datetime import datetime
from decimal import Decimal
from math import ceil
from flask_sqlalchemy import SQLAlchemy, BaseQuery
from sqlalchemy import orm
from sqlalchemy.orm import joinedload
from functools import wraps
import jwt
from sqlalchemy.orm.state import InstanceState
from views import Result
db = SQLAlchemy()
def get_fillable(model: db.Model, get_attr_object=False, **kwargs):
if len(kwargs) == 0:
raise Exception('Model keywords are missing. Try ** or spread key values')
if not hasattr(model, 'fillable') and any(kwargs):
raise Exception('Must declare a fillable on class ' + model.__name__)
fillable = {}
for attribute_name in model.fillable:
if attribute_name in kwargs:
if get_attr_object:
key = getattr(model, attribute_name)
else:
key = attribute_name
fillable[key] = kwargs[attribute_name][0] if isinstance(kwargs[attribute_name], list) else \
kwargs[attribute_name]
return fillable
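# Usage sketch (illustrative only; `Account` and its `fillable` tuple are hypothetical):
#
#   class Account(db.Model):
#       fillable = ('email', 'first_name')
#       ...
#   data = get_fillable(Account, **request.form.to_dict())   # keeps only fillable keys
#   account = Account(**data)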
def token_required(f):
from dal.models import User
from flask import current_app, request
@wraps(f)
def decorated(*args, **kwargs):
token = None
if 'X-Access-Token' in request.headers:
token = request.headers.get('X-ACCESS-TOKEN')
if not token:
return Result.error('Token is missing!', 401)
try:
data = jwt.decode(token, current_app.config['SECRET_KEY'], algorithms=['HS256'])
current_user = User.query.options(joinedload('roles')).filter_by(email=data['email']).first()
except Exception:
return Result.error('Token is invalid!', 401)
request.user = current_user
return f(*args, **kwargs)
return decorated
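# Usage sketch (illustrative only; `app` and the route below are hypothetical):
#
#   @app.route('/profile')
#   @token_required
#   def profile():
#       return dict(request.user)   # request.user is populated by the decorator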
def system_call(f):
"""
meant to be called from within server instance, this is a temporary solution until an API key system is created
:param f:
:return:
"""
from flask import current_app, request
@wraps(f)
def decorated(*args, **kwargs):
token = None
if 'X-System-Token' in request.headers:
token = request.headers.get('X-SYSTEM-TOKEN')
if not token or token != current_app.config['SECRET_KEY']:
            return Result.error('Token is missing or invalid!', 401)
return f(*args, **kwargs)
return decorated
def access_required(f):
from flask import request
from core.router import permissions
@wraps(f)
def access_decorator(*args, **kwargs):
if not request.user:
return Result.error('Invalid user', 401)
if not has_access(request.user.roles, request.endpoint, request.method, permissions):
return Result.error('Access denied', 403)
return f(*args, **kwargs)
return access_decorator
access_map = {
'GET': 'read',
'PUT': 'write',
'POST': 'write',
'DELETE': 'delete'
}
def has_access(roles, endpoint, method, permissions):
    for role in roles:
        for name, grant in role.get_permissions.items():
            if name == permissions[endpoint]:
                # use a separate loop variable so the granted action strings
                # do not shadow the boolean result
                for granted in grant:
                    if granted == access_map[method]:
                        return True
    return False
class Paginator:
per_page = 20
def __init__(self, query: BaseQuery, page: int = 1, order_by: str = None, order_dir: str = None):
self.total = query.count()
self.offset = (page * self.per_page) - self.per_page
self.total_pages = ceil(self.total / self.per_page)
self.query = query
self.page = page
if order_by:
order_by = getattr(self.query.column_descriptions[0]['type'], order_by)
order_dir = getattr(order_by, order_dir if order_dir else 'asc')
self.query = self.query.order_by(order_dir())
def get_items(self) -> list:
items = self.get_result()
return list(map(lambda row: dict(row), items))
def get_result(self):
return self.query.offset(self.offset).limit(self.per_page).all()
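# Usage sketch (illustrative only; `User` is a hypothetical model whose rows can be
# converted to dicts, e.g. via the ModelIter mixin below):
#
#   paginator = Paginator(User.query, page=2, order_by='email', order_dir='desc')
#   payload = {
#       'page': paginator.page,
#       'total_pages': paginator.total_pages,
#       'items': paginator.get_items(),
#   }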
class ModelIter(object):
allowed_widget = False
def __init__(self, *args, **kwargs):
        super(ModelIter, self).__init__(*args, **kwargs)
def __iter__(self):
if isinstance(self, db.Model):
for column in self.__dict__.keys():
attr = getattr(self, column)
if isinstance(attr, InstanceState) or hasattr(self.__mapper__.attrs, column) and \
hasattr(getattr(self.__mapper__.attrs, column), 'deferred') and \
getattr(self.__mapper__.attrs, column).deferred:
continue
                if isinstance(attr, (bool, int, float, dict)) or attr is None:
yield column, attr
elif isinstance(attr, Decimal):
yield column, '{0:.2f}'.format(attr)
elif isinstance(attr, datetime):
yield column, str(attr.isoformat())
elif isinstance(attr, bytes):
yield column, b64encode(attr).decode()
elif not isinstance(attr, str):
yield column, str(attr)
else:
yield column, attr
if hasattr(self, '__mapper__'):
# models that have not been loaded
unloaded = orm.attributes.instance_state(self).unloaded
for relationship in self.__mapper__.relationships:
if relationship.key not in unloaded and hasattr(self, relationship.key):
value = getattr(self, relationship.key)
if isinstance(value, list):
yield relationship.key, list(map(dict, value))
else:
yield relationship.key, dict(value) if value else value
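# Usage sketch (illustrative only): models that inherit from both db.Model and ModelIter
# can be passed to dict(), which is what Paginator.get_items relies on.
#
#   class User(db.Model, ModelIter):
#       id = db.Column(db.Integer, primary_key=True)
#       email = db.Column(db.String(120))
#
#   dict(User.query.first())   # column values coerced to JSON-friendly types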
|
#!/usr/bin/env python
# Author: Trevor Sherrard
# Since: July 28, 2021
# Purpose: This node translates the current orientation
# of the haptic controller into robot base motion
import rospy
from pyrobot import Robot
import time
import numpy as np
from sensor_msgs.msg import LaserScan
from haptic_orientation_capture_driver.msg import bno055_euler_data
from haptic_orientation_capture_driver.msg import controller_event_data, controller_haptic_voice
from haptic_orientation_capture_driver.srv import haptic_voice_event
class hapticBaseControl:
def __init__(self, controller_id):
self.nodeName = "haptic_base_control_node"
# define controller id
self.controller_id = controller_id
# define locobot instance
self.LoCoBotInstance = None
# define placeholder for euler data
self.eulerData = dict()
self.eulerData["Y"] = 0.0
self.eulerData["P"] = 0.0
self.eulerData["R"] = 0.0
# define topic and service names
self.eulerDataTopic = "/bno055_euler"
self.buttonDataTopic = "/controller_button_event"
self.lidarTopic = "/scan"
self.hapticVoiceSrvName = "/haptic_voice_event"
# define scaling and limit constants
self.currentFwdScaleFactor = 0.2
self.currentTwistScaleFactor = 0.5
self.fineTuneTwistScaleFactor = 0.25
self.fineTuneFwdScaleFactor = 0.05
        self.coarseFwdScaleFactor = self.currentFwdScaleFactor
        self.coarseTwistScaleFactor = self.currentTwistScaleFactor
self.deadZonePitchPlus = 6.5
self.deadZonePitchMinus = -6.5
self.deadZoneRollPlus = 6.5
self.deadZoneRollMinus = -6.5
self.maxAnglePitch = 40
self.maxAngleRoll = 40
self.velExecTime = 0.0001
# define constants for obstacle detection
self.lidarDistanceThresh = 0.3
self.maxNumBumps = 15
self.timeOfLastDisplay = time.time()
self.minTimeBetweenDisplays = 10 # seconds
self.collisionVoiceID = 2
self.collisionHapticID = 52
def start_node(self):
rospy.init_node(self.nodeName)
rospy.loginfo("started " + self.nodeName)
# create locobot instance
arm_config = dict(control_mode='torque')
self.LoCoBotInstance = Robot('locobot', arm_config=arm_config)
# init subscribers
rospy.Subscriber(self.eulerDataTopic, bno055_euler_data, self.euler_callback)
rospy.Subscriber(self.buttonDataTopic, controller_event_data, self.button_callback)
#rospy.Subscriber(self.lidarTopic, LaserScan, self.lidar_callback)
rospy.spin()
def euler_callback(self, msg):
"""
        this function extracts the sensor data from the received
        message payload and updates the values within the class itself.
        params:
            msg (bno055_euler_data instance): received message
returns:
None
"""
# make sure we are using the right controller for
        # the received message.
if(msg.controller_name == self.controller_id):
self.eulerData["Y"] = msg.yaw
self.eulerData["P"] = msg.pitch
self.eulerData["R"] = msg.roll
self.scale_and_set_vel()
else:
pass
def button_callback(self, msg):
"""
this function will increment or decrement the scale
factor for motion based on user button presses.
params:
            msg (controller_event_data instance): received message
returns:
None
"""
# first check to make sure we are using the right controller
rospy.loginfo("here")
if(msg.controller_name == self.controller_id):
# increment or decrement scale factor based on event type
if(msg.event_type == "bottom_button_pressed"):
                self.currentFwdScaleFactor = self.coarseFwdScaleFactor
                self.currentTwistScaleFactor = self.coarseTwistScaleFactor
                rospy.loginfo("changed to coarse scaling!")
elif(msg.event_type == "top_button_pressed"):
self.currentFwdScaleFactor = self.fineTuneFwdScaleFactor
self.currentTwistScaleFactor = self.fineTuneTwistScaleFactor
rospy.loginfo("changed to fine scaling!")
else:
pass
def lidar_callback(self, msg):
"""
        This function will check whether self.maxNumBumps consecutive returns
        are within self.lidarDistanceThresh of the robot base. If
        this condition is met, an audio-haptic effect is sent to the controller.
params:
            msg (LaserScan msg): received scan message
return:
None
"""
start_angle = float(msg.angle_min)
end_angle = float(msg.angle_max)
angle_inc = float(msg.angle_increment)
points = msg.ranges
num_consecutive_bumps = 0
for pt in points:
# check if current point is within
# threshold or not
if(pt < self.lidarDistanceThresh):
num_consecutive_bumps += 1
else:
num_consecutive_bumps = 0
# check if we have enough 'bumps' for audio-haptic display
if(num_consecutive_bumps >= self.maxNumBumps):
# make sure we've waited awhile before replaying display
time_diff = time.time() - self.timeOfLastDisplay
if(time_diff > self.minTimeBetweenDisplays):
# make call and reset time
self.haptic_voice_srv_call(self.collisionHapticID, self.collisionVoiceID)
self.timeOfLastDisplay = time.time()
else:
pass
else:
pass
def haptic_voice_srv_call(self, haptic_id, voice_id):
"""
this function allows for control of a given controller's
haptic actuator and TTS engine.
params:
haptic_id (int): ID of the haptic effect to play (1->117)
voice_id (int): ID of the voice effect to play (0->2)
returns:
res (bool) the result of the service call
"""
rospy.wait_for_service(self.hapticVoiceSrvName)
try:
# create message
controller_event_msg = controller_haptic_voice()
controller_event_msg.header.stamp = rospy.Time.now()
controller_event_msg.controller_name = self.controller_id
controller_event_msg.haptic_action_id = haptic_id
controller_event_msg.voice_action_id = voice_id
controller_event = rospy.ServiceProxy(self.hapticVoiceSrvName, haptic_voice_event)
res = controller_event(controller_event_msg)
return res.status
except rospy.ServiceException as e:
rospy.logerr("Service call failed: %s"%e)
def scale_and_set_vel(self):
# check if we are in pitch or roll deadzones
curRoll = self.eulerData["R"]
curPitch = self.eulerData["P"]
inPitchDeadzone = curPitch < self.deadZonePitchPlus and curPitch > self.deadZonePitchMinus
inRollDeadzone = curRoll < self.deadZoneRollPlus and curRoll > self.deadZoneRollMinus
# see if we are above max angles
overMaxPitch = abs(curPitch) > self.maxAnglePitch
overMaxRoll = abs(curRoll) > self.maxAngleRoll
# if we are not in deadzones, and not greater than max angle
# try to scale to usable velocity
unit_fwd_vel = 0
unit_twist_vel = 0
if(not inPitchDeadzone and not overMaxPitch):
isPitchNeg = curPitch < 0
if(isPitchNeg):
# get angle as fraction of 'usable' workspace
totalUsableNegative = self.deadZonePitchMinus - self.maxAnglePitch
negFraction = (curPitch + self.deadZonePitchMinus)/totalUsableNegative
unit_fwd_vel = -1*negFraction
else:
# get angle as fraction of 'usable' workspace
totalUsablePositive = self.deadZonePitchPlus + self.maxAnglePitch
posFraction = (curPitch + self.deadZonePitchPlus)/totalUsablePositive
unit_fwd_vel = posFraction
# handle OOB cases
elif(inPitchDeadzone):
unit_fwd_vel = 0
elif(overMaxPitch and curPitch > 0):
unit_fwd_vel = 1
elif(overMaxPitch and curPitch < 0):
unit_fwd_vel = -1
if(not inRollDeadzone and not overMaxRoll):
isRollNeg = curRoll < 0
if(isRollNeg):
# get angle as fraction of 'usable' workspace
totalUsableNegative = self.deadZoneRollMinus - self.maxAngleRoll
negFraction = (curRoll + self.deadZoneRollMinus)/totalUsableNegative
unit_twist_vel = -1*negFraction
else:
# get angle as fraction of 'usable' workspace
totalUsablePositive = self.deadZoneRollPlus + self.maxAngleRoll
posFraction = (curRoll + self.deadZoneRollPlus)/totalUsablePositive
unit_twist_vel = posFraction
# handle OOB cases
elif(inRollDeadzone):
unit_twist_vel = 0
elif(overMaxRoll and curRoll > 0):
unit_twist_vel = 1
elif(overMaxRoll and curRoll < 0):
unit_twist_vel = -1
# scale velocities
unit_fwd_vel *= self.currentFwdScaleFactor
unit_twist_vel *= self.currentTwistScaleFactor
rospy.loginfo('current velocity (fwd, twist): (%s, %s)' % (unit_fwd_vel, unit_twist_vel))
# set robot vel
self.LoCoBotInstance.base.set_vel(fwd_speed=unit_fwd_vel,
turn_speed=-1*unit_twist_vel,
exe_time=self.velExecTime)
if(__name__ == "__main__"):
try:
controller_id = "right_hand"
haptic_base_control = hapticBaseControl(controller_id)
haptic_base_control.start_node()
except rospy.ROSInterruptException:
pass
|
import os
from disnake import ApplicationCommandInteraction
from dotenv import load_dotenv
load_dotenv()
DEBUG = os.getenv("DEBUG", None) is not None
DEFAULT_PREFIX = os.getenv("DEFAULT_PREFIX", "$")
TEST_GUILDS = (
[int(id_) for id_ in os.getenv("TEST_GUILDS").split(",")]
if os.getenv("TEST_GUILDS", None)
else None
)
DATABASE_URL = os.getenv("DATABASE_URL", "")
if not DATABASE_URL:
raise Exception("DATABASE_URL is not set")
TOKEN = os.getenv("TOKEN", "")
if not TOKEN:
raise ValueError("TOKEN is not set")
GIT_SHA = os.getenv("GIT_SHA", "unknown")
LOGGING_LEVEL = os.getenv("LOGGING_LEVEL", "DEBUG" if DEBUG else "INFO")
# Typing aliases
ACI = ApplicationCommandInteraction
|