import fastapi
from fastapi.responses import JSONResponse
from fastapi import Depends
from services.db_service import get_one_course, all_courses
from models.course import Course
router = fastapi.APIRouter()
@router.get('/api/rec/{course_id}')
def rec(crs: Course = Depends()):
result = get_one_course(crs.course_id)
if result is None:
return JSONResponse(content={"Result": f"THis course {crs.course_id} is not not foud"}, status_code=200)
else:
return JSONResponse(content={"Result" :{"course_id":result.course_id,
"title":result.title,
"speciality":result.speciality
}}, status_code=200)
@router.get('/api/rec/')
def rec_all():
return JSONResponse( content={"Results":all_courses()} , status_code=200) |
import shutil
from fastapi import APIRouter, File, UploadFile, Query
from fastapi.responses import FileResponse # from starlette.responses import FileResponse
from typing import List
# this is referencing fastapi/app...
from services.s3 import get_s3, get_s3_bucket_name
router_custom_app_s3 = APIRouter(
tags=["custom_app_s3"],
prefix="/s3"
)
## S3 buckets
@router_custom_app_s3.get("/ext-list-s3-buckets")
async def ext_list_buckets():
try:
s3buckets = None
s3obj = get_s3()
if s3obj is not None:
res = s3obj.list_buckets()
if res is not None:
s3buckets = res["Buckets"]
# print("Buckets")
# for bucket in res["Buckets"]:
# print(f" {bucket["Name"]}")
return { "Buckets": s3buckets }
except Exception:
return { "Error": "List Buckets Error" }
@router_custom_app_s3.get("/ext-list-s3-files")
def ext_list_objects():
bucket_name=get_s3_bucket_name()
files=[]
try:
objs = get_s3().list_objects_v2(Bucket=bucket_name, MaxKeys=500)
for obj in objs["Contents"]:
files.append(obj["Key"])
return files
except Exception:
return { "Error": "List Objects Error" }
@router_custom_app_s3.post("/ext-upload-s3-file")
def ext_upload_file(image: UploadFile = File(...)):
bucket_name=get_s3_bucket_name()
try:
# f = open("guru99.txt","rb")
# s3.upload_fileobj(f, BUCKET_NAME, 'aaa/vvv.txt')
# f.close()
get_s3().upload_fileobj(image.file, bucket_name, image.filename)
return { "Status": "Ok" }
except Exception:
return { "Error": "Upload" }
@router_custom_app_s3.delete("/ext-delete-s3-file")
def ext_delete_file(object_key: str = Query(None)):
bucket_name=get_s3_bucket_name()
try:
res = get_s3().delete_object(Bucket=bucket_name, Key=object_key)
return { "Status": "Ok", "Response": res }
except Exception:
return { "Error": "Delete" }
@router_custom_app_s3.get("/ext-read-s3-file")
def ext_read_file(object_key: str = Query(None)):
bucket_name=get_s3_bucket_name()
try:
res = get_s3().get_object(Bucket=bucket_name, Key=object_key)
data_str = res['Body'].read().decode('utf-8')
# TBD handle binary data
# TBD save to file
# with open('output.txt', 'wb') as data:
# s3.download_fileobj(BUCKET_NAME, 'aaa/vvv.txt', data)
return { "Status": "Ok", "Data": data_str }
except Exception:
return { "Error": "Read" }
|
import os
import math
import pandas as pd
import datetime
variables = [
'date_stamp',
'cnt_confirmed',
'cnt_recovered',
'cnt_active',
'cnt_hospitalized',
'cnt_hospitalized_current',
'cnt_death',
'cnt_probable'
]
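# Remove CSV files in the given path whose filename date is more than one week old.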
def deleteFiles(path):
today = datetime.date.today()
one_week = datetime.timedelta(days=7)
week = today - one_week
week_ago = datetime.datetime.combine(week, datetime.time(0, 0))
for filename in os.listdir(path):
if filename.endswith('.csv'):
newFilename = filename.replace('.csv', '')
filedate = datetime.datetime.strptime(newFilename, '%Y-%m-%d')
if filedate < week_ago:
print(f'removing file more than a week old: {path}/{filename}')
os.remove(f"{path}/{filename}")
return None
def cleanData(data, fileName):
# The source has multiple rows that together make up a single record.
# We use the first value of each row to map it to the appropriate variable.
source = pd.DataFrame(data)
# the target structure we will be getting to.
df = pd.DataFrame(columns = variables)
# iterate over the source rows and create the target row
record = {}
record['date_stamp'] = fileName[0:-4]
for index, row in source.iterrows():
variable = row[0]
value = row[1]
if(type(variable) != str and math.isnan(variable)):
continue
if(math.isnan(value)):
value = None
if('Number of Cases' in variable):
record['cnt_confirmed'] = value
elif('Recovered Cases' in variable):
record['cnt_recovered'] = value
elif('Number of Active Cases' in variable):
record['cnt_active'] = value
elif('Hospitalizations' in variable):
record['cnt_hospitalized'] = value
elif('Currently Hospitalized' in variable):
record['cnt_hospitalized_current'] = value
elif('Deaths' in variable):
record['cnt_death'] = value
elif('Probable Cases' in variable):
record['cnt_probable'] = value
# add record to df
df = pd.concat([df, pd.DataFrame([record])], ignore_index=True)  # DataFrame.append was removed in pandas 2.0
# apply data types
df['date_stamp'] = pd.to_datetime(df['date_stamp']).dt.strftime('%Y-%m-%d')
df['cnt_confirmed'] = df['cnt_confirmed'].astype(pd.Int32Dtype())
df['cnt_recovered'] = df['cnt_recovered'].astype(pd.Int32Dtype())
df['cnt_active'] = df['cnt_active'].astype(pd.Int32Dtype())
df['cnt_hospitalized'] = df['cnt_hospitalized'].astype(pd.Int32Dtype())
df['cnt_hospitalized_current'] = df['cnt_hospitalized_current'].astype(pd.Int32Dtype())
df['cnt_death'] = df['cnt_death'].astype(pd.Int32Dtype())
df['cnt_probable'] = df['cnt_probable'].astype(pd.Int32Dtype())
return df
if __name__ == "__main__":
path = os.path
# Loop over the files within the folder
for filename in sorted(os.listdir('./data/us-tn/co-knox/covid_cases/raw')):
if filename.endswith('.csv') and not path.exists(f'./data/us-tn/co-knox/covid_cases/clean/{filename}'):
print(filename)
# For each csv file, map the transformed data to its respective file in the harvested folder
data = pd.read_csv(f"./data/us-tn/co-knox/covid_cases/raw/{filename}")
df = cleanData(data, filename)
df.to_csv(f"./data/us-tn/co-knox/covid_cases/clean/{filename}", index=False)
# if there is no aggregate file create one, otherwise append to it.
if path.exists(f"./data/us-tn/co-knox/covid_cases/latest.csv"):
df.to_csv(f"./data/us-tn/co-knox/covid_cases/latest.csv", mode='a', header=False, index=False)
else:
df.to_csv(f"./data/us-tn/co-knox/covid_cases/latest.csv", index=False)
deleteFiles('./data/us-tn/co-knox/covid_cases/raw')
deleteFiles('./data/us-tn/co-knox/covid_cases/clean')
|
text = input('Enter text: ')
file = None
try:
file = open('res.txt', 'w')
file.write(text)
except OSError:
print('An error occurred while writing!')
else:
print('Written successfully!')
finally:
if file is not None:
file.close()
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
=============
Cadc TAP plus
=============
"""
from astroquery.cadc.tests.DummyJob import DummyJob
class DummyTapHandler(object):
def __init__(self):
self.__invokedMethod = None
self.__parameters = {}
def reset(self):
self.__parameters = {}
self.__invokedMethod = None
def check_call(self, method_name, parameters):
self.check_method(method_name)
self.check_parameters(parameters, method_name)
def check_method(self, method):
if method == self.__invokedMethod:
return
else:
raise Exception("Method '"+str(method) +
"' not invoked. (Invoked method is '" +
str(self.__invokedMethod)+"')")
def check_parameters(self, parameters, method_name):
if parameters is None:
return len(self.__parameters) == 0
if len(parameters) != len(self.__parameters):
raise Exception("Wrong number of parameters for method '%s'. "
"Found: %d. Expected: %d" %
(method_name,
len(self.__parameters),
len(parameters)))
for key in parameters:
if key in self.__parameters:
# check value
if self.__parameters[key] != parameters[key]:
raise Exception("Wrong '%s' parameter value for method '%s'. "
"Found: '%s'. Expected: '%s'" % (
key,
method_name,
self.__parameters[key],
parameters[key]))
else:
raise Exception("Parameter '%s' not found for method '%s'",
(str(key), method_name))
return False
def load_tables(self, only_names=False, verbose=False):
self.__invokedMethod = 'get_tables'
self.__parameters['only_names'] = only_names
self.__parameters['verbose'] = verbose
return None
def load_table(self, table, verbose=False):
self.__invokedMethod = 'get_table'
self.__parameters['table'] = table
self.__parameters['verbose'] = verbose
return None
def launch_job(self, query, name=None, output_file=None,
output_format="votable", verbose=False, dump_to_file=False,
upload_resource=None, upload_table_name=None):
self.__invokedMethod = 'run_query'
self.__parameters['query'] = query
self.__parameters['name'] = name
self.__parameters['output_file'] = output_file
self.__parameters['output_format'] = output_format
self.__parameters['verbose'] = verbose
self.__parameters['dump_to_file'] = dump_to_file
self.__parameters['upload_resource'] = upload_resource
self.__parameters['upload_table_name'] = upload_table_name
job = DummyJob()
job.set_parameter('query', query)
job.set_parameter('format', output_format)
return job
def load_async_job(self, jobid=None, verbose=False):
self.__invokedMethod = 'load_async_job'
self.__parameters['jobid'] = jobid
self.__parameters['verbose'] = verbose
return None
def list_async_jobs(self, verbose=False):
self.__invokedMethod = 'list_async_jobs'
self.__parameters['verbose'] = verbose
return [DummyJob()]
def save_results(self, job, filename, verbose=False):
self.__invokedMethod = 'save_results'
self.__parameters['job'] = job
self.__parameters['filename'] = filename
self.__parameters['verbose'] = verbose
return None
def login(self, user, password, certificate_file, cookie_prefix=None,
login_url=None, verbose=False):
self.__invokedMethod = 'login'
self.__parameters['user'] = user
self.__parameters['password'] = password
self.__parameters['certificate_file'] = certificate_file
self.__parameters['cookie_prefix'] = cookie_prefix
self.__parameters['login_url'] = login_url
self.__parameters['verbose'] = verbose
return None
def logout(self, verbose=False):
self.__invokedMethod = 'logout'
self.__parameters['verbose'] = verbose
return None
def _TapPlus__getconnhandler(self):
return self
|
import re
import sys
input_path = sys.argv[1]
output_path = sys.argv[2]
print("input:", input_path)
print("output:", output_path)
count = 0
with open(input_path, 'r') as inputFile:
with open(output_path, 'a') as outputFile:
for line in inputFile:
if(line.split(",")[1]=="\"0\""):
print(line)
outputFile.write(line)
|
"""A new print function that adds new and exciting functionality."""
from .print import print, render
from .table import table
from .putil import clear_fmt, join, span, pad, number
from .div import hfill, center, div
from .constants import *
from .exception import RenderedException
__all__ = [
# Functions
"print", "render", "table", "div",
"clear_fmt", "join", "span", "pad", "number",
# Error
"RenderedException",
# Effects
"RESET", "BOLD", "FAINT", "ITALIC",
"UNDERLINE", "REVERSE", "CONCEAL", "STRIKEOUT",
# Colors
"BLACK", "RED", "GREEN", "YELLOW",
"BLUE", "MAGENTA", "CYAN", "WHITE",
# Modifiers
"BRIGHT", "BR", "BACKGROUND", "BG",
# Fonts
"SM", "STD", "BIG", "ISO1", "ISO2", "ISO3", "ISO4",
"SA", "DOOM", "DP", "L3D", "SMISO", "KB", "SLANT", "SMSLANT"
]
|
from collections import Counter
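# Return the length of the longest palindrome that can be built from the alphanumeric
# characters of s (case-insensitive): each pair of equal characters contributes 2, and
# one character with a leftover odd count can take the middle spot.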
def longest_palindrome(s):
s=s.lower()
check=Counter(s)
even=0
odd=0
for i in check:
if check[i]%2==0 and i.isalnum():
even+=check[i]
elif check[i]%2==1 and i.isalnum():
odd=max(odd, check[i])
even+=check[i]-1
return even + 1 if odd else even
|
from dbnd import PipelineTask, namespace, output, parameter
from test_dbnd.factories import FooConfig, TTask
namespace("n_tv")
class FirstTask(TTask):
foo = parameter(default="FooConfig")[FooConfig]
param = parameter(default="from_first")[str]
class SecondATask(FirstTask):
param = "from_second"
class SecondBTask(FirstTask):
pass
class InnerPipeTask(PipelineTask):
second_b = output
def band(self):
self.second_b = SecondBTask(task_name="innerB", param="from_pipe")
class BigPipeTask(PipelineTask):
second_a = output
second_b = output
inner_second_b = output
def band(self):
self.second_a = SecondATask().t_output
self.second_b = SecondBTask().t_output
self.inner_second_b = InnerPipeTask().second_b
namespace()
def assert_task_version(expected, task):
all_tasks = task.task_dag.subdag_tasks()
actual = {t.task_name: t.task_version for t in all_tasks}
print(actual)
assert expected == actual
class TestTaskVersion(object):
def test_sanity(self):
target = FirstTask()
assert_task_version({"n_tv.FirstTask": "1"}, target)
def test_default_version(self):
target = BigPipeTask()
assert_task_version(
{
"n_tv.SecondATask": "1",
"n_tv.SecondBTask": "1",
"innerB": "1",
"n_tv.InnerPipeTask": "1",
"n_tv.BigPipeTask": "1",
},
target,
)
def test_force_version(self):
target = BigPipeTask(task_version=2)
assert_task_version(
{
"n_tv.SecondATask": "2",
"n_tv.SecondBTask": "2",
"innerB": "2",
"n_tv.InnerPipeTask": "2",
"n_tv.BigPipeTask": "2",
},
target,
)
def test_force_specific_version(self):
target = BigPipeTask(override={SecondATask.task_version: 2})
assert_task_version(
{
"n_tv.SecondATask": "2",
"n_tv.SecondBTask": "1",
"innerB": "1",
"n_tv.InnerPipeTask": "1",
"n_tv.BigPipeTask": "1",
},
target,
)
def test_force_pipe_version(self):
target = BigPipeTask(override={InnerPipeTask.task_version: 2})
assert_task_version(
{
"n_tv.SecondATask": "1",
"n_tv.SecondBTask": "1",
"innerB": "2",
"n_tv.InnerPipeTask": "2",
"n_tv.BigPipeTask": "1",
},
target,
)
|
# Generated by Django 3.2.9 on 2021-11-23 12:50
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('core', '0007_alter_avaliacao_options'),
]
operations = [
migrations.AlterModelOptions(
name='topico_swebook_1',
options={'verbose_name': 'Tópico SWEBOOK 1', 'verbose_name_plural': 'Tópicos SWEBOOK 1'},
),
migrations.AlterModelOptions(
name='topico_swebook_2',
options={'verbose_name': 'Tópico SWEBOOK 2', 'verbose_name_plural': 'Tópicos SWEBOOK 2'},
),
migrations.AlterModelOptions(
name='topico_swebook_3',
options={'verbose_name': 'Tópico SWEBOOK 3', 'verbose_name_plural': 'Tópicos SWEBOOK 3'},
),
migrations.AddField(
model_name='exemplo',
name='autor',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.RESTRICT, to=settings.AUTH_USER_MODEL),
),
]
|
from allauth.socialaccount.tests import create_oauth2_tests
from allauth.tests import MockedResponse
from allauth.socialaccount.providers import registry
from .provider import PaypalProvider
class PaypalTests(create_oauth2_tests(registry.by_id(PaypalProvider.id))):
def get_mocked_response(self):
return MockedResponse(200, """
{
"user_id": "https://www.paypal.com/webapps/auth/server/64ghr894040044",
"name": "Jane Doe",
"given_name": "Jane",
"family_name": "Doe",
"email": "[email protected]"
}
""")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
TODO: Short description
TODO: Details description
"""
#TODO: all import block
import sys
import requests
__author__ = "Paweł Siergiejuk"
__date__ = "16/04/2019"
__version__ = "v0.0"
__email__ = "[email protected]"
__status__ = "Development"
class NetException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class BambooStatus:
SETUP = "setup"
PORT = "port"
URL = "url"
USER = "user"
PASSWD = "passwd"
HEADERS = {'Accept': 'application/json'}
def __init__(self, config):
self.url = config.get(self.SETUP, self.URL)
self.port = config.getint(self.SETUP, self.PORT)
self.user = config.get(self.SETUP, self.USER)
self.passwd = config.get(self.SETUP, self.PASSWD)
self.session = requests.Session()
self.session.auth = (self.user, self.passwd)
def __get_data_from_url__(self, url):
"""Method that get data from url"""
request = self.session.get(url, headers=self.HEADERS)
if request.status_code != 200:
raise NetException(request.reason)
return request.text
if __name__ == "__main__":
sys.exit()
|
import logging
log = logging.getLogger(__name__)
class NFS(object):
# TODO: Implement it
pass
|
from __future__ import annotations
from watchmen_model.system import DataSource, DataSourceType
from .data_source_oracle import OracleDataSourceHelper, OracleDataSourceParams
from .storage_oracle import StorageOracle, TopicDataStorageOracle
# noinspection PyRedeclaration
class Configuration:
dataSource: DataSource = DataSource(dataSourceType=DataSourceType.ORACLE)
params: OracleDataSourceParams = OracleDataSourceParams()
def host(self, host: str, port: int = 1521) -> Configuration:  # default to the standard Oracle listener port
self.dataSource.host = host
self.dataSource.port = str(port)
return self
def account(self, username: str, password: str) -> Configuration:
self.dataSource.username = username
self.dataSource.password = password
return self
def url(self, url: str) -> Configuration:
"""
url should include account information, like:
1. oracle://username:password@host:port/?sid=your_sid
2. oracle+cx_oracle://username:password@host:port/?sid=your_sid
3. oracle://username:password@host:port/?service_name=your_service_name
4. oracle+cx_oracle://username:password@host:port/?service_name=your_service_name
"""
self.dataSource.url = url
return self
def schema(self, schema: str) -> Configuration:
self.dataSource.name = schema
return self
def echo(self, enabled: bool = False) -> Configuration:
self.params.echo = enabled
return self
def build(self) -> StorageOracle:
return StorageOracleConfiguration(self.dataSource, self.params).create_storage()
def build_topic_data(self) -> TopicDataStorageOracle:
return StorageOracleConfiguration(self.dataSource, self.params).create_topic_data_storage()
class StorageOracleConfiguration:
"""
configuration of Oracle storage
"""
def __init__(self, data_source: DataSource, params: OracleDataSourceParams):
super().__init__()
self.helper = OracleDataSourceHelper(data_source, params)
def create_storage(self) -> StorageOracle:
return self.helper.acquire_storage()
def create_topic_data_storage(self) -> TopicDataStorageOracle:
return self.helper.acquire_topic_data_storage()
@staticmethod
def config() -> Configuration:
return Configuration()
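# Usage sketch (hypothetical connection details), chaining the fluent setters above:
#   StorageOracleConfiguration.config().host('db-host', 1521).account('user', 'pass').schema('my_schema').build()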
|
from dolfin import *
import numpy as np
import pickle
#######################
Nx=20
Ny=40
Nz=41
Ly=1.0
Lx=2.0
M=10
#Lx=1*Ly
Lz=0.1
Ho=1.2*(Lz/Nz)
#Ho=0.5
#DH=Hx
#DHZ=Hx
Hb=2.0*Lx/Nx
dx1=Lx/10
#######################
parameters["form_compiler"]["cpp_optimize"]=True
parameters["form_compiler"]["representation"]='uflacs'
parameters['std_out_all_processes'] = False
solver_parameters={"newton_solver":{"maximum_iterations":100,
"relative_tolerance": 1.0e-14,
"absolute_tolerance": 1.0e-7,"linear_solver": "mumps"}}
mesh = Mesh()
f = HDF5File(mesh.mpi_comm(), "meshq.hdf5", 'r')
f.read(mesh, "mesh", False)
materials=MeshFunction("size_t",mesh,mesh.topology().dim())
## Mark boundary subdomians
film=CompiledSubDomain("x[2]>=Rcut",Rcut=Lz-Ho) #,size=1.0)
materials.set_all(0)
film.mark(materials,1)
##wall.mark(materials,2)
bmats=File("matx.pvd")
bmats << materials
##########################################
V = VectorFunctionSpace(mesh,"CG",1)
left = CompiledSubDomain("near(x[0], side) && on_boundary", side = 0.0)
bott = CompiledSubDomain("near(x[2], side) && on_boundary", side = 0.0)
lefta = CompiledSubDomain("near(x[1], side) && on_boundary && x[0]<dx1", side = 0.0,dx1=dx1)
leftb = CompiledSubDomain("near(x[1], side) && on_boundary && x[0]<dx1", side = Ly,dx1=dx1)
leftup = CompiledSubDomain("near(x[2], side) && on_boundary && x[0]<dx1", side = Lz, dx1=dx1)
leftbott = CompiledSubDomain("near(x[2], side) && on_boundary && x[0]<dx1", side = 0.0, dx1=dx1)
right = CompiledSubDomain("near(x[0], side) && on_boundary", side = Lx)
righta = CompiledSubDomain("near(x[1], side) && on_boundary && x[0]>dx1", side = 0.0,dx1=Lx-dx1)
rightb = CompiledSubDomain("near(x[1], side) && on_boundary && x[0]>dx1", side = Ly ,dx1=Lx-dx1)
rightup = CompiledSubDomain("near(x[2], side) && on_boundary && x[0]>dx1", side = Lz, dx1=Lx-dx1)
rightbott = CompiledSubDomain("near(x[2], side) && on_boundary && x[0]>dx1", side = 0.0, dx1=Lx-dx1)
############################################
south1= CompiledSubDomain("near(x[2], side) && on_boundary && x[1]<dx1 && x[0]>=dx1 && x[0]<=dx2 ", side = 0.0,dx1=dx1,dx2=Lx-dx1)
southx= CompiledSubDomain("near(x[1], side) && on_boundary", side = 0.0)
north1= CompiledSubDomain("near(x[2], side) && on_boundary && x[1]>dx2 && x[0]>=dx1 && x[0]<=dx2 ", side = 0.0,dx1=dx1,dx2=Lx-dx1)
northx= CompiledSubDomain("near(x[1], side) && on_boundary", side = Ly)
boundary_parts = MeshFunction("size_t", mesh, mesh.topology().dim() - 1)
boundary_parts.set_all(0)
bott.mark(boundary_parts, 3)
left.mark(boundary_parts, 1)
#leftup.mark(boundary_parts, 3)
#leftbott.mark(boundary_parts, 4)
#lefta.mark(boundary_parts, 7)
#leftb.mark(boundary_parts, 8)
right.mark(boundary_parts, 2)
#rightup.mark(boundary_parts, 5)
#rightbott.mark(boundary_parts, 6)
#righta.mark(boundary_parts, 9)
#rightb.mark(boundary_parts, 10)
#south1.mark(boundary_parts, 12)
southx.mark(boundary_parts, 4)
#north1.mark(boundary_parts, 13)
northx.mark(boundary_parts, 5)
bmark = File("bmarks_mark.pvd")
bmark << boundary_parts
##############################################
#ds = Measure("ds", subdomain_data=boundary_parts)
#ds=ds(degree=4)
cl = Expression(("x0","ky*x[1]","R*(x[1])*(x[1]-z0)"),x0 = 0.0, ky = 0.0, z0=Ly,R=-0.00,degree=1)
cr = Expression(("x0","ky*x[1]","R*(x[1])*(x[1]-z0)"),x0 = 0.0, ky = 0.0, z0=Ly,R=-0.00,degree=1)
cb = Expression(("kx*x[0]","ky*x[1]","R*(x[1])*(x[1]-z0)"),kx= 0.0, ky = 0.0, z0=Ly,R=-0.00,degree=1)
cs = Expression(("kx*x[0]","ky*x[1]","R*(x[1])*(x[1]-z0)"),kx= 0.0, ky = 0.0, z0=Ly,R=-0.00,degree=1)
cn = Expression(("kx*x[0]","y0","R*(x[1])*(x[1]-z0)"),kx= 0.0, y0 = 0.0, z0=Ly,R=-0.00,degree=1)
#clya = Expression(("x0","y0","z0"),x0 = 0.0, y0 = 0.0, z0=0.0000,degree=1)
#clyb = Expression(("x0","y0","z0"),x0 = 0.0, y0= 0.0, z0=0.0000,degree=1)
#crya = Expression(("x0","y0","z0"),x0 = 0.0, y0 = 0.0, z0=0.0000,degree=1)
#cryb = Expression(("x0","y0","z0"),x0 = 0.0, y0= 0.0, z0=0.0000,degree=1)
#crys = Expression(("Ks*(x[0]-x0)","y0","z0"),Ks=0.0, x0 = dx1, y0= 0.0, z0=0.0000,degree=1)
#cryn = Expression(("Ks*(x[0]-x0)","y0","z0"),Ks=0.0, x0 = dx1, y0= 0.0, z0=0.0000,degree=1)
Pz = Expression((0.0,0.0,"pz"),pz=0,degree=1)
#bclu = DirichletBC(V, cl, leftup)
#bcld = DirichletBC(V, cl, leftbott)
bcl = DirichletBC(V, cl, left)
#bcla= DirichletBC(V, clya, lefta)
#bclb= DirichletBC(V, clyb, leftb)
#bcru = DirichletBC(V, cr, rightup)
#bcrd = DirichletBC(V, cr, rightbott)
bcr = DirichletBC(V, cr, right)
bcb = DirichletBC(V, cb, bott)
bcs = DirichletBC(V, cs, southx)
bcn = DirichletBC(V, cn, northx)
#bcra = DirichletBC(V, crya, righta)
#bcrb = DirichletBC(V, cryb, rightb)
#bcrs = DirichletBC(V, crys, south1)
#bcrn = DirichletBC(V, cryn, north1)
###########################################
# Define functions
du = TrialFunction(V) # Incremental displacement
v = TestFunction(V) # Test function
u = Function(V) # Displacement from previous iteration
B = Constant((0.0, -0.0,0.0)) # Body force per unit volume
##############
# Elasticity parameters
E1, nu1 = 1e6, 0.48
E2 = 25*E1
E3 = 25*E1
nu2=nu1
nu3=nu1
############################################
# Elasticity parameters 1 BULK-0
#E1, nu1 = 1.0, 0.46
mu1, lda1 = Constant(E1/(2*(1 + nu1))), Constant(E1*nu1/((1 + nu1)*(1 - 2*nu1)))
# Elasticity parameters 3 FILM - 2
#E3, nu3 = 20.0, 0.46
mu3, lda3 = Constant(E3/(2*(1 + nu3))), Constant(E3*nu3/((1 + nu3)*(1 - 2*nu3)))
#psisf = (mu3/2)*(Ic-3) - mu3*ln(J) + (lda3/2)*ln(J)**2
###########################################
d = u.geometric_dimension()
I = Identity(d) # Identity tensor
#Growth Film
#dgnx=1.0
#dgny=1.0
#dgnz=1.0
#Fgf = Expression( (("dgnx",0.0,0.0),(0.0,"dgny",0.0),(0.0,0.0,"dgnz")), dgnx=dgnx, dgny=dgny, dgnz=dgnz, degree=1)
#Fgfinv = inv(Fgf)
#Jgf = det(Fgf)
#Growth Sub
#dgnx=0.0
#Fgs = Expression ((("dgn", 0.0, 0.0), (0.0, "dgn2", 0.0), (0.0,0.0,"dgn3")),dgn=dgnx,dgn2=dgny,dgn3=dgny,degree=1)
#Fgs = Expression(("1+dgn"), dgn=dgnx, degree=1)
#Fgsinv = inv(Fgs)
#Jgs = det(Fgs) #Fgs ** 3
#Fgb= I
#Fgbinv=inv(Fgb)
#Jgb = det(Fgb)
# Kinematics
F = I + grad(u) # Deformation gradient
#FILM
#Fef = F * Fgfinv # Elastic deformation gradient
C = F.T * F # Elastic right Cauchy-Green tensor
# Invariants of deformation tensors
I = variable(tr(C))
Je = variable(det(F))
# Stored strain energy density (compressible neo-Hookean model)
psif = (mu3/2)*(I - 3) - mu3*ln(Je) + (lda3/2)*(ln(Je))**2
# Elastic second Piola-Kirchhoff stress tensor
#Sef = 2*diff(psif, Icef)*I + Jef*Jef*diff(psif, Jef)*inv(Cef)
## Total second Piola-Kirchhoff stress
#Sf = Jgf*Fgfinv * Sef * Fgfinv
## First Piola-Kirchhoff stress tensor
#Pf = F*Sf
#SUBSTRATE
#Fes = F * Fgsinv # Elastic deformation gradient
#Ces = Fes.T * Fes # Elastic right Cauchy-Green tensor
# Invariants of deformation tensors
#Ices = variable(tr(Ces))
#Jes = variable(det(Fes))
# Stored strain energy density (compressible neo-Hookean model)
psis = (mu1/2)*(I - 3) - mu1*ln(Je) + (lda1/2)*(ln(Je))**2
# Elastic second Piola-Kirchhoff stress tensor
#Ses = 2*diff(psis, Ices)*I + Jes*Jes*diff(psis, Jes)*inv(Ces)
# Total second Piola-Kirchhoff stress
#Ss = Jgs*Fgsinv * Ses * Fgsinv
# First Piola-Kirchhoff stress tensor
#Ps = F*Ss
##########################################################################################
dx = Measure('dx', domain=mesh, subdomain_data=materials)
dx = dx(degree=4)
ds = Measure("ds", subdomain_data=boundary_parts)
ds=ds(degree=4)
Pi=psif*dx(1)+psis*dx(0)
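# Note: from here on, F names the residual form (the directional derivative of the total potential energy Pi), not the deformation gradient defined above.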
F=derivative(Pi,u,v)
#F = inner(Ps, grad(v))*dx(0) + inner(Pf, grad(v))*dx(1)-dot(Pz,v)*ds(3)
# Compute Jacobian of F
J = derivative(F, u, du)
#############################################################################################
#bcsa = [bclu,bcld,bcru,bcrd,bcr,bcl,bcra,bcrb,bcla,bclb]
file = File("displacement.pvd");
#solve(F == 0, u , bcsa, J=J, solver_parameters={"newton_solver":{"maximum_iterations":100,
# "relative_tolerance": 1.0e-14,
# "absolute_tolerance": 1.0e-6,"linear_solver": "mumps"}})
#file = File("displacement.pvd");
file << u;
mu=0
d1=0.000
d2=0.000
#DX=0.01
#DY=0.01
ky=0.0
kx=0.0
DZ=-0.001
DX=0
DY=0
pzo=0.0
mp=1
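# Incremental loading loop: ramp the prescribed boundary displacements over 100 steps, updating the Expression parameters and re-solving the nonlinear problem at each step.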
for j in range(100):
print(j)
if j <5:
DY=0.05
#DX=0.05
mp=1
if j>=5 and j<10:
DY=0.01
#DX=0.01
mp=5
if j>=10:
#DY=1e-3
DX=1e-3
mp=10
DX=(Lx*DY)/(Ly-DY)
print(DX,DY)
d2+=(-1.0*DY)
d1+=(1.0*DX)
ky=d2/Ly
kxb=d1/Lx
cr.x0=d1
cr.ky=ky
cl.ky=ky
cb.kx=kxb
cb.ky=ky
cn.y0=d2
cn.kx=kxb
cs.kx=kxb
bcsa =[bcr,bcl,bcb,bcn,bcs]#,bcb]#S,bcrs,bcrn] # [bclu,bcld,bcru,bcrd,bcl,bcr]
solve(F == 0, u , bcsa, J=J, solver_parameters={"newton_solver":{"maximum_iterations":100,
"relative_tolerance": 1.0e-14,
"absolute_tolerance": 1.0e-6,"linear_solver": "mumps"}})
#file = File("displacement.pvd");
#if mu%10 ==0 :
# file << u;
if j%mp==0:
file << u;
#mu+=1
#file << u;
|
from pycket import interpreter as interp
from pycket import values, values_string, vector, util, values_regex
from pycket.prims.correlated import W_Correlated
from pycket.error import SchemeException
from pycket.hash import simple, equal, base
from pycket.assign_convert import assign_convert
from pycket.util import PerfRegion
mksym = values.W_Symbol.make
def to_rpython_list(r_list, unwrap_correlated=False, reverse=False, improper=False):
# converts a W_List into an RPython list; assumes r_list is proper unless improper=True
length = 0
acc = r_list
while(acc is not values.w_null):
length += 1
if improper:
length += 1
break
acc = acc.cdr()
acc = r_list
py_ls = [None]*length
out = False
for n in range(length):
a = None
if improper and not isinstance(acc, values.W_List):
a = acc.get_obj() if (unwrap_correlated and isinstance(acc, W_Correlated)) else acc
out = True
else:
a = acc.car().get_obj() if (unwrap_correlated and isinstance(acc.car(), W_Correlated)) else acc.car()
if reverse:
py_ls[length-n-1] = a
else:
py_ls[n] = a
if out:
break
acc = acc.cdr()
return py_ls, length
dir_sym = mksym(":D:")
bundle_sym = mksym(":B:")
linklet_sym = mksym("linklet")
import_sym = mksym("Import")
export_sym = mksym("Export")
def ast_to_sexp(form):
from pycket.prims.linklet import W_Linklet, W_LinkletBundle, W_LinkletDirectory
#util.console_log("ast->sexp is called with form : %s" % form.tostring(), 8)
if is_val_type(form, extra=[vector.W_Vector, base.W_HashTable, values.W_List, values.W_Symbol]):
return form
elif isinstance(form, W_Linklet):
name = form.name # W_Symbol
importss = form.importss # [[Import ...] ...]
exports = form.exports # {int_id:Export ...}
body_forms = form.forms # rlist of ASTs
# The AST contains gensymed references to all the variables
# the linklet closes on, so we need to serialize all the
# information we have in Import and Export objects to recreate
# the same gensymed bindings at the instantiation of the
# deserialized linklet.
# So it will look like this when serialized:
#
# (linklet name (((Import grp gen_id int_id ext_id) ...) ...) ((Export int_id gen_int_id ext_id) ...) body)
importss_rlist = [None]*len(importss)
for index, imp_group in enumerate(importss):
len_group = len(imp_group)
importss_inst = [None]*len_group
for i, imp_obj in enumerate(imp_group):
importss_inst[i] = values.to_list([import_sym, imp_obj.group, imp_obj.id, imp_obj.int_id, imp_obj.ext_id])
importss_rlist[index] = values.to_list(importss_inst)
importss_list = values.to_list(importss_rlist)
exports_rlist = [None]*len(exports)
i = 0
for k, exp_obj in exports.iteritems():
exports_rlist[i] = values.to_list([export_sym, k, exp_obj.int_id, exp_obj.ext_id])
i += 1
exports_list = values.to_list(exports_rlist)
body_forms_rlist = [None]*len(body_forms)
for index, ast_form in enumerate(body_forms):
body_forms_rlist[index] = ast_form.to_sexp()
linklet_rlist = [linklet_sym, name, importss_list, exports_list] + body_forms_rlist
linklet_s_exp = values.to_list(linklet_rlist)
return linklet_s_exp
elif isinstance(form, W_LinkletBundle) or isinstance(form, W_LinkletDirectory):
bd_sym = None
if isinstance(form, W_LinkletBundle):
bd_sym = mksym(":B:")
else:
bd_sym = mksym(":D:")
mapping = form.get_mapping()
l = mapping.length()
keys = [None]*l
vals = [None]*l
if isinstance(mapping, equal.W_EqualHashTable):
i = 0
for k, v in mapping.hash_items():
keys[i] = k
vals[i] = ast_to_sexp(v)
i += 1
return values.W_Cons.make(bd_sym, equal.W_EqualHashTable(keys, vals, immutable=True))
elif isinstance(mapping, simple.W_EqImmutableHashTable):
i = 0
for k, v in mapping.iteritems():
keys[i] = k
vals[i] = ast_to_sexp(v)
i += 1
return values.W_Cons.make(bd_sym, simple.make_simple_immutable_table(simple.W_EqImmutableHashTable, keys, vals))
else:
raise SchemeException("Something wrong with the bundle/directory mapping : %s" % mapping.tostring())
else:
return form.to_sexp()
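# Convert a (define-values (id ...) rhs) s-expression into a DefineValues AST node.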
def def_vals_to_ast(def_vals_sexp, exports, all_toplevels, linkl_imports, mutated_ids):
ls, ln = to_rpython_list(def_vals_sexp)
if not ln == 3:
raise SchemeException("defs_vals_to_ast : unhandled define-values form : %s" % def_vals_sexp.tostring())
names = ls[1] # def_vals_sexp.cdr().car()
names_ls, names_ln = to_rpython_list(names, unwrap_correlated=True)
the_name = names_ls[0].variable_name() if names_ln > 0 else ""
body = sexp_to_ast(ls[2], [], exports, all_toplevels, linkl_imports, mutated_ids, cell_ref=[], name=the_name)
return interp.DefineValues(names_ls, body, names_ls)
def lam_to_ast(lam_sexp, lex_env, exports, all_toplevels, linkl_imports, mutated_ids, cell_ref, name=""):
from pycket.expand import SourceInfo
lam_sexp_elements, l = to_rpython_list(lam_sexp)
if not (l == 3 or l == 2):
raise SchemeException("lam_to_ast : unhandled lambda form : %s" % lam_sexp.tostring())
if lam_sexp.car() is mksym("lambda"):
lam_sexp = lam_sexp.cdr()
formals_ = lam_sexp.car()
rest = None
formals_ls = []
formals_len = 0
if isinstance(formals_, values.W_Symbol):
# check for a "rest"
rest = formals_
lex_env.append(rest)
else:
# two passes over the formals
# 1) determine the rest arg and the number of formal args
while (formals_ is not values.w_null):
if isinstance(formals_, values.W_Symbol):
rest = formals_
lex_env.append(formals_)
break
elif formals_.car() is mksym("."):
# another check for a "rest"
if formals_.cdr() is values.w_null:
raise SchemeException("lam_to_ast : invalid lambda form : %s" % lam_sexp.tostring())
rest = formals_.cdr().car()
lex_env.append(rest)
break
formals_len += 1
formals_ = formals_.cdr()
# 2) make the r_list for formals
formals_ls = [None]*formals_len
formals_ = lam_sexp.car() # reset
index = 0
while isinstance(formals_, values.W_Cons) and formals_.car() is not mksym("."):
formals_ls[index] = formals_.car()
index += 1
formals_ = formals_.cdr()
body = sexp_to_ast(lam_sexp.cdr().car(), formals_ls + lex_env, exports, all_toplevels, linkl_imports, mutated_ids, cell_ref=[], name=name)
dummy = 1
return interp.make_lambda(formals_ls, rest, [body], SourceInfo(dummy, dummy, dummy, dummy, name))
def let_like_to_ast(let_sexp, lex_env, exports, all_toplevels, linkl_imports, mutated_ids, is_letrec, cell_ref):
let_ls, let_len = to_rpython_list(let_sexp)
# just a sanity check
if not (let_ls[0] is mksym("let-values") or (let_ls[0] is mksym("letrec-values") and is_letrec)):
raise SchemeException("let_to_ast : unhandled let form : %s" % let_sexp.tostring())
varss_rhss, varss_len = to_rpython_list(let_ls[1])
if is_letrec:
# populate lex_env
for rhs in varss_rhss: # rhs : ((id ...) rhs-expr)
ids, ids_len = to_rpython_list(rhs.car(), unwrap_correlated=True) # (id ...)
lex_env += ids
varss_list = [None] * varss_len
rhss_list = [None] * varss_len
num_ids = 0
i = 0
for w_vars_rhss in varss_rhss:
varr, varr_len = to_rpython_list(w_vars_rhss.car(), unwrap_correlated=True)
varss_list[i] = varr
rhsr = sexp_to_ast(w_vars_rhss.cdr().car(), lex_env, exports, all_toplevels, linkl_imports, mutated_ids, cell_ref=[])
rhss_list[i] = rhsr
i += 1
num_ids += varr_len
ids = [None] * num_ids
index = 0
for vars_ in varss_list:
for var_ in vars_:
ids[index] = var_ # W_Symbol
index += 1
let_body_ls = let_ls[2:]
body_ls = [None]*(let_len-2)
for index, b in enumerate(let_body_ls):
body_ls[index] = sexp_to_ast(b, ids + lex_env, exports, all_toplevels, linkl_imports, mutated_ids, cell_ref=[])
if varss_len == 0:
return interp.Begin.make(body_ls)
if is_letrec:
return interp.make_letrec(varss_list, rhss_list, body_ls)
else:
return interp.make_let(varss_list, rhss_list, body_ls)
def is_val_type(form, extra=[]):
val_types = [values.W_Number,
values.W_Void,
values.W_Bool,
values_string.W_String,
values.W_ImmutableBytes,
values.W_Character] + extra
for t in val_types:
if isinstance(form, t):
return True
return False
def is_imported(id_sym, linkl_importss):
for imp_index, imports_group in enumerate(linkl_importss):
for imp in imports_group:
if id_sym is imp.int_id:
return imp.id
return None
begin_sym = mksym("begin")
begin0_sym = mksym("begin0")
def_val_sym = mksym("define-values")
wcm_sym = mksym("with-continuation-mark")
variable_ref_sym = mksym("#%variable-reference")
caselam_sym = mksym("case-lambda")
lam_sym = mksym("lambda")
let_sym = mksym("let-values")
letrec_sym = mksym("letrec-values")
set_bang_sym = mksym("set!")
quote_sym = mksym("quote")
if_sym = mksym("if")
var_ref_sym = mksym("variable-ref")
var_ref_no_check_sym = mksym("variable-ref/no-check")
var_set_check_undef_sym = mksym("variable-set!/check-undefined")
var_set_sym = mksym("variable-set!")
var_prim_syms = [var_ref_sym, var_ref_no_check_sym, var_set_check_undef_sym, var_set_sym]
var_ref_mod_var = interp.ModuleVar(var_ref_sym, "#%kernel", var_ref_sym, None)
var_ref_no_check_mod_var = interp.ModuleVar(var_ref_no_check_sym, "#%kernel", var_ref_no_check_sym, None)
var_set_check_undef_mod_var = interp.ModuleVar(var_set_check_undef_sym, "#%kernel", var_set_check_undef_sym, None)
var_set_mod_var = interp.ModuleVar(var_set_sym, "#%kernel", var_set_sym, None)
known_mod_vars = {} # cache for kernel primitive ModuleVars
def sexp_to_ast(form, lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref=[], name=""):
#util.console_log("sexp->ast is called with form : %s" % form.tostring(), 8)
if isinstance(form, W_Correlated):
return sexp_to_ast(form.get_obj(), lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref, name)
elif is_val_type(form):
return interp.Quote(form)
elif isinstance(form, values.W_Symbol):
if form in cell_ref:
return interp.CellRef(form)
if form in lex_env:
return interp.LexicalVar(form)
if form in exports and (form in mutated_ids or form not in all_toplevels):
# dynamically find the W_LinkletVar for the exported variable
# possible point of optimization
rands = [interp.LinkletVar(exports[form].int_id)]
return interp.App.make(var_ref_mod_var, rands)
if form in all_toplevels:
return interp.ToplevelVar(form, is_free=False)
import_var_int_id = is_imported(form, linkl_importss)
if import_var_int_id: # this is gensymed internal variable name
# dynamically find the W_LinkletVar for the imported variable
# possible point of optimization
rands = [interp.LinkletVar(import_var_int_id)]
return interp.App.make(var_ref_no_check_mod_var, rands)
# kernel primitive ModuleVar
if form in known_mod_vars:
return known_mod_vars[form]
m_var = interp.ModuleVar(form, "#%kernel", form, None)
known_mod_vars[form] = m_var
return m_var
elif isinstance(form, values.W_List):
c = form.car()
### these are for the deserialization of the linklet body
if c in var_prim_syms:
linklet_var_sym = form.cdr().car()
rator, rands = None, None
if c is var_set_sym or c is var_set_check_undef_sym:
rator = var_set_mod_var if c is var_set_sym else var_set_check_undef_mod_var
linklet_var = interp.LinkletVar(linklet_var_sym)
new_val = sexp_to_ast(form.cdr().cdr().car(), lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref, name)
mode = interp.Quote(values.w_false) # FIXME: possible optimization
rands = [linklet_var, new_val, mode]
return interp.App.make(rator, rands)
if c is var_ref_sym or c is var_ref_no_check_sym:
rator = var_ref_mod_var if c is var_ref_sym else var_ref_no_check_mod_var
rands = [interp.LinkletVar(linklet_var_sym)]
return interp.App.make(rator, rands)
###
if c is begin_sym:
begin_exprs, ln = to_rpython_list(form.cdr())
return interp.Begin.make([sexp_to_ast(f, lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref, name) for f in begin_exprs])
elif c is begin0_sym:
fst = sexp_to_ast(form.cdr().car(), lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref, name)
rst_exprs, rest_len = to_rpython_list(form.cdr().cdr())
rst = [sexp_to_ast(f, lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref, name) for f in rst_exprs]
if rest_len == 0:
return fst
else:
return interp.Begin0.make(fst, rst)
elif c is def_val_sym:
return def_vals_to_ast(form, exports, all_toplevels, linkl_importss, mutated_ids)
elif c is wcm_sym:
from pycket.prims.general import elidable_length
if elidable_length(form) != 4:
raise SchemeException("Unrecognized with-continuation-mark form : %s" % form.tostring())
key = sexp_to_ast(form.cdr().car(), lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref, name)
val = sexp_to_ast(form.cdr().cdr().car(), lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref, name)
body = sexp_to_ast(form.cdr().cdr().cdr().car(), lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref, name)
return interp.WithContinuationMark(key, val, body)
elif c is variable_ref_sym:
if form.cdr() is values.w_null: # (variable-reference)
return interp.VariableReference(None, None)
elif form.cdr().cdr() is values.w_null: # (variable-reference id)
if isinstance(form.cdr().car(), values.W_Symbol):
var = sexp_to_ast(form.cdr().car(), lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref, name)
return interp.VariableReference(var, "dummy-path.rkt") # FIXME
elif isinstance(form.cdr().car(), values.W_Fixnum):
# because we're 'writing' variable-reference with is_mutable information
is_mut = False
if form.cdr().car().toint() != 0:
is_mut = True
return interp.VariableReference(None, None, is_mut)
else:
raise SchemeException("Invalid variable-reference form : %s -- arg type : %s" % (form.tostring(), form.cdr().car()))
elif form.cdr().cdr().cdr() is values.w_null: # (variable-reference 1 2)
raise SchemeException("Unhandled variable-reference form : %s" % (form.tostring()))
else:
# This is to handle varrefs serialized by Pycket
# no Racket varref has more than 1 argument
var_ = form.cdr().car()
path_ = form.cdr().cdr().car()
mut_ = form.cdr().cdr().cdr().car()
var = None
path = None
mut = False
if var_ is not values.w_false:
var = sexp_to_ast(var_, lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref, name)
if isinstance(path_, values.W_Object) and path_ is not values.w_false:
path = path_.tostring()
elif isinstance(path_, str):
path = path_
if mut_ is values.w_true:
mut = True
return interp.VariableReference(var, path, mut)
elif c is caselam_sym:
maybe_rec_sym_part = values.w_null
if form.cdr() is not values.w_null:
maybe_rec_sym_part = form.cdr().car() # (recursive-sym <sym>)
rec_sym = None
new_lex_env = lex_env
lams_part = form.cdr()
if isinstance(maybe_rec_sym_part, values.W_Cons) and maybe_rec_sym_part is not values.w_null:
if maybe_rec_sym_part.car() is mksym("recursive-sym"):
# then we're reading a caselam that we wrote
lams_part = form.cdr().cdr()
if maybe_rec_sym_part.cdr() is not values.w_null:
rec_sym = maybe_rec_sym_part.cdr().car()
new_lex_env = lex_env + [rec_sym]
lams_expr, ln = to_rpython_list(lams_part)
lams = [lam_to_ast(f, new_lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref, name) for f in lams_expr]
return interp.CaseLambda(lams, rec_sym)
elif c is lam_sym:
return interp.CaseLambda([lam_to_ast(form, lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref, name)])
elif c is let_sym:
return let_like_to_ast(form, lex_env, exports, all_toplevels, linkl_importss, mutated_ids, False, cell_ref)
elif c is letrec_sym:
return let_like_to_ast(form, lex_env, exports, all_toplevels, linkl_importss, mutated_ids, True, cell_ref)
elif c is set_bang_sym:
import_id = is_imported(form.cdr().car(), linkl_importss)
if import_id:
raise SchemeException("cannot mutate imported variable : %s" % form.tostring())
cr = cell_ref
target = form.cdr().car()
rhs = sexp_to_ast(form.cdr().cdr().car(), lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref, name)
# if it's for an exported variable, don't emit a set!
# we're going to variable-set! the exported variable
if target in exports:
rator = var_set_check_undef_mod_var
mode = interp.Quote(values.w_false) # FIXME: possible optimization
rands = [interp.LinkletVar(exports[target].int_id), rhs, mode]
return interp.App.make(rator, rands)
if target in lex_env:
cr = [target] if not cr else [target] + cr
var = sexp_to_ast(form.cdr().car(), lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref=cr, name=name)
rhs = sexp_to_ast(form.cdr().cdr().car(), lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref, name)
assert isinstance(var, interp.Var)
return interp.SetBang(var, rhs)
elif c is quote_sym:
if form.cdr() is values.w_null or form.cdr().cdr() is not values.w_null:
raise SchemeException("malformed quote form : %s" % form.tostring())
return interp.Quote(form.cdr().car())
elif c is if_sym:
tst_w = form.cdr().car()
thn_w = form.cdr().cdr().car()
els_w = form.cdr().cdr().cdr().car()
tst = sexp_to_ast(tst_w, lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref, name)
thn = sexp_to_ast(thn_w, lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref, name)
els = sexp_to_ast(els_w, lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref, name)
return interp.If.make(tst, thn, els)
else:
form_rator = sexp_to_ast(c, lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref)
rands_ls, rands_len = to_rpython_list(form.cdr())
rands = [sexp_to_ast(r, lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref, name) for r in rands_ls]
return interp.App.make(form_rator, rands)
else:
raise SchemeException("Don't know what to do with this form yet : %s" % form.tostring())
def looks_like_linklet(sexp):
# (linklet () () ...)
# we know the sexp is not w_null
if not isinstance(sexp, values.W_Cons):
return False
if sexp.car() is not linklet_sym:
return False
if not isinstance(sexp.cdr(), values.W_Cons):
return False
if not isinstance(sexp.cdr().cdr(), values.W_Cons):
return False
maybe_name = sexp.cdr().car()
named = isinstance(maybe_name, values.W_Symbol)
if named and not isinstance(sexp.cdr().cdr().cdr(), values.W_Cons):
return False
rest = sexp.cdr() if (not named) else sexp.cdr().cdr()
# check the imports/exports
_imports = rest.car()
_exports = rest.cdr().car()
# FIXME : also check the imports and exports' inner structures
if not isinstance(_imports, values.W_List) or not isinstance(_exports, values.W_List):
return False
return True
class Import(object):
def __init__(self, group, id, int_id, ext_id):
self.group = group
self.id = id
self.int_id = int_id
self.ext_id = ext_id
def get_imports_from_w_importss_sexp(w_importss):
from pycket.interpreter import Gensym
importss_acc, importss_len = to_rpython_list(w_importss)
importss_list = [None]*importss_len
for index, importss_current in enumerate(importss_acc):
importss_group_ls, group_len = to_rpython_list(importss_current)
inner_acc = [None]*group_len
for i, c in enumerate(importss_group_ls):
if isinstance(c, values.W_Symbol):
w_imp_sym = Gensym.gensym(c.tostring())
inner_acc[i] = Import(values.W_Fixnum(index), w_imp_sym, c, c)
elif isinstance(c, values.W_List):
if c.cdr().cdr() is not values.w_null:
raise SchemeException("Unhandled renamed import form : %s" % c.tostring())
external_id = c.car().get_obj() if isinstance(c.car(), W_Correlated) else c.car()
internal_id = c.cdr().car().get_obj() if isinstance(c.cdr().car(), W_Correlated) else c.cdr().car()
w_internal_id = Gensym.gensym(internal_id.tostring())
inner_acc[i] = Import(values.W_Fixnum(index), w_internal_id, internal_id, external_id)
elif isinstance(c, W_Correlated):
cc = c.get_obj()
w_cc = Gensym.gensym(cc.tostring())
inner_acc[i] = Import(values.W_Fixnum(index), w_cc, cc, cc)
else:
raise SchemeException("uncrecognized import : %s" % c.tostring())
importss_list[index] = inner_acc
return importss_list
class Export(object):
def __init__(self, int_gensym, ext_id):
self.int_id = int_gensym
self.ext_id = ext_id
def get_exports_from_w_exports_sexp(w_exports):
from pycket.interpreter import Gensym
r_exports, exports_len = to_rpython_list(w_exports)
exports = {}
for i, exp in enumerate(r_exports):
if isinstance(exp, values.W_WrappedConsProper):
car = exp.car()
internal_name = car.get_obj() if isinstance(car, W_Correlated) else car
cadr = exp.cdr().car()
external_name = cadr.get_obj() if isinstance(cadr, W_Correlated) else cadr
w_internal_name = Gensym.gensym(internal_name.tostring())
# don't gensym the external_id
exports[internal_name] = Export(w_internal_name, external_name)
else:
c_exp = exp.get_obj() if isinstance(exp, W_Correlated) else exp
w_c_exp = Gensym.gensym(c_exp.tostring())
exports[c_exp] = Export(w_c_exp, c_exp)
return exports
# collect the ids in define-values forms
def create_toplevel_linklet_vars(forms_ls, linklet):
linkl_toplevels = {} # {W_Symbol:LinkletVar}
for form in forms_ls:
if isinstance(form, W_Correlated):
form = form.get_obj()
if isinstance(form, values.W_List) and form.car() is mksym("define-values"):
ids = form.cdr().car()
ids_ls, ids_len = to_rpython_list(ids, unwrap_correlated=True)
# create LinkletVar for each id
for id in ids_ls:
if id in linkl_toplevels:
raise SchemeException("duplicate binding name : %s" % id.tostring())
linkl_toplevels[id] = interp.LinkletDefinedVar(id, defining_linklet=linklet)
return linkl_toplevels
# collect the ids in define-values forms
def get_toplevel_defined_ids(forms_ls):
linkl_toplevels = {} # {W_Symbol:None}
for form in forms_ls:
if isinstance(form, W_Correlated):
form = form.get_obj()
if isinstance(form, values.W_List) and form.car() is mksym("define-values"):
ids = form.cdr().car()
ids_ls, ids_len = to_rpython_list(ids, unwrap_correlated=True)
# create LinkletVar for each id
for id in ids_ls:
if id in linkl_toplevels:
raise SchemeException("duplicate binding name : %s" % id.tostring())
linkl_toplevels[id] = None
return linkl_toplevels
def extend_dict(a, b):
for k,v in b.iteritems():
a[k] = v
return a
def extend_dicts(list_of_dicts):
a = {}
for d in list_of_dicts:
a = extend_dict(a, d)
return a
def find_mutated(form):
if isinstance(form, W_Correlated):
return find_mutated(form.get_obj())
elif isinstance(form, values.W_Cons):
if not form.is_proper_list():
elements, _ = to_rpython_list(form, unwrap_correlated=True, improper=True)
return extend_dicts([find_mutated(f) for f in elements])
c = form.car()
if c is set_bang_sym:
return extend_dict({form.cdr().car():None}, find_mutated(form.cdr().cdr().car()))
elif isinstance(c, values.W_Cons) and c is not values.w_null:
all_exprs, _ = to_rpython_list(form, unwrap_correlated=True)
return extend_dicts([find_mutated(f) for f in all_exprs])
else:
rest_exprs, _ = to_rpython_list(form.cdr(), unwrap_correlated=True)
return extend_dicts([find_mutated(f) for f in rest_exprs])
else:
return {}
def process_w_body_sexp(w_body, importss_list, exports, from_zo=False):
body_forms_ls, body_length = to_rpython_list(w_body, unwrap_correlated=True)
cur_toplevels = {}
# make a recursive (!arbitrarily deep!) pass to find set!ed ids
mutated = find_mutated(w_body) # {W_Symbol:None}
# another pass to find toplevel defined ids
all_toplevels = get_toplevel_defined_ids(body_forms_ls)
variable_set_lines = 0
for d in all_toplevels:
if d in exports:
variable_set_lines += 1
# for each exported defined id, we need to add a variable-set! for
# the exported var with the defined id
total_forms_len = body_length + variable_set_lines
body_forms = [None]*(total_forms_len)
added = 0
current_index = 0
# this juggling is because we don't know how many extra ast forms
# we're going to add for the exported defined ids
for b in body_forms_ls:
b_form = sexp_to_ast(b, [], exports, all_toplevels, importss_list, mutated)
if not from_zo: # no need to normalize if it's already normalized
with PerfRegion("compile-normalize"):
b_form = interp.Context.normalize_term(b_form)
with PerfRegion("compile-assign-convert"):
b_form = assign_convert(b_form)
body_forms[current_index+added] = b_form
current_index += 1
if isinstance(b_form, interp.DefineValues):
for n in b_form.names:
if n in exports:
rator = interp.ModuleVar(var_set_sym, "#%kernel", var_set_sym, None)
exp_var = interp.LinkletVar(exports[n].int_id)
top_var = interp.ToplevelVar(n, is_free=False)
mode = interp.Quote(values.w_false) # FIXME: possible optimization
rands = [exp_var, top_var, mode]
body_forms[current_index+added] = interp.App.make(rator,rands)
added += 1
return body_forms
def looks_like_an_import(sexp):
# should be (Import grp gen_id int_id ext_id)
if not isinstance(sexp, values.W_Cons):
return False
if sexp.car() is not import_sym:
return False
if not isinstance(sexp.cdr(), values.W_Cons):
return False
if not isinstance(sexp.cdr().cdr(), values.W_Cons):
return False
if not isinstance(sexp.cdr().cdr().cdr(), values.W_Cons):
return False
if not isinstance(sexp.cdr().cdr().cdr().cdr(), values.W_Cons):
return False
return True
# We can't use the same code path as compile-linklet anymore, because
# what we serialize is now specific to Pycket (it carries extra info
# beyond the regular linklet s-expr that the expander would pass).
def deserialize_importss(w_importss):
importss_acc, importss_len = to_rpython_list(w_importss)
importss_list = [None]*importss_len
for index, importss_current in enumerate(importss_acc):
importss_group_ls, group_len = to_rpython_list(importss_current)
inner_acc = [None]*group_len
for i, c in enumerate(importss_group_ls):
if looks_like_an_import(c):
w_grp_index = c.cdr().car()
id = c.cdr().cdr().car()
int_id = c.cdr().cdr().cdr().car()
ext_id = c.cdr().cdr().cdr().cdr().car()
inner_acc[i] = Import(w_grp_index, id, int_id, ext_id)
else:
raise SchemeException("looks like an invalid serialization of import : %s" % c.tostring())
importss_list[index] = inner_acc
return importss_list
def looks_like_an_export(sexp):
# should be (Export int_id gen_int_id ext_id)
if not isinstance(sexp, values.W_Cons):
return False
if sexp.car() is not export_sym:
return False
if not isinstance(sexp.cdr(), values.W_Cons):
return False
if not isinstance(sexp.cdr().cdr(), values.W_Cons):
return False
if not isinstance(sexp.cdr().cdr().cdr(), values.W_Cons):
return False
return True
# See the comment for deserialize_importss
def deserialize_exports(w_exports):
r_exports, exports_len = to_rpython_list(w_exports)
exports = {}
for i, exp in enumerate(r_exports):
if looks_like_an_export(exp):
k = exp.cdr().car()
gen_int_id = exp.cdr().cdr().car()
ext_id = exp.cdr().cdr().cdr().car()
exports[k] = Export(gen_int_id, ext_id)
else:
raise SchemeException("looks like an invalid serialization of export : %s" % exp.tostring())
return exports
def deserialize_loop(sexp):
from pycket.prims.linklet import W_Linklet, W_LinkletBundle, W_LinkletDirectory
from pycket.env import w_global_config
if isinstance(sexp, values.W_Cons):
c = sexp.car()
if c is dir_sym:
dir_map = sexp.cdr()
return W_LinkletDirectory(deserialize_loop(dir_map))
elif c is bundle_sym:
bundle_map = sexp.cdr()
return W_LinkletBundle(deserialize_loop(bundle_map))
elif looks_like_linklet(sexp):
# Unify this with compile_linklet
if isinstance(sexp.cdr().car(), values.W_List):
w_name = mksym("anonymous")
w_importss = sexp.cdr().car()
w_exports = sexp.cdr().cdr().car()
w_body = sexp.cdr().cdr().cdr()
else:
w_name = sexp.cdr().car()
w_importss = sexp.cdr().cdr().car()
w_exports = sexp.cdr().cdr().cdr().car()
w_body = sexp.cdr().cdr().cdr().cdr()
importss_list = deserialize_importss(w_importss)
# Process the exports
exports = deserialize_exports(w_exports)
# Process the body
with PerfRegion("compile-sexp-to-ast"):
body_forms = process_w_body_sexp(w_body, importss_list, exports, from_zo=True)
return W_Linklet(w_name, importss_list, exports, body_forms)
else:
# get the length
ls = sexp
length = 0
is_improper = False
while ls is not values.w_null:
if isinstance(ls, values.W_Cons):
length += 1
ls = ls.cdr()
else:
is_improper = True
ls = values.w_null
# allocate an r_list (to avoid reversing w_list)
if is_improper:
sexp_ls = [None]*(length+1)
else:
sexp_ls = [None]*length
# second pass, get the elements
ls = sexp
for i in range(length-1, -1, -1):
sexp_ls[i] = ls.car()
ls = ls.cdr()
if is_improper:
sexp_ls[length] = ls
# make the new list
new = values.w_null
for s in sexp_ls:
new = values.W_Cons.make(deserialize_loop(s), new)
return new
elif isinstance(sexp, simple.W_EqImmutableHashTable):
l = sexp.length()
keys = [None]*l
vals = [None]*l
i = 0
for k, v in sexp.iteritems():
keys[i] = k
vals[i] = deserialize_loop(v)
i += 1
return simple.make_simple_immutable_table(simple.W_EqImmutableHashTable, keys, vals)
elif isinstance(sexp, equal.W_EqualHashTable):
l = sexp.length()
keys = [None]*l
vals = [None]*l
i = 0
for k, v in sexp.hash_items():
keys[i] = k
vals[i] = deserialize_loop(v)
i += 1
return equal.W_EqualHashTable(keys, vals, immutable=True)
elif isinstance(sexp, vector.W_Vector):
new = [None]*sexp.length()
items = sexp.get_strategy().ref_all(sexp)
for index, obj in enumerate(items):
new[index] = deserialize_loop(obj)
return vector.W_Vector.fromelements(new, sexp.immutable())
else:
return sexp
|
import unittest
from activity.activity_VerifyLaxResponse import activity_VerifyLaxResponse
import activity
import settings_mock
from ddt import ddt, data
from mock import patch
def fake_emit_monitor_event(settings, item_identifier, version, run, event_type, status, message):
pass
@ddt
class TestVerifyLaxResponse(unittest.TestCase):
def setUp(self):
self.verifylaxresponse = activity_VerifyLaxResponse(settings_mock, None, None, None, None)
@data({
"run": "74e22d8f-6b5d-4fb7-b5bf-179c1aaa7cff",
"article_id": "00353",
"result": "ingested",
"status": "vor",
"version": "1",
"expanded_folder": "00353.1/74e22d8f-6b5d-4fb7-b5bf-179c1aaa7cff",
"eif_location": "00353.1/74e22d8f-6b5d-4fb7-b5bf-179c1aaa7cff/elife-00353-v1.json",
"requested_action": "ingest",
"message": None,
"update_date": "2012-12-13T00:00:00Z"
})
@patch.object(activity_VerifyLaxResponse, 'emit_monitor_event')
def test_do_activity(self, data, fake_emit_monitor):
fake_emit_monitor.side_effect = fake_emit_monitor_event
result = self.verifylaxresponse.do_activity(data)
fake_emit_monitor.assert_called_with(settings_mock,
data["article_id"],
data["version"],
data["run"],
"Verify Lax Response",
"end",
" Finished Verification. Lax has responded with result: ingested."
" Article: " + data["article_id"])
self.assertEqual(result, self.verifylaxresponse.ACTIVITY_SUCCESS)
@data({
"run": "74e22d8f-6b5d-4fb7-b5bf-179c1aaa7cff",
"article_id": "00353",
"result": "error",
"status": "vor",
"version": "1",
"expanded_folder": "00353.1/74e22d8f-6b5d-4fb7-b5bf-179c1aaa7cff",
"eif_location": "00353.1/74e22d8f-6b5d-4fb7-b5bf-179c1aaa7cff/elife-00353-v1.json",
"requested_action": "ingest",
"message": None,
"update_date": "2012-12-13T00:00:00Z"
})
@patch.object(activity_VerifyLaxResponse, 'emit_monitor_event')
def test_do_activity_error_no_message(self, data, fake_emit_monitor):
fake_emit_monitor.side_effect = fake_emit_monitor_event
result = self.verifylaxresponse.do_activity(data)
fake_emit_monitor.assert_called_with(settings_mock,
data["article_id"],
data["version"],
data["run"],
"Verify Lax Response",
"error",
"Lax has not ingested article " + data["article_id"] +
" result from lax:" + str(data['result']) + '; message from lax: ' + "(empty message)")
self.assertEqual(result, self.verifylaxresponse.ACTIVITY_PERMANENT_FAILURE)
@data({
"run": "74e22d8f-6b5d-4fb7-b5bf-179c1aaa7cff",
"article_id": "00353",
"result": "error",
"status": "poa",
"version": "1",
"expanded_folder": "00353.1/74e22d8f-6b5d-4fb7-b5bf-179c1aaa7cff",
"eif_location": "00353.1/74e22d8f-6b5d-4fb7-b5bf-179c1aaa7cff/elife-00353-v1.json",
"requested_action": "ingest",
"message": "An error has occurred",
"update_date": "2012-12-13T00:00:00Z"
})
@patch.object(activity_VerifyLaxResponse, 'emit_monitor_event')
def test_do_activity_error(self, data, fake_emit_monitor):
fake_emit_monitor.side_effect = fake_emit_monitor_event
result = self.verifylaxresponse.do_activity(data)
fake_emit_monitor.assert_called_with(settings_mock,
data["article_id"],
data["version"],
data["run"],
"Verify Lax Response",
"error",
"Lax has not ingested article " + data["article_id"] +
" result from lax:" + str(data['result']) + '; message from lax: ' + data["message"])
self.assertEqual(result, self.verifylaxresponse.ACTIVITY_PERMANENT_FAILURE)
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2018 CERN.
# Copyright (C) 2018 RERO.
#
# Invenio-Circulation is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""."""
|
import getpass
import json
import logging
import warnings
from collections import Counter, defaultdict, namedtuple
from json import JSONDecodeError
from typing import Any, Dict, List, Tuple
import apteco_api as aa
import PySimpleGUI
from apteco.data.apteco_logo import APTECO_LOGO
from apteco.exceptions import (
ApiResultsError,
DeserializeError,
TablesError,
VariablesError,
)
from apteco.tables import Table, TablesAccessor
from apteco.variables import (
ArrayVariable,
DateTimeVariable,
DateVariable,
FlagArrayVariable,
NumericVariable,
ReferenceVariable,
SelectorVariable,
TextVariable,
Variable,
VariablesAccessor,
)
NOT_ASSIGNED: Any = object()
VARIABLES_PER_PAGE = 1000
class Session:
def __init__(self, credentials: "Credentials", system: str):
self._unpack_credentials(credentials)
self._create_client()
self.system = system
self._fetch_system_info()
tables_without_vars, master_table_name = InitializeTablesAlgorithm(self).run()
variables, tables = InitializeVariablesAlgorithm(
self, tables_without_vars
).run()
self.tables = TablesAccessor(tables)
self.variables = VariablesAccessor(variables)
self.master_table = self.tables[master_table_name]
def _unpack_credentials(self, credentials):
"""Copy credentials data into session."""
self.base_url = credentials.base_url
self.data_view = credentials.data_view
self.session_id = credentials.session_id
self.access_token = credentials.access_token
self.user = credentials.user
def _create_client(self):
"""Create an authorized API client."""
config = aa.Configuration()
config.host = self.base_url
config.api_key = {"Authorization": self.access_token}
config.api_key_prefix = {"Authorization": "Bearer"}
self._config = config
self.api_client = aa.ApiClient(configuration=self._config)
def _fetch_system_info(self):
"""Fetch FastStats system info from API and add to session."""
systems_controller = aa.FastStatsSystemsApi(self.api_client)
result = systems_controller.fast_stats_systems_get_fast_stats_system(
self.data_view, self.system
)
self.system_info = FastStatsSystem(
name=result.name,
description=result.description,
build_date=result.fast_stats_build_date,
view_name=result.view_name,
)
def _to_dict(self):
return {
"base_url": self.base_url,
"data_view": self.data_view,
"session_id": self.session_id,
"access_token": self.access_token,
"user": self.user._asdict(),
"system": self.system,
}
@staticmethod
def _from_dict(d):
try:
credentials = Credentials(
d["base_url"],
d["data_view"],
d["session_id"],
d["access_token"],
User(**d["user"]),
)
system = d["system"]
except KeyError as e:
raise DeserializeError(f"Data missing from 'Session' object: no {e} found.")
except TypeError as exc: # arguments missing from User __new__() call
raise DeserializeError(
f"The following parameter(s) were missing from 'User' object: "
f"{exc.args[0].split(':')[1].strip()}"
)
else:
return Session(credentials, system)
def serialize(self):
return json.dumps(self._to_dict())
@staticmethod
def deserialize(s: str):
try:
d = json.loads(s)
except JSONDecodeError:
raise DeserializeError("The given input could not be deserialized.")
else:
return Session._from_dict(d)
User = namedtuple("User", ["username", "first_name", "surname", "email_address"])
class Credentials:
"""Class to hold credentials from the simple login process."""
def __init__(
self,
base_url: str,
data_view: str,
session_id: str,
access_token: str,
user: User,
):
"""
Args:
base_url (str): API base URL, normally ending '/OrbitAPI'
data_view (str): name of data view
session_id (str): Apteco session ID
access_token (str): access token for current session
user (User): API user
"""
self.base_url = base_url
self.data_view = data_view
self.session_id = session_id
self.access_token = access_token
self.user = user
FastStatsSystem = namedtuple(
"FastStatsSystem", ["name", "description", "build_date", "view_name"]
)
def login(base_url: str, data_view: str, system: str, user: str) -> Session:
"""Log in to the API without supplying password directly.
Args:
base_url (str): API base URL, normally ending '/OrbitAPI'
data_view (str): DataView being logged into
system (str): FastStats system to connect to
user (str): username of API user
Returns:
Session: API session object
"""
return login_with_password(
base_url, data_view, system, user, password=_get_password()
)
def login_with_password(
base_url: str, data_view: str, system: str, user: str, password: str
) -> Session:
"""Log in to the API, supplying password directly.
Args:
base_url (str): API base URL, normally ending '/OrbitAPI'
data_view (str): DataView being logged into
system (str): FastStats system to connect to
user (str): username of API user
password (str): password for this user
Returns:
Session: API session object
"""
credentials = SimpleLoginAlgorithm(base_url, data_view).run(user, password)
return Session(credentials, system)
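# A minimal usage sketch (hypothetical URL, DataView, system and username;
# shown only to illustrate the two login helpers above):
#
#     session = login_with_password(
#         "https://example.com/OrbitAPI", "MyDataView", "MySystem",
#         "my_user", password="my_password",
#     )
#     print(session.master_table.name)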
def _get_password(prompt: str = "Enter your password: ") -> str:
"""Get password from user without displaying it on screen."""
try:
with warnings.catch_warnings():
warnings.filterwarnings("error")
return getpass.getpass(prompt)
except getpass.GetPassWarning:
return PySimpleGUI.PopupGetText(
prompt,
password_char="*",
title="Apteco API",
button_color=("#ffffff", "#004964"),
icon=APTECO_LOGO,
)
# https://gieseanw.wordpress.com/2019/05/10/algorithms-as-objects/
class SimpleLoginAlgorithm:
"""Class holding the algorithm to carry out a simple login.
Attributes:
base_url (str): API base URL, normally ending '/OrbitAPI'
data_view (str): DataView being logged into
api_client (aa.ApiClient): API client used to log in
session_id (str): Apteco session ID for the created session
access_token (str): access token for the created session
user (User): API user
Methods:
run(): entry point to run the algorithm
"""
def __init__(self, base_url: str, data_view: str):
"""
Args:
base_url (str): API base URL, normally ending '/OrbitAPI'
data_view (str): DataView being logged into
"""
self.base_url = base_url
self.data_view = data_view
def run(self, user: str, password: str) -> Credentials:
"""Run the algorithm with the given login credentials.
Args:
user (str): username of API user
password (str): password for this user
Returns:
Credentials: API session credentials
"""
self._create_unauthorized_client()
self._simple_login(user, password)
self._create_credentials()
return self.credentials
def _create_unauthorized_client(self):
"""Create an unauthorized API client."""
config = aa.Configuration()
config.host = self.base_url
self._config = config
self.api_client = aa.ApiClient(configuration=self._config)
def _simple_login(self, user, password):
"""Call API to perform simple login."""
sessions_controller = aa.SessionsApi(self.api_client)
login_response = sessions_controller.sessions_create_session_simple(
self.data_view, user, password
)
self.session_id = login_response.session_id
self.access_token = login_response.access_token
self.user = User(
username=login_response.user.username,
first_name=login_response.user.firstname,
surname=login_response.user.surname,
email_address=login_response.user.email_address,
)
def _create_credentials(self):
"""Initialize session credentials object."""
self.credentials = Credentials(
self.base_url, self.data_view, self.session_id, self.access_token, self.user
)
class InitializeTablesAlgorithm:
"""Class holding the algorithm to initialize system tables.
The purpose of this algorithm is to
retrieve the raw tables data for the given system,
process it to infer parent and child table relationships,
convert the raw tables into py-apteco ``Table`` objects,
and assign to these tables their relationships to other tables.
Attributes:
data_view (str): DataView the system belongs to
system (str): FastStats system the session is connected to
api_client (aa.ApiClient): client to handle API calls
session (Session): API session the tables data belongs to
raw_tables (List[aa.Table]): list of raw tables
children_lookup (Dict[str, List[str]]):
mapping from table name to list of its child table names
master_table (Table): master table of the FastStats system
tables_lookup (Dict[str, Table]):
mapping from table name to its ``Table`` object
Methods:
run(): entry point to run the algorithm
"""
def __init__(self, session):
"""
Args:
session (Session): API session the tables data belongs to
"""
self.data_view = session.data_view
self.system = session.system
self.api_client = session.api_client
self.session = session
def run(self) -> Tuple[Dict[str, Table], str]:
"""Run the algorithm.
Returns:
(tuple): tuple containing:
tables_lookup (Dict[str, Table]):
mapping from table name to its ``Table`` object
master_table_name (str):
name of the master table of the FastStats system
"""
self._get_raw_tables()
self._identify_children()
self._create_tables()
self._assign_parent_and_children()
self._find_master_table()
_tree_tables = self._assign_ancestors_and_descendants(self.master_table, [])
self._check_all_tables_in_tree(_tree_tables)
self._check_all_relations_assigned()
return self.tables_lookup, self.master_table.name
def _get_raw_tables(self):
"""Get list of all tables from API."""
systems_controller = aa.FastStatsSystemsApi(self.api_client)
results = systems_controller.fast_stats_systems_get_fast_stats_tables(
self.data_view, self.system, count=1000
)
self._check_table_results_consistency(results)
self.raw_tables = results.list
@staticmethod
def _check_table_results_consistency(results: aa.PagedResultsTable):
"""Check the number of tables in list matches stated count."""
results_count = results.count
list_count = len(results.list)
if not results_count == list_count:
raise ApiResultsError(
f"API stated {results_count} tables were returned"
f" but {list_count} were found."
)
def _identify_children(self):
"""Identify child tables for each table."""
self.children_lookup = defaultdict(list)
for table in self.raw_tables:
self.children_lookup[table.parent_table].append(table.name)
# don't freeze yet: will need to look up childless tables and return empty list
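        # e.g. hypothetical tables Households <- People <- Purchases would give
        # {"": ["Households"], "Households": ["People"], "People": ["Purchases"]},
        # with the single master table keyed by the empty parent name ""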
def _create_tables(self):
"""Create py-apteco tables from apteco-api ones."""
self.tables_lookup = {
t.name: Table(
t.name,
t.singular_display_name,
t.plural_display_name,
t.is_default_table,
t.is_people_table,
t.total_records,
t.child_relationship_name,
t.parent_relationship_name,
t.has_child_tables,
t.parent_table,
NOT_ASSIGNED,
NOT_ASSIGNED,
NOT_ASSIGNED,
NOT_ASSIGNED,
NOT_ASSIGNED,
session=self.session,
)
for t in self.raw_tables
}
def _assign_parent_and_children(self):
"""Assign parent and children attributes for each table."""
for table in self.tables_lookup.values():
if table.parent_name == "":
table.parent = None
else:
table.parent = self.tables_lookup[table.parent_name]
table.children = [
self.tables_lookup[name] for name in self.children_lookup[table.name]
]
self._check_child_tables_consistency(table)
self.children_lookup.default_factory = None # 'freeze' as normal dict
@staticmethod
def _check_child_tables_consistency(table: Table):
"""Check table's children matches ``has_children``."""
if table.has_children and not table.children:
raise TablesError(
f"API stated '{table.name}' table has child tables but none were found."
)
if not table.has_children and table.children:
raise TablesError(
f"API stated '{table.name}' table has no child tables"
f" but {len(table.children)} were found."
)
def _find_master_table(self):
"""Retrieve master table, ensuring there is exactly one."""
try:
(master_table_name,) = self.children_lookup[""]
except KeyError:
raise TablesError("No master table found.")
except ValueError: # unpacking failed => !=1 master tables
raise TablesError(
f"Found {len(self.children_lookup[''])} master tables,"
f" there should be 1."
)
try:
self.master_table = self.tables_lookup[master_table_name]
except KeyError:
raise TablesError(
f"The master table '{master_table_name}' could not be found."
)
def _assign_ancestors_and_descendants(
self, table: Table, ancestors: List[Table]
) -> List[Table]:
"""Assign ancestor and descendant tables for each table."""
table.ancestors = ancestors
table.descendants = [
descendant
for child in table.children
for descendant in self._assign_ancestors_and_descendants(
child, [table] + table.ancestors
)
]
return [table] + table.descendants
def _check_all_tables_in_tree(self, _tree_tables):
"""Check all tables appear in table tree exactly once."""
tree_tables_counter = Counter(t.name for t in _tree_tables)
raw_tables_counter = Counter(t.name for t in self.raw_tables)
if not tree_tables_counter == raw_tables_counter:
diff = Counter(tree_tables_counter)
diff.subtract(raw_tables_counter)
raise TablesError(
f"Error constructing table tree:"
f" {len(+diff)} table(s) occurred more than once in tree"
f" and {len(-diff)} table(s) did not occur at all."
)
def _check_all_relations_assigned(self):
"""Check tables have all relation attributes assigned."""
relations = ["parent", "children", "ancestors", "descendants"]
no_relation = defaultdict(list)
for table in self.tables_lookup.values():
for rel in relations:
if getattr(table, rel) is NOT_ASSIGNED:
no_relation[rel].append(table.name)
error_details = ""
for rel in relations:
if no_relation[rel]:
error_details += (
f"\n{len(no_relation[rel])} table(s) had no {rel} assigned."
f" First example: '{no_relation[rel][0]}' table"
)
if error_details:
raise TablesError("Error constructing table tree:" + error_details)
class InitializeVariablesAlgorithm:
"""Class holding the algorithm to initialize system variables.
The purpose of this algorithm is to
retrieve the raw variables for the given system,
convert them into py-apteco ``Variable`` objects,
and assign these to their tables.
Attributes:
data_view (str): DataView the system belongs to
system (str): FastStats system the session is connected to
api_client (aa.ApiClient): client to handle API calls
session (Session): API session the variables data belongs to
tables_lookup (Dict[str, Table]):
mapping from table name to its ``Table`` object,
initially with ``variables`` attribute as ``NOT_ASSIGNED``
raw_variables (List[aa.Variable]): list of raw variables
variables (List[Variable]): list of variables
as py-apteco ``Variable`` objects
variables_lookup (Dict[str, List[Variable]]):
mapping from table name to list of its variables
Methods:
run(): entry point to run the algorithm
"""
def __init__(self, session, tables_without_variables):
"""
Args:
session (Session): API session the variables data belongs to
tables_without_variables (Dict[str, Table]):
mapping from table name to its ``Table`` object,
with variables attribute as ``NOT_ASSIGNED``
"""
self.data_view = session.data_view
self.system = session.system
self.api_client = session.api_client
self.session = session
self.tables_lookup = tables_without_variables
def run(self) -> Tuple[List[Variable], List[Table]]:
"""Run the algorithm.
Returns:
(tuple): tuple containing:
variables (List[Variable]):
list of variables as py-apteco ``Variable`` objects
tables (List[Table]):
list of tables as py-apteco ``Table`` objects
"""
self._get_raw_variables()
self._create_variables()
self._identify_variables()
self._assign_variables()
self._check_all_variables_assigned()
return self.variables, list(self.tables_lookup.values())
def _get_raw_variables(self, variables_per_page=VARIABLES_PER_PAGE):
"""Get list of all variables from API."""
systems_controller = aa.FastStatsSystemsApi(self.api_client)
self.raw_variables = []
offset = 0
while True:
results = systems_controller.fast_stats_systems_get_fast_stats_variables(
self.data_view, self.system, count=variables_per_page, offset=offset
) # type: aa.PagedResultsVariable
self.raw_variables.extend(results.list)
if results.offset + results.count >= results.total_count:
break
offset = results.offset + results.count
self._check_variable_results_consistency(results.total_count)
def _check_variable_results_consistency(self, total_variables: int):
"""Check number of variables returned matches stated total count."""
list_count = len(self.raw_variables)
if not total_variables == list_count:
raise ApiResultsError(
f"API stated there are {total_variables} variables in the system"
f" but {list_count} were returned."
)
def _create_variables(self):
"""Create py-apteco variables from apteco-api ones."""
self.variables = []
for v in self.raw_variables:
try:
variable_class = self._choose_variable(v)
except VariablesError as exc:
exc_msg = exc.args[0]
logging.warning(exc_msg)
else:
variable = variable_class(
name=v.name,
description=v.description,
type=v.type,
folder_name=v.folder_name,
table=self.tables_lookup[v.table_name],
is_selectable=v.is_selectable,
is_browsable=v.is_browsable,
is_exportable=v.is_exportable,
is_virtual=v.is_virtual,
selector_info=v.selector_info,
numeric_info=v.numeric_info,
text_info=v.text_info,
reference_info=v.reference_info,
session=self.session,
)
self.variables.append(variable)
@staticmethod
def _choose_variable(raw_variable: aa.Variable):
"""Get class to create given variable according to its type."""
variable_type_lookup = {
("Selector", "Categorical", "SingleValue", False): SelectorVariable,
("Selector", "Categorical", "SingleValue", True): SelectorVariable, # should be Combined Categories type
("Numeric", None, None, False): NumericVariable,
("Text", None, None, False): TextVariable,
("Selector", "Categorical", "OrArray", False): ArrayVariable,
("Selector", "Categorical", "OrArray", True): ArrayVariable, # should be Combined Categories type
("Selector", "Categorical", "OrBitArray", False): FlagArrayVariable,
("Selector", "Categorical", "OrBitArray", True): FlagArrayVariable, # should be Combined Categories type
("Selector", "Date", "SingleValue", False): DateVariable,
("Selector", "Date", "OrBitArray", False): FlagArrayVariable, # some kind of VV
("Selector", "DateTime", "SingleValue", False): DateTimeVariable,
("Reference", None, None, False): ReferenceVariable,
}
determinant = (
raw_variable.type,
(raw_variable.to_dict().get("selector_info") or {}).get("sub_type"),
(raw_variable.to_dict().get("selector_info") or {}).get("selector_type"),
bool(
(raw_variable.to_dict().get("selector_info") or {}).get(
"combined_from_variable_name"
)
),
)
try:
return variable_type_lookup[determinant]
except KeyError as exc:
raise VariablesError(
f"Failed to initialize variable '{raw_variable.name}',"
f" did not recognise the type from determinant: {determinant}"
) from exc
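    # For example, a raw "Selector" variable whose selector_info has
    # sub_type "Categorical", selector_type "SingleValue" and no
    # combined_from_variable_name yields the determinant
    # ("Selector", "Categorical", "SingleValue", False),
    # which the lookup above resolves to SelectorVariable.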
def _identify_variables(self):
"""Identify variables for each table."""
self.variables_lookup = defaultdict(list)
for variable in self.variables:
self.variables_lookup[variable.table_name].append(variable)
self.variables_lookup.default_factory = None # 'freeze' as normal dict
def _assign_variables(self):
"""Assign variables to each table."""
for table in self.tables_lookup.values():
try:
table.variables = VariablesAccessor(self.variables_lookup[table.name])
except KeyError:
raise VariablesError(f"No variables found for '{table.name}' table.")
def _check_all_variables_assigned(self):
"""Check all tables have variables attribute assigned."""
no_variables = []
for table in self.tables_lookup.values():
if table.variables is NOT_ASSIGNED:
no_variables.append(table.name)
if no_variables:
raise VariablesError(
f"{len(no_variables)} table(s) had no variables assigned."
)
|
from tt_web import s11n
from tt_web import handlers
from tt_web import exceptions as tt_exceptions
from tt_protocol.protocol import properties_pb2
from tt_protocol.protocol import data_protector_pb2
from . import protobuf
from . import operations
@handlers.protobuf_api(properties_pb2.SetPropertiesRequest)
async def set_properties(message, config, **kwargs):
await operations.set_properties([protobuf.to_property(property) for property in message.properties])
return properties_pb2.SetPropertiesResponse()
@handlers.protobuf_api(properties_pb2.GetPropertiesRequest)
async def get_properties(message, **kwargs):
properties = await operations.get_properties({object_data.object_id: list(object_data.types)
for object_data in message.objects})
return properties_pb2.GetPropertiesResponse(properties=[protobuf.from_property(property) for property in properties])
@handlers.protobuf_api(data_protector_pb2.PluginReportRequest, raw=True)
async def data_protection_collect_data(message, config, **kwargs):
if config['custom']['data_protector']['secret'] != message.secret:
raise tt_exceptions.ApiError(code='properties.data_protection_collect_data.wrong_secret',
message='wrong secret code')
report = await operations.get_data_report(object_id=int(message.account_id))
return data_protector_pb2.PluginReportResponse(result=data_protector_pb2.PluginReportResponse.ResultType.SUCCESS,
data=s11n.to_json(report))
@handlers.protobuf_api(data_protector_pb2.PluginDeletionRequest, raw=True)
async def data_protection_delete_data(message, config, **kwargs):
if config['custom']['data_protector']['secret'] != message.secret:
raise tt_exceptions.ApiError(code='properties.data_protection_delete_data.wrong_secret',
message='wrong secret code')
await operations.clean_object_properties(object_id=int(message.account_id))
return data_protector_pb2.PluginDeletionResponse(result=data_protector_pb2.PluginDeletionResponse.ResultType.SUCCESS)
@handlers.protobuf_api(properties_pb2.DebugClearServiceRequest)
async def debug_clear_service(message, **kwargs):
await operations.clean_database()
return properties_pb2.DebugClearServiceResponse()
|
class Solution:
def firstUniqChar(self, s):
"""
:type s: str
:rtype: int
"""
letters = set(s)
        index = [s.index(l) for l in letters if s.count(l) == 1]
return min(index) if len(index) > 0 else -1
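# Example: Solution().firstUniqChar("leetcode") returns 0, since 'l' is the
# first character that occurs exactly once; when no character is unique the
# method returns -1.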
|
"""The pynamd package provides classes and routines for interacting with NAMD
output in Python. This is generally limited to _energy_ based analysis, as a
number of excellent packages are available for performing trajectory analysis.
"""
__version__ = '1.0'
__author__ = 'Brian K. Radak'
from pynamd.log import NamdLog
# from pynamd.xgs import NamdXGSLog
from pynamd.config import NamdConfig
from pynamd.cphlog import TitratableSystemSet
|
###############################################################################
#
# Test cases for xlsxwriter.lua.
#
# Copyright 2014-2015, John McNamara, [email protected]
#
import base_test_class
class TestCompareXLSXFiles(base_test_class.XLSXBaseTest):
"""
Test file created with xlsxwriter.lua against a file created by Excel.
These tests check date writing functions.
"""
def test_date_1904_01(self):
self.run_lua_test('test_date_1904_01')
def test_date_1904_02(self):
self.run_lua_test('test_date_1904_02')
def test_date_1904_03(self):
self.run_lua_test('test_date_1904_03', 'date_1904_01.xlsx')
def test_date_1904_04(self):
self.run_lua_test('test_date_1904_04', 'date_1904_02.xlsx')
|
"""
Flagship file for the StickyJump platformer game
Proprietary content of StickyAR, 2019
Brought to you by Luke Igel, Fischer Moseley, Tim Gutterman, and Zach Rolfness
"""
import pygame as pg
import time
import random
from settings import *
from stickys import updateSticky, clearSticky, calibrate, uncalibrate
from sprites import *
from time import sleep
# Default data seeds a guaranteed game world in absence of CV data
DEFAULT_CV = [(150,200,50,50,"blue"), (275,200,50,50,"orange"), (375,200,50,50,"green"), (450,200,50,50,"pink")]
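# Each tuple is (x, y, width, height, color); read_cv_data() turns every entry
# into a platform sprite whose behaviour is chosen by its color.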
class StickyJump:
def __init__(self, cv_data, debug_mode):
self.debug_mode = debug_mode
# Check if CV data pipeline is operational, and use default data if not
if cv_data is None:
self.cv_data = DEFAULT_CV
else:
self.cv_data = cv_data
# Add the invisible killing floor to bottom of game window
self.cv_data.append((0, 768, 2048, 5,"pink"))
# Initialize game window
pg.init()
pg.mixer.init()
if debug_mode:
self.screen = pg.display.set_mode((WIDTH, HEIGHT))
else:
self.screen = pg.display.set_mode((WIDTH, HEIGHT),flags=pg.FULLSCREEN)
pg.display.set_caption(TITLE)
#Basic procedural game settings
self.clock = pg.time.Clock()
self.running = True
self.xspawn = 0
self.yspawn = 0
self.win_state = False
def read_cv_data(self):
"""Reads incoming CV data from the projected visual field and adds colored platforms as game sprites"""
print(self.cv_data)
for sticky in self.cv_data:
# Get the sticky note's (x, y) position, width, and height, respectively
plat = sticky[:-1]
print("PLAT")
print(*plat)
sticky_color = sticky[-1]
# Different types of platforms correspond to different sticky note colors
if sticky_color == "green":
p = WinSticky(debug_mode=self.debug_mode,*plat)
self.safeplatforms.add(p)
self.winplatform.add(p)
elif sticky_color == "blue":
p = WalkSticky(debug_mode=self.debug_mode,*plat)
self.safeplatforms.add(p)
# Orange sticky is the spawn platform; only expect one of these
elif sticky_color == "orange":
p = SpawnSticky(debug_mode=self.debug_mode,*plat)
self.safeplatforms.add(p)
self.spawnplatform.add(p)
# Add spawn coords to overall StickyJump game settings
self.xspawn = p.rect.x
self.yspawn = p.rect.y
elif sticky_color == "pink":
# If it's a death sticky, it belongs to a group of platforms reserved for death stickies
p = DieSticky(debug_mode=self.debug_mode,*plat)
self.deathplatforms.add(p)
self.all_sprites.add(p)
def spawnplayer(self):
"""Spawn in Player at spawn sticky"""
self.player = Player(self, self.xspawn, self.yspawn)
self.all_sprites.add(self.player)
# Player begins stationary in x direction
self.player.vel = vec(0, 0)
self.player.acc = vec(0, 0)
def win_condition(self):
#Displays win screen and then starts new game
self.message_display('You Win!')
time.sleep(1.25)
self.new()
def new(self, resticky=False):
"""Start a new game"""
if resticky:
print("attempting to resticky")
self.cv_data = updateSticky()
print(self.cv_data)
# Define groups and subgroups of platforms
self.all_sprites = pg.sprite.Group()
self.safeplatforms = pg.sprite.Group()
self.spawnplatform = pg.sprite.GroupSingle()
self.winplatform = pg.sprite.GroupSingle()
self.deathplatforms = pg.sprite.Group()
#Fill out groups of platforms using CV data
self.read_cv_data()
#Spawn player and enter master game loop
self.spawnplayer()
self.win_state = False
self.run()
def run(self):
"""Master Game Loop"""
self.playing = True
while self.playing:
self.clock.tick(FPS)
# Update player's position and handle collisions with platforms
self.update()
# Checks for keystrokes that alter player or game state
self.events()
# Redraws game in window accordingly
self.draw()
if not self.playing:
                pg.quit()  # shut down pygame cleanly when the loop ends
def update(self):
"""Game Loop - Update"""
# Updates Player sprite directly, allowing for movement along x-axis and falling along y-axis
self.all_sprites.update()
if self.win_state:
self.win_condition()
# Handles when player falls and collides with different types of platforms
if self.player.vel.y > 0:
# Falling collision with safe (orange, blue, green) platform
hits = pg.sprite.spritecollide(self.player, self.safeplatforms, False)
if hits:
# Kill player's velocity at point of collision
self.player.pos.y = hits[0].rect.top
self.player.vel.y = 0
# Handles win sequence if player falls onto win (green) platform
wins = pg.sprite.spritecollide(self.player, self.winplatform, False)
if wins:
self.win_state = True
# If collision with death (pink) platform, kill player and then respawn
dead = pg.sprite.spritecollide(self.player, self.deathplatforms, False) #Checks for collision with death platform
if dead:
self.player.kill()
self.player.remove()
sleep(0.5)
self.spawnplayer()
def events(self):
"""Game Loop - Keystroke Events"""
for event in pg.event.get():
if event.type == pg.KEYDOWN:
# Escape key exits game
if event.key == pg.K_ESCAPE:
if self.playing:
self.playing = False
self.running = False
# Spacebar mapped to jump
if event.key == pg.K_SPACE:
self.player.jump()
# 'U' key means resticky and start new game
if event.key == pg.K_u:
print("restickying")
self.resticky()
def draw(self):
"""Game Loop - Draw"""
self.screen.fill(BLACK)
self.all_sprites.draw(self.screen)
# *after* drawing everything, flip the display
pg.display.flip()
def resticky(self):
""" **WORK IN PROGRESS**
Handles changes to sticky note layout and builds new game accordingly
"""
print("restickying!")
self.new(True)
# Extraneous for now
        delay = 250  # 250ms = 0.25s
current_time = pg.time.get_ticks()
change_time = current_time + delay
show = True
def text_objects(self, text, font):
"""Helper function for message_display"""
textSurface = font.render(text, True, WHITE)
return textSurface, textSurface.get_rect()
def message_display(self, text):
"""Displays message in center of game window"""
largeText = pg.font.Font('freesansbold.ttf',115)
TextSurf, TextRect = self.text_objects(text, largeText)
TextRect.center = ((WIDTH/2),(HEIGHT/2))
self.screen.blit(TextSurf, TextRect)
pg.display.update()
def show_start_screen(self):
"""game splash/start screen"""
pass
def show_go_screen(self):
"""game over/continue"""
pass
|
from datetime import datetime
from django.db import models
from django.utils.translation import gettext_lazy as _
from ..get_settings import extract_model_kwargs as ek
__all__ = [
"CreationDateMixin", "EditCreationDateMixin"
]
class CreationDateMixin(models.Model):
"""Adds an `created_at` field, which stores date and time, when the object was created"""
class Meta:
abstract = True
___common_name = __qualname__
created_at = models.DateTimeField(
**ek(___common_name, "created_at", {
"verbose_name": _("Creation date"),
"help_text": _("Date and time when this was created"),
"editable": False,
})
) # type: datetime
def save(self, *args, **kwargs):
if self.created_at is None:
self.created_at = datetime.now()
return super().save(*args, **kwargs)
class EditCreationDateMixin(CreationDateMixin):
"""Adds an `edited_at` field, which stores date and time, when this object was modified last. The field is empty,
if the object hasn't been edited since creation."""
class Meta:
abstract = True
___common_name = __qualname__
edited_at = models.DateTimeField(
**ek(___common_name, "edited_at", {
"verbose_name": _("Edit date"),
"help_text": _("Last date and time when this was edited"),
"editable": False,
"blank": True,
"null": True,
})
) # type: datetime
def save(self, *args, **kwargs):
if self.created_at is not None:
self.edited_at = datetime.now()
return super().save(*args, **kwargs)
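# Minimal usage sketch (hypothetical `Article` model, assuming a standard
# Django project where these mixins are importable):
#
#     class Article(EditCreationDateMixin):
#         title = models.CharField(max_length=100)
#
# `created_at` is filled on the first save(); `edited_at` is refreshed on every
# subsequent save().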
|
#!/usr/bin/python3
""" Printing files module
Functions:
number_of_lines: get the number of lines and
return it
"""
def number_of_lines(filename=""):
""" Write the file in a list of lines and return the len
filename: string containing the name or "" if
not given.
Return:
lines of the file
"""
with open(filename, encoding="utf-8") as fl_opened:
fl_list_lines = fl_opened.readlines()
count = len(fl_list_lines)
return count
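# Minimal usage sketch (hypothetical file name):
#     print(number_of_lines("my_file_0.txt"))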
|
#!/usr/bin/env python3
import os
import json
from imdbpie import Imdb
import requests
from pprint import pprint
import re
import logging
from logging import info, debug, warning, error
import subprocess
logging.basicConfig(level=logging.DEBUG)
movie_dir = "/home/arti/Videod/Filmid"
YTKEY = os.environ.get("YTKEY")
if not YTKEY:
error("YTKEY not set")
exit(1)
imdb = Imdb()
def download_movie_metadata(movie):
    movie, year = re.search(r"(.*)\((\d{4})\)", movie).groups()
movie = movie.strip()
info("Finding movie {} {}".format(movie, year))
#print(movie, year)
ret = imdb.search_for_title(movie)
#print(ret)
movie_metadata_basic = [m for m in ret if m["year"] == year][0]
info("Found movie with imdb_id {}".format(movie_metadata_basic["imdb_id"]))
info("Downloading metadata")
movie_metadata_more = imdb.get_title_by_id(movie_metadata_basic["imdb_id"])
info("Metadata downloaded")
return movie_metadata_more
def write_metadata(movie_dir, mm):
metadata = {"title":str(mm.title),
"year": str(mm.year),
"plot_outline": str(mm.plot_outline),
"rating": str(mm.rating),
"certification": str(mm.certification),
"runtime": str(mm.runtime),
"genres": mm.genres,
"plots": mm.plots,
"imdb_id":str(mm.imdb_id)}
info("opening metadata.json in {} for {}".format(movie_dir, metadata["title"]))
with open(movie_dir+"/metadata.json", "w") as f:
info("writing metadata for {}".format(metadata["title"]))
f.write(json.dumps(metadata, indent=4, sort_keys=True))
def download_poster(movie_dir, metadata):
info("Downloading posters for {}".format(metadata.title))
if not os.path.isfile(movie_dir+"/cover.jpg"):
subprocess.call(['wget', metadata.cover_url,
"-O", movie_dir+"/cover.jpg"])
else:
info("cover.jpg already downloaded")
#if not os.path.isfile(movie_dir+"/poster.jpg"):
# subprocess.call(['wget', metadata.poster_url,
# "-O", movie_dir+"/poster.jpg"])
#else:
# info("poster.jpg already downloaded")
info("Poster downloading finished")
def download_trailer(movie_dir, metadata):
info("Downloading trailer for {}".format(metadata.title))
if os.path.isfile(movie_dir+"/trailer.mp4"):
info("Trailer already downloaded")
return
trailers = []
for key, val in metadata.trailers.items():
r = requests.head(val)
        size = int(r.headers.get("Content-Length", 0))  # cast so trailers sort by numeric size
trailers.append((size, key, val))
trailer = sorted(trailers)[::-1][0]
subprocess.call(['wget', trailer[2],
"-O", movie_dir+"/trailer.mp4"])
def add_yt_trailer_code(md):
params = {"key": YTKEY,
"part":"id", "maxResults":1,
"q":"{} ({}) trailer".format(md.get("title"), md.get("year", ""))}
r = requests.get("https://www.googleapis.com/youtube/v3/search", params=params)
md["yt_trailer_code"] = r.json()["items"][0]["id"]["videoId"]
return md
def metadata_update_needed(metadata_file):
with open(metadata_file, "r") as f:
md = json.loads(f.read())
if "imdb_id" not in md:
return True
elif "yt_trailer_code" not in md:
md = add_yt_trailer_code(md)
print(md.get("title"), md.get("yt_trailer_code"))
fd = open(metadata_file, "w")
fd.write(json.dumps(md, indent=4, sort_keys=True))
fd.close()
else:
return False
for movie in os.listdir(movie_dir):
#print(movie)
if os.path.isfile(movie_dir+"/"+movie+"/metadata.json") \
and not metadata_update_needed(movie_dir+"/"+movie+"/metadata.json"):
continue
try:
        re.search(r"(.*)\((\d{4})\)", movie).groups()
except:
continue
try:
mm = download_movie_metadata(movie)
write_metadata(movie_dir+"/"+movie, mm)
except:
logging.exception("Metadata download failed")
try:
download_poster(movie_dir+"/"+movie, mm)
except:
logging.exception("Poster download failed")
#download_trailer(movie_dir+"/"+movie, mm)
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 31 13:42:05 2017
This script is for post-processing data produced by other parts of the codebase.
It contains function definitions which may also be useful to other modules.
Eventually this should write the models to a CSV for use by XID+.
@author: rs548
"""
from astropy.io import fits
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import numpy as np
import pandas as pd
from scipy import stats, integrate
import seaborn as sns
sns.set(color_codes=True)
from os import listdir
from os import getcwd
from os import remove
import csv
def checkcatalogue(sdssid1,cat2):
    #Check whether the SDSSid from catalogue 1 appears anywhere in catalogue 2
    matchfound = False
    for source in cat2:
        if sdssid1 == source['SDSSid']:
matchfound = True
return matchfound
def comparecatalogues(cat1,cat2):
print('There are ', len(cat1), ' objects in catalogue 1')
print('There are ', len(cat2), ' objects in catalogue 2')
nmatch=0
for source1 in cat1:
if checkcatalogue(source1['SDSSid'],cat2):
nmatch += 1
print('There are ', nmatch, ' objects from catalogue 1 in catalogue 2')
def printGraphs(folder):
numberOutputs = 0
for filename in listdir(folder):
if filename[-11:] == 'output.fits':
images = fits.open(folder + filename)
fig = plt.figure()
fig.suptitle(filename)
#norm=LogNorm(),
plt.subplot(131)
plt.imshow(images[1].data, cmap='gray', interpolation='none')
plt.title('Image')
plt.subplot(132)
plt.imshow(images[2].data, cmap='gray', interpolation='none')
plt.title('Model')
plt.subplot(133)
plt.imshow(images[3].data, cmap='gray', interpolation='none')
plt.title('Residual')
plt.show()
plt.close()
images.close()
numberOutputs = numberOutputs + 1
#remove('/Users/rs548/Documents/Science/PeteHurley/SDSS/' + filename)
def print5bandGraphs(folder,band):
numberOutputs = 0
for filename in listdir(folder):
if filename[-11:] == 'output.fits':
images = fits.open(folder + filename)
fig = plt.figure()
fig.suptitle(filename)
#norm=LogNorm(),
plt.subplot(131)
plt.imshow(images[band + 1].data, cmap='gray', interpolation='none')
plt.title('Image')
plt.subplot(132)
plt.imshow(images[band + 6].data, cmap='gray', interpolation='none')
plt.title('Model')
plt.subplot(133)
plt.imshow(images[band + 11].data, cmap='gray', interpolation='none')
plt.title('Residual')
plt.show()
plt.close()
images.close()
numberOutputs = numberOutputs + 1
#remove('/Users/rs548/Documents/Science/PeteHurley/SDSS/' + filename)
def oneModel(output):
lognorm = True
image = fits.open(output)
fig = plt.figure()
fig.suptitle(output)
#norm=LogNorm(),
plt.imshow(image[1].data, cmap='gray', interpolation='none',norm=LogNorm())
plt.title('Image')
fig = plt.figure()
plt.imshow(image[2].data, cmap='gray', interpolation='none',norm=LogNorm())
plt.title('Model')
fig = plt.figure()
plt.imshow(image[3].data, cmap='gray', interpolation='none',norm=LogNorm())
plt.title('Residual')
image.close()
def generateTables(folder,bandnames=['u','g','r','i','z']):
numberObjects = 0
writer = csv.writer(open( folder + 'out.csv', 'wb'))
paramnames = ['OBJID',
'CHISQ',
'RA',
'DEC',
'R_e']
writer.writerow(paramnames)
for filename in listdir(folder):
if filename[-11:] == 'output.fits':
output = fits.open(folder + filename)
numBands = ((len(output) -1)/3) -1
for band in range(0,numBands):
allbandparams = []
for param in paramnames:
allbandparams += [output[band+numBands].header[param]]
writer.writerow(allbandparams)
return writer
def printAllBandGraphs(folder):
"""
    Go through a folder and print all the passband images/models/residuals
for every Galfit output file. Will have to be modified for pyprofit.
"""
numberOutputs = 0
for filename in listdir(folder):
if filename[-11:] == 'output.fits':
images = fits.open(folder + filename)
numBands = ((len(images) -1)/3) -1
print(numBands)
fig,axarr = plt.subplots(nrows=numBands, ncols=3, sharex=True,
sharey=True, figsize=(10,10))
plt.suptitle(filename)
#norm=LogNorm(),
axarr[0,0].set_title('Image')
axarr[0,1].set_title('Model')
axarr[0,2].set_title('Residual')
for band in range(0,numBands):
axarr[band,0].imshow(images[band].data,
cmap='gray', interpolation='none')
axarr[band,1].imshow(images[band + numBands].data,
cmap='gray', interpolation='none')
axarr[band,2].imshow(images[band + 2*numBands].data,
cmap='gray', interpolation='none')
plt.show()
plt.close()
images.close()
numberOutputs = numberOutputs + 1
#remove('/Users/rs548/Documents/Science/PeteHurley/SDSS/' + filename)
print('done a file')
plt.close('all')
def generateTables(folder,bandnames=['u','g','r','i','z']):
"""
A function to go through a folder of GalfitM output files (fits) and
print all the Sersic or Sersic/bulge parameters to a CSV
"""
numberObjects = 0
outfile = open( folder + 'out.csv', 'w')
writer = csv.writer(outfile)
#Define a non general set of params to pull out for a SDSS UGRIZ fit
paramnames = ['DATAIN_U',
'CHISQ',
'1_XC_U',
'1_YC_U',
'1_MAG_U', '1_MAG_G', '1_MAG_R','1_MAG_I','1_MAG_Z',
'1_RE_U',
'1_N_U',
'1_AR_U',
'1_PA_U']
writer.writerow(paramnames)
for filename in listdir(folder):
if filename[-11:] == 'output.fits':
output = fits.open(folder + filename)
numBands = ((len(output) -1)/3) -1
#for band in range(0,numBands):
allbandparams = []
for param in paramnames:
#print(band,numBands,param)
allbandparams += [output[6].header[param]]
writer.writerow(allbandparams)
return writer
if __name__ == '__main__':
#printGraphs('/Users/rs548/Documents/Science/PeteHurley/UVG/')
#printAllBandGraphs('/Users/rs548/Documents/Science/PeteHurley/SDSS-M-BD/')
#print5bandGraphs('/Users/rs548/Documents/Science/PeteHurley/SM/',3)
#oneModel('/Users/rs548/Documents/Science/Blended/g-output.fits')
#generateTables('/Users/rs548/Documents/Science/PeteHurley/SDSS-XM/')
|
import os
from flask import Flask, render_template, redirect, url_for, request, session, flash
from flask_migrate import Migrate
from werkzeug.security import generate_password_hash, check_password_hash
def create_app(test_config=None):
# Previous code omitted
@app.route('/log_in', methods=('GET', 'POST'))
def log_in():
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
error = None
user = User.query.filter_by(username=username).first()
if not user or not check_password_hash(user.password, password):
error = 'Username or password are incorrect'
if error is None:
session.clear()
session['user_id'] = user.id
return redirect(url_for('index'))
flash(error, category='error')
return render_template('log_in.html')
@app.route('/')
def index():
return 'Index'
return app
|
from persistence.repositories.exam_solution_repository_postgres import ExamSolutionRepositoryPostgres
from exceptions.http_exception import NotFoundException
from application.serializers.exam_solution_serializer import ExamSolutionSerializer
import logging
logger = logging.getLogger(__name__)
esrp = ExamSolutionRepositoryPostgres()
def get_exam_solution(db, exam_solution_id):
exam_solution = esrp.get_exam_solution(db, exam_solution_id)
if exam_solution is None:
logger.warning("Exam solution %s not found", exam_solution_id)
raise NotFoundException("Exam solution {}".format(exam_solution_id))
return ExamSolutionSerializer.serialize(exam_solution)
def get_all_exam_solutions_by_user_id(db, user_id, graded, approval_state):
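    # Aggregates per-user stats: average_score is the mean of score / max_score
    # over graded solutions, and approval_rate is the number of approved
    # solutions divided by the number of graded solutions.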
exam_solutions = esrp.get_all_exam_solutions_by_user_id(db, user_id, graded, approval_state)
exam_solution_list = []
for exam_solution in exam_solutions:
exam_solution_list.append(ExamSolutionSerializer.serialize(exam_solution))
amount = len(exam_solution_list)
amount_graded = 0
total_score = 0
approval_count = 0
average_score = 0
approval_rate = 0
if amount != 0:
for exam_solution in exam_solution_list:
if exam_solution["graded"] is True:
amount_graded += 1
total_score += exam_solution["score"] / exam_solution["max_score"]
if exam_solution["approval_state"] is True:
approval_count += 1
if amount_graded != 0:
average_score = total_score / amount_graded
approval_rate = approval_count / amount_graded
return {
"user_id": user_id,
"amount": amount,
"amount_graded": amount_graded,
"average_score": average_score,
"approval_rate": approval_rate,
"exam_solutions": exam_solution_list,
}
def get_all_exam_solutions_by_exam_template_id(db, exam_template_id, graded, approval_state):
exam_solutions = esrp.get_all_exam_solutions_by_exam_template_id(db, exam_template_id, graded, approval_state)
exam_solution_list = []
for exam_solution in exam_solutions:
exam_solution_list.append(ExamSolutionSerializer.serialize(exam_solution))
amount = len(exam_solution_list)
amount_graded = 0
total_score = 0
approval_count = 0
average_score = 0
approval_rate = 0
if amount != 0:
for exam_solution in exam_solution_list:
if exam_solution["graded"] is True:
amount_graded += 1
total_score += exam_solution["score"] / exam_solution["max_score"]
if exam_solution["approval_state"] is True:
approval_count += 1
if amount_graded != 0:
average_score = total_score / amount_graded
approval_rate = approval_count / amount_graded
return {
"exam_template_id": exam_template_id,
"amount": amount,
"amount_graded": amount_graded,
"average_score": average_score,
"approval_rate": approval_rate,
"exam_solutions": exam_solution_list,
}
def get_all_exam_solutions_by_course_id(db, course_id, graded, approval_state):
exam_solutions = esrp.get_all_exam_solutions_by_course_id(db, course_id, graded, approval_state)
exam_solution_list = []
for exam_solution in exam_solutions:
exam_solution_list.append(ExamSolutionSerializer.serialize(exam_solution))
amount = len(exam_solution_list)
amount_graded = 0
total_score = 0
approval_count = 0
average_score = 0
approval_rate = 0
if amount != 0:
for exam_solution in exam_solution_list:
if exam_solution["graded"] is True:
amount_graded += 1
total_score += exam_solution["score"] / exam_solution["max_score"]
if exam_solution["approval_state"] is True:
approval_count += 1
if amount_graded != 0:
average_score = total_score / amount_graded
approval_rate = approval_count / amount_graded
return {
"course_id": course_id,
"amount": amount,
"amount_graded": amount_graded,
"average_score": average_score,
"approval_rate": approval_rate,
"exam_solutions": exam_solution_list,
}
def get_all_exam_solutions_by_user_id_and_course_id(db, user_id, course_id, graded, approval_state):
exam_solutions = esrp.get_all_exam_solutions_by_user_id_and_course_id(db, user_id, course_id, graded, approval_state)
exam_solution_list = []
for exam_solution in exam_solutions:
exam_solution_list.append(ExamSolutionSerializer.serialize(exam_solution))
amount = len(exam_solution_list)
amount_graded = 0
total_score = 0
approval_count = 0
average_score = 0
approval_rate = 0
if amount != 0:
for exam_solution in exam_solution_list:
if exam_solution["graded"] is True:
amount_graded += 1
total_score += exam_solution["score"] / exam_solution["max_score"]
if exam_solution["approval_state"] is True:
approval_count += 1
if amount_graded != 0:
average_score = total_score / amount_graded
approval_rate = approval_count / amount_graded
return {
"user_id": user_id,
"course_id": course_id,
"amount": amount,
"amount_graded": amount_graded,
"average_score": average_score,
"approval_rate": approval_rate,
"exam_solutions": exam_solution_list,
}
def get_all_exam_solutions_by_corrector_id_and_course_id(db, corrector_id, course_id, graded, approval_state):
exam_solutions = esrp.get_all_exam_solutions_by_corrector_id_and_course_id(
db, corrector_id, course_id, graded, approval_state
)
exam_solution_list = []
for exam_solution in exam_solutions:
exam_solution_list.append(ExamSolutionSerializer.serialize(exam_solution))
amount = len(exam_solution_list)
amount_graded = 0
total_score = 0
approval_count = 0
average_score = 0
approval_rate = 0
if amount != 0:
for exam_solution in exam_solution_list:
if exam_solution["graded"] is True:
amount_graded += 1
total_score += exam_solution["score"] / exam_solution["max_score"]
if exam_solution["approval_state"] is True:
approval_count += 1
if amount_graded != 0:
average_score = total_score / amount_graded
approval_rate = approval_count / amount_graded
return {
"corrector_id": corrector_id,
"course_id": course_id,
"amount": amount,
"amount_graded": amount_graded,
"average_score": average_score,
"approval_rate": approval_rate,
"exam_solutions": exam_solution_list,
}
def get_all_exam_solutions_by_corrector_id(db, corrector_id, graded, approval_state):
exam_solutions = esrp.get_all_exam_solutions_by_corrector_id(db, corrector_id, graded, approval_state)
exam_solution_list = []
for exam_solution in exam_solutions:
exam_solution_list.append(ExamSolutionSerializer.serialize(exam_solution))
amount = len(exam_solution_list)
amount_graded = 0
approval_count = 0
approval_rate = 0
if amount != 0:
for exam_solution in exam_solution_list:
if exam_solution["graded"] is True:
amount_graded += 1
if exam_solution["approval_state"] is True:
approval_count += 1
if amount_graded != 0:
approval_rate = approval_count / amount_graded
return {
"corrector_id": corrector_id,
"amount": amount,
"amount_graded": amount_graded,
"approval_rate": approval_rate,
"exam_solutions": exam_solution_list,
}
def exam_solution_exists(db, exam_solution_id):
return esrp.get_exam_solution(db, exam_solution_id)
|
def test_update_log_weights(part_updater):
temperature_step = 0.1
exp_w = 0.2 - 0.2 * temperature_step
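    # with temperature_step = 0.1 this gives exp_w = 0.2 - 0.2 * 0.1 = 0.18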
part_updater.update_log_weights(temperature_step)
assert all([exp_w == p.log_weight for p in
part_updater.step.get_particles()])
def test_resample_if_needed_no(part_updater):
part_updater.resample_if_needed()
assert part_updater._resample_status == "No resampling"
def test_resample_if_needed_yes(part_updater_high_ess_threshold):
part_updater_high_ess_threshold.resample_if_needed()
assert part_updater_high_ess_threshold._resample_status == "Resampling..."
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from collections import defaultdict
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from lab.experiment import Experiment
from downward.reports import PlanningReport
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import ComparativeReport
import common_setup
DIR = os.path.dirname(os.path.abspath(__file__))
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]")
if common_setup.is_test_run():
ENVIRONMENT = LocalEnvironment(processes=4)
exp = Experiment()
class TranslatorDiffReport(PlanningReport):
def get_cell(self, run):
return ";".join(run.get(attr) for attr in self.attributes)
def get_text(self):
lines = []
for runs in self.problem_runs.values():
hashes = set([r.get("translator_output_sas_hash") for r in runs])
if len(hashes) > 1 or None in hashes:
lines.append(";".join([self.get_cell(r) for r in runs]))
return "\n".join(lines)
class SameValueFilters(object):
"""Ignore runs for a task where all algorithms have the same value."""
def __init__(self, attribute):
self._attribute = attribute
self._tasks_to_values = defaultdict(list)
def _get_task(self, run):
return (run['domain'], run['problem'])
def store_values(self, run):
value = run.get(self._attribute)
self._tasks_to_values[self._get_task(run)].append(value)
# Don't filter this run, yet.
return True
def filter_tasks_with_equal_values(self, run):
values = self._tasks_to_values[self._get_task(run)]
return len(set(values)) != 1
exp.add_fetcher(src='data/issue939-base-eval')
exp.add_fetcher(src='data/issue939-v1-eval', merge=True)
ATTRIBUTES = ["error", "run_dir", "translator_*", "translator_output_sas_hash"]
#exp.add_comparison_table_step(attributes=ATTRIBUTES)
same_value_filters = SameValueFilters("translator_output_sas_hash")
# exp.add_comparison_table_step(
# name="filtered",
# attributes=ATTRIBUTES,
# filter=[same_value_filters.store_values, same_value_filters.filter_tasks_with_equal_values])
exp.add_report(TranslatorDiffReport(
attributes=["domain", "problem", "algorithm", "run_dir"]
), outfile="different_output_sas.csv"
)
exp.add_report(AbsoluteReport(attributes=ATTRIBUTES))
exp.add_report(ComparativeReport([
('issue939-base-translate-only', 'issue939-v1-translate-only')
], attributes=ATTRIBUTES))
exp.run_steps()
|
"""
A class to keep information about faces of a polyhedron
This module gives you a tool to work with the faces of a polyhedron
and their relative position. First, you need to find the faces. To get
the faces in a particular dimension, use the
:meth:`~sage.geometry.polyhedron.base.face` method::
sage: P = polytopes.cross_polytope(3)
sage: P.faces(3)
(A 3-dimensional face of a Polyhedron in ZZ^3 defined as the convex hull of 6 vertices,)
sage: [f.ambient_V_indices() for f in P.faces(2)]
[(0, 1, 2),
(0, 1, 3),
(0, 2, 4),
(0, 3, 4),
(3, 4, 5),
(2, 4, 5),
(1, 3, 5),
(1, 2, 5)]
sage: [f.ambient_V_indices() for f in P.faces(1)]
[(0, 1),
(0, 2),
(1, 2),
(0, 3),
(1, 3),
(0, 4),
(2, 4),
(3, 4),
(2, 5),
(3, 5),
(4, 5),
(1, 5)]
or :meth:`~sage.geometry.polyhedron.base.face_lattice` to get the
whole face lattice as a poset::
sage: P.face_lattice()
Finite lattice containing 28 elements with distinguished linear extension
The faces are printed in shorthand notation where each integer is the
index of a vertex/ray/line in the same order as the containing
Polyhedron's :meth:`~sage.geometry.polyhedron.base.Vrepresentation` ::
sage: face = P.faces(1)[3]; face
A 1-dimensional face of a Polyhedron in ZZ^3 defined as the convex hull of 2 vertices
sage: face.ambient_V_indices()
(0, 3)
sage: P.Vrepresentation(0)
A vertex at (-1, 0, 0)
sage: P.Vrepresentation(3)
A vertex at (0, 0, 1)
sage: face.vertices()
(A vertex at (-1, 0, 0), A vertex at (0, 0, 1))
The face itself is not represented by Sage's
:func:`sage.geometry.polyhedron.constructor.Polyhedron` class, but by
an auxiliary class to keep the information. You can get the face as a
polyhedron with the :meth:`PolyhedronFace.as_polyhedron` method::
sage: face.as_polyhedron()
A 1-dimensional polyhedron in ZZ^3 defined as the convex hull of 2 vertices
sage: _.equations()
(An equation (0, 1, 0) x + 0 == 0,
An equation (1, 0, -1) x + 1 == 0)
"""
########################################################################
# Copyright (C) 2012 Volker Braun <[email protected]>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# http://www.gnu.org/licenses/
########################################################################
from __future__ import print_function
from sage.structure.sage_object import SageObject
from sage.structure.richcmp import richcmp_method, richcmp
from sage.misc.all import cached_method
from sage.modules.free_module_element import vector
from sage.matrix.constructor import matrix
#########################################################################
@richcmp_method
class PolyhedronFace(SageObject):
r"""
A face of a polyhedron.
This class is for use in
:meth:`~sage.geometry.polyhedron.base.Polyhedron_base.face_lattice`.
INPUT:
No checking is performed whether the H/V-representation indices
actually determine a face of the polyhedron. You should not
manually create :class:`PolyhedronFace` objects unless you know
what you are doing.
OUTPUT:
A :class:`PolyhedronFace`.
EXAMPLES::
sage: octahedron = polytopes.cross_polytope(3)
sage: inequality = octahedron.Hrepresentation(2)
sage: face_h = tuple([ inequality ])
sage: face_v = tuple( inequality.incident() )
sage: face_h_indices = [ h.index() for h in face_h ]
sage: face_v_indices = [ v.index() for v in face_v ]
sage: from sage.geometry.polyhedron.face import PolyhedronFace
sage: face = PolyhedronFace(octahedron, face_v_indices, face_h_indices)
sage: face
A 2-dimensional face of a Polyhedron in ZZ^3 defined as the convex hull of 3 vertices
sage: face.dim()
2
sage: face.ambient_V_indices()
(0, 1, 2)
sage: face.ambient_Hrepresentation()
(An inequality (1, 1, 1) x + 1 >= 0,)
sage: face.ambient_Vrepresentation()
(A vertex at (-1, 0, 0), A vertex at (0, -1, 0), A vertex at (0, 0, -1))
"""
def __init__(self, polyhedron, V_indices, H_indices):
r"""
The constructor.
See :class:`PolyhedronFace` for more information.
INPUT:
- ``polyhedron`` -- a :class:`Polyhedron`. The ambient
polyhedron.
- ``V_indices`` -- list of sorted integers. The indices of the
face-spanning V-representation objects in the ambient
polyhedron.
- ``H_indices`` -- list of sorted integers. The indices of the
H-representation objects of the ambient polyhedron that are
saturated on the face.
TESTS::
sage: from sage.geometry.polyhedron.face import PolyhedronFace
sage: PolyhedronFace(Polyhedron(), [], []) # indirect doctest
A -1-dimensional face of a Polyhedron in ZZ^0
"""
self._polyhedron = polyhedron
self._ambient_Vrepresentation_indices = tuple(V_indices)
self._ambient_Hrepresentation_indices = tuple(H_indices)
self._ambient_Vrepresentation = tuple( polyhedron.Vrepresentation(i) for i in V_indices )
self._ambient_Hrepresentation = tuple( polyhedron.Hrepresentation(i) for i in H_indices )
def __hash__(self):
r"""
TESTS::
sage: P = Polyhedron([[0,0],[0,1],[23,3],[9,12]])
sage: list(map(hash, P.faces(1))) # random
[2377119663630407734,
2377136578164722109,
5966674064902575359,
4795242501625591634]
"""
return hash((self._polyhedron, self._ambient_Vrepresentation_indices))
def vertex_generator(self):
"""
Return a generator for the vertices of the face.
EXAMPLES::
sage: triangle = Polyhedron(vertices=[[1,0],[0,1],[1,1]])
sage: face = triangle.faces(1)[0]
sage: for v in face.vertex_generator(): print(v)
A vertex at (0, 1)
A vertex at (1, 0)
sage: type(face.vertex_generator())
<... 'generator'>
"""
for V in self.ambient_Vrepresentation():
if V.is_vertex():
yield V
@cached_method
def vertices(self):
"""
Return all vertices of the face.
OUTPUT:
A tuple of vertices.
EXAMPLES::
sage: triangle = Polyhedron(vertices=[[1,0],[0,1],[1,1]])
sage: face = triangle.faces(1)[0]
sage: face.vertices()
(A vertex at (0, 1), A vertex at (1, 0))
"""
return tuple(self.vertex_generator())
@cached_method
def n_vertices(self):
"""
Return the number of vertices of the face.
OUTPUT:
Integer.
EXAMPLES::
sage: Q = polytopes.cross_polytope(3)
sage: face = Q.faces(2)[0]
sage: face.n_vertices()
3
"""
return len(self.vertices())
def ray_generator(self):
"""
Return a generator for the rays of the face.
EXAMPLES::
sage: pi = Polyhedron(ieqs = [[1,1,0],[1,0,1]])
sage: face = pi.faces(1)[0]
sage: next(face.ray_generator())
A ray in the direction (1, 0)
"""
for V in self.ambient_Vrepresentation():
if V.is_ray():
yield V
@cached_method
def rays(self):
"""
Return the rays of the face.
OUTPUT:
A tuple of rays.
EXAMPLES::
sage: p = Polyhedron(ieqs = [[0,0,0,1],[0,0,1,0],[1,1,0,0]])
sage: face = p.faces(2)[0]
sage: face.rays()
(A ray in the direction (1, 0, 0), A ray in the direction (0, 1, 0))
"""
return tuple(self.ray_generator())
@cached_method
def n_rays(self):
"""
Return the number of rays of the face.
OUTPUT:
Integer.
EXAMPLES::
sage: p = Polyhedron(ieqs = [[0,0,0,1],[0,0,1,0],[1,1,0,0]])
sage: face = p.faces(2)[0]
sage: face.n_rays()
2
"""
return len(self.rays())
def line_generator(self):
"""
Return a generator for the lines of the face.
EXAMPLES::
sage: pr = Polyhedron(rays = [[1,0],[-1,0],[0,1]], vertices = [[-1,-1]])
sage: face = pr.faces(1)[0]
sage: next(face.line_generator())
A line in the direction (1, 0)
"""
for V in self.ambient_Vrepresentation():
if V.is_line():
yield V
@cached_method
def lines(self):
"""
Return all lines of the face.
OUTPUT:
A tuple of lines.
EXAMPLES::
sage: p = Polyhedron(rays = [[1,0],[-1,0],[0,1],[1,1]], vertices = [[-2,-2],[2,3]])
sage: p.lines()
(A line in the direction (1, 0),)
"""
return tuple(self.line_generator())
@cached_method
def n_lines(self):
"""
Return the number of lines of the face.
OUTPUT:
Integer.
EXAMPLES::
sage: p = Polyhedron(rays = [[1,0],[-1,0],[0,1],[1,1]], vertices = [[-2,-2],[2,3]])
sage: p.n_lines()
1
"""
return len(self.lines())
def __richcmp__(self, other, op):
"""
Compare ``self`` and ``other``.
INPUT:
- ``other`` -- anything.
OUTPUT:
Two faces test equal if and only if they are faces of the same
(not just isomorphic) polyhedron and their generators have the
same indices.
EXAMPLES::
sage: square = polytopes.hypercube(2)
sage: f = square.faces(1)
sage: matrix(4,4, lambda i,j: ZZ(f[i] <= f[j]))
[1 1 1 1]
[0 1 1 1]
[0 0 1 1]
[0 0 0 1]
sage: matrix(4,4, lambda i,j: ZZ(f[i] == f[j])) == 1
True
"""
if not isinstance(other, PolyhedronFace):
return NotImplemented
if self._polyhedron is not other._polyhedron:
return NotImplemented
return richcmp(self._ambient_Vrepresentation_indices,
other._ambient_Vrepresentation_indices, op)
def ambient_Hrepresentation(self, index=None):
r"""
Return the H-representation objects of the ambient polytope
defining the face.
INPUT:
- ``index`` -- optional. Either an integer or ``None``
(default).
OUTPUT:
If the optional argument is not present, a tuple of
H-representation objects. Each entry is either an inequality
or an equation.
If the optional integer ``index`` is specified, the
``index``-th element of the tuple is returned.
EXAMPLES::
sage: square = polytopes.hypercube(2)
sage: for face in square.face_lattice():
....: print(face.ambient_Hrepresentation())
(An inequality (1, 0) x + 1 >= 0, An inequality (0, 1) x + 1 >= 0,
An inequality (-1, 0) x + 1 >= 0, An inequality (0, -1) x + 1 >= 0)
(An inequality (1, 0) x + 1 >= 0, An inequality (0, 1) x + 1 >= 0)
(An inequality (1, 0) x + 1 >= 0, An inequality (0, -1) x + 1 >= 0)
(An inequality (0, 1) x + 1 >= 0, An inequality (-1, 0) x + 1 >= 0)
(An inequality (-1, 0) x + 1 >= 0, An inequality (0, -1) x + 1 >= 0)
(An inequality (1, 0) x + 1 >= 0,)
(An inequality (0, 1) x + 1 >= 0,)
(An inequality (-1, 0) x + 1 >= 0,)
(An inequality (0, -1) x + 1 >= 0,)
()
"""
if index is None:
return self._ambient_Hrepresentation
else:
return self._ambient_Hrepresentation[index]
def ambient_Vrepresentation(self, index=None):
r"""
Return the V-representation objects of the ambient polytope
defining the face.
INPUT:
- ``index`` -- optional. Either an integer or ``None``
(default).
OUTPUT:
If the optional argument is not present, a tuple of
V-representation objects. Each entry is either a vertex, a
ray, or a line.
If the optional integer ``index`` is specified, the
``index``-th element of the tuple is returned.
EXAMPLES::
sage: square = polytopes.hypercube(2)
sage: for fl in square.face_lattice():
....: print(fl.ambient_Vrepresentation())
()
(A vertex at (-1, -1),)
(A vertex at (-1, 1),)
(A vertex at (1, -1),)
(A vertex at (1, 1),)
(A vertex at (-1, -1), A vertex at (-1, 1))
(A vertex at (-1, -1), A vertex at (1, -1))
(A vertex at (1, -1), A vertex at (1, 1))
(A vertex at (-1, 1), A vertex at (1, 1))
(A vertex at (-1, -1), A vertex at (-1, 1),
A vertex at (1, -1), A vertex at (1, 1))
"""
if index is None:
return self._ambient_Vrepresentation
else:
return self._ambient_Vrepresentation[index]
def n_ambient_Hrepresentation(self):
"""
Return the number of objects that make up the ambient
H-representation of the polyhedron.
See also :meth:`ambient_Hrepresentation`.
OUTPUT:
Integer.
EXAMPLES::
sage: p = polytopes.cross_polytope(4)
sage: face = p.face_lattice()[10]
sage: face
A 1-dimensional face of a Polyhedron in ZZ^4 defined as the convex hull of 2 vertices
sage: face.ambient_Hrepresentation()
(An inequality (1, -1, 1, -1) x + 1 >= 0,
An inequality (1, 1, 1, 1) x + 1 >= 0,
An inequality (1, 1, 1, -1) x + 1 >= 0,
An inequality (1, -1, 1, 1) x + 1 >= 0)
sage: face.n_ambient_Hrepresentation()
4
"""
return len(self.ambient_Hrepresentation())
def n_ambient_Vrepresentation(self):
"""
Return the number of objects that make up the ambient
V-representation of the polyhedron.
See also :meth:`ambient_Vrepresentation`.
OUTPUT:
Integer.
EXAMPLES::
sage: p = polytopes.cross_polytope(4)
sage: face = p.face_lattice()[10]
sage: face
A 1-dimensional face of a Polyhedron in ZZ^4 defined as the convex hull of 2 vertices
sage: face.ambient_Vrepresentation()
(A vertex at (-1, 0, 0, 0), A vertex at (0, 0, -1, 0))
sage: face.n_ambient_Vrepresentation()
2
"""
return len(self.ambient_Vrepresentation())
def ambient_H_indices(self):
"""
Return the indices of the H-representation objects of the
ambient polyhedron that make up the H-representation of ``self``.
See also :meth:`ambient_Hrepresentation`.
OUTPUT:
Tuple of indices
EXAMPLES::
sage: Q = polytopes.cross_polytope(3)
sage: F = Q.faces(1)
sage: [f.ambient_H_indices() for f in F]
[(1, 2),
(2, 3),
(2, 7),
(0, 1),
(1, 6),
(0, 3),
(3, 4),
(0, 5),
(4, 7),
(5, 6),
(4, 5),
(6, 7)]
"""
return self._ambient_Hrepresentation_indices
def ambient_V_indices(self):
"""
Return the indices of the V-representation objects of the
ambient polyhedron that make up the V-representation of ``self``.
See also :meth:`ambient_Vrepresentation`.
OUTPUT:
Tuple of indices
EXAMPLES::
sage: P = polytopes.cube()
sage: F = P.faces(2)
sage: [f.ambient_V_indices() for f in F]
[(0, 1, 2, 3),
(0, 1, 4, 5),
(0, 2, 4, 6),
(1, 3, 5, 7),
(2, 3, 6, 7),
(4, 5, 6, 7)]
"""
return self._ambient_Vrepresentation_indices
def ambient_dim(self):
r"""
Return the dimension of the containing polyhedron.
EXAMPLES::
sage: P = Polyhedron(vertices = [[1,0,0,0],[0,1,0,0]])
sage: face = P.faces(1)[0]
sage: face.ambient_dim()
4
"""
return self._polyhedron.ambient_dim()
@cached_method
def dim(self):
"""
Return the dimension of the face.
OUTPUT:
Integer.
EXAMPLES::
sage: fl = polytopes.dodecahedron().face_lattice()
sage: [ x.dim() for x in fl ]
[-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3]
"""
if self.n_ambient_Vrepresentation() == 0:
return -1
else:
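# The dimension is the rank of the span of the V-representation objects,
# translated so that the first one sits at the origin.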
origin = vector(self.ambient_Vrepresentation(0))
v_list = [ vector(v)-origin for v in self.ambient_Vrepresentation() ]
return matrix(v_list).rank()
def _repr_(self):
r"""
Return a string representation.
OUTPUT:
A string listing the V-representation indices of the face.
EXAMPLES::
sage: square = polytopes.hypercube(2)
sage: a_face = list( square.face_lattice() )[8]
sage: a_face.__repr__()
'A 1-dimensional face of a Polyhedron in ZZ^2 defined as the convex hull of 2 vertices'
"""
desc = ''
desc += 'A ' + repr(self.dim()) + '-dimensional face'
desc += ' of a Polyhedron in '
desc += self.polyhedron().parent()._repr_ambient_module()
if self.n_vertices() > 0:
desc += ' defined as the convex hull of '
desc += repr(self.n_vertices())
if self.n_vertices() == 1: desc += ' vertex'
else: desc += ' vertices'
if self.n_rays() > 0:
if self.n_lines() > 0: desc += ", "
else: desc += " and "
desc += repr(self.n_rays())
if self.n_rays() == 1: desc += ' ray'
else: desc += ' rays'
if self.n_lines() > 0:
if self.n_rays() > 0: desc += ", "
else: desc += " and "
desc += repr(self.n_lines())
if self.n_lines() == 1: desc += ' line'
else: desc += ' lines'
return desc
def polyhedron(self):
"""
Return the containing polyhedron.
EXAMPLES::
sage: P = polytopes.cross_polytope(3); P
A 3-dimensional polyhedron in ZZ^3 defined as the convex hull of 6 vertices
sage: face = P.faces(2)[3]
sage: face
A 2-dimensional face of a Polyhedron in ZZ^3 defined as the convex hull of 3 vertices
sage: face.polyhedron()
A 3-dimensional polyhedron in ZZ^3 defined as the convex hull of 6 vertices
"""
return self._polyhedron
@cached_method
def as_polyhedron(self):
"""
Return the face as an independent polyhedron.
OUTPUT:
A polyhedron.
EXAMPLES::
sage: P = polytopes.cross_polytope(3); P
A 3-dimensional polyhedron in ZZ^3 defined as the convex hull of 6 vertices
sage: face = P.faces(2)[3]
sage: face
A 2-dimensional face of a Polyhedron in ZZ^3 defined as the convex hull of 3 vertices
sage: face.as_polyhedron()
A 2-dimensional polyhedron in ZZ^3 defined as the convex hull of 3 vertices
sage: P.intersection(face.as_polyhedron()) == face.as_polyhedron()
True
"""
P = self._polyhedron
parent = P.parent()
Vrep = (self.vertices(), self.rays(), self.lines())
return P.__class__(parent, Vrep, None)
|
from RestrictedPython import compile_restricted_function
from RestrictedPython import PrintCollector
from RestrictedPython import safe_builtins
from types import FunctionType
def test_compile_restricted_function():
p = ''
body = """
print("Hello World!")
return printed
"""
name = "hello_world"
global_symbols = []
result = compile_restricted_function(
p, # parameters
body,
name,
filename='<string>',
globalize=global_symbols
)
assert result.code is not None
assert result.errors == ()
safe_globals = {
'__name__': 'script',
'_getattr_': getattr,
'_print_': PrintCollector,
'__builtins__': safe_builtins,
}
safe_locals = {}
exec(result.code, safe_globals, safe_locals)
hello_world = safe_locals['hello_world']
assert type(hello_world) == FunctionType
assert hello_world() == 'Hello World!\n'
def test_compile_restricted_function_func_wrapped():
p = ''
body = """
print("Hello World!")
return printed
"""
name = "hello_world"
global_symbols = []
result = compile_restricted_function(
p, # parameters
body,
name,
filename='<string>',
globalize=global_symbols
)
assert result.code is not None
assert result.errors == ()
safe_globals = {
'__name__': 'script',
'_getattr_': getattr,
'_print_': PrintCollector,
'__builtins__': safe_builtins,
}
func = FunctionType(result.code, safe_globals)
func()
assert 'hello_world' in safe_globals
hello_world = safe_globals['hello_world']
assert hello_world() == 'Hello World!\n'
def test_compile_restricted_function_with_arguments():
p = 'input1, input2'
body = """
print(input1 + input2)
return printed
"""
name = "hello_world"
global_symbols = []
result = compile_restricted_function(
p, # parameters
body,
name,
filename='<string>',
globalize=global_symbols
)
assert result.code is not None
assert result.errors == ()
safe_globals = {
'__name__': 'script',
'_getattr_': getattr,
'_print_': PrintCollector,
'__builtins__': safe_builtins,
}
safe_locals = {}
exec(result.code, safe_globals, safe_locals)
hello_world = safe_locals['hello_world']
assert type(hello_world) == FunctionType
assert hello_world('Hello ', 'World!') == 'Hello World!\n'
def test_compile_restricted_function_can_access_global_variables():
p = ''
body = """
print(input)
return printed
"""
name = "hello_world"
global_symbols = ['input']
result = compile_restricted_function(
p, # parameters
body,
name,
filename='<string>',
globalize=global_symbols
)
assert result.code is not None
assert result.errors == ()
safe_globals = {
'__name__': 'script',
'_getattr_': getattr,
'input': 'Hello World!',
'_print_': PrintCollector,
'__builtins__': safe_builtins,
}
safe_locals = {}
exec(result.code, safe_globals, safe_locals)
hello_world = safe_locals['hello_world']
assert type(hello_world) == FunctionType
assert hello_world() == 'Hello World!\n'
def test_compile_restricted_function_pretends_the_code_is_executed_in_a_global_scope(): # NOQA: E501
p = ''
body = """output = output + 'bar'"""
name = "hello_world"
global_symbols = ['output']
result = compile_restricted_function(
p, # parameters
body,
name,
filename='<string>',
globalize=global_symbols
)
assert result.code is not None
assert result.errors == ()
safe_globals = {
'__name__': 'script',
'output': 'foo',
'__builtins__': {},
}
safe_locals = {}
exec(result.code, safe_globals, safe_locals)
hello_world = safe_locals['hello_world']
assert type(hello_world) == FunctionType
hello_world()
assert safe_globals['output'] == 'foobar'
def test_compile_restricted_function_allows_invalid_python_identifiers_as_function_name(): # NOQA: E501
p = ''
body = """output = output + 'bar'"""
name = "<foo>.bar.__baz__"
global_symbols = ['output']
result = compile_restricted_function(
p, # parameters
body,
name,
filename='<string>',
globalize=global_symbols
)
assert result.code is not None
assert result.errors == ()
safe_globals = {
'__name__': 'script',
'output': 'foo',
'__builtins__': {},
}
safe_locals = {}
exec(result.code, safe_globals, safe_locals)
generated_function = tuple(safe_locals.values())[0]
assert type(generated_function) == FunctionType
generated_function()
assert safe_globals['output'] == 'foobar'
def test_compile_restricted_function_handle_SyntaxError():
p = ''
body = """a("""
name = "broken"
result = compile_restricted_function(
p, # parameters
body,
name,
)
assert result.code is None
assert result.errors == (
"Line 1: SyntaxError: unexpected EOF while parsing at statement: 'a('",
)
|
from .pylunasvg import Bitmap, Box, Matrix, Document
from .extensions import loadFromUrl
import os
import numpy as np
from PIL import Image
def svg2png(
svg_file: str, width: int, height: int, scale: float = 1.0, output_file: str = None
):
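# Render an SVG file to a PNG image using lunasvg; width/height default to the
# document's own size, and the result is written next to the input file unless
# output_file is given.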
doc = Document.loadFromFile(svg_file).scale(scale, scale)
if width is None:
width = int(doc.width())
if height is None:
height = int(doc.height())
bitmap = doc.renderToBitmap(width, height)
svgArray = np.array(bitmap, copy=False)
if output_file is None:
Image.fromarray(svgArray).save(svg_file.replace(".svg", ".png"))
else:
Image.fromarray(svgArray).save(output_file, format="png")
|
from contextlib import contextmanager
from ..image import Image
from . import Rect
class SpriteNode(Rect):
def __init__(self, im, width, height, fname=None, pad=(0, 0)):
Rect.__init__(self, (0, 0, width, height))
self.im = im
self.fname = fname
(self.pad_x, self.pad_y) = pad
self.close = im.close
def __str__(self):
clsnam = type(self).__name__
arg = self.fname if self.fname else self.im
args = (clsnam, arg, self.width, self.height)
return "<%s %s (%dx%d)>" % args
def calc_box(self, pos):
x1, y1 = pos
return (x1, y1, x1 + self.width, y1 + self.height)
@classmethod
def from_image(cls, im, *args, **kwds):
args = im.size + args
return cls(im, *args, **kwds)
@classmethod
def load_file(cls, fo, fname=None, pad=(0, 0), **kwds):
if not hasattr(fo, "read"):
if not fname:
fname = fo
fo = open(fo, "rb")
elif not fname and hasattr(fo, "name"):
fname = fo.name
return cls.from_image(Image.load(fo), fname=fname, pad=pad)
@contextmanager
def open_sprites(fnames, **kwds):
fs = [(fn, open(str(fn), "rb")) for fn in fnames]
try:
yield [SpriteNode.load_file(fo, fname=fn, **kwds) for (fn, fo) in fs]
finally:
for fn, fo in fs:
fo.close()
|
import unittest
from rdp.symbols import to_symbol, Symbol, Terminal
class SymbolsTest(unittest.TestCase):
def test_to_symbol(self):
self.assertTrue(isinstance(to_symbol(','), Symbol))
self.assertTrue(isinstance(to_symbol(Terminal(',')), Symbol))
self.assertRaises(TypeError, to_symbol, 42)
|
"""
Functions to read and write ASCII model (.dat) files used by SPECFEM2D
"""
import os
import numpy as np
from glob import glob
from shutil import copyfile
from seisflows3.tools.tools import iterable
def read_slice(path, parameters, iproc):
"""
Reads SPECFEM model slice(s) based on .dat ASCII files
:type path: str
:param path: path to the database files
:type parameters: str
:param parameters: parameters to read, e.g. 'vs', 'vp'
:type iproc: int
:param iproc: processor/slice number to read
:rtype: list of np.array
:return: list of arrays corresponding to model parameters in given order
"""
filename = _get_filename(path, iproc)
available_parameters = _get_available_parameters(filename)
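# Each column of the .dat file holds one parameter; transpose so that
# model[i] is the full array for the i-th parameter.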
model = np.loadtxt(filename).T
vals = []
for key in iterable(parameters):
vals += [model[available_parameters.index(key)]]
return vals
def write_slice(data, path, parameters, iproc):
"""
Writes SPECFEM model slice
!!! This won't work because we need access to the spatial components that
!!! are only available in the model
:type data: seisflows.Container
:param data: data to be written to a slice
:type path: str
:param path: path to the database files
:type parameters: str
:param parameters: parameters to write, e.g. 'vs', 'vp'
:type iproc: int
:param iproc: processor/slice number to write
"""
for key in iterable(parameters):
filename = os.path.join(path, f"proc{int(iproc):06d}_{key}.bin")
_write(data, filename)
def copy_slice(src, dst, iproc, parameter):
"""
Copies SPECFEM model slice
:type src: str
:param src: source location to copy slice from
:type dst: str
:param dst: destination location to copy slice to
:type parameter: str
:param parameter: parameters to copy, e.g. 'vs', 'vp'
:type iproc: int
:param iproc: processor/slice number to copy
"""
filename = os.path.basename(_get_filename(src, iproc))
copyfile(os.path.join(src, filename),
os.path.join(dst, filename))
def _get_filename(path, iproc):
"""
ASCII .dat files list the available parameters in the fileid, meaning
there is no standard format for retrieving files. Use glob to search for
the file based on file extension.
:type path: str
:param path: path to the database files
:type iproc: int
:param iproc: processor/slice number to read
:rtype: str
:return: filename of the model
"""
filename_glob = os.path.join(path, f"proc{int(iproc):06d}_*.dat")
filename = glob(filename_glob)
assert len(filename) == 1, \
f"Expected only one .dat file, found {len(filename)}"
return filename[0]
def _get_available_parameters(filename):
"""
The available parameters are listed in the file name. Split off the
unnecessary text and return the listed parameters.
:type filename: str
:param filename: filename to check parameters from
:rtype: list
:return: list of parameters from the file id
"""
fid = os.path.splitext(os.path.basename(filename))[0]
_, *available_parameters = fid.split("_")
return available_parameters
def _write(v, filename):
"""
Writes Fortran style binary files
Data are written as single precision floating point numbers
"""
n = np.array([4 * len(v)], dtype='int32')
v = np.array(v, dtype='float32')
with open(filename, 'wb') as file:
n.tofile(file)
v.tofile(file)
n.tofile(file)
|
import logging
import functools
from xml.parsers.expat import ExpatError
import xmltodict
from rtcclient.exception import RTCException, BadValue
import six
from lxml import etree
def setup_basic_logging():
logging.basicConfig(level=logging.DEBUG,
format="%(asctime)s %(levelname)s %(name)s: "
"%(message)s")
def token_expire_handler(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
rtc_obj = args[0].get_rtc_obj()
if not hasattr(rtc_obj, "headers") or rtc_obj.headers is None:
# still in the initialization or relogin
# directly call the method
return func(*args, **kwargs)
else:
# check whether token expires
try:
resp = func(*args, **kwargs)
xmltodict.parse(resp.content)
return resp
except ExpatError as excp:
if "invalid token" in str(excp):
# expires
try:
rtc_obj.relogin()
except RTCException:
raise RTCException("Relogin Failed: "
"Invalid username or password")
kwargs["headers"]["Cookie"] = rtc_obj.headers["Cookie"]
return func(*args, **kwargs)
else:
# not expires
# raise the actual exception
raise ExpatError(excp)
return wrapper
def capitalize(keyword):
"""Only capitalize the first character and make the left unchanged
:param keyword: the input string
:return: the capitalized string
"""
if keyword is None:
raise BadValue("Invalid value. None is not supported")
if isinstance(keyword, six.string_types):
if len(keyword) > 1:
return keyword[0].upper() + keyword[1:]
else:
return keyword.capitalize()
else:
raise BadValue("Input value %s is not string type" % keyword)
def remove_empty_elements(docs):
root = etree.fromstring(bytes(docs, 'utf-8'))
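# Drop childless elements, but keep the ones that carry an rdf:resource reference.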
for element in root.xpath("//*[not(node())]"):
if "rdf:resource" not in str(etree.tostring(element)):
element.getparent().remove(element)
return etree.tostring(root)
|
from twisted.internet import defer, reactor
class __Node(object):
def execute(self):
pass
class BlackBoard(object):
pass
class ParallelNode(__Node):
def __init__(self, blackboard=None):
self.children = []
self.blackboard = blackboard
def execute(self):
if len(self.children) == 0:
print('Parallel needs at least one child')
return None
dl = []
for child in self.children:
d = child.execute()
dl.append(d)
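# Aggregate the child Deferreds: the callback fires once all children finish,
# while any single failure errbacks immediately (fireOnOneErrback).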
defList = defer.DeferredList(dl, fireOnOneErrback=True, consumeErrors=True)
self.d = defer.Deferred()
defList.addCallback(self.parallelCallback)
defList.addErrback(self.error)
return self.d
def error(self, fail):
print(fail)
self.d.errback(fail)
def parallelCallback(self, result):
print('parallel callback')
agg = True
for _, test in result:
agg = agg and test
self.d.callback(agg)
def add(self, child):
child.blackboard = self.blackboard
self.children.append(child)
return self
class Sequence(__Node):
def __init__(self, blackboard=None):
self.children = []
self.blackboard = blackboard
self.curChild = 0
def execute(self):
if not self.children:
print('Sequence needs at least one child')
return None
self.d = defer.Deferred()
self.moveToNextChild(True)
return self.d
def error(self, fail):
print('Error handler in Sequence node')
self.d.errback(fail)
def moveToNextChild(self, data):
if data == True:
if self.curChild >= len(self.children):
# Sequence finished with true
self.d.callback(True)
return
item = self.children[self.curChild]
self.curChild += 1
deferFromChild = item.execute()
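# A child may return either a plain bool or a Deferred; handle both cases.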
if isinstance(deferFromChild, bool):
self.moveToNextChild(deferFromChild)
else:
deferFromChild.addCallback(self.moveToNextChild)
# deferFromChild.addErrback(self.error)
else:
# Sequence finished with false
self.d.callback(False)
def add(self, child):
child.blackboard = self.blackboard
self.children.append(child)
return self
class Selector(__Node):
def __init__(self, blackboard=None):
self.children = []
self.blackboard = blackboard
self.curChild = 0
def execute(self):
if not self.children:
print('Selector needs at least one child')
return None
self.d = defer.Deferred()
self.moveToNextChild(False)
return self.d
def error(self, fail):
print('Error handler in Selector node')
self.d.errback(fail)
def moveToNextChild(self, data):
if data == True:
self.d.callback(True)
else:
if self.curChild >= len(self.children):
# Select finished with true
self.d.callback(False)
return
item = self.children[self.curChild]
self.curChild = self.curChild + 1
deferFromChild = item.execute()
if isinstance(deferFromChild, bool):
self.moveToNextChild(deferFromChild)
else:
deferFromChild.addCallback(self.moveToNextChild)
deferFromChild.addErrback(self.error)
def add(self, child):
child.blackboard = self.blackboard
self.children.append(child)
return self
# Action (Nodes)
class Action(__Node):
def __init__(self, blackboard=None):
self.blackboard = blackboard
def execute(self):
return self.action()
def action(self):
d = defer.Deferred()
print "Dummy - Action Body"
reactor.callLater(1, d.callback, True)
return d
# Test program
class NodeA(Action):
def action(self):
print('Action - NodeA')
d = defer.Deferred()
reactor.callLater(1, d.callback, True)
return d
class NodeB(Action):
def action(self):
print('Action - NodeB')
d = defer.Deferred()
reactor.callLater(1, d.errback, ValueError('error in Node b'))
return d
def finish(data):
print('finish %i' % data)
if reactor.running:
reactor.stop()
def error(fail):
print('Behavior tree failed')
print(str(fail))
if reactor.running:
reactor.stop()
if __name__ == '__main__':
print('Test behavior trees')
bb = BlackBoard()
root = ParallelNode(bb)
root.add(NodeB())
root.add(NodeA())
d = root.execute()
d.addCallback(finish)
d.addErrback(error)
reactor.run()
|
from collections import defaultdict
from random import random, randint
from glob import glob
from math import log
import argparse
from nltk.corpus import stopwords
from nltk.probability import FreqDist
from nltk.tokenize import TreebankWordTokenizer
kTOKENIZER = TreebankWordTokenizer()
kDOC_NORMALIZER = True
import time
def dict_sample(d, cutoff=-1):
"""
Sample a key from a dictionary using the values as probabilities (unnormalized)
"""
if cutoff==-1:
cutoff = random()
normalizer = float(sum(d.values()))
current = 0
for i in d:
assert(d[i] > 0)
current += float(d[i]) / normalizer
if current >= cutoff:
return i
print("Didn't choose anything: %f %f" % (cutoff, current))
def lgammln(xx):
"""
Returns the gamma function of xx.
Gamma(z) = Integral(0,infinity) of t^(z-1)exp(-t) dt.
Usage: lgammln(xx)
Copied from stats.py by [email protected]
"""
assert xx > 0, "Arg to gamma function must be > 0; got %f" % xx
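# Coefficients of the Lanczos series approximation to ln(Gamma(x)).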
coeff = [76.18009173, -86.50532033, 24.01409822, -1.231739516,
0.120858003e-2, -0.536382e-5]
x = xx - 1.0
tmp = x + 5.5
tmp = tmp - (x + 0.5) * log(tmp)
ser = 1.0
for j in range(len(coeff)):
x = x + 1
ser = ser + coeff[j] / x
return -tmp + log(2.50662827465 * ser)
class RandomWrapper:
"""
Class to wrap a random number generator to facilitate deterministic testing.
"""
def __init__(self, buff):
self._buffer = buff
self._buffer.reverse()
def __call__(self):
val = self._buffer.pop()
print("Using random value %0.2f" % val)
return val
class VocabBuilder:
"""
Creates a vocabulary after scanning a corpus.
"""
def __init__(self, lang="english", min_length=3, cut_first=100):
"""
Set the minimum length of words and which stopword list (by language) to
use.
"""
self._counts = FreqDist()
self._stop = set(stopwords.words(lang))
self._min_length = min_length
self._cut_first = cut_first
print("Using stopwords: %s ... " % " ".join(list(self._stop)[:10]))
def scan(self, words):
"""
Add a list of words as observed.
"""
for ii in [x.lower() for x in words if x.lower() not in self._stop \
and len(x) >= self._min_length]:
self._counts[ii] += 1
def vocab(self, size=5000):
"""
Return a list of the top words sorted by frequency.
"""
keys = list(self._counts.keys())
if len(self._counts) > self._cut_first + size:
return keys[self._cut_first:(size + self._cut_first)]
else:
return keys[:size]
class LdaTopicCounts:
"""
This class works for normal LDA. There is no correlation between words,
although words can have an asymmetric prior.
"""
def __init__(self, beta=0.01):
"""
Create a topic count with the provided Dirichlet parameter
"""
self._beta = {}
self._beta_sum = 0.0
# Maintain a count for each word
self._normalizer = FreqDist()
self._topic_term = defaultdict(FreqDist)
self._default_beta = beta
self._finalized = False
def set_vocabulary(self, words):
"""
Sets the vocabulary for the topic model. Only these words will be
recognized.
"""
for ii in range(len(words)):
self._beta_sum += self._default_beta
def change_prior(self, word, beta):
"""
Change the prior for a single word.
"""
assert not self._finalized, "Priors are fixed once sampling starts."
self._beta[word] = beta
self._beta_sum += (beta - self._default_beta)
def initialize(self, word, topic):
"""
During initialization, record that a token of word ``word`` was assigned to topic ``topic``.
"""
self._topic_term[topic][word] += 1
self._normalizer[topic] += 1
def change_count(self, topic, word, delta):
"""
Change the topic count associated with a word in the topic
"""
self._finalized = True
self._topic_term[topic][word] += delta
self._normalizer[topic] += delta
def get_normalizer(self, topic):
"""
Return the normalizer of this topic
"""
return self._beta_sum + self._normalizer[topic]
def get_prior(self, word):
"""
Return the prior probability of a word. For tree-structured priors,
return the probability marginalized over all paths.
"""
return self._beta.get(word, self._default_beta)
def get_observations(self, topic, word):
"""
Return the number of occurrences of a combination of topic, word, and
path.
"""
return self._topic_term[topic][word]
def word_in_topic(self, topic, word):
"""
Return the probability of a word type in a topic
"""
val = self.get_observations(topic, word) + self.get_prior(word)
val /= self.get_normalizer(topic)
return val
def report(self, vocab, handle, limit=25):
"""
Create a human readable report of topic probabilities to a file.
"""
for kk in self._normalizer:
normalizer = self.get_normalizer(kk)
handle.write("------------\nTopic %i (%i tokens)\n------------\n" % \
(kk, self._normalizer[kk]))
word = 0
for ww in self._topic_term[kk]:
handle.write("%0.5f\t%0.5f\t%0.5f\t%s\n" % \
(self.word_in_topic(kk, ww),
self.get_observations(kk, ww),
self.get_prior(ww),
vocab[ww]))
word += 1
if word > limit:
break
class Sampler:
def __init__(self, num_topics, vocab, alpha=0.1, beta=0.01, rand_stub=None):
"""
Create a new LDA sampler with the provided characteristics
"""
self._num_topics = num_topics
self._doc_counts = defaultdict(FreqDist)
self._doc_tokens = defaultdict(list)
self._doc_assign = defaultdict(list)
self._alpha = [alpha for x in range(num_topics)]
self._sample_stats = defaultdict(int)
self._vocab = vocab
self._topics = LdaTopicCounts(beta)
self._topics.set_vocabulary(vocab)
self._lhood = []
self._time = []
self._rand_stub = rand_stub
def change_alpha(self, idx, val):
"""
Update the alpha value; note that this invalidates precomputed values.
"""
self._alpha[idx] = val
def get_doc(self, doc_id):
"""
Get the data associated with an individual document
"""
return self._doc_tokens[doc_id], self._doc_assign[doc_id], \
self._doc_counts[doc_id]
def add_doc(self, doc, vocab, doc_id = None,
token_limit=-1):
"""
Add a document to the corpus. If a doc_id is not supplied, a new one
will be provided.
"""
temp_doc = [vocab.index(x) for x in doc if x in vocab]
if not doc_id:
doc_id = len(self._doc_tokens)
assert not doc_id in self._doc_tokens, "Doc " + str(doc_id) + \
" already added"
if len(temp_doc) == 0:
print("WARNING: empty document (perhaps the vocab doesn't make sense?)")
else:
self._doc_tokens[doc_id] = temp_doc
token_count = 0
for ww in temp_doc:
assignment = randint(0, self._num_topics - 1)
self._doc_assign[doc_id].append(assignment)
self._doc_counts[doc_id][assignment] += 1
self._topics.initialize(ww, assignment)
token_count += 1
if token_limit > 0 and token_count > token_limit:
break
assert len(self._doc_assign[doc_id]) == len(temp_doc), \
"%s != %s" % (str(self._doc_assign[doc_id]), str(temp_doc))
return doc_id
def change_topic(self, doc, index, new_topic):
"""
Change the topic of a token in a document. Update the counts
appropriately. -1 is used to denote "unassigning" the word from a topic.
"""
assert doc in self._doc_tokens, "Could not find document %i" % doc
assert index < len(self._doc_tokens[doc]), \
"Index %i out of range for doc %i (max: %i)" % \
(index, doc, len(self._doc_tokens[doc]))
term = self._doc_tokens[doc][index]
alpha = self._alpha
assert index < len(self._doc_assign[doc]), \
"Bad index %i for document %i, term %i %s" % \
(index, doc, term, str(self._doc_assign[doc]))
old_topic = self._doc_assign[doc][index]
if old_topic != -1:
assert new_topic == -1
# TODO: Add code here to keep track of the counts and
# assignments
if new_topic != -1:
assert old_topic == -1
# TODO: Add code here to keep track of the counts and
# assignments
def run_sampler(self, iterations = 100):
"""
Sample the topic assignments of all tokens in all documents for the
specified number of iterations.
"""
for ii in range(iterations):
start = time.time()
for jj in self._doc_assign:
self.sample_doc(jj)
total = time.time() - start
lhood = self.lhood()
print("Iteration %i, likelihood %f, %0.5f seconds" % (ii, lhood, total))
self._lhood.append(lhood)
self._time.append(total)
def report_topics(self, vocab, outputfilename, limit=10):
"""
Produce a report to a file of the most probable words in a topic, a
history of the sampler, and the state of the Markov chain.
"""
topicsfile = open(outputfilename + ".topics", 'w')
self._topics.report(vocab, topicsfile, limit)
statsfile = open(outputfilename + ".stats", 'w')
tmp = "iter\tlikelihood\ttime(s)\n"
statsfile.write(tmp)
for it in range(0, len(self._lhood)):
tmp = str(it) + "\t" + str(self._lhood[it]) + "\t" + str(self._time[it]) + "\n"
statsfile.write(tmp)
statsfile.close()
topicassignfile = open(outputfilename + ".topic_assign", 'w')
for doc_id in self._doc_assign.keys():
tmp = " ".join([str(x) for x in self._doc_assign[doc_id]]) + "\n"
topicassignfile.write(tmp)
topicassignfile.close()
doctopicsfile = open(outputfilename + ".doc_topics", 'w')
for doc_id in self._doc_counts.keys():
tmp = ""
for tt in range(0, self._num_topics):
tmp += str(self._doc_counts[doc_id][tt]) + " "
tmp = tmp.strip()
tmp += "\n"
doctopicsfile.write(tmp)
doctopicsfile.close()
def sample_probs(self, doc_id, index):
"""
Create a dictionary storing the conditional probability of this token being assigned to each topic.
"""
assert self._doc_assign[doc_id][index] == -1, \
"Sampling doesn't make sense if this hasn't been unassigned."
sample_probs = {}
term = self._doc_tokens[doc_id][index]
for kk in range(self._num_topics):
# TODO: Compute the conditional probability of
# sampling a topic; at the moment it's just the
# uniform probability.
sample_probs[kk] = 1.0 / float(self._num_topics)
return sample_probs
def sample_doc(self, doc_id, debug=False):
"""
For a single document, compute the conditional probabilities and
resample topic assignments.
"""
one_doc_topics = self._doc_assign[doc_id]
topics = self._topics
for index in range(len(one_doc_topics)):
self.change_topic(doc_id, index, -1)
sample_probs = self.sample_probs(doc_id, index)
if self._rand_stub:
cutoff = self._rand_stub()
else:
cutoff = random()
new_topic = dict_sample(sample_probs, cutoff)
self.change_topic(doc_id, index, new_topic)
return self._doc_assign[doc_id]
def lhood(self):
val = self.doc_lhood() + self.topic_lhood()
return val
def doc_lhood(self):
doc_num = len(self._doc_counts)
alpha_sum = sum(self._alpha)
val = 0.0
val += lgammln(alpha_sum) * doc_num
tmp = 0.0
for tt in range(0, self._num_topics):
tmp += lgammln(self._alpha[tt])
val -= tmp * doc_num
for doc_id in self._doc_counts:
for tt in range(0, self._num_topics):
val += lgammln(self._alpha[tt] + self._doc_counts[doc_id][tt])
val -= lgammln(alpha_sum + len(self._doc_assign[doc_id]))
return val
def topic_lhood(self):
val = 0.0
vocab_size = len(self._vocab)
val += lgammln(self._topics._beta_sum) * self._num_topics
val -= lgammln(self._topics._default_beta) * vocab_size * self._num_topics
for tt in range(0, self._num_topics):
for ww in self._vocab:
val += lgammln(self._topics._default_beta + self._topics._topic_term[tt][ww])
val -= lgammln(self._topics.get_normalizer(tt))
return val
def tokenize_file(filename):
contents = open(filename).read()
for ii in kTOKENIZER.tokenize(contents):
yield ii
if __name__ == "__main__":
argparser = argparse.ArgumentParser()
argparser.add_argument("--doc_dir", help="Where we read the source documents",
type=str, default=".", required=False)
argparser.add_argument("--language", help="The language we use",
type=str, default="english", required=False)
argparser.add_argument("--output", help="Where we write results",
type=str, default="result", required=False)
argparser.add_argument("--vocab_size", help="Size of vocabulary",
type=int, default=1000, required=False)
argparser.add_argument("--num_topics", help="Number of topics",
type=int, default=5, required=False)
argparser.add_argument("--num_iterations", help="Number of iterations",
type=int, default=100, required=False)
args = argparser.parse_args()
vocab_scanner = VocabBuilder(args.language)
# Create a list of the files
search_path = u"%s/*.txt" % args.doc_dir
files = glob(search_path)
assert len(files) > 0, "Did not find any input files in %s" % search_path
# Create the vocabulary
for ii in files:
vocab_scanner.scan(tokenize_file(ii))
# Initialize the documents
vocab = vocab_scanner.vocab(args.vocab_size)
print(len(vocab), vocab[:10])
lda = Sampler(args.num_topics, vocab)
for ii in files:
lda.add_doc(tokenize_file(ii), vocab)
lda.run_sampler(args.num_iterations)
lda.report_topics(vocab, args.output)
|
# coding: utf-8
# tests/conftest.py
import pytest
from app import guard
from app import create_app
from app.database import init_test_db, db_test_session
from app.orm import start_mapper
start_mapper()
@pytest.fixture
def app_instance():
app_test = create_app()
yield app_test
db_test_session.remove()
@pytest.fixture
def client(app_instance):
with app_instance.test_client() as client:
yield client
|
from django.conf import settings
from celeryutils import task
from tower import ugettext as _
import amo
from addons.tasks import create_persona_preview_images
from amo.decorators import write
from amo.storage_utils import move_stored_file
from amo.utils import LocalFileStorage, send_mail_jinja
from editors.models import ReviewerScore
import mkt.constants.reviewers as rvw
@task
def send_mail(cleaned_data, theme_lock):
"""
Send emails out for respective review actions taken on themes.
"""
theme = cleaned_data['theme']
action = cleaned_data['action']
comment = cleaned_data['comment']
reject_reason = cleaned_data['reject_reason']
reason = None
if reject_reason:
reason = rvw.THEME_REJECT_REASONS[reject_reason]
elif action == rvw.ACTION_DUPLICATE:
reason = _('Duplicate Submission')
emails = set(theme.addon.authors.values_list('email', flat=True))
context = {
'theme': theme,
'base_url': settings.SITE_URL,
'reason': reason,
'comment': comment
}
subject = None
if action == rvw.ACTION_APPROVE:
subject = _('Thanks for submitting your Theme')
template = 'reviewers/themes/emails/approve.html'
elif action == rvw.ACTION_REJECT:
subject = _('A problem with your Theme submission')
template = 'reviewers/themes/emails/reject.html'
elif action == rvw.ACTION_DUPLICATE:
subject = _('A problem with your Theme submission')
template = 'reviewers/themes/emails/reject.html'
elif action == rvw.ACTION_FLAG:
subject = _('Theme submission flagged for review')
template = 'reviewers/themes/emails/flag_reviewer.html'
# Send the flagged email to themes email.
emails = [settings.THEMES_EMAIL]
elif action == rvw.ACTION_MOREINFO:
subject = _('A question about your Theme submission')
template = 'reviewers/themes/emails/moreinfo.html'
context['reviewer_email'] = theme_lock.reviewer.email
send_mail_jinja(subject, template, context,
recipient_list=emails, from_email=settings.ADDONS_EMAIL,
headers={'Reply-To': settings.THEMES_EMAIL})
@task
def approve_rereview(theme):
"""Replace original theme with pending theme on filesystem."""
# If reuploaded theme, replace old theme design.
storage = LocalFileStorage()
rereview = theme.rereviewqueuetheme_set.all()
reupload = rereview[0]
if reupload.header_path != reupload.theme.header_path:
move_stored_file(
reupload.header_path, reupload.theme.header_path,
storage=storage)
if reupload.footer_path != reupload.theme.footer_path:
move_stored_file(
reupload.footer_path, reupload.theme.footer_path,
storage=storage)
create_persona_preview_images(
src=reupload.theme.header_path,
full_dst=[
reupload.theme.header_path.replace('header', 'preview'),
reupload.theme.header_path.replace('header', 'icon')],
set_modified_on=[reupload.theme.addon])
rereview.delete()
theme.addon.increment_version()
@task
def reject_rereview(theme):
"""Delete pending theme from filesystem."""
storage = LocalFileStorage()
rereview = theme.rereviewqueuetheme_set.all()
reupload = rereview[0]
storage.delete(reupload.header_path)
storage.delete(reupload.footer_path)
rereview.delete()
@task
@write
def _batch_award_points(activity_logs, **kwargs):
"""For migration award_theme_rev_points."""
for log in activity_logs:
if not ReviewerScore.objects.filter(
user=log.user, addon=log.arguments[0],
score=amo.REVIEWED_SCORES.get(amo.REVIEWED_PERSONA),
note_key=amo.REVIEWED_PERSONA, note='RETROACTIVE').exists():
ReviewerScore.objects.create(
user=log.user, addon=log.arguments[0],
score=amo.REVIEWED_SCORES.get(amo.REVIEWED_PERSONA),
note_key=amo.REVIEWED_PERSONA, note='RETROACTIVE')
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
batch_matmul
"""
import os
import pytest
from tests.common.base import TestBase
class TestCase(TestBase):
def setup(self):
case_name = "test_akg_batchmatmul_run_001"
case_path = os.getcwd()
self.params_init(case_name, case_path)
self.caseresult = True
self._log.info("============= {0} Setup case============".format(self.casename))
self.testarg = [
# caseflag, opfuncname, testRunArgs, dimArgs
# bs, m, n, k, bias_shape, dtype, kernel_name, attrs
("batch_matmul_001", "batchmatmul_run",
((4,), 16, 48, 32, (1,), "float32", False, True, "batch_matmul_output")),
#("batch_matmul_002", "batchmatmul_run",
# ((4,), 16, 48, 32, (48,), "float32", False, True, "batch_matmul_output")),
("batch_matmul_003", "batchmatmul_run",
((4,), 16, 48, 32, (4, 16, 48), "float32", False, True, "batch_matmul_output")),
("batch_matmul_004", "batchmatmul_run", ((), 16, 48, 32, (), "float32", True, False, "batch_matmul_output")),
("batch_matmul_005", "batchmatmul_run", ((), 16, 48, 32, (), "float32", False, True, "batch_matmul_output")),
("batch_matmul_006", "batchmatmul_run",
((4, 2), 16, 48, 32, (1, 1), "float32", False, False, "batch_matmul_output")),
#("batch_matmul_007", "batchmatmul_run",
# ((4, 2), 16, 48, 32, (1, 48), "float32", False, False, "batch_matmul_output")),
("batch_matmul_008", "batchmatmul_run",
((4, 2), 16, 48, 32, (4, 2, 16, 48), "float32", False, False, "batch_matmul_output")),
("batch_matmul_009", "batchmatmul_run",
((4, 2), 16, 48, 32, (), "float32", True, False, "batch_matmul_output")),
("batch_matmul_010", "batchmatmul_run",
((8, 16), 128, 128, 64, (), "float32", False, True, "batch_matmul_output")),
############################
# for bert small case add by mini in ci
############################
# ("matmul_0033", "batchmatmul_run", ((), 3072, 768, 8192, (), "float16", True, False, "batch_matmul_bert")),
# ("matmul_0037", "batchmatmul_run", ((), 33, 64, 16384, (), "float32", True, False, "batch_matmul_bert")),
("matmul_0053", "batchmatmul_run", ((), 32000, 768, 20, (), "float32", True, False, "batch_matmul_bert")),
('matmul_0060', "batchmatmul_run", ((), 20, 768, 32000, (), 'float32', False, False, 'batchmatmul_bert')),
('matmul_0061', "batchmatmul_run", ((128,), 768, 64, 128, (), 'float32', False, False, 'batchmatmul_bert')),
# ('matmul_0062', "batchmatmul_run", ((), 16384, 6384, 33, (), 'float32', True, False, 'batchmatmul_bert')),
('matmul_0063', "batchmatmul_run", ((), 32000, 768, 20, (), 'float32', False, False, 'batchmatmul_bert')),
]
self.testarg_cloud = [
# caseflag, opfuncname, testRunArgs, dimArgs
# bs, m, n, k, bias_shape, dtype, kernel_name, attrs
(
"batch_matmul_001", "batchmatmul_run", ((), 16, 48, 32, (1,), "float32", False, True, "batch_matmul_output")),
("batch_matmul_002", "batchmatmul_run",
((), 16, 48, 32, (48,), "float32", False, True, "batch_matmul_output")),
("batch_matmul_003", "batchmatmul_run",
((), 16, 48, 32, (1, 1), "float32", False, True, "batch_matmul_output")),
("batch_matmul_004", "batchmatmul_run",
((), 16, 48, 32, (16, 1), "float32", False, True, "batch_matmul_output")),
("batch_matmul_005", "batchmatmul_run",
((), 16, 48, 32, (1, 48), "float32", False, True, "batch_matmul_output")),
("batch_matmul_006", "batchmatmul_run",
((), 16, 48, 32, (16, 48), "float32", False, True, "batch_matmul_output")),
# ("batch_matmul_007", "batchmatmul_run", ((64, 12), 128, 128, 64, (), "float32", False, False, "batch_matmul_output")),
("batch_matmul_001", "batchmatmul_run",
((1,), 128, 128, 1, (), "float32", False, False, "batch_matmul_output")),
]
self.testarg_rpc_cloud = [
# caseflag, opfuncname, testRunArgs, dimArgs
# bs, m, n, k, bias_shape, dtype, kernel_name, attrs
# 4D
# ("batch_matmul_4D_001", "batchmatmul_run", ((128,), 128, 64, 768, (), "float32", True, False, "batch_matmul_output")),
# ("batch_matmul_4D_002", "batchmatmul_run", ((64, 12), 128, 64, 128, (), "float32", False, False, "batch_matmul_output")),
# ("batch_matmul_4D_003", "batchmatmul_run", ((128,), 768, 128, 64, (), "float32", False, True, "batch_matmul_output")),
# ("batch_matmul_4D_004", "batchmatmul_run", ((64, 12), 128, 128, 64, (), "float32", False, True, "batch_matmul_output")),
# ("batch_matmul_4D_005", "batchmatmul_run", ((128,), 768, 64, 128, (), "float32", False, False, "batch_matmul_output")),
# caseflag, opfuncname, testRunArgs, dimArgs
# bs, m, n, k, bias_shape, dtype, kernel_name, attrs
("batch_matmul_007", "batchmatmul_run",
((64, 12), 128, 128, 64, (), "float32", False, False, "batch_matmul_output")),
("batch_matmul_007", "batchmatmul_run",
((1, 12), 128, 128, 64, (), "float32", False, False, "batch_matmul_output")),
("batch_matmul_001", "batchmatmul_run",
((1, 12), 128, 64, 128, (), "float32", False, False, "batch_matmul_output")),
# Matrix (2D)
("batch_matmul_2D_001", "batchmatmul_run",
((), 8192, 3072, 768, (), "float32", False, False, "batch_matmul_output")),
("batch_matmul_2D_007", "batchmatmul_run",
((), 64, 2, 768, (), "float32", False, True, "batch_matmul_output")),
("batch_matmul_2D_008", "batchmatmul_run",
((), 8192, 768, 21128, (), "float32", False, False, "batch_matmul_output"),
((16, 16), (16, 16), (16, 16))),
("batch_matmul_2D_009", "batchmatmul_run",
((), 8192, 768, 3072, (), "float32", False, True, "batch_matmul_output")),
("batch_matmul_2D_012", "batchmatmul_run",
((), 64, 768, 2, (), "float32", False, False, "batch_matmul_output")),
("batch_matmul_2D_013", "batchmatmul_run",
((), 8192, 768, 2, (), "float32", False, False, "batch_matmul_output")),
("batch_matmul_2D_014", "batchmatmul_run",
((), 8192, 768, 768, (), "float32", False, False, "batch_matmul_output")),
("batch_matmul_2D_015", "batchmatmul_run",
((), 64, 768, 768, (), "float32", False, False, "batch_matmul_output")),
("batch_matmul_2D_016", "batchmatmul_run",
((), 21128, 768, 8192, (), "float32", False, True, "batch_matmul_output")),
("batch_matmul_2D_017", "batchmatmul_run",
((), 768, 768, 1280, (), "float32", True, False, "batch_matmul_output")),
# # float - float:[64, 16, 128, 64] - [64, 16, 128, 64] = float:[64, 16, 128, 128]
("batch_matmul_4D_001", "batchmatmul_run",
((64, 16), 128, 128, 64, (), "float32", False, True, "batch_matmul_output")),
# float - float:[8, 16, 128, 64] - [8, 16, 128, 64] = float:[8, 16, 128, 128]
("batch_matmul_4D_002", "batchmatmul_run",
((8, 16), 128, 128, 64, (), "float32", False, True, "batch_matmul_output")),
# float - float:[64, 16, 128, 128] - [64, 16, 128, 64] = float:[64, 16, 128, 64]
("batch_matmul_4D_003", "batchmatmul_run",
((64, 16), 128, 64, 128, (), "float32", False, False, "batch_matmul_output")),
# float - float:[8, 16, 128, 128] - [8, 16, 128, 64] = float:[8, 16, 128, 64]
("batch_matmul_4D_004", "batchmatmul_run",
((8, 16), 128, 64, 128, (), "float32", False, False, "batch_matmul_output")),
# half - half:[128, 768, 128] - [128, 768, 64] = half:[128, 128, 64]
("batch_matmul_3D_005", "batchmatmul_run",
((128,), 128, 64, 768, (), "float16", True, False, "batch_matmul_output")),
# half - half:[64, 12, 128, 128] - [64, 12, 128, 64] = half:[64, 12, 128, 64]
("batch_matmul_4D_006", "batchmatmul_run",
((64, 12), 128, 64, 128, (), "float16", False, False, "batch_matmul_output")),
# # half - half:[128, 768, 64] - [128, 128, 64] = half:[128, 768, 128]
("batch_matmul_3D_007", "batchmatmul_run",
((128,), 768, 128, 64, (), "float16", False, True, "batch_matmul_output")),
# # half - half:[64, 12, 128, 64] - [64, 12, 128, 64] = half:[64, 12, 128, 128]
("batch_matmul_4D_008", "batchmatmul_run",
((64, 12), 128, 128, 64, (), "float16", False, True, "batch_matmul_output")),
# # half - half:[128, 768, 128] - [128, 128, 64] = half:[128, 768, 64]
("batch_matmul_3D_009", "batchmatmul_run",
((128,), 768, 64, 128, (), "float16", False, False, "batch_matmul_output")),
# cost a long time
# 3461 seconds for below this by run on 1980
# ("batch_matmul_2D_17", "batchmatmul_run", ((), 30522, 1024, 1280, False, "float32", True, False, "batch_matmul_output"), ((32, 32), (32, 32), (32, 32))),
# 3569 seconds for below this by run on 1980
# ("batch_matmul_2D_29", "batchmatmul_run", ((), 1280, 1024, 30522, False, "float32", False, False, "batch_matmul_output"), ((32, 32), (32, 32), (32, 32))),
# fail for now,
# As do not support that trans_a and trans_b both true:
# ("batch_matmul_2D_27", "batchmatmul_run", ((), 1024, 1024, 64, False, "float32", True, True, "batch_matmul_output")),
# half - half:[8192, 3072] - [768, 3072] = half:[8192, 768]
("matmul_0043", "batchmatmul_run",
((), 8192, 768, 3072, (), "float16", False, True, "batch_matmul_output_fp16")),
# half - half:[8192, 768] - [3072, 768] = half:[8192, 3072]
("matmul_0044", "batchmatmul_run",
((), 8192, 3072, 768, (), "float16", False, True, "batch_matmul_output_fp16")),
# half - half:[8192, 768] - [768, 768] = half:[8192, 768]
("matmul_0048", "batchmatmul_run",
((), 8192, 768, 768, (), "float16", False, False, "batch_matmul_output_fp16")),
# error: Not all Vars are passed in api_args: 'cc5' 'cc5' 'cc5' does not appear in api_args
# ("matmul_0029", "batchmatmul_run", ((), 768, 768, 8192, (), "float16", True, False, "batch_matmul_output"), ((1,1),(16,1),(1024,1))),
# ("matmul_0033", "batchmatmul_run", ((), 3072, 768, 8192, (), "float16", True, False, "batch_matmul_output"), ((1,1),(16,1),(1024,1))),
("matmul_0036", "batchmatmul_run",
((), 768, 3072, 768, (), "float16", False, False, "batch_matmul_output_fp16")),
# half - half:[8192, 768] - [768, 3072] = half:[8192, 3072]
("matmul_0035", "batchmatmul_run",
((), 8192, 3072, 768, (), "float16", False, False, "batch_matmul_output_fp16")),
# # half - half:[8192, 3072] - [3072, 768] = half:[8192, 768]
("matmul_0052", "batchmatmul_run",
((), 8192, 768, 3072, (), "float16", False, False, "batch_matmul_output_fp16")),
# lenet
('matmul_lenet_001_fp32', "batchmatmul_run", ((), 1, 120, 784, (120,), 'float32', False, True, 'batchmatmul_output')),
('matmul_lenet_002_fp32', "batchmatmul_run", ((), 1, 84, 120, (84,), 'float32', False, True, 'batchmatmul_output')),
('matmul_lenet_003_fp32', "batchmatmul_run", ((), 1, 10, 84, (10,), 'float32', False, True, 'batchmatmul_output')),
('matmul_lenet_004_fp32', "batchmatmul_run", ((), 10, 84, 1, (), 'float32', True, False, 'batchmatmul_output')),
('matmul_lenet_005_fp32', "batchmatmul_run", ((), 1, 84, 10, (), 'float32', False, False, 'batchmatmul_output')),
('matmul_lenet_006_fp32', "batchmatmul_run", ((), 84, 120, 1, (), 'float32', True, False, 'batchmatmul_output')),
('matmul_lenet_007_fp32', "batchmatmul_run", ((), 1, 120, 84, (), 'float32', False, False, 'batchmatmul_output')),
('matmul_lenet_008_fp16', "batchmatmul_run", ((), 120, 784, 1, (), 'float16', True, False, 'batchmatmul_output')),
('matmul_lenet_009_fp16', "batchmatmul_run", ((), 1, 784, 120, (), 'float32', False, False, 'batchmatmul_output')),
]
return
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_run(self):
"""
run case.#
:return:
"""
self.common_run(self.testarg)
def test_run_cloud(self):
"""
run case.#
:return:
"""
self.common_run(self.testarg_cloud)
def test_rpc_cloud(self):
"""
run case.#
:return:
"""
self.common_run([self.testarg_rpc_cloud[0]])
def teardown(self):
"""
clean environment
:return:
"""
self._log.info("============= {0} Teardown============".format(self.casename))
return
|
# Copyright 2022, Kay Hayen, mailto:[email protected]
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Hacks for scons that we apply.
We block some tools from the standard scan, there is e.g. no need to ask
what fortran version we have installed to compile with Nuitka.
Also we hack the gcc version detection to fix some bugs in it, and to avoid
scanning for g++ when we have gcc installed, but only if that version is not
too old.
"""
import os
import re
import subprocess
import SCons.Tool.gcc # pylint: disable=I0021,import-error
from SCons.Script import Environment # pylint: disable=I0021,import-error
from nuitka.Tracing import scons_details_logger
from nuitka.utils.FileOperations import openTextFile
from .SconsUtils import decodeData, getExecutablePath, isGccName
# Cache for detected versions.
v_cache = {}
# Prevent these programs from being found, avoiding the burden of tool init.
_blocked_tools = (
# TODO: Where the fallback is needed, g++ needs to scanned or else it
# cannot be used.
# "g++",
"c++",
"f95",
"f90",
"f77",
"gfortran",
"ifort",
"javah",
"tar",
"dmd",
"gdc",
"flex",
"bison",
"ranlib",
"ar",
"ldc2",
"pdflatex",
"pdftex",
"latex",
"tex",
"dvipdf",
"dvips",
"gs",
"swig",
"ifl",
"rpcgen",
"rpmbuild",
"bk",
"p4",
"m4",
"ml",
"icc",
"sccs",
"rcs",
"cvs",
"as",
"gas",
"nasm",
)
def _myDetectVersion(env, clvar):
clvar0 = os.path.basename(clvar[0])
if isGccName(clvar0) or "clang" in clvar0:
command = clvar + ("-dumpversion",)
else:
command = clvar + ("--version",)
# pipe = SCons.Action._subproc(env, SCons.Util.CLVar(cc) + ['-dumpversion'],
pipe = SCons.Action._subproc( # pylint: disable=protected-access
env, command, stdin="devnull", stderr="devnull", stdout=subprocess.PIPE
)
line = pipe.stdout.readline()
# Non-GNU compiler's output (like AIX xlc's) may exceed the stdout buffer:
# So continue with reading to let the child process actually terminate.
while pipe.stdout.readline():
pass
ret = pipe.wait()
if ret != 0:
scons_details_logger.info(
"Error, error exit from '%s' (%d) gave %r."
% (command, ret, pipe.stderr.read())
)
return None
if str is not bytes and type(line) is bytes:
line = decodeData(line)
line = line.strip()
match = re.findall(r"[0-9]+(?:\.[0-9]+)+", line)
if match:
version = match[0]
else:
# gcc 8 or higher
version = line.strip()
version = tuple(int(part) for part in version.split("."))
return version
# From gcc.py of Scons
def myDetectVersion(env, cc):
"""Return the version of the GNU compiler, or None if it is not a GNU compiler."""
cc = env.subst(cc)
if not cc:
return None
if "++" in os.path.basename(cc):
return None
# Make path absolute, to improve cache hit rate.
cc = getExecutablePath(cc, env)
if cc is None:
return None
if cc not in v_cache:
v_cache[cc] = _myDetectVersion(env, (cc,))
scons_details_logger.info("CC %r version check gives %r" % (cc, v_cache[cc]))
return v_cache[cc]
def myDetect(self, progs):
# Don't consider Fortran, tar, D, c++; we don't need them. We do manual
# fallback.
for blocked_tool in _blocked_tools:
if blocked_tool in progs:
return None
return orig_detect(self, progs)
# Keep a reference to the original detection; myDetect above falls back to it.
orig_detect = Environment.Detect
def getEnhancedToolDetect():
SCons.Tool.gcc.detect_version = myDetectVersion
return myDetect
def makeGccUseLinkerFile(source_dir, source_files, env):
tmp_linker_filename = os.path.join(source_dir, "@link_input.txt")
env["LINKCOM"] = env["LINKCOM"].replace(
"$SOURCES", "@%s" % env.get("ESCAPE", lambda x: x)(tmp_linker_filename)
)
with openTextFile(tmp_linker_filename, "w") as tmpfile:
for filename in source_files:
filename = ".".join(filename.split(".")[:-1]) + ".o"
if os.name == "nt":
filename = filename.replace(os.path.sep, "/")
tmpfile.write('"%s"\n' % filename)
tmpfile.write(env.subst("$SOURCES"))
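# Minimal wiring sketch (an assumption about intended usage, not part of the
# original file): construction code would install the patched detection so the
# blocked tools are skipped and the fixed gcc version check is used, roughly:
# Environment.Detect = getEnhancedToolDetect()
# env = Environment(tools=("gcc",))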
|
# -*- coding: utf8 -*-
import pycurl
import sys
from unittest.case import SkipTest
from tests import online
class TestPyCurl:
def test_pycurl_download_progress(self):
if not online:
raise SkipTest("Can't test download offline")
def progress(download_t, download_d, upload_t, upload_d):
print("Total to download {}\n".format(download_t))
print("Total downloaded {}\n".format(download_d))
print("Total to upload {}\n".format(upload_t))
print("Total uploaded {}\n".format(upload_d))
with open('out.html', 'wb') as f:
c = pycurl.Curl()
c.setopt(c.URL, 'http://pycurl.io/')
#c.setopt(c.URL, 'http://planet.osm.org/pbf/planet-latest.osm.pbf')
c.setopt(pycurl.USERAGENT, "python test")
c.setopt(c.NOPROGRESS, False)
c.setopt(c.XFERINFOFUNCTION, progress)
c.setopt(c.WRITEDATA, f)
c.perform()
c.close()
|
import csv
import json
with open('./orion_generated_connectivity.csv') as f:
reader = csv.DictReader(f)
connections = list(reader)
with open('./orion_full_node_list.csv') as f:
reader = csv.DictReader(f)
network_nodes = list(reader)
nodesById = {}
links = []
def processNode(name, nodeDesc, machineType, nodeId):
if nodeId not in nodesById:
if "WLC" in name:
icon = "wlc"
elif "BIG-IP" in machineType:
icon = "f5"
elif "APIC Server" in machineType:
icon = "server"
elif (
"Adaptive Security Appliance" in nodeDesc or
"Firepower" in nodeDesc
):
icon = "firewall"
elif (
"Nexus" in machineType or
"L3 Switch" in nodeDesc or
"Cisco Catalyst" in machineType or
"Cisco N9K" in machineType
):
icon = "l3switch"
elif (
"RTR" in name or
"ASR" in machineType or
"Cisco ISR" in machineType
):
icon = "router"
else:
icon = None
if name.lower().endswith('.corp.internal'):
name = name[:-14]
nodesById[nodeId] = {
"id": f'Orion:{nodeId}',
"name": name,
"description": f"Machine Type: {machineType}\n\nDescription:\n{nodeDesc}",
"icon": icon,
"solarwindsNodeId": nodeId
}
for connection in connections:
processNode(connection["SrcName"],
connection["SrcNodeDesc"],
connection["SrcMachineType"],
connection["SrcNodeID"])
processNode(connection["DestName"],
connection["DestNodeDesc"],
connection["DestMachineType"],
connection["DestNodeID"])
links.append({
"source": f'Orion:{connection["SrcNodeID"]}',
"target": f'Orion:{connection["DestNodeID"]}'
})
for node in network_nodes:
processNode(node["NodeName"], node["NodeDescription"],
node["MachineType"], node["NodeID"])
with open("fullNetworkGraph.json", "w") as f:
json.dump({"nodes": list(nodesById.values()), "links": links}, f)
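# Shape of the generated file (values are illustrative only): one node per Orion
# NodeID and one link per connectivity row, e.g.
# {"nodes": [{"id": "Orion:42", "name": "CORE-RTR-01", "description": "Machine Type: ...",
# "icon": "router", "solarwindsNodeId": "42"}],
# "links": [{"source": "Orion:42", "target": "Orion:77"}]}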
|
from django.db import models
class Document(models.Model):
description = models.CharField(max_length=255, blank=True)
document = models.FileField()
document2 = models.FileField()
uploaded_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.description |
"""Application config module."""
from django.apps import AppConfig
from githubnavigator import container
from . import views
class WebConfig(AppConfig):
name = 'web'
def ready(self):
container.wire(modules=[views])
|
# -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import GenericRepr, Snapshot
snapshots = Snapshot()
snapshots['test_interface_type_raises_error_when_extended_dependency_is_wrong_type 1'] = GenericRepr('<ExceptionInfo ValueError("ExtendExampleUnion requires \'Example\' to be GraphQL union but other type was provided in \'__requires__\'") tblen=3>')
snapshots['test_union_type_raises_attribute_error_when_defined_without_schema 1'] = GenericRepr('<ExceptionInfo AttributeError("type object \'ExampleUnion\' has no attribute \'__schema__\'") tblen=2>')
snapshots['test_union_type_raises_error_when_defined_with_invalid_graphql_type_schema 1'] = GenericRepr("<ExceptionInfo ValueError('ExampleUnion class was defined with __schema__ without GraphQL union') tblen=3>")
snapshots['test_union_type_raises_error_when_defined_with_invalid_schema_str 1'] = GenericRepr('<ExceptionInfo GraphQLSyntaxError("Syntax Error: Unexpected Name \'unien\'.", locations=[SourceLocation(line=1, column=1)]) tblen=7>')
snapshots['test_union_type_raises_error_when_defined_with_invalid_schema_type 1'] = GenericRepr("<ExceptionInfo TypeError('ExampleUnion class was defined with __schema__ of invalid type: bool') tblen=3>")
snapshots['test_union_type_raises_error_when_defined_with_multiple_types_schema 1'] = GenericRepr("<ExceptionInfo ValueError('ExampleUnion class was defined with __schema__ containing more than one GraphQL definition (found: UnionTypeDefinitionNode, UnionTypeDefinitionNode)') tblen=3>")
snapshots['test_union_type_raises_error_when_defined_without_extended_dependency 1'] = GenericRepr('<ExceptionInfo ValueError("ExtendExampleUnion class was defined without required GraphQL union definition for \'Result\' in __requires__") tblen=3>')
snapshots['test_union_type_raises_error_when_defined_without_member_type_dependency 1'] = GenericRepr('<ExceptionInfo ValueError("ExampleUnion class was defined without required GraphQL definition for \'Comment\' in __requires__") tblen=3>')
|
from numpy import array, sum
import cv2
"""
File:Screen_Regions.py
Description:
Class to define rectangular areas of the screen to capture, along with filters to apply. Includes functions to
match an image template to the region using OpenCV.
Author: [email protected]
"""
class Screen_Regions:
def __init__(self, screen, templ):
self.screen = screen
self.templates = templ
self.sun_threshold = 125
# arrays are in HSV order and represent the color ranges used for filtering
self.orange_color_range = [array([0, 130, 123]), array([25, 235, 220])]
self.orange_2_color_range = [array([16, 165, 220]), array([98, 255, 255])]
self.target_occluded_range= [array([16, 31, 85]), array([26, 160, 212])]
self.blue_color_range = [array([0, 28, 170]), array([180, 100, 255])]
self.fss_color_range = [array([95, 210, 70]), array([105, 255, 120])]
self.reg = {}
# regions with associated filter and color ranges
# The rect is top left x, y and bottom right x, y as fractions of the screen resolution
self.reg['compass'] = {'rect': [0.33, 0.65, 0.46, 0.97], 'width': 1, 'height': 1, 'filterCB': self.equalize, 'filter': None}
self.reg['target'] = {'rect': [0.33, 0.27, 0.66, 0.70], 'width': 1, 'height': 1, 'filterCB': self.filter_by_color, 'filter': self.orange_2_color_range} # also called destination
self.reg['target_occluded'] = {'rect': [0.33, 0.27, 0.66, 0.70], 'width': 1, 'height': 1, 'filterCB': self.filter_by_color, 'filter': self.target_occluded_range}
self.reg['sun'] = {'rect': [0.30, 0.30, 0.70, 0.68], 'width': 1, 'height': 1, 'filterCB': self.filter_sun, 'filter': None}
self.reg['disengage'] = {'rect': [0.42, 0.70, 0.60, 0.80], 'width': 1, 'height': 1, 'filterCB': self.filter_by_color, 'filter': self.blue_color_range}
self.reg['interdicted'] = {'rect': [0.60, 0.1, 0.90, 0.25], 'width': 1, 'height': 1, 'filterCB': self.filter_by_color, 'filter': self.orange_2_color_range}
self.reg['fss'] = {'rect': [0.5045, 0.7545, 0.532, 0.7955], 'width': 1, 'height': 1, 'filterCB': self.equalize, 'filter': None}
self.reg['mission_dest'] = {'rect': [0.46, 0.38, 0.65, 0.86], 'width': 1, 'height': 1, 'filterCB': self.equalize, 'filter': None}
self.reg['missions'] = {'rect': [0.50, 0.78, 0.65, 0.85], 'width': 1, 'height': 1, 'filterCB': self.equalize, 'filter': None}
self.reg['nav_panel'] = {'rect': [0.25, 0.36, 0.60, 0.85], 'width': 1, 'height': 1, 'filterCB': self.equalize, 'filter': None}
# convert rect from percent of screen into pixel location, calc the width/height of the area
for i, key in enumerate(self.reg):
xx = self.reg[key]['rect']
self.reg[key]['rect'] = [int(xx[0]*screen.screen_width), int(xx[1]*screen.screen_height),
int(xx[2]*screen.screen_width), int(xx[3]*screen.screen_height)]
self.reg[key]['width'] = self.reg[key]['rect'][2] - self.reg[key]['rect'][0]
self.reg[key]['height'] = self.reg[key]['rect'][3] - self.reg[key]['rect'][1]
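# Worked example of the conversion above (assuming a 1920x1080 screen): the
# 'compass' rect [0.33, 0.65, 0.46, 0.97] becomes [633, 702, 883, 1047],
# giving width 883-633 = 250 and height 1047-702 = 345 pixels.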
# just grab the screen based on the region name/rect
def capture_region(self, screen, region_name):
return screen.get_screen_region(self.reg[region_name]['rect'])
# grab screen region and call its filter routine
def capture_region_filtered(self, screen, region_name):
scr = screen.get_screen_region(self.reg[region_name]['rect'])
if self.reg[region_name]['filterCB'] == None:
return scr
else:
return self.reg[region_name]['filterCB'] (scr, self.reg[region_name]['filter'])
def match_template_in_region(self, region_name, templ):
img_region = self.capture_region_filtered(self.screen, region_name) # which would call, reg.capture_region('compass') and apply defined filter
match = cv2.matchTemplate(img_region, self.templates.template[templ]['image'], cv2.TM_CCOEFF_NORMED)
(minVal, maxVal, minLoc, maxLoc) = cv2.minMaxLoc(match)
return img_region, (minVal, maxVal, minLoc, maxLoc), match
def match_template_in_image(self, image, template):
match = cv2.matchTemplate(image, self.templates.template[template]['image'], cv2.TM_CCOEFF_NORMED)
(minVal, maxVal, minLoc, maxLoc) = cv2.minMaxLoc(match)
return image, (minVal, maxVal, minLoc, maxLoc), match
def equalize(self, image=None, noOp=None):
# Convert the image to grayscale
img_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# create a CLAHE object (Arguments are optional). Histogram equalization, improves contrast
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
img_out = clahe.apply(img_gray)
return img_out
def filter_by_color(self, image, color_range):
# converting from BGR to HSV color space
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
# filter on the passed-in color range (low, high)
filtered = cv2.inRange(hsv, color_range[0], color_range[1])
return filtered
# not used
def filter_bright(self, image=None, noOp=None):
equalized = self.equalize(image)
equalized = cv2.cvtColor(equalized, cv2.COLOR_GRAY2BGR) #hhhmm, equalize() already converts to gray
equalized = cv2.cvtColor(equalized, cv2.COLOR_BGR2HSV)
filtered = cv2.inRange(equalized, array([0, 0, 215]), array([0, 0, 255])) #only high value
return filtered
def set_sun_threshold(self, thresh):
self.sun_threshold = thresh
# need to compare filter_sun with filter_bright
def filter_sun(self, image=None, noOp=None):
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# set low end of filter to 25 to pick up the dull red Class L stars
(thresh, blackAndWhiteImage) = cv2.threshold(hsv, self.sun_threshold, 255, cv2.THRESH_BINARY)
return blackAndWhiteImage
# percentage of the image that is white
def sun_percent(self, screen):
blackAndWhiteImage = self.capture_region_filtered(screen, 'sun')
wht = sum(blackAndWhiteImage == 255)
blk = sum(blackAndWhiteImage != 255)
result = int((wht / (wht+blk))*100)
return result
'''
from Overlay import *
from Screen import *
from Image_Templates import *
from time import sleep
def main():
ov = Overlay("",1)
scr = Screen()
templ = Image_Templates(scr.scaleX, scr.scaleY)
scrReg = Screen_Regions(scr, templ)
for i, key in enumerate(scrReg.reg):
#tgt = scrReg.capture_region_filtered(scr, key)
#print(key)
#print(scrReg.reg[key])
if key == 'nav_panel':
ov.overlay_rect(key, (scrReg.reg[key]['rect'][0],
scrReg.reg[key]['rect'][1]),
(scrReg.reg[key]['rect'][2],
scrReg.reg[key]['rect'][3]) , (0,255,i*40), 2 )
ov.overlay_paint()
sleep(10)
ov.overlay_quit()
sleep(2)
if __name__ == "__main__":
main()
'''
|
#!/bin/python3
import sys
n = int(input().strip())
unsorted = []
unsorted_i = 0
for unsorted_i in range(n):
unsorted_t = int(input().strip())
unsorted.append(unsorted_t)
# your code goes here
unsorted.sort()
for idx, val in enumerate(unsorted):
print(val)
|
"""
Problem: https://www.hackerrank.com/challenges/ctci-bfs-shortest-reach/problem
"""
from collections import deque
# read amount of test cases
testCases = int(input())
# solve each test case
while testCases > 0:
# read number of nodes and edges
nodes, edges = map(int, input().split())
# initialize empty undirected graph (adjacency list)
graph = []
for i in range(nodes):
graph.append([])
# read each edge and populate graph
for i in range(edges):
nodeOne, nodeTwo = map(int, input().split())
graph[nodeOne - 1].append(nodeTwo - 1)
graph[nodeTwo - 1].append(nodeOne - 1)
# read starting node
startNode = int(input())
startNode -= 1
# initialize control variables for BFS
distance = [-1] * nodes
visited = [False] * nodes
toVisit = deque()
# add node 0 to control variables
visited[startNode] = True
distance[startNode] = 0
toVisit.append(startNode)
# perform BFS
while len(toVisit) > 0:
# get first node from queue
node = toVisit.popleft()
# add each unvisited neighbor of node to queue and update control vars
for neighbor in graph[node]:
if visited[neighbor] == False:
visited[neighbor] = True
distance[neighbor] = distance[node] + 6
toVisit.append(neighbor)
# print answer
first = True
for i in range(nodes):
if i != startNode:
if first == True:
print(distance[i], end = '')
first = False
else:
print(' ' + str(distance[i]), end = '')
if testCases != 1:
print()
# decrement amount of test cases
testCases -= 1
|
#! /usr/bin/env python
##########################################################################
# NSAp - Copyright (C) CEA, 2013
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""SPM wrappers for preprocessing data
"""
__docformat__ = 'restructuredtext'
# Standard library imports
from copy import deepcopy
import os
# Third-party imports
import numpy as np
# Local imports
from nipype.interfaces.base import (OutputMultiPath, TraitedSpec, isdefined,
traits, InputMultiPath, File)
from nipype.interfaces.spm.base import (SPMCommand, scans_for_fname,
func_is_3d,
scans_for_fnames, SPMCommandInputSpec)
from nipype.utils.filemanip import (fname_presuffix, filename_to_list,
list_to_filename, split_filename)
class ApplyDeformationFieldInputSpec(SPMCommandInputSpec):
"""
Parameters
----------
in_files : list of str (mandatory)
Files on which the deformation is applied.
deformation_field : str (mandatory)
SN SPM deformation file.
bounding_box : list of list of float
3x2-element list of lists (opt).
voxel_sizes : list of float
3-element list (opt).
interpolation : int
Degree of b-spline used for interpolation (from 0 to 7).
"""
in_files = InputMultiPath(
File(exists=True),
mandatory=True,
field='fnames',
desc='Files on which deformation is applied')
deformation_field = File(
exists=True,
mandatory=True,
field='comp{1}.def',
desc='SN SPM deformation file')
bounding_box = traits.List(
traits.List(traits.Float(),
minlen=3, maxlen=3),
field='comp{2}.idbbvox.bb',
minlen=2, maxlen=2,
desc='3x2-element list of lists (opt)')
voxel_sizes = traits.List(
traits.Float(),
[1., 1., 1.],
field='comp{2}.idbbvox.vox',
minlen=3, maxlen=3,
desc='3-element list (opt)')
interpolation = traits.Range(
low=0,
high=7,
field='interp',
desc='degree of b-spline used for interpolation')
out_prefix = traits.String(
'w', field='prefix',
usedefault=True,
desc='apply deformation field output prefix')
class ApplyDeformationFieldOutputSpec(TraitedSpec):
"""
Returns
-------
normalized_files : list of str
Converted files.
"""
normalized_files = OutputMultiPath(
File(exists=True),
desc='converted files')
class ApplyDeformationField(SPMCommand):
""" Uses SPM to apply inverse deformation field to given files.
Examples
--------
>>> import nsap.nipype.spm_interfaces as spm
>>> f = spm.ApplyDeformationField()
>>> f.inputs.in_files = 'functional.nii'
>>> f.inputs.deformation_field = 'y_t1_localizer.nii'
>>> f.run()
"""
input_spec = ApplyDeformationFieldInputSpec
output_spec = ApplyDeformationFieldOutputSpec
_jobtype = 'util'
_jobname = 'defs'
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for SPM
"""
if opt in ['in_files']:
return scans_for_fnames(filename_to_list(val), keep4d=False)
if opt == 'deformation_field':
return np.array([list_to_filename(val)], dtype=object)
return super(ApplyDeformationField, self)._format_arg(opt, spec, val)
def _list_outputs(self):
outputs = self._outputs().get()
outputs['normalized_files'] = []
for filename in self.inputs.in_files:
_, fname = os.path.split(filename)
outputs['normalized_files'].append(
os.path.join(self.inputs.output_directory,
'%s%s' % (self.inputs.out_prefix, fname)))
return outputs
class NormalizeInputSpec(SPMCommandInputSpec):
template = File(exists=True, field='eoptions.template',
desc='template file to normalize to',
mandatory=True, xor=['parameter_file'],
copyfile=False)
source = File(exists=True, field='subj.source',
desc='file to normalize to template',
xor=['parameter_file'],
mandatory=True, copyfile=True)
jobtype = traits.Enum('estwrite', 'est', 'write',
desc='one of: est, write, estwrite (opt, estwrite)',
usedefault=True)
apply_to_files = InputMultiPath(traits.Either(File(exists=True),
traits.List(File(exists=True))),
field='subj.resample',
desc='files to apply transformation to (opt)',
copyfile=True)
parameter_file = File(field='subj.matname', mandatory=True,
xor=['source', 'template'],
desc='normalization parameter file*_sn.mat', copyfile=False)
source_weight = File(field='subj.wtsrc',
desc='name of weighting image for source (opt)', copyfile=False)
template_weight = File(field='eoptions.weight',
desc='name of weighting image for template (opt)', copyfile=False)
source_image_smoothing = traits.Float(field='eoptions.smosrc',
desc='source smoothing (opt)')
template_image_smoothing = traits.Float(field='eoptions.smoref',
desc='template smoothing (opt)')
affine_regularization_type = traits.Enum('mni', 'size', 'none', field='eoptions.regtype',
desc='mni, size, none (opt)')
DCT_period_cutoff = traits.Float(field='eoptions.cutoff',
desc='Cutoff of DCT bases (opt)')
nonlinear_iterations = traits.Int(field='eoptions.nits',
desc='Number of iterations of nonlinear warping (opt)')
nonlinear_regularization = traits.Float(field='eoptions.reg',
desc='the amount of the regularization for the nonlinear part of the normalization (opt)')
write_preserve = traits.Bool(field='roptions.preserve',
desc='True/False warped images are modulated (opt,)')
write_bounding_box = traits.List(traits.List(traits.Float(), minlen=3,
maxlen=3),
field='roptions.bb', minlen=2, maxlen=2,
desc='3x2-element list of lists (opt)')
write_voxel_sizes = traits.List(traits.Float(), field='roptions.vox',
minlen=3, maxlen=3,
desc='3-element list (opt)')
write_interp = traits.Range(low=0, high=7, field='roptions.interp',
desc='degree of b-spline used for interpolation')
write_wrap = traits.List(traits.Int(), field='roptions.wrap',
desc=('Check if interpolation should wrap in [x,y,z] '
'- list of bools (opt)'))
out_prefix = traits.String('w', field='roptions.prefix', usedefault=True,
desc='normalized output prefix')
class NormalizeOutputSpec(TraitedSpec):
normalization_parameters = File(exists=True, desc='MAT files containing the normalization parameters')
normalized_source = File(exists=True, desc='Normalized source files')
normalized_files = OutputMultiPath(File(exists=True), desc='Normalized other files')
class Normalize(SPMCommand):
"""use spm_normalise for warping an image to a template
http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=51
Examples
--------
>>> import nipype.interfaces.spm as spm
>>> norm = spm.Normalize()
>>> norm.inputs.source = 'functional.nii'
>>> norm.run() # doctest: +SKIP
"""
input_spec = NormalizeInputSpec
output_spec = NormalizeOutputSpec
_jobtype = 'spatial'
_jobname = 'normalise'
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt == 'template':
return scans_for_fname(filename_to_list(val))
if opt == 'source':
return scans_for_fname(filename_to_list(val))
if opt == 'apply_to_files':
return scans_for_fnames(filename_to_list(val))
if opt == 'parameter_file':
return np.array([list_to_filename(val)], dtype=object)
if opt in ['write_wrap']:
if len(val) != 3:
raise ValueError('%s must have 3 elements' % opt)
return super(Normalize, self)._format_arg(opt, spec, val)
def _parse_inputs(self):
"""validate spm realign options if set to None ignore
"""
einputs = super(Normalize, self)._parse_inputs(skip=('jobtype',
'apply_to_files'))
if isdefined(self.inputs.apply_to_files):
inputfiles = deepcopy(self.inputs.apply_to_files)
if isdefined(self.inputs.source):
inputfiles.extend(self.inputs.source)
einputs[0]['subj']['resample'] = scans_for_fnames(inputfiles)
jobtype = self.inputs.jobtype
if jobtype in ['estwrite', 'write']:
if not isdefined(self.inputs.apply_to_files):
if isdefined(self.inputs.source):
einputs[0]['subj']['resample'] = scans_for_fname(self.inputs.source)
return [{'%s' % (jobtype): einputs[0]}]
def _list_outputs(self):
outputs = self._outputs().get()
jobtype = self.inputs.jobtype
if jobtype.startswith('est'):
outputs['normalization_parameters'] = []
for imgf in filename_to_list(self.inputs.source):
outputs['normalization_parameters'].append(fname_presuffix(imgf, suffix='_sn.mat', use_ext=False))
outputs['normalization_parameters'] = list_to_filename(outputs['normalization_parameters'])
if self.inputs.jobtype == "estimate":
if isdefined(self.inputs.apply_to_files):
outputs['normalized_files'] = self.inputs.apply_to_files
outputs['normalized_source'] = self.inputs.source
elif 'write' in self.inputs.jobtype:
outputs['normalized_files'] = []
if isdefined(self.inputs.apply_to_files):
filelist = filename_to_list(self.inputs.apply_to_files)
for f in filelist:
if isinstance(f, list):
run = [fname_presuffix(in_f, prefix=self.inputs.out_prefix) for in_f in f]
else:
run = [fname_presuffix(f, prefix=self.inputs.out_prefix)]
outputs['normalized_files'].extend(run)
if isdefined(self.inputs.source):
outputs['normalized_source'] = fname_presuffix(self.inputs.source, prefix=self.inputs.out_prefix)
return outputs
class ResliceToReferenceInput(SPMCommandInputSpec):
in_files = InputMultiPath(
File(exists=True), mandatory=True, field='fnames',
desc='Files on which deformation is applied')
target = File(
exists=True,
field='comp{1}.id.space',
desc='File defining target space')
interpolation = traits.Range(
low=0, high=7, field='interp',
desc='degree of b-spline used for interpolation')
bounding_box = traits.List(traits.List(traits.Float(), minlen=3, maxlen=3),
field='comp{1}.idbbvox.bb', minlen=2, maxlen=2,
desc='3x2-element list of lists (opt)')
voxel_sizes = traits.List(
traits.Float(),
field='comp{1}.idbbvox.vox',
minlen=3, maxlen=3,
desc='3-element list (opt)')
class ResliceToReferenceOutput(TraitedSpec):
out_files = OutputMultiPath(File(exists=True),
desc='Transformed files')
class ResliceToReference(SPMCommand):
""" Uses spm to reslice a volume to a target image space or to a provided voxel size and bounding box
Examples
--------
>>> import nipype.interfaces.spm.utils as spmu
>>> r2ref = spmu.ResliceToReference()
>>> r2ref.inputs.in_files = 'functional.nii'
>>> r2ref.inputs.target = 'structural.nii'
>>> r2ref.run() # doctest: +SKIP
"""
input_spec = ResliceToReferenceInput
output_spec = ResliceToReferenceOutput
_jobtype = 'util'
_jobname = 'defs'
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt == 'in_files':
return scans_for_fnames(filename_to_list(val))
if opt == 'target':
return scans_for_fname(filename_to_list(val))
if opt == 'deformation':
return np.array([list_to_filename(val)], dtype=object)
if opt == 'deformation_field':
return np.array([list_to_filename(val)], dtype=object)
return val
def _list_outputs(self):
outputs = self._outputs().get()
outputs['out_files'] = []
for filename in self.inputs.in_files:
_, fname = os.path.split(filename)
outputs['out_files'].append(os.path.realpath('w%s' % fname))
return outputs
|
#!/usr/bin/env python3
"""
Copyright 2021 Leon Läufer
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
import time
import asyncio
import signal
import os
def dump_to_csv(filehandle, state, pids):
header = ""
for pid in sorted(pids):
header += "epoch_time, total_cpu, "+str(pid)+"_cpu, "+str(pid)+"_mem, "
header = header[:-2]+"\n"
filehandle.write(header)
sorted_keys = sorted(state.keys(), key=lambda x: int(x))
for key in sorted_keys:
timeframe = state[key]
line = ""
filehandle.write((str(timeframe["TIME"])+", "))
filehandle.write(str(timeframe["TOTAL"]["CPU"])+", ")
del timeframe["TOTAL"]
for pid in pids:
if str(pid) in timeframe.keys():
line += str(timeframe[str(pid)]["CPU"])+", "+str(timeframe[str(pid)]["MEM"])+", "
else:
line += ", , "
line = line[:-2]+"\n"
filehandle.write(line)
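# Example of the CSV layout written above for pids [11, 22] (values illustrative):
# header: epoch_time, total_cpu, 11_cpu, 11_mem, 22_cpu, 22_mem
# row: 1650000000.0, 42.5, 3.1, 120.4, 0.0, 85.2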
def dump_to_json(filehandle, state, interval):
state["INTERVAL"] = interval
filehandle.write(str(state).replace("'", '"').replace("True", "true").replace("False", "false"))
async def get_resource_stats(interval, filetype, output_file, pids):
cpu_usage = dict()
global_cpu_usage = 0.0
def clear_ascii_escape(text):
return text.replace('\x1b(B', '').replace('\x1b[m', '').replace('\x1b[39;49m', '').replace('\x1b[1m', '').\
replace('\x1b[K', '').replace('\x1b[J', '').replace('\x1b[H', '').replace('\x1b[7m', '')
def get_memory_usage(pid):
return float((os.popen('cat /proc/' + str(
pid) + '/smaps | grep -i pss | awk \'{sum+=$2} END {print sum/1024""}\'').read()).replace(",", "."))
def get_cpu_usage(pid):
return cpu_usage[int(pid)]
def set_cpu_usage(pid, usage):
nonlocal cpu_usage
cpu_usage[int(pid)] = usage
def get_global_cpu_usage():
return global_cpu_usage
def set_global_cpu_usage(usage):
nonlocal global_cpu_usage
global_cpu_usage = usage
def get_cpu_thread_count():
return int(os.popen('grep -c processor /proc/cpuinfo').read())
def is_running(pid):
return str(pid) in os.popen('ps -p ' + str(pid)).read()
for pid in pids:
set_cpu_usage(pid, 0.0)
cpu_threads = get_cpu_thread_count()
pidlst = str(pids)[1:-1].replace(" ", "")
code = 'top -d '+str(interval)+' -p '+pidlst+' -o PID'
proc = await asyncio.create_subprocess_shell(code, stdout=asyncio.subprocess.PIPE)
time.sleep(1)
current_epoch = 0
captured_data = dict()
def signal_handler(sig, frame):
print("Logging halted! Saving log to ", output_file, " as ", filetype, "!", sep="")
with open(output_file, 'w') as outfile:
if filetype == "json":
dump_to_json(outfile, captured_data, interval)
else:
dump_to_csv(outfile, captured_data, pids)
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
epoch_time = -1
while True:
data = await proc.stdout.readline()
line = data.decode('ascii').rstrip()
if len(line) > 5: # Sort out invalid lines
line_split = clear_ascii_escape(line).split()
if line.startswith('%Cpu(s):\x1b(B\x1b[m\x1b[39;49m\x1b[1m'): # Get CPU usage
try:
set_global_cpu_usage(round((100.0-float(line_split[7].replace(",", ".")))*cpu_threads, 1))
except:
print("CPU usage could not be logged, increase your interval if this issue persists")
elif line.startswith('\x1b(B\x1b[m'): # Get CPU usage of pids
try:
set_cpu_usage(line_split[0], float(line_split[8].replace(",", ".")))
except:
print("Thread CPU usage could not be logged, increase your interval if this issue persists")
current_time = time.time()
if epoch_time + interval < current_time:
if epoch_time == -1:
epoch_time = current_time
else:
epoch_time += interval
current_epoch += 1
# Update to current epoch if there was a delay in the program
while epoch_time + interval < current_time:
print("There was a delay in the program, trying to fill in the data")
print("Use a longer interval if this problem persists")
current_data = captured_data.get(str(current_epoch-1), dict()).copy()
current_data["TIME"] = epoch_time
current_data["LAG-COMPENSATION"] = True
captured_data[str(current_epoch)] = current_data
epoch_time += interval
current_epoch += 1
current_data = captured_data.get(str(current_epoch), dict())
current_data["TIME"] = epoch_time
current_data["LAG-COMPENSATION"] = False
print("TIME: ", epoch_time, sep="")
# Log and print total CPU utilization
total = dict()
total["CPU"] = get_global_cpu_usage()
current_data["TOTAL"] = total
captured_data[str(current_epoch)] = current_data
print("TOTAL CPU: ", get_global_cpu_usage(), "%", sep="")
# Log and print data of all pids
for pid in pids:
if is_running(pid):
current_data_pid = current_data.get(int(pid), dict())
try:
mem_size = get_memory_usage(pid)
except:
mem_size = 0.0
current_data_pid["CPU"] = get_cpu_usage(pid)
current_data_pid["MEM"] = mem_size
current_data[str(pid)] = current_data_pid
captured_data[str(current_epoch)] = current_data
print(pid, ": CPU: ", get_cpu_usage(pid), "% MEM: ", str(mem_size), " MiB", sep="")
print()
if __name__ == "__main__":
if len(sys.argv) < 5:
print("Usage: python3 %s <interval> <type> <file> <pid1> [<pid2>] [<pid3>] [<pid4>] ..." % sys.argv[0])
print("Interval in seconds, Type: json or csv")
sys.exit(1)
print("Press Ctrl+C to stop logging!")
interval = float(sys.argv[1])
filetype = str(sys.argv[2])
output_file = str(sys.argv[3])
pids = list(map(lambda x: int(x), sys.argv[4:]))
asyncio.run(get_resource_stats(interval, filetype, output_file, pids))
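# Example invocation (script name and pids are illustrative): sample processes
# 1234 and 5678 every 0.5 s and write a CSV when stopped with Ctrl+C:
# python3 proc_logger.py 0.5 csv usage.csv 1234 5678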
|
from src import DistributionFamilyType
from src.Distributions import Distributions
import math
class TriangularDistribution(Distributions):
_c = 0
def probability_random_number(self, x):
if isinstance(x, str):
raise Exception("x must be non string type")
if not self.__a <= x <= self.__b:
raise Exception("x({}) must be between a({}) and b({}).".format(x, self.__a, self.__b))
if self.__b - self.__a == 1:
return self._fx_(x)/2
else:
return self._fx_(x)
def __init__(self, a, b):
if isinstance(a, str):
raise Exception("a must be non string type")
if isinstance(b, str):
raise Exception("b must be non string type")
if not a < b:
raise Exception("a({}) must be less than b({})".format(a, b))
if self._c == 0:
self._c = 2 / (b - a)
c = self._c
self.__a = a
self.__b = b
self.__c = c
self.__mn = (a + b + c) / 3
if c >= (a + b) / 2:
self.__med = a + math.sqrt((b - a) * (c - a) / 2)
else:
self.__med = b - math.sqrt((b - a) * (b - c) / 2)
self.__mod = self.__c
abc2 = a * a + b * b + c * c - a * b - a * c - b * c
self.__var = abc2 / 18
self.__skw = math.sqrt(2) * (a + b - 2 * c) * (2 * a - b - c) * (a - 2 * b + c) / (5 * math.pow(abc2, 3 / 2))
self.__krt = -3 / 5
self.__x = [float(i / 10) for i in range(10 * int(a), 10 * int(b)+1)]
if self.__x.__contains__(0.0):
self.__x.remove(0.0)
self.__y = [self._fx_(i) for i in self.__x]
self.__cdf = [self._cfx_(i) for i in self.__x]
self.__mgf = []
try:
self.__mgf = [self._mg_function_(i) for i in self.__x]
except OverflowError:
self.__mgf = []
Distributions.__init__(self, x=self.__x, y=self.__y, m=self.__mn, v=self.__var, mo=self.__mod, me=self.__med,
sk=self.__skw, kr=self.__krt, cdf=self.__cdf, mgf=self.__mgf)
def __str__(self):
d = {'a': self.__a, 'b': self.__b, 'c': self.__c}
n = self.name() + str(d)
return n
def name(self):
return "Triangular Distribution"
def frequency_density_formula(self):
return [super()._fx_Name + "2*(x-a)/((b-a)*(c-a)), if a<x<c", super()._fx_Name + "2/(b-a) if x=c",
super()._fx_Name + "2*(b-x)/((b-a)*(b-c)) if c<x<b"]
def notation(self):
return "Trg.(a,b)"
def parameters(self):
return ["-∞ < a < b < ∞"]
def distribution_family(self):
return DistributionFamilyType.Continuous.value
def _fx_(self, x):
if self.__a <= x < self.__c:
fx = 2 * (x - self.__a) / ((self.__b - self.__a) * (self.__c - self.__a))
elif x == self.__c:
fx = 2/(self.__b-self.__a)
else:
fx = 2 * (self.__b - x) / ((self.__b - self.__a) * (self.__b - self.__c))
return fx
def _cfx_(self, x):
if x <= self.__a:
fx = 0.0
elif self.__a < x <= self.__c:
fx = (x - self.__a)*(x - self.__a) / ((self.__b - self.__a) * (self.__c - self.__a))
elif self.__c < x < self.__b:
fx = 1 - (self.__b - x)*(self.__b - x) / ((self.__b - self.__a) * (self.__b - self.__c))
else:
fx = 1
return fx
def _mg_function_(self, x):
if self.__a == self.__c or self.__c == self.__b:
return (math.exp(x * self.__b) - math.exp(x * self.__a)) / x * (self.__b - self.__a)
else:
exa = math.exp(x * self.__a)
exb = math.exp(x * self.__b)
exc = math.exp(x * self.__c)
bc = self.__b - self.__c
ba = self.__b - self.__a
ca = self.__c - self.__a
return 2 * (bc * exa - ba * exc + ca * exb) / (x * x * bc * ba * ca)
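# Minimal usage sketch (illustrative values, assuming the package imports above resolve):
# t = TriangularDistribution(1, 3) # _c becomes 2/(b-a) = 1.0
# t.probability_random_number(2) # falling branch: 2*(3-2)/((3-1)*(3-1)) = 0.5
# str(t) # "Triangular Distribution{'a': 1, 'b': 3, 'c': 1.0}"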
|
# This code is used in the 'Run MATLAB from Python' example
# Copyright 2019-2021 The MathWorks, Inc.
import torch
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
import torch.onnx
import time
import os
cudaAvailable = torch.cuda.is_available()
if cudaAvailable:
cuda = torch.device('cuda')
# start a MATLAB engine
import matlab.engine
MLEngine = matlab.engine.start_matlab()
miniBatchSize = 128.0
# Prepare training dataset
class TrainData(Dataset):
def __init__(self):
# Create persistent training dataset in MATLAB
MLEngine.setupDatasets(miniBatchSize)
# Set the dataset length to the number of minibatches
# in the training dataset
self.len = int(MLEngine.getNumIterationsPerEpoch())
def __getitem__(self, index):
# Call MATLAB to get a minibatch of features + labels
minibatch = MLEngine.extractTrainingFeatures()
x = torch.FloatTensor(minibatch.get('features'))
y = torch.FloatTensor(minibatch.get('labels'))
return x, y
def __len__(self):
return int(self.len)
print('Setting up datastores...')
trainDataset = TrainData()
print('Datastore setup complete')
print('Minibatch size: ', int(miniBatchSize))
print('Number of training files: ', int(trainDataset.len * miniBatchSize))
print('Number of minibatches per epoch: ', int(trainDataset.len))
trainLoader = DataLoader(dataset=trainDataset, batch_size=1)
print('Computing validation features...')
# Prepare validation dataset
# Call MATLAB to compute validation features
valFeatures = MLEngine.extractValidationFeatures()
XValidation = valFeatures["features"]
YValidation = valFeatures["labels"]
# Create Data Class
class ValData(Dataset):
# Constructor
def __init__(self):
self.x = XValidation
self.y = YValidation
self.len = self.y.size[0]
# Getter
def __getitem__(self, index):
x = torch.FloatTensor(self.x[index])
y = torch.FloatTensor(self.y[index])
return x, y
# Get Length
def __len__(self):
return self.len
valDataset = ValData()
valLoader = DataLoader(dataset = valDataset, batch_size = valDataset.len)
print('Validation feature computation complete')
# Create the neural network
NumF = 12
numHops = 98
timePoolSize = 13
dropoutProb = 0.2
numClasses = 11
class CNN(nn.Module):
# Constructor
def __init__(self, out_1=NumF):
super(CNN, self).__init__()
self.cnn1 = nn.Conv2d(in_channels=1, out_channels=out_1, kernel_size=3, padding=1)
self.batch1 = nn.BatchNorm2d(out_1)
self.relu1 = nn.ReLU()
self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.cnn2 = nn.Conv2d(in_channels=out_1, out_channels=2*out_1, kernel_size=3, padding=1)
self.batch2 = nn.BatchNorm2d(2*out_1)
self.relu2 = nn.ReLU()
self.maxpool2 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.cnn3 = nn.Conv2d(in_channels=2*out_1, out_channels=4 * out_1, kernel_size=3, padding=1)
self.batch3 = nn.BatchNorm2d(4 * out_1)
self.relu3 = nn.ReLU()
self.maxpool3 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.cnn4 = nn.Conv2d(in_channels=4 * out_1, out_channels=4 * out_1, kernel_size=3, padding=1)
self.batch4 = nn.BatchNorm2d(4 * out_1)
self.relu4 = nn.ReLU()
self.cnn5 = nn.Conv2d(in_channels=4 * out_1, out_channels=4 * out_1, kernel_size=3, padding=1)
self.batch5 = nn.BatchNorm2d(4 * out_1)
self.relu5 = nn.ReLU()
self.maxpool4 = nn.MaxPool2d(kernel_size=(timePoolSize, 1))
self.dropout = nn.Dropout2d(dropoutProb)
self.fc = nn.Linear(336, numClasses)
#self.softmax = nn.Softmax(dim = 1)
# Prediction
def forward(self, x):
out = self.cnn1(x)
out = self.batch1(out)
out = self.relu1(out)
out = self.maxpool1(out)
out = self.cnn2(out)
out = self.batch2(out)
out = self.relu2(out)
out = self.maxpool2(out)
out = self.cnn3(out)
out = self.batch3(out)
out = self.relu3(out)
out = self.maxpool3(out)
out = self.cnn4(out)
out = self.batch4(out)
out = self.relu4(out)
out = self.cnn5(out)
out = self.batch5(out)
out = self.relu5(out)
out = self.maxpool4(out)
out = self.dropout(out)
out = out.view(out.size(0), -1)
out = self.fc(out)
#out = self.softmax(out)
return out
model = CNN()
if cudaAvailable:
model.cuda()
# Define training parameters
n_epochs = 25
criterion = nn.CrossEntropyLoss()
learning_rate = 3e-4
optimizer = torch.optim.Adam(model.parameters(), lr = learning_rate)
loss_list = []
accuracy_list = []
numValItems = len(valDataset)
doValidation = True
print('Training...')
for epoch in range(n_epochs):
if epoch == 20:
for g in optimizer.param_groups:
g['lr'] = 3e-5
count = 0
for batch in trainLoader:
count += 1
print('Epoch ', epoch+1, ' Iteration', count, ' of ', trainDataset.len)
if cudaAvailable:
x = batch[0].cuda()
y = batch[1].cuda()
else:
x = batch[0]
y = batch[1]
optimizer.zero_grad()
z = model(torch.squeeze(x.float(), 0))
loss = criterion(z, torch.squeeze(y).long())
loss.backward()
optimizer.step()
if doValidation:
correct = 0
# perform a prediction on the validation data
for x_test, y_test in valLoader:
if cudaAvailable:
x_test = x_test.cuda()
y_test = y_test.cuda()
else:
x_test = x_test
y_test = y_test
z = model(x_test.float())
_ , yhat = torch.max(z.data, 1)
correct += (yhat == y_test.squeeze()).sum().item()
accuracy = correct / numValItems
print('Validation accuracy: ', accuracy)
accuracy_list.append(accuracy)
loss_list.append(loss.data)
# Export the trained model to ONXX format
if cudaAvailable:
x = torch.empty(1, 1, 98, 50).cuda()
else:
x = torch.empty(1, 1, 98, 50)
torch.onnx.export(model,
x,
"cmdRecognition.onnx",
export_params=True,
opset_version=9,
do_constant_folding=True,
input_names=['input'],
output_names=['output'])
print('Training complete') |
import pyautogui
import screengrab as sg
import time
def find_direction(x, y, cx, cy, size):
""" Gets the direction of the arrow from the centre (cx, cy) """
width, height = size
max_match = (0, 0, width)
x_dist = width // 40
x_s = width // 2 - x_dist
x_b = width // 2 + x_dist
y_dist = height // 40
y_s = height // 2 - y_dist
y_b = height // 2 + y_dist
direction = ""
if y_s < y < y_b:
if x < cx:
direction = "left"
else:
direction = "right"
elif x_s < x < x_b:
if y < cy:
direction = "up"
else:
direction = "down"
return direction
def main(image="screenshot.png"):
sg.screengrab(output=image)
heart, size = [], []
size = sg.getsize(image)
nodir = True
prevrun = False
while True:
try:
start = time.time()
sg.screengrab(output=image)
arrow = sg.findarrows(image)
if not arrow:
continue
if nodir and not prevrun:
heart = sg.findheart(image)
prevrun = True
direction = find_direction(*arrow[:2], *heart, size)
print(direction)
end = time.time()
nodir = False
pyautogui.press(direction)
print(end-start)
except ZeroDivisionError:
prevrun = False
nodir = True
if __name__ == "__main__":
main()
|
# Generated by Django 3.0.1 on 2019-12-26 16:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lancamentos', '0013_auto_20190621_1316'),
]
operations = [
migrations.AddField(
model_name='journal',
name='descricao',
field=models.TextField(default='', max_length=250),
preserve_default=False,
),
migrations.AddField(
model_name='lancamento',
name='descricao',
field=models.TextField(default='Migrado', max_length=250),
preserve_default=False,
),
]
|
#!/usr/bin/env python
# coding=utf-8
import os
import random
import numpy as np
class DataConfig():
base_dir='/data/dataset/'
dict_dir=base_dir+'dict/'
py2id_dict=dict_dir+'py2id_dict.txt'
hz2id_dict=dict_dir+'hz2id_dict.txt'
py2hz_dict=dict_dir+'py2hz_dict.txt'
py2hz_dir=base_dir+'pinyin2hanzi/'
types=['train','test','dev']
class ConfigLanguage(DataConfig):
epochs=100
model_dir='models/language_model/new/'
model_name='model'
model_path=model_dir+model_name
embed_size=300
num_hb=4
num_eb=16
norm_type='bn'
lr=0.0001
is_training=True
batch_size=256
py_size=1472
hz_size=7459
dropout_rate=0.5
class DataLanguage(ConfigLanguage):
def __init__(self):
super(DataLanguage,self).__init__()
self.py2hz_paths={type:self.py2hz_dir+'py2hz_'+type+'.tsv' for type in self.types}
self.create_dict()
self.create_py2hz()
def create_py2hz(self):
self.py2hz={}
self.batch_num={}
for _type,path in self.py2hz_paths.items():
self.py2hz[_type]={}
start_num=0
with open(path,'r',encoding='utf-8') as file:
for line in file:
idx,pys,hzs=line.strip('\n').strip().split('\t')
pys=pys.strip().split(' ')
hzs=hzs.strip().split(' ')
self.py2hz[_type][start_num]=(pys,hzs)
start_num+=1
batch_num=start_num//self.batch_size
self.batch_num[_type]=batch_num
def create_batch(self,flag='train',shuffle=True):
data_num=len(self.py2hz[flag])
idxs=list(range(data_num))
if shuffle:
random.shuffle(idxs)
pys=[]
hzs=[]
for i,idx in enumerate(idxs):
py,hz=self.py2hz[flag][idx]
py=[self.py2id[p] for p in py]
hz=[self.hz2id[h] for h in hz]
assert len(py)==len(hz)
if len(pys)==self.batch_size:
inputs,outputs=self.seq_pad(pys,hzs)
yield inputs,outputs
pys,hzs=[],[]
pys.append(py)
hzs.append(hz)
def create_online(self,text):
pred=[self.py2id[py] for py in text]
pred=np.array(pred)
pred=pred.reshape((1,pred.shape[0]))
return pred
def seq_pad(self,pys,hzs):
max_len=max([len(py) for py in pys])
inputs=np.array([line+[0]*(max_len-len(line)) for line in pys])
outputs=np.array([line+[0]*(max_len-len(line)) for line in hzs])
return inputs,outputs
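# Example of the padding above (illustrative ids): seq_pad([[3, 5], [7]], [[8, 9], [4]])
# right-pads with 0 to the longest pinyin sequence, returning
# inputs [[3, 5], [7, 0]] and outputs [[8, 9], [4, 0]] as numpy arrays.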
def create_dict(self):
self.py2id={}
self.id2py={}
self.id2hz={}
self.hz2id={}
with open(self.py2id_dict,'r',encoding='utf-8') as file:
for line in file:
py,idx=line.strip('\n').strip().split('\t')
self.py2id[py.strip()]=int(idx.strip())
self.id2py[int(idx.strip())]=py.strip()
with open(self.hz2id_dict,'r',encoding='utf-8') as file:
for line in file:
hz,idx=line.strip('\n').strip().split('\t')
self.hz2id[hz.strip()]=int(idx.strip())
self.id2hz[int(idx.strip())]=hz.strip()
def main():
data=DataLanguage()
data_iters=data.create_batch()
for batch in data_iters:
x,y=batch
print(x,'\n',y)
if __name__=="__main__":
main()
|
import toml as toml_
def dumps(*a, **kw):
kw.pop('indent', None)
return toml_.dumps(*a, **kw)
loads = toml_.loads
|
import sys
import unittest
import logChunk
from chunkingConstants import *
sys.path.append("../util")
import Util
from Util import ConfigInfo
class logChunktest(unittest.TestCase):
def readHelper(self,filename):
inf =open(filename,"r")
text=""
for line in inf:
text+=line
return text
def debugFunctions(self, funcList):
print("===========================================")
for func in funcList:
print((func.method))
print((func.start))
print((func.end))
print((func.total_add))
print((func.total_del))
print((func.keywordDictionary))
print("===========================================")
def setUp(self):
self.javaMethod1 = "public static Intent createIntent(Context context, String username, String password) {"
self.javaMethod2 = " public <V> V post(final String uri, final Object params, final Type type) \n throws IOException {"
self.javaMethod3 = "public static Intent createIntent(final Collection<? extends Issue> issues,\n final Repository repository, final int position) {"
self.javaMethod4 = "@Override \n public List<User> run(Account account) throws Exception {"
self.javaMethod5 = "private JClass typeBoundsToJClass(GeneratedClassHolder holder, List<? extends TypeMirror> bounds, Map<String, TypeMirror> actualTypes) {"
self.javaMethod6 = " public JMethod implementMethod(GeneratedClassHolder holder, List<ExecutableElement> methods, String methodName, String returnType, String... parameterTypes) {"
self.javaMethod7 = "ProgrammerInterview pInstance = new ProgrammerInterview() {\npublic void read() {"
c_info = ConfigInfo("../util/javatest.ini")
self.testChunk2 = logChunk.logChunk("", "Java", c_info)
#Read in the block tests
self.chunkb1 = logChunk.logChunk(self.readHelper("testfiles/Block/testChunk1.txt"), "Java", c_info)
self.chunkb2 = logChunk.logChunk(self.readHelper("testfiles/Block/testChunk2.txt"), "Java", c_info)
self.chunkb3 = logChunk.logChunk(self.readHelper("testfiles/Block/testChunk3.txt"), "Java", c_info)
self.chunkb4 = logChunk.logChunk(self.readHelper("testfiles/Block/testChunk4.txt"), "Java", c_info)
self.chunkb5 = logChunk.logChunk(self.readHelper("testfiles/Block/testChunk5.txt"), "Java", c_info)
self.chunkb6 = logChunk.logChunk(self.readHelper("testfiles/Block/testChunk6.txt"), "Java", c_info)
self.chunkb7 = logChunk.logChunk(self.readHelper("testfiles/Block/testChunk7.txt"), "Java", c_info)
self.chunkb8 = logChunk.logChunk(self.readHelper("testfiles/Block/testChunk8.txt"), "Java", c_info)
self.chunkb9 = logChunk.logChunk(self.readHelper("testfiles/Block/testChunk9.txt"), "Java", c_info)
self.chunkb10 = logChunk.logChunk(self.readHelper("testfiles/Block/testChunk10.txt"), "Java", c_info)
self.chunkb11 = logChunk.logChunk(self.readHelper("testfiles/Block/testChunk11.txt"), "Java", c_info)
def test_FunctionNameParseJAVA(self):
temp = self.testChunk2.langSwitch.parseFunctionName(self.javaMethod1)
print(temp)
self.assertTrue(temp == "createIntent", "Actual: " + temp)
temp = self.testChunk2.langSwitch.parseFunctionName(self.javaMethod2)
print(temp)
self.assertTrue(temp == "post", "Actual: " + temp)
temp = self.testChunk2.langSwitch.parseFunctionName(self.javaMethod3)
print(temp)
self.assertTrue(temp == "createIntent", "Actual: " + temp)
temp = self.testChunk2.langSwitch.parseFunctionName(self.javaMethod4)
print(temp)
self.assertTrue(temp == "run", "Actual: " + temp)
temp = self.testChunk2.langSwitch.parseFunctionName(self.javaMethod5)
print(temp)
self.assertTrue(temp == "typeBoundsToJClass", "Actual: " + temp)
temp = self.testChunk2.langSwitch.parseFunctionName(self.javaMethod6)
print(temp)
self.assertTrue(temp == "implementMethod", "Actual: " + temp)
temp = self.testChunk2.langSwitch.parseFunctionName(self.javaMethod7)
print(temp)
self.assertTrue(temp == "read", "Actual: " + temp)
def test_parseText_Block1(self):
self.chunkb1.parseText()
funcList = self.chunkb1.functions
#self.debugFunctions(funcList)
self.assertTrue(len(funcList) == 2)
self.assertTrue(funcList[0].method=="foo")
self.assertTrue(funcList[0].total_add == 2)
self.assertTrue(funcList[0].total_del == 1)
testDict= {'throw adds': 0, 'catch dels': 0, 'try adds': 0, 'try dels': 1, 'Exception dels': 0, 'raise adds': 0, 'catch adds': 1, 'finally dels': 0, 'finally adds': 0, 'throw dels': 0, 'Exception adds': 0, 'raise dels': 0, 'for adds': 0,'for dels': 0,'while adds': 0,'while dels': 0}
self.assertEqual(testDict,funcList[0].keywordDictionary)
self.assertTrue(funcList[1].method=="foo00022")
self.assertTrue(funcList[1].total_add == 4)
self.assertTrue(funcList[1].total_del == 2)
testDict= {'throw adds': 0, 'catch dels': 0, 'try adds': 1, 'try dels': 1, 'Exception dels': 0, 'raise adds': 0, 'catch adds': 1, 'finally dels': 0, 'finally adds': 0, 'throw dels': 0, 'Exception adds': 0, 'raise dels': 0, 'for adds': 0,'for dels': 0,'while adds': 0,'while dels': 0}
self.assertEqual(testDict, funcList[1].keywordDictionary)
def test_parseText_Block2(self): #ISSUE: the current implementation cannot assign values to multiple blocks.
self.chunkb2.parseText()
funcList = self.chunkb2.functions
#self.debugFunctions(funcList)
self.assertTrue(len(funcList) == 2)
self.assertTrue(funcList[0].method=="getAccounts")
self.assertTrue(funcList[0].total_add == 1)
self.assertTrue(funcList[0].total_del == 2)
testdict= {'throw adds': 0, 'catch dels': 0, 'try adds': 0, 'try dels': 0, 'Exception dels': 0, 'raise adds': 0, 'catch adds': 0, 'finally dels': 0, 'finally adds': 0, 'throw dels': 0, 'Exception adds': 0, 'raise dels': 0, 'for adds': 0,'for dels': 0,'while adds': 0,'while dels': 0}
self.assertEqual(testdict,funcList[0].keywordDictionary)
self.assertTrue(funcList[1].method=="getAccount")
self.assertTrue(funcList[1].total_add == 6)
self.assertTrue(funcList[1].total_del == 2)
testdict={'throw adds': 1, 'catch dels': 0, 'try adds': 3, 'try dels': 2, 'Exception dels': 0, 'raise adds': 0, 'catch adds': 4, 'finally dels': 0, 'finally adds': 0, 'throw dels': 0, 'Exception adds': 2, 'raise dels': 0, 'for adds': 0,'for dels': 0,'while adds': 2,'while dels': 2}
self.assertEqual(testdict,funcList[1].keywordDictionary)
def test_parseText_Block3(self):
self.chunkb3.parseText()
funcList = self.chunkb3.functions
#self.debugFunctions(funcList)
self.assertTrue(len(funcList) == 1)
self.assertTrue(funcList[0].method=="ReflectiveProperty")
self.assertTrue(funcList[0].total_add == 8)
self.assertTrue(funcList[0].total_del == 2)
testdict= {'throw adds': 0, 'catch dels': 1, 'try adds': 8, 'try dels': 2, 'Exception dels': 0, 'raise adds': 0, 'catch adds': 4, 'finally dels': 0, 'finally adds': 0, 'throw dels': 0, 'Exception adds': 0, 'raise dels': 0, 'for adds': 0,'for dels': 0,'while adds': 0,'while dels': 0}
self.assertEqual(testdict,funcList[0].keywordDictionary)
def test_parseText_Block4(self):
self.chunkb4.parseText()
funcList = self.chunkb4.functions
# self.debugFunctions(funcList)
self.assertTrue(len(funcList) == 1)
self.assertTrue(funcList[0].method=="setHandle")
self.assertTrue(funcList[0].total_add == 1)
self.assertTrue(funcList[0].total_del == 1)
testdict= {'throw adds': 0, 'catch dels': 0, 'try adds': 0, 'try dels': 0, 'Exception dels': 0, 'raise adds': 0, 'catch adds': 0, 'finally dels': 0, 'finally adds': 0, 'throw dels': 0, 'Exception adds': 0, 'raise dels': 0, 'for adds': 0,'for dels': 0,'while adds': 0,'while dels': 0}
self.assertEqual(testdict,funcList[0].keywordDictionary)
def test_parseText_Block5(self):
self.chunkb5.parseText()
funcList = self.chunkb5.functions
self.debugFunctions(funcList)
self.assertTrue(len(funcList) == 2)
self.assertTrue(funcList[0].method=="copy")
self.assertTrue(funcList[0].total_add == 19)
self.assertTrue(funcList[0].total_del == 5)
testdict= {'throw adds': 1, 'catch dels': 0, 'try adds': 0, 'try dels': 0, 'Exception dels': 0, 'raise adds': 0, 'catch adds': 0, 'finally dels': 0, 'finally adds': 0, 'throw dels': 0, 'Exception adds': 1, 'raise dels': 0, 'for adds': 0,'for dels': 0,'while adds': 0,'while dels': 0}
self.assertEqual(testdict,funcList[0].keywordDictionary)
self.assertTrue(funcList[1].method==NON_FUNC) #The add del count here is a bit off due to the way we change code that has been uncommented
testdict= {'throw adds': 0, 'catch dels': 0, 'try adds': 0, 'try dels': 0, 'Exception dels': 0, 'raise adds': 0, 'catch adds': 0, 'finally dels': 0, 'finally adds': 0, 'throw dels': 0, 'Exception adds': 0, 'raise dels': 0, 'for adds': 0,'for dels': 0,'while adds': 0,'while dels': 0}
self.assertEqual(testdict,funcList[1].keywordDictionary)
def test_parseText_Block6(self):
self.chunkb6.parseText()
funcList = self.chunkb6.functions
# self.debugFunctions(funcList)
self.assertTrue(len(funcList) == 1)
self.assertTrue(funcList[0].method=="init")
self.assertTrue(funcList[0].total_add == 0)
self.assertTrue(funcList[0].total_del == 1)
testdict= {'throw adds': 0, 'catch dels': 1, 'try adds': 0, 'try dels': 0, 'Exception dels': 1, 'raise adds': 0, 'catch adds': 0, 'finally dels': 0, 'finally adds': 0, 'throw dels': 1, 'Exception adds': 0, 'raise dels': 0, 'for adds': 0,'for dels': 0,'while adds': 0,'while dels': 0}
self.assertEqual(testdict,funcList[0].keywordDictionary)
def test_parseText_Block7(self): #Need to update expected result (Question, we seem to not count the } at end of block?)
self.chunkb7.parseText()
funcList = self.chunkb7.functions
#self.debugFunctions(funcList)
self.assertTrue(len(funcList) == 1)
self.assertTrue(funcList[0].method=="onCreateLoader")
self.assertTrue(funcList[0].total_add == 2)
self.assertTrue(funcList[0].total_del == 7)
testdict= {'throw adds': 1, 'catch dels': 4, 'try adds': 0, 'try dels': 2, 'Exception dels': 1, 'raise adds': 0, 'catch adds': 0, 'finally dels': 0, 'finally adds': 0, 'throw dels': 0, 'Exception adds': 1, 'raise dels': 0, 'for adds': 0,'for dels': 0,'while adds': 0,'while dels': 0}
self.assertEqual(testdict,funcList[0].keywordDictionary)
def test_parseText_Block8(self): #Need to update expected result (Question, we seem to not count the } at end of block?)
self.chunkb8.parseText()
funcList = self.chunkb8.functions
#self.debugFunctions(funcList)
self.assertTrue(len(funcList) == 1)
self.assertTrue(funcList[0].method=="getAuthToken")
self.assertTrue(funcList[0].total_add == 2)
self.assertTrue(funcList[0].total_del == 2)
testdict= {'throw adds': 1, 'catch dels': 1, 'try adds': 1, 'try dels': 1, 'Exception dels': 1, 'raise adds': 0, 'catch adds': 2, 'finally dels': 0, 'finally adds': 0, 'throw dels': 0, 'Exception adds': 2, 'raise dels': 0, 'for adds': 0,'for dels': 0,'while adds': 0,'while dels': 0}
self.assertEqual(testdict,funcList[0].keywordDictionary)
def test_parseText_Block9(self):
self.chunkb9.parseText()
funcList = self.chunkb9.functions
#self.debugFunctions(funcList)
self.assertTrue(len(funcList) == 1)
self.assertTrue(funcList[0].method=="getAuthToken")
self.assertTrue(funcList[0].total_add == 2)
self.assertTrue(funcList[0].total_del == 2)
testdict= {'throw adds': 1, 'catch dels': 1, 'try adds': 0, 'try dels': 0, 'Exception dels': 1, 'raise adds': 0, 'catch adds': 2, 'finally dels': 0, 'finally adds': 0, 'throw dels': 0, 'Exception adds': 2, 'raise dels': 0, 'for adds': 0,'for dels': 0,'while adds': 0,'while dels': 0}
self.assertEqual(testdict,funcList[0].keywordDictionary)
def test_parseText_Block10(self):
self.chunkb10.parseText()
funcList = self.chunkb10.functions
#self.debugFunctions(funcList)
self.assertTrue(len(funcList) == 1)
self.assertTrue(funcList[0].method=="getToken")
self.assertTrue(funcList[0].total_add == 8)
self.assertTrue(funcList[0].total_del == 5)
testdict= {'throw adds': 0, 'catch dels': 0, 'try adds': 0, 'try dels': 0, 'Exception dels': 0, 'raise adds': 0, 'catch adds': 0, 'finally dels': 0, 'finally adds': 0, 'throw dels': 0, 'Exception adds': 0, 'raise dels': 0, 'for adds': 4,'for dels': 5,'while adds': 4,'while dels': 0}
self.assertEqual(testdict,funcList[0].keywordDictionary)
def test_parseText_Block11(self):
self.chunkb11.parseText()
funcList = self.chunkb11.functions
#self.debugFunctions(funcList)
self.assertTrue(len(funcList) == 1)
self.assertTrue(funcList[0].method=="blockTest")
self.assertTrue(funcList[0].total_add == 1)
self.assertTrue(funcList[0].total_del == 1)
testdict= {'throw adds': 0, 'catch dels': 0, 'try adds': 1, 'try dels': 1, 'Exception dels': 0, 'raise adds': 0, 'catch adds': 0, 'finally dels': 0, 'finally adds': 0, 'throw dels': 0, 'Exception adds': 0, 'raise dels': 0, 'for adds': 0,'for dels': 0,'while adds': 0,'while dels': 0}
self.assertEqual(testdict,funcList[0].keywordDictionary)
if __name__=="__main__":
unittest.main()
|
# The following code is based on the ProcHarvester implementation
# See https://github.com/IAIK/ProcHarvester/tree/master/code/analysis%20tool
import pandas as pd
import numpy as np
import config
import math
import distance_computation
def init_dist_matrices(file_contents):
dist_matrices = []
for fileContent in file_contents:
dist_matrices.append(np.full((len(fileContent.records), len(fileContent.records)), np.nan))
return dist_matrices
def predict_based_on_distance_matrix(file_contents, Y_train, test_index, train_index, dist_matrices):
# predict based on k nearest distances majority vote
# If there are multiple input files, then k gets multiplied by the number of files
elsecase = 0
ifcase = 0
# Determine indices of k nearest neighbours
k_nearest_indices_rows = []
for file_cnt, fileContent in enumerate(file_contents):
comp_cnt = 0
dist_array = dist_matrices[file_cnt] # everything NaN at the beginning (160x160)
        print("dist_array.shape", np.array(dist_array).shape)
for training_sample_cnt, row_cnt in enumerate(test_index):
test_to_train_distances = np.zeros(len(train_index)) # 160 measurements, 8-folds -> size 140
for test_sample_cnt, col_cnt in enumerate(train_index):
# lazy computation of distances
dist = dist_array[row_cnt, col_cnt]
if math.isnan(dist):
dist = distance_computation.dtw(fileContent.records[row_cnt], fileContent.records[col_cnt])
dist_array[row_cnt, col_cnt] = dist
dist_array[col_cnt, row_cnt] = dist
comp_cnt += 1
ifcase += 1
else:
elsecase += 1
test_to_train_distances[test_sample_cnt] = dist
sorted_indices = np.argsort(test_to_train_distances)
k_smallest_dist_indices = sorted_indices
            print("ifcase", ifcase)
print("elsecase", elsecase)
print("")
if len(k_nearest_indices_rows) <= training_sample_cnt:
k_nearest_indices_rows.append(k_smallest_dist_indices)
else:
extended = np.append(k_nearest_indices_rows[training_sample_cnt], k_smallest_dist_indices)
k_nearest_indices_rows[training_sample_cnt] = extended
sample_cnt = len(fileContent.records)
print(fileContent.file_name + ": " + str(sample_cnt) + "x" + str(sample_cnt) + " distance matrix - " + str(
comp_cnt) + " computations done")
# Predict based on k nearest indices rows
predictions = []
single_predictions = []
first_pred = []
second_pred = []
third_pred = []
for test_set_cnt in range(0, len(test_index)):
k_smallest_dist_indices = k_nearest_indices_rows[test_set_cnt]
k_nearest_labels = []
for index in k_smallest_dist_indices:
k_nearest_labels.append(Y_train.iloc[index])
k_nearest_labels = pd.Series(k_nearest_labels)
# label_cnts = tmp_lbls.groupby(tmp_lbls, sort=False).count()
label_cnts = k_nearest_labels[0:config.K_NEAREST_CNT].value_counts(sort=False).reindex(
pd.unique(k_nearest_labels))
prediction = label_cnts.idxmax()
predictions.append(prediction)
k_nearest_labels = pd.unique(k_nearest_labels)
for idx in range(0, len(k_nearest_labels)):
add = True
for check_idx in range(0, idx):
if single_predictions[check_idx][len(single_predictions[check_idx]) - 1] == k_nearest_labels[idx]:
add = False
break
if idx >= len(single_predictions):
single_predictions.append([])
if add:
single_predictions[idx].append(k_nearest_labels[idx])
else:
single_predictions[idx].append("already_found")
first_pred.append(k_nearest_labels[0])
if k_nearest_labels[1] != k_nearest_labels[0]:
second_pred.append(k_nearest_labels[1])
else:
assert False
second_pred.append("already_found")
if k_nearest_labels[2] != k_nearest_labels[0] and k_nearest_labels[2] != k_nearest_labels[1]:
third_pred.append(k_nearest_labels[2])
else:
assert False
third_pred.append("already_found")
return pd.Series(predictions), pd.Series(first_pred), pd.Series(second_pred), pd.Series(third_pred), pd.Series(
single_predictions)
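# --- Illustrative sketch (not part of the original ProcHarvester code) ---
# The prediction above is a k-nearest-neighbour majority vote over lazily
# cached pairwise DTW distances. The self-contained helper below shows the
# same voting idea with a plain Euclidean distance and made-up data; every
# name and value in it is hypothetical.
def _knn_majority_vote_sketch(k=2):
    train_records = [np.array([0.0, 0.1]), np.array([0.2, 0.1]), np.array([5.0, 5.1])]
    train_labels = pd.Series(["idle", "idle", "busy"])
    test_record = np.array([0.1, 0.1])
    # Distance from the test record to every training record.
    dists = np.array([np.linalg.norm(test_record - r) for r in train_records])
    # Majority vote among the labels of the k nearest training records.
    nearest = np.argsort(dists)[:k]
    return train_labels.iloc[nearest].value_counts().idxmax()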
|
from flask import render_template
from . import datasrc
@datasrc.app_errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
@datasrc.app_errorhandler(500)
def internal_server_error(e):
return render_template('500.html'), 500
|
#
# PySNMP MIB module CISCO-PORT-STORM-CONTROL-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-PORT-STORM-CONTROL-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:53:01 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint, ConstraintsIntersection, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsIntersection", "SingleValueConstraint")
ciscoMgmt, = mibBuilder.importSymbols("CISCO-SMI", "ciscoMgmt")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
NotificationGroup, ObjectGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ObjectGroup", "ModuleCompliance")
ModuleIdentity, ObjectIdentity, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, Integer32, Bits, TimeTicks, IpAddress, NotificationType, Unsigned32, iso, Gauge32, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "ObjectIdentity", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "Integer32", "Bits", "TimeTicks", "IpAddress", "NotificationType", "Unsigned32", "iso", "Gauge32", "Counter32")
TruthValue, DisplayString, TimeStamp, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "DisplayString", "TimeStamp", "TextualConvention")
ciscoPortStormControlMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 362))
ciscoPortStormControlMIB.setRevisions(('2007-10-19 00:00', '2003-07-03 00:00',))
if mibBuilder.loadTexts: ciscoPortStormControlMIB.setLastUpdated('200710190000Z')
if mibBuilder.loadTexts: ciscoPortStormControlMIB.setOrganization('Cisco Systems, Inc.')
ciscoPortStormControlMIBNotifs = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 362, 0))
ciscoPortStormControlMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 362, 1))
ciscoPortStormControlMIBConform = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 362, 2))
cpscConfigObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 1))
cpscStatusObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 2))
class CPortStormControlTrafficType(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))
namedValues = NamedValues(("broadcast", 1), ("multicast", 2), ("unicast", 3), ("all", 4))
class CPortStormControlActionType(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("filter", 1), ("shutdown", 2))
class CPortStormControlStatusType(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))
namedValues = NamedValues(("inactive", 1), ("forwarding", 2), ("trafficTypeFiltered", 3), ("allTrafficFiltered", 4), ("shutdown", 5))
cpscThresholdTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 1, 1), )
if mibBuilder.loadTexts: cpscThresholdTable.setStatus('current')
cpscThresholdEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "CISCO-PORT-STORM-CONTROL-MIB", "cpscTrafficType"))
if mibBuilder.loadTexts: cpscThresholdEntry.setStatus('current')
cpscTrafficType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 1, 1, 1, 1), CPortStormControlTrafficType())
if mibBuilder.loadTexts: cpscTrafficType.setStatus('current')
cpscUpperThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 10000))).setUnits('0.01 Percentage').setMaxAccess("readwrite")
if mibBuilder.loadTexts: cpscUpperThreshold.setStatus('current')
cpscLowerThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 10000))).setUnits('0.01 Percentage').setMaxAccess("readwrite")
if mibBuilder.loadTexts: cpscLowerThreshold.setStatus('current')
cpscActionTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 1, 2), )
if mibBuilder.loadTexts: cpscActionTable.setStatus('current')
cpscActionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 1, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: cpscActionEntry.setStatus('current')
cpscAction = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 1, 2, 1, 1), CPortStormControlActionType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cpscAction.setStatus('current')
cpscNotificationControl = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("none", 1), ("stormOccurred", 2), ("stormCleared", 3), ("both", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cpscNotificationControl.setStatus('current')
cpscNotificationThreshold = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1000))).setUnits('Notifications per Minute').setMaxAccess("readwrite")
if mibBuilder.loadTexts: cpscNotificationThreshold.setStatus('current')
cpscStatusTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 2, 1), )
if mibBuilder.loadTexts: cpscStatusTable.setStatus('current')
cpscStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 2, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "CISCO-PORT-STORM-CONTROL-MIB", "cpscTrafficType"))
if mibBuilder.loadTexts: cpscStatusEntry.setStatus('current')
cpscStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 2, 1, 1, 1), CPortStormControlStatusType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpscStatus.setStatus('current')
cpscCurrentLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 10000))).setUnits('0.01 Percentage').setMaxAccess("readonly")
if mibBuilder.loadTexts: cpscCurrentLevel.setStatus('current')
cpscSuppressedPacket = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 2, 1, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpscSuppressedPacket.setStatus('current')
cpscHistoryTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 2, 2), )
if mibBuilder.loadTexts: cpscHistoryTable.setStatus('current')
cpscHistoryEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 2, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "CISCO-PORT-STORM-CONTROL-MIB", "cpscHistoryTrafficType"), (0, "CISCO-PORT-STORM-CONTROL-MIB", "cpscHistoryIndex"))
if mibBuilder.loadTexts: cpscHistoryEntry.setStatus('current')
cpscHistoryTrafficType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 2, 2, 1, 1), CPortStormControlTrafficType())
if mibBuilder.loadTexts: cpscHistoryTrafficType.setStatus('current')
cpscHistoryIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 2, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1024)))
if mibBuilder.loadTexts: cpscHistoryIndex.setStatus('current')
cpscHistoryStartTime = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 2, 2, 1, 3), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpscHistoryStartTime.setStatus('current')
cpscHistoryEndTime = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 2, 2, 1, 4), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpscHistoryEndTime.setStatus('current')
cpscNotificationsPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 362, 0, 1))
cpscEventRev1 = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 362, 0, 2)).setObjects(("CISCO-PORT-STORM-CONTROL-MIB", "cpscStatus"))
if mibBuilder.loadTexts: cpscEventRev1.setStatus('current')
cpscEvent = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 362, 0, 1, 1)).setObjects(("CISCO-PORT-STORM-CONTROL-MIB", "cpscStatus"))
if mibBuilder.loadTexts: cpscEvent.setStatus('deprecated')
ciscoPortStormControlMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 362, 2, 1))
ciscoPortStormControlMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 362, 2, 2))
ciscoPortStormControlMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 362, 2, 1, 1)).setObjects(("CISCO-PORT-STORM-CONTROL-MIB", "cpscConfigurationGroup"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscNotifConfigurationGroup"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscNotificationGroup"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscStatusGroup"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscStatisticsGroup"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscHistoryGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoPortStormControlMIBCompliance = ciscoPortStormControlMIBCompliance.setStatus('deprecated')
ciscoPortStormControlMIBComplianceRev1 = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 362, 2, 1, 2)).setObjects(("CISCO-PORT-STORM-CONTROL-MIB", "cpscConfigurationGroup"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscNotifConfigurationGroup"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscNotificationGroupRev1"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscStatusGroup"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscStatisticsGroup"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscHistoryGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoPortStormControlMIBComplianceRev1 = ciscoPortStormControlMIBComplianceRev1.setStatus('current')
cpscConfigurationGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 362, 2, 2, 1)).setObjects(("CISCO-PORT-STORM-CONTROL-MIB", "cpscUpperThreshold"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscLowerThreshold"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscAction"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpscConfigurationGroup = cpscConfigurationGroup.setStatus('current')
cpscStatusGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 362, 2, 2, 2)).setObjects(("CISCO-PORT-STORM-CONTROL-MIB", "cpscStatus"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscCurrentLevel"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpscStatusGroup = cpscStatusGroup.setStatus('current')
cpscNotificationGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 9, 9, 362, 2, 2, 3)).setObjects(("CISCO-PORT-STORM-CONTROL-MIB", "cpscEvent"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpscNotificationGroup = cpscNotificationGroup.setStatus('deprecated')
cpscNotifConfigurationGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 362, 2, 2, 4)).setObjects(("CISCO-PORT-STORM-CONTROL-MIB", "cpscNotificationControl"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscNotificationThreshold"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpscNotifConfigurationGroup = cpscNotifConfigurationGroup.setStatus('current')
cpscStatisticsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 362, 2, 2, 5)).setObjects(("CISCO-PORT-STORM-CONTROL-MIB", "cpscSuppressedPacket"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpscStatisticsGroup = cpscStatisticsGroup.setStatus('current')
cpscHistoryGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 362, 2, 2, 6)).setObjects(("CISCO-PORT-STORM-CONTROL-MIB", "cpscHistoryStartTime"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscHistoryEndTime"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpscHistoryGroup = cpscHistoryGroup.setStatus('current')
cpscNotificationGroupRev1 = NotificationGroup((1, 3, 6, 1, 4, 1, 9, 9, 362, 2, 2, 7)).setObjects(("CISCO-PORT-STORM-CONTROL-MIB", "cpscEventRev1"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpscNotificationGroupRev1 = cpscNotificationGroupRev1.setStatus('current')
mibBuilder.exportSymbols("CISCO-PORT-STORM-CONTROL-MIB", CPortStormControlActionType=CPortStormControlActionType, cpscHistoryEntry=cpscHistoryEntry, cpscHistoryStartTime=cpscHistoryStartTime, PYSNMP_MODULE_ID=ciscoPortStormControlMIB, cpscEventRev1=cpscEventRev1, ciscoPortStormControlMIBConform=ciscoPortStormControlMIBConform, cpscLowerThreshold=cpscLowerThreshold, CPortStormControlTrafficType=CPortStormControlTrafficType, cpscAction=cpscAction, cpscHistoryTrafficType=cpscHistoryTrafficType, ciscoPortStormControlMIBObjects=ciscoPortStormControlMIBObjects, cpscStatusEntry=cpscStatusEntry, cpscStatusGroup=cpscStatusGroup, cpscStatusTable=cpscStatusTable, cpscActionEntry=cpscActionEntry, cpscSuppressedPacket=cpscSuppressedPacket, ciscoPortStormControlMIBCompliances=ciscoPortStormControlMIBCompliances, ciscoPortStormControlMIBComplianceRev1=ciscoPortStormControlMIBComplianceRev1, cpscThresholdTable=cpscThresholdTable, cpscNotificationControl=cpscNotificationControl, cpscNotificationThreshold=cpscNotificationThreshold, ciscoPortStormControlMIBGroups=ciscoPortStormControlMIBGroups, cpscConfigurationGroup=cpscConfigurationGroup, cpscHistoryEndTime=cpscHistoryEndTime, cpscTrafficType=cpscTrafficType, cpscHistoryIndex=cpscHistoryIndex, CPortStormControlStatusType=CPortStormControlStatusType, cpscCurrentLevel=cpscCurrentLevel, cpscEvent=cpscEvent, cpscThresholdEntry=cpscThresholdEntry, cpscHistoryTable=cpscHistoryTable, ciscoPortStormControlMIBCompliance=ciscoPortStormControlMIBCompliance, ciscoPortStormControlMIB=ciscoPortStormControlMIB, cpscUpperThreshold=cpscUpperThreshold, cpscNotificationGroup=cpscNotificationGroup, cpscHistoryGroup=cpscHistoryGroup, cpscStatusObjects=cpscStatusObjects, cpscStatisticsGroup=cpscStatisticsGroup, cpscActionTable=cpscActionTable, cpscStatus=cpscStatus, cpscConfigObjects=cpscConfigObjects, cpscNotificationsPrefix=cpscNotificationsPrefix, cpscNotificationGroupRev1=cpscNotificationGroupRev1, ciscoPortStormControlMIBNotifs=ciscoPortStormControlMIBNotifs, cpscNotifConfigurationGroup=cpscNotifConfigurationGroup)
|
"""
constraint-expression:
logical-or-expression
"""
import glrp
from ...parser import cxx98
from be_typing import TYPE_CHECKING
@glrp.rule('constraint-expression : logical-or-expression')
@cxx98
def constraint_expression(self, p):
# type: (CxxParser, glrp.Production) -> None
pass
if TYPE_CHECKING:
from ...parser import CxxParser |
from dataclasses import dataclass
from pdip.cqrs import ICommand
@dataclass
class StartExecutionProcessCommand(ICommand):
DataOperationId: int = None
JobId: int = None
DataOperationJobExecutionId: int = None
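# Usage sketch (assumption, not part of the original module): the command is a
# plain data container, so it can be built with keyword arguments and handed to
# whatever pdip.cqrs dispatcher the application wires up.
if __name__ == "__main__":
    example_command = StartExecutionProcessCommand(DataOperationId=1, JobId=2, DataOperationJobExecutionId=3)
    print(example_command)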
|
# coding:utf-8
import functools
import collections
import MaterialX
from LxGraphic import grhCfg
from . import mtxCfg
class Mtd_MtxBasic(mtxCfg.MtxUtility):
MOD_materialx = MaterialX
MOD_functools = functools
class Mtd_MtxFile(Mtd_MtxBasic):
@classmethod
def _getNodeDefDict(cls, fileString):
dic = cls.CLS_ordered_dict()
doc = cls.MOD_materialx.createDocument()
# noinspection PyArgumentList
cls.MOD_materialx.readFromXmlFile(doc, fileString)
#
for i in doc.getNodeDefs():
typepathString = i.getNodeString()
datatypeStr = i.getType()
nodeDic = collections.OrderedDict()
nodeDic[grhCfg.GrhUtility.DEF_grh__key_node_datatype] = datatypeStr
nodeAttrLis = []
for input_ in i.getInputs():
portpathStr = input_.getName()
datatypeStr = input_.getType()
portrawStr = input_.getValueString()
attrDic = collections.OrderedDict()
attrDic[grhCfg.GrhUtility.DEF_grh__key_portpath] = portpathStr
attrDic[grhCfg.GrhUtility.DEF_grh__key_porttype] = datatypeStr
attrDic[grhCfg.GrhUtility.DEF_grh__key_port_datatype] = datatypeStr
attrDic[grhCfg.GrhUtility.DEF_grh__key_portraw] = portrawStr
attrDic[grhCfg.GrhUtility.DEF_grh__key_assign] = grhCfg.GrhPortAssignQuery.inport
nodeAttrLis.append(attrDic)
nodeDic[grhCfg.GrhUtility.DEF_grh__key_port] = nodeAttrLis
dic[typepathString] = nodeDic
return dic
|
import pytest
import pandas as pd
import numpy as np
from check_univariate_outliers import pick_univariate_outliers
s = np.random.seed(54920)
def make_df_with_outliers(mean, std, size, colname, values_to_insert=None, **kwargs):
data = np.random.normal(loc=mean, scale=std, size=size)
if values_to_insert:
data = np.append(np.array(values_to_insert), data)
df_args = kwargs
df_args[colname] = data
return pd.DataFrame(df_args)
# make_df_with_outliers(2000, 100, 10, colname="brain_volume",
# values_to_insert=[1600, 2400], arm="standard", visit="baseline")
@pytest.fixture
def df_within_limits(mean=1000, sd=100, size=1000):
df = make_df_with_outliers(mean, sd, size, colname="brain_volume",
arm="standard", visit="baseline")
df.index.names = ['subject']
df.set_index(['visit', 'arm'], append=True, inplace=True)
return df
@pytest.fixture
def df_baseline(mean=2000, sd=100, size=1000):
df = make_df_with_outliers(mean, sd, size, colname="brain_volume",
values_to_insert=[mean - 4*sd, mean + 4*sd],
arm="standard", visit="baseline")
df.index.names = ['subject']
df.set_index(['visit', 'arm'], append=True, inplace=True)
return df
@pytest.fixture
def df_year1(mean=2000, sd=50, size=1000):
df = make_df_with_outliers(mean, sd, size, colname="brain_volume",
values_to_insert=[mean - 4*sd, mean + 4*sd],
arm="standard", visit="followup_1y")
df.index.names = ['subject']
df.set_index(['visit', 'arm'], append=True, inplace=True)
return df
@pytest.fixture
def df_nice():
data = [0] * 5 + [10] * 90 + [1000] * 4 + [10000] # mean: 149, sd = 1008.9
df = make_df_with_outliers(0, 1, 0, colname="brain_volume",
values_to_insert=data,
arm="standard", visit="baseline")
df.index.names = ['subject']
df.set_index(['visit', 'arm'], append=True, inplace=True)
return df
def test_catches_outlier(df_nice):
result = df_nice.mask(~df_nice.apply(pick_univariate_outliers,
sd_count=3)).stack()
assert result.shape[0] == 1
assert result.values == [10000]
# 0. Sanity checks:
# a. Should return a series
# b. Should return no outliers if everything is within limits
def test_returns_series(df_within_limits):
result = df_within_limits.mask(
~df_within_limits.apply(pick_univariate_outliers,
sd_count=3.5)).stack()
assert type(result) == pd.core.series.Series
assert result.shape[0] == 0
# Others:
# - It should throw an error if the data frame is not indexed / indexes are not
# named correctly
# - Should error out if there is now baseline visit
# Tests:
# 1. Testing df_baseline should find the two outliers
def test_baseline_finds(df_baseline):
result = df_baseline.mask(~df_baseline.apply(pick_univariate_outliers)).stack()
assert result.shape[0] >= 2
assert result.shape[0] <= 2 + int(np.round(0.002 * df_baseline.shape[0]))
# 2. Testing df_baseline + df_year1 should find two outliers if baseline-only
# is enabled, four if not
def test_year1_ok_if_baseline_only(df_baseline, df_year1):
df = pd.concat([df_baseline, df_year1], axis=0)
result = df.mask(~df.apply(pick_univariate_outliers, baseline_only=True)).stack()
assert (result.reset_index(['visit'])['visit'] != "followup_1y").all()
def test_year1_outliers_if_per_year(df_baseline, df_year1):
df = pd.concat([df_baseline, df_year1], axis=0)
result = df.mask(~df.apply(pick_univariate_outliers, baseline_only=False)).stack()
assert (result.reset_index(['visit'])['visit'] == "followup_1y").any()
|
import asyncio
import random
import os
from aiocache import cached, Cache
import aiohttp
import discord
from discord.commands import Option
from discord.ext import commands
from alttprbot.util.holyimage import HolyImage
ALTTP_RANDOMIZER_SERVERS = list(map(int, os.environ.get("ALTTP_RANDOMIZER_SERVERS", "").split(',')))
WELCOME_MESSAGES = {
'french': (
'Bienvenue! Ce serveur discord est principalement en anglais.\n'
'Vous trouverez un serveur en français en suivant https://discord.gg/9cWhQyw .\n'
'Les membres de ce serveur se feront un plaisir de vous aider.\n\n'
'Merci, et bienvenue encore une fois.'
),
'spanish': (
'¡Bienvenidos! Este servidor de Discord es principalmente de habla inglesa.'
'¡No tengáis miedo, hay un servidor en Español que podéis encontrar en https://discord.gg/xyHxAFJ donde os pueden ayudar!\n\n'
'Gracias y otra vez, bienvenidos.'
),
'german': (
'Willkommen! Auf diesem Server wird grundsätzlich in Englisch geschrieben.'
'Aber keine Sorge, es gibt ebenfalls einen deutschen Discord-Server, der dir gerne in deiner Sprache helfen kann.'
'Diesen findest du unter folgender Einladung https://discordapp.com/invite/5zuANcS . Dort gibt es alles - von Einsteigertipps bis zu Turnieren\n\n'
'Vielen Dank und nochmals herzlich willkommen.'
)
}
async def holy_slug_autocomplete(ctx):
data = await get_holy_images()
value: str = ctx.value
raw_game = ctx.options['game']
if raw_game:
game = raw_game[0]['value']
else:
if ctx.interaction.guild:
game = await holy_game_default(ctx.interaction.guild)
else:
game = 'z3r'
slugs = sorted([val['slug'] for val in data[game] if val['slug'].startswith(value)][:25])
return slugs
async def holy_game_autocomplete(ctx):
data = await get_holy_images()
return sorted([val for val in data.keys() if val.startswith(ctx.value)][:25])
@cached(ttl=300, cache=Cache.MEMORY, key="holygamedefault")
async def holy_game_default(guild: discord.Guild):
return await guild.config_get("HolyImageDefaultGame", "z3r")
@cached(ttl=300, cache=Cache.MEMORY, key="holyimages")
async def get_holy_images() -> dict:
async with aiohttp.ClientSession() as session:
async with session.get('http://alttp.mymm1.com/holyimage/holyimages.json') as resp:
return await resp.json()
class Misc(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_message(self, message):
if self.bot.user in message.mentions:
emoji = discord.utils.get(self.bot.emojis, name='SahasrahBot')
if emoji:
await asyncio.sleep(random.random()*5)
await message.add_reaction(emoji)
@commands.slash_command(name="welcome", guild_ids=ALTTP_RANDOMIZER_SERVERS)
async def welcome_cmd(self, ctx: discord.ApplicationContext, language: Option(str, description="Choose a language for the welcome message.", choices=WELCOME_MESSAGES.keys())):
"""
Welcome messages for various languages.
"""
await ctx.respond(WELCOME_MESSAGES[language])
@commands.slash_command(name="memberinfo", guild_only=True)
async def memberinfo_cmd(self, ctx, member: Option(discord.Member, "Choose a member")):
"""
Get information about a member.
"""
if member is None:
member = ctx.author
embed = discord.Embed(
title=f"Member info for {member.name}#{member.discriminator}",
color=member.color
)
embed.add_field(name='Created at', value=discord.utils.format_dt(member.created_at, style='F'), inline=False)
embed.add_field(name='Joined at', value=discord.utils.format_dt(member.joined_at, style='F'), inline=False)
embed.add_field(name="Discord ID", value=member.id, inline=False)
if member.avatar:
embed.set_thumbnail(url=member.avatar.url)
await ctx.respond(embed=embed, ephemeral=True)
@commands.slash_command(
name="rom",
guild_ids=ALTTP_RANDOMIZER_SERVERS
)
async def rom_cmd(self, ctx):
"""
Get info about how to verify a ROM.
"""
await ctx.respond(
"If you need help verifying your legally-dumped Japanese version 1.0 A Link to the Past Game file needed to run ALTTPR, use this tool: <http://alttp.mymm1.com/game/checkcrc/>\n"
"It can also help get the permalink page URL which has access to the Spoiler Log depending on the settings that were chosen. Not all games that are generated have access to the Spoiler Log.\n\n"
"For legal reasons, we cannot provide help with finding this ROM online. Please do not ask here for assistance with this.\n"
"See <#543572578787393556> for details."
)
@commands.slash_command(guild_ids=ALTTP_RANDOMIZER_SERVERS)
async def festive(self, ctx: discord.ApplicationContext):
"""
Get info about festive randomizers.
"""
embed = discord.Embed(
title='Festive Randomizer Information',
description='Latest details of any upcoming festive randomizers.',
color=discord.Color.red()
)
embed.set_image(
url='https://cdn.discordapp.com/attachments/307860211333595146/654123045375442954/unknown.png')
await ctx.respond(embed=embed)
@commands.slash_command(
name='holyimage'
)
async def holyimage_cmd(
self,
ctx,
slug: Option(str, description="Slug of the holy image to retrieve.", autocomplete=holy_slug_autocomplete),
game: Option(str, description="Slug of the game to pull a holy image for.", required=False, autocomplete=holy_game_autocomplete)
):
"""
Retrieves a holy image from http://alttp.mymm1.com/holyimage/
"""
if game is None:
if ctx.guild is None:
game = "z3r"
else:
game = await ctx.guild.config_get("HolyImageDefaultGame", "z3r")
holyimage = await HolyImage.construct(slug=slug, game=game)
await ctx.respond(embed=holyimage.embed)
def setup(bot):
bot.add_cog(Misc(bot))
|
# lec6.4-removeDups.py
# edX MITx 6.00.1x
# Introduction to Computer Science and Programming Using Python
# Lecture 6, video 4
# Demonstrates performing operations on lists
# Demonstrates how changing a list while iterating over it creates
# unintended problems
def removeDups(L1, L2):
for e1 in L1:
if e1 in L2:
# Note: we are iterating over L1, but just removed one of
# its elements
L1.remove(e1)
# L1 is now [2,3,4] so when it loops through again the next
# element is 3. As a result the 2 is skipped and not removed
# as intended
L1 = [1,2,3,4]
L2 = [1,2,5,6]
removeDups(L1, L2)
print(L1)
# Better way to perform operations on list by creating a copy of L1
# then iterating over that as it will not change
def removeDupsBetter(L1, L2):
# Make a copy of L1 and put into L1Start
L1Start = L1[:]
for e1 in L1Start:
if e1 in L2:
L1.remove(e1)
L1 = [1,2,3,4]
L2 = [1,2,5,6]
removeDupsBetter(L1, L2)
print(L1)
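# A third option (not in the original lecture code, added only as a sketch):
# build the result with a list comprehension instead of mutating the list
# that is being iterated over.
def removeDupsComprehension(L1, L2):
    # Returns a new list rather than modifying L1 in place.
    return [e1 for e1 in L1 if e1 not in L2]
L1 = [1,2,3,4]
L2 = [1,2,5,6]
print(removeDupsComprehension(L1, L2))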
|
import time
import re
# from login import LogIn
import setup
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from bs4 import BeautifulSoup as bs
from tkinter import *
from instascraper.scraper_interface import ScraperInterface
def filter_li(tag):
if tag.name == 'ul' and len(tag.contents) == 3:
return True
else:
return False
class GetFollower(ScraperInterface):
def __init__(self, browser):
self.browser = browser
self.soup = None
def _wait(self, element, means='xpath'):
wait = WebDriverWait(self.browser, 30)
if means == 'xpath':
wait.until(EC.element_to_be_clickable((By.XPATH, str(element))))
elif means == 'class':
wait.until(EC.element_to_be_clickable((By.CLASS_NAME, str(element))))
def homepage(self):
self.browser.implicitly_wait(10)
self._wait("//a[@href='/']")
self.browser.find_element_by_xpath("//a[@href='/']").click()
self.browser.implicitly_wait(10)
def profile(self):
self._wait("//a[@href='/{}/']".format(setup.CURRENT_USERNAME))
self.browser.find_element_by_xpath("//a[@href='/{}/']".format(setup.CURRENT_USERNAME)).click()
self._wait("k9GMp", 'class')
def make_soup(self):
source = self.browser.page_source
self.soup = bs(source, 'lxml')
# print(self.soup.prettify())
def _follower_list(self):
self._wait("//a[@href='/{}/followers/']".format(setup.CURRENT_USERNAME))
self.browser.find_element_by_xpath("//a[@href='/{}/followers/']".format(setup.CURRENT_USERNAME)).click()
# time.sleep(30)
flag = list()
tmp = 0
count = 0
while len(flag) < setup.DETAILS['follower']:
tmp = len(flag)
self._wait("/html/body/div[4]/div/div/div[2]")
scroll_box = self.browser.find_element_by_xpath("/html/body/div[4]/div/div/div[2]")
self.browser.execute_script(
"arguments[0].scrollTo(0, arguments[0].scrollHeight); return arguments[0].scrollHeight;",
scroll_box)
flag = self.browser.find_elements_by_xpath("/html/body/div[4]/div/div/div[2]/ul/div/li")
if len(flag) == tmp:
count += 1
time.sleep(2)
if count % 20 == 0:
self.browser.refresh()
self._wait("//a[@href='/{}/followers/']".format(setup.CURRENT_USERNAME))
self.browser.find_element_by_xpath("//a[@href='/{}/followers/']".format(setup.CURRENT_USERNAME)).click()
print(len(flag))
self.make_soup()
def _print_list(self):
div = self.soup.find_all('div', class_='d7ByH')
result_list = [element.a.get_text() for element in div if not element.find('span', string='Verified')]
# print(len(div))
for element in div:
print(element.get_text())
# print(result_list)
return result_list
def confirm_number(self):
print(len(self.soup.find('div', class_='PZuss').contents))
def main(self):
self.profile()
self._follower_list()
self.browser.refresh()
return self._print_list()
class GetFollowing(GetFollower):
def _following_list(self):
self._wait("//a[@href='/{}/following/']".format(setup.CURRENT_USERNAME))
self.browser.find_element_by_xpath("//a[@href='/{}/following/']".format(setup.CURRENT_USERNAME)).click()
# time.sleep(30)
flag = list()
tmp = 0
count = 0
while len(flag) < setup.DETAILS['following']:
tmp = len(flag)
self._wait("/html/body/div[4]/div/div/div[2]")
scroll_box = self.browser.find_element_by_xpath("/html/body/div[4]/div/div/div[2]")
self.browser.execute_script(
"arguments[0].scrollTo(0, arguments[0].scrollHeight); return arguments[0].scrollHeight;",
scroll_box)
flag = self.browser.find_elements_by_xpath("/html/body/div[4]/div/div/div[2]/ul/div/li")
if len(flag) == tmp:
count += 1
time.sleep(2)
if count % 20 == 0:
self.browser.refresh()
self._wait("//a[@href='/{}/following/']".format(setup.CURRENT_USERNAME))
self.browser.find_element_by_xpath("//a[@href='/{}/following/']".format(setup.CURRENT_USERNAME)).click()
print(len(flag))
self.make_soup()
def main(self):
self.profile()
self._following_list()
self.browser.refresh()
return self._print_list()
class GetNumbers(GetFollower):
def _get_numbers(self):
self.make_soup()
ul = self.soup.find('ul', class_='k9GMp')
numbers = dict()
for element in ul.children:
text = element.get_text()
if not text:
pass
elif re.search('post', text):
numbers['post'] = int(text.split(' ')[0])
elif re.search('follower', text):
numbers['follower'] = int(text.split(' ')[0])
elif re.search('following', text):
numbers['following'] = int(text.split(' ')[0])
return numbers
def main(self):
self.profile()
print('this is the problem')
setup.DETAILS = self._get_numbers()
self.homepage()
return setup.DETAILS
class NoFriend(GetFollowing, GetFollower):
def main(self):
followers = GetFollower(self.browser).main()
followings = GetFollowing(self.browser).main()
setup.NO_FRIEND_LIST = [following for following in followings if following not in followers]
return setup.NO_FRIEND_LIST
if __name__ == '__main__':
# only import when you need it (circular import)
# login = LogIn()
# login.main()
#
# # This must run first
#
# get_follower = GetFollower(login.browser)
# get_follower.result_gui()
#
# get_following = GetFollowing(login.browser)
# get_following.result_gui()
#
# # no_friend_list = NoFriend(login.browser)
# # no_friend_list.result_gui()
pass
|
"""
File Name: no_style.py
Description: This program takes one text file as an argument and outputs the frequencies of valid tokens it contains.
Valid tokens are considered as a sequence of alphanumeric characters along with a few special characters [a-zA-Z0-9_].
The tokens are then reduced to their lowercase form where their frequencies can be computed and shown to the user.
"""
#!/usr/bin/env python3
import re
import string
import sys
from collections import Counter
from os import path
class Tokenizer:
"""
The Tokenizer object allows us to compute the frequencies of the tokens within the text file
Attributes:
tokens (list): Stores all tokens present within the file
frequencies (Counter): Dictionary subclass that stores the frequency of tokens
file (str): Stores the name of the file and its location
"""
def __init__(self, textFile):
self.tokens = []
self.frequencies = Counter()
self.file = textFile
def readChunk(self, fileObject):
"""
The readChunk method reads a file object in sections of 4096 bytes and returns a generator of the text
        contained within this section size
Args:
fileObject (file object): A file object of the text file being read
Returns:
A generator for the text equal to the size of the bytes indicated to read
"""
while True:
chunk = fileObject.read(4096)
if not chunk:
break
yield chunk
def tokenize(self):
"""
The tokenize method reads the text file provided and adds each valid token into the 'tokens' attribute
Note:
            The runtime for this method is O(n) where n is the number of characters in the text file.
This is because the method grabs each line in the file and converts all uppercase characters to lowercase.
From there, it uses a regex expression to find all valid tokens in the file.
This requires us to parse through each character in the file to grab the list of valid tokens.
Raises:
OSError: If file cannot be opened
"""
# Checks that the text file provided can be opened
try:
file = open(self.file, encoding='utf-8', errors='replace')
# Displays the error that arose if the file could not be opened
except OSError as error:
print(error)
# Performs our tokenization if the file can be opened
else:
# Creates a regular expression in order to tokenize the file
            pattern = re.compile(r'\w+')
# Creates a set of characters used for token pattern determination
charList = list(string.ascii_letters) + list(string.digits) + ['_']
charSet = set(charList)
# Variables to be used for retaining information on characters and tokens found
firstChar = ''
lastChar = ''
lastToken = ''
# Iterates through each chunk in the file
for chunk in self.readChunk(file):
# Saves the first character in the chunk and finds all valid tokens through the regular expression created
firstChar = chunk[0]
validTokens = pattern.findall(chunk.lower())
# Checks if the first character in current chunk should be part of the last token determined in previous chunk
# If true, combines last token with first token in current chunk
# Otherwise, adds last token to list of valid tokens found
if lastChar in charSet and firstChar in charSet:
validTokens[0] = lastToken + validTokens[0]
elif lastToken:
self.tokens.append(lastToken)
# Pops the last valid token found and adds the list of valid tokens to the 'tokens' attribute
# Saves the last character in the chunk to be used for reference for next chunk to be read
lastToken = validTokens.pop()
self.tokens.extend(validTokens)
lastChar = chunk[-1]
# Adds last token to list of valid characters found
self.tokens.append(lastToken)
# Closes the file regardless of whether or not an error was thrown
finally:
file.close()
def computeWordFrequencies(self):
"""
        The computeWordFrequencies method checks the list of tokens and computes the frequencies of each one into the 'frequencies'
attribute of the object
Note:
            The runtime for this method is O(m) where m is the number of items in the 'tokens' attribute.
This is because the method grabs each valid token that was in the file.
From there, the token is added to the dictionary subclass and assigned a value of 1.
If the token is already present in the datatype, its value is increased by 1.
Counter datatype information: https://docs.python.org/3.6/library/collections.html#collections.Counter
"""
self.frequencies = Counter(self.tokens)
def print(self):
"""
The print method displays the list of tokens and their frequencies to the user
Note:
The runtime for this method is O(p log p) where p is the number of key:value pairs in the 'frequencies' attribute.
This is because our method grabs each pairing in the dictionary subclass.
From there, it prints the key as well as its associated value for the user to view through the terminal.
The pairs are presented in sorted order based on the frequency of the token.
"""
with open('stop_words.txt') as file:
stop_words = set(re.split(',', file.read()))
stop_words.add('s')
count = 0
for (token, frequency) in self.frequencies.most_common():
if count >= 25:
break
            if token not in stop_words:
print(token + ' - ' + str(frequency))
count += 1
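# --- Illustrative sketch (not part of the original assignment code) ---
# The tokenize() method above stitches together a token that straddles a chunk
# boundary by remembering the trailing partial token of the previous chunk.
# This standalone helper shows the same idea on an in-memory list of chunks;
# it is a simplified sketch, not the class's actual implementation.
def _tokenize_chunks_sketch(chunks):
    wordChars = set(string.ascii_letters) | set(string.digits) | {'_'}
    pattern = re.compile(r'\w+')
    tokens, lastToken, lastChar = [], '', ''
    for chunk in chunks:
        found = pattern.findall(chunk.lower())
        if not found:
            # No word characters in this chunk, so any pending token is complete.
            if lastToken:
                tokens.append(lastToken)
                lastToken = ''
            lastChar = chunk[-1] if chunk else lastChar
            continue
        # Glue the two halves together when the boundary falls inside a token.
        if lastToken and lastChar in wordChars and chunk[0] in wordChars:
            found[0] = lastToken + found[0]
        elif lastToken:
            tokens.append(lastToken)
        lastToken = found.pop()
        tokens.extend(found)
        lastChar = chunk[-1]
    if lastToken:
        tokens.append(lastToken)
    return tokens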
def main():
"""
The main function runs the tokenizer program and displays the frequencies of each token within the text file
Raises:
SystemExit: If improper arguments are provided to run the program
"""
# Checks that the user provides the valid number of arguments and a valid text file to run the program
if (len(sys.argv) != 2):
sys.exit('Invalid arguments provided\nPlease provide valid arguments in order to run tokenizer')
elif (not path.isfile(sys.argv[1]) or sys.argv[1][-4:] != '.txt'):
sys.exit('Argument provided is not a valid text file\nPlease provide a valid text file in order to run tokenizer')
# Creates a Tokenizer object based on text file given and computes the frequencies of tokens within then file
tkzr = Tokenizer(sys.argv[1])
tkzr.tokenize()
tkzr.computeWordFrequencies()
tkzr.print()
if __name__ == "__main__":
main()
|
from src import Parameters
from src import Preprocessing
from src import TextClassifier
from src import Run
class Controller(Parameters):
def __init__(self):
# Preprocessing pipeline
self.pr = Preprocessing(Parameters.num_words, Parameters.seq_len)
self.data = self.prepare_data()
# Initialize the model
self.model = TextClassifier(Parameters)
# Training - Evaluation pipeline
Run().train(self.model, self.data, Parameters)
def prepare_data(self, source=None, split=True):
# Preprocessing pipeline
pr = self.pr
pr.load_data(source=source)
pr.clean_text()
pr.text_tokenization()
pr.build_vocabulary()
pr.word_to_idx()
pr.padding_sentences()
if split:
pr.split_data()
else:
pr.commit_data()
return {'x_train': pr.x_train, 'y_train': pr.y_train, 'x_test': pr.x_test, 'y_test': pr.y_test}
def execute(self, inputdata : list):
pdata = self.prepare_data(source=['']+inputdata, split=False)
return Run().execute(self.model, pdata)[1:]
if __name__ == '__main__':
controller = Controller() |
import numpy as np
class PathBuilder(dict):
"""
Usage:
```
path_builder = PathBuilder()
    path_builder.add_all(
observations=1,
actions=2,
next_observations=3,
...
)
path.add_sample(
observations=4,
actions=5,
next_observations=6,
...
)
path = path_builder.get_all_stacked()
path['observations']
# output: [1, 4]
path['actions']
# output: [2, 5]
```
Note that the key should be "actions" and not "action" since the
resulting dictionary will have those keys.
"""
def __init__(self):
super().__init__()
self._path_length = 0
def add_all(self, **key_to_value):
for k, v in key_to_value.items():
if k not in self:
self[k] = [v]
else:
self[k].append(v)
self._path_length += 1
def get_all_stacked(self):
raise NotImplementedError("Does not handle dict obs")
output_dict = dict()
for k, v in self.items():
output_dict[k] = stack_list(v)
return output_dict
def get_stacked(self, key):
v = self.__getitem__(key)
if isinstance(v[0], dict):
raise NotImplementedError()
return np.array(v)
def __len__(self):
return self._path_length
def stack_list(lst):
if isinstance(lst[0], dict):
return lst
else:
return np.array(lst)
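# Usage sketch (not part of the original module), mirroring the class docstring:
if __name__ == "__main__":
    path_builder = PathBuilder()
    path_builder.add_all(observations=1, actions=2)
    path_builder.add_all(observations=4, actions=5)
    # get_stacked turns the per-key list into a numpy array.
    print(path_builder.get_stacked("observations"))  # -> [1 4]
    print(len(path_builder))                         # -> 2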
|
from django.test import TestCase
from .models import Image,User,Comments,Profile
# Create your tests here.
class ImageTestClass(TestCase):
'''
Test case for the Image class
'''
def setUp(self):
'''
Method that creates an instance of Image class
'''
# Create a image instance
self.new_image = Image(image = 'flower.png',image_name = 'flower',image_caption='beautiful')
def test_instance(self):
'''
Test case to check if self.new_image in an instance of image class
'''
self.assertTrue(isinstance(self.new_image, Image))
def test_display_image(self):
'''
Test case to check if all images are retreived from the database
'''
found_photos = Image.get_photos()
photos = Image.objects.all()
self.assertTrue(len(found_photos) == len(photos))
def test_save_image(self):
'''
Test case to save images uploaded
'''
        self.new_image.save_image()
images = Image.objects.all()
self.assertTrue(len(images) > 0)
    def test_update_caption(self):
        '''
        Test case to update a new image caption
        '''
        self.new_image.save_image()
        image_id = self.new_image.id
        Image.update_caption(image_id, "book.jpg", "book")
        updated_image = Image.objects.get(id=image_id)
        self.assertEqual(updated_image.image_caption, "book")
    def test_get_image_by_id(self):
        '''
        Test case to get a single image by its id
        '''
        self.new_image.save_image()
        search_image = self.new_image.get_image_by_id(self.new_image.id)
        searched_image = Image.objects.get(id=self.new_image.id)
        self.assertTrue(searched_image, search_image)
def test_delete_image(self):
'''
Test case to delete uploaded images
'''
        self.new_image.save_image()
        self.new_image.delete_image()
images = Image.objects.all()
self.assertTrue(len(images) == 0)
class ProfileTestCase(TestCase):
'''
Test case for the Profile class
'''
def setUp(self):
'''
Method that creates an instance of Profile class
'''
# Create instance of Profile class
self.new_profile = Profile(name = 'murimi',email = 'muriuki.ms.com',bio="I am Groot",avatar = 'flower.jpg')
def test_instance(self):
'''
Test case to check if self.new_profile in an instance of Profile class
'''
self.assertTrue(isinstance(self.new_profile, Profile))
def test_get_profile(self):
'''
Test case to check if all profiles are retreived from the database
'''
profiles = Profile.get_profiles()
profile = Profile.objects.all()
self.assertTrue(len(profiles) == len(profile))
def test_save_profile(self):
'''
Test case to save profile
'''
        self.new_profile.save_profile()
profile = Profile.objects.all()
self.assertTrue(profile)
    def test_update_profile(self):
        '''
        Test case to update the profile details of a user
        '''
        self.new_profile.save_profile()
        profile_id = self.new_profile.id
        Profile.objects.filter(id=profile_id).update(bio="I am still Groot")
        self.assertEqual(Profile.objects.get(id=profile_id).bio, "I am still Groot")
    def test_get_profile_by_id(self):
        '''
        Test case to get a single profile by its id
        '''
        self.new_profile.save_profile()
        searched_profile = Profile.objects.get(id=self.new_profile.id)
        self.assertEqual(searched_profile, self.new_profile)
def test_delete_profile(self):
''''
Test to delete a user profile
'''
        self.new_profile.save_profile()
        self.new_profile.delete_profile()
profile = Profile.objects.all()
self.assertTrue(len(profile) == 0)
    def test_search_by_name(self):
        '''
        Test case to search for a profile by name
        '''
        self.new_profile.save_profile()
        search_result = self.new_profile.search_by_name(self.new_profile.name)
        searched_profile = Profile.objects.get(name=self.new_profile.name)
        self.assertTrue(searched_profile, search_result)
class CommentsTestCase(TestCase):
'''
Test case for the Comment class
'''
def setUp(self):
'''
Method that creates an instance of Comment class
'''
# Create a Comment instance
self.new_comment = Comments(comment='very nice')
def test_instance(self):
'''
Test case to check if self.new_comment in an instance of Comment class
'''
self.assertTrue(isinstance(self.new_comment, Comments))
def test_save_comment(self):
'''
Test case to save comment
'''
        self.new_comment.save_comment()
comment = Comments.objects.all()
self.assertTrue(comment)
    def test_update_comment(self):
        '''
        Test case to update a comment on an image
        '''
        self.new_comment.save_comment()
        comment_id = self.new_comment.id
        Comments.objects.filter(id=comment_id).update(comment="very nice indeed")
        self.assertEqual(Comments.objects.get(id=comment_id).comment, "very nice indeed")
def test_delete_comment(self):
''''
Test to delete a comment in an image
'''
        self.new_comment.save_comment()
        self.new_comment.delete_comment()
comment = Comments.objects.all()
self.assertTrue(len(comment) == 0)
|
import pytest
from progress_keeper import Progress
@pytest.fixture
def prog_no_var():
return Progress('tests/tmp/progress.cfg')
@pytest.fixture
def prog_with_vars():
return Progress('tests/tmp/progress_with_vars.cfg', vars=['tracker_1', 'tracker_2', 'tracker_3'])
def test_initialize_no_vars_def(prog_no_var):
assert prog_no_var.values['last_index_processed'] == 0
def test_initialize_with_vars(prog_with_vars):
assert prog_with_vars.values['tracker_1'] == 0
assert prog_with_vars.values['tracker_2'] == 0
assert prog_with_vars.values['tracker_3'] == 0
def test_increment_no_vars_def_1(prog_no_var):
prog_no_var.increment()
assert prog_no_var.values['last_index_processed'] == 1
def test_increment_no_vars_def_2(prog_no_var):
prog_no_var.increment()
prog_no_var.increment()
prog_no_var.increment()
assert prog_no_var.values['last_index_processed'] == 3
def test_increment_no_vars_def_does_not_exist(prog_no_var):
prog_no_var.increment('do_not_exist')
assert prog_no_var.values['last_index_processed'] == 0
def test_increment_with_vars_1(prog_with_vars):
prog_with_vars.increment('tracker_1')
prog_with_vars.increment('tracker_3')
assert prog_with_vars.values['tracker_1'] == 1
assert prog_with_vars.values['tracker_3'] == 1
def test_increment_with_vars_2(prog_with_vars):
prog_with_vars.increment('tracker_1')
prog_with_vars.increment('tracker_2')
prog_with_vars.increment('tracker_2')
prog_with_vars.increment('tracker_3')
prog_with_vars.increment('tracker_3')
prog_with_vars.increment('tracker_3')
prog_with_vars.increment()
assert prog_with_vars.values['tracker_1'] == 1
assert prog_with_vars.values['tracker_2'] == 2
assert prog_with_vars.values['tracker_3'] == 3
def test_increment_with_vars_does_not_exist(prog_with_vars):
prog_with_vars.increment('do_not_exist')
assert prog_with_vars.values['tracker_1'] == 0
assert prog_with_vars.values['tracker_2'] == 0
assert prog_with_vars.values['tracker_3'] == 0
def test_reset_no_vars_def(prog_no_var):
prog_no_var.increment()
prog_no_var.increment()
prog_no_var.reset()
assert prog_no_var.values['last_index_processed'] == 0
def test_reset_no_vars_def_does_not_exist(prog_no_var):
prog_no_var.increment()
prog_no_var.increment()
prog_no_var.reset('does_not_exist')
assert prog_no_var.values['last_index_processed'] == 2
def test_reset_with_vars_1(prog_with_vars):
prog_with_vars.increment('tracker_1')
prog_with_vars.increment('tracker_2')
prog_with_vars.increment('tracker_3')
prog_with_vars.increment('tracker_3')
prog_with_vars.increment('tracker_3')
prog_with_vars.increment('tracker_3')
prog_with_vars.reset('tracker_1')
prog_with_vars.reset('tracker_3')
assert prog_with_vars.values['tracker_1'] == 0
assert prog_with_vars.values['tracker_2'] == 1
assert prog_with_vars.values['tracker_3'] == 0
def test_reset_with_vars_not_exist(prog_with_vars):
prog_with_vars.increment('tracker_1')
prog_with_vars.increment('tracker_2')
prog_with_vars.increment('tracker_3')
prog_with_vars.increment('tracker_3')
prog_with_vars.increment('tracker_3')
prog_with_vars.increment('tracker_3')
prog_with_vars.reset('tracker_1')
prog_with_vars.reset('tracker_3')
prog_with_vars.reset('does_not_exist')
assert prog_with_vars.values['tracker_1'] == 0
assert prog_with_vars.values['tracker_2'] == 1
assert prog_with_vars.values['tracker_3'] == 0
def test_delete(prog_no_var):
prog_no_var.delete()
|
from .base import *
from .naive import *
from .naive_fast import *
|
class Solution(object):
def balancedStringSplit(self, s):
"""
:type s: str
:rtype: int
"""
# Runtime: 20 ms
# Memory: 13.5 MB
stack = 0
n_balanced = 0
for char in s:
if char == "L":
stack += 1
else:
stack -= 1
if stack == 0:
n_balanced += 1
return n_balanced
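# Usage sketch (not part of the original solution): "RLRRLLRLRL" can be split
# into "RL", "RRLL", "RL" and "RL", so the expected result is 4.
if __name__ == "__main__":
    print(Solution().balancedStringSplit("RLRRLLRLRL"))  # -> 4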
|
"""
PLAYERS MANAGEMENT
"""
from season.models import Player
__all__ = ['isPlayerValid','getAllPlayers','getPlayerName']
def isPlayerValid(player_id):
"""
check whether player id is valid in the DB
"""
try:
Player.objects.get(id=player_id)
except Player.DoesNotExist:
return False
else:
return True
def getAllPlayers():
"""
returns the entire list of players
"""
return Player.objects.order_by('name').all()
def getPlayerName(player_id):
"""
returns the player name corresponding to the given id
"""
try:
return Player.objects.get(id=player_id).name
except Player.DoesNotExist:
return None |
from unittest import skipIf
from decouple import config
from django.test import TestCase, override_settings
from google.api_core.client_options import ClientOptions
from google.auth.credentials import AnonymousCredentials
from google.cloud.exceptions import NotFound
from google.cloud.storage import Blob, Bucket, Client
from physionet.gcs import GCSObject, GCSObjectException
from physionet.settings.base import StorageTypes
TEST_GCS_INTEGRATION = config('TEST_GCS_INTEGRATION', default=True, cast=bool)
GCS_HOST = config('GCS_HOST', default=None)
@skipIf(
(GCS_HOST is None or not TEST_GCS_INTEGRATION),
'Test GCS-backend integration only on dockerized CI/CD pipeline.',
)
@override_settings(
STORAGE_TYPE=StorageTypes.GCP,
DEFAULT_FILE_STORAGE='physionet.storage.MediaStorage',
STATICFILES_STORAGE='physionet.storage.StaticStorage',
GCP_STORAGE_BUCKET_NAME='physionet-media',
GCP_STATIC_BUCKET_NAME='physionet-static',
GS_PROJECT_ID='test_project_id',
GCP_BUCKET_LOCATION='us-west1',
)
class TestGCSObject(TestCase):
@classmethod
def setUpTestData(cls):
cls.gcs_server_endpoint = f'http://{config("GCS_HOST", default="gcs")}:4443'
cls.bucket_name = 'test'
cls.path = 'physionet/users/admin/profile.jpg'
def tearDown(self):
try:
self._clear_gcs_bucket(self.bucket_name)
except NotFound:
pass
def _clear_gcs_bucket(self, name):
self._get_gcs_client().get_bucket(name).delete(force=True)
def _get_gcs_client(self):
return Client(
project="test_project_id",
credentials=AnonymousCredentials(),
client_options=ClientOptions(api_endpoint=self.gcs_server_endpoint),
)
def _monkeypatch_gcsobject(self, gcs_object):
gcs_object._storage._client = self._get_gcs_client()
return gcs_object
@override_settings(STORAGE_TYPE=StorageTypes.LOCAL)
def test_init_raises_exception_when_storage_types_is_local(self):
self.assertRaises(GCSObjectException, GCSObject, self.path)
@override_settings(STORAGE_TYPE=StorageTypes.GCP)
def test_init_when_storage_type_is_gcp(self):
gcs_object = self._monkeypatch_gcsobject(GCSObject(self.path))
self.assertEqual(gcs_object.bucket.name, 'physionet')
self.assertEqual(gcs_object._object_name, 'users/admin/profile.jpg')
def test_repr(self):
gcs_object = self._monkeypatch_gcsobject(GCSObject(self.path))
self.assertEqual(
repr(gcs_object),
'GCSObject(Bucket=physionet, Object="users/admin/profile.jpg")',
)
def test_bucket_property_return_bucket_proper_object(self):
gcs_object = self._monkeypatch_gcsobject(GCSObject(self.path))
self.assertIsInstance(gcs_object.bucket, Bucket)
self.assertEqual(gcs_object.bucket.name, 'physionet')
def test_blob_property_return_proper_object(self):
gcs_object = self._monkeypatch_gcsobject(GCSObject(self.path))
self.assertIsInstance(gcs_object.blob, Blob)
self.assertEqual(gcs_object.blob.name, 'users/admin/profile.jpg')
def test_mkdir_makes_directories(self):
# GIVEN
gcs_object = self._monkeypatch_gcsobject(GCSObject('test/dir1/dir2/'))
gcs_object.client.create_bucket('test')
# WHEN
gcs_object.mkdir()
# THEN
self.assertTrue(gcs_object.bucket.get_blob('dir1/dir2/'))
def test_mkdir_doesnt_work_when_object_name_is_taken(self):
# GIVEN
gcs_object = self._monkeypatch_gcsobject(GCSObject('test/dir1/dir2/'))
gcs_object.client.create_bucket('test')
gcs_object.mkdir()
# WHEN + THEN
self.assertRaises(GCSObjectException, gcs_object.mkdir)
def test_size_when_object_is_file(self):
# GIVEN
gcs_object = self._monkeypatch_gcsobject(GCSObject('test/dir1/notes.txt'))
gcs_object.client.create_bucket('test')
gcs_object.upload_from_string('content')
# WHEN + THEN
self.assertEqual(gcs_object.size(), len('content'))
def test_size_when_object_is_directory(self):
# GIVEN
gcs_object = self._monkeypatch_gcsobject(GCSObject('test/dir1/'))
gcs_object_1 = self._monkeypatch_gcsobject(GCSObject('test/dir1/notes1.txt'))
gcs_object_2 = self._monkeypatch_gcsobject(GCSObject('test/dir1/notes2.txt'))
# create a bucket
gcs_object.client.create_bucket('test')
# put files into a bucket
gcs_object_1.upload_from_string('content')
gcs_object_2.upload_from_string('content')
# WHEN + THEN
self.assertEqual(gcs_object.size(), len('content') * 2)
def test_rm_deletes_all_files_in_directory_when_object_is_directory(self):
# GIVEN
gcs_object = self._monkeypatch_gcsobject(GCSObject('test/dir1/'))
gcs_object_1 = self._monkeypatch_gcsobject(GCSObject('test/dir1/notes1.txt'))
gcs_object_2 = self._monkeypatch_gcsobject(GCSObject('test/dir1/notes2.txt'))
# create a bucket
gcs_object.client.create_bucket('test')
# put files into a bucket
gcs_object_1.upload_from_string('content')
gcs_object_2.upload_from_string('content')
# WHEN
gcs_object.rm()
# THEN
self.assertEqual(gcs_object.size(), 0)
def test_rm_removes_file_when_object_is_file(self):
# GIVEN
gcs_object = self._monkeypatch_gcsobject(GCSObject('test/dir/file.jpg'))
gcs_object.client.create_bucket('test')
gcs_object.upload_from_string('content')
# WHEN
gcs_object.rm()
# THEN
        dir_ = self._monkeypatch_gcsobject(GCSObject('test/dir/'))
self.assertEqual(dir_.size(), 0)
def test_cp_copies_file_to_directory(self):
# GIVEN
gcs_object = self._monkeypatch_gcsobject(GCSObject('test/dir/file.jpg'))
gcs_object_1 = self._monkeypatch_gcsobject(GCSObject('test/dir/'))
# create a bucket
gcs_object.client.create_bucket('test')
# put a file into a bucket
gcs_object.upload_from_string('content')
# WHEN
gcs_object_1.cp(self._monkeypatch_gcsobject(GCSObject('test/dir_copied/')))
# THEN
self.assertEqual(gcs_object_1.size(), len('content'))
self.assertEqual(gcs_object.size(), len('content'))
def test_mv_moves_file_when_object_is_file(self):
# GIVEN
gcs_object = self._monkeypatch_gcsobject(GCSObject('test/dir/file.jpg'))
gcs_object_1 = self._monkeypatch_gcsobject(GCSObject('test/dir/'))
gcs_object_2 = self._monkeypatch_gcsobject(GCSObject('test/dir_copied/'))
# create a bucket
gcs_object.client.create_bucket('test')
# put a file into a bucket
gcs_object.upload_from_string('content')
# WHEN
gcs_object_1.mv(self._monkeypatch_gcsobject(GCSObject('test/dir_copied/')))
# THEN
self.assertEqual(gcs_object_2.size(), len('content'))
self.assertEqual(gcs_object.exists(), False)
def test_rename_file(self):
# GIVEN
gcs_object = self._monkeypatch_gcsobject(GCSObject('test/file.jpg'))
gcs_object.client.create_bucket('test')
gcs_object.upload_from_string('content')
gcs_object_renamed = self._monkeypatch_gcsobject(GCSObject('test/renamed.jpg'))
# WHEN
gcs_object.rename(gcs_object_renamed)
# THEN
self.assertFalse(gcs_object.exists())
self.assertTrue(gcs_object_renamed.exists())
self.assertEqual(gcs_object_renamed.size(), len('content'))
|
from __future__ import annotations
import json
import statistics
from pathlib import Path
from typing import Iterable, Union
import numpy as np
from vmaf.tools.bd_rate_calculator import BDrateCalculator
import tester.core.test as test
from tester.core.log import console_log
from tester.core.video import VideoFileBase, RawVideoSequence
from tester.encoders.base import QualityParam, EncoderBase
def bd_distortion(rate1, distortion1, rate2, distortion2):
"""Get Bjøntegaard Delta -distortion for two RD-curves.
args:
- rateN: list of bitrates for line N.
- psnrN: list of psnr values for line N.
returns: Average distortion difference (BD-distortion).
"""
# Sort inputs according to rate.
values1 = sorted(zip(rate1, distortion1))
rate1, distortion1 = zip(*values1)
values2 = sorted(zip(rate2, distortion2))
rate2, distortion2 = zip(*values2)
# Change inputs into numpy arrays for bdrint.
distortion1 = np.array(distortion1)
rate1 = np.array(rate1)
distortion2 = np.array(distortion2)
rate2 = np.array(rate2)
    # Select integration limits such that both curves are defined.
min_rate = np.log10(max(np.min(rate1), np.min(rate2)))
max_rate = np.log10(min(np.max(rate1), np.max(rate2)))
area1 = bdrint(distortion1, np.log10(rate1), min_rate, max_rate)
area2 = bdrint(distortion2, np.log10(rate2), min_rate, max_rate)
avg = (area2 - area1) / (max_rate - min_rate)
return avg
def bdrint(y, x, low_x, high_x):
"""Integrate a curve defined by rate and distortion between points low and high.
args:
y: numpy.array of 4 y coordinates.
x: numpy.array of 4 x coordinates.
        low_x: lower limit of integration
        high_x: upper limit of integration
"""
def pchipend(tangent, d1, d2):
if tangent * d1 < 0:
# If tangent is different sign than the adjacent slope.
tangent = 0
elif d1 * d2 < 0 and abs(tangent) > abs(3 * d1):
# If the adjacent point is a peak.
tangent = 3 * d1
return tangent
def end_tangent(h1, h2, d1, d2):
return ((2*h1 + h2) * d1 - h1 * d2) / (h1 + h2)
def mid_tangent(h1, h2, d1, d2):
        # Divides by zero if h2 == h1 and d1 == -d2, but that's not likely
# to happen in real RD curves. Happened in my tests though.
return (3*h1 + 3*h2) / ( (2*h2 + h1) / d1 + (h2 + 2*h1) / d2)
h = x[1:] - x[:-1]
delta = (y[1:] - y[:-1]) / h
    # These are the standard PCHIP (piecewise cubic Hermite) tangent formulas;
    # they are meant to give a smooth, monotonicity-preserving curve.
t = np.zeros(4)
t[0] = end_tangent(h[0], h[1], delta[0], delta[1])
t[1] = mid_tangent(h[0], h[1], delta[0], delta[1])
t[2] = mid_tangent(h[1], h[2], delta[1], delta[2])
t[3] = end_tangent(h[2], h[1], delta[2], delta[1])
# Modify end points to ensure the interpolated curve doesn't overshoot the
# second data point.
t[0] = pchipend(t[0], delta[0], delta[1])
t[3] = pchipend(t[3], delta[2], delta[1])
result = np.float64(0)
c = (3*delta - 2*t[:-1] - t[1:]) / h
b = (t[:-1] - 2*delta + t[1:]) / (h * h)
for i in range(3):
# Constrain the integration between low_x and high_x.
s0 = min(high_x, max(low_x, x[i])) - x[i]
s1 = min(high_x, max(low_x, x[i+1])) - x[i]
assert(s0 <= s1)
result += (s1 - s0) * y[i]
result += (s1**2 - s0**2) * t[i] / 2
result += (s1**3 - s0**3) * c[i] / 3
result += (s1**4 - s0**4) * b[i] / 4
return result
class EncodingRunMetrics:
"""
Represents the data for a single encoding run
This is essentially stateless itself, since it always reads and writes from file
"""
def __init__(self,
file_path: Path):
self.filepath: Path = file_path
self._data = {}
if self.filepath.exists():
self._read_in()
def __getitem__(self, item):
self._read_in()
return self._data[item]
def __setitem__(self, key, value):
self._read_in()
self._data[key] = value
self._write_out()
def _write_out(self) -> None:
with self.filepath.open("w") as file:
json.dump(self._data, file)
def _read_in(self) -> None:
try:
with self.filepath.open("r") as file:
self._data = json.load(file)
except FileNotFoundError:
pass
def __contains__(self, item):
self._read_in()
return item in self._data
@property
def has_calculated_metrics(self):
"""
Used to determine if the metric object has any calculated metrics such as PSNR, SSIM, or VMAF
The two non-calculated and always existing metrics are bitrate and encoding speed
"""
return len(self._data) >= 3
def clear(self):
self._data = {}
self._write_out()
class EncodingQualityRunMetrics:
"""
    Holds the data of every encoding round for a single quality parameter value
"""
def __init__(self, rounds: int, base_path: Path):
self._rounds = [EncodingRunMetrics(Path(str(base_path).format(x + 1))) for x in range(rounds)]
def __getitem__(self, item: Union[str, int]):
if isinstance(item, str):
# Calculates either the avg or stdev of selected metric
assert (item.endswith("_avg") or item.endswith("_stdev"))
value, type_ = self.__split_suffix(item, ["_avg", "_stdev"])
all_ = [x[value] for x in self._rounds]
if type_ == "avg":
return sum(all_) / len(all_)
elif type_ == "stdev":
return statistics.stdev(all_) if len(all_) > 1 else 0.0
elif isinstance(item, int):
return self._rounds[item - 1]
def __contains__(self, item):
if isinstance(item, str):
assert (item.endswith("_avg") or item.endswith("_stdev"))
value, _ = self.__split_suffix(item, ["_avg", "_stdev"])
return all(value in x for x in self._rounds)
@staticmethod
def __split_suffix(item: str, suffixes):
for suffix in suffixes:
if item.endswith(suffix):
return item.replace(suffix, ""), suffix[1:]
def speedup(self, anchor: EncodingQualityRunMetrics):
return anchor["encoding_time_avg"] / self["encoding_time_avg"]
class SequenceMetrics:
def __init__(self,
path_prefix: Path,
sequence: VideoFileBase,
quality_type: QualityParam,
quality_values: Iterable,
rounds: int):
base_paths = {x: path_prefix /
f"{sequence.get_constructed_name()}_{quality_type.short_name}{x}_{{}}_metrics.json"
for x in quality_values}
self.__sequence = sequence
self.__qp_type = quality_type
self._prefix = path_prefix.name
self._data = {x: EncodingQualityRunMetrics(rounds, base_paths[x]) for x in quality_values}
def get_quality_with_bitrates(self, quality_metric: str):
return [(item["bitrate_avg"], item[f"{quality_metric}_avg"]) for item in self._data.values()]
def _compute_bdbr_to_anchor(self, anchor: SequenceMetrics, quality_metric: str):
return self._compute_bdbr(anchor.get_quality_with_bitrates(quality_metric),
self.get_quality_with_bitrates(quality_metric))
def _compute_bd_distortion_to_anchor(self, anchor: SequenceMetrics, quality_metric: str):
anchor_data = sorted(anchor.get_quality_with_bitrates(quality_metric), key=lambda x:x[0])
own_data = sorted(self.get_quality_with_bitrates(quality_metric), key=lambda x:x[0])
return bd_distortion([x[0] for x in anchor_data], [x[1] for x in anchor_data],
[x[0] for x in own_data], [x[1] for x in own_data], )
def compare_to_anchor(self, anchor: SequenceMetrics, quality_metric: str):
if quality_metric == "encoding_time":
return self._average_speedup(anchor)
elif quality_metric.endswith("-bddistortion"):
return self._compute_bd_distortion_to_anchor(anchor, quality_metric[:-13])
return self._compute_bdbr_to_anchor(anchor, quality_metric)
def _average_speedup(self, anchor: SequenceMetrics):
temp = [item["encoding_time_avg"] for item in self._data.values()]
own_average_time = sum(temp) / len(temp)
temp = [item["encoding_time_avg"] for item in anchor._data.values()]
other_average_time = sum(temp) / len(temp)
return other_average_time / own_average_time
def metric_overlap(self, anchor: SequenceMetrics, metric: str):
if anchor == self:
return 1
if not metric.endswith("_avg"):
metric = metric + "_avg"
rates = [item[metric] for item in self._data.values()]
anchor_rates = [item[metric] for item in anchor._data.values()]
start = max(min(rates), min(anchor_rates))
stop = min(max(rates), max(anchor_rates))
return (stop - start) / (max(anchor_rates) - min(anchor_rates))
def rd_curve_crossings(self, anchor: SequenceMetrics, quality_metric: str):
def linear_equ(first, second):
slope = (second[1] - first[1]) / (second[0] - first[0])
b = first[1] - slope * first[0]
return lambda x: slope * x + b
if self == anchor:
return 0
own = self.get_quality_with_bitrates(quality_metric)
other = anchor.get_quality_with_bitrates(quality_metric)
first_index = 0
second_index = 0
crossings = 0
while True:
if first_index == len(own) - 1 or second_index == len(other) - 1:
break
if own[first_index + 1][0] < other[second_index][0]:
first_index += 1
continue
if own[first_index][0] > other[second_index + 1][0]:
second_index += 1
continue
equ1 = linear_equ(own[first_index], own[first_index + 1])
equ2 = linear_equ(other[second_index], other[second_index + 1])
if own[first_index][0] < other[second_index][0]:
start = equ1(other[second_index][0]) - other[second_index][1]
else:
start = own[first_index][1] - equ2(own[first_index][0])
if own[first_index + 1][0] > other[second_index + 1][0]:
stop = equ1(other[second_index + 1][0]) - other[second_index + 1][1]
else:
stop = own[first_index + 1][1] - equ2(own[first_index + 1][0])
if not start or not stop:
console_log.warning(f"Potential overlap between {self} and {anchor} that may or may not be recorded.")
if start * stop < 0:
crossings += 1
if own[first_index + 1][0] < other[second_index + 1][0]:
first_index += 1
else:
second_index += 1
return crossings
@staticmethod
def _compute_bdbr(anchor_values, compared_values):
try:
bdbr = BDrateCalculator.CalcBDRate(
sorted(anchor_values, key=lambda x: x[0]),
sorted(compared_values, key=lambda x: x[0]),
)
# no overlap
if bdbr == -10000:
bdbr = float("NaN")
except AssertionError:
bdbr = float("NaN")
return bdbr
def __getitem__(self, item):
return self._data[item]
def __repr__(self):
return f"{self._prefix}/{self.__sequence}/{self.__qp_type.pretty_name}"
class TestMetrics:
def __init__(self, test_instance: "Test", sequences):
encoder: EncoderBase = test_instance.encoder
base_path = encoder.get_output_dir(test_instance.subtests[0].param_set, test_instance.env)
self.seq_data = {
seq: SequenceMetrics(base_path,
seq,
test_instance.quality_param_type,
test_instance.quality_param_list,
test_instance.rounds)
for seq
in sequences
}
def __getitem__(self, item):
if isinstance(item, RawVideoSequence):
return self.seq_data[item]
elif isinstance(item, test.EncodingRun):
return self.seq_data[item.input_sequence][item.param_set.get_quality_param_value()][item.round_number]
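# Minimal illustrative usage (a sketch only: the rate/PSNR numbers and the file
# name below are made up; "bitrate" and "encoding_time" mirror keys this module
# reads elsewhere). Running it requires the same packages this module imports.
if __name__ == "__main__":
    # BD-distortion between two made-up RD curves (rates in kbps, PSNR in dB).
    anchor_rate, anchor_psnr = [1000, 2000, 4000, 8000], [32.0, 35.1, 38.0, 40.5]
    test_rate, test_psnr = [950, 1900, 3800, 7600], [32.3, 35.5, 38.4, 40.9]
    print("BD-PSNR:", bd_distortion(anchor_rate, anchor_psnr, test_rate, test_psnr))
    # Dict-like, file-backed metrics for a single encoding run.
    demo_path = Path("demo_run_metrics.json")
    run = EncodingRunMetrics(demo_path)
    run["bitrate"] = 1000.0
    run["encoding_time"] = 12.5
    print(run["bitrate"], run.has_calculated_metrics)
    demo_path.unlink()  # remove the temporary file again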
|
# YOU HAVE TO INSTALL DJANGO EVERY TIME
# pip install django
# django-admin startproject projeto . # Create the main Django project folder. NOTE: always remember the dot at the end
# python manage.py startapp blog # Create the blog app
# python manage.py runserver # Run Django and show it in the browser
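# A possible next step (an illustrative sketch; the project and app names follow
# the commands above): register the new app in projeto/settings.py, e.g.
#
#   INSTALLED_APPS = [
#       'django.contrib.admin',
#       'django.contrib.auth',
#       'django.contrib.contenttypes',
#       'django.contrib.sessions',
#       'django.contrib.messages',
#       'django.contrib.staticfiles',
#       'blog',  # the app created with startapp above
#   ]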
|
from AWERA import config, reference_locs
from AWERA.validation.validation import ValidationPlottingClustering
import time
from AWERA.utils.convenience_utils import write_timing_info
since = time.time()
if __name__ == '__main__':
settings = {
'Data': {'n_locs': 500,
'location_type': 'europe'},
'Clustering': {
'training': {
'n_locs': 500,
'location_type': 'europe'
}
},
}
settings['General'] = {'use_memmap': False}
settings['Processing'] = {'n_cores': 1}
print(settings)
    # Single location evaluation
# loc = reference_locs[0]
# sample_id = 27822
# Update settings to config
config.update(settings)
# Initialise AWERA eval with chosen config
val = ValidationPlottingClustering(config)
working_title = 'Plotting clustering validation'
# val.plot_all_single_loc(min_n_pcs=3,
# plot_height_dependence=False,
# plot_single_velocity_dependence=False,
# plot_velocity_dependence=True,
# plot_backscaling=True)
settings = {
'Data': {'n_locs': -1,
'location_type': 'europe'},
'Clustering': {
'n_clusters': 80,
'eval_n_pc_up_to': 5,
'training': {
'n_locs': 1000,
'location_type': 'europe'
}
},
}
val.config.update(settings)
# val.plot_cluster_diff_maps(eval_type='cluster',
# sel='v_greater_3') # 'v_greater_3') # 'full'
val.plot_cluster_diff_maps(eval_type='cluster',
sel='v_greater_5',
locs_slice=(23, 1000))
settings = {
'Data': {'n_locs': 1000,
'location_type': 'europe_ref'},
'Clustering': {
'n_clusters': 80,
'eval_n_pc_up_to': 5,
'training': {
'n_locs': 5000,
'location_type': 'europe'
}
},
}
val.config.update(settings)
# val.plot_all_single_loc(min_n_pcs=5,
# plot_height_dependence=False,
# plot_single_velocity_dependence=False,
# plot_velocity_dependence=True,
# plot_backscaling=True)
# sel_list = ['full', 'v_greater_3', 'v_greater_1']
# for sel in sel_list:
# # TODO abs = rel??
# val.plot_cluster_loc_diffs(training_locs=[# (5, 'europe_ref'), # TODO this will be 200
# (500, 'europe'),
# (1000, 'europe'),
# (5000, 'europe')],
# # TODO data will be 1k ref and 1k
# data_locs=[(1000, 'europe_ref')]*3, # [(5, 'europe_ref')]*4,
# sel=sel,
# n_pcs=5)
# --------------------------------------
print('Done.')
print('------------------------------ Config:')
print(val.config)
print('------------------------------ Time:')
write_timing_info('{} AWERA run finished.'.format(working_title),
time.time() - since)
# plt.show()
|
"""Exceptions used by the thermostat module."""
class SendFailure(Exception):
"""Failed to send a message to the remote XBee."""
class CrcVerificationFailure(Exception):
"""CRC calculation didn't match expected value."""
class RetryException(Exception):
"""Multiple tries have failed."""
class FailedToGetState(Exception):
"""Failed to get the thermostat state."""
class FailedToUpdateState(Exception):
"""Failed to update the thermostat state."""
class BadChoiceException(Exception):
"""If a bad choice is made."""
|
class Menu:
def __init__(self, x, y):
self.menu_items:list = []
self.active_item:int = 0
self.menu_title:str = ''
self.x = x
self.y = y
self.visible = True
self.priority = 2
def __str__(self) -> str:
return_string = ''
for item in self.menu_items:
return_string = f"{return_string}\n{item}"
return return_string
def __repr__(self) -> str:
return_string = ''
for item in self.menu_items:
return_string = f"{return_string}\n{item}"
return return_string
def draw(self, root_console, tick_count):
if not self.visible:
return
try:
start_height = self.y
start_width = self.x
for index in range(len(self.menu_items)):
#TODO: Print entire string not by char index
for char_index in range(len(self.menu_items[index].message)):
if self.menu_items[index].disabled:
root_console.print(start_width + char_index, start_height + index, self.menu_items[index].message[char_index],fg=self.menu_items[index].disabled_color)
else:
if index == self.active_item:
root_console.print(start_width + char_index, start_height + index, self.menu_items[index].message[char_index],fg=self.menu_items[index].active_color)
else:
root_console.print(start_width + char_index, start_height + index, self.menu_items[index].message[char_index],fg=self.menu_items[index].default_color)
except IndexError:
raise IndexError |
#!/usr/bin/env python
from siconos.io.mechanics_run import MechanicsHdf5Runner
import siconos.numerics as Numerics
import chute
import rocas
import random
import sys
import numpy
if len(sys.argv) < 3:
dist = 'uniform'
mu = 0.1
else:
dist = sys.argv[1]
mu = sys.argv[2]
if dist not in ['uniform', 'double', 'exp']:
print("dist = [uniform | double | exp]")
sys.exit(1)
if float(mu) < 0.1 or float(mu) > 2.0:
print("mu = [0.1 .. 2.0]")
sys.exit(1)
fn = 'chute_con_rocas-{0}-mu-{1}.hdf5'.format(dist,mu)
random.seed(0)
box_height = 3.683
box_length = 6.900
box_width = 3.430
density=2500
plane_thickness = 0.2
cube_size = 0.1
with MechanicsHdf5Runner(mode='w', io_filename=fn) as io:
ch = chute.create_chute(io, box_height = box_height,
box_length = box_length,
box_width = box_width,
plane_thickness = plane_thickness,
scale = 1, trans = [-0.6, -1.8, -1])
rcs = rocas.create_rocas(io, n_layer=200, n_row=2, n_col=16,
x_shift=2.0, roca_size=0.1, top=3,
rate=0.2, density=density)
io.add_Newton_impact_friction_nsl('contact', mu=1.0, e=0.01)
with MechanicsHdf5Runner(mode='r+', io_filename=fn) as io:
io.run(t0=0,
T=4,
h=1e-4,
multipoints_iterations=True,
theta=1.0,
Newton_max_iter=1,
solver=Numerics.SICONOS_FRICTION_3D_NSGS,
itermax=1000,
tolerance=1e-3,
output_frequency=10)
|
import json
import requests
import os
def main():
folder_setup()
download_trials()
def folder_setup():
current_directory = os.getcwd()
studies_directory = os.path.join(current_directory, r'Full_Studies')
if not os.path.exists(studies_directory):
os.makedirs(studies_directory)
def build_url(expr: str='Cancer',
country: str='United States',
status: str='Recruiting',
study_type: str='Interventional',
field_names: list=['NCTId','OfficialTitle','StartDate',
'PrimaryCompletionDate','LastUpdatePostDate',
'Condition','Gender','MaximumAge','EligibilityCriteria',
'CentralContactName','CentralContactPhone','CentralContactEMail',
'LocationFacility','LocationCity','LocationState',
'LocationZip','LeadSponsorName'],
min_rnk: int=1,
max_rnk: int=1000,
fmt: str='json'
) -> str:
"""returns api url for the study fields api on clinicaltrials.gov (https://clinicaltrials.gov/api/gui/demo/simple_study_fields).
expr - defaults to Cancer trials. However, any expression one might consider for clinicaltrials.gov.
country - defaults to The United States. However, any country can be entered.
status - defaults to Recruiting. However, the following status can also be passed:
Not yet recruiting: Participants are not yet being recruited
Recruiting: Participants are currently being recruited, whether or not any participants have yet been enrolled
Enrolling by invitation: Participants are being (or will be) selected from a predetermined population
Active, not recruiting: Study is continuing, meaning participants are receiving an intervention or being examined, but new participants are not currently being recruited or enrolled
Completed: The study has concluded normally; participants are no longer receiving an intervention or being examined (that is, last participant’s last visit has occurred)
Suspended: Study halted prematurely but potentially will resume
Terminated: Study halted prematurely and will not resume; participants are no longer being examined or receiving intervention
Withdrawn: Study halted prematurely, prior to enrollment of first participant
study_type - defaults to Interventional trials. However, Observational can also be passed.
field_names - a list of data elements and their corresponding API fields as described in the crosswalk documentation. (https://clinicaltrials.gov/api/gui/ref/crosswalks)
    min_rnk - defaults to 1. Can be any integer.
max_rnk - defaults to 1000 records. Can range from 1 - 1000.
fmt - defaults to json. However, csv and xml can also be passed.
"""
base_url = 'https://clinicaltrials.gov/api/query/study_fields?'
if not expr:
expr = ''
else:
expr = f"{expr.replace(' ', '+')}+AND+"
if not status:
status = ''
else:
status = f"{status.replace(' ', '+')}"
if study_type == 'Observational' or study_type == 'Interventional':
study_type = study_type
else:
print(""" This paramater only accepts Observational or Interventional.
The url will not build if other parameters are entered.
""")
country = country.replace(' ', '+')
age = 'AND+AREA%5BMinimumAge%5D18+Years&'
fields = "%2C+".join(field_names)
api_url = f'{base_url}expr={expr}SEARCH%5BLocation%5D%28AREA%5BLocationCountry%5D{country}+AND+AREA%5BLocationStatus%5D{status}%29+AND+AREA%5BStudyType%5D{study_type}+{age}fields={fields}&min_rnk={min_rnk}&max_rnk={max_rnk}&fmt={fmt}'
return api_url
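# Illustrative call (a sketch only; the search expression and the trimmed field
# list are example values, not requirements of this function):
#   url = build_url(expr='Breast Cancer', status='Recruiting',
#                   field_names=['NCTId', 'OfficialTitle'], max_rnk=100)
#   r = requests.get(url)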
def full_study_by_nct(trial_number: str):
full_study_url = f'https://clinicaltrials.gov/api/query/full_studies?expr={trial_number}&min_rnk=1&max_rnk=&fmt=json'
print(f'Downloading: {trial_number}')
# print(f'https://clinicaltrials.gov/ct2/show/{trial_number} \n')
r = requests.get(full_study_url)
if r.status_code != 200:
print(f'Bad url: {trial_number}')
with open('Full_Studies/http_error.txt', 'a+') as http_error:
http_error.write(f'Bad url: {trial_number},\n')
elif r.json()['FullStudiesResponse']['NStudiesReturned'] > 1:
print(f'\n Multiple NCT numbers for https://clinicaltrials.gov/ct2/show/{trial_number} \n')
with open('Full_Studies/trial_error.txt', 'a+') as trial_error:
trial_error.write(f'Trial number error: https://clinicaltrials.gov/ct2/show/{trial_number},\n')
elif r.json()['FullStudiesResponse']['NStudiesReturned'] == 0:
print(f'\n No NCT number for https://clinicaltrials.gov/ct2/show/{trial_number} \n')
with open('Full_Studies/trial_error.txt', 'a+') as trial_error:
trial_error.write(f'Trial number error: https://clinicaltrials.gov/ct2/show/{trial_number},\n')
else:
full_study = r.json()
with open(f'Full_Studies/{trial_number}.json', 'w+') as f:
json.dump(full_study, f, indent=2)
def download_trials():
url = build_url(expr='Cancer', field_names=['NCTId'])
r = requests.get(url)
if r.status_code != 200:
print(f'Bad url: {r}')
with open('Full_Studies/http_error.txt', 'a+') as http_error:
http_error.write(f'Bad url: {r},\n')
else:
ID = r.json()
for item in ID['StudyFieldsResponse']['StudyFields']:
nctid = ''.join(item['NCTId'])
full_study_by_nct(nctid)
print('\n-----Trial downloads complete-----\n')
if __name__=='__main__':
main()
|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 30 14:47:24 2020
@author: ToshY
"""
import numpy as np
import itertools
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn.metrics import accuracy_score, f1_score, cohen_kappa_score, \
confusion_matrix, precision_recall_curve, roc_curve, auc, \
precision_recall_fscore_support, classification_report, balanced_accuracy_score, \
average_precision_score
class Metrics():
"""
Providing metrics for a given model by returning the accuracy, precision,
recall, F1-score, Cohen's Kappa and a confusion matrix
Attributes
----------
cost : string
Name of the cost function (plotting purposes)
Methods
-------
load(y_test, y_classes, y_probabilties, btm=True)
Load the test and predicted data
accuracy()
Returns the accuracy
precision_recall(plot=True)
Returns the precision and recall with optional plot
f1()
Returns the F1-score
kappa()
Returns Cohen's Kappa
histogram(classes=None, plot=True, fs=(8,6))
Returns histogram of predicted classes
confusion(multi=True, plot=True, norm=False, title='Confusion matrix', cmap=plt.cm.coolwarm_r)
Returns confusion matrix with optional plot
confusion_derivations(confusion_matrix, multi=True)
Returns derivations of confusion matrix
class_report()
Returns Sklearn's classification report
cv_accuracy()
Returns accuracy plots for CV runs
loss_epoch()
Returns loss-epoch curve for training or CV data
ROC(plot=True, fs=(8,6))
Returns AUC, FPR, TPR and optional curve
precision_recall(plot=True, fs=(8,6))
Returns precision, recall, average_precision["micro"] and optional curve
"""
def __init__(self, cost):
# Loss string
self.loss_method = cost
def load(self, y_test, y_classes, y_probabilties, btm=True):
# Target y-values; sample-by-class
self.y = y_test
# Amount of classes; for plotting purposes
self.yc = y_classes
self.len_classes = self.y.shape[1]
# Predicted y-values; sample-by-class
self.y_prob = y_probabilties
# Classify
if btm:
# Initially multiclass
self.y_1D, self.y_hat_1D, self.y_hat_1D_raw = self._classify()
else:
# Initially binary
self.y_1D, self.y_hat_1D, self.y_hat_1D_raw = self._classify_1D()
def accuracy(self):
return accuracy_score( self.y_1D, self.y_hat_1D )
def balanced_accuracy(self):
return balanced_accuracy_score( self.y_1D, self.y_hat_1D )
def f1(self):
return f1_score( self.y_1D, self.y_hat_1D )
def kappa(self):
return cohen_kappa_score( self.y_1D, self.y_hat_1D )
def histogram(self, classes=None, plot=True, fs=(8,6)):
""" Histogram plot """
# Plot colours
col = cycle('grbcmk')
kwargs = dict(alpha=0.5, bins=100)
plt.figure(94, figsize=fs)
fig = plt.gcf()
for i in range(self.y_prob.shape[-1]):
plt.hist(self.y_prob[:,i], **kwargs, color=next(col), label=classes[i])
plt.title("Distribution of predicted probabilities")
plt.ylabel('Frequency')
plt.xlabel('Probability')
plt.xlim(0,1)
plt.xticks(self._listrange(0,1,1/10))
if self.y_prob.shape[-1] > 1:
plt.legend(loc="best")
plt.show()
return fig
def confusion(self, multi=True, plot=True, norm=False, title='Confusion matrix', cmap=plt.cm.coolwarm_r):
""" Return confusion matrix with plot option"""
# The confusion matrix
cm = confusion_matrix(self.y_1D, self.y_hat_1D)
# Derivations
cm_metrics = self.confusion_derivations(cm, multi)
# Normalize
if norm:
cm = self._normalize(cm)
# Check for plot options
if plot:
if self.yc is not None:
classes = self.yc
else:
classes = sorted([*{*self.y_1D}])
plt.figure(97)
fig = plt.gcf()
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
tick_marks = np.array(self.yc)
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if norm else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('Target')
plt.xlabel('Prediction')
plt.show()
else:
fig = None
return cm, cm_metrics, fig
def confusion_derivations(self, confusion_matrix, multi=True):
""" Get derivations of confusion matrix """
# Basic derivations
if confusion_matrix.shape == (2,2) and multi is False:
# Binary
TN, FP, FN, TP = confusion_matrix.ravel()
else:
# Multiclass
FP = (confusion_matrix.sum(axis=0) - np.diag(confusion_matrix)).astype(float)
FN = (confusion_matrix.sum(axis=1) - np.diag(confusion_matrix)).astype(float)
TP = (np.diag(confusion_matrix)).astype(float)
TN = (confusion_matrix.sum() - (FP + FN + TP)).astype(float)
P = (TP+FN).astype(float)
N = (TN+FP).astype(float)
        # Add everything to a dictionary
metrics = {'P':P.astype(int),'N':N.astype(int), \
'TP':TP.astype(int),'FP':FP.astype(int),\
'TN':TN.astype(int),'FN':FN.astype(int)}
# Recall
metrics['TPR'] = TP/P
        # Specificity
metrics['TNR'] = TN/N
# Precision
metrics['PPV'] = TP/(TP+FP)
# Negative predictive value
metrics['NPV'] = TN/(TN+FN)
# False negative rate
metrics['FNR'] = 1-metrics['TPR']
# False positive rate
metrics['FPR'] = 1-metrics['TNR']
        # False discovery rate
        metrics['FDR'] = 1-metrics['PPV']
# False Omission rate
metrics['FOR'] = 1-metrics['NPV']
# Critical Success Index
metrics['TS'] = TP/(TP+FN+FP)
# Accuracy
metrics['ACC'] = (TP+TN)/(P+N)
# Balanced Accuracy
metrics['BACC'] = (metrics['TPR']+metrics['TNR'])/2
# Predicted positive condition rate
metrics['PPCR'] = (TP+FP)/(TP+FP+TN+FN)
# F1-score
metrics['F1'] = 2*(metrics['PPV']*metrics['TPR'])/(metrics['PPV']+metrics['TPR'])
# Matthews correlation coefficient
metrics['MCC'] = ((TP*TN)-(FP*FN))/(np.sqrt(((TP+FP)*(TP+FN)*(TN+FP)*(TN+FN))))
# Fowlkes-Mallows index
metrics['FM'] = np.sqrt(metrics['PPV']*metrics['TPR'])
# Return metrics
return metrics
def class_report(self):
""" Overview of precision, recall, accuracy and f1-score """
PPV, TPR, F1, support = precision_recall_fscore_support(self.y_1D, self.y_hat_1D)
# Console printable version
report = classification_report(self.y_1D, self.y_hat_1D)
return {'PPV':PPV,'TPR':TPR,'F1':F1,'Support':support,'verbose':report}
def cv_accuracy(self, train_results, plot=True, fs=(8,6)):
""" Accuracy for cross validation sets """
if plot:
plt.figure(95, figsize=fs)
fig = plt.gcf()
plist = []
for m, v in enumerate(train_results['validation_metrics']):
plist.append(v['ACC'])
plt.plot(list(range(1,m+2)),plist,lw=2)
plt.title("Accuracy {} cross validation ({}-fold)".format(train_results['cross_val'].replace('_','-').title(), m+1))
plt.xlabel("Fold")
plt.xticks(list(range(1,m+2)))
plt.ylabel('Accuracy')
plt.show()
else:
fig = None
return fig
def loss_epoch(self, train_results, plot=True, fs=(8,6)):
""" Plot error per epoch """
error = train_results['loss']
# Check if CV
if len(error)>1:
cv = True
else:
cv = False
if plot:
plt.figure(96, figsize=fs)
fig = plt.gcf()
if cv:
# CV training
for p in range(len(error['train'])):
plt.plot(*zip(*error['train'][p]),lw=2,label='fold {}'.format(p+1))
plt.title("{} per epoch ({}-fold CV)".format(self.loss_method, len(error['validation'])))
# Only show legend if it fits; +/- <10
if len(error['train']) <= 10:
plt.legend(loc="best")
else:
# Normal training
for p in range(1):
plt.plot(*zip(*error['train']),lw=2,label='error')
plt.title("{} per epoch".format(self.loss_method, len(error)))
plt.xlabel("Epoch")
plt.ylabel(self.loss_method)
plt.show()
else:
fig = None
return fig
def ROC(self, plot=True, fs=(8,6)):
""" TPR and FPR with optional plot """
# Plot colours
col = cycle('grbcmk')
fpr = {}
tpr = {}
roc_auc = {}
if plot:
plt.figure(98, figsize=fs)
fig = plt.gcf()
else:
fig = None
for i in range(self.len_classes):
fpr[i], tpr[i], _ = roc_curve(self.y[:, i], self.y_prob[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
if plot and self.len_classes == 1:
clabel = 'ROC curve'
else:
clabel = 'class {}'.format(i)
plt.plot(fpr[i], tpr[i], lw=2, label=clabel+' (AUC = {})'.format(round(roc_auc[i],3)), color=next(col))
# Micro average
roc_auc['micro'], fpr['micro'], tpr['micro'] = self._micro_roc(fpr, tpr)
# Macro average
roc_auc['macro'], fpr['macro'], tpr['macro'] = self._macro_roc(fpr, tpr)
if plot:
if self.len_classes > 1:
# Micro average
plt.plot(fpr['micro'], tpr['micro'], label='micro-average (AUC = {0:0.3f})' ''.format(roc_auc['micro']), color='deeppink', linestyle=(0, (1, 1)), lw=3)
# Macro average
plt.plot(fpr['macro'], tpr['macro'], label='macro-average (AUC = {0:0.3f})' ''.format(roc_auc['macro']), color='navy', linestyle=(0, (1, 1)), lw=2)
# Add no skill line
plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--', label='No skill (AUC = 0.500)')
plt.xlim([0.0, 1.0])
plt.xlabel("FPR")
plt.ylim([0.0, 1.05])
plt.ylabel("TPR")
plt.legend(loc="best")
plt.title("ROC curve")
plt.show()
return roc_auc, fpr, tpr, fig
def _micro_roc(self, FPR={}, TPR={}):
""" Compute micro-average ROC curve and AUC """
FPR, TPR, _ = roc_curve(self.y.ravel(), self.y_prob.ravel())
return auc(FPR, TPR), FPR, TPR
def _macro_roc(self, FPR, TPR):
""" Compute macro-average ROC curve and AUC """
# First aggregate all false positive rates
FPR_all = np.unique(np.concatenate([FPR[i] for i in range(self.len_classes)]))
# Interpolate all ROC curves at this points
TPR_mean = np.zeros_like(FPR_all)
for i in range(self.len_classes):
TPR_mean += np.interp(FPR_all, FPR[i], TPR[i])
# AUC by averaging
TPR_mean /= self.len_classes
return auc(FPR_all, TPR_mean), FPR_all, TPR_mean
def precision_recall(self, plot=True, fs=(8,6)):
""" Precision and recall for given predicted classes """
# Plot colours
col = cycle('grbcmk')
average_precision = {}
precision = {}
recall = {}
if plot:
plt.figure(99, figsize=fs)
fig = plt.gcf()
else:
fig = None
for i in range(self.len_classes):
precision[i], recall[i], _ = precision_recall_curve(self.y[:, i],self.y_prob[:, i])
average_precision[i] = average_precision_score(self.y[:, i],self.y_prob[:, i])
plt.plot(recall[i], precision[i], lw=2, label='class {}'.format(i), color=next(col))
# Micro
precision["micro"], recall["micro"], _ = precision_recall_curve(self.y.ravel(),self.y_prob.ravel())
average_precision["micro"] = average_precision_score(self.y, self.y_prob, average="micro")
if plot:
if self.len_classes > 1:
plt.plot(recall['micro'], precision['micro'], label='micro average', color='deeppink', linestyle=(0, (1, 1)), lw=2)
plt.xlim([0.0, 1.0])
plt.xlabel("Recall")
plt.ylim([0.0, 1.05])
plt.ylabel("Precision")
if self.len_classes > 1:
plt.legend(loc="best")
plt.title("Precision vs. Recall curve (AP={0:0.2f})".format(average_precision["micro"]))
plt.show()
return precision, recall, average_precision["micro"], fig
def _classify(self):
""" Returns max probability by largest argument """
y_true = []
y_pred_raw = []
y_pred = []
for i in range(len(self.y)):
y_true.append(np.argmax(self.y[i]))
y_pred_raw.append(max(self.y_prob[i]))
y_pred.append(np.argmax(self.y_prob[i]))
return y_true, y_pred, y_pred_raw
def _classify_1D(self):
""" Returns 0 if X < 0.5; Returns 1 if X >= 0.5 """
yprobin = self.y_prob.copy()
yprobin[yprobin<0.5] = 0
yprobin[yprobin>=0.5] = 1
return self.y.flatten().tolist(), yprobin.flatten().tolist(), self.y_prob.flatten().tolist()
def _normalize(self, cm):
return cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
def _listrange(self, start=0, end=1, step=1/10):
return [round(num, 2) for num in np.linspace(start,end,(end-start)*int(1/step)+1).tolist()]
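# Minimal illustrative usage (a sketch with made-up binary predictions; it only
# exercises the thresholding path, i.e. load(..., btm=False), and needs the same
# packages this module already imports).
if __name__ == "__main__":
    y_true = np.array([[0], [1], [1], [0], [1]])
    y_prob = np.array([[0.15], [0.80], [0.65], [0.40], [0.90]])
    m = Metrics(cost="cross-entropy")
    m.load(y_true, [0, 1], y_prob, btm=False)
    print("accuracy:", m.accuracy())
    print("balanced accuracy:", m.balanced_accuracy())
    print("kappa:", m.kappa())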
|
from plugin import Plugin
class Playlist(Plugin):
def help_text(self, bot):
return bot.translate("playlist_help")
def on_msg(self, bot, user_nick, host, channel, message):
if message.lower().startswith('!np'):
bot.send_message(channel, bot.translate("playlist_str1"), user_nick)
|
# Copyright (c) 2015, Dataent Technologies Pvt. Ltd. and Contributors
# License: See license.txt
# pre loaded
from __future__ import unicode_literals
import dataent
test_records = dataent.get_test_records('Currency') |
#!/usr/bin/env python3
"""
Author:hms5232
Repo:https://github.com/hms5232/NCNU-etutor-reposter-telegram-bot
Bug:https://github.com/hms5232/NCNU-etutor-reposter-telegram-bot/issues
"""
from telegram.ext import Updater, CommandHandler
from configparser import ConfigParser
import requests
import time
import threading
import os
import datetime
# Set up a few personal environment variables
env = ConfigParser()
env.read('config.ini')
# If you don't want use config.ini, please edit following variables for your environment.
telegram_bot_token = env.get('reposter', 'telegram_bot_token')
fb_token = env['reposter']['fb_token']
fb_group_id = env['reposter']['fb_group_id']
telegram_group_id = env['reposter']['telegram_group_id']
telegram_channel_id = env['reposter']['telegram_channel_id']
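# A sample config.ini layout (an illustrative sketch; the key names are the ones
# read in this file and in is_telegram_admin(), the values are placeholders only):
#
#   [reposter]
#   telegram_bot_token = 123456:ABC-your-bot-token
#   fb_token = your-facebook-graph-api-token
#   fb_group_id = 1234567890
#   telegram_group_id = -1001234567890
#   telegram_channel_id = @your_channel
#   telegram_admin_id = 11111111,22222222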
listen_status = True  # whether the bot is listening for new posts
"""
Before running the bot, you can send it a message and then check the updates at:
https://api.telegram.org/bot{$token}/getUpdates
"""
updater = Updater(token=telegram_bot_token)  # used to call the bot
"""
Handler functions for the commands.
@param bot: the bot instance; required, otherwise the bot will not reply
@param update: the Telegram update information
"""
# Welcome message
def welcome(bot, update):
chat_id = update.message.from_user.id
about_bot = ''
about_bot = about_bot + '本機器人由 [hms5232](https://github.com/hms5232) 開發\n'
about_bot = about_bot + '採用 [Apache許可證](https://github.com/hms5232/NCNU-etutor-reposter-telegram-bot/blob/master/LICENSE)\n'
about_bot = about_bot + '原始碼公開於 [Github](https://github.com/hms5232/NCNU-etutor-reposter-telegram-bot)\n'
about_bot = about_bot + 'bug 回報及建議請[往這裡走](https://github.com/hms5232/NCNU-etutor-reposter-telegram-bot/issues)'
bot.send_message(chat_id, about_bot, parse_mode='Markdown')
# Show user information
def show_user_info(bot, update):
user_info = ''
user_info = user_info + '發送人 first name:{}\n'.format(update.message.from_user.first_name)
user_info = user_info + '發送人 last name:{}\n'.format(update.message.from_user.last_name)
user_info = user_info + '發送人 full name:{}\n'.format(update.message.from_user.full_name)
user_info = user_info + '發送人 username:{}\n'.format(update.message.from_user.username)
user_info = user_info + '發送人 id:{}\n'.format(update.message.from_user.id)
user_info = user_info + 'message_id:{}\n'.format(update.message.message_id)
user_info = user_info + '所在的聊天室 id:{}\n'.format(update.message.chat.id)
user_info = user_info + '所在的聊天室 type:{}\n'.format(update.message.chat.type)
user_info = user_info + '訊息內容:{}\n'.format(update.message.text)
update.message.reply_text(user_info, disable_notification="True")
# TODO: show information about the latest few posts
def show_latest_posts(bot, update):
    # https://www.facebook.com/groups/{group_id}/permalink/{post_id}/
pass
def hello(bot, update):
    # Two ways to send a message to the user
    update.message.reply_text('Hello world!') # method 1
    bot.sendMessage(update.message.from_user.id, 'Welcome to Telegram!') # method 2
"""
    sendMessage used in method 2 is an alias of send_message;
    by Python naming conventions, the latter (send_message) is the more idiomatic choice
https://python-telegram-bot.readthedocs.io/en/stable/telegram.bot.html#telegram.Bot.send_message
"""
# Reload the configuration file
def reload_config(bot, update):
    # First, check whether the sender is a Telegram admin
    if not is_telegram_admin(update.message.from_user.id):
        # Not an admin: refuse to reload anything
bot.sendMessage(telegram_group_id, '使用者 {}(username:{})在{}嘗試操作機器人遭拒'.format(update.message.from_user.full_name, update.message.from_user.username, time.strftime("%Y/%m/%d %H:%M:%S")))
update.message.reply_text('Permission denied!')
return
new_env = ConfigParser()
new_env.read('config.ini')
global telegram_bot_token, fb_token, fb_group_id, telegram_group_id, telegram_channel_id
telegram_bot_token = new_env.get('reposter', 'telegram_bot_token')
fb_token = new_env['reposter']['fb_token']
fb_group_id = new_env['reposter']['fb_group_id']
telegram_group_id = new_env['reposter']['telegram_group_id']
telegram_channel_id = new_env['reposter']['telegram_channel_id']
update.message.reply_text('OK, config updated!')
# Check whether the user is one of the designated Telegram admins
def is_telegram_admin(telegram_user_id):
    telegram_user_id = str(telegram_user_id) # current user's id
env = ConfigParser()
env.read('config.ini')
telegram_admins = [str_val for str_val in env['reposter']['telegram_admin_id'].split(',')]
return telegram_user_id in telegram_admins
# Listen to the Facebook group for new posts
def listen(bot):
print("[", time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), "]", 'thread start')
failed_request_times = 0
while listen_status:
r = requests.get('https://graph.facebook.com/{}/feed?fields=admin_creator,created_time,id,message,message_tags,permalink_url,link,from&access_token={}'.format(fb_group_id, fb_token))
print("[", time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), "]", r.status_code)
if r.status_code == 200: # OK
            failed_request_times = 0 # reset back to zero
find_last_post_with_tag = False
is_new_post = False
for posts in r.json()['data']:
if find_last_post_with_tag:
break
                if 'message_tags' in posts:  # has hashtags
                    for tags in posts['message_tags']:
                        if tags['id'] == '276859169113184':  # the id Facebook assigns to the 'telegram' hashtag
                            with open('repost.txt', 'r', encoding='UTF-8') as f:
                                last_record = f.read()  # timestamp of the last reposted/updated post on record
                            if last_record < posts['created_time']:  # the post is newer than the record => assume it is new
                                is_new_post = True
                            if is_new_post:
with open('repost.txt', 'w+', encoding='UTF-8') as f:
                                    # update the record before reposting
f.write(posts['created_time'])
print("[", time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), "]", posts['created_time'], posts['id'])
                                # Repost it
                                # Check for the designated keyword; if present, send it as a silent message
if posts['message'].find('#Telegram') != -1: # send without sound
repost_message = posts['message'].replace('#Telegram', '') + '\n原文連結:' + posts['permalink_url'] + '\n\n\n_等待管理員增加 hashtag_'
bot.send_message(telegram_channel_id, repost_message, parse_mode='Markdown', disable_notification="True")
else:
repost_message = posts['message'].replace('#telegram', '') + '\n原文連結:' + posts['permalink_url'] + '\n\n\n_等待管理員增加 hashtag_'
bot.send_message(telegram_channel_id, repost_message, parse_mode='Markdown')
find_last_post_with_tag = True
else:
failed_request_times += 1
            # Stop after too many consecutive failures
if failed_request_times >= 5:
print("[", time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), "]", "Attempt failed too many times!")
bot.send_message(telegram_group_id, "Not return 200 from Facebook API too many times, bot has paused.", parse_mode='Markdown')
return
time.sleep(20)
return
# Tell the bot to get to work
def start_work(bot, update):
    # First, check whether the sender is a Telegram admin
    if not is_telegram_admin(update.message.from_user.id):
        # Not an admin: refuse
bot.sendMessage(telegram_group_id, '使用者 {}(username:{})在{}嘗試操作機器人遭拒'.format(update.message.from_user.full_name, update.message.from_user.username, time.strftime("%Y/%m/%d %H:%M:%S")))
update.message.reply_text('Permission denied!')
return
    # Next, check that everything needed for the job is ready
if before_work_check():
update.message.reply_text('Init error!')
return
global listen_status, listen_group
    # Check whether a listening thread already exists
if listen_group.is_alive():
print('thread already exists.')
'''
https://python-telegram-bot.readthedocs.io/en/stable/telegram.message.html
    Although the reply_text method cannot set a parse mode, other helper methods can be called to specify how the text is parsed.
    For example reply_html(), which is shorthand for bot.send_message(update.message.chat_id, parse_mode=ParseMode.HTML, *args, **kwargs)
'''
update.message.reply_html('<strike>ヽ(∀゚ )人(゚∀゚)人( ゚∀)人(∀゚ )人(゚∀゚)人( ゚∀)ノ</strike>影分身術禁止!')
return
listen_status = True
    listen_group = threading.Thread(target = listen, args=(bot,)) # recreate the thread
    if listen_status:
        listen_group.start() # start the new thread
        # Confirm the thread really started
if listen_group.is_alive():
update.message.reply_text('OK, I go to work now QQ.')
else:
update.message.reply_text('Oh no, something went wrong.')
else:
update.message.reply_text('Oh no, something went wrong.')
# The bot can get off work and rest now, but still has to remain on standby (exploitative-boss tone)
def unlisten(bot, update):
    # First, check whether the sender is a Telegram admin
    if not is_telegram_admin(update.message.from_user.id):
        # Not an admin: refuse
bot.sendMessage(telegram_group_id, '使用者 {}(username:{})在{}嘗試操作機器人遭拒'.format(update.message.from_user.full_name, update.message.from_user.username, time.strftime("%Y/%m/%d %H:%M:%S")))
update.message.reply_text('Permission denied!')
return
print("[", time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), "]", "stop thread")
global listen_status, listen_group
listen_status = False
    listen_group.join() # stop the listening thread
print("[", time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), "]", "thread killed")
if not listen_status and not listen_group.is_alive():
update.message.reply_text('OK, now I get off work. YA~!')
else:
update.message.reply_text('Oh no, something went wrong.')
# Check whether the bot is currently listening for group posts
def bot_work_status(bot, update):
now_status = ''
if listen_group.is_alive():
now_status = now_status + 'ξ( ✿>◡❛)▄︻▇▇〓▄︻┻┳═一監聽社團貼文中\n'
else:
now_status = now_status + '現在是手動模式(:3[__]4\n'
update.message.reply_text(now_status)
# Checks to run before starting work
def before_work_check():
    # Check that the required files exist
if not os.path.isfile('repost.txt'):
find_last_post_with_tag = False
with open('repost.txt', 'w', encoding='UTF-8') as nf:
if not os.path.isfile('repost.txt'):
print("[", time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), "]", "An error occurred when try to create reposter.txt!")
return 1
print("[", time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), "]", "Create repost.txt")
            # After creating the file, remember to write the current time into it
nf.write(datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S+0000"))
    # Checks complete
return 0
# CommandHandler('command', function to run); users type "/command"
updater.dispatcher.add_handler(CommandHandler(['start', 'about'], welcome)) # welcome message / bot info
updater.dispatcher.add_handler(CommandHandler('info', show_user_info)) # show user information
#updater.dispatcher.add_handler(CommandHandler('post', post)) # TODO: post announcements
updater.dispatcher.add_handler(CommandHandler('latest', show_latest_posts)) # show the latest few posts
updater.dispatcher.add_handler(CommandHandler(['hello', 'hi'], hello)) # Hello World!
updater.dispatcher.add_handler(CommandHandler('reload', reload_config)) # reload the config file
updater.dispatcher.add_handler(CommandHandler('work', start_work)) # start the corporate-drone life (begin listening)
updater.dispatcher.add_handler(CommandHandler('rest', unlisten)) # time to get off work (stop listening)
updater.dispatcher.add_handler(CommandHandler('status', bot_work_status)) # check whether the bot is actually watching posts right now
listen_group = threading.Thread(target = listen) # listen on a separate thread
# Required to run the bot: start polling and keep it waiting for commands
updater.start_polling()
updater.idle()
|
state = {'params': {
'in_i_l': 1.9158910803943378e-07,
'in_i_w': 1.052548978774763e-06,
'infold_l': 1.7824932088861794e-07,
'infold_w': 6.776028281502987e-07,
'infoldsrc_w': 5.394307669756082e-06,
'inpair_l': 1.6674022226091898e-07,
'inpair_w': 8.110062940175877e-06,
'intailsrc_w': 5.128684141164976e-06,
'neg_stg2_in_w': 4.900682457972542e-07,
'neg_stg2_l': 1.7855150682190352e-07,
'neg_stg3_l': 1.771458647876742e-07,
'neg_stg3_w': 2.286009066999664e-06,
'neg_stg4_l': 1.798096544702257e-07,
'neg_stg4_w': 4.230212544882686e-07,
'pos_stg2_in_w': 9.63313265949415e-07,
'pos_stg2_l': 1.9776151770055945e-07,
'pos_stg3_l': 1.6903338952370735e-07,
'pos_stg3_w': 5.09073510895247e-06,
'pos_stg4_l': 1.9399132189659936e-07,
'pos_stg4_w': 8.036939005004705e-07,
'stg2_mult': 5.654407639121624,
'pos_stg2_out_w': 5.446965889851573e-06,
'neg_stg2_out_w': 2.7710456327269277e-06,
'sim_temp': 27,
'vcm': 0.8,
'supply': 1.8,
'pass_number': 1,
'working_dir': 'pool_dir_1'}}
print(state['params'])
print(state) |
#############################################################
## ##
## Copyright (c) 2003-2017 by The University of Queensland ##
## Centre for Geoscience Computing ##
## http://earth.uq.edu.au/centre-geoscience-computing ##
## ##
## Primary Business: Brisbane, Queensland, Australia ##
## Licensed under the Open Software License version 3.0 ##
## http://www.apache.org/licenses/LICENSE-2.0 ##
## ##
#############################################################
import os
class PathSearcher:
def __init__(self, pathString=os.environ["PATH"], pathDelimiter=":"):
self.searchList = str.split(pathString, pathDelimiter)
def contains(self, fileName):
for dir in self.searchList:
if os.path.exists(os.path.join(dir, fileName)):
return True
return False
def which(self, fileName):
for dir in self.searchList:
fileNamePath = os.path.join(dir, fileName)
if os.path.exists(fileNamePath):
return fileNamePath
return fileName
def find(self, fileName):
for dir in self.searchList:
fileNamePath = os.path.join(dir, fileName)
if os.path.exists(fileNamePath):
return fileNamePath
return ""
|
import gtk.gdk
w = gtk.gdk.get_default_root_window()
sz = w.get_size()
print "The size of the window is %d x %d" % sz
pb = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB,False,8,sz[0],sz[1])
pb = pb.get_from_drawable(w,w.get_colormap(),0,0,0,0,sz[0],sz[1])
if (pb != None):
pb.save("screenshot.png","png")
print "Screenshot saved to screenshot.png."
else:
print "Unable to get the screenshot."
|
#!/usr/bin/env python3
# Print the first three multiples of a given number `num`,
# and finally return the third multiple.
def first_three_multiples(num):
for i in range(1, 4):
print(num * i)
return num * i
first_three_multiples(10)
first_three_multiples(15)
|
import logging
import datetime
import bisect
from django.conf import settings
from django.contrib.localflavor.us.us_states import STATE_CHOICES
from django.core.paginator import Paginator, InvalidPage, EmptyPage, PageNotAnInteger
from django.core.urlresolvers import reverse
from django.db import transaction
from django.http import HttpResponseRedirect, HttpResponse, Http404
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.utils.translation import ugettext as _
from auth.decorators import login_required
from django.core.mail.message import EmailMultiAlternatives
from uni_form.helpers import FormHelper, Layout, Fieldset, Row, Submit
from market_buy.forms import AdvancedSearchForm
def advanced_search(request, reset=False):
marketplace = request.marketplace
if reset:
form = AdvancedSearchForm(marketplace=marketplace)
else:
form = AdvancedSearchForm(marketplace=marketplace, data=request.GET)
if request.GET.get("do"):
result_list = []
if form.is_valid():
result_list = form.search()
pager = Paginator(result_list, settings.HAYSTACK_SEARCH_RESULTS_PER_PAGE)
try:
page = int(request.GET.get("page", "1"))
except:
page = 1
try:
paginator = pager.page(page)
except (EmptyPage, InvalidPage):
raise Http404
paged = (pager.num_pages > 1)
return render_to_response(
"%s/buy/advanced_search_results.html" % marketplace.template_prefix,
{"result_list": paginator, "form": form,
"pages": pager.page_range, "paged": paged, "total": pager.count,
"view_mode": form.cleaned_data["view_by"]}, RequestContext(request))
form_helper = FormHelper()
layout = Layout(
Fieldset("", "q"),
Fieldset("", Row("categories", "subcategories")),
Fieldset("", "include"),
Fieldset("", Row("from_price", "to_price")),
Fieldset("", "sort"),
Fieldset("", "view_by"),
)
form_helper.add_layout(layout)
submit = Submit("do", _("Search"))
submit.field_classes = "button_primary"
form_helper.add_input(submit)
return render_to_response(
"%s/buy/advanced_search.html" % marketplace.template_prefix,
{"form": form, "helper": form_helper} , RequestContext(request))
def categories(request):
""" Return the categories for an specifica marketplace """
return render_to_response("%s/buy/categories.html" % request.marketplace.template_prefix,
{} , RequestContext(request))
def howtobuy(request):
""" Return the how to buy for an specific marketplace """
return render_to_response("%s/buy/howtobuy.html" % request.marketplace.template_prefix,
{} , RequestContext(request))
def editor_pick(request):
""" Return a list of items marked as favorites by admins """
from models import EditorPick
marketplace = request.marketplace
picks = EditorPick.get_available_picks(marketplace)
return render_to_response("%s/buy/favorites.html" % request.marketplace.template_prefix,
{'picks' : picks} , RequestContext(request))
def latest_items(request):
""" Return the list of the latest 20(?) posted items in the stores """
from inventory.models import Product
marketplace = request.marketplace
latest_items = Product.objects.filter(shop__marketplace=marketplace, latest_item=True, has_image=True)[:10]
return render_to_response("%s/buy/latest_items.html" % request.marketplace.template_prefix,
{'latest_items' : latest_items} , RequestContext(request))
def map_pick(request):
""" Return """
from shops.models import Shop
marketplace = request.marketplace
shops = Shop.actives.filter(marketplace=marketplace)
return render_to_response("%s/buy/map_pick.html" % request.marketplace.template_prefix,
{'shops' : shops} , RequestContext(request))
def show_listing(request):
""" Return the list of shows added by admins """
from market_buy.models import Show
marketplace = request.marketplace
shows = Show.objects.filter(marketplace=marketplace).filter(date_to__gte=datetime.datetime.today())
params = {'states': STATE_CHOICES, 'shows': shows }
return render_to_response("%s/buy/show_listing.html" % request.marketplace.template_prefix,
params , RequestContext(request))
def show_search(request):
""" Shows search """
from geopy import geocoders
from distance_helper import distance_between_points
from market_buy.models import Show
marketplace = request.marketplace
city = request.POST.get('city', None)
state = request.POST.get('state')
zip = request.POST.get('zip')
country = request.POST.get('country', 'US')
if (city == u'') or (state == u''):
request.flash['message'] = "Please, fill at least the city and state fields to complete the search!"
request.flash['severity'] = "error"
return HttpResponseRedirect(reverse("buy_show_listing"))
try:
g = geocoders.Google(settings.GOOGLE_KEY)
place = "%s, %s, %s" % (city, state, country)
place, point1 = g.geocode(place)
except Exception, e:
request.flash['message'] = "Could not determine your location. Try again with other input data!"
request.flash['severity'] = "error"
return HttpResponseRedirect(reverse("buy_show_listing"))
all_shows = Show.objects.filter(marketplace=marketplace).filter(date_to__gte=datetime.datetime.today())
max_distance = 1500
metric = "miles"
shows = []
for show in all_shows:
point2 = [float(x) for x in show.geo_location()]
distance = distance_between_points(point1, point2 , metric)
if distance < max_distance:
bisect.insort(shows, (int(distance), show))
params = {'states': STATE_CHOICES, 'shows': shows, 'place': place}
return render_to_response("%s/buy/show_listing.html" % request.marketplace.template_prefix,
params, RequestContext(request))
def shop_local(request):
""" Return the shops near some location """
from shops.models import Shop
from geopy import geocoders
from distance_helper import distance_between_points
in_range = []
params = {}
params['states'] = STATE_CHOICES
if request.method == "POST":
max_distance = float(request.POST.get("max_distance"))
metric = request.POST.get("metric", "miles")
city = request.POST.get("city")
state = request.POST.get("state")
country = request.POST.get("country", "US")
try:
g = geocoders.Google(settings.GOOGLE_KEY)
place = "%s, %s, %s" % (city, state, country)
place, point1 = g.geocode(place)
except Exception, e:
logging.critical(e)
request.flash['message'] = "Invalid input!"
request.flash['severity'] = "error"
return render_to_response("%s/buy/shop_local.html" % request.marketplace.template_prefix,
{'shops' : in_range} ,
RequestContext(request))
marketplace = request.marketplace
shops = Shop.actives.filter(marketplace=marketplace)
for shop in shops:
point2 = [float(x) for x in shop.geo_location()]
distance = distance_between_points(point1, point2 , metric)
logging.critical("%s - %s - %s" % (shop, point2, distance))
if distance < max_distance:
bisect.insort(in_range, (int(distance), shop))
params.update({'shops': in_range, 'metric' : metric, 'selected_city': city, 'selected_state': state, 'do_search': True})
else:
params.update({'shops': [], 'do_search': False})
return render_to_response("%s/buy/shop_local.html" % request.marketplace.template_prefix,
params ,
RequestContext(request))
def top_sellers(request):
""" Return top seller of the month, and the last 10 top sellers """
from shops.models import Shop
from market_buy.models import BestSeller
marketplace = request.marketplace
sellers = BestSeller.objects.filter(shop__marketplace=marketplace).order_by("-to_date")[:10]
if not sellers:
delta = 7
date_to = datetime.datetime.now()
date_from = date_to - datetime.timedelta(delta)
best_seller = BestSeller()
best_seller.shop = Shop.objects.all().filter(marketplace=marketplace).order_by('?')[0]
best_seller.from_date = date_from
best_seller.to_date = date_to
best_seller.revenue = 0
best_seller.save()
sellers = BestSeller.objects.filter(shop__marketplace=marketplace).order_by("-to_date")
return render_to_response("%s/buy/top_sellers.html" % request.marketplace.template_prefix,
{'best_sellers' : sellers} , RequestContext(request))
def wish_list(request):
""" Return the wishes list posted by the users """
from market_buy.models import WishListItem
from market_buy.forms import WishListItemForm
marketplace = request.marketplace
    wish_list_items = WishListItem.objects.filter(marketplace=marketplace).order_by("posted_on")
form = WishListItemForm()
return render_to_response("%s/buy/wish_list.html" % request.marketplace.template_prefix,
{
                                  'wish_list' : wish_list_items,
'form' : form,
} ,
RequestContext(request))
def ajax_get_subcategories(request):
from market.models import MarketSubCategory, MarketCategory
categories = request.REQUEST.get('categories', "")
try:
categories = categories.split(",")
logging.info("categories: %s" % categories)
categories = MarketCategory.objects.filter(id__in=categories)
logging.info("categories: %s" % categories)
sub_categories = MarketSubCategory.objects.filter(parent__in=categories).order_by("name")
html = ""
for sub in sub_categories:
html += '<option value="%d">%s</option>' % (sub.id, sub.name)
logging.info("subscategories: %s" % html)
return HttpResponse(html)
except:
logging.exception("error getting subcategories")
return HttpResponse("")
@login_required
def post_wish_list_item(request):
from market_buy.forms import WishListItemForm
user = request.user
marketplace = request.marketplace
if request.method == "POST":
form = WishListItemForm(request.POST)
if form.is_valid():
item = form.save(commit = False)
item.posted_by = user
item.marketplace = marketplace
item.save()
request.flash['message'] = _("Item was successfully added...")
request.flash['severity'] = "success"
else:
request.flash['message'] = _("Item could not be added...")
request.flash['severity'] = "error"
return HttpResponseRedirect(reverse("buy_wish_list"))
def item_redirect(request, id):
#from inventory.models import Product
from for_sale.models import Item
product = get_object_or_404(Item, id=id)
host_name = product.shop.default_dns
#if hasattr(product, 'item'):
path = reverse("bidding_view_item", urlconf="stores.urls", args=[product.id])
return HttpResponseRedirect("http://%s%s" % (host_name, path))
#else:
# return HttpResponseRedirect("/")
def product_redirect(request, id):
from inventory.models import Product
product = get_object_or_404(Product, id=id)
host_name = product.shop.default_dns
if hasattr(product, 'item'):
path = reverse("bidding_view_item", urlconf="stores.urls", args=[product.id])
return HttpResponseRedirect("http://%s%s" % (host_name, path))
elif hasattr(product, 'lot'):
path = reverse("bidding_view_lot", urlconf="stores.urls", args=[product.id])
return HttpResponseRedirect("http://%s%s" % (host_name, path))
else:
return HttpResponseRedirect("/")
@transaction.commit_on_success
def signup(request):
from auth.models import User
from auth import load_backend, login
from users.models import Profile, EmailVerify
from market_buy.forms import BuyerForm
form = BuyerForm(request.POST or None)
if form.is_valid():
""" Generate Auth User """
user = User.objects.create_user(form.cleaned_data["username"],
form.cleaned_data["email"],
form.cleaned_data["password1"])
user.first_name = form.cleaned_data["first_name"]
user.last_name = form.cleaned_data["last_name"]
user.is_active = False
user.save()
""" Set profile """
profile = Profile(user=user)
profile.save()
""" Send mail to confirm account """
email_verify = EmailVerify(user=user, user_activation=True)
code = email_verify.generate_code()
email_verify.save()
# TODO: remove this
send_mail_account_confirmation(user, email_verify.code, request.marketplace)
# return HttpResponseRedirect(reverse('confirmemail', args=[code]))
# for backend in settings.AUTHENTICATION_BACKENDS:
# if user == load_backend(backend).get_user(user.pk):
# user.backend = backend
# break
# if hasattr(user, 'backend'):
# login(request, user)
if request.session.get('sell_signup',False):
request.flash['message'] = _("<h5>Please check your email and confirm your account to start selling...</h5>")
request.flash['severity'] = "success"
return HttpResponseRedirect(reverse('market_sell_signup'))
else:
request.flash['message'] = _("<h5>Please check your email and confirm your account. Once confirmed you can Buy or Sell on GreatCoins.com</h5>")
request.flash['severity'] = "success"
return HttpResponseRedirect(reverse('market_home'))
else:
#request.method == GET
        if 'sell_signup' in request.GET:
request.session['sell_signup'] = request.GET.get('sell_signup','') == '1'
return render_to_response('%s/buy/register.html'% request.marketplace.template_prefix,
{'form': form},
RequestContext(request))
def login(request):
from auth import authenticate, login
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
next = request.POST.get('next', reverse("market_home"))
return HttpResponseRedirect(next)
else:
request.flash['message'] = _("Your account is inactive... You must confirm your account before login.")
request.flash['severity'] = "error"
else:
request.flash['message'] = _("You entered an invalid username or password. Please try again")
request.flash['severity'] = "error"
return render_to_response('%s/buy/login.html'% request.marketplace.template_prefix,
{'next': request.POST.get('next', None)},
RequestContext(request))
return render_to_response('%s/buy/login.html'% request.marketplace.template_prefix,
{'next': request.GET.get('next', None)},
RequestContext(request))
def send_mail_account_confirmation(user, code, marketplace):
"""
Send message to the user to confirm your account
"""
link = "http://www.%s/buy/confirmemail/%s/" % (marketplace.base_domain , code)
subject = "%s Account Confirmation" % marketplace.name
text_content = _("""
Hi %(username)s,
You recently registered at %(marketplace_name)s. Please confirm your account by following this link: %(link)s
Thanks.
%(marketplace_name)s Team.""") % {'username': user.username, 'link': link, 'marketplace_name': marketplace.name}
msg = EmailMultiAlternatives(subject,
text_content,
settings.EMAIL_FROM,
[user.email, settings.EMAIL_FROM],
headers={'X-SMTPAPI': '{\"category\": \"Account Confirmation\"}'})
    logging.critical(text_content)
try:
msg.send()
except:
logging.exception("failure sending mail")
def confirmemail(request, code):
from users.models import EmailVerify
marketplace = request.marketplace
try:
verify = EmailVerify.objects.filter(code = code).get()
if not verify.user_activation:
request.flash['message'] = _("<h5>Account verification failed</h5>")
request.flash['severity'] = "error"
return HttpResponseRedirect(reverse('market_home'))
else:
user = verify.user
user.is_active = True
user.save()
if verify.verified:
request.flash['message'] = _("<h5>Your account was already verified! You can login now <a href='%s'>here</a></h5>" % reverse("market_buy_login"))
request.flash['severity'] = "success"
return HttpResponseRedirect(reverse('market_home'))
else:
verify.verified = True
verify.save()
from auth import load_backend, login
if not hasattr(user, 'backend'):
for backend in settings.AUTHENTICATION_BACKENDS:
if user == load_backend(backend).get_user(user.pk):
user.backend = backend
break
if hasattr(user, 'backend'):
login(request, user)
params = {}
return render_to_response(
"%s/email_confirmed.html" % marketplace.template_prefix, params, RequestContext(request))
except EmailVerify.DoesNotExist:
request.flash['message'] = _("<h5>Account verification failed</h5>")
request.flash['severity'] = "error"
return HttpResponseRedirect(reverse('market_home'))
@login_required
def user_profile(request):
from auth.models import User
from users.forms import UserProfile
from users.models import check_profile
marketplace = request.marketplace
user = request.user
check_profile(User, instance=request.user)
if request.method == 'POST':
form = UserProfile(data=request.POST, user=user)
if form.is_valid():
user.username = form.cleaned_data['username']
user.first_name = form.cleaned_data['first_name']
user.last_name = form.cleaned_data['last_name']
user.profile.street_address = form.cleaned_data['street_address']
user.profile.city = form.cleaned_data['city']
user.profile.state = form.cleaned_data['state']
user.profile.zip = form.cleaned_data['zip']
user.profile.country = form.cleaned_data['country']
user.profile.phone = form.cleaned_data['phone']
try:
photo = request.FILES['photo']
user.profile.photo = photo
except:
pass
user.save()
user.profile.save()
return HttpResponseRedirect(reverse('market_home'))
else:
initial = { 'username': user.username,
'first_name': user.first_name,
'last_name': user.last_name,
'street_address': user.profile.street_address,
'city': user.profile.city,
'state': user.profile.state,
'zip': user.profile.zip,
'country': user.profile.country,
'phone': user.profile.phone,
}
form = UserProfile(initial=initial, user=user)
params = {'form': form, 'photo': user.profile.photo }
return render_to_response("%s/buy/profile.html" % marketplace.template_prefix,
params,
RequestContext(request))
@login_required
def delete_profile_image(request):
from users.models import Profile
try:
profile = Profile.objects.get(user__id=request.user.id)
profile.photo = ''
profile.save()
    except Exception as ex:
logging.debug(str(ex))
return HttpResponseRedirect(reverse('market_buy_user_profile'))
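# --- Illustrative sketch only (not part of the original module) ---
# One plausible way these views could be wired up in this app's urls.py, reusing the
# reverse() names the code above depends on; the regexes and the 'market_buy.views'
# prefix are assumptions inferred from the imports and the confirmation link built above.
# from django.conf.urls.defaults import patterns, url
#
# urlpatterns = patterns('market_buy.views',
#     url(r'^buy/login/$', 'login', name='market_buy_login'),
#     url(r'^buy/confirmemail/(?P<code>\w+)/$', 'confirmemail', name='confirmemail'),
#     url(r'^buy/profile/$', 'user_profile', name='market_buy_user_profile'),
# )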
|
# -*- coding: utf-8 -*-
import tkinter as tk
from .gui_canvas import CanvasImage
class Rectangles(CanvasImage):
""" Class of Rectangles. Inherit CanvasImage class """
def __init__(self, placeholder, path, rect_size):
""" Initialize the Rectangles """
CanvasImage.__init__(self, placeholder, path) # call __init__ of the CanvasImage class
self.canvas.bind('<space>', self.set_rect) # set new rectangle with a spacebar key press
self.canvas.bind('<ButtonPress-1>', self.set_rect) # set new rectangle
self.canvas.bind('<ButtonRelease-3>', self.popup) # call popup menu
self.canvas.bind('<Motion>', self.motion) # handle mouse motion
self.canvas.bind('<Delete>', lambda event: self.delete_rect()) # delete selected rectangle
# Create a popup menu for Rectangles
self.hold_menu1 = False # popup menu is closed
self.hold_menu2 = False
self.menu = tk.Menu(self.canvas, tearoff=0)
self.menu.add_command(label='Delete', command=self.delete_rect, accelerator=u'Delete')
# Rectangle parameters
self.rect_size = rect_size # size of the rolling window
self.width_line = 2 # lines width
self.dash = (1, 1) # dash pattern
self.color_roi = {'draw' : 'red', # draw roi color
'point' : 'blue', # point roi color
'back' : 'yellow', # background roi color
'stipple': 'gray12'} # stipple value for roi
self.rect = self.canvas.create_rectangle((0, 0, 0, 0), width=self.width_line,
dash=self.dash, outline=self.color_roi['draw'])
self.tag_roi = 'roi' # roi tag
self.tag_const = 'rect' # constant tag for rectangle
self.tag_poly_line = 'poly_line' # edge of the rectangle
self.selected_rect = [] # selected rectangles
self.roi_dict = {} # dictionary of all roi rectangles and their top left coords on the canvas
def set_rect(self, event):
""" Set rectangle """
if self.hold_menu2: # popup menu was opened
self.hold_menu2 = False
self.motion(event) # motion event for popup menu
return
        self.motion(event)  # generate a motion event; the menu bar misbehaves without it
if ' '.join(map(str, self.dash)) == self.canvas.itemcget(self.rect, 'dash'):
return # rectangle is out of scope
# Calculate coordinates of rectangle top left corner on the zoomed image
bbox1 = self.canvas.coords(self.container) # get image area
bbox2 = self.canvas.coords(self.rect) # get rectangle area
x = int((bbox2[0] - bbox1[0]) / self.imscale) # get (x,y) of top left corner on the image
y = int((bbox2[1] - bbox1[1]) / self.imscale)
self.draw_rect(bbox2, (x, y))
def draw_rect(self, bbox, point):
""" Draw rectangle """
# Create identification tag
tag_uid = "{x}-{y}".format(x=point[0], y=point[1]) # unique ID
if tag_uid not in self.roi_dict: # save only unique rectangles with different coordinates
# Create rectangle. 2nd tag is ALWAYS a unique tag ID + constant string.
self.canvas.create_rectangle(bbox, fill=self.color_roi['point'],
stipple=self.color_roi['stipple'], width=0, state='hidden',
tags=(self.tag_roi, tag_uid + self.tag_const))
# Create polyline. 2nd tag is ALWAYS a unique tag ID.
vertices = [(bbox[0], bbox[1]), (bbox[2], bbox[1]), (bbox[2], bbox[3]), (bbox[0], bbox[3])]
for j in range(-1, len(vertices) - 1):
self.canvas.create_line(vertices[j], vertices[j + 1], width=self.width_line,
fill=self.color_roi['back'], tags=(self.tag_poly_line, tag_uid))
self.roi_dict[tag_uid] = point # remember top left corner in the dictionary
print('Images: {n}'.format(n=len(self.roi_dict)) + (20 * ' ') + '\r', end='')
def popup(self, event):
""" Popup menu """
self.motion(event) # select rectangle with popup menu explicitly to be sure it is selected
if self.selected_rect: # show popup menu only for selected rectangle
self.hold_menu1 = True # popup menu is opened
self.hold_menu2 = True
self.menu.post(event.x_root, event.y_root) # show popup menu
self.hold_menu1 = False # popup menu is closed
def motion(self, event):
""" Track mouse position over the canvas """
if self.hold_menu1: return # popup menu is opened
x = self.canvas.canvasx(event.x) # get coordinates of the event on the canvas
y = self.canvas.canvasy(event.y)
w = int(self.rect_size[0] * self.imscale) >> 1
h = int(self.rect_size[1] * self.imscale) >> 1
bbox = self.canvas.coords(self.container) # get image area
if bbox[0] + w <= x < bbox[2] - w and bbox[1] + h <= y < bbox[3] - h:
self.canvas.itemconfigure(self.rect, dash='') # set solid line
else:
self.canvas.itemconfigure(self.rect, dash=self.dash) # set dashed line
self.canvas.coords(self.rect, (x - w, y - h, x + w, y + h)) # relocate rectangle
self.canvas.lift(self.rect) # set roi into foreground
# Handle rectangles on the canvas
self.deselect_rect() # change color and zeroize selected rectangle
self.select_rect() # change color and select rectangle
def deselect_rect(self):
""" Deselect current roi object """
if not self.selected_rect: return # selected rectangles list is empty
for i in self.selected_rect:
j = i + self.tag_const # unique tag of the rectangle
self.canvas.itemconfigure(i, fill=self.color_roi['back']) # deselect lines
self.canvas.itemconfigure(j, state='hidden') # hide rectangle
self.selected_rect.clear() # clear the list
def select_rect(self):
""" Select and change color of the current roi object """
i = self.canvas.find_withtag('current') # id of the current object
tags = self.canvas.gettags(i) # get tags of the current object
if self.tag_poly_line in tags: # if it's a polyline. 2nd tag is ALWAYS a unique tag ID
j = tags[1] + self.tag_const # unique tag of the rectangle
self.canvas.itemconfigure(tags[1], fill=self.color_roi['point']) # select lines
self.canvas.itemconfigure(j, state='normal') # show rectangle
self.selected_rect.append(tags[1]) # remember 2nd unique tag_id
def delete_rect(self):
""" Delete selected rectangle """
if self.selected_rect: # delete selected rectangle
for i in self.selected_rect:
j = i + self.tag_const # unique tag of the rectangle
del(self.roi_dict[i]) # delete ROI from the dictionary of all rectangles
self.canvas.delete(i) # delete lines
self.canvas.delete(j) # delete rectangle
self.selected_rect.clear() # clear selection list
self.hold_menu2 = False # popup menu is closed
def delete_all(self):
""" Delete all rectangles from the canvas and clear variables """
self.canvas.delete(self.tag_roi) # delete all rectangles
self.canvas.delete(self.tag_poly_line) # delete all poly-lines
self.selected_rect.clear() # clear selection list
self.hold_menu2 = False # popup menu is closed
self.roi_dict.clear() # clear dictionary of ROI
def reset(self, roi):
""" Reset ROI and holes on the image """
self.delete_all() # delete old rectangles
bbox1 = self.canvas.coords(self.container) # get image area
for point in roi: # draw roi rectangles
bbox2 = (int(point[0] * self.imscale) + bbox1[0],
int(point[1] * self.imscale) + bbox1[1],
int((point[0] + self.rect_size[0]) * self.imscale) + bbox1[0],
int((point[1] + self.rect_size[1]) * self.imscale) + bbox1[1])
self.draw_rect(bbox2, point)
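# Minimal usage sketch (illustrative, not from the original project): CanvasImage is
# constructed with (placeholder, path) as noted in __init__ above, so Rectangles can be
# hosted in any Tk container. 'example.png' and the 256x256 rolling-window size are
# placeholder values. Because this module uses a relative import, run it as a package
# module (python -m <package>.<this_module>) rather than as a standalone script.
if __name__ == '__main__':
    root = tk.Tk()
    root.title('Rectangles demo')
    frame = tk.Frame(root)
    frame.pack(fill='both', expand=True)
    rects = Rectangles(frame, 'example.png', rect_size=(256, 256))
    # rects.grid(row=0, column=0)  # only if CanvasImage exposes a grid() helper (assumption)
    root.mainloop()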
|
import numpy as np
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import model_list, models, fitting
import matplotlib.lines as mlines
import corner
import copy as cp
from utils import rj2cmb
plt.style.use('seaborn-colorblind')
#plt.rc('text', usetex=True)
#plt.rc('font', family='serif')
print "hello!"
mean_beta = 1.6
mean_temp = 20.
sigma_beta = .2
sigma_temp = 4.
pMBB_broad = model_list.prob1mbb_model
sMBB = model_list.dust_model
cmb = model_list.cmb_model
sync = model_list.sync_model
DUST_I = 50.
DUST_P = 5. / 1.41
amp_I=rj2cmb(353e9, DUST_I)
amp_Q=rj2cmb(353e9, DUST_P)
amp_U=rj2cmb(353e9, DUST_P)
pMBB_narrow = models.ProbSingleMBB(amp_I=rj2cmb(353e9, DUST_I),
amp_Q=rj2cmb(353e9, DUST_P),
amp_U=rj2cmb(353e9, DUST_P),
dust_beta=1.6, dust_T=20.,
sigma_beta=.1 * sigma_beta, sigma_temp=.1 * sigma_temp)
nu_pico = np.asarray([21,25,30, 36.0,43.2,51.8,62.2,74.6,89.6,
107.5,129.0,154.8,185.8,222.9,267.5,321.0,
385.2,462.2,554.7,665.6,798.7]) * 1e9
models_sMBB = [sMBB, cmb, sync]
models_pMBB_broad = [pMBB_broad, cmb, sync]
models_pMBB_narrow = [pMBB_narrow, cmb, sync]
def make_pnames(models_fit):
amp_names = []
param_names = []
for mod in models_fit:
# Parameter names
amp_names += ["%s_%s" % (mod.model, pol) for pol in "IQU"]
param_names += mod.param_names
return amp_names + param_names
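# For example (assuming the component models expose short .model strings such as
# 'mbb', 'cmb' and 'sync'), make_pnames([sMBB, cmb, sync]) would return something like
# ['mbb_I', 'mbb_Q', 'mbb_U', 'cmb_I', ..., 'sync_U', 'dust_beta', 'dust_T', 'sync_beta'],
# i.e. three amplitude names per component followed by each component's spectral parameters.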
pnames_sMBB = make_pnames(models_sMBB)
pnames_pMBB_broad = make_pnames(models_pMBB_broad)
pnames_pMBB_narrow = make_pnames(models_pMBB_narrow)
print(pnames_sMBB)
fsigma_T=1e3
fsigma_P=1.
beam_mat = np.identity(3*len(nu_pico)) # Beam model
# pvals set the model parameters
params_sMBB = [sMBB.amp_I, sMBB.amp_Q, sMBB.amp_U, cmb.amp_I, cmb.amp_Q, cmb.amp_U,
sync.amp_I, sync.amp_Q, sync.amp_U, sMBB.dust_beta, sMBB.dust_T,
sync.sync_beta]
params_pMBB_broad = [pMBB_broad.amp_I, pMBB_broad.amp_Q, pMBB_broad.amp_U, cmb.amp_I, cmb.amp_Q, cmb.amp_U,
sync.amp_I, sync.amp_Q, sync.amp_U, pMBB_broad.dust_beta, pMBB_broad.dust_T,
pMBB_broad.sigma_beta, pMBB_broad.sigma_temp, sync.sync_beta]
params_pMBB_narrow = [pMBB_narrow.amp_I, pMBB_narrow.amp_Q, pMBB_narrow.amp_U, cmb.amp_I, cmb.amp_Q, cmb.amp_U,
sync.amp_I, sync.amp_Q, sync.amp_U, pMBB_narrow.dust_beta, pMBB_narrow.dust_T,
pMBB_narrow.sigma_beta, pMBB_narrow.sigma_temp, sync.sync_beta]
initial_vals_sMBB = (amp_I, amp_Q, amp_U, cmb.amp_I, cmb.amp_Q, cmb.amp_U,
sync.amp_I, sync.amp_Q, sync.amp_U, mean_beta, mean_temp,
sync.sync_beta)
initial_vals_pMBB_broad = (amp_I, amp_Q, amp_U, cmb.amp_I, cmb.amp_Q, cmb.amp_U,
sync.amp_I, sync.amp_Q, sync.amp_U, mean_beta, mean_temp,
sigma_beta, sigma_temp, sync.sync_beta)
initial_vals_pMBB_narrow = (amp_I, amp_Q, amp_U, cmb.amp_I, cmb.amp_Q, cmb.amp_U,
                            sync.amp_I, sync.amp_Q, sync.amp_U, mean_beta, mean_temp,
.1 * sigma_beta, .1 * sigma_temp, sync.sync_beta)
parent_model = 'mbb'
D_vec_sMBB, Ninv = fitting.generate_data(nu_pico, fsigma_T, fsigma_P, [sMBB, cmb, sync],
noise_file="data/noise_pico.dat" )
D_vec_pMBB_broad, Ninv = fitting.generate_data(nu_pico, fsigma_T, fsigma_P, [pMBB_broad, cmb, sync],
noise_file="data/noise_pico.dat")
D_vec_pMBB_narrow, Ninv = fitting.generate_data(nu_pico, fsigma_T, fsigma_P, [pMBB_narrow, cmb, sync],
noise_file="data/noise_pico.dat")
data_spec_sMBB = (nu_pico, D_vec_sMBB, Ninv, beam_mat)
data_spec_pMBB_broad = (nu_pico, D_vec_pMBB_broad, Ninv, beam_mat)
data_spec_pMBB_narrow = (nu_pico, D_vec_pMBB_narrow, Ninv, beam_mat)
p_spec_sMBB = (pnames_sMBB, initial_vals_sMBB, parent_model)
p_spec_pMBB_broad = (pnames_pMBB_broad, initial_vals_pMBB_broad, parent_model)
p_spec_pMBB_narrow = (pnames_pMBB_narrow, initial_vals_pMBB_narrow, parent_model)
print "running emcee"
mcmc_sMBB = fitting.joint_mcmc(data_spec_sMBB, models_sMBB, p_spec_sMBB, nwalkers=30,
burn=5000, steps=10000, nthreads=8, sample_file=None)
mcmc_pMBB_broad = fitting.joint_mcmc(data_spec_pMBB_broad, models_sMBB, p_spec_sMBB, nwalkers=30,
burn=5000, steps=10000, nthreads=8, sample_file=None)
mcmc_pMBB_narrow = fitting.joint_mcmc(data_spec_pMBB_narrow, models_sMBB, p_spec_sMBB, nwalkers=30,
burn=5000, steps=10000, nthreads=8, sample_file=None)
ax1 = corner.corner(mcmc_sMBB[1].T, labels=pnames_sMBB,
truths=initial_vals_sMBB, plot_datapoints=False)
ax2 = corner.corner(mcmc_pMBB_broad[1].T, labels=pnames_sMBB,
truths=initial_vals_sMBB, plot_datapoints=False)
ax3 = corner.corner(mcmc_pMBB_narrow[1].T, labels=pnames_sMBB,
truths=initial_vals_sMBB, plot_datapoints=False)
ax1.savefig('sMBB2sMBB.pdf')
ax2.savefig('pMBB_broad2sMBB.pdf')
ax3.savefig('pMBB_narrow2sMBB.pdf')
np.save('mcmc_sMBB', mcmc_sMBB)
np.save('mcmc_pMBB_broad', mcmc_pMBB_broad)
np.save('mcmc_pMBB_narrow', mcmc_pMBB_narrow)
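# The saved chains could be reloaded later for further analysis, e.g. (assumption:
# fitting.joint_mcmc returns a tuple whose second element is the parameter chain, as the
# corner.corner(...[1].T) calls above imply; np.save wraps the tuple in an object array,
# so loading it back needs pickling enabled):
# mcmc_sMBB = np.load('mcmc_sMBB.npy', allow_pickle=True)
# samples = mcmc_sMBB[1]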
|
# Generated by Django 2.1.1 on 2019-10-06 00:51
from django.conf import settings
import django.contrib.postgres.fields
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import simple_history.models
class Migration(migrations.Migration):
dependencies = [
('home', '0013_historicalresource_resource'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('projects', '0008_auto_20190925_1939'),
]
operations = [
migrations.CreateModel(
name='HistoricalMissionSubCategory',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('updated_date', models.DateTimeField(default=django.utils.timezone.now)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('secondary_mission_area', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='home.MissionArea')),
],
options={
'verbose_name': 'historical mission sub category',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalProjectRelatedLink',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('link_descr', models.CharField(blank=True, max_length=250)),
('link', models.CharField(blank=True, max_length=250)),
('isAccessible', models.BooleanField(default=True)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical project related link',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalProjectSubCategory',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('updated_date', models.DateTimeField(default=django.utils.timezone.now)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical project sub category',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalSubCategory',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('sub_category', models.CharField(blank=True, max_length=30)),
('sub_category_descr', models.CharField(blank=True, max_length=250, null=True)),
('sub_category_tags', models.CharField(blank=True, max_length=1024, null=True)),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('updated_date', models.DateTimeField(default=django.utils.timezone.now)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical sub category',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='MissionSubCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('updated_date', models.DateTimeField(default=django.utils.timezone.now)),
('secondary_mission_area', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='home.MissionArea')),
],
),
migrations.CreateModel(
name='ProjectRelatedLink',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('link_descr', models.CharField(blank=True, max_length=250)),
('link', models.CharField(blank=True, max_length=250)),
('isAccessible', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='ProjectSubCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('updated_date', models.DateTimeField(default=django.utils.timezone.now)),
],
),
migrations.CreateModel(
name='SubCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sub_category', models.CharField(blank=True, max_length=30)),
('sub_category_descr', models.CharField(blank=True, max_length=250, null=True)),
('sub_category_tags', models.CharField(blank=True, max_length=1024, null=True)),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('updated_date', models.DateTimeField(default=django.utils.timezone.now)),
],
),
migrations.AddField(
model_name='historicalproject',
name='campus_lead_staff',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=100), blank=True, null=True, size=10),
),
migrations.AddField(
model_name='project',
name='campus_lead_staff',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=100), blank=True, null=True, size=10),
),
migrations.AddField(
model_name='projectsubcategory',
name='project_name',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='projects.Project'),
),
migrations.AddField(
model_name='projectsubcategory',
name='sub_category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='projects.SubCategory'),
),
migrations.AddField(
model_name='projectrelatedlink',
name='project_name',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='projects.Project'),
),
migrations.AddField(
model_name='missionsubcategory',
name='sub_category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='projects.SubCategory'),
),
migrations.AddField(
model_name='historicalprojectsubcategory',
name='project_name',
field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='projects.Project'),
),
migrations.AddField(
model_name='historicalprojectsubcategory',
name='sub_category',
field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='projects.SubCategory'),
),
migrations.AddField(
model_name='historicalprojectrelatedlink',
name='project_name',
field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='projects.Project'),
),
migrations.AddField(
model_name='historicalmissionsubcategory',
name='sub_category',
field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='projects.SubCategory'),
),
]
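# This auto-generated migration is applied with the standard Django workflow, e.g.
# `python manage.py migrate projects` from the project root (the 'projects' app label
# comes from the dependencies declared above).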
|