max_stars_repo_path (string, len 3-269) | max_stars_repo_name (string, len 4-119) | max_stars_count (int64, 0-191k) | id (string, len 1-7) | content (string, len 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
---|---|---|---|---|---|---|
Week 6/Programming Assignment 3 - Functions.py | joe733/Joy-Of-Computing-Using-Python-2019 | 0 | 12790051 |
'''
Given an integer number n, define a function named printDict() which prints a dictionary where the keys are the numbers between 1 and n (both included) and the values are the squares of the keys.
The function printDict() doesn't take any argument.
>>Input Format:
The first line contains the number n.
>>Output Format:
Print the dictionary in one line.
>>Example:
Input:
5
Output:
{1: 1, 2: 4, 3: 9, 4: 16, 5: 25}
NOTE: You are supposed to write the code for the function printDict() only. The function has already been called in the main part of the code.
'''
def printDict():
    print(dict((i, i**2) for i in range(1, x + 1)), end="")
x=int(input())
printDict()
| 4.65625 | 5 |
setup.py | CrawlerCode/PythonTools | 0 | 12790052 |
from setuptools import setup
def readme():
with open("README.rst") as f:
README = f.read()
return README
TYPE = "CORE"
packages = []
install_requires = []
if TYPE == "CORE":
packages = ['pythontools.core', 'pythontools.identity', 'pythontools.sockets', 'pythontools.dev', 'pythontools.telegram']
install_requires.extend(['requests', 'colorama', 'getmac', 'stdiomask', 'cryptography'])
if TYPE == "GUI":
packages.append('pythontools.gui')
install_requires.append('PyQt5')
if TYPE == "WEBBOT":
packages.append('pythontools.webbot')
install_requires.append('selenium')
setup(
name='CrawlerCodePythonTools' + ('-Gui' if TYPE == "GUI" else '-WebBot' if TYPE == "WEBBOT" else ''),
version='1.5.2',
packages=packages,
url='https://github.com/CrawlerCode',
license='MIT',
author='CrawlerCode',
author_email='',
description='Tools for Python',
long_description=readme(),
long_description_content_type="text/x-rst",
include_package_data=True,
install_requires=install_requires
)
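# Usage sketch (assumption -- build commands are not part of this file):
# pick a variant by editing TYPE above, then build, e.g.:
#   python setup.py sdist bdist_wheel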
| 1.609375 | 2 |
study/spMap.py | Suryavf/SelfDrivingCar | 11 | 12790053 |
import os
import glob
import h5py
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
# Parameters
path = '/media/victor/Documentos/'
outpath = '/media/victor/Documentos/Thesis/AttentionMap/Resume10'
dimImage = ( 96,192)
dimEncode = ( 12, 24)
n_head = 2
n_task = 2
n_sample = 120*20
def getint(name):
basename = name.partition('.')
_, num = basename[0].split('resume')
return int(num)
filesname = glob.glob(os.path.join(path,'*.sy'))
filesname.sort()
filename = '/media/victor/Documentos/resume10.sy'
# Getting data
with h5py.File(filename, 'r') as h5_file:
image = np.array(h5_file['image'])
alpha = np.array(h5_file['alpha'])
print('Load data done\n')
t = 0
# Generate attention maps as video frames
alpha = alpha.reshape([n_sample,n_head,dimEncode[0],dimEncode[1],n_task])
alpha = alpha/alpha.max()
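# alpha: [n_sample, n_head, h, w, n_task], rescaled so the largest weight is 1.0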
"""
for attmap,sample in zip(alpha,image):
# a: [n_head,h,w,n_task]
# s: [3,H,W]
tasks = list()
for n in range(n_task):
heads = list()
for h in range(n_head):
# Up-sampling
att = attmap[h,:,:,n]
att = cv.resize(att,None,fx=8,fy=8, interpolation = cv.INTER_AREA)
att = cv.GaussianBlur(att,(11,11),0)
#att = np.expand_dims(att,axis=0) # [1,H,W]
# Apply
sample = att #sample*att
heads.append( sample ) #np.moveaxis(sample,0,2) )
tasks.append( cv.vconcat(heads) )
plt.figure(1); plt.clf()
plt.imshow(cv.hconcat(tasks))
plt.title('Frame ' + str(t))
plt.pause(0.1)
t += 1
"""
n_up = 8
for attmap,sample in zip(alpha,image):
# a: [n_head,h,w,n_task]
# s: [3,H,W]
sample = np.moveaxis(sample,0,2)
maps = list()
    for n in range(n_task):
        att_map = np.zeros([dimEncode[0]*n_up, dimEncode[1]*n_up, 3])
        for h in range(n_head):
            # Up-sampling
            att = attmap[h, :, :, n]
            att = cv.resize(att, None, fx=n_up, fy=n_up, interpolation=cv.INTER_AREA)
            att = cv.GaussianBlur(att, (11, 11), 0)
            att_map[:, :, h] = att
        maps.append(0.5*sample + 0.5*att_map)
img = cv.hconcat(maps)*255
img = img.astype('float32')
img = cv.cvtColor(img,cv.COLOR_RGB2BGR)
fileout = os.path.join(outpath,'sample%i.png'%t)
cv.imwrite(fileout,img)
#plt.imshow(cv.hconcat(maps))
#fileout = os.path.join(outpath,'sample%i.png'%t)
#plt.savefig(fileout)
#print('Create %s'%fileout)
#plt.title('Frame ' + str(t))
#plt.pause(0.1)
t += 1
| 2.375 | 2 |
.github/workflows/get_version_and_update.py | gdt050579/GView | 7 | 12790054 | import sys
import os
import shutil
if len(sys.argv) < 2:
print("Failed to obtain GView.hpp location")
exit(1)
header_location = sys.argv[1]
if not os.path.exists(header_location):
print("Path {} does not exists!".format(header_location))
exit(1)
default_version_to_update = 1 # major=0, minor=1, patch=2
if len(sys.argv) > 2:
version_to_update = sys.argv[2]
defined_versions = {
"major": 0,
"minor": 1,
"patch": 2
}
default_version_to_update = defined_versions[version_to_update]
reset_lower_versions = True
header_output_location = header_location+'.new'
found_version = False
with open(header_location, 'r') as f:
with open(header_output_location, 'w') as g:
for line in f:
if line.startswith('#define GVIEW_VERSION '):
version = line.split('#define GVIEW_VERSION ')[
1].strip(' \r\n\t\"')
version_array = version.split('.')
value = int(version_array[default_version_to_update])+1
version_array[default_version_to_update] = value
for i in range(default_version_to_update+1, 3):
version_array[i] = 0
version = "{}.{}.{}".format(
version_array[0], version_array[1], version_array[2])
line = '#define GVIEW_VERSION "{}"\n'.format(version)
found_version = True
os.putenv('GVIEW_VERSION', version)
g.write(line)
if not found_version:
print("Failed to find GVIEW_VERSION")
exit(1)
shutil.move(header_output_location, header_location)
exit(0)
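# Note: os.putenv does not update os.environ for the current process.
# Usage sketch (assumption -- the workflow invocation is not shown in this file):
#   python get_version_and_update.py path/to/GView.hpp minor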
| 2.484375 | 2 |
utils/redis_utils.py | sdgdsffdsfff/qtalk_search | 1 | 12790055 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'jingyu.he'
import redis
from redis import sentinel
import json
from conf.cache_params_define import *
# from utils.logger_conf import configure_logger
# log_path = get_logger_file(name='redis.log')
# redis_log = configure_logger('redis', log_path)
try:
if if_redis_sentinel:
_hosts = [hp.split(':') for hp in pre_rs_hosts]
hosts = [(hp[0].strip(), int(hp[1].strip())) for hp in _hosts]
r_sentinel = sentinel.Sentinel(hosts, socket_timeout=r_timeout)
        # password restored from the parallel non-sentinel branch below (the original value was redacted)
        redis_cli = r_sentinel.master_for(r_master, socket_timeout=r_timeout, password=r_password, db=r_database,
                                          decode_responses=True)
else:
redis_cli = redis.StrictRedis(host=r_host, port=r_port, db=r_database, password=r_password,
decode_responses=True)
except (KeyError, ValueError, IndexError) as e:
    raise TypeError('wrong configuration pattern')
    # redis_log.exception('wrong configuration pattern')
# exit(0)
class RedisUtil:
def __init__(self):
self.redis = redis_cli
self.single_key = SINGLE_KEY
self.muc_key = MUC_KEY
self.single_trace_key = SINGLE_TRACE_KEY
self.muc_trace_key = MUC_TRACE_KEY
self.user_registed_mucs = USER_MUCS
self.all_user_key = ALL_USER_DATA_CACHE
# self.router = [self.single_key, self.muc_key, self.single_trace_key, self.muc_trace_key]
def get_user_habit(self, user_id):
"""
        Fetch the user's cached habits from Redis,
        including one-to-one and group chat ordering
        and one-to-one and group chat frequency.
:param user_id:
:return:
"""
router = [self.single_key, self.muc_key, self.single_trace_key, self.muc_trace_key, self.user_registed_mucs]
habit = {}
for key in router:
_k = key + '_' + user_id
if key in [self.single_key, self.muc_key, self.user_registed_mucs]:
habit[key] = self.redis.lrange(name=_k, start=0, end=-1)
else:
habit[key] = self.redis.zrevrangebyscore(name=_k, max='+inf', min=10, start=0, num=10)
                # TODO: this num should probably be driven by limit and offset
return habit
def get_all_user_data(self, domain=''):
if domain:
__k = self.all_user_key + '_' + domain
else:
__k = self.all_user_key
user_data = self.redis.get(name=__k)
try:
if not user_data:
return []
user_data = json.loads(user_data)
return user_data
except json.JSONDecodeError:
return []
def set_all_user_data(self, data, domain=''):
data = json.dumps(data, ensure_ascii=False)
if domain:
__k = self.all_user_key + '_' + domain
else:
__k = self.all_user_key
self.redis.set(name=__k, value=data, ex=86400)
def get_single_lookback(self, user, term):
res = self.redis.get(LOOKBACK_SINGLE_CACHE + '_' + user + '_' + term)
if res:
try:
if not res:
return None
res = json.loads(res)
except Exception as e:
print('LOAD SINGLE LOOKBACK ERROR {}'.format(e))
return None
return res
def set_single_lookback(self, user, term, data):
self.redis.set(name=LOOKBACK_SINGLE_CACHE + '_' + user + '_' + term,
value=json.dumps(data, ensure_ascii=False), ex=300)
def get_muc_lookback(self, user, term):
res = self.redis.get(LOOKBACK_MUC_CACHE + '_' + user + '_' + term)
if res:
try:
if not res:
return None
res = json.loads(res)
except Exception as e:
print('LOAD SINGLE LOOKBACK ERROR {}'.format(e))
res = None
return res
def set_muc_lookback(self, user, term, data):
self.redis.set(name=LOOKBACK_MUC_CACHE + '_' + user + '_' + term,
value=json.dumps(data, ensure_ascii=False), ex=300)
def get_agg_cache(self, user, term):
"""
        Structure:
        user - { 'key' : term, 'data': _info }
:param user:
:param term:
:return:
"""
name = LOOKBACK_AGG_CACHE + '_' + user
res = self.redis.get(name=name)
try:
if not res:
return []
res = json.loads(res)
except Exception as __e:
print(__e)
return []
if res.get('term', '') != term:
self.redis.delete(name)
return []
return res['data']
def set_agg_cache(self, user, term, data):
name = LOOKBACK_AGG_CACHE + '_' + user
info = {'term': term, 'data': data}
self.redis.set(name=name, value=json.dumps(info, ensure_ascii=False), ex=300)
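
# Usage sketch (hypothetical user id):
#   util = RedisUtil()
#   habit = util.get_user_habit('user_123')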
| 1.789063 | 2 |
agilife/api/views.py | michellydsalves/agilife-api | 0 | 12790056 |
from rest_framework import viewsets
from rest_framework import mixins
from rest_framework import generics
from rest_framework.views import APIView
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.decorators import authentication_classes, permission_classes
from api.serializers import (
UserModelSerializer,
PartnerModelSerializer,
HomeworkModelSerializer,
UserHomeworkModelSerializer,
HomeworkVoucherTypeModelSerializer,
HomeworkVoucherModelSerializer,
UserHomeworkVoucherModelSerializer,
ForumTypeModelSerializer,
ForumModelSerializer,
ForumCommentModelSerializer,
UserForumFavoriteModelSerializer,
ContentTypeModelSerializer,
ContentModelSerializer,
UserContentModelSerializer,
PaymentModelSerializer
)
from api.models import (
Partner,
User,
Homework,
UserHomework,
HomeworkVoucherType,
HomeworkVoucher,
UserHomeworkVoucher,
ForumType,
Forum,
ForumComment,
UserForumFavorite,
ContentType,
Content,
UserContent,
Payment
)
@authentication_classes([])
@permission_classes([])
class UserApiViewSet(viewsets.ModelViewSet):
queryset = User.objects.all()
serializer_class = UserModelSerializer
class PartnerApiViewSet(viewsets.ModelViewSet):
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]
queryset = Partner.objects.all()
serializer_class = PartnerModelSerializer
class HomeworkApiViewSet(viewsets.ModelViewSet):
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]
queryset = Homework.objects.all()
serializer_class = HomeworkModelSerializer
class UserHomeworkApiViewSet(viewsets.ModelViewSet):
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]
queryset = UserHomework.objects.all()
serializer_class = UserHomeworkModelSerializer
class HomeworkVoucherTypeListOnlyAPIView(mixins.ListModelMixin, generics.GenericAPIView):
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]
queryset = HomeworkVoucherType.objects.all()
serializer_class = HomeworkVoucherTypeModelSerializer
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
class HomeworkVoucherListOnlyAPIView(mixins.ListModelMixin, generics.GenericAPIView):
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]
queryset = HomeworkVoucher.objects.all()
serializer_class = HomeworkVoucherModelSerializer
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
class UserHomeworkVoucherApiViewSet(viewsets.ModelViewSet):
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]
queryset = UserHomeworkVoucher.objects.all()
serializer_class = UserHomeworkVoucherModelSerializer
class ForumTypeListOnlyAPIView(mixins.ListModelMixin, generics.GenericAPIView):
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]
queryset = ForumType.objects.all()
serializer_class = ForumTypeModelSerializer
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
class ForumApiViewSet(viewsets.ModelViewSet):
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]
queryset = Forum.objects.all()
serializer_class = ForumModelSerializer
class ForumCommentApiViewSet(viewsets.ModelViewSet):
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]
queryset = ForumComment.objects.all()
serializer_class = ForumCommentModelSerializer
class UserForumFavoriteApiViewSet(viewsets.ModelViewSet):
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]
queryset = UserForumFavorite.objects.all()
serializer_class = UserForumFavoriteModelSerializer
class ContentTypeListOnlyAPIView(mixins.ListModelMixin, generics.GenericAPIView):
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]
queryset = ContentType.objects.all()
serializer_class = ContentTypeModelSerializer
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
class ContentListOnlyAPIView(mixins.ListModelMixin, generics.GenericAPIView):
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]
queryset = Content.objects.all()
serializer_class = ContentModelSerializer
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
class UserContentApiViewSet(viewsets.ModelViewSet):
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]
queryset = UserContent.objects.all()
serializer_class = UserContentModelSerializer
class PaymentApiViewSet(viewsets.ModelViewSet):
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]
queryset = Payment.objects.all()
serializer_class = PaymentModelSerializer
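
# Usage sketch (assumption -- the project's real urls.py is not shown here):
#   from rest_framework.routers import DefaultRouter
#   router = DefaultRouter()
#   router.register(r'users', UserApiViewSet)
#   router.register(r'payments', PaymentApiViewSet)
#   urlpatterns = router.urls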
| 1.921875 | 2 |
Python/Nearly Lucky Number.py | bic-potato/codeforces_learning | 0 | 12790057 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 19 21:11:16 2020
@author: zuoxichen
"""
import sys
def main_args():
a=list(input())
k=0
n=0
for i in a:
n+=1
if (i=='4' or i=='7'):
k+=1
else:
k+=0
return [n,k]
list1 = main_args()
count = list1[1]  # how many digits of the input are lucky (4 or 7)
# the number is "nearly lucky" when this count is itself a lucky number,
# i.e. positive and written only with the digits 4 and 7
if count > 0 and all(d in '47' for d in str(count)):
    print('YES')
else:
    print('NO')
| 3.515625 | 4 |
server/etes/migrations/0024_auto_20181119_1213.py | ethanmnrd/TicketXchnge | 0 | 12790058 |
# Generated by Django 2.1.3 on 2018-11-19 20:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('etes', '0023_auto_20181119_1203'),
]
operations = [
migrations.AddField(
model_name='ticket',
name='ticket_address',
field=models.CharField(default='San José State University, Washington Sq, San Jose, CA, United States', max_length=200),
preserve_default=False,
),
migrations.AlterField(
model_name='ticket',
name='ticket_price',
field=models.DecimalField(decimal_places=2, max_digits=10),
),
]
| 1.617188 | 2 |
taschenrechner.py | it-moerike/python | 0 | 12790059 |
from tkinter import *
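# Simple Tkinter four-function calculator with German UI labels
# ("Taschenrechner" = calculator, "Los" = go).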
def rechnen():
if operator.curselection() == (0,):
ausgabe["text"] = float(zahl1.get()) + float(zahl2.get())
elif operator.curselection() == (1,):
ausgabe["text"] = float(zahl1.get()) - float(zahl2.get())
elif operator.curselection() == (2,):
ausgabe["text"] = float(zahl1.get()) * float(zahl2.get())
elif operator.curselection() == (3,):
ausgabe["text"] = float(zahl1.get()) / float(zahl2.get())
window = Tk()
window.title("Taschenrechner")
zahl1 = Entry(window)
operator = Listbox(window)
operator.insert(0, "+")
operator.insert(1, "-")
operator.insert(2, "*")
operator.insert(3, "/")
zahl2 = Entry(window)
button = Button(window, command=rechnen, text="Los", bg='#FBD975')
ausgabe = Label(window)
zahl1.grid(row=0, column=0)
operator.grid(row=0, column=1)
zahl2.grid(row=0, column=2)
button.grid(row=1, column=2, sticky=E)
ausgabe.grid(row=2)
window.mainloop()
| 3.21875 | 3 |
scripts/mongodb_store.py | coastrock/CEBD1261-2019-fall-group-project | 1 | 12790060 | try:
from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession
import pyspark.sql.functions as f
# from operator import add
except Exception as e:
print(e)
## http://www.hongyusu.com/imt/technology/spark-via-python-basic-setup-count-lines-and-word-counts.html
def push_mongo():
spark = SparkSession \
.builder \
.appName("Push to MongoDB") \
.master("spark://master:7077") \
.config("spark.mongodb.input.uri", "mongodb://root:password@mongo/test.coll?authSource=admin") \
.config("spark.mongodb.output.uri", "mongodb://root:password@mongo/test.coll?authSource=admin") \
.config('spark.jars.packages', 'org.mongodb.spark:mongo-spark-connector_2.11:2.4.0')\
.getOrCreate()
sc = spark.sparkContext
sc.setLogLevel('WARN')
# Reading Data from volume
acc_mongo=spark.read.csv("/volume/data")
    # Show the data before writing it to MongoDB
acc_mongo.show()
# Store data in MongoDB
acc_mongo.write.format("com.mongodb.spark.sql.DefaultSource").mode("append").save()
# End the Spark Context
spark.stop()
if __name__ == "__main__":
push_mongo()
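# Usage sketch (assumption -- connector version mirrors the config above):
#   spark-submit --packages org.mongodb.spark:mongo-spark-connector_2.11:2.4.0 scripts/mongodb_store.py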
| 2.421875 | 2 |
UVa 231 - Testing the Catcher/sample/main.py | tadvi/uva | 1 | 12790061 | '''
Created on Jul 20, 2013
@author: <NAME>
'''
import sys
INF = 1 << 31
def LDS(array):
N = len(array)
longest = [0] * N
longest[0] = 1
for i in range(1, N):
currMax = 1
for j in range(i):
if array[i] <= array[j] and longest[j] + 1 > currMax:
currMax = longest[j] + 1
longest[i] = currMax
return max(longest)
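# LDS above is the O(n^2) longest non-increasing subsequence, i.e. the most
# missiles a single catcher can intercept in UVa 231.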
if __name__ == '__main__':
sys.stdin = open('input.txt', 'r')
array = []
while True:
n = int(input())
if n == -1:
print('maximum possible interceptions: %d' % LDS(array))
array = []
n1 = int(input())
if n1 == -1:
break
else:
array.append(n1)
else:
array.append(n)
| 3.125 | 3 |
lib/config/default.py | yaopengUSTC/mbit-skin-cancer | 3 | 12790062 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from yacs.config import CfgNode as CN
_C = CN()
# ----- BASIC SETTINGS -----
_C.NAME = "default"
_C.OUTPUT_DIR = "./output/derm_7pt"
_C.VALID_STEP = 5
_C.SAVE_STEP = 5
_C.SHOW_STEP = 20
_C.PIN_MEMORY = True
_C.INPUT_SIZE = (224, 224) # (h, w)
_C.COLOR_SPACE = "RGB"
_C.RESUME_MODEL = ""
_C.RESUME_MODE = "all"
_C.CPU_MODE = False
_C.EVAL_MODE = False
_C.GPUS = [0, 1]
# ----- DATASET BUILDER -----
_C.DATASET = CN()
_C.DATASET.DATASET = ""
_C.DATASET.ROOT = ""
_C.DATASET.DATA_TYPE = "jpg"
_C.DATASET.TRAIN_JSON = ""
_C.DATASET.VALID_JSON = ""
_C.DATASET.TEST_JSON = ""
_C.DATASET.CLASS_NAME = ['BCC', 'NV', 'MEL', 'MISC', 'SK']
_C.DATASET.VALID_ADD_ONE_CLASS = False  # for ISIC_2019 valid and test, the number of classes is increased from 8 to 9.
_C.DATASET.ADD_CLASS_NAME = "UNK"
_C.DATASET.IMBALANCECIFAR = CN()
_C.DATASET.IMBALANCECIFAR.RATIO = 0.01
_C.DATASET.IMBALANCECIFAR.RANDOM_SEED = 0
# ----- BACKBONE BUILDER -----
_C.BACKBONE = CN()
_C.BACKBONE.TYPE = "RegNetY_800MF" # refer to lib/backbone/all_models.py
_C.BACKBONE.BBN = False
_C.BACKBONE.FREEZE = False
_C.BACKBONE.PRE_FREEZE = False
_C.BACKBONE.PRE_FREEZE_EPOCH = 5
_C.BACKBONE.PRETRAINED = True
_C.BACKBONE.PRETRAINED_MODEL = ""
# if using drop block, below are drop block parameter
_C.BACKBONE.DROP = CN()
_C.BACKBONE.DROP.BLOCK_PROB = 0.1
_C.BACKBONE.DROP.BLOCK_SIZE = 5
_C.BACKBONE.DROP.NR_STEPS = 50000
# dropout parameter to the last FC layer
_C.BACKBONE.DROP.OUT_PROB = 0.1
# ----- MODULE BUILDER -----
_C.MODULE = CN()
_C.MODULE.TYPE = "GAP" # "GAP", "Identity"
# ----- CLASSIFIER BUILDER -----
_C.CLASSIFIER = CN()
_C.CLASSIFIER.TYPE = "FC" # "FC", "FCNorm"
_C.CLASSIFIER.BIAS = True
# ----- LOSS BUILDER -----
_C.LOSS = CN()
_C.LOSS.WEIGHT_POWER = 1.1
_C.LOSS.EXTRA_WEIGHT = [1.0, 1.0, 1.0, 1.0, 1.0]
_C.LOSS.LOSS_TYPE = "CrossEntropy" # "CrossEntropy", "LDAMLoss", "FocalLoss", "LOWLoss", "GHMCLoss", "CCELoss", "MWNLoss"
_C.LOSS.SCHEDULER = "default" # "default"--the weights of all classes are "1.0",
# "re_weight"--re-weighting by the power of inverse class frequency at all train stage,
# "drw"--two-stage strategy using re-weighting at the second stage,
# "cls"--cumulative learning strategy to set loss weight.
# For drw scheduler
_C.LOSS.DRW_EPOCH = 50
# For cls scheduler
_C.LOSS.CLS_EPOCH_MIN = 20
_C.LOSS.CLS_EPOCH_MAX = 60
# For LDAMLoss
_C.LOSS.LDAM = CN()
_C.LOSS.LDAM.MAX_MARGIN = 0.5
# For FocalLoss
_C.LOSS.FOCAL = CN()
_C.LOSS.FOCAL.GAMMA = 2.0
_C.LOSS.FOCAL.TYPE = "sigmoid" # "cross_entropy", "sigmoid", "ldam"
_C.LOSS.FOCAL.SIGMOID = "normal" # "normal", "enlarge"
# For LOWLoss
_C.LOSS.LOW = CN()
_C.LOSS.LOW.LAMB = 0.01
# For GHMCLoss
_C.LOSS.GHMC = CN()
_C.LOSS.GHMC.BINS = 10
_C.LOSS.GHMC.MOMENTUM = 0.0
# For MWNLoss
_C.LOSS.MWNL = CN()
_C.LOSS.MWNL.GAMMA = 2.0
_C.LOSS.MWNL.BETA = 0.1
_C.LOSS.MWNL.TYPE = "fix" # "zero", "fix", "decrease"
_C.LOSS.MWNL.SIGMOID = "normal" # "normal", "enlarge"
# ----- TRAIN BUILDER -----
_C.TRAIN = CN()
_C.TRAIN.BATCH_SIZE = 32 # for every gpu
_C.TRAIN.MAX_EPOCH = 70
_C.TRAIN.SHUFFLE = True
_C.TRAIN.NUM_WORKERS = 8
_C.TRAIN.TENSORBOARD = CN()
_C.TRAIN.TENSORBOARD.ENABLE = True
# ----- SAMPLER BUILDER -----
_C.TRAIN.SAMPLER = CN()
_C.TRAIN.SAMPLER.TYPE = "default" # "default", "weighted sampler", "oversample"
_C.TRAIN.SAMPLER.IMAGE_TYPE = "derm" # "derm", "clinic". For derm_7pt dataset used.
_C.TRAIN.SAMPLER.BORDER_CROP = "pixel" # "pixel", "ratio"
_C.TRAIN.SAMPLER.BORDER_CROP_PIXEL = 0 # An integer specifying how many pixels to crop at the image border. Useful if images contain a black boundary.
_C.TRAIN.SAMPLER.BORDER_CROP_RATIO = 0.0 # the ratio of edge of the image to be cropped.
_C.TRAIN.SAMPLER.IMAGE_RESIZE = True # whether the input image needs to be resized to a fix size
_C.TRAIN.SAMPLER.IMAGE_RESIZE_SHORT = 450 # the need size of the short side of the input image
_C.TRAIN.SAMPLER.COLOR_CONSTANCY = False
_C.TRAIN.SAMPLER.CONSTANCY_POWER = 6.0
_C.TRAIN.SAMPLER.CONSTANCY_GAMMA = 0.0
# For Modified RandAugment
_C.TRAIN.SAMPLER.AUGMENT = CN()
_C.TRAIN.SAMPLER.AUGMENT.NEED_AUGMENT = False
_C.TRAIN.SAMPLER.AUGMENT.AUG_METHOD = "v1_0" # the method of Modified RandAugment ('v0_0' to 'v3_1') or RandAugment ('rand') (refer to: lib/data_transform/modified_randaugment.py)
_C.TRAIN.SAMPLER.AUGMENT.AUG_PROB = 0.7 # the probability parameter 'P' of Modified RandAugment (0.1 -- 0.9)
_C.TRAIN.SAMPLER.AUGMENT.AUG_MAG = 10 # the magnitude parameter 'M' of Modified RandAugment (1 -- 20)
_C.TRAIN.SAMPLER.AUGMENT.AUG_LAYER_NUM = 1 # the number of transformations applied to a training image if AUG_METHOD = 'rand'
# for BBN sampler
_C.TRAIN.SAMPLER.DUAL_SAMPLER = CN()
_C.TRAIN.SAMPLER.DUAL_SAMPLER.TYPE = "reversed" # "balance", "reverse", "uniform"
# for other sampler
_C.TRAIN.SAMPLER.WEIGHTED_SAMPLER = CN()
_C.TRAIN.SAMPLER.WEIGHTED_SAMPLER.TYPE = "balance" # "balance", "reverse"
# for multi crop
_C.TRAIN.SAMPLER.MULTI_CROP = CN()
_C.TRAIN.SAMPLER.MULTI_CROP.ENABLE = False # Should the crops be order or random for evaluation
_C.TRAIN.SAMPLER.MULTI_CROP.CROP_NUM = 16 # Number of crops to use during evaluation (must be N^2)
_C.TRAIN.SAMPLER.MULTI_CROP.L_REGION = 1.0 # Only crop within a certain range of the central area (along the long side of the image)
_C.TRAIN.SAMPLER.MULTI_CROP.S_REGION = 1.0 # Only crop within a certain range of the central area (along the short side of the image)
_C.TRAIN.SAMPLER.MULTI_CROP.SCHEME = 'average' # Averaging or voting over the crop predictions ("vote", "average")
# for multi transformation of the center crop
_C.TRAIN.SAMPLER.MULTI_SCALE = CN()
_C.TRAIN.SAMPLER.MULTI_SCALE.ENABLE = False # whether to perform multi transformation on the central crop
_C.TRAIN.SAMPLER.MULTI_SCALE.SCALE_NUM = 12 # Number of scales to use during evaluation (must be less than or equal to the length of SCALE_NAME)
_C.TRAIN.SAMPLER.MULTI_SCALE.SCALE_NAME = ["scale_+00", "flip_x_+00", "rotate_90_+00", "rotate_270_+00",
"scale_+10", "flip_x_+10", "rotate_90_+10", "rotate_270_+10",
"scale_+20", "flip_x_+20", "rotate_90_+20", "rotate_270_+20",
"scale_+30", "flip_x_+30", "rotate_90_+30", "rotate_270_+30",
"scale_-10", "flip_x_-10", "rotate_90_-10", "rotate_270_-10",
"flip_y_+00", "flip_y_+10", "flip_y_-10", "flip_y_+20"]
_C.TRAIN.SAMPLER.FIX_MEAN_VAR = CN()
_C.TRAIN.SAMPLER.FIX_MEAN_VAR.ENABLE = True # Normalize using the mean and variance of each image, or using fixed values
# A fixed set mean (input image will be subtracted from the mean, processing variance)
_C.TRAIN.SAMPLER.FIX_MEAN_VAR.SET_MEAN = [0.485, 0.456, 0.406]
# A fixed set variance
_C.TRAIN.SAMPLER.FIX_MEAN_VAR.SET_VAR = [0.229, 0.224, 0.225]
# ----- OPTIMIZER -----
_C.TRAIN.OPTIMIZER = CN()
_C.TRAIN.OPTIMIZER.TYPE = "SGD" # 'SGD', 'ADAM', 'NADAM', 'RMSPROP'
_C.TRAIN.OPTIMIZER.BASE_LR = 0.001
_C.TRAIN.OPTIMIZER.MOMENTUM = 0.9
_C.TRAIN.OPTIMIZER.WEIGHT_DECAY = 1e-4
# ----- LR_SCHEDULER -----
_C.TRAIN.LR_SCHEDULER = CN()
_C.TRAIN.LR_SCHEDULER.TYPE = "multistep" # "steplr", "multistep", "cosine", "warmup"
_C.TRAIN.LR_SCHEDULER.LR_LOWER_STEP = 20 # for 'steplr'
_C.TRAIN.LR_SCHEDULER.LR_STEP = [40, 50] # for 'multistep'
_C.TRAIN.LR_SCHEDULER.LR_FACTOR = 0.1
_C.TRAIN.LR_SCHEDULER.WARM_EPOCH = 5 # for 'warmup'
_C.TRAIN.LR_SCHEDULER.COSINE_DECAY_END = 0
# For valid or test
_C.TEST = CN()
_C.TEST.BATCH_SIZE = 128 # for every gpu
_C.TEST.NUM_WORKERS = 8
_C.TEST.MODEL_FILE = "best_model.pth"
def update_config(cfg, args):
cfg.defrost()
cfg.merge_from_file(args.cfg)
cfg.merge_from_list(args.opts)
cfg.freeze()
def update_cfg_name(cfg):
'''
modify the cfg.NAME
:param cfg:
:return:
'''
cfg.defrost()
cfg_name = cfg.DATASET.DATASET + "." + cfg.BACKBONE.TYPE + (
"_BBN." if cfg.BACKBONE.BBN else ".") + cfg.LOSS.LOSS_TYPE + cfg.NAME
cfg.merge_from_list(['NAME', cfg_name])
cfg.freeze()
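# Usage sketch (assumption -- mirrors the yacs pattern used above):
#   from lib.config.default import _C as cfg, update_config, update_cfg_name
#   update_config(cfg, args)  # args.cfg is a YAML path, args.opts a list of overrides
#   update_cfg_name(cfg)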
| 1.609375 | 2 |
wirexfers/__init__.py | plaes/wirexfers | 1 | 12790063 |
# -*- coding: utf-8 -*-
"""
wirexfers - an online payment library
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
WireXfers is an online payments library, written in Python, providing
a simple common API for various online payment protocols (IPizza,
Solo/TUPAS).
:copyright: (c) 2012-2014 <NAME>
:license: ISC, see LICENSE for more details.
"""
__title__ = 'wirexfers'
__version__ = '2014.06-dev'
__author__ = '<NAME>'
__license__ = 'ISC'
__copyright__ = 'Copyright 2012-2014 Priit Laes'
from .request import PaymentInfo, PaymentRequest
from .response import PaymentResponse
| 0.855469 | 1 |
thualign/utils/hook.py | bryant1410/Mask-Align | 27 | 12790064 |
# coding=utf-8
# Copyright 2021-Present The THUAlign Authors
import torch
import numpy as np
from .summary import scalar
from .misc import get_global_step
def print_grad(x, name="x"):
if type(x) == torch.Tensor:
x.register_hook(lambda x: print("Norm - {} {}:{}\n {}".format(name, list(x.shape), torch.norm(x), x)))
elif type(x) == torch.nn.Module:
pass # TODO
def print_grad_norm(x, name="x", summary=True, verbose=True):
if type(x) == torch.Tensor:
if verbose:
x.register_hook(lambda x: print("Norm - {} {}: {}".format(name, list(x.shape), torch.norm(x))))
if summary:
scalar('grad_norm/' + name + '/max', torch.max(x), get_global_step(), write_every_n_steps=1)
scalar('grad_norm/' + name + '/min', torch.min(x), get_global_step(), write_every_n_steps=1)
scalar('grad_norm/' + name + '/mean', torch.mean(x), get_global_step(), write_every_n_steps=1)
scalar('grad_norm/' + name + '/normavg', torch.norm(x)/x.nelement(), get_global_step(), write_every_n_steps=1)
elif type(x) == torch.nn.Module:
pass # TODO
def print_grad_max(x, name="x"):
if type(x) == torch.Tensor:
x.register_hook(lambda x: print("Grad max - {} {}:\n {}".format(name, list(x.shape), torch.max(x).item())))
elif type(x) == torch.nn.Module:
pass # TODO
global_collection = {}
collection_on = False
def start_global_collection():
global collection_on
collection_on = True
def stop_global_collection():
global collection_on
collection_on = False
def add_global_collection(v, name="var"):
global global_collection
if not collection_on:
return
if name in global_collection:
global_collection[name].append(v)
else:
global_collection[name] = [v]
def get_global_collection(name):
global global_collection
if name in global_collection:
return global_collection[name]
return None
def clear_global_collection():
global global_collection
global_collection = {} | 2.3125 | 2 |
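
# Usage sketch (hypothetical tensor):
#   x = torch.randn(3, requires_grad=True)
#   print_grad_norm(x, name="x", summary=False)
#   x.sum().backward()  # the registered hook fires here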
src/negotiating_agent/venv/lib/python3.8/site-packages/geniusweb/protocol/session/mopac/MOPACSettings.py | HahaBill/CollaborativeAI | 1 | 12790065 | from typing import List
from tudelft_utilities_logging.Reporter import Reporter
from geniusweb.deadline.Deadline import Deadline
from geniusweb.protocol.session.SessionProtocol import SessionProtocol
from geniusweb.protocol.session.SessionSettings import SessionSettings
from geniusweb.protocol.session.TeamInfo import TeamInfo
from geniusweb.protocol.tournament.Team import Team
from geniusweb.references.PartyWithProfile import PartyWithProfile
from geniusweb.voting.VotingEvaluator import VotingEvaluator
class MOPACSettings (SessionSettings):
'''
    Settings for MOPAC negotiation. In MOPAC, each party may get a "power"
    parameter containing a natural number ≥ 1.
'''
def __init__(self, participants:List[TeamInfo] ,
deadline:Deadline ,
votingevaluator:VotingEvaluator):
'''
@param participants the list of {@link PartyWithProfile} in clockwise
order. There must be at least 2 to run the MOPAC
protocol. This is not tested in the constructor
because this can be initialized with less, for use in
TournamentSettings.
@param deadline the {@link Deadline} for the negotiation
        @param votingevaluator the {@link VotingEvaluator} to use.
'''
self._participants = participants;
self._deadline = deadline;
if participants == None or deadline == None or votingevaluator == None:
raise ValueError(
"participants, deadline and votingeval must be not none")
self._votingevaluator = votingevaluator
self._checkTeams();
def getMaxRunTime(self)->float:
return self._deadline.getDuration() / 1000.
def getProtocol(self, logger:Reporter) -> SessionProtocol :
from geniusweb.protocol.session.mopac.MOPACState import MOPACState
from geniusweb.protocol.session.mopac.MOPAC import MOPAC
return MOPAC(MOPACState(None, [], None, self, {}), logger)
def getTeams(self ) -> List[TeamInfo] :
return list(self._participants)
def getParticipants(self ) -> List[TeamInfo] :
'''
bit hacky, same as getTeams, for deserialization...
'''
return list(self._participants)
def getDeadline(self)-> Deadline :
'''
@return the deadline for this negotiation
'''
return self._deadline
def getAllParties(self)->List[PartyWithProfile] :
return [ particip.getParties()[0] for particip in self._participants]
def getVotingEvaluator(self)->VotingEvaluator :
'''
@return a class that allows us to evaluate the voting results in
different ways, selectable by the user.
'''
return self._votingevaluator
def With(self, team:TeamInfo ) -> "MOPACSettings" :
if team.getSize() != 1:
raise ValueError(
"Added party must have one party but got " + str(team))
newparts:List[TeamInfo] = list(self._participants)
newparts.append(team)
return MOPACSettings(newparts, self._deadline, self._votingevaluator)
def __repr__(self)->str:
return "MOPACSettings[" + str(self._participants) + "," +\
str(self._deadline) + "," + \
type(self._votingevaluator).__name__ + "]";
def getTeamSize(self)->int:
return 1;
def __hash__(self):
return hash((tuple(self._participants), self._deadline, self._votingevaluator))
def __eq__(self, other):
return isinstance(other, self.__class__)\
and self._participants == other._participants \
and self._deadline == other._deadline \
and self._votingevaluator == other._votingevaluator
def _checkTeams(self):
'''
@throws IllegalArgumentException if teams have improper power settings.
'''
for team in self._participants:
if team.getSize() != 1:
raise ValueError("All teams must be size 1 but found " + str(team))
party = team.getParties()[0]
if 'power' in party.getParty().getParameters().getParameters():
power = party.getParty().getParameters().get("power")
if not isinstance(power, int):
raise ValueError(
"parameter 'power' for party" + str(party)
+ " must be integer but found " + str(power))
if power < 1:
raise ValueError(
"parameter 'power' for party" + str(party)
+ " must be >=1 but found " + str(power))
| 2.109375 | 2 |
data_visualization_app.py | vishwas1234567/Streamlit_tutorials | 5 | 12790066 | import streamlit as st
import plotly_express as px
import pandas as pd
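# Run this app with the standard Streamlit CLI:
#   streamlit run data_visualization_app.py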
# configuration
st.set_option('deprecation.showfileUploaderEncoding', False)
# title of the app
st.title("Data Visualization App")
# Add a sidebar
st.sidebar.subheader("Visualization Settings")
# Setup file upload
uploaded_file = st.sidebar.file_uploader(
label="Upload your CSV or Excel file. (200MB max)",
type=['csv', 'xlsx'])
global df
if uploaded_file is not None:
print(uploaded_file)
print("hello")
try:
df = pd.read_csv(uploaded_file)
except Exception as e:
print(e)
df = pd.read_excel(uploaded_file)
global numeric_columns
global non_numeric_columns
try:
st.write(df)
numeric_columns = list(df.select_dtypes(['float', 'int']).columns)
non_numeric_columns = list(df.select_dtypes(['object']).columns)
non_numeric_columns.append(None)
print(non_numeric_columns)
except Exception as e:
print(e)
st.write("Please upload file to the application.")
# add a select widget to the side bar
chart_select = st.sidebar.selectbox(
label="Select the chart type",
options=['Scatterplots', 'Lineplots', 'Histogram', 'Boxplot']
)
if chart_select == 'Scatterplots':
st.sidebar.subheader("Scatterplot Settings")
try:
x_values = st.sidebar.selectbox('X axis', options=numeric_columns)
y_values = st.sidebar.selectbox('Y axis', options=numeric_columns)
color_value = st.sidebar.selectbox("Color", options=non_numeric_columns)
plot = px.scatter(data_frame=df, x=x_values, y=y_values, color=color_value)
# display the chart
st.plotly_chart(plot)
except Exception as e:
print(e)
if chart_select == 'Lineplots':
st.sidebar.subheader("Line Plot Settings")
try:
x_values = st.sidebar.selectbox('X axis', options=numeric_columns)
y_values = st.sidebar.selectbox('Y axis', options=numeric_columns)
color_value = st.sidebar.selectbox("Color", options=non_numeric_columns)
plot = px.line(data_frame=df, x=x_values, y=y_values, color=color_value)
st.plotly_chart(plot)
except Exception as e:
print(e)
if chart_select == 'Histogram':
st.sidebar.subheader("Histogram Settings")
try:
x = st.sidebar.selectbox('Feature', options=numeric_columns)
bin_size = st.sidebar.slider("Number of Bins", min_value=10,
max_value=100, value=40)
color_value = st.sidebar.selectbox("Color", options=non_numeric_columns)
plot = px.histogram(x=x, data_frame=df, color=color_value)
st.plotly_chart(plot)
except Exception as e:
print(e)
if chart_select == 'Boxplot':
st.sidebar.subheader("Boxplot Settings")
try:
y = st.sidebar.selectbox("Y axis", options=numeric_columns)
x = st.sidebar.selectbox("X axis", options=non_numeric_columns)
color_value = st.sidebar.selectbox("Color", options=non_numeric_columns)
plot = px.box(data_frame=df, y=y, x=x, color=color_value)
st.plotly_chart(plot)
except Exception as e:
print(e) | 3.234375 | 3 |
rigidsearch/cli.py | robopsi/rigidsearch | 9 | 12790067 | # coding: utf-8
import os
import shutil
import json
import click
from werkzeug.utils import cached_property
class Context(object):
def __init__(self):
self.config_filename = os.environ.get('RIGIDSEARCH_CONFIG')
@cached_property
def app(self):
from rigidsearch.app import create_app
return create_app(self.config_filename)
pass_ctx = click.make_pass_decorator(Context, ensure=True)
@click.group()
@click.option('--config', type=click.Path(),
help='Path to the config file.')
@pass_ctx
def cli(ctx, config):
if config is not None:
ctx.config_filename = os.path.abspath(config)
@cli.command('index-folder')
@click.argument('config', type=click.File('rb'))
@click.option('--index-path', type=click.Path(),
help='Where to write the index to other than config default.')
@click.option('--save-zip', type=click.File('wb'),
help='Optional a zip file the index should be stored at '
'instead of modifying the index in-place.')
@pass_ctx
def index_folder_cmd(ctx, config, index_path, save_zip):
"""Indexes a path."""
from rigidsearch.search import index_tree, get_index_path
index_path = get_index_path(index_path=index_path, app=ctx.app)
try:
shutil.rmtree(index_path)
except (OSError, IOError):
pass
for event in index_tree(json.load(config), index_zip=save_zip,
index_path=index_path):
click.echo(event)
@cli.command('search')
@click.argument('query')
@click.option('--section', default='generic')
@click.option('--index-path', help='Path to the search index.')
@pass_ctx
def search_cmd(ctx, query, section, index_path):
"""Triggers a search from the command line."""
from rigidsearch.search import get_index, get_index_path
index_path = get_index_path(app=ctx.app)
index = get_index(index_path)
results = index.search(query, section=section)
for result in results['items']:
click.echo('%s (%s)' % (
result['path'],
result['title']
))
@cli.command('devserver')
@click.option('--bind', '-b', default='127.0.0.1:5001')
@pass_ctx
def devserver_cmd(ctx, bind):
"""Runs a local development server."""
parts = bind.split(':', 1)
if len(parts) == 2:
addr, port = parts
elif len(parts) == 1:
addr, port = bind, '5001'
if addr == '':
addr = '127.0.0.1'
ctx.app.run(addr, int(port), debug=True)
@cli.command('run')
@click.option('--bind', '-b', default='127.0.0.1:5001')
@click.option('--workers', '-w', default=1)
@click.option('--timeout', '-t', default=30)
@click.option('--loglevel', default='info')
@click.option('--accesslog', default='-')
@click.option('--errorlog', default='-')
@pass_ctx
def run_cmd(ctx, **options):
"""Runs the http web server."""
from rigidsearch.app import make_production_server
make_production_server(app=ctx.app, options=options).run()
def main():
cli(auto_envvar_prefix='RIGIDSEARCH')
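
# Usage sketch (assumption -- a console-script entry point wired to main()):
#   rigidsearch search "hello world" --section generic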
| 2.15625 | 2 |
src/sion.py | kamimura/py-sion | 1 | 12790068 | # Created by kamimura on 2018/07/21.
# Copyright © 2018 kamimura. All rights reserved.
import sys
import datetime
from antlr4 import *
from SIONLexer import SIONLexer
from SIONParser import SIONParser
from SIONVisitor import SIONVisitor
def load(file, encoding: str='utf-8', errors: str='strict') -> object:
data = file.read()
if isinstance(data, (bytes, bytearray)):
data = data.decode(encoding, errors)
stream = InputStream(data)
lexer = SIONLexer(stream)
tokens = CommonTokenStream(lexer)
parser = SIONParser(tokens)
tree = parser.si_self()
visitor = SIONVisitor()
return visitor.visit(tree)
def loads(s):
if isinstance(s, (bytes, bytearray)):
s = s.decode()
stream = InputStream(s)
lexer = SIONLexer(stream)
tokens = CommonTokenStream(lexer)
parser = SIONParser(tokens)
tree = parser.si_self()
visitor = SIONVisitor()
return visitor.visit(tree)
def str_esc(s):
    # escape backslashes first so the escape sequences added below are not re-escaped
    for o, n in [('\\', '\\\\'), ('"', '\\"'), ('\n', '\\n'), ('\r', '\\r')]:
        s = s.replace(o, n)
    return s
def dump(obj, file):
if obj is None:
print('nil', file=file, end='')
elif isinstance(obj, bool):
if obj:
        print('true', file=file, end='')
else:
print('false', file=file, end='')
elif isinstance(obj, (int, float)):
print(obj, file=file, end='')
elif isinstance(obj, str):
print(f'"{str_esc(obj)}"', file=file, end='')
elif isinstance(obj, (bytes, bytearray)):
print(f'.Data("{str(obj)[2:-1]}")', file=file, end='')
elif isinstance(obj, datetime.datetime):
print(f'.Date({obj.timestamp()})', file=file, end='')
elif isinstance(obj, (list, tuple)):
print(f'[', file=file, end='')
if len(obj) > 0:
for o in obj[:-1]:
dump(o, file)
print(',', file=file, end='')
dump(obj[-1], file)
print(']', file=file, end='')
elif isinstance(obj, dict):
print('[', file=file, end='')
ks = list(obj.keys())
if len(ks) == 0:
print(':', file=file, end='')
elif len(ks) == 1:
dump(ks[0], file)
print(':', file=file, end='')
dump(obj[ks[0]], file)
else:
for k in ks[:-1]:
dump(k, file)
print(':', file=file, end='')
dump(obj[k], file)
print(',', file=file, end='')
dump(ks[-1], file)
print(':', file=file, end='')
dump(obj[ks[-1]], file)
print(']', file=file, end='')
else:
raise TypeError(
f"Object of type '{obj.__class__.__name__}' is not SION serializable")
def dumps(obj: object):
if obj is None:
return 'nil'
if isinstance(obj, bool):
if obj:
return 'true'
return 'false'
if isinstance(obj, (int, float)):
return str(obj)
if isinstance(obj, str):
return f'"{str_esc(obj)}"'
if isinstance(obj, (bytes, bytearray)):
return f'.Data("{str(obj)[2:-1]}")'
if isinstance(obj, datetime.datetime):
        return f'.Date({obj.timestamp()})'
if isinstance(obj, (list, tuple)):
res = '['
if len(obj) > 0:
for o in obj[:-1]:
res += dumps(o) + ','
res += dumps(obj[-1])
res += ']'
return res
if isinstance(obj, dict):
res = '['
ks = list(obj.keys())
if len(ks) == 0:
res += ':'
elif len(ks) == 1:
res += dumps(ks[0]) + ':' + dumps(obj[ks[0]])
else:
for k in ks[:-1]:
            res += dumps(k) + ':' + dumps(obj[k]) + ','
res += dumps(ks[-1]) + ':' + dumps(obj[ks[-1]])
res += ']'
return res
raise TypeError(
f"Object of type '{obj.__class__.__name__}' is not SION serializable")
if __name__ == '__main__':
import pprint
if len(sys.argv) > 1:
filename = sys.argv[1]
else:
filename = '../test/t.sion'
with open(filename) as f:
obj = load(f)
pprint.pprint(obj)
with open('../test/output.sion', 'w') as f:
dump(obj, f)
s = '''
[
"array" : [
nil,
true,
1, // Int in decimal
1.0, // Double in decimal
"one",
[1],
["one" : 1.0]
],
"bool" : true,
"data" : .Data("R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7"),
"date" : .Date(0x0p+0),
"dictionary" : [
"array" : [],
"bool" : false,
"double" : 0x0p+0,
"int" : 0,
"nil" : nil,
"object" : [:],
"string" : ""
],
"double" : 0x1.518f5c28f5c29p+5, // Double in hexadecimal
"int" : -0x2a, // Int in hexadecimal
"nil" : nil,
"string" : "漢字、カタカナ、ひらがなの入ったstring😇",
"url" : "https://github.com/dankogai/",
nil : "Unlike JSON and Property Lists,",
true : "Yes, SION",
1 : "does accept",
1.0 : "non-String keys.",
[] : "like",
[:] : "Map of ECMAScript."
]
'''
obj = loads(s)
pprint.pprint(obj)
s = dumps(obj)
print(s)
| 2.203125 | 2 |
src/project/urls.py | kottenator/code.kottenator.com | 0 | 12790069 |
from django.contrib import admin
from django.urls import path, include
import project.auth.urls
import project.core.urls
from project.core.views import bad_request, permission_denied, page_not_found, server_error
import project.projects.urls
urlpatterns = [
path('admin/', admin.site.urls),
path('projects/', include(project.projects.urls)),
path('', include(project.auth.urls)),
path('', include(project.core.urls))
]
handler400 = bad_request
handler403 = permission_denied
handler404 = page_not_found
handler500 = server_error
| 1.898438 | 2 |
code/matplotlib/test.py | qiudebo/13learn | 1 | 12790070 | # *-* coding:utf-8 *-*
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from matplotlib import style
import os
from os import path
from matplotlib.font_manager import fontManager
# Chart coordinate system
| 1.382813 | 1 |
rdfframework/processors/__init__.py | KnowledgeLinks/rdfframework | 7 | 12790071 | """
RDF Processors
==============
Processors are used to manipulate RDF data within the framework.
:copyright: Copyright (c) 2016 by <NAME> and <NAME>.
:license: To be determined, see LICENSE.txt for details.
"""
from .propertyprocessors import PropertyProcessor
from .classprocessors import ClassProcessor
__author__ = "<NAME>, <NAME>"
__version__ = '0.0.1'
| 1.328125 | 1 |
atlas/foundations_rest_api/src/foundations_rest_api/filters/null_filter.py | DeepLearnI/atlas | 296 | 12790072 |
from foundations_rest_api.filters.api_filter_mixin import APIFilterMixin
class NullFilter(APIFilterMixin):
def __call__(self, result, params):
if result and isinstance(result, list):
new_params = {key: value for key, value in params.items() if key.endswith('_isnull')}
if new_params:
self._filter(result, new_params)
return result
def _filter(self, result, params):
for key, param_value in params.items():
column_name = key.split('_isnull', 1)[0]
value = self._parse_value(param_value)
if value is not None:
self._filter_column(result, column_name, value)
def _parse_value(self, param_value):
from foundations_rest_api.filters.parsers import BoolParser
parser = BoolParser()
return parser.parse(param_value)
def _filter_column(self, result, column_name, value):
# Explicit is better than implicit [Zen of Python, 1]
# This is because "value" can also be None and in that case filtering is discarded
if value is True:
self._filter_by_null_values(result, column_name)
elif value is False:
self._filter_by_not_null_values(result, column_name)
def _is_none(self, value):
return value is None or self._is_nan(value)
def _is_nan(self, value):
import math
return isinstance(value, float) and math.isnan(value)
def _filter_by_null_values(self, result, column_name):
def column_value_is_null(item):
value, item_parser = self._get_item_property_value_and_parser(item, column_name, parse=False)
return item_parser is not None and self._is_none(value)
return self._in_place_filter(column_value_is_null, result)
def _filter_by_not_null_values(self, result, column_name):
def column_value_is_not_null(item):
value, item_parser = self._get_item_property_value_and_parser(item, column_name, parse=False)
return item_parser is not None and not self._is_none(value)
return self._in_place_filter(column_value_is_not_null, result)
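
# Usage sketch (hypothetical rows and query params):
#   rows = [{'name': 'a', 'score': None}, {'name': 'b', 'score': 1.0}]
#   NullFilter()(rows, {'score_isnull': 'true'})  # keeps only rows with a null score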
| 2.75 | 3 |
problems/interleaving_str.py | apoorvkk/LeetCodeSolutions | 1 | 12790073 | class Solution:
def isInterleave(self, s1, s2, s3):
"""
:type s1: str
:type s2: str
:type s3: str
:rtype: bool
"""
return self._is_interleave(s1, s2, s3, set(), 0, 0, 0)
def _is_interleave(self, s1, s2, s3, memo, s1_start, s2_start, s3_start):
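        # memo records (s1_start, s2_start, s3_start) states that already failed,
        # so each starting state is fully explored at most once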
if s3_start >= len(s3):
return s1_start >= len(s1) and s2_start >= len(s2)
encoding = str(s1_start) + '|' + str(s2_start) + '|' + str(s3_start)
if encoding in memo:
return False
s1_counter = s1_start if s1_start < len(s1) else None
s2_counter = s2_start if s2_start < len(s2) else None
def _find_prefix(counter, p, other_start):
for i in range(s3_start, len(s3)):
if counter is not None:
if s3[i] == p[counter]:
if s1 == p:
if self._is_interleave(s1, s2, s3, memo, counter+1, other_start, i+1):
return True
else:
if self._is_interleave(s1, s2, s3, memo, other_start, counter+1, i+1):
return True
counter += 1
else:
counter = None
if counter is not None and counter >= len(p):
counter = None
if _find_prefix(s1_counter, s1, s2_start):
return True
if _find_prefix(s2_counter, s2, s1_start):
return True
memo.add(encoding)
return False
| 3.53125 | 4 |
pyDLib/GUI/fields.py | benoitkugler/abstractDataLibrary | 0 | 12790074 | """Implements widgets to visualize and modify basic fields (French-language UI).
ASSOCIATION should be updated with custom widgets, since common.abstractDetails will use it.
"""
import datetime
import re
from collections import defaultdict
from typing import List, Any
from PyQt5.QtCore import pyqtSignal, Qt, QPoint
from PyQt5.QtGui import QColor, QPen, QBrush, QIcon
from PyQt5.QtWidgets import (QFrame, QHBoxLayout, QPushButton, QLineEdit, QLabel, QComboBox, QSpinBox, QDoubleSpinBox,
QCheckBox, QCompleter, QGridLayout, QVBoxLayout, QPlainTextEdit, QStyledItemDelegate, QToolTip)
from . import list_views, clear_layout, Icons
from ..Core import formats
class NouveauTelephone(list_views.abstractNewButton):
LABEL = "Ajouter un numéro"
@staticmethod
def IS_TELEPHONE(s: str):
r = re.compile(r'[0-9]{9,10}')
m = r.search(s.replace(' ', ''))
return (m is not None)
def _clear(self):
clear_layout(self.layout())
def enter_edit(self):
self._clear()
line_layout = self.layout()
self.entree = QLineEdit()
self.entree.setObjectName("nouveau-numero-tel")
self.entree.setAlignment(Qt.AlignCenter)
self.entree.setPlaceholderText("Ajouter...")
add = QPushButton()
add.setIcon(QIcon(Icons.Valid))
add.clicked.connect(self.on_add)
self.entree.editingFinished.connect(self.on_add)
line_layout.addWidget(self.entree)
line_layout.addWidget(add)
line_layout.setStretch(0, 3)
line_layout.setStretch(1, 1)
def on_add(self):
num = self.entree.text()
if self.IS_TELEPHONE(num):
self.entree.setPlaceholderText("Ajouter...")
self.data_changed.emit(num)
self._clear()
self.set_button()
else:
self.entree.selectAll()
QToolTip.showText(self.entree.mapToGlobal(
QPoint(0, 10)), "Numéro invalide")
class Tels(list_views.abstractMutableList):
LIST_PLACEHOLDER = "Aucun numéro."
LIST_HEADER = None
BOUTON = NouveauTelephone
def __init__(self, collection: list, is_editable):
collection = self.from_list(collection)
super().__init__(collection, is_editable)
def on_add(self, item):
"""Convert to pseuso acces"""
super(Tels, self).on_add(list_views.PseudoAccesCategorie(item))
def set_data(self, collection):
collection = self.from_list(collection)
super(Tels, self).set_data(collection)
def get_data(self):
col = super(Tels, self).get_data()
return [tel.Id for tel in col]
class Duree(QLabel):
"""Display the numbers of day between two date widgets.
These widgets have to implement a get_data method, which return a date.date"""
def __init__(self, begining, end):
super().__init__()
self.begining = begining
self.end = end
self.begining.data_changed.connect(self.set_data)
self.end.data_changed.connect(self.set_data)
self.set_data()
def set_data(self, *args):
"""we cant to call set_data to manually update"""
db = self.begining.get_data() or formats.DATE_DEFAULT
df = self.end.get_data() or formats.DATE_DEFAULT
jours = max((df - db).days + 1, 0)
self.setText(str(jours) + (jours >= 2 and " jours" or " jour"))
# -------------- Enumerations vizualisation --------------
class abstractEnum(QLabel):
VALUE_TO_LABEL = {}
"""Dict. giving label from raw value"""
def set_data(self, value):
self.value = value
self.setText(self.VALUE_TO_LABEL.get(self.value, ""))
def get_data(self):
return self.value
class abstractEnumEditable(QComboBox):
data_changed = pyqtSignal(object)
VALEURS_LABELS = []
"""List of tuples (value, label) or None to add a separator"""
def __init__(self, parent=None):
super().__init__(parent)
self.set_choix(self.VALEURS_LABELS)
self.currentIndexChanged.connect(
lambda i: self.data_changed.emit(self.currentData()))
def set_choix(self, choix):
self.places = {}
for t in choix:
if t:
self.places[t[0]] = self.count()
self.addItem(t[1], userData=t[0])
else:
self.insertSeparator(self.count())
def set_data(self, value):
if value is None:
self.setCurrentIndex(-1)
else:
self.setCurrentIndex(self.places[value])
self.data_changed.emit(self.get_data())
def get_data(self):
return self.currentData()
# -------------------- Common types --------------------
class DepartementFixe(abstractEnum):
VALUE_TO_LABEL = formats.DEPARTEMENTS
class DepartementEditable(abstractEnumEditable):
VALEURS_LABELS = sorted((i, i + " " + v)
for i, v in formats.DEPARTEMENTS.items())
class SexeFixe(abstractEnum):
VALUE_TO_LABEL = formats.SEXES
class SexeEditable(abstractEnumEditable):
VALEURS_LABELS = sorted((k, v) for k, v in formats.SEXES.items())
class ModePaiementFixe(abstractEnum):
VALUE_TO_LABEL = formats.MODE_PAIEMENT
class ModePaiementEditable(abstractEnumEditable):
VALEURS_LABELS = sorted([(k, v) for k, v in formats.MODE_PAIEMENT.items()])
# ------------- Simple string-like field -------------
class abstractSimpleField(QLabel):
FONCTION_AFF = None
TOOLTIP = None
data_changed = pyqtSignal() # dummy signal
def __init__(self, *args, **kwargs):
super(abstractSimpleField, self).__init__(*args, **kwargs)
if self.TOOLTIP:
self.setToolTip(self.TOOLTIP)
def set_data(self, value):
self.value = value
label = self.FONCTION_AFF(value)
self.setText(label)
def get_data(self):
return self.value
class BoolFixe(abstractSimpleField):
FONCTION_AFF = staticmethod(formats.abstractRender.boolen)
class EurosFixe(abstractSimpleField):
FONCTION_AFF = staticmethod(formats.abstractRender.euros)
class PourcentFixe(abstractSimpleField):
FONCTION_AFF = staticmethod(formats.abstractRender.pourcent)
class DefaultFixe(abstractSimpleField):
FONCTION_AFF = staticmethod(formats.abstractRender.default)
class DateFixe(abstractSimpleField):
FONCTION_AFF = staticmethod(formats.abstractRender.date)
class DateHeureFixe(abstractSimpleField):
FONCTION_AFF = staticmethod(formats.abstractRender.dateheure)
# --------------- Numeric fields ---------------
class abstractEntierEditable(QSpinBox):
UNITE = ""
MAX = None
MIN = 0
DEFAULT = 0
data_changed = pyqtSignal(int)
def __init__(self, parent=None):
super().__init__(parent)
self.setMaximum(self.MAX)
self.setMinimum(self.MIN)
self.setSuffix(self.UNITE)
self.valueChanged.connect(self.data_changed.emit)
self.setSpecialValueText(" ")
def set_data(self, somme):
somme = somme if somme is not None else (self.MIN - 1)
self.setValue(somme)
def get_data(self):
return self.value()
class EntierEditable(abstractEntierEditable):
MAX = 10000
class PourcentEditable(abstractEntierEditable):
UNITE = "%"
MAX = 100
DEFAULT = 0
class EurosEditable(QDoubleSpinBox):
data_changed = pyqtSignal(float)
def __init__(self, parent=None):
super().__init__(parent)
self.setMaximum(100000)
self.setMinimum(-1)
self.setSpecialValueText(" ")
self.setSuffix("€")
self.valueChanged.connect(self.data_changed.emit)
def set_data(self, somme):
somme = somme if somme is not None else -1
self.setValue(somme)
def get_data(self):
v = self.value()
return v if v != -1 else None
class BoolEditable(QFrame):
data_changed = pyqtSignal(bool)
def __init__(self, parent=None):
super().__init__(parent)
cb = QCheckBox()
l = QLabel()
        self.setAutoFillBackground(True)  # to avoid a transparent background
layout = QHBoxLayout(self)
layout.addWidget(cb)
layout.addWidget(l)
def callback(b):
l.setText(b and "Oui" or "Non")
self.data_changed.emit(b)
cb.clicked.connect(callback)
self.cb = cb
self.l = l
def set_data(self, b):
b = b or False
self.cb.setChecked(b)
self.l.setText(b and "Oui" or "Non")
def get_data(self):
return self.cb.isChecked()
class DefaultEditable(QLineEdit):
data_changed = pyqtSignal(str)
MAX_LENGTH = None
def __init__(self, parent=None, completion=[]):
super().__init__(parent)
self.textChanged.connect(self.data_changed.emit)
if completion:
c = QCompleter(completion)
c.setCaseSensitivity(Qt.CaseInsensitive)
self.setCompleter(c)
if self.MAX_LENGTH:
self.setMaxLength(self.MAX_LENGTH)
def set_data(self, value):
self.setText(str(value or ""))
def get_data(self):
return self.text()
def LimitedDefaultEditable(max_length):
return type("LDefaultEditable", (DefaultEditable,), {"MAX_LENGTH": max_length})
class OptionnalTextEditable(QFrame):
"""QCheckbox + QLineEdit"""
data_changed = pyqtSignal(object)
def __init__(self, parent=None):
super(OptionnalTextEditable, self).__init__(parent=parent)
self.active = QCheckBox()
self.text = QLineEdit()
self.active.clicked.connect(self.on_click)
self.text.textChanged.connect(self.on_text_changed)
layout = QHBoxLayout(self)
layout.addWidget(self.active)
layout.addWidget(self.text)
def on_click(self):
self.text.setEnabled(self.active.isChecked())
self.data_changed.emit(self.get_data())
def on_text_changed(self, text):
is_active = bool(text.strip())
self.active.setChecked(is_active)
self.text.setEnabled(is_active)
self.data_changed.emit(self.get_data())
def get_data(self):
text = self.text.text().strip()
active = self.active.isChecked() and bool(text)
return text if active else None
def set_data(self, text: str):
text = text or ""
is_active = bool(text.strip())
self.active.setChecked(is_active)
self.text.setEnabled(is_active)
self.text.setText(text)
self.data_changed.emit(self.get_data())
class DateEditable(QFrame):
data_changed = pyqtSignal(object)
def __init__(self, parent=None):
super().__init__(parent)
layout = QGridLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
j = QSpinBox()
j.setMinimum(0)
j.setMaximum(31)
j.setToolTip("Jour")
m = QSpinBox()
m.setMinimum(0)
m.setMaximum(12)
m.setToolTip("Mois")
a = QSpinBox()
a.setMinimum(0)
a.setMaximum(2500)
a.setToolTip("Année")
j.setAlignment(Qt.AlignCenter)
m.setAlignment(Qt.AlignCenter)
a.setAlignment(Qt.AlignCenter)
j.setSpecialValueText("-")
m.setSpecialValueText("-")
a.setSpecialValueText("-")
layout.addWidget(j, 0, 0)
layout.addWidget(m, 0, 1)
layout.addWidget(a, 0, 2, 1, 2)
j.valueChanged.connect(
lambda v: self.data_changed.emit(self.get_data()))
m.valueChanged.connect(
lambda v: self.data_changed.emit(self.get_data()))
a.valueChanged.connect(
lambda v: self.data_changed.emit(self.get_data()))
a.editingFinished.connect(self.on_editing)
self.ws = (a, m, j)
def _change_year_text_color(self, is_ok):
color = "black" if is_ok else "red"
self.ws[0].setStyleSheet(f"color : {color}")
def on_editing(self):
current_year = self.ws[0].value()
if not current_year:
return
self._change_year_text_color(not current_year < 100)
self.ws[0].setValue(current_year)
def get_data(self):
d = [self.ws[0].value(), self.ws[1].value(), self.ws[2].value()]
try:
return datetime.date(*d)
except ValueError:
return
def set_data(self, d):
if d is None:
self.ws[0].clear()
self.ws[1].clear()
self.ws[2].clear()
else:
self.ws[0].setValue(d.year)
self.ws[1].setValue(d.month)
self.ws[2].setValue(d.day)
self.on_editing()
class MontantEditable(QFrame):
def __init__(self, parent=None):
super().__init__(parent)
self.setAutoFillBackground(True)
self.val = QDoubleSpinBox()
self.val.setMaximum(100000)
self.par_jour = QCheckBox("Par jour")
layout = QVBoxLayout(self)
layout.addWidget(self.val)
layout.addWidget(self.par_jour)
def set_data(self, value):
self.val.setValue(value[0])
self.par_jour.setChecked(value[1])
def get_data(self):
return [self.val.value(), self.par_jour.isChecked()]
class DateRange(QFrame):
data_changed = pyqtSignal(object, object)
def __init__(self):
super().__init__()
self.debut = DateEditable()
self.fin = DateEditable()
self.debut.data_changed.connect(self.on_change)
self.fin.data_changed.connect(self.on_change)
layout = QHBoxLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(QLabel("Du "))
layout.addWidget(self.debut)
layout.addWidget(QLabel(" au "))
layout.addWidget(self.fin)
def on_change(self):
self.data_changed.emit(*self.get_data())
def get_data(self):
return self.debut.get_data(), self.fin.get_data()
def set_data(self, v):
v = v or [None, None]
self.debut.set_data(v[0])
self.fin.set_data(v[1])
class Texte(QPlainTextEdit):
data_changed = pyqtSignal(str)
def __init__(self, text, is_editable, placeholder="Informations complémentaires"):
super().__init__(text)
self.setSizeAdjustPolicy(QPlainTextEdit.AdjustToContents)
self.setMinimumHeight(50)
self.setMinimumWidth(150)
self.setPlaceholderText(placeholder)
self.setReadOnly(not is_editable)
self.textChanged.connect(
lambda: self.data_changed.emit(self.toPlainText()))
def get_data(self):
return self.toPlainText()
def set_data(self, text):
self.setPlainText(text)
class OptionsButton(QPushButton):
"""Bouton to open window to acces advanced options.
CLASS_PANEL_OPTIONS is responsible for doing the actual modification"""
TITLE = "Advanced options"
CLASS_PANEL_OPTIONS:Any = None
options_changed = pyqtSignal()
def __init__(self, acces, is_editable):
super(OptionsButton, self).__init__(self.TITLE)
self.clicked.connect(self.show_options)
self.acces = acces
self.is_editable = is_editable
def show_options(self):
f = self.CLASS_PANEL_OPTIONS(self.acces, self.is_editable)
if f.exec_():
self.options_changed.emit()
def set_data(self, *args):
pass
###---------------------------- Wrappers---------------------------- ###
def _get_widget(classe, value):
w = classe()
w.set_data(value)
return w
def Default(value, is_editable):
return _get_widget(is_editable and DefaultEditable or DefaultFixe, value)
def Booleen(value, is_editable):
return _get_widget(is_editable and BoolEditable or BoolFixe, value)
def Entier(entier, is_editable):
return _get_widget(is_editable and EntierEditable or DefaultFixe, entier)
def Euros(value, is_editable):
return _get_widget(is_editable and EurosEditable or EurosFixe, value)
def Pourcent(value, is_editable):
return _get_widget(is_editable and PourcentEditable or PourcentFixe, value)
def Date(value, is_editable):
return _get_widget(is_editable and DateEditable or DateFixe, value)
def Departement(value, is_editable):
return _get_widget(is_editable and DepartementEditable or DepartementFixe, value)
def Sexe(value, is_editable):
return _get_widget(is_editable and SexeEditable or SexeFixe, value)
def Adresse(value, is_editable):
return Texte(value, is_editable, placeholder="Adresse...")
def ModePaiement(value, is_editable):
return _get_widget(is_editable and ModePaiementEditable or ModePaiementFixe, value)
def DateHeure(value, is_editable):
if is_editable:
raise NotImplementedError("No editable datetime widget !")
w = DateHeureFixe()
w.set_data(value)
return w
def OptionnalText(value, is_editable):
return _get_widget(is_editable and OptionnalTextEditable or DefaultFixe, value)
"""Correspondance field -> widget (callable)"""
TYPES_WIDGETS = defaultdict(
lambda: Default,
date_naissance=Date,
departement_naissance=Departement,
sexe=Sexe,
tels=Tels,
adresse=Adresse,
date=Date,
date_debut=Date,
date_fin=Date,
date_arrivee=Date,
date_depart=Date,
date_emission=Date,
date_reception=Date,
nb_places=Entier,
nb_places_reservees=Entier,
age_min=Entier,
age_max=Entier,
acquite=Booleen,
is_acompte=Booleen,
is_remboursement=Booleen,
reduc_speciale=Euros,
acompte_recu=Euros,
valeur=Euros,
total=Euros,
prix=Euros,
date_heure_modif=DateHeure,
date_reglement=Date,
date_encaissement=Date,
info=Texte,
message=Texte,
mode_paiement=ModePaiement,
)
ASSOCIATION = {}
def add_widgets_type(type_widgets, abstract_ASSOCIATION):
TYPES_WIDGETS.update(type_widgets)
for k, v in abstract_ASSOCIATION.items():
t = TYPES_WIDGETS[k]
ASSOCIATION[k] = (v[0], v[1], v[2], t, v[3])
add_widgets_type({}, formats.ASSOCIATION)
## ------------------Custom delegate ------------------ ##
class delegateAttributs(QStyledItemDelegate):
CORRES = {"montant": MontantEditable, "mode_paiement": ModePaiementEditable,
"valeur": EurosEditable,
"description": DefaultEditable, "quantite": EntierEditable,
"obligatoire": BoolEditable}
"""Correspondance between fields and widget classes"""
size_hint_: tuple
def __init__(self, parent):
QStyledItemDelegate.__init__(self, parent)
self.size_hint_ = None
self.row_done_ = None
@staticmethod
def paint_filling_rect(option, painter, proportion):
rect = option.rect
painter.save()
proportion = min(proportion, 100)
rs, vs, bs = (30,64,55) # start
re, ve, be = (153,242,200) # end
t = proportion / 100
color = QColor( rs + t*(re - rs), vs + t*(ve - vs), bs + t*(be - bs))
painter.setPen(QPen(color, 0.5, Qt.SolidLine,
Qt.RoundCap, Qt.RoundJoin))
painter.setBackgroundMode(Qt.OpaqueMode)
painter.setBackground(QBrush(color))
painter.setBrush(QBrush(color))
rect.setWidth(rect.width() * proportion / 100)
painter.drawRoundedRect(rect, 5, 5)
painter.restore()
@staticmethod
def _get_field(index):
return index.model().header[index.column()]
def sizeHint(self, option, index):
if self.size_hint_ and self.size_hint_[0] == index:
return self.size_hint_[1]
return super().sizeHint(option, index)
def setEditorData(self, editor, index):
value = index.data(role=Qt.EditRole)
editor.set_data(value)
self.sizeHintChanged.emit(index)
def createEditor(self, parent, option, index):
field = self._get_field(index)
other = index.data(role=Qt.UserRole)
classe = self.CORRES[field]
w = classe(parent, other) if other else classe(parent)
self.size_hint_ = (index, w.sizeHint())
self.row_done_ = index.row()
return w
def destroyEditor(self, editor, index):
self.size_hint_ = None
super().destroyEditor(editor, index)
def setModelData(self, editor, model, index):
value = editor.get_data()
model.set_data(index, value)
| 2.359375 | 2 |
charts.py | TurtleOld/budget_interface_flask | 0 | 12790075 | <gh_stars>0
from flask import Blueprint, render_template
import matplotlib.pyplot as plt
import datetime
from settings_database import cursor
from functions import get_total_amount, get_number_month
charts_route = Blueprint("charts", __name__)
# Выделяем число месяца из даты чека
def get_name_month_from_date(date_time):
date_without_dash = date_time.replace("-", "")
number_month = datetime.datetime.strptime(date_without_dash, "%Y%m%d").date().month
if number_month == 1:
return "Январь"
if number_month == 2:
return "Февраль"
if number_month == 3:
return "Март"
if number_month == 4:
return "Апрель"
if number_month == 5:
return "Май"
if number_month == 6:
return "Июнь"
if number_month == 7:
return "Июль"
if number_month == 8:
return "Август"
if number_month == 9:
return "Сентябрь"
if number_month == 10:
return "Октябрь"
if number_month == 11:
return "Ноябрь"
if number_month == 12:
return "Декабрь"
@charts_route.route("/charts")
def charts():
title = "Графики"
cursor.execute("SELECT * FROM receipt ORDER BY date_receipt")
product_information = cursor.fetchall()
listing_name_months = []
month_listing = []
number_month = []
amount_total = []
for date in product_information:
name_month = (get_name_month_from_date(str(date[0])))
listing_name_months.append(name_month)
number_month.append(get_number_month(str(date[0])))
for number in set(number_month):
cursor.execute(
"SELECT total_sum FROM receipt WHERE (extract (month from date_receipt)=%s) GROUP BY total_sum", (number,))
product_total_sum = cursor.fetchall()
amount_total.append(get_total_amount(product_total_sum))
for item in set(listing_name_months):
month_listing.append(item)
month_listing.sort(reverse=True)
dict_name_months = list(set(listing_name_months))
dict_name_months.sort(reverse=True)
index = dict_name_months
values = amount_total
plt.bar(index, values)
plt.title("Расходы по месяцам")
plt.savefig("static/img/plot_monthly_expenses.png")
return render_template("charts.html", plot_monthly_expenses="static/img/plot_monthly_expenses.png", title=title)
| 2.703125 | 3 |
src/bbbr/wsgi.py | zgoda/bbbrating-server | 0 | 12790076 | <reponame>zgoda/bbbrating-server
from .app import make_app
application = make_app()
| 1.25 | 1 |
proxysql_tools/galera/galera_node.py | akuzminsky/proxysql-tools | 26 | 12790077 | """Module describes GaleraNode class"""
from contextlib import contextmanager
import pymysql
from pymysql.cursors import DictCursor
from proxysql_tools import execute
class GaleraNodeState(object): # pylint: disable=too-few-public-methods
"""State of Galera node http://bit.ly/2r1tUGB """
PRIMARY = 1
JOINER = 2
JOINED = 3
SYNCED = 4
DONOR = 5
class GaleraNode(object):
"""
GaleraNode class describes a single node in Galera Cluster.
:param host: hostname of the node.
:param port: port to connect to.
:param user: MySQL username to connect to the node.
:param password: <PASSWORD>.
"""
def __init__(self, host, port=3306, user='root', password=<PASSWORD>):
self.host = host
self.port = port
self.user = user
self.password = password
@property
def wsrep_cluster_state_uuid(self):
"""Provides the current State UUID. This is a unique identifier
for the current state of the cluster and the sequence of changes
it undergoes.
"""
return self._status('wsrep_cluster_state_uuid')
@property
def wsrep_cluster_status(self):
"""Status of this cluster component. That is, whether the node is
part of a ``PRIMARY`` or ``NON_PRIMARY`` component."""
return self._status('wsrep_cluster_status')
@property
def wsrep_local_state(self):
"""Internal Galera Cluster FSM state number."""
return int(self._status('wsrep_local_state'))
@property
def wsrep_cluster_name(self):
"""The logical cluster name for the node."""
result = self.execute("SELECT @@wsrep_cluster_name")
return result[0]['@@wsrep_cluster_name']
def execute(self, query, *args):
"""Execute query in Galera Node.
:param query: Query to execute.
:type query: str
:return: Query result or None if the query is not supposed
to return result.
:rtype: dict
"""
with self._connect() as conn:
return execute(conn, query, *args)
@contextmanager
def _connect(self):
"""Connect to Galera node
:return: MySQL connection to the Galera node
:rtype: Connection
"""
connection = pymysql.connect( # pylint: disable=duplicate-code
host=self.host,
port=self.port,
user=self.user,
passwd=<PASSWORD>,
cursorclass=DictCursor
)
yield connection
connection.close()
def _status(self, status_variable):
"""Return value of a variable from SHOW GLOBAL STATUS"""
result = self.execute('SHOW GLOBAL STATUS LIKE %s', status_variable)
return result[0]['Value']
def __eq__(self, other):
return self.host == other.host and self.port == self.port
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return "%s:%d" % (self.host, self.port)
| 2.5 | 2 |
src/comment_analysis/parsing_utils.py | peppocola/DeClutter-Challenge-2020 | 5 | 12790078 | import json
from sklearn.preprocessing import LabelEncoder
from src.comment_analysis.url_utils import get_text_by_url
from src.csv.csv_utils import get_link_line_type, get_keywords
from src.keys import line, serialize_outpath
from nltk.stem.porter import *
from spacy.lang.en.stop_words import STOP_WORDS
from src.comment_analysis.java_re import *
def get_line(code, comment_line, comment_type):
code = code.splitlines()
try:
if comment_type == line:
if not re.match(r"^[\s]*//.*", code[
comment_line - 1]): # if the line doesn't start as a comment, the comment refers to this line
if not re.match(r"^[\s]*}?[\s]*(else|try|finally)?[\s]*{?[\s]*//.*[\s]*$", code[
comment_line - 1]): # if the line isnt just brackets and some keywords, the foucs line is the comment_line
return code[comment_line - 1]
i = 0
while re.match(r"^[\s]*//.*", code[comment_line + i]) or re.match(r"^[\s]*$", code[comment_line + i]) or re.match(r"[\s]*[^}{](try|else|finally)[\s]*{?", code[comment_line + i]): # while the line starts as a comment, ignore it. I do this because they use multiple line comment to simulate a block
i += 1
if re.match(r"^[\s]*}.*", code[comment_line + i]) or re.match(r"[\s]*(try|else|finally)[\s]*{?", code[
comment_line + i]): # if this matches, the block is empty so i take the first non comment non empty line before the comment.
i = -2
while re.match(r"^[\s]*//.*", code[comment_line + i]) or re.match(r"^[\s]*$",
code[comment_line + i]) or re.match(
r"^[\s]*/\*.*", code[comment_line + i]) or re.match(r"^\*", code[comment_line + i]) or re.match(
r"^[\s]*\*/.*", code[comment_line + i]): # while the line is a comment or is blank, ignore it
i -= 1
return code[comment_line + i] # comment refers to that
# r"^[\s]*}?[\s]*(else|try|finally)?[\s]*{?[\s]*.*$"
else: # block or javadoc
# if the line doesn't start as a comment, the comment refers to this line
if not re.match(r"^[\s]*/\*.*", code[comment_line - 1]):
return code[comment_line - 1]
if comment_line >= len(code) - 1:
return code[comment_line - 2]
i = 0
if not re.match(r"^[\s]*.*\*/", code[comment_line - 1]):
while not re.match(r"^[\s]*\*/", code[comment_line + i]):
i += 1
i += 1
# while the line starts as a comment or is blank, or is an annotation, ignore it
while re.match(r"^[\s]*$", code[comment_line + i]) or re.match(r"^[\s]*@[^\s]*[\s]*$", code[comment_line + i]) or re.match(r"^[\s]*//.*", code[comment_line + i]) or re.match(r"[\s]*[^}{](try|else|finally)[\s]*{?", code[comment_line + i]):
i += 1
# if this matches, probabily the comment refers to the line before
if re.match(r"^[\s]*}[\s]*.*", code[comment_line + i]) or re.match(r"[\s]*(try|else|finally)[\s]*{?", code[comment_line + i]):
i = -2
# while the line is a comment or is blank, ignore it
while re.match(r"^[\s]*//.*", code[comment_line + i]) or re.match(r"^[\s]*$", code[comment_line + i]) or re.match(r"^[\s]*/\*.*", code[comment_line + i]) or re.match(r"^\*", code[comment_line + i]) or re.match(r"^[\s]*\*/.*", code[comment_line + i]):
i -= 1
return code[comment_line + i]
except IndexError:
return ""
def get_positions(lines=None, set='train'):
comment_type = 0
text_link = 1
comment_line = 2
positions = []
data = get_link_line_type(set=set)
if lines is None:
lines = get_lines(set=set)
i = 0
for row in data:
#print(row[comment_line], row[comment_type], row[text_link] + "#L" + str(row[comment_line]))
focus_line = lines[i]
#print(focus_line)
p = get_position(focus_line)
positions.append(p)
#print(p)
i += 1
return positions
def get_positions_encoded(lines=None, set='train'):
if lines is None:
positions = get_positions(set=set)
else:
positions = get_positions(lines, set=set)
le = LabelEncoder()
return le.fit_transform(positions)
def get_lines(serialized=True, serialize=False, set='train'):
if serialized:
x = open(serialize_outpath + 'serialized_' + set +'.json', 'r').read()
return json.loads(x)
comment_type = 0
text_link = 1
comment_line = 2
data = get_link_line_type(set=set)
lines = []
for row in data:
code = get_text_by_url(row[text_link])
focus_line = get_line(code, row[comment_line], row[comment_type])
lines.append(focus_line)
if serialize:
x = open(serialize_outpath + 'serialized_' + set +'.json', 'w')
x.write(json.dumps(lines))
return lines
def get_code_words(stemming=True, rem_keyws=True, lines=None, set='train'):
if lines is None:
lines = get_lines(set=set, serialized=False, serialize=True)
words = []
for line in lines:
words.append(word_extractor(line, stemming, rem_keyws))
return words
def word_extractor(string, stemming=True, rem_keyws=True):
string = remove_line_comment(string)
string = remove_block_comment(string)
splitted = code_split(string)
words = []
for part in splitted:
camel_case_parts = camel_case_split(part)
for camel in camel_case_parts:
words.append(camel.lower())
if stemming and rem_keyws:
return stem(remove_keywords(words))
elif stemming:
return stem(words)
else:
return remove_keywords(words)
def remove_keywords(words):
keywords = get_keywords()
non_keywords = []
for word in words:
if word not in keywords:
non_keywords.append(word)
return non_keywords
def stem(words):
stemmer = PorterStemmer()
stemmed = []
for token in words:
stemmed.append(stemmer.stem(token))
return stemmed
def camel_case_split(string):
if not string:
return string
words = [[string[0]]]
for c in string[1:]:
if words[-1][-1].islower() and c.isupper():
words.append(list(c))
else:
words[-1].append(c)
return [''.join(word) for word in words]
def remove_line_comment(string):
in_string = False
escape = False
comment = False
i = 0
for char in string:
if char == '"':
if in_string is True:
if escape is False:
in_string = False
else:
escape = False
else:
in_string = True
elif char == '\\':
if in_string is True:
escape = True
elif char == '/':
if comment is False:
comment = True
else:
return string[:i]
elif comment is True:
i += 1
comment = False
elif escape is True:
escape = False
if comment is False:
i += 1
return string
def remove_block_comment(string):
in_string = False
escape = False
block = False
maybe_block = False
found = False
init_index = 0
end_index = 0
i = 0
for char in string:
if char == '*':
if not in_string:
if maybe_block is False:
if block is True:
maybe_block = True
else:
block = True
if char == '"':
if in_string is True:
if escape is False:
in_string = False
else:
escape = False
else:
in_string = True
elif char == '\\':
if in_string is True:
escape = True
elif char == '/':
if not in_string:
if maybe_block is True:
if block is True:
found = True
end_index = i
break
else:
maybe_block = True
init_index = i
i += 1
if found is True:
return string[:init_index] + string[end_index + 1:]
return string
def code_split(string):
words = re.split(r'\\n|\?|&|\\|;|,|\*|\(|\)|\{|\s|\.|/|_|:|=|<|>|\||!|"|\+|-|\[|\]|\'|\}|\^|#|%', string)
words = list(filter(lambda a: a != "", words))
return words
def remove_stopwords(tokens):
stop_words = STOP_WORDS
relevant_words = []
for token in tokens:
if token not in stop_words:
relevant_words.append(token)
return relevant_words
def tokenizer(string, rem_stop=False, stemming=False, rem_kws=False):
tokens = code_split(string)
new_tokens = []
for token in tokens:
for t in camel_case_split(token):
new_tokens.append(t.lower())
if rem_stop:
new_tokens = remove_stopwords(new_tokens)
if rem_kws:
new_tokens = remove_keywords(new_tokens)
if stemming:
new_tokens = stem(new_tokens)
return new_tokens
if __name__ == '__main__':
# code = open('../testers/test.txt', 'r').read()
# code_parser(code, 151, javadoc)
print(get_lines(serialized=False, serialize=True))
print('first')
print(get_lines(serialized=True, serialize=False))
# get_positions()
# line_type_identifier("ciao")
# code_parser3()
# print(word_extractor("ciao mamma /*css rff*/"))
# print(tokenizer("t<EMAIL>@<EMAIL> @param"))
# print(camel_case_split("tuaMadre@QuellaTroia @param"))
# print(code_split("tuaMadre@QuellaTroia @param"))
| 2.8125 | 3 |
2020/01/part_1.py | anders-wind/advent_of_code | 0 | 12790079 | from typing import List
def read_input(file_path) -> List[int]:
res = set({})
with open(file_path, 'r') as file_handle:
for line in file_handle:
res.add(int(line))
return res
def calculate_result(numbers: List[int]) -> int:
for number in numbers:
opposite = 2020 - number
if opposite in numbers:
return number * opposite
raise Exception("Could not find combi")
def main():
print(calculate_result(read_input("sample.txt")))
print(calculate_result(read_input("input.txt")))
if __name__ == "__main__":
main() | 3.765625 | 4 |
tests/unit/test_user.py | gnott/h | 0 | 12790080 | from unittest import TestCase
from h.models import User
from . import AppTestCase
class UserTest(AppTestCase):
def test_password_encrypt(self):
"""make sure user passwords are stored encrypted
"""
u1 = User(username=u'test', password=u'<PASSWORD>', email=u'<EMAIL>')
assert u1.password != '<PASSWORD>'
self.db.add(u1)
self.db.flush()
assert u1.password != '<PASSWORD>'
| 3.3125 | 3 |
tagannotator/base/migrations/0005_auto_20200113_0518.py | kixlab/suggestbot-instagram-context-annotator | 0 | 12790081 | <filename>tagannotator/base/migrations/0005_auto_20200113_0518.py
# Generated by Django 2.2.7 on 2020-01-13 05:18
import base.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('base', '0004_auto_20200113_0232'),
]
operations = [
migrations.RemoveField(
model_name='photo',
name='title',
),
migrations.RemoveField(
model_name='post',
name='user',
),
migrations.RemoveField(
model_name='tag',
name='madeat',
),
migrations.AddField(
model_name='post',
name='source',
field=models.CharField(blank=True, choices=[('instagram', 'instagram'), ('upload', 'upload')], max_length=20, null=True),
),
migrations.AddField(
model_name='tag',
name='madeby',
field=models.CharField(blank=True, choices=[('user', 'user'), ('post', 'post')], max_length=20, null=True),
),
migrations.AlterField(
model_name='photo',
name='file',
field=models.ImageField(upload_to=base.models.user_directory_path),
),
migrations.CreateModel(
name='Session',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('starttime', models.DateTimeField(auto_now_add=True)),
('endtime', models.DateTimeField(auto_now_add=True)),
('status', models.BooleanField(blank=True, default=False)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 1.523438 | 2 |
get_model.py | dev-05/google_lens_clone | 1 | 12790082 | <filename>get_model.py
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 16 17:14:38 2021
@author: dev
"""
from tensorflow.keras.applications.vgg16 import VGG16
model=VGG16()
model.save('mymodel.h5')
print("VGG16 model downloaded and saved successfully") | 2.40625 | 2 |
src/architecture/encoder/stacked_mnist_encoder.py | gmum/lcw-generator | 4 | 12790083 | <reponame>gmum/lcw-generator
from architecture.encoder.cnn_encoder_block import CnnEncoderBlock
import torch
import torch.nn as nn
from architecture.generator.linear_generator_block import LinearGeneratorBlock
class Encoder(nn.Module):
def __init__(self, latent_size: int):
super().__init__()
self.__sequential_blocks = [
CnnEncoderBlock(3, 128),
CnnEncoderBlock(128, 256),
nn.Flatten(start_dim=1),
LinearGeneratorBlock(7 * 7 * 256, 256),
nn.Linear(256, latent_size)
]
self.main = nn.Sequential(*self.__sequential_blocks)
def forward(self, input_images: torch.Tensor):
assert input_images.size(1) == 3 and input_images.size(2) == 28 and input_images.size(3) == 28
encoded_latent = self.main(input_images)
return encoded_latent
| 2.703125 | 3 |
scraper/scraper/main_page.py | tomirendo/BenYehuda | 0 | 12790084 | """
Class for parsing the main Ben Yehuda site page
"""
from urllib import request
from urllib import parse as urlparse
from bs4 import BeautifulSoup
from .helpers import NamedLink, clean_text
class MainPage(object):
"""
Parses and gets information from the main index page. Mostly used to get
links for all of the artist pages
"""
def __init__(self, url="http://benyehuda.org"):
self.main_url = url
self.soup = BeautifulSoup(request.urlopen(url))
@staticmethod
def artist_a_filter(tag):
"""
Finds all the links in the index page that points to an artist's page
"""
if tag.name != "a":
return False
href = tag.get("href").lower()
# Artist links are supposed to be internal
if href.startswith("http"):
return False
# Remove unrelated crap
if href.startswith("javascript"):
return False
# Artist pages are one branch below the main page and their links
# usually end with / - Need to verify
if href.count("/") == 1 and href[-1] == "/":
return True
return False
def get_artist_links(self):
"""
:return: A set of unique artist page urls and names
:rtype: set[NamedLink]
"""
anchors = self.soup.find_all(self.artist_a_filter)
links = set()
for anchor in anchors:
url = urlparse.urljoin(self.main_url, anchor.get("href").lower())
links.add(NamedLink(url, clean_text(anchor)))
return links
| 3.734375 | 4 |
Eye-Tracking-System/tony/com.tonybeltramelli.eyetracker/Eye.py | tonybeltramelli/Graphics-And-Vision | 12 | 12790085 | <gh_stars>10-100
__author__ = 'tbeltramelli'
from scipy.cluster.vq import *
from UMedia import *
from Filtering import *
from RegionProps import *
from UMath import *
from UGraphics import *
import operator
class Eye:
_result = None
_right_template = None
_left_template = None
def __init__(self, right_corner_path, left_corner_path):
self._right_template = Filtering.apply_box_filter(Filtering.get_gray_scale_image(UMedia.get_image(right_corner_path)), 5)
self._left_template = Filtering.apply_box_filter(Filtering.get_gray_scale_image(UMedia.get_image(left_corner_path)), 5)
def process(self, img):
self._result = img
img = Filtering.apply_box_filter(Filtering.get_gray_scale_image(img), 5)
pupil_position, pupil_radius = self.get_pupil(img, 40)
iris_radius = self.get_iris(img, pupil_position, pupil_radius)
glints_position = self.get_glints(img, 180, pupil_position, iris_radius)
corners_position = self.get_eye_corners(img)
UMedia.show(self._result)
def get_pupil(self, img, threshold):
img = cv2.threshold(img, threshold, 255, cv2.THRESH_BINARY_INV)[1]
height, width = img.shape
side = (width * height) / 8
st = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
img = cv2.morphologyEx(img, cv2.MORPH_ERODE, st, iterations=1)
img = cv2.morphologyEx(img, cv2.MORPH_DILATE, st, iterations=1)
c, contours, hierarchy = cv2.findContours(img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
props = RegionProps()
radius = 0.0
for cnt in contours:
properties = props.CalcContourProperties(cnt, ['Area', 'Length', 'Centroid', 'Extend', 'ConvexHull'])
perimeter = cv2.arcLength(cnt, True)
radius = np.sqrt(properties['Area'] / np.pi)
radius = 1.0 if radius == 0.0 else radius
circularity = perimeter / (radius * 2 * np.pi)
if ((circularity >= 0.0) and (circularity <= 1.5)) and ((properties['Area'] > 900) and (properties['Area'] < side)):
for i, centroid in enumerate(properties['Centroid']):
if i == 0:
center = int(centroid), int(properties['Centroid'][i+1])
if UMath.is_in_area(center[0], center[1], width, height):
if len(cnt) >= 5:
ellipse = cv2.fitEllipse(cnt)
cv2.ellipse(self._result, ellipse, (0, 0, 255), 1)
cv2.circle(self._result, center, int(radius), (0, 0, 255), 1)
return center, radius
return (int(width / 2), int(height / 2)), radius
def get_glints(self, img, threshold, pupil_position, iris_radius):
img = cv2.threshold(img, threshold, 255, cv2.THRESH_BINARY)[1]
height, width = img.shape
max_dist = iris_radius if iris_radius > 0 else (width + height) / 16
c, contours, hierarchy = cv2.findContours(img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
props = RegionProps()
coordinates = []
for cnt in contours:
properties = props.CalcContourProperties(cnt, ['Area', 'Length', 'Centroid', 'Extend', 'ConvexHull'])
if properties['Extend'] > 0 and properties['Area'] < 100:
for i, centroid in enumerate(properties['Centroid']):
if i == 0:
center = int(centroid), int(properties['Centroid'][i+1])
distance = np.sqrt(np.power(pupil_position[0] - center[0], 2) + np.power(pupil_position[1] - center[1], 2))
if distance < max_dist:
coordinates.append(center)
cv2.circle(self._result, center, 2, (0, 255, 0), 3)
return coordinates
def get_eye_corners(self, img):
right = self._match(img, self._right_template)
left = self._match(img, self._left_template)
return [right, left]
def _match(self, img, template):
matching = cv2.matchTemplate(img, template, cv2.TM_CCOEFF_NORMED)
height, width = template.shape
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(matching)
cv2.rectangle(self._result, (max_loc[0] - width/2, max_loc[1] - height/2), (max_loc[0] + width/2, max_loc[1] + height/2), 2)
return max_loc[0], max_loc[1]
def get_iris(self, img, pupil_position, pupil_radius, angle_tolerance=3, min_magnitude=15, max_magnitude=20):
if pupil_radius <= 1.0:
return 0.0
orientation, magnitude = self._get_gradient(img)
max_iris_radius = pupil_radius * 5
pupil_samples = UMath.get_circle_samples(pupil_position, pupil_radius)
iris_samples = UMath.get_circle_samples(pupil_position, max_iris_radius)
iris_radius_vote = dict()
for sample in range(len(pupil_samples)):
pupil_sample = (int(pupil_samples[sample][0]), int(pupil_samples[sample][1]))
iris_sample = (int(iris_samples[sample][0]), int(iris_samples[sample][1]))
normal = UMath.get_line_coordinates(pupil_sample, iris_sample)
normal_angle = cv2.fastAtan2(pupil_sample[1] - pupil_position[1], pupil_sample[0] - pupil_position[0])
for point in normal:
i = point[1] - 1
j = point[0] - 1
if (i >= 0 and j >= 0) and (len(magnitude) > i and len(magnitude[i]) > j):
mag = magnitude[i][j]
if min_magnitude < mag < max_magnitude:
angle = normal_angle + orientation[i][j] - 90
angle = angle - 360 if angle > 360 else angle
if angle < angle_tolerance:
radius = np.sqrt(np.power(point[0] - pupil_position[0], 2) + np.power(point[1] - pupil_position[1], 2))
radius = int(radius)
if radius not in iris_radius_vote:
iris_radius_vote[radius] = 0
iris_radius_vote[radius] += 1
cv2.line(self._result, pupil_sample, iris_sample, UGraphics.hex_color_to_bgr(0xf2f378), 1)
iris_radius = max(iris_radius_vote.iteritems(), key=operator.itemgetter(1))[0] if len(iris_radius_vote) > 0 else 0
cv2.circle(self._result, pupil_position, iris_radius, (255, 0, 0), 1)
return iris_radius
def _get_gradient(self, img, granularity=10):
height, width = img.shape
sobel_horizontal = cv2.Sobel(img, cv2.CV_32F, 1, 0)
sobel_vertical = cv2.Sobel(img, cv2.CV_32F, 0, 1)
orientation = np.empty(img.shape)
magnitude = np.empty(img.shape)
for y in range(height):
for x in range(width):
orientation[y][x] = cv2.fastAtan2(sobel_horizontal[y][x], sobel_vertical[y][x])
magnitude[y][x] = np.sqrt(np.power(sobel_horizontal[y][x], 2) + np.power(sobel_vertical[y][x], 2))
if (x % granularity == 0) and (y % granularity == 0):
UGraphics.draw_vector(self._result, x, y, magnitude[y][x] / granularity, orientation[y][x])
return orientation, magnitude
| 2.234375 | 2 |
tinylinks/urls.py | gosuai/django-tinylinks | 11 | 12790086 | <gh_stars>10-100
"""URLs for the ``django-tinylinks`` app."""
from django.conf.urls import url
from django.views.generic import TemplateView
from .views import (
StatisticsView,
TinylinkCreateView,
TinylinkDeleteView,
TinylinkListView,
TinylinkRedirectView,
TinylinkUpdateView,
)
urlpatterns = [
url(
r'^$',
TinylinkListView.as_view(),
name='tinylink_list'
),
url(
r'^create/$',
TinylinkCreateView.as_view(),
name='tinylink_create'
),
url(
r'^update/(?P<pk>\d+)/(?P<mode>[a-z-]+)/$',
TinylinkUpdateView.as_view(),
name='tinylink_update',
),
url(
r'^delete/(?P<pk>\d+)/$',
TinylinkDeleteView.as_view(),
name='tinylink_delete',
),
url(
r'^404/$',
TemplateView.as_view(template_name='tinylinks/notfound.html'),
name='tinylink_notfound',
),
url(
r'^statistics/?$',
StatisticsView.as_view(),
name='tinylink_statistics',
),
url(
r'^(?P<short_url>[a-zA-Z0-9-]+)/?$',
TinylinkRedirectView.as_view(),
name='tinylink_redirect',
),
]
| 1.820313 | 2 |
rainy_project/mainpage/migrations/0018_survey.py | osamhack2020/WEB_HeavyReading_Rainy | 2 | 12790087 | <gh_stars>1-10
# Generated by Django 2.1 on 2020-10-29 14:45
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('mainpage', '0017_auto_20201025_2232'),
]
operations = [
migrations.CreateModel(
name='Survey',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('author', models.CharField(max_length=50)),
('count', models.SmallIntegerField(default=0)),
('user', models.ManyToManyField(to=settings.AUTH_USER_MODEL)),
],
),
]
| 1.859375 | 2 |
naive_bayes.py | xiecong/Simple-Implementation-of-ML-Algorithms | 16 | 12790088 | <filename>naive_bayes.py
import numpy as np
from sklearn.datasets import fetch_20newsgroups
import re
def tokenize(documents, stop_words):
text = []
for doc in documents:
letters_only = re.sub("[^a-zA-Z]", " ", doc)
words = letters_only.lower().split()
text.append([w for w in words if not w in stop_words])
return np.array(text)
class NaiveBayes(object):
# multinominal NB model with laplace smoothing
# guassian can be used for numerical
def __init__(self):
self.p_w = {}
self.p_c = {}
self.vocabulary = []
self.v_num = 0
def fit(self, x, y):
n_data = len(y)
self.label, p_c = np.unique(y, return_counts=True)
self.p_c = dict(zip(self.label, np.log(p_c / n_data)))
indexes = np.c_[np.array(y), np.arange(n_data)]
self.vocabulary = np.unique(
[item for sublist in x for item in sublist])
self.v_num = len(self.vocabulary)
print("vocabulary length {}".format(self.v_num))
self.v_idx = dict(zip(self.vocabulary, np.arange(self.v_num)))
print("start fitting")
for l in self.label:
idxes = indexes[indexes[:, 0] == l][:, 1].astype(int)
corpus = [x[idx] for idx in idxes]
flatten = [item for sublist in corpus for item in sublist]
self.p_w[l] = [
np.log(1 / (len(flatten) + self.v_num))] * self.v_num
words, pwl = np.unique(flatten, return_counts=True)
for w, p in zip(words, pwl):
self.p_w[l][self.v_idx[w]] = np.log(
(p + 1) / (len(flatten) + self.v_num))
def predict(self, x):
return np.array([self.predict_sample(xi) for xi in x])
def predict_sample(self, x):
eps = 1 / self.v_num
p = [self.p_c[i] + sum(self.p_w[i][self.v_idx[w]] if w in self.v_idx.keys()
else eps for w in x) for i in range(len(self.label))]
return self.label[np.argmax(p)]
def main():
stop_words = set(["i", "me", "my", "myself", "we", "our", "ours", "ourselves",
"you", "your", "yours", "yourself", "yourselves", "he", "him", "his",
"himself", "she", "her", "hers", "herself", "it", "its", "itself", "they",
"them", "their", "theirs", "themselves", "what", "which", "who", "whom",
"this", "that", "these", "those", "am", "is", "are", "was", "were", "be",
"been", "being", "have", "has", "had", "having", "do", "does", "did",
"doing", "a", "an", "the", "and", "but", "if", "or", "because", "as",
"until", "while", "of", "at", "by", "for", "with", "about", "against",
"between", "into", "through", "during", "before", "after", "above", "below",
"to", "from", "up", "down", "in", "out", "on", "off", "over", "under",
"again", "further", "then", "once", "here", "there", "when", "where", "why",
"how", "all", "any", "both", "each", "few", "more", "most", "other", "some",
"such", "no", "nor", "not", "only", "own", "same", "so", "than", "too",
"very", "s", "t", "can", "will", "just", "don", "should", "now"])
data = fetch_20newsgroups()
x = tokenize(data.data, stop_words)
y = data.target
test_ratio = 0.2
test_split = np.random.uniform(0, 1, len(x))
train_x = x[test_split >= test_ratio]
test_x = x[test_split < test_ratio]
train_y = y[test_split >= test_ratio]
test_y = y[test_split < test_ratio]
nb = NaiveBayes()
nb.fit(train_x, train_y)
print("predicting")
print(sum(nb.predict(train_x) == train_y) / train_x.shape[0])
print(sum(nb.predict(test_x) == test_y) / test_y.shape[0])
if __name__ == "__main__":
main()
| 2.515625 | 3 |
code.py | priyanshnama/Hackerrank-Automation-Prototype | 0 | 12790089 | <filename>code.py<gh_stars>0
import string
import urllib3
import random
import json
# log file
log = open("log.txt", "a+")
Head = ["firstname\t", "lastname\t", "email\t\t\t", "college\t", "city\t", "phone\t\t", "password\t", "refral\t"]
log.writelines(Head)
code = input("Enter Your Refral code: ")
points = int((int(input("how much point you want to generate: ")))/10)
point = int(points/10)
# fields
for i in range(point):
firstname = ''.join(random.choices(string.ascii_uppercase + string.digits, k=5))
email = firstname + "@gmail.com"
lastname = 'Jr'
college = 'IIITDMJ'
city = 'Dumna'
phone = '1234567890'
password = '<PASSWORD>'
refral = code
data = {
"firstname": firstname,
"lastname": lastname,
"email": email,
"college": college,
"city": city,
"phone": phone,
"password": password,
"refral": refral,
}
L = [v for k, v in data.items()]
L[0] = " " + L[0]
L[1] = " " + L[1]
for index in range(len(L)):
L[index] = L[index] + "\t"
log.writelines("\n")
log.writelines(L)
http = urllib3.PoolManager()
try:
r = http.request('GET', 'https://invicta20.org/register', retries=False)
encoded_data = json.dumps(data).encode('utf-8')
r = http.request('POST', 'https://invicta20.org/register', body=encoded_data,
headers={'Content-Type': 'application/json'})
json.loads(r.data.decode('utf-8'))['json']
var = {'attribute': 'value'}
except urllib3.exceptions.NewConnectionError:
print('Connection failed.')
except json.decoder.JSONDecodeError:
var = {'attribute': 'value'}
| 2.9375 | 3 |
nygame/_quietload.py | nfearnley/nygame | 1 | 12790090 | import os
os.environ["PYGAME_HIDE_SUPPORT_PROMPT"] = "1"
| 1.367188 | 1 |
_unittests/ut_rss/test_rss_compile.py | sdpython/pyrsslocal | 2 | 12790091 | <gh_stars>1-10
# coding: utf-8
"""
@brief test log(time=2s)
"""
import os
import unittest
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import get_temp_folder, ExtTestCase
from pyrsslocal.cli import compile_rss_blogs
class TestRSSCompile(ExtTestCase):
def test_rss_compile(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
temp = get_temp_folder(__file__, "temp_rss_compile")
out_html = os.path.join(temp, 'index.html')
out_rss = os.path.join(temp, 'rssfile.xml')
links = ['http://www.xavierdupre.fr/blog/xdbrss.xml',
'http://www.xavierdupre.fr/app/ensae_projects/helpsphinx/_downloads/rss.xml']
compile_rss_blogs(links, "http://www.xavierdupre.fr/blogagg.html",
'Aggregation of blog posts published on <a href='
'"http://www.xavierdupre.fr.">xavierdupre.fr</a>',
title="Recent posts",
author="<NAME>",
out_html=out_html, out_rss=out_rss,
fLOG=fLOG)
self.assertExists(out_html)
self.assertExists(out_rss)
if __name__ == "__main__":
unittest.main()
| 2.34375 | 2 |
plantpredict/helpers.py | plantpredict/python-sdk | 2 | 12790092 | import pandas as pd
def load_from_excel(file_path, sheet_name=None):
"""
Loads the data from an Excel file into a list of dictionaries, where each dictionary represents a row in the Excel
file and the keys of each dictionary represent each column header in the Excel file. The method creates this list
of dictionaries via a Pandas dataframe.
:param file_path: The full file path (appended with .xlsx) of the Excel file to be loaded.
:type file_path: str
:param sheet_name: Name of a particular sheet in the file to load (optional, defaults to the first sheet in the
Excel file).
:type sheet_name: str
:return: List of dictionaries, each dictionary representing a row in the Excel file.
:rtype: list of dict
"""
xl = pd.ExcelFile(file_path)
sheet_name = sheet_name if sheet_name else xl.sheet_names[0]
return xl.parse(sheet_name, index_col=None).to_dict('records')
def export_to_excel(data, file_path, sheet_name="Sheet1", field_order=None, sorting_fields=None):
"""
Writes data from a list of dictionaries to an Excel file, where each dictionary represents a row in the Excel file
and the keys of each dictionary represent each column header in the Excel file.
:param data: List of dictionaries, each dictionary representing a row in the Excel file.
:type data: list of dict
:param file_path: The full file path (appended with .xlsx) of the Excel file to be written to. This will overwrite
data if both file_path and sheet_name already exist.
:type file_path: str
:param sheet_name: Name of a particular sheet in the file to write to (optional, defaults to "Sheet1").
:type sheet_name: str
:param field_order: List of keys from data ordered to match the intended Excel column ordering (left to right). Must
include all keys/columns. Any keys omitted from the list will not be written as columns. (optional)
:type field_order: list of str
:param sorting_fields: List of keys from data to be used as sorting columns (small to large) in Excel. Can be any
length from 1 column to every column. The order of the list will dictate the sorting order.
:type sorting_fields: list of str
:return: None
"""
writer = pd.ExcelWriter(file_path, engine='openpyxl')
df = pd.DataFrame(data)
if field_order:
df = df[field_order]
if sorting_fields:
df = df.sort_values(sorting_fields)
df.to_excel(writer, sheet_name=sheet_name, index=False)
writer.save()
| 4.21875 | 4 |
ulm/settings/local.py | dupuy/ulm | 1 | 12790093 | <reponame>dupuy/ulm<gh_stars>1-10
# pylint: disable=W0401,W0614,C0111
from .base import * # noqa
ADMINS = (
('<NAME>', '<EMAIL>'),
)
MANAGERS = ADMINS
# Make this unique, and don't share it with anybody.
SECRET_KEY = '<KEY>'
| 1.296875 | 1 |
examples/1827402009.py | lobo0616/bysj | 1 | 12790094 | <filename>examples/1827402009.py
# 学号:1827402009
# 姓名:肖鹏
# IP:192.168.157.232
# 上传时间:2018/11/12 15:22:55
import math
def func1(a,b): if a>=b or int(a)!=a or int(b)!=b :
return None
else:
d=1
for i in range(a,b):
d=d*i
c=len(str(d))
x=1
for j in range(1,x+1):
if c%10**j==0:
x=x+1
else:
break
return(x)
def func2(a,b): if int(a)!=a or int(b)!=b:
return None
else:
n=0
for i in range(a,b+1):
m=str(i)
h=list(m)
if list(reversed(m))==h:
n+=1
else:
break
return(n)
def func3(lst): for i in lst[:]:
if int(i)!=i:
return None
elif i<0 or i%3==0:
lst.remove(i)
lst.sort(reverse=True)
return (lst)
if __name__=="__main__":
pass
| 3.734375 | 4 |
dynatrace-scripts/checkforproblems.py | nikhilgoenkatech/Jekins | 0 | 12790095 | <filename>dynatrace-scripts/checkforproblems.py
import sys
import json
import requests
def main():
DT_URL = sys.argv[1]
DT_TOKEN = sys.argv[2]
endpoint = DT_URL + "api/v1/problem/status"
get_param = {'Accept':'application/json; charset=utf-8', 'Authorization':'Api-Token {}'.format(DT_TOKEN)}
config_post = requests.get(endpoint, headers = get_param)
jsonObj = applications = json.loads(config_post.text)
problem = jsonObj["result"]["totalOpenProblemsCount"]
return problem
if __name__=="__main__":
val = main()
exit(val)
| 2.59375 | 3 |
WIP/urls.py | kodi-sk/CSI-WIP | 0 | 12790096 | """WIP URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from complaint.views import show_complaints
from complaint.views import reject, signup
from django.contrib.auth import views as auth_views
from complaint.views import reject
from complaint.views import index
from complaint.views import resolved
from complaint.views import detail
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^show/$',show_complaints),
url(r'^reject/complaint/(\d{1,2})/$',reject),
url(r'^register/$',signup),
url(r'^$', auth_views.login, name='login'),
url(r'^logout/$', auth_views.logout, name='logout'),
url(r'^complaint/$',index),
url(r'^resolved/complaint/(\d{1,2})/$',resolved),
url(r'^complaint/(\d{1,2})/',detail),
]
| 2.53125 | 3 |
src/Honeybee_EnergyPlus Window Shade Generator.py | rdzeldenrust/Honeybee | 1 | 12790097 | <reponame>rdzeldenrust/Honeybee
# This component creates shades for Honeybee Zones
# By <NAME>
# <EMAIL>
# Ladybug started by <NAME> is licensed
# under a Creative Commons Attribution-ShareAlike 3.0 Unported License.
"""
Use this component to generate shades for Honeybee zone windows. The component has two main uses:
_
The first is that it can be used to assign blind objects to HBZones prior to simulation. These blinds can be dynamically controlled via a schedule. Note that shades created this way will automatically be assigned to the zone and the windowBreps and shadeBreps outputs are just for visualization.
_
The second way to use the component is to create test shade areas for shade benefit evaluation after an energy simulation has already been run. In this case, the component helps keep the data tree paths of heating, cooling and beam gain synced with that of the zones and windows. For this, you would take imported EnergyPlus results and hook them up to the "zoneData" inputs and use the output "zoneDataTree" in the shade benefit evaluation.
-
Provided by Honeybee 0.0.55
Args:
_HBObjects: The HBZones out of any of the HB components that generate or alter zones. Note that these should ideally be the zones that are fed into the Run Energy Simulation component. Zones read back into Grasshopper from the Import idf component will not align correctly with the EP Result data.
blindsMaterial_: An optional blind material from the blind material component. If no material is connected here, the component will automatically assign a material of 0.65 solar reflectance, 0 transmittance, 0.9 emittance, 0.25 mm thickness, 221 W/mK conductivity.
        blindsSchedule_: An optional schedule to raise and lower the blinds. If no value is connected here, the blinds will assume the "ALWAYS ON" schedule.
north_: Input a vector to be used as a true North direction or a number between 0 and 360 that represents the degrees off from the y-axis to make North. The default North direction is set to the Y-axis (0 degrees).
        _depth: A number representing the depth of the shade to be generated on each window. You can also input lists of depths, which will assign different depths based on cardinal direction. For example, inputting 4 values for depths will assign each value of the list as follows: item 0 = north depth, item 1 = west depth, item 2 = south depth, item 3 = east depth.
_numOfShds: The number of shades to generated for each glazed surface.
_distBetween: An alternate option to _numOfShds where the input here is the distance in Rhino units between each shade.
        horOrVertical_: Set to "True" to generate horizontal shades or "False" to generate vertical shades. You can also input lists of values here, which will assign different orientations based on cardinal direction.
shdAngle_: A number between -90 and 90 that represents an angle in degrees to rotate the shades. The default is set to "0" for no rotation. If you have vertical shades, use this to rotate them towards the South by a certain value in degrees. If applied to windows facing East or West, tilting the shades like this will let in more winter sun than summer sun. If you have horizontal shades, use this input to angle shades downward. You can also put in lists of angles to assign different shade angles to different cardinal directions.
        interiorOrExter_: Set to "True" to generate shades on the interior and set to "False" to generate shades on the exterior. The default is set to "False" to generate exterior shades.
distToGlass_: A number representing the offset distance from the glass to make the shades.
_runIt: Set boolean to "True" to run the component and generate shades.
---------------: ...
        zoneData1_: Optional zone data for the connected HBZones that will be aligned with the generated windows. Use this to align data like heating load, cooling load or beam gain for a shade benefit simulation with the generated shades.
        zoneData2_: Optional zone data for the connected HBZones that will be aligned with the generated windows. Use this to align data like heating load, cooling load or beam gain for a shade benefit simulation with the generated shades.
        zoneData3_: Optional zone data for the connected HBZones that will be aligned with the generated windows. Use this to align data like heating load, cooling load or beam gain for a shade benefit simulation with the generated shades.
Returns:
readMe!: ...
---------------: ...
HBZones: The HBZones with the assigned shading (ready to be simulated).
---------------: ...
        windowBreps: Breps representing each window of the zone. These can be plugged into a shade benefit evaluation as each window is its own branch of a grasshopper data tree.
        shadeBreps: Breps representing each shade of the window. These can be plugged into a shade benefit evaluation as each window is its own branch of a grasshopper data tree. Alternatively, they can be plugged into an EnergyPlus simulation with the "Honeybee_EP Context Surfaces" component.
---------------: ...
zoneData1Tree: Data trees of the zoneData1_, which align with the branches for each window above.
zoneData2Tree: Data trees of the zoneData2_, which align with the branches for each window above.
        zoneData3Tree: Data trees of the zoneData3_, which align with the branches for each window above.
"""
ghenv.Component.Name = "Honeybee_EnergyPlus Window Shade Generator"
ghenv.Component.NickName = 'EPWindowShades'
ghenv.Component.Message = 'VER 0.0.55\nSEP_11_2014'
ghenv.Component.Category = "Honeybee"
ghenv.Component.SubCategory = "09 | Energy | Energy"
#compatibleHBVersion = VER 0.0.55\nAUG_25_2014
#compatibleLBVersion = VER 0.0.58\nAUG_20_2014
try: ghenv.Component.AdditionalHelpFromDocStrings = "1"
except: pass
from System import Object
from System import Drawing
from clr import AddReference
AddReference('Grasshopper')
import Grasshopper.Kernel as gh
from Grasshopper import DataTree
from Grasshopper.Kernel.Data import GH_Path
import Rhino as rc
import rhinoscriptsyntax as rs
import scriptcontext as sc
import uuid
import math
import os
w = gh.GH_RuntimeMessageLevel.Warning
tol = sc.doc.ModelAbsoluteTolerance
def checkTheInputs(zoneNames, windowNames, windowSrfs, isZone):
    #Check if the user has hooked up a distBetween or numOfShds.
if _distBetween == [] and _numOfShds == []:
numOfShd = [1]
print "No value is connected for number of shades. The component will be run with one shade per window."
else:
numOfShd = _numOfShds
#Check the depths.
checkData2 = True
if _depth == []:
checkData2 = False
print "You must provide a depth for the shades."
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, "You must provide a depth for the shades.")
#Check if there is a blinds material connected and, if not, set a default.
checkData5 = True
if blindsMaterial_ == None:
print "No blinds material has been connected. A material will be used with 0.65 solar reflectance, 0 transmittance, 0.9 emittance, 0.25 mm thickness, 221 W/mK conductivity."
blindsMaterial = ['DEFAULTBLINDSMATERIAL', 0.65, 0, 0.9, 0.00025, 221]
else:
try: blindsMaterial = deconstructBlindMaterial(blindsMaterial_)
except:
checkData5 = False
warning = 'Blinds material is not a valid blinds material from the "Honeybee_EnergyPlus Blinds Material" component.'
print warning
ghenv.Component.AddRuntimeMessage(w, warning)
#Check if there is a blinds schedule connected and, if not, set a default.
checkData4 = True
if blindsSchedule_ == None:
schedule = "ALWAYSON"
print "No blinds schedule has been connected. It will be assumed that the blinds are always drawn"
else:
schedule= blindsSchedule_.upper()
if schedule!=None and not schedule.lower().endswith(".csv") and schedule not in HBScheduleList:
msg = "Cannot find " + schedule + " in Honeybee schedule library."
print msg
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, msg)
checkData4 = False
elif schedule!=None and schedule.lower().endswith(".csv"):
# check if csv file is existed
if not os.path.isfile(schedule):
msg = "Cannot find the shchedule file: " + schedule
print msg
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, msg)
checkData4 = False
#Create a Python list from the input data trees.
def makePyTree(zoneData):
dataPyList = []
for i in range(zoneData.BranchCount):
branchList = zoneData.Branch(i)
dataVal = []
for item in branchList:
dataVal.append(item)
dataPyList.append(dataVal)
return dataPyList
allData = []
allData.append(makePyTree(zoneData1_))
allData.append(makePyTree(zoneData2_))
allData.append(makePyTree(zoneData3_))
    #Test to see if the data lists have headers on them, which is necessary to match the data to a zone or window. If there's no header, the data cannot be coordinated with this component.
checkData3 = True
checkBranches = []
allHeaders = []
allNumbers = []
for branch in allData:
checkHeader = []
dataHeaders = []
dataNumbers = []
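        #Ladybug/Honeybee headers occupy the first 7 items of each list; everything after them is the numeric data.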
        for dataList in branch:
            if str(dataList[0]) == "key:location/dataType/units/frequency/startsAt/endsAt":
                checkHeader.append(1)
                dataHeaders.append(dataList[:7])
                dataNumbers.append(dataList[7:])
allHeaders.append(dataHeaders)
allNumbers.append(dataNumbers)
        if sum(checkHeader) != len(branch):
            checkData3 = False
            warning = "Not all of the connected zoneData has a Ladybug/Honeybee header on it. This header is necessary for data input to this component."
            print warning
            ghenv.Component.AddRuntimeMessage(w, warning)
#Align all of the lists to each window.
windowNamesFinal = []
windowBrepsFinal = []
alignedDataTree = []
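    #Make one aligned output branch for each connected zoneData input.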
for item in allData: alignedDataTree.append([])
for zoneCount, windowList in enumerate(windowSrfs):
if isZone == True:
zoneName = zoneNames[zoneCount]
for windowCount, window in enumerate(windowList):
windowBrepsFinal.append(window)
windowName = windowNames[zoneCount][windowCount]
windowNamesFinal.append(windowName)
for inputDataTreeCount, branch in enumerate(allHeaders):
#Test to see if the data is for the zone level.
zoneData = False
if isZone == True:
for listCount, header in enumerate(branch):
if header[2].split(' for ')[-1] == zoneName.upper():
alignedDataTree[inputDataTreeCount].append(allData[inputDataTreeCount][listCount])
zoneData = True
#Test to see if the data is for the window level.
srfData = False
if zoneData == False:
for listCount, header in enumerate(branch):
try: winNm = header[2].split(' for ')[-1].split(': ')[0]
except: winNm = header[2].split(' for ')[-1]
if str(winNm) == str(windowName.upper()):
alignedDataTree[inputDataTreeCount].append(allData[inputDataTreeCount][listCount])
srfData = True
if zoneData == False and srfData == False and alignedDataTree != [[], [], []]:
print "A window was not matched with its respective zone/surface data."
    checkData = checkData2 and checkData3 and checkData4 and checkData5
return checkData, windowNamesFinal, windowBrepsFinal, _depth, alignedDataTree, numOfShd, blindsMaterial, schedule
def analyzeGlz(glzSrf, distBetween, numOfShds, horOrVertical, lb_visualization, normalVector):
# find the bounding box
bbox = glzSrf.GetBoundingBox(True)
if horOrVertical == None:
horOrVertical = True
if numOfShds == None and distBetween == None:
numOfShds = 1
    if numOfShds == 0 or distBetween == 0:
        sortedPlanes = []
        shadingHeight = 0 # no shades will be generated, so there is no shade spacing
elif horOrVertical == True:
# Horizontal
#Define a bounding box for use in calculating the number of shades to generate
minZPt = bbox.Corner(False, True, True)
minZPt = rc.Geometry.Point3d(minZPt.X, minZPt.Y, minZPt.Z)
maxZPt = bbox.Corner(False, True, False)
maxZPt = rc.Geometry.Point3d(maxZPt.X, maxZPt.Y, maxZPt.Z - sc.doc.ModelAbsoluteTolerance)
centerPt = bbox.Center
        #glazing height
glzHeight = minZPt.DistanceTo(maxZPt)
# find number of shadings
try:
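            #Prefer an explicit number of shades; fall back to the distBetween spacing if numOfShds is not a number.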
numOfShd = int(numOfShds)
shadingHeight = glzHeight/numOfShd
shadingRemainder = shadingHeight
except:
shadingHeight = distBetween
shadingRemainder = (((glzHeight/distBetween) - math.floor(glzHeight/distBetween))*distBetween)
if shadingRemainder == 0:
shadingRemainder = shadingHeight
# find shading base planes
planeOrigins = []
planes = []
X, Y, z = minZPt.X, minZPt.Y, minZPt.Z
zHeights = rs.frange(minZPt.Z + shadingRemainder, maxZPt.Z + 0.5*sc.doc.ModelAbsoluteTolerance, shadingHeight)
try:
for Z in zHeights:
planes.append(rc.Geometry.Plane(rc.Geometry.Point3d(X, Y, Z), rc.Geometry.Vector3d.ZAxis))
except:
# single shading
planes.append(rc.Geometry.Plane(rc.Geometry.Point3d(maxZPt), rc.Geometry.Vector3d.ZAxis))
# sort the planes
sortedPlanes = sorted(planes, key=lambda a: a.Origin.Z)
elif horOrVertical == False:
# Vertical
# Define a vector to be used to generate the planes
planeVec = rc.Geometry.Vector3d(normalVector.X, normalVector.Y, 0)
planeVec.Rotate(1.570796, rc.Geometry.Vector3d.ZAxis)
#Define a bounding box for use in calculating the number of shades to generate
minXYPt = bbox.Corner(True, True, True)
minXYPt = rc.Geometry.Point3d(minXYPt.X, minXYPt.Y, minXYPt.Z)
maxXYPt = bbox.Corner(False, False, True)
maxXYPt = rc.Geometry.Point3d(maxXYPt.X, maxXYPt.Y, maxXYPt.Z)
centerPt = bbox.Center
#Test to be sure that the values are parallel to the correct vector.
testVec = rc.Geometry.Vector3d.Subtract(rc.Geometry.Vector3d(minXYPt.X, minXYPt.Y, minXYPt.Z), rc.Geometry.Vector3d(maxXYPt.X, maxXYPt.Y, maxXYPt.Z))
if testVec.IsParallelTo(planeVec) == 0:
minXYPt = bbox.Corner(False, True, True)
minXYPt = rc.Geometry.Point3d(minXYPt.X, minXYPt.Y, minXYPt.Z)
maxXYPt = bbox.Corner(True, False, True)
maxXYPt = rc.Geometry.Point3d(maxXYPt.X, maxXYPt.Y, maxXYPt.Z)
#Adjust the points to ensure the creation of the correct number of shades starting from the northernmost side of the window.
tolVec = rc.Geometry.Vector3d.Subtract(rc.Geometry.Vector3d(minXYPt.X, minXYPt.Y, minXYPt.Z), rc.Geometry.Vector3d(maxXYPt.X, maxXYPt.Y, maxXYPt.Z))
tolVec.Unitize()
tolVec = rc.Geometry.Vector3d.Multiply(sc.doc.ModelAbsoluteTolerance*2, tolVec)
        if tolVec.X > 0 and tolVec.Y > 0:
            tolVec = rc.Geometry.Vector3d.Multiply(1, tolVec)
            norOrient = False
        elif tolVec.X < 0 and tolVec.Y > 0:
            tolVec = rc.Geometry.Vector3d.Multiply(1, tolVec)
            norOrient = False
        elif tolVec.X < 0 and tolVec.Y < 0:
            tolVec = rc.Geometry.Vector3d.Multiply(-1, tolVec)
            norOrient = True
        else:
            tolVec = rc.Geometry.Vector3d.Multiply(-1, tolVec)
            norOrient = True
maxXYPt = rc.Geometry.Point3d.Subtract(maxXYPt, tolVec)
minXYPt = rc.Geometry.Point3d.Subtract(minXYPt, tolVec)
#glazing distance
glzHeight = minXYPt.DistanceTo(maxXYPt)
# find number of shadings
try:
numOfShd = int(numOfShds)
shadingHeight = glzHeight/numOfShd
shadingRemainder = shadingHeight
except:
shadingHeight = distBetween
shadingRemainder = (((glzHeight/distBetween) - math.floor(glzHeight/distBetween))*distBetween)
if shadingRemainder == 0:
shadingRemainder = shadingHeight
# find shading base planes
planeOrigins = []
planes = []
pointCurve = rc.Geometry.Curve.CreateControlPointCurve([maxXYPt, minXYPt])
divisionParams = pointCurve.DivideByLength(shadingHeight, True)
divisionPoints = []
for param in divisionParams:
divisionPoints.append(pointCurve.PointAt(param))
planePoints = divisionPoints
try:
for point in planePoints:
planes.append(rc.Geometry.Plane(point, planeVec))
except:
# single shading
planes.append(rc.Geometry.Plane(rc.Geometry.Point3d(minXYPt), planeVec))
sortedPlanes = planes
return sortedPlanes, shadingHeight
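# analyzeGlz returns the sorted list of section planes used to cut the glazing
# and the spacing between successive shades (shadingHeight), in model units.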
def makeShade(_glzSrf, depth, numShds, distBtwn):
rotationAngle_ = 0
# import the classes
lb_preparation = sc.sticky["ladybug_Preparation"]()
lb_mesh = sc.sticky["ladybug_Mesh"]()
lb_visualization = sc.sticky["ladybug_ResultVisualization"]()
# find the normal of the surface in the center
    # note2developer: there might be cases where the surface is not planar and
    # the normal changes from point to point; then I should sample the test surface
    # and test the normal direction at more points
baseSrfCenPt = rc.Geometry.AreaMassProperties.Compute(_glzSrf).Centroid
# sometimes the center point is not located on the surface
baseSrfCenPt = _glzSrf.ClosestPoint(baseSrfCenPt)
    success, centerPtU, centerPtV = _glzSrf.Faces[0].ClosestPoint(baseSrfCenPt)
    if success:
normalVector = _glzSrf.Faces[0].NormalAt(centerPtU, centerPtV)
#return rc.Geometry.Plane(baseSrfCenPt,normalVector)
else:
print "Couldn't find the normal of the shading surface." + \
"\nRebuild the surface and try again!"
return -1
shadingSurfaces =[]
#Define a function that can get the angle to North of any surface.
def getAngle2North(normalVector):
if north_ != None and north_.IsValid():
northVector = north_
else:northVector = rc.Geometry.Vector3d.YAxis
angle = rc.Geometry.Vector3d.VectorAngle(northVector, normalVector, rc.Geometry.Plane.WorldXY)
finalAngle = math.degrees(angle)
return finalAngle
# Define a function that can split up a list of values and assign it to different cardinal directions.
def getValueBasedOnOrientation(valueList):
angles = []
if valueList == None or len(valueList) == 0: value = None
if len(valueList) == 1:
value = valueList[0]
elif len(valueList) > 1:
initAngles = rs.frange(0, 360, 360/len(valueList))
for an in initAngles: angles.append(an-(360/(2*len(valueList))))
angles.append(360)
for angleCount in range(len(angles)-1):
if angles[angleCount] <= (getAngle2North(normalVector))%360 <= angles[angleCount +1]:
targetValue = valueList[angleCount%len(valueList)]
value = targetValue
return value
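    # For reference: with four connected values the bins above come out to
    # (-45, 45], (45, 135], (135, 225], (225, 315] plus a wrap-around
    # (315, 360] that maps back to the first value, i.e. roughly the N, E, S
    # and W orientations (a sketch of the intent, assuming rs.frange includes
    # its endpoint).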
# If multiple shading depths are given, use it to split up the glazing by cardinal direction and assign different depths to different directions.
depth = getValueBasedOnOrientation(depth)
# If multiple number of shade inputs are given, use it to split up the glazing by cardinal direction and assign different numbers of shades to different directions.
numShds = getValueBasedOnOrientation(numShds)
# If multiple distances between shade inputs are given, use it to split up the glazing by cardinal direction and assign different distances of shades to different directions.
distBtwn = getValueBasedOnOrientation(distBtwn)
# If multiple horizontal or vertical inputs are given, use it to split up the glazing by cardinal direction and assign different horizontal or vertical to different directions.
horOrVertical = getValueBasedOnOrientation(horOrVertical_)
# If multiple shdAngle_ inputs are given, use it to split up the glazing by cardinal direction and assign different shdAngle_ to different directions.
shdAngle = getValueBasedOnOrientation(shdAngle_)
#If multiple interiorOrExter_ inputs are given, use it to split up the glazing by cardinal direction and assign different interiorOrExterior_ to different directions.
interiorOrExter = getValueBasedOnOrientation(interiorOrExter_)
#If multiple distToGlass_ inputs are given, use it to split up the glazing by cardinal direction and assign different distToGlass_ to different directions.
distToGlass = getValueBasedOnOrientation(distToGlass_)
# generate the planes
planes, shadingHeight = analyzeGlz(_glzSrf, distBtwn, numShds, horOrVertical, lb_visualization, normalVector)
# find the intersection crvs as the base for shadings
intCrvs =[]
for plane in planes:
try: intCrvs.append(rc.Geometry.Brep.CreateContourCurves(_glzSrf, plane)[0])
except: print "One intersection failed."
if normalVector != rc.Geometry.Vector3d.ZAxis:
normalVectorPerp = rc.Geometry.Vector3d(normalVector.X, normalVector.Y, 0)
angleFromNorm = math.degrees(rc.Geometry.Vector3d.VectorAngle(normalVectorPerp, normalVector))
if normalVector.Z < 0: angleFromNorm = angleFromNorm*(-1)
#If the user has set the shades to generate on the interior, flip the normal vector.
if interiorOrExter == True:
normalVectorPerp.Reverse()
else: interiorOrExter = False
#If a shdAngle is provided, use it to rotate the planes by that angle
if shdAngle != None:
if horOrVertical == True or horOrVertical == None:
horOrVertical = True
planeVec = rc.Geometry.Vector3d(normalVector.X, normalVector.Y, 0)
planeVec.Rotate(1.570796, rc.Geometry.Vector3d.ZAxis)
normalVectorPerp.Rotate((shdAngle*0.01745329), planeVec)
elif horOrVertical == False:
planeVec = rc.Geometry.Vector3d.ZAxis
if getAngle2North(normalVectorPerp) < 180:
normalVectorPerp.Rotate((shdAngle*0.01745329), planeVec)
else: normalVectorPerp.Rotate((shdAngle*-0.01745329), planeVec)
else:
shdAngle = 0
#Make EP versions of some of the outputs.
EPshdAngleInint = angleFromNorm+shdAngle
if EPshdAngleInint >= 0: EPshdAngle = 90 - EPshdAngleInint
else: EPshdAngle = 90 + (EPshdAngleInint)*-1
if EPshdAngle > 180 or EPshdAngle < 0:
warning = "The input shdAngle_ value will cause EnergyPlus to crash."
print warning
ghenv.Component.AddRuntimeMessage(w, warning)
if horOrVertical == True: EPSlatOrient = 'Horizontal'
else: EPSlatOrient = 'Vertical'
if interiorOrExter == True: EPinteriorOrExter = 'InteriorBlind'
else: EPinteriorOrExter = 'ExteriorBlind'
#Generate the shade curves based on the planes and extrusion vectors
if intCrvs !=[]:
for c in intCrvs:
try:
shdSrf = rc.Geometry.Surface.CreateExtrusion(c, float(depth) * normalVectorPerp).ToBrep()
shadingSurfaces.append(shdSrf)
except: pass
#If the user has specified a distance to move the shades, move them along the normal vector.
if distToGlass != None:
transVec = normalVectorPerp
transVec.Unitize()
finalTransVec = rc.Geometry.Vector3d.Multiply(distToGlass, transVec)
blindsTransform = rc.Geometry.Transform.Translation(finalTransVec)
for shdSrf in shadingSurfaces:
shdSrf.Transform(blindsTransform)
else:
distToGlass = 0
#Get the EnergyPlus distance to glass.
EPDistToGlass = distToGlass + (depth)*(0.5)*math.cos(math.radians(EPshdAngle))
if EPDistToGlass < 0.01: EPDistToGlass = 0.01
elif EPDistToGlass > 1:
warning = "The input distToGlass_ value is so large that it will cause EnergyPlus to crash."
print warning
ghenv.Component.AddRuntimeMessage(w, warning)
#Check the depth and the shadingHeight to see if E+ will crash.
assignEPCheckInit = True
if depth > 1:
assignEPCheckInit = False
warning = "Note that E+ does not like shading depths greater than 1. HBObjWShades will not be generated. shadeBreps will still be produced and you can account for these shades using a 'Honeybee_EP Context Surfaces' component."
print warning
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Remark, warning)
if shadingHeight > 1:
assignEPCheckInit = False
warning = "Note that E+ does not like distances between shades that are greater than 1. HBObjWShades will not be generated. shadeBreps will still be produced and you can account for these shades using a 'Honeybee_EP Context Surfaces' component."
print warning
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Remark, warning)
return shadingSurfaces, EPSlatOrient, depth, shadingHeight, EPshdAngle, EPDistToGlass, EPinteriorOrExter, assignEPCheckInit
def deconstructBlindMaterial(material):
matLines = material.split('\n')
name = matLines[1].split(',')[0]
reflect = float(matLines[2].split(',')[0])
transmit = float(matLines[3].split(',')[0])
emiss = float(matLines[4].split(',')[0])
thickness = float(matLines[5].split(',')[0])
conduct = float(matLines[6].split(';')[0])
return [name, reflect, transmit, emiss, thickness, conduct]
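# deconstructBlindMaterial expects an EnergyPlus-style text block: line 0 is
# the object type, line 1 the name, lines 2-5 the comma-terminated reflectance,
# transmittance, emissivity and thickness, and line 6 the semicolon-terminated
# conductivity.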
def createEPBlindMat(blindsMaterial, EPSlatOrient, depth, shadingHeight, EPshdAngle, distToGlass, name):
EPBlindMat = "WindowMaterial:Blind,\n" + \
'\t' + blindsMaterial[0] + "_" + name + ', !- Name\n' + \
'\t' + EPSlatOrient + ', !- Slat Orientation\n' + \
'\t' + str(depth) + ', !- Slat Width {m}\n' + \
'\t' + str(shadingHeight) +', !- Slat Separation {m}\n' + \
'\t' + str(blindsMaterial[4]) + ', !- Slat Thickness {m}\n' + \
'\t' + str(EPshdAngle) + ', !- Slat Angle {deg}\n' + \
'\t' + str(blindsMaterial[5]) + ', !- Slat Conductivity {W/m-K}\n' + \
'\t' + str(blindsMaterial[2]) + ', !- Slat Beam Solar Transmittance\n' + \
'\t' + str(blindsMaterial[1]) + ', !- Front Side Slat Beam Solar Reflectance\n' + \
'\t' + str(blindsMaterial[1]) + ', !- Back Side Slat Beam Solar Reflectance\n' + \
'\t' + ', !- Slat Diffuse Solar Transmittance\n' + \
'\t' + str(blindsMaterial[1]) + ', !- Front Side Slat Diffuse Solar Reflectance\n' + \
'\t' + str(blindsMaterial[1]) + ', !- Back Side Slat Diffuse Solar Reflectance\n' + \
'\t' + str(blindsMaterial[2]) + ', !- Slat Beam Visible Transmittance\n' + \
'\t' + ', !- Front Side Slat Beam Visible Reflectance\n' + \
'\t' + ', !- Back Side Slat Beam Visible Reflectance\n' + \
'\t' + ', !- Slat Diffuse Visible Transmittance\n' + \
'\t' + ', !- Front Side Slat Diffuse Visible Reflectance\n' + \
'\t' + ', !- Back Side Slat Diffuse Visible Reflectance\n' + \
'\t' + ', !- Slat Infrared Hemispherical Transmittance\n' + \
'\t' + str(blindsMaterial[3]) + ', !- Front Side Slat Infrared Hemispherical Emissivity\n' + \
'\t' + str(blindsMaterial[3]) + ', !- Back Side Slat Infrared Hemispherical Emissivity\n' + \
'\t' + str(distToGlass) + ', !- Blind to Glass Distance {m}\n' + \
'\t' + '0.5, !- Blind Top Opening Multiplier\n' + \
'\t' + ', !- Blind Bottom Opening Multiplier\n' + \
'\t' + '0.5, !- Blind Left Side Opening Multiplier\n' + \
'\t' + '0.5, !- Blind Right Side Opening Multiplier\n' + \
'\t' + ', !- Minimum Slat Angle {deg}\n' + \
'\t' + '180; !- Maximum Slat Angle {deg}\n'
return EPBlindMat
def createEPBlindControl(blindsMaterial, schedule, EPinteriorOrExter, name):
if schedule == 'ALWAYSON':
schedCntrlType = 'ALWAYSON'
schedCntrl = 'No'
schedName = ''
else:
schedName = schedule
schedCntrlType = 'OnIfScheduleAllows'
schedCntrl = 'Yes'
EPBlindControl = 'WindowProperty:ShadingControl,\n' + \
'\t' + 'BlindCntrlFor_' + name +', !- Name\n' + \
'\t' + EPinteriorOrExter + ', !- Shading Type\n' + \
'\t' + ', !- Construction with Shading Name\n' + \
'\t' + schedCntrlType + ', !- Shading Control Type\n' + \
'\t' + schedName + ', !- Schedule Name\n' + \
'\t' + ', !- Setpoint {W/m2, W or deg C}\n' + \
'\t' + schedCntrl + ', !- Shading Control Is Scheduled\n' + \
'\t' + 'No, !- Glare Control Is Active\n' + \
'\t' + blindsMaterial[0] + "_" + name + ', !- Shading Device Material Name\n' + \
'\t' + 'FixedSlatAngle, !- Type of Slat Angle Control for Blinds\n' + \
'\t' + '; !- Slat Angle Schedule Name\n'
return EPBlindControl
def main():
if _HBObjects != [] and sc.sticky.has_key('honeybee_release') == True and sc.sticky.has_key('ladybug_release') == True:
#Import the classes
hb_EPZone = sc.sticky["honeybee_EPZone"]
hb_EPSrf = sc.sticky["honeybee_EPSurface"]
hb_EPFenSurface = sc.sticky["honeybee_EPFenSurface"]
hb_hive = sc.sticky["honeybee_Hive"]()
#Make the lists that will be filled up
zoneNames = []
windowNames = []
windowSrfs = []
windowObjects = []
isZoneList = []
assignEPCheck = True
        HBObjWShades = []
        windowSrfsInit = []
        shadings = []
        alignedDataTree = []
EPSlatOrientList = []
depthList = []
shadingHeightList = []
EPshdAngleList = []
distToGlassList = []
EPinteriorOrExterList = []
#Call the objects from the hive.
HBZoneObjects = hb_hive.callFromHoneybeeHive(_HBObjects)
#Find out what the object is and make sure that we can run it through this component's functions.
for object in HBZoneObjects:
if object.objectType == "HBZone":
isZoneList.append(1)
zoneNames.append(object.name)
winBreps = []
winNames = []
for srf in object.surfaces:
if srf.hasChild:
if srf.BC == 'OUTDOORS' or srf.BC == 'Outdoors':
if srf.isPlanar == True:
for childSrf in srf.childSrfs:
windowObjects.append(childSrf)
winNames.append(childSrf.name)
winBreps.append(childSrf.geometry)
else: print "One surface with a window is not planar. EenergyPlus shades will not be assigned to this window."
else: print "One surface with a window does not have an outdoor boundary condition. EenergyPlus shades will not be assigned to this window."
windowNames.append(winNames)
windowSrfs.append(winBreps)
elif object.objectType == "HBSurface":
isZoneList.append(0)
warning = "Note that, when using this component for individual surfaces, you should make sure that the direction of the surface is facing the outdoors in order to be sure that your shades are previewing correctly."
print warning
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Remark, warning)
if not hasattr(object, 'type'):
                # find the type based on the normal angle
object.type = object.getTypeByNormalAngle()
if not hasattr(object, 'angle2North'):
                # find the angle to north
object.getAngle2North()
if not hasattr(object, "BC"):
object.BC = 'OUTDOORS'
if object.hasChild:
if object.BC != 'OUTDOORS' and object.BC != 'Outdoors':
assignEPCheck = False
warning = "The boundary condition of the input object must be outdoors. E+ cannot create shades for intdoor windows."
print warning
ghenv.Component.AddRuntimeMessage(w, warning)
elif object.isPlanar == False:
assignEPCheck = False
warning = "The surface must not be curved. With the way that we mesh curved surfaces for E+, the program would just freak out with blinds."
print warning
ghenv.Component.AddRuntimeMessage(w, warning)
else:
for childSrf in object.childSrfs:
windowObjects.append(childSrf)
windowNames.append([childSrf.name])
windowSrfs.append([childSrf.geometry])
#Make sure that all HBObjects are of the same type.
checkSameType = True
if sum(isZoneList) == len(_HBObjects): isZone = True
elif sum(isZoneList) == 0: isZone = False
else:
checkSameType = False
warning = "This component currently only supports inputs that are all HBZones or all HBSrfs but not both. For now, just grab another component for each of these inputs."
print warning
ghenv.Component.AddRuntimeMessage(w, warning)
isZone = False
#Check the inputs and make sure that we have everything that we need to generate the shades. Set defaults on things that are not connected.
if checkSameType == True:
checkData, windowNames, windowSrfsInit, depths, alignedDataTree, numOfShd, blindsMaterial, schedule = checkTheInputs(zoneNames, windowNames, windowSrfs, isZone)
        else: checkData = False
#Generate the shades.
if checkData == True:
shadings = []
for window in windowSrfsInit:
shadeBreps, EPSlatOrient, depth, shadingHeight, EPshdAngle, distToGlass, EPinteriorOrExter, assignEPCheckInit = makeShade(window, depths, numOfShd, _distBetween)
shadings.append(shadeBreps)
EPSlatOrientList.append(EPSlatOrient)
depthList.append(depth)
shadingHeightList.append(shadingHeight)
EPshdAngleList.append(EPshdAngle)
distToGlassList.append(distToGlass)
EPinteriorOrExterList.append(EPinteriorOrExter)
if assignEPCheckInit == False: assignEPCheck = False
#Create the EnergyPlus blinds material and assign it to the windows with shades.
if assignEPCheck == True:
for count, windowObj in enumerate(windowObjects):
windowObj.blindsMaterial = createEPBlindMat(blindsMaterial, EPSlatOrientList[count], depthList[count], shadingHeightList[count], EPshdAngleList[count], distToGlassList[count], windowObj.name)
windowObj.shadingControl = createEPBlindControl(blindsMaterial, schedule, EPinteriorOrExterList[count], windowObj.name)
windowObj.shadingControlName = 'BlindCntrlFor_' + windowObj.name
windowObj.shadingSchName = schedule
ModifiedHBZones = hb_hive.addToHoneybeeHive(HBZoneObjects, ghenv.Component.InstanceGuid.ToString() + str(uuid.uuid4()))
else: ModifiedHBZones = []
return checkData, windowSrfsInit, shadings, alignedDataTree, ModifiedHBZones
else:
return False, [], [], [], []
else:
print "You should first let both Honeybee and Ladybug fly..."
ghenv.Component.AddRuntimeMessage(w, "You should first let both Honeybee and Ladybug fly...")
return False, [], [], [], []
#Run the main functions.
checkData = False
if _HBObjects != [] and _runIt == True:
checkData, windowSrfsInit, shadings, alignedDataTree, HBObjWShades = main()
#Unpack the data trees.
if checkData == True:
windowBreps = DataTree[Object]()
shadeBreps = DataTree[Object]()
zoneData1Tree = DataTree[Object]()
zoneData2Tree = DataTree[Object]()
zoneData3Tree = DataTree[Object]()
for count, brep in enumerate(windowSrfsInit):
windowBreps.Add(brep, GH_Path(count))
for count, brepList in enumerate(shadings):
for brep in brepList: shadeBreps.Add(brep, GH_Path(count))
for treeCount, finalTree in enumerate(alignedDataTree):
if treeCount == 0:
for bCount, branch in enumerate(finalTree):
for twig in branch: zoneData1Tree.Add(twig, GH_Path(bCount))
elif treeCount == 1:
for bCount, branch in enumerate(finalTree):
for twig in branch: zoneData2Tree.Add(twig, GH_Path(bCount))
elif treeCount == 2:
for bCount, branch in enumerate(finalTree):
for twig in branch: zoneData3Tree.Add(twig, GH_Path(bCount))
| 2.28125 | 2 |
devserver/modules/profile.py | leture/django-devserver | 467 | 12790098 | from devserver.modules import DevServerModule
from devserver.utils.time import ms_from_timedelta
from devserver.settings import DEVSERVER_AUTO_PROFILE
from datetime import datetime
import functools
import gc
class ProfileSummaryModule(DevServerModule):
"""
Outputs a summary of cache events once a response is ready.
"""
logger_name = 'profile'
def process_init(self, request):
self.start = datetime.now()
def process_complete(self, request):
duration = datetime.now() - self.start
self.logger.info('Total time to render was %.2fs', ms_from_timedelta(duration) / 1000)
class LeftOversModule(DevServerModule):
"""
Outputs a summary of events the garbage collector couldn't handle.
"""
# TODO: Not even sure this is correct, but the its a general idea
logger_name = 'profile'
def process_init(self, request):
gc.enable()
gc.set_debug(gc.DEBUG_SAVEALL)
def process_complete(self, request):
gc.collect()
self.logger.info('%s objects left in garbage', len(gc.garbage))
from django.template.defaultfilters import filesizeformat
try:
from guppy import hpy
except ImportError:
import warnings
class MemoryUseModule(DevServerModule):
def __new__(cls, *args, **kwargs):
warnings.warn('MemoryUseModule requires guppy to be installed.')
return super(MemoryUseModule, cls).__new__(cls)
else:
class MemoryUseModule(DevServerModule):
"""
Outputs a summary of memory usage of the course of a request.
"""
logger_name = 'profile'
def __init__(self, request):
super(MemoryUseModule, self).__init__(request)
self.hpy = hpy()
self.oldh = self.hpy.heap()
self.logger.info('heap size is %s', filesizeformat(self.oldh.size))
def process_complete(self, request):
newh = self.hpy.heap()
alloch = newh - self.oldh
dealloch = self.oldh - newh
self.oldh = newh
self.logger.info('%s allocated, %s deallocated, heap size is %s', *map(filesizeformat, [alloch.size, dealloch.size, newh.size]))
try:
from line_profiler import LineProfiler
except ImportError:
import warnings
class LineProfilerModule(DevServerModule):
def __new__(cls, *args, **kwargs):
warnings.warn('LineProfilerModule requires line_profiler to be installed.')
return super(LineProfilerModule, cls).__new__(cls)
class devserver_profile(object):
def __init__(self, follow=[]):
pass
def __call__(self, func):
return func
else:
class LineProfilerModule(DevServerModule):
"""
Outputs a Line by Line profile of any @devserver_profile'd functions that were run
"""
logger_name = 'profile'
def process_view(self, request, view_func, view_args, view_kwargs):
request.devserver_profiler = LineProfiler()
request.devserver_profiler_run = False
if (DEVSERVER_AUTO_PROFILE):
_unwrap_closure_and_profile(request.devserver_profiler, view_func)
request.devserver_profiler.enable_by_count()
def process_complete(self, request):
if hasattr(request, 'devserver_profiler_run') and (DEVSERVER_AUTO_PROFILE or request.devserver_profiler_run):
from cStringIO import StringIO
out = StringIO()
if (DEVSERVER_AUTO_PROFILE):
request.devserver_profiler.disable_by_count()
request.devserver_profiler.print_stats(stream=out)
self.logger.info(out.getvalue())
def _unwrap_closure_and_profile(profiler, func):
if not hasattr(func, 'func_code'):
return
profiler.add_function(func)
if func.func_closure:
for cell in func.func_closure:
if hasattr(cell.cell_contents, 'func_code'):
_unwrap_closure_and_profile(profiler, cell.cell_contents)
class devserver_profile(object):
def __init__(self, follow=[]):
self.follow = follow
def __call__(self, func):
def profiled_func(*args, **kwargs):
request = args[0]
if hasattr(request, 'request'):
# We're decorating a Django class-based-view and the first argument is actually self:
request = args[1]
try:
request.devserver_profiler.add_function(func)
request.devserver_profiler_run = True
for f in self.follow:
request.devserver_profiler.add_function(f)
request.devserver_profiler.enable_by_count()
return func(*args, **kwargs)
finally:
request.devserver_profiler.disable_by_count()
return functools.wraps(func)(profiled_func)
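# Typical usage of the decorator (a sketch; `helper` is a hypothetical
# function whose lines should be profiled alongside the view itself):
#
#   @devserver_profile(follow=[helper])
#   def my_view(request):
#       ...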
| 2.25 | 2 |
editor/readsave/test.py | Atrosha/APOC-Editor | 4 | 12790099 | x1=10
y1=10
x2=9
y2=10
x=(x1*x1+y1*y1)
y=(x2*x2+y2*y2)
xyz=(x+y)//2
average_x=(x1+x2)//2
average_y=(y1+y2)//2
average_x_2=average_x*average_x
average_y_2=average_y*average_y
average=average_x_2+average_y_2
new_x=(x1+x2)-average_x
new_y=(y1+y2)-average_y
new_2=new_x*new_x+new_y*new_y
disp=new_2
print(xyz-average,disp)
| 3.703125 | 4 |
hyperion/helpers/__init__.py | hyperion-ml/hyperion | 14 | 12790100 | <filename>hyperion/helpers/__init__.py
"""
Copyright 2018 Johns Hopkins University (Author: <NAME>)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
from .vector_reader import VectorReader
from .vector_class_reader import VectorClassReader
from .trial_data_reader import TrialDataReader
from .multi_test_trial_data_reader import MultiTestTrialDataReader
from .multi_test_trial_data_reader_v2 import MultiTestTrialDataReaderV2
from .classif_trial_data_reader import ClassifTrialDataReader
# from .sequence_reader import SequenceReader
# from .sequence_class_reader import SequenceClassReader
# from .sequence_post_reader import SequencePostReader
# from .sequence_post_class_reader import SequencePostClassReader
from .plda_factory import PLDAFactory
| 1.515625 | 2 |
src/plot_AGs_results.py | syhw/contextual_word_segmentation | 2 | 12790101 | import numpy as np
import pylab as pl
import matplotlib
import matplotlib.pyplot as plt
from collections import defaultdict
import glob
import readline # otherwise the wrong readline is imported by rpy2
SAGE_XPS = 11
SAGE = 12
EAGE = 31
N_MONTHS = EAGE-SAGE+1
#TYPES = ["basic", "single-context", "topics"]
#TYPES = ["basic", "topics"]
TYPES = ["basic", "single-context"]
TEST = False # if True, just use the values evaluated on a test test
ITERS = range(499, 520) + range(1000,1005)
#ITERS = range(1000,1005)
#ITERS = range(600, 620)
PREFIX = ""
#PREFIX = "old_naima_XPs/"
TAKE_MAX_SCORE = False # in case of several results, otherwise do the mean+std
SORTED = True # sort the histograms by score, disable at your own risk!
FACTOR_STD = 1. # 1.96 for 95% confidence interval
OLDVERSION = False # version before March 10
LAST_ITERS = 10 # take the last XX iterations as results (considering converged)
# USED ONLY FOR TEST currently
if LAST_ITERS > 1 and TEST:
TAKE_MAX_SCORE = False
DO_ONLY = {'colloc_syll': 'baseline',
't_colloc_syll': 'split vocab',
't_readapt_colloc_syll': 'share vocab',
't_colloc_syll_wth_common': 'with common',
#'t_permuted_colloc_syll': 'permuted split vocab',
### 't_permuted_colloc_syll_wth_common': 'permuted with common',
#'t_random_colloc_syll': 'random split vocab',
### 't_random_colloc_syll_wth_common': 'random with common',
'colloc3_syll': 'colloc3 syll',
't_colloc3_syll_collocs_common': 'colloc3 syll collocs common'}
#'syll': 'syll',
#'t_syll': 'syll split vocab',
#'t_readapt_syll': 'syll share vocab'}
#'unigram': 'unigram', 't_readapt_unigram': 'unigram share vocab',
#'t_unigram': 'unigram split vocab'}
#'t_readapt_colloc_syll_wth_common': 'share vocab with common',
#'t_readapt_colloc_syll_wth_common2': 'share vocab with common 2'}
if OLDVERSION:
DO_ONLY = {'syll': 'syll', 'colloc': 'colloc',
't_readapt_colloc': 't_colloc_shr_vocab',
't_syll': 't_syll_spl_vocab',
't_readapt_colloc_wth_common': 't_colloc_wth_common',
'colloc_syll': 'colloc_syll',
't_colloc_syll': 't_colloc_syll_spl_vocab',
't_readapt_colloc_syll': 't_colloc_syll_shr_vocab',
't_colloc_syll_wth_common': 't_colloc_syll_wth_common'}
if TEST:
DO_ONLY = {'t_nopfx_colloc_syll_wth_common': 'with common no prefix',
't_test_colloc_syll_wth_common': 'with common test',
't_nopfx_colloc_syll': 'split vocab no prefix',
'test_coll_syll': 'baseline test',
't_test_colloc_syll': 'split vocab test'}
if OLDVERSION:
DO_ONLY = {'t_nopfx_coll_syll_wth_common': 't_colloc_syll_wth_common_nopfx',
't_test_coll_syll_wth_common': 't_colloc_syll_wth_common_test',
't_nopfx_coll_syll': 't_colloc_syll_spl_vocab_nopfx',
'test_coll_syll': 'colloc_syll_test',
't_test_coll_syll': 't_colloc_syll_spl_vocab_test'}
#DO_ONLY = {}
# for cosmetics when preparing figures for papers
# e.g. DO_ONLY = {'t_colloc': 'colloc with topics'}
scores_order = "token_f-score token_precision token_recall boundary_f-score boundary_precision boundary_recall".split()
results = defaultdict(lambda: [dict(zip(scores_order, [[] for i in range(len(scores_order))])) for tmp_i in range(N_MONTHS)])
if TAKE_MAX_SCORE:
results = defaultdict(lambda: [dict(zip(scores_order, [0 for i in range(len(scores_order))])) for tmp_i in range(N_MONTHS)])
for month in xrange(SAGE, EAGE+1):
for fname in glob.iglob(PREFIX+'naima_' + str(SAGE_XPS) + 'to' + str(month)
+ 'm/nai*-' + str(SAGE_XPS) + '-' + str(month) + '*.o*'):
if TEST and (not "test" in fname and not "nopfx" in fname):
continue
elif not TEST and ("test" in fname or "nopfx" in fname):
continue
if "-sc" in fname and not "single-context" in TYPES:
continue
if "docs" in fname and not "topics" in TYPES:
continue
# always plots basic results currently
doit = False
with open (fname.replace(".o", ".e")) as f:
line = ""
for line in f:
for iternumber in ITERS:
striter = str(iternumber)
if striter + " iterations" in line or "Iteration " + striter in line:
doit = True
break
if not doit:
print "NOT DOING:", fname
else:
print fname
scores = []
s_dict = {}
with open(fname) as f:
last_lines = []
for line in f:
last_lines.append(line)
try:
if TEST and LAST_ITERS > 1 and len(last_lines) > LAST_ITERS+1:
for iter_to_take in range(1,LAST_ITERS+1):
scores = [float(last_lines[-iter_to_take].split('\t')[i]) for i in range(6)]
if not len(s_dict):
s_dict = [dict(zip(scores_order, scores))]
else:
s_dict.append(dict(zip(scores_order, scores)))
else:
scores = [float(last_lines[-1].split('\t')[i]) for i in range(6)]
s_dict = dict(zip(scores_order, scores))
except:
print "PARSE ERROR: parse went wrongly for", fname
fname = '/'.join(fname.split('/')[1:])
fname = fname.replace('coll-', 'colloc-') # old names
if 'docs' in fname:
condname = '_'.join(fname.split('/')[-1].split('-')[-1].split('.')[0].split('_')[2:])
if condname == '': # topics-based unigram
condname = 'uni'
condname = 'd_' + condname
elif '-sc' in fname:
fname = fname.replace('-sc', '')
condname = 't'
if '-r+' in fname or '-r.' in fname:
condname = 't_readapt'
fname = fname.replace('-r', '')
if '-w+' in fname:
fname = fname.replace('-w+', '_words_common')
elif '-c+' in fname:
fname = fname.replace('-c+', '_collocs_common')
elif '+' in fname:
fname = fname.replace('+', '_wth_common')
condname = '_'.join([condname] + fname.split('/')[-1].split('-')[3:]).split('.')[0]
else:
condname = '_'.join(fname.split('/')[-1].split('-')[3:]).split('.')[0]
########## cosmetic (for legends) ##########
if len(DO_ONLY):
if condname in DO_ONLY:
condname = DO_ONLY[condname]
else:
continue
########## /cosmetic (for legends) ##########
if type(s_dict) == type({}) and len(s_dict) == 6:
if TAKE_MAX_SCORE:
if results[condname][month-SAGE]['token_f-score'] == 0 or s_dict['token_f-score'] > results[condname][month-SAGE]['token_f-score']:
results[condname][month-SAGE] = s_dict
else:
for k, v in s_dict.iteritems():
results[condname][month-SAGE][k].append(v)
elif type(s_dict) == type([]):
for e in s_dict:
for k, v in e.iteritems():
results[condname][month-SAGE][k].append(v)
print results
fig = plt.figure(figsize=(12, 9), dpi=1200)
plt.xticks(xrange(N_MONTHS))
ax = plt.gca()
ax.set_ylim([0.55, 0.90])
ax.set_xlim([-0.1, N_MONTHS - 0.9])
ax.set_xticklabels(map(str, range(SAGE, EAGE+1)))
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(24)
for cond, a in results.iteritems():
linetype = ''
if "syll" in cond:
linetype = '^-.'
else:
linetype = 'v-.'
if "d_" or "t_" in cond:
linetype = linetype[0] + '--'
vals = None
stddevs = None
if TAKE_MAX_SCORE:
vals = [x['token_f-score'] for x in a]
else:
vals = [np.mean(x['token_f-score']) for x in a]
stddevs = [FACTOR_STD*np.std(x['token_f-score']) for x in a] # TODO (gaussian process or some smoothing)
    plt.plot(map(lambda x: float('nan') if x <= 0.0 else x, vals), linetype, linewidth=3.5, alpha=0.8)
plt.xlabel('months')
plt.ylabel('token f-score')
plt.legend([l for l in results.iterkeys()], loc='best', ncol=4)
plt.setp(ax.get_legend().get_texts(), fontsize=20)
plt.savefig('progress_ages.png')
matplotlib.rcParams.update({'font.size': 20})
matplotlib.rcParams.update({'text.color': "black"})
matplotlib.rcParams.update({'axes.labelcolor': "black"})
matplotlib.rcParams.update({'xtick.color': "black"})
matplotlib.rcParams.update({'ytick.color': "black"})
plotted_results = {} # plotted_results[month][cond][score_type] = mean
for month in xrange(SAGE, EAGE+1):
y_pos = [0.5]
scores = []
stddevs = []
conds = []
s_dicts = []
for cond, a in results.iteritems():
score = 0
stddev = 0
if TAKE_MAX_SCORE:
score = a[month-SAGE]['token_f-score']
else:
score = np.mean(a[month-SAGE]['token_f-score'])
stddev = FACTOR_STD*np.std(a[month-SAGE]['token_f-score'])
if score > 0:
y_pos.append(y_pos[-1] + 1)
scores.append(score)
stddevs.append(stddev)
conds.append(cond)
s_dicts.append({'token_f-score': score,
'token_precision': np.mean(a[month-SAGE]['token_precision']),
'token_recall': np.mean(a[month-SAGE]['token_recall']),
'boundary_f-score': np.mean(a[month-SAGE]['boundary_f-score']),
'boundary_precision': np.mean(a[month-SAGE]['boundary_precision']),
'boundary_recall': np.mean(a[month-SAGE]['boundary_recall'])})
plotted_results[month] = dict(zip(conds, s_dicts))
if len(conds) == 0:
continue
y_pos = y_pos[:-1]
fig = plt.figure(figsize=(9, len(y_pos)), dpi=1200)
ax = plt.gca()
ax.set_ylim([0, len(y_pos)+1])
ax.set_xlim([0.6, 0.86])
if TEST:
ax.set_xlim([0.7, 0.86])
tmp = ()
if TAKE_MAX_SCORE:
tmp = zip(y_pos, scores, conds, ['g' for tmp_i in range(len(y_pos))])
if OLDVERSION:
tmp = map(lambda (y, s, cond, color): (y, s, cond, 'b') if 't' == cond[0] or 'd' == cond[0] else (y, s, cond, color), tmp)
else:
tmp = map(lambda (y, s, cond, color): (y, s, cond, 'b') if 'b' != cond[0] or 'd' == cond[0] else (y, s, cond, color), tmp) # cond[0]=='b' for cond=='baseline'
else:
tmp = zip(y_pos, scores, stddevs, conds, ['g' for tmp_i in range(len(y_pos))])
if OLDVERSION:
tmp = map(lambda (y, s, sd, cond, color): (y, s, sd, cond, 'b') if 't' == cond[0] or 'd' == cond[0] else (y, s, sd, cond, color), tmp)
else:
if TEST:
tmp = map(lambda (y, s, sd, cond, color): (y, s, sd, cond, 'b') if 'no prefix' in cond else (y, s, sd, cond, color), tmp) # "no prefix" cond => different color
tmp = map(lambda (y, s, sd, cond, color): (y, s, sd, cond, 'grey') if 'b' == cond[0] else (y, s, sd, cond, color), tmp) # cond[0]=='b' for cond=='baseline'
else:
tmp = map(lambda (y, s, sd, cond, color): (y, s, sd, cond, 'b') if 'b' != cond[0] else (y, s, sd, cond, color), tmp) # cond[0]=='b' for cond=='baseline'
if SORTED:
ys = map(lambda x: x[0], tmp)
tmp = sorted(tmp, key=lambda x: x[1])
tmp = map(lambda y,t: sum(((y,), t[1:]), ()), ys, tmp)
if TAKE_MAX_SCORE:
y_pos, scores, conds, colors = zip(*tmp)
plt.barh(y_pos, scores, color=colors, ecolor='r', alpha=0.8)
else:
y_pos, scores, stddev, conds, colors = zip(*tmp)
plt.barh(y_pos, scores, xerr=stddev, color=colors, ecolor='r', alpha=0.8)
plt.yticks(map(lambda x: x+0.5, y_pos), conds)
plt.xlabel('token f-score')
#plt.title('')
plt.savefig('histogram_' + str(SAGE_XPS) + 'to' + str(month) + 'm.png', bbox_inches='tight')
from pandas import DataFrame
from copy import deepcopy
import pandas as pd
mydata = defaultdict(lambda: [])
ages_max_points = [0 for i in xrange(SAGE, EAGE+1)]
results_m = deepcopy(results)
for cond, a in results_m.iteritems():
for i, x in enumerate(a):
if len(x['token_f-score']) > ages_max_points[i]:
ages_max_points[i] = len(x['token_f-score'])
mydata[cond].append(x['token_f-score'])
mydata['months'] = [[m for i in range(ages_max_points[m-SAGE])] for m in xrange(SAGE, EAGE+1)]
#mydata['months'] = [[str(m) for i in range(ages_max_points[m-SAGE])] for m in xrange(SAGE, EAGE+1)] # TODO if we don't want the stat_smooth to know about X (months)
for key, value in mydata.items():  # items() (not iteritems) so keys can be popped while looping
for i, l in enumerate(value):
value[i] = l + [np.nan for j in range(ages_max_points[i] - len(l))]
mydata[key] = [j for i in value for j in i]
if np.all(map(np.isnan, mydata[key])): # remove data that is only nan
mydata.pop(key)
print mydata
print ">>> conditions that will be plotted"
print mydata.keys()
mydataframe = DataFrame(mydata)
my_lng = pd.melt(mydataframe[['months'] + [k for k in mydata.keys() if k != 'months']], id_vars='months')
#my_lng = pd.melt(mydataframe[['months', 'share vocab', 'baseline', 'with common', 'split vocab']], id_vars='months')
#my_lng = pd.melt(mydataframe[['months', 't_permuted_colloc_syll', 't_permuted_colloc_syll_wth_common', 'unigram', 't_unigram', 't_readapt_unigram', 'colloc_syll', 't_colloc_syll', 't_colloc_syll_wth_common']], id_vars='months')
if OLDVERSION:
my_lng = pd.melt(mydataframe[['months', 't_colloc_syll_shr_vocab', 'colloc_syll', 't_colloc_syll_wth_common', 't_colloc_syll_spl_vocab']], id_vars='months')
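# After the melt, my_lng has three columns: 'months' (kept as the id variable),
# 'variable' (the model/condition name) and 'value' (its token f-score).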
# from ggplot_import_*
# #p = ggplot(aes(x='months', y='colloc'), data=mydataframe) + geom_point(color='lightgreen') + stat_smooth(se=True) + xlab('age in months') + ylab('token f-score')
# my_lng = pd.melt(mydataframe[['months', 't_colloc syll shr vocab', 'colloc syll', 't_colloc_syll_wth_common', 't_colloc_syll_spl_vocab', 'colloc', 'syll', 't_syll_spl_vocab']], id_vars='months')
# #p = ggplot(aes(x='months', y='value', color='variable'), data=my_lng) + stat_smooth(se=True, method='lm', level=0.95) + xlab('age in months') + ylab('token f-score')
# p = ggplot(aes(x='months', y='value', color='variable'), data=my_lng) + stat_smooth(se=False) + xlab('age in months') + ylab('token f-score')
# ggsave(p, 'ggplot_progress.png')
import rpy2.robjects as robj
import rpy2.robjects.pandas2ri # for dataframe conversion
from rpy2.robjects.packages import importr
from rpy2.robjects import globalenv
import pandas.rpy.common as com
#grdevices = importr('grDevices')
#robj.pandas2ri.activate()
#data_r = robj.conversion.py2ri(mydata)
lng_r = com.convert_to_r_dataframe(my_lng)
data_r = com.convert_to_r_dataframe(mydataframe)
globalenv['lng_r'] = lng_r
globalenv['data_r'] = data_r
globalenv['eage'] = EAGE
globalenv['sage'] = SAGE
print "==================="
print "and now for the R part"
print "==================="
rstring = """
library("ggplot2")
library("grid")
#print(lng_r)
#print(factor(lng_r$months))
#print(factor(lng_r$variable))
cLevels <- levels(lng_r$variable)
p <- ggplot(data=lng_r, aes(x=months, y=value, group=variable, colour=variable, fill=variable, shape=variable, linetype=variable))\
+ scale_y_continuous(name='token f-score')\
+ scale_x_discrete('age in months', breaks=seq(eage,sage), labels=seq(eage,sage))\
+ coord_cartesian(xlim = c(eage, sage))\
+ theme_bw()\
+ scale_colour_discrete("model", drop=TRUE, limits=cLevels)\
+ scale_fill_discrete("model", drop=TRUE, limits=cLevels)\
+ scale_shape_discrete("model", drop=TRUE, limits=cLevels)\
+ scale_linetype_discrete("model", drop=TRUE, limits=cLevels)\
+ stat_smooth(level=0.68, size=1.8)\
+ theme(text = element_text(size=44))\
"""
#+ geom_point()\
#+ xlab('age in months')\
#+ ylab('token f-score')\
#+ scale_x_continuous('age in months', breaks=seq(eage,sage), limits=c(eage,sage))\
# + scale_x_discrete('age in months')
if len(DO_ONLY) and len(DO_ONLY) < 5:
rstring += """+ opts(legend.position = c(0.96, 0.5),
legend.justification = c(1, 0.5),
legend.background = element_rect(colour = "grey70", fill = "white"),
legend.text=element_text(size=44),
legend.title=element_text(size=44),
legend.key.size=unit(2, "cm"),
plot.margin=unit(c(1,1,1,1), "cm"))
"""
else:
rstring += """+ opts(legend.background = element_rect(colour = "grey70", fill = "white"),
legend.text=element_text(size=44),
legend.title=element_text(size=44),
legend.key.size=unit(2, "cm"),
plot.margin=unit(c(1,1,1,1), "cm"))
"""
rstring += """
ggsave('ggplot2_progress.pdf', plot=p, width=22, height=16)
"""
plotFunc_2 = robj.r(rstring)
print "==================="
print "and now for the LaTeX tables"
print "==================="
header_table = """
\\begin{table*}[ht] \caption{Mean f-scores (f), precisions (p), and recalls (r) for different models depending on the size of dataset}
\\vspace{-0.5cm}
\\begin{center}
\\begin{scriptsize}
\\begin{tabular}{|c|ccc|ccc|ccc|ccc|ccc|ccc|ccc|ccc|}
\hline
& \multicolumn{3}{|c|}{syll}
& \multicolumn{3}{|c|}{t\_syll}
& \multicolumn{3}{|c|}{colloc}
& \multicolumn{3}{|c|}{t\_coll\_wth\_common}
& \multicolumn{3}{|c|}{coll\_syll}
& \multicolumn{3}{|c|}{t\_coll\_syll\_shr\_voc}
& \multicolumn{3}{|c|}{t\_coll\_syll\_spl\_voc}
& \multicolumn{3}{|c|}{t\_coll\_syll\_wth\_com}\\\\
"""
print header_table
for typ in ['token', 'boundary']:
print typ + """ & f & p & r & f & p & r & f & p & r & f & p & r & f & p & r & f & p & r & f & p & r & f & p & r \\\\
\hline """
for month, d in plotted_results.iteritems():
print str(SAGE_XPS) + "-" + str(month),
        if OLDVERSION:
            listmodels = ['syll', 't_syll_spl_vocab', 'colloc', 't_colloc_wth_common', 'colloc_syll', 't_colloc_syll_shr_vocab', 't_colloc_syll_spl_vocab', 't_colloc_syll_wth_common']
        else:
            listmodels = ['unigram', 'unigram share vocab', 'unigram split vocab', 'baseline', 'share vocab', 'split vocab', 'with common']
for cond in listmodels:
s_dict = d[cond]
f = s_dict[typ+'_f-score']
p = s_dict[typ+'_precision']
r = s_dict[typ+'_recall']
print " & ",
print "%.3f" % f,
print " & ",
print "%.3f" % p,
print " & ",
print "%.3f" % r,
print "\\\\"
print "\hline"
footer_table = """
\end{tabular}
\label{results}
\end{scriptsize}
\end{center}
\end{table*}
"""
print footer_table
| 2.078125 | 2 |
supports/pyload/src/pyload/plugins/accounts/LinkifierCom.py | LuckyNicky/pycrawler | 1 | 12790102 | # -*- coding: utf-8 -*-
import hashlib
import json
import pycurl
from ..base.multi_account import MultiAccount
class LinkifierCom(MultiAccount):
__name__ = "LinkifierCom"
__type__ = "account"
__version__ = "0.01"
__status__ = "testing"
__pyload_version__ = "0.5"
__description__ = """Linkifier.com account plugin"""
__license__ = "GPLv3"
__authors__ = [("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")]
__config__ = [
("mh_mode", "all;listed;unlisted", "Filter hosters to use", "all"),
("mh_list", "str", "Hoster list (comma separated)", ""),
("mh_interval", "int", "Reload interval in hours", 12),
]
API_KEY = "<KEY>"
API_URL = "https://api.linkifier.com/downloadapi.svc/"
def api_response(self, method, user, password, **kwargs):
post = {
"login": user,
"md5Pass": hashlib.md5(password.encode()).hexdigest(),
"apiKey": self.API_KEY,
}
post.update(kwargs)
self.req.http.c.setopt(
pycurl.HTTPHEADER, ["Content-Type: application/json; charset=utf-8"]
)
res = json.loads(self.load(self.API_URL + method, post=json.dumps(post)))
self.req.http.c.setopt(
pycurl.HTTPHEADER, ["Content-Type: text/html; charset=utf-8"]
)
return res
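    # The JSON body sent by api_response always carries the credentials:
    # (sketch) {"login": <user>, "md5Pass": <md5 hex of password>,
    # "apiKey": API_KEY} plus any extra keyword arguments.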
def grab_hosters(self, user, password, data):
json_data = self.api_response("hosters", user, password)
if json_data["hasErrors"]:
self.log_warning(json_data["ErrorMSG"] or "Unknown error")
return []
return [
x["hostername"]
for x in json_data["hosters"]
if x["hostername"] and x["isActive"]
]
def grab_info(self, user, password, data):
json_data = self.api_response("user", user, password)
trafficleft = json_data["extraTraffic"]
validuntil = float(json_data["expirydate"]) // 1000
return {
"validuntil": validuntil,
"trafficleft": -1
if trafficleft.lower() == "unlimited"
else int(trafficleft),
"premium": True,
}
def signin(self, user, password, data):
json_data = self.api_response("user", user, password)
if json_data.get("hasErrors", True) or not json_data.get("isActive", True):
self.log_warning(json_data["ErrorMSG"] or "Unknown error")
self.fail_login()
| 2.0625 | 2 |
temp.py | Hrishabh-B/Basic_codes | 0 | 12790103 | #This code is written for dynamic step-size. step size c0 gets smaller when it achieves the number 200.
#Author: <NAME>, Senior Research Fellow, University of Delhi
#Date: 5-07-2021
from math import *
import numpy as np
c0=50.0
for x in np.arange(c0,580,10):
t=10*(abs(200.1-c0)/200.1)*abs(np.log(0.3/abs(c0-200.1)))
    y=1.0/(c0-200.0**2)**2  # note: ** binds before -, so this evaluates 1/(c0-40000)**2, not 1/((c0-200.0)**2)**2
print(str(c0)+" "+str(y))
c0+=t
if c0> 198 and c0<202:
c0+=1
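# Worked examples of the step formula above (rounded):
#   c0 = 50  -> t = 10*(150.1/200.1)*|ln(0.3/150.1)| ~= 46.6  (coarse far from 200)
#   c0 = 199 -> t = 10*(1.1/200.1)*|ln(0.3/1.1)|     ~= 0.071 (fine near 200)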
| 3.203125 | 3 |
src/OTLMOW/OTLModel/Classes/Grasland.py | davidvlaminck/OTLClassPython | 2 | 12790104 | <gh_stars>1-10
# coding=utf-8
from OTLMOW.OTLModel.BaseClasses.OTLAttribuut import OTLAttribuut
from OTLMOW.OTLModel.Classes.GrazigeVegetatie import GrazigeVegetatie
from OTLMOW.OTLModel.Datatypes.KlNSB import KlNSB
from OTLMOW.GeometrieArtefact.VlakGeometrie import VlakGeometrie
# Generated with OTLClassCreator. To modify: extend, do not edit
class Grasland(GrazigeVegetatie, VlakGeometrie):
"""Grazige vegetatie met daarin kruidachtigen die jaarlijks één of meerdere malen per jaar gemaaid of begraasd wordt."""
typeURI = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Grasland'
"""De URI van het object volgens https://www.w3.org/2001/XMLSchema#anyURI."""
def __init__(self):
GrazigeVegetatie.__init__(self)
VlakGeometrie.__init__(self)
self._natuurstreefbeeld = OTLAttribuut(field=KlNSB,
naam='natuurstreefbeeld',
label='natuurstreefbeeld',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Grasland.natuurstreefbeeld',
                                             definition='A natuurstreefbeeld (nature target) is a biotope, mosaic of biotopes or habitat of a species that one aims to preserve or obtain through good nature management. In the definitive plan of type two, three or four, the ecological end goal is established on the basis of nature targets.',
owner=self)
@property
def natuurstreefbeeld(self):
"""Een natuurstreefbeeld is een nagestreefd biotoop, mozaïek van biotopen of een leefgebied van een soort dat je wil behouden of verkrijgen via een goed natuurbeheer.
In het definitief plan van type twee, drie of vier wordt het ecologisch einddoel vastgesteld aan de hand van natuurstreefbeelden."""
return self._natuurstreefbeeld.get_waarde()
@natuurstreefbeeld.setter
def natuurstreefbeeld(self, value):
self._natuurstreefbeeld.set_waarde(value, owner=self)
| 2.015625 | 2 |
build_html/preprocess_xml.py | DCCouncil/dc-law-tools | 3 | 12790105 | <reponame>DCCouncil/dc-law-tools
import lxml.etree as et
from .preprocessors import preprocessors
from .preprocessors.utils import index_docs
import os
DIR = os.path.abspath(os.path.dirname(__file__))
out_dir = os.path.join(DIR, '../../dc-law-html')
bld_file = os.path.join(DIR, '../working_files/dccode-html-bld.xml')
def preprocess_xml():
print('preprocessing...')
parser = et.XMLParser(remove_blank_text=True)
with open(bld_file) as f:
dom = et.parse(f, parser)
index_docs(dom)
for preprocessor in preprocessors:
preprocess(dom, *preprocessor)
with open(bld_file, 'wb') as f:
f.write(et.tostring(dom, pretty_print=True, encoding="utf-8"))
def preprocess(dom, xpath, *preprocessors):
roots = dom.xpath(xpath)
if not roots:
        raise ValueError('no valid roots for xpath: {}'.format(xpath))
for root in roots:
for preprocessor in preprocessors:
preprocessor(root)
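# Each entry of `preprocessors` is therefore expected to be a tuple of the form
# (xpath, func1, func2, ...); e.g. a hypothetical entry:
#   ('//section', fix_numbering, strip_empty_nodes)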
def pdfs(dom):
pdf_dir = os.path.join(DIR, 'dc_laws')
pdf_out_path = os.path.join(DIR, '../../dc-law-docs-laws/{}.pdf')
pdfs = os.listdir(pdf_dir)
law_root = dom.find('//collection[@name="dclaws"]')
skip_laws = law_root.xpath('./collection/document[cites/law/@url]/num/text()')
for pdf in pdfs:
if not pdf.startswith('dc-law-docs-laws'):
print('skipping', pdf)
continue
pdf_path = os.path.join(pdf_dir, pdf)
law_num = pdf.replace('dc-law-docs-laws', '')[:-4]
if law_num in skip_laws:
continue
os.rename(pdf_path, pdf_out_path.format(law_num))
| 2.421875 | 2 |
code/config.py | SimonSuster/rc-cnn-dailymail | 325 | 12790106 |
import theano
import argparse
_floatX = theano.config.floatX
def str2bool(v):
return v.lower() in ('yes', 'true', 't', '1', 'y')
def get_args():
parser = argparse.ArgumentParser()
parser.register('type', 'bool', str2bool)
# Basics
parser.add_argument('--debug',
type='bool',
default=False,
help='whether it is debug mode')
parser.add_argument('--test_only',
type='bool',
default=False,
help='test_only: no need to run training process')
parser.add_argument('--random_seed',
type=int,
default=1013,
help='Random seed')
# Data file
parser.add_argument('--train_file',
type=str,
default=None,
help='Training file')
parser.add_argument('--dev_file',
type=str,
default=None,
help='Development file')
parser.add_argument('--pre_trained',
type=str,
default=None,
help='Pre-trained model.')
parser.add_argument('--model_file',
type=str,
default='model.pkl.gz',
help='Model file to save')
parser.add_argument('--log_file',
type=str,
default=None,
help='Log file')
parser.add_argument('--embedding_file',
type=str,
default=None,
help='Word embedding file')
parser.add_argument('--max_dev',
type=int,
default=None,
help='Maximum number of dev examples to evaluate on')
parser.add_argument('--relabeling',
type='bool',
default=True,
help='Whether to relabel the entities when loading the data')
# Model details
parser.add_argument('--embedding_size',
type=int,
default=None,
help='Default embedding size if embedding_file is not given')
parser.add_argument('--hidden_size',
type=int,
default=128,
help='Hidden size of RNN units')
parser.add_argument('--bidir',
type='bool',
default=True,
help='bidir: whether to use a bidirectional RNN')
parser.add_argument('--num_layers',
type=int,
default=1,
help='Number of RNN layers')
parser.add_argument('--rnn_type',
type=str,
default='gru',
help='RNN type: lstm or gru (default)')
parser.add_argument('--att_func',
type=str,
default='bilinear',
help='Attention function: bilinear (default) or mlp or avg or last or dot')
# Optimization details
parser.add_argument('--batch_size',
type=int,
default=32,
help='Batch size')
parser.add_argument('--num_epoches',
type=int,
default=100,
help='Number of epoches')
parser.add_argument('--eval_iter',
type=int,
default=100,
help='Evaluation on dev set after K updates')
parser.add_argument('--dropout_rate',
type=float,
default=0.2,
help='Dropout rate')
parser.add_argument('--optimizer',
type=str,
default='sgd',
help='Optimizer: sgd (default) or adam or rmsprop')
parser.add_argument('--learning_rate', '-lr',
type=float,
default=0.1,
help='Learning rate for SGD')
parser.add_argument('--grad_clipping',
type=float,
default=10.0,
help='Gradient clipping')
return parser.parse_args()
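# Example invocation (a sketch; the file paths below are placeholders):
#   python main.py --train_file data/train.txt --dev_file data/dev.txt \
#       --embedding_file glove.6B.100d.txt -lr 0.05
# get_args() then exposes the values as e.g. args.hidden_size, args.att_func.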
| 2.4375 | 2 |
modules/evaluation.py | hrayrhar/limit-label-memorization | 37 | 12790107 | <reponame>hrayrhar/limit-label-memorization
from tqdm import tqdm
import numpy as np
def compute_accuracy_with_bootstrapping(pred, target, n_iters=1000):
""" Expects numpy arrays. pred should have shape (n_samples, n_classes), while
target should have shape (n_samples,).
"""
assert pred.shape[0] == target.shape[0]
all_accuracies = []
    for _ in tqdm(range(n_iters), desc='bootstrapping'):
indices = np.random.choice(pred.shape[0], size=pred.shape[0], replace=True)
cur_pred = pred[indices]
cur_target = target[indices]
        cur_accuracy = np.mean((cur_pred.argmax(axis=1) == cur_target).astype(float))
all_accuracies.append(cur_accuracy)
return {
'mean': np.mean(all_accuracies),
'std': np.std(all_accuracies)
}
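# Example usage (a minimal sketch with random stand-in data, not real model
# outputs):
#   pred = np.random.rand(500, 10)          # (n_samples, n_classes) scores
#   target = np.random.randint(0, 10, 500)  # integer class labels
#   stats = compute_accuracy_with_bootstrapping(pred, target, n_iters=100)
#   print(stats['mean'], stats['std'])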
| 2.90625 | 3 |
hmm_for_baxter_using_only_success_trials/anomaly_identification.py | HongminWu/HMM | 3 | 12790108 | <reponame>HongminWu/HMM
#!/usr/bin/env python
import os
import numpy as np
from sklearn.externals import joblib
from matplotlib import pyplot as plt
import util
import training_config
import pandas as pd
import random
import ipdb
def run(anomaly_data_path_for_testing,
model_save_path,
figure_save_path,):
'''
    1. load all the trained anomaly models
    2. load testing anomaly data
    3. plot the log-likelihood w.r.t. each model in the same figure
'''
# load trained anomaly models
anomaly_model_group_by_label = {}
folders = os.listdir(training_config.anomaly_model_save_path)
for fo in folders:
path = os.path.join(training_config.anomaly_data_path, fo)
if not os.path.isdir(path):
continue
anomaly_model_path = os.path.join(training_config.anomaly_model_save_path,
fo,
training_config.config_by_user['data_type_chosen'],
training_config.config_by_user['model_type_chosen'],
training_config.model_id)
try:
anomaly_model_group_by_label[fo] = joblib.load(anomaly_model_path + "/model_s%s.pkl"%(1,))
except IOError:
print 'anomaly model of %s not found'%(fo,)
continue
# one-folder
confuse_matrix = {}
folders = os.listdir(anomaly_data_path_for_testing)
for fo in folders:
predict_class = []
data_path = os.path.join(anomaly_data_path_for_testing, fo)
        if not os.path.isdir(data_path):
continue
anomaly_testing_group_by_folder_name = util.get_anomaly_data_for_labelled_case(training_config, data_path)
# one-file
for trial_name in anomaly_testing_group_by_folder_name:
'''
#plot
fig = plt.figure()
ax = fig.add_subplot(111)
from matplotlib.pyplot import cm
color = iter(cm.rainbow(np.linspace(0, 1, len(anomaly_model_group_by_label))))
'''
calc_cofidence_resourse = []
for model_label in anomaly_model_group_by_label:
one_log_curve_of_this_model = util.fast_log_curve_calculation(
anomaly_testing_group_by_folder_name[trial_name][1],
anomaly_model_group_by_label[model_label])
# one_predict_proba_of_this_state = anomaly_model_group_by_label[model_label].predict_proba(anomaly_testing_group_by_folder_name[trial_name][1]) # HDPHSMM haven't implemented this
# one_hidden_stateSeq_of_this_state = anomaly_model_group_by_label[model_label].decode(anomaly_testing_group_by_folder_name[trial_name][1],len(anomaly_testing_group_by_folder_name[trial_name][1])-1)
calc_cofidence_resourse.append({
'model_label' : model_label,
'culmulative_loglik': one_log_curve_of_this_model[-1],
'loglik_curve' : one_log_curve_of_this_model,
# 'predict_proba' : one_predict_proba_of_this_state,
# 'hidden_stateSeq' : one_hidden_stateSeq_of_this_state
})
'''
#--plot
c = next(color)
plot_line, = ax.plot(one_log_curve_of_this_model, linestyle="solid", color = c)
plot_line.set_label(model_label)
title = ('Anomaly_identification for ' + fo)
ax.set_title(title)
'''
sorted_list = sorted(calc_cofidence_resourse, key=lambda x:x['culmulative_loglik']) # from small to large
optimal_result = sorted_list[-1]
classified_model = optimal_result['model_label']
predict_class.append(classified_model)
all_log_curves_of_this_state, threshold, confidence = get_confidence_of_identification(optimal_result)
'''
if confidence < 0.0:
df = pd.DataFrame(anomaly_testing_group_by_folder_name[trial_name][1], columns=training_config.interested_data_fields)
id = random.randint(1000,10000)
_name = 'unknown_anomaly_' + str(id)
unknown_anomaly_path = os.path.join(training_config.anomaly_data_path, _name)
os.makedirs(unknown_anomaly_path)
print 'generated a new anomaly:' + _name
print '*\n'*5
print 'synthetic data generation'
import generate_synthetic_data
generate_synthetic_data.run_finite_differece_matrix(df=df, num_data = 5, csv_save_path=unknown_anomaly_path, trial_name)
'''
#--plot
'''
for no_trial in range(len(all_log_curves_of_this_state)):
ax.plot(all_log_curves_of_this_state[no_trial], linestyle= '--', color = 'gray', label = 'trials')
ax.plot(threshold.tolist()[0], linestyle='--', color='gold', label='threshold')
ax.legend(loc='upper left')
ax.text(20,optimal_result['culmulative_loglik']/2, optimal_result['model_label'] + ': ' + str(confidence),
ha = 'center', va = 'baseline',
bbox=dict(boxstyle="round",
ec=(1., 0.6, 0.6),
fc=(1., 0.9, 0.9),)
)
if not os.path.isdir(figure_save_path + '/anomaly_identification_plot'):
os.makedirs(figure_save_path + '/anomaly_identification_plot')
fig.savefig(os.path.join(figure_save_path, 'anomaly_identification_plot', fo + ":" + trial_name + ".jpg"), format="jpg")
# fig.show(1)
# raw_input('testing another trial?? Please any key to continue')
'''
print 'Finish testing: '+ fo + '\n'
confuse_matrix[fo] = predict_class
_items = confuse_matrix.keys()
_matrix = np.identity(len(_items))
for row in _items:
for col in _items:
r = _items.index(row)
c = _items.index(col)
_matrix[r, c] = confuse_matrix[row].count(col)
print 'print the confusion matrix...'
print _items
print _matrix
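    # Row r of _matrix is the true (folder) label and column c the predicted
    # model label; entry (r, c) counts trials of class r classified as c.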
def get_confidence_of_identification(optimal_result):
confidence_metric = ['culmulative_loglik_divide_by_the_culmulative_mean_loglik',
'posterior_of_gmm_model',
'calc_kullback_leibler_divergence_of_predict_proba',
'hamming_distance_of_hidden_state_sequence',
]
CONF_TYPE = confidence_metric[0]
anomaly_model_path = os.path.join(training_config.anomaly_model_save_path,
optimal_result['model_label'],
training_config.config_by_user['data_type_chosen'],
training_config.config_by_user['model_type_chosen'],
training_config.model_id)
if CONF_TYPE == 'culmulative_loglik_divide_by_the_culmulative_mean_loglik':
c_value = 5
all_log_curves_of_this_state = joblib.load(os.path.join(anomaly_model_path, 'all_log_curves_of_this_state.pkl'))
std_of_log_likelihood = joblib.load(os.path.join(anomaly_model_path, 'std_of_log_likelihood.pkl'))
np_matrix_traj_by_time = np.matrix(all_log_curves_of_this_state)
mean_of_log_curve = np_matrix_traj_by_time.mean(0)
threshold = mean_of_log_curve - std_of_log_likelihood[1]
confidence = optimal_result['culmulative_loglik'] - threshold.tolist()[0][-1]
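        # Confidence here is the log-likelihood margin between the trial's final
        # cumulative score and the endpoint of the threshold band; negative
        # values fall outside the envelope learned from the training trials.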
return all_log_curves_of_this_state, threshold, confidence
elif CONF_TYPE == 'posterior_of_gmm_model':
#load -> build a hmm model -> calculate the probability of testing sequence
all_log_curves_of_this_state = joblib.load(os.path.join(anomaly_model_path, 'all_log_curves_of_this_state.pkl'))
data = np.ndarray([])
for icurve in range(len(all_log_curves_of_this_state)):
tVal = range(len(all_log_curves_of_this_state[icurve]))
feature = all_log_curves_of_this_state[icurve]
data_points = np.vstack([np.array(tVal), feature]).T
if icurve == 0:
data = data_points
else:
data = np.vstack([data, data_points])
# fit a gmm model
from sklearn import mixture
gmm = mixture.GaussianMixture(n_components = 5, covariance_type = 'diag').fit(data)
tVal = range(optimal_result['loglik_curve'].shape[0])
testing_data = np.vstack([np.array(tVal), optimal_result['loglik_curve']]).T
confidence = gmm.score(testing_data)
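        # Note: sklearn's GaussianMixture.score returns the average per-sample
        # log-likelihood of the testing points under the fitted mixture.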
return all_log_curves_of_this_state, None, confidence
elif CONF_TYPE == 'calc_kullback_leibler_divergence_of_predict_proba':
        print 'warning: this confidence metric is obsolete'
from scipy.stats import entropy
# average_predict_proba
average_predict_proba = joblib.load(os.path.join(anomaly_model_path, 'average_predict_proba.pkl'))
testing_predict_proba = optimal_result['predict_proba']
confidence = 0.0
for iObs in range(len(testing_predict_proba)):
confidence += entropy(testing_predict_proba[iObs,:], average_predict_proba[1][iObs,:])
return None, None, confidence
elif CONF_TYPE == 'hamming_distance_of_hidden_state_sequence':
# hidden_state_sequence_of_training_trials
hidden_stateSeq = joblib.load(os.path.join(anomaly_model_path, 'hidden_stateSeq.pkl'))
hidden_stateSeq = np.append(hidden_stateSeq, [hidden_stateSeq[-1]]) # add one item, because for autoregressive model, I had deleted one data point
totalLen = len(hidden_stateSeq)
testing_stateSeq = optimal_result['hidden_stateSeq']
testing_stateSeq = np.append(testing_stateSeq, [testing_stateSeq[-1]])
tLen = len(testing_stateSeq)
hidden_stateSeq = hidden_stateSeq.reshape(totalLen/tLen, tLen)
from scipy.spatial.distance import hamming
confidence = 0
for iTrial in range(totalLen/tLen):
confidence += hamming(testing_stateSeq, hidden_stateSeq[iTrial,:])
return None, None, confidence
else:
print ("without the confidence_metric as: " + CONF_TYPE)
pass
| 2.390625 | 2 |
matador/orm/__init__.py | dquigley-warwick/matador | 24 | 12790109 | <filename>matador/orm/__init__.py
# coding: utf-8
# Distributed under the terms of the MIT License.
__all__ = ["DataContainer"]
__author__ = '<NAME>'
__maintainer__ = '<NAME>'
from .orm import DataContainer
| 1.265625 | 1 |
lib/Model.py | calumcorrie/Meraki-Crowd-Interface | 0 | 12790110 | <reponame>calumcorrie/Meraki-Crowd-Interface<gh_stars>0
import os
import sys
import numpy as np
from PIL import Image, ImageFilter
import bz2
import pickle
import datetime
import requests
import hashlib
parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
sys.path.append(parentddir)
from lib.APIQuery import APIQuery, FloorPlan
from lib.BoundaryDetector import BoundaryDetector
# As per Scanning API v3
SECRET_K = "secret"
class Floor:
"Wrapper class for the floorplan object including model data"
def __init__(self,floorplan:FloorPlan):
self.floorplan = floorplan
# Dimentions should default to (height, width)
self.floorplan_dimensions = (self.floorplan.height,self.floorplan.width)
self.overlay_dimensions = (
int(self.floorplan_dimensions[0]//Model.CELL_SIZE_M)+1,
int(self.floorplan_dimensions[1]//Model.CELL_SIZE_M)+1
)
# Determine the distance between the end of the floorplan and the data overlay, in metres and px
self.margin_m = (
Model.CELL_SIZE_M - (self.floorplan_dimensions[0] % Model.CELL_SIZE_M),
Model.CELL_SIZE_M - (self.floorplan_dimensions[1] % Model.CELL_SIZE_M)
)
self.margin_px = (
self.margin_m[0] * self.floorplan.px_per_m_h,
self.margin_m[1] * self.floorplan.px_per_m_w
)
self.mask_enabled = False
self.pixelmask = None
self.mask = np.ones(self.overlay_dimensions,dtype=np.bool_)
self.aps = {}
self.bm_boxes = []
def set_bounds_mask(self,blindspots=None,wallthreshold:float=None) -> None:
""""
Generate a mask from BoundaryDetector for areas of the floor that people cannot possibly be or are to be ignored
eg outside high floors.
Blindspots should be a Numpy array, tuple or nested list of the form [[x1,x2,y1,y2],...]
"""
self.bm_boxes = blindspots
if wallthreshold == None:
bd = BoundaryDetector( self.floorplan.get_image() )
else: # pragma: no cover
# Not covered as only parameter passing
bd = BoundaryDetector( self.floorplan.get_image(), threshold=wallthreshold )
if blindspots != None:
for spot in blindspots:
bd.add_blindspot(*tuple(spot))
bd.run()
self.pixelmask = bd.getBoundaryMask()
#Downsample from pixel level to mask level
#Assuming cell >> pixel
#Mask (1msq-scale, small-dims) dims
mx = self.mask.shape[0]
my = self.mask.shape[1]
#Image-scale pixel mask (mini-scale, big-dims) dims
ix = self.pixelmask.shape[0]
iy = self.pixelmask.shape[1]
#Image-scale chunk divisions (what coords do we get laying one mask over the other)
# Account for margin between edge of floorplan and overlay
ix_divs = np.floor(np.linspace( 0, ix+self.margin_px[0], mx+1 )).astype("int32")
iy_divs = np.floor(np.linspace( 0, iy+self.margin_px[1], my+1 )).astype("int32")
for x in range(mx):
#Top, bottom
it = ix_divs[x]
ib = ix_divs[x+1]
for y in range(my):
#Left, right
il = iy_divs[y]
ir = iy_divs[y+1]
# As array is binary, mean gives ratio of elems 1 to elems total
self.mask[x][y] = self.pixelmask[it:ib,il:ir].mean() < Model.DOWNSAMPLE_THRESHOLD
def calc_bounds_mask(self,blindspots=None,wallthreshold:float=None) -> Image.Image:
"""
Generate a preview of a bounds mask with given parameters.
For more info see Floor.set_bounds_mask
"""
if wallthreshold == None:
bd = BoundaryDetector( self.floorplan.get_image() )
else: # pragma: no cover
# Not covered as only parameter passing
bd = BoundaryDetector( self.floorplan.get_image(), threshold=wallthreshold )
if blindspots != None:
for spot in blindspots:
bd.add_blindspot(*tuple(spot))
bd.run()
return bd.generate_graphic()
def render_overlay(self,overlay:np.ndarray,pixelmask:bool=True):
"""
Render overlay onto the floorplan image in heatmap form.
If pixelmask and image has bounds mask set, will mask final image to keep
overlay heatmap within bounds.
"""
POS = np.array([255,0,0,180],dtype=np.uint8)
NEG = np.array([0,255,0,180],dtype=np.uint8)
BLUR_CELLS = 0.35
destination = self.floorplan.get_image().convert("RGBA")
# Overlay scaling
absmax = max(overlay.max(), overlay.min(), key=abs)
#m_max, m_min = absmax, -absmax
imarr = np.zeros((destination.size[1],destination.size[0],4),dtype="uint8")
# Account for margin between edge of floorplan and overlay
ix_points = np.floor(np.linspace(
0, imarr.shape[0] + self.margin_px[0], overlay.shape[0] + 1
)).astype("int32")
iy_points = np.floor(np.linspace(
0,imarr.shape[1] + self.margin_px[1], overlay.shape[1] + 1
)).astype("int32")
for mx in range(overlay.shape[0]):
# Top, bottom
ixs = ix_points[mx]
ixe = ix_points[mx+1]
for my in range(overlay.shape[1]):
val = overlay[mx,my]
if val==0: continue
#Colour by sign, scale alpha by magnitude
pos = val > 0
alpha = abs(val / absmax)
#Left, right
iys = iy_points[my]
iye = iy_points[my+1]
imarr[ixs:ixe,iys:iye] = POS if pos else NEG
imarr[ixs:ixe,iys:iye,3] = ( imarr[ixs:ixe,iys:iye,3].astype(np.float64) * alpha ).astype(np.uint8)
imarr = Image.fromarray(imarr,"RGBA").filter( ImageFilter.BoxBlur( BLUR_CELLS * destination.size[0] / overlay.shape[1] ) )
if isinstance(self.pixelmask, np.ndarray) and pixelmask:
# Tidy the edges
# Make the mask an Image, mode=L
mask = Image.fromarray(255*self.pixelmask.astype(np.uint8),"L")
# Paste alpha on masked region
imarr.paste((0,0,0,0),mask)
elif pixelmask:
print("Error: Could not filter by pixel mask as no mask exists")
destination.putalpha(255)
return Image.alpha_composite(destination,imarr)
class Layer:
"Class representing a data layer: a series of overlays covering all floorplans with data from a single source type"
def __init__(self,floorplans:dict,exposure:int):
self.exposure = exposure
self.overlays = { _id : Overlay(_id, floor.overlay_dimensions, floor.floorplan_dimensions, floor.mask, exposure) for _id,floor in floorplans.items() }
def set_observations(self,observations:dict):
"Set the layer to contain passed observations, clearing any previous. Pass floor object dictionary"
bins = { id : dict() for id in self.overlays.keys() }
for id, placeable in observations.items():
bins[placeable.floorPlanId][id] = placeable
for fid, overlay in self.overlays.items():
overlay.roll()
obs = bins.get(fid)
if obs != None:
overlay.add(obs)
def get_deltas(self, masked:bool=True, exposure:int=0) -> dict:
"""
Return the delta data for each overlay in the layer.
If exposure 0 (default), will provide all available exposures squashed.
For more info, see Overlay.get_delta
"""
return { _id : over.get_delta(masked, exposure) for _id, over in self.overlays.items() }
def get_full(self, masked:bool=True, exposure:int=0) -> dict: #pragma: no cover
"""
Return the full data for each overlay in the layer.
If exposure 0 (default), will provide all available exposures squashed.
See Overlay.get_full
"""
# Not covered as not required for current scope
return { _id : over.get_full(masked, exposure) for _id, over in self.overlays.items() }
def copy(self,flatten:bool=True):
"""
Return a copy of the layer.
Flatten squashes exposures into 1 frame
For more info, see Overlay.copy()
"""
ly = Layer({}, 1 if flatten else self.exposure )
ly.overlays = { _id : over.copy(flatten) for _id, over in self.overlays.items() }
return ly
def clear(self)->None:
"Clear all member overlays of observation data"
for over in self.overlays.values():
over.clear()
def verify_and_update(self, floorplans:dict):
"""
Ensure overlays are compatible with current floorplans (Floor objects).
Throws ModelException in case of dimention mismatch.
If mask is outdated, update - note this does not effect existing data, only new observations
If an overlay missing, print info, create new
If an overlay is extra, do nothing
"""
for fpid in set(floorplans).difference(self.overlays.keys()):
# A floorplan not represented in the floorplans but not in the overlays
print("Info: Creating new overlay for FPID:{}".format(fpid))
fp = floorplans[fpid]
self.overlays[fpid] = Overlay(fpid, fp.overlay_dimensions, fp.floorplan_dimensions, fp.mask, self.exposure)
for fpid, floor in floorplans.items():
self.overlays[fpid].verify_and_update(floor)
class Overlay:
"Represents an data overlay of a single floorplan"
def __init__(self, floorid:str, overlay_dimensions:tuple, real_dimensions:tuple, floormask:np.ndarray, exposure:int):
self.floorid = floorid
#self.observations = dict()
self.overlay_dimensions = overlay_dimensions
self.real_dimensions = real_dimensions
self.mask = floormask
assert exposure > 0
self.exposure = exposure
# Exposure queue, shape (exp,x,y)
self.__unfixed_observations = np.zeros(exposure)
self.__masked_dataoverlay = np.zeros( (exposure,)+overlay_dimensions, dtype="float32" )
self.__unmasked_dataoverlay = np.zeros( (exposure,)+overlay_dimensions, dtype="float32" )
def set(self, unfixed_count:np.ndarray, masked_overlay:np.ndarray, unmasked_overlay:np.ndarray):
"Sets internal observation data. Not for general use, instead use Overlay.add"
assert len(self.__unfixed_observations.shape) == len(unfixed_count.shape)
assert len(self.__masked_dataoverlay.shape) == len(masked_overlay.shape)
assert len(self.__unmasked_dataoverlay.shape) == len(unmasked_overlay.shape)
self.__unfixed_observations = unfixed_count
self.__masked_dataoverlay = masked_overlay
self.__unmasked_dataoverlay = unmasked_overlay
def roll(self) -> None:
"Roll the exposure, preparing for a new frame of data"
self.__masked_dataoverlay = np.roll(self.__masked_dataoverlay, 1, axis=0)
self.__unmasked_dataoverlay = np.roll(self.__unmasked_dataoverlay, 1, axis=0)
self.__unfixed_observations = np.roll(self.__unfixed_observations, 1, axis=0)
self.__masked_dataoverlay[0] = 0
self.__unmasked_dataoverlay[0] = 0
self.__unfixed_observations[0] = 0
def clear(self) -> None:
"Clear accumulated observation data including exposure"
self.__unfixed_observations[:] = 0
self.__masked_dataoverlay[:] = 0
self.__unmasked_dataoverlay[:] = 0
def copy(self,flatten:bool=False):
"""
Return a copy of the overlay.
If flatten, squash (mean) exposure window into 1 frame.
"""
if flatten:
cp = Overlay(self.floorid, self.overlay_dimensions, self.real_dimensions, self.mask, 1)
cp.__unfixed_observations[0] = self.__unfixed_observations.mean(axis=0)
cp.__masked_dataoverlay[0] = self.__masked_dataoverlay.mean(axis=0)
cp.__unmasked_dataoverlay[0] = self.__unmasked_dataoverlay.mean(axis=0)
else:
cp = Overlay(self.floorid, self.overlay_dimensions, self.real_dimensions, self.mask, self.exposure)
cp.__unfixed_observations = self.__unfixed_observations.copy()
cp.__masked_dataoverlay = self.__masked_dataoverlay.copy()
cp.__unmasked_dataoverlay = self.__unmasked_dataoverlay.copy()
return cp
def add(self,observations:dict) -> None:
fixed = {}
unfixed = {}
for id, placeable in observations.items():
if ( placeable.x != None and placeable.y != None ) or placeable.has_mask_override:
fixed[id] = placeable
else:
unfixed[id] = placeable
self.__add_fixed_locations(fixed)
self.__add_unfixed_locations(unfixed)
def __add_fixed_locations(self,fixed:dict) -> None:
for placeable in fixed.values():
# We store both a copy of the m(asked)_possible locations and the u(n)m(asked)_possible locations
um_possible = np.zeros(self.overlay_dimensions, dtype=np.bool_)
if placeable.has_mask_override:
                # Even if a floorplan mask is in place, mask override takes precedence
um_possible = placeable.mask_override
m_possible = placeable.mask_override
else:
if placeable.variance < Model.VARIANCE_THRESHOLD:
# Calculated minimum reach
# Account for change of axis
um_possible[-int(placeable.y/Model.CELL_SIZE_M),int(placeable.x/Model.CELL_SIZE_M)] = 1
else:
# Store parsed location in x,y tuple, in m, with change of axis
client_loc = np.array( [self.real_dimensions[0] - placeable.y, placeable.x ] )
for x in range(um_possible.shape[0]):
for y in range(um_possible.shape[1]):
# For each square, see if it's centre is close enough to be within variance metres
d = np.linalg.norm(
(np.array([x,y]) + 0.5) * Model.CELL_SIZE_M - client_loc
)
um_possible[x,y] = d <= placeable.variance
# m(asked)_possible is a copy of the u(n)m(asked)_possible's with the floor mask applied
m_possible = np.logical_and( um_possible, self.mask, dtype=np.bool_ )
            # If there's a division by zero here, the search found no cells near enough to be candidates
# Ignore floor mask
self.__unmasked_dataoverlay[0][um_possible] += 1.0 / um_possible.sum()
# Include floor mask
self.__masked_dataoverlay[0][m_possible] += 1.0 / m_possible.sum()
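            # Each fixed observation contributes a total weight of 1.0, spread
            # uniformly over its N candidate cells (1/N per cell).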
def __add_unfixed_locations(self,unfixed:dict) -> None:
self.__unfixed_observations[0] += len(unfixed)
def get_delta(self, masked:bool=True, exposure:int=0)->np.ndarray:
"""
Returns a copy of the delta overlay (only fixed observations).
If masked, will select data masked at input, else unmasked data.
Set exposure to specify mean smoothing on the first n frames of stored exposure,
default (0) combines all available frames,
1 gives only the latest frame (no smoothing).
"""
window = self.exposure if exposure == 0 else exposure
if masked:
return self.__masked_dataoverlay[:window].mean(axis=0)
else:
return self.__unmasked_dataoverlay[:window].mean(axis=0)
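    # Illustrative note (not part of the original module): with exposure=3 and
    # stored frames f0 (newest), f1, f2, get_delta(exposure=2) returns the
    # element-wise mean (f0 + f1) / 2, i.e. smoothing over the newest frames only.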
def get_unfixed_observations(self, exposure:int=0)->float:
"""
Return how many unfixed observations were passed.
For more details on exposure, see Overlay.get_delta
"""
window = self.exposure if exposure == 0 else exposure
return self.__unfixed_observations[:window].mean(axis=0)
def get_full(self, masked:bool=True, exposure:int=0)->np.ndarray: #pragma: no cover
"""
Returns a copy of the full client overlay (including distributed unfixed observations)
For exposure, see Overlay.get_delta
"""
data = self.get_delta(masked,exposure)
window = self.exposure if exposure == 0 else exposure
# Distribute unfixed observations evenly across the floorplan (or mask)
mask = self.mask if masked else np.ones(self.overlay_dimensions,dtype=np.bool_)
data[mask] += self.__unfixed_observations[:window].mean(axis=0) / mask.sum()
return data
def verify_and_update(self,floor:Floor)->None:
"Verifies overlay is compatible with passed floor, updates mask"
if self.overlay_dimensions != floor.overlay_dimensions or self.real_dimensions != floor.floorplan_dimensions:
raise Model.ModelException("Error: Overlay and Floorplan dimension mismatch for FPID={}".format(floor.floorplan.id))
# Update mask in case of change
# Note this does not change existing data, only new observations
        self.mask = floor.mask
class Model:
LAYER_SNAP_WIFI = 1
LAYER_SNAP_BT = 2
LAYER_MVSENSE = 3
LAYERS_ALL = {LAYER_SNAP_WIFI, LAYER_SNAP_BT, LAYER_MVSENSE}
CONFIG_PATH = os.path.join('model.conf')
CELL_SIZE_M = 1
DOWNSAMPLE_THRESHOLD = 0.5
VARIANCE_THRESHOLD = np.hypot( *( 2*(CELL_SIZE_M / 2,) ) )
    # i.e. let x = CELL_SIZE_M / 2; V_T = sqrt(x^2 + x^2) -- half the cell diagonal
    # I wish I was kidding
    # This is ~0.707 iff CELL_SIZE_M == 1
DEFAULT_EXPOSURE = 3
DEFAULT_PASSWORD = '<PASSWORD>'
__BAD_LAYER = "Layer {} not defined. Use internally defined layer (eg Model.LAYER_*)"
class ModelException(Exception):
pass
class BadRequest(Exception):
pass
def __init__(self,network_id:str=None,layers:set={}):
"Initialise model. API key must be defined in enviroment variable \"MERAKI_DASHBOARD_API_KEY\""
self.read_config_data()
self.write_config_data()
def populate(self,layers:set):
assert isinstance(self.query_obj, APIQuery)
self.network_id = self.query_obj.network_id
self.plans = self.pullFloors()
self.getAPs()
self.query_obj.pullCameras()
self.data_layers = dict()
self.webhook_threshold = 0.35
for layer in layers:
if layer not in Model.LAYERS_ALL:
raise Model.ModelException(Model.__BAD_LAYER.format(layer))
self.data_layers[layer] = Layer(self.plans, Model.DEFAULT_EXPOSURE)
self.timeslot = TimeSlotAvg.load(self.data_layers,self.plans)
self.webhook_addresses = []
### Floorplans
def pullFloors(self) -> dict:
"Pull floorplans from the network, construct blank floor layer for each"
floorplans = self.query_obj.pullFloorPlans()
self.plans = { id : Floor(fp) for id,fp in floorplans.items() }
return self.plans
def getFloorplanSummary(self) -> dict:
"Get a dict of retreived floor plan IDs and names"
return { k : v.floorplan.name for k,v in self.plans.items() }
def findFloorplanByName(self,name) -> str:
"Find a floorPlanId from the floor name"
for k in self.plans:
if self.plans[k].floorplan.name == name:
return k
return None
def setBoundsMask(self,floor_id:str,on:bool,blindspots=None,wallthreshold:float=None) -> None:
"Generate a mask from BoundaryDetector for areas that people cannot possibly be on the given floor eg outside high floors. Blindspots should be a Numpy array, tuple or nested list of the form [[x1,x2,y1,y2],...]"
if blindspots==None:
pass
elif not (isinstance(blindspots,(np.ndarray,list,tuple))):
raise TypeError("Invalid type for blindspots parameter. Should be of type np.array, list or tuple")
elif False in [ len(spot) == 4 for spot in blindspots ]:
raise ValueError("Invalid format for blindspots parameter. Should be of shape (n,4), got {}".format(str(blindspots)))
if wallthreshold == None:
pass
elif not isinstance(wallthreshold,(int,float)):
raise TypeError("Invalid type for wallthreshold parameter. Should be of type int or float, got {}".format(str(type(wallthreshold))))
if floor_id not in self.plans.keys():
raise Model.ModelException("No such floor: ",floor_id)
floor = self.plans[floor_id]
if on:
floor.set_bounds_mask(blindspots,wallthreshold)
else:
floor.bm_boxes = blindspots
floor.mask[:] = 1
floor.mask_enabled = on
self.update_layers()
### Layers
def update_layers(self)->None:
"""
Must be called when a Floor is added, removed, or altered, including by set_bounds_mask
This will update the Overlay objects to reflect this change
May throw error if dimensions do not equate and historical data would be invalidated
"""
for layer in self.data_layers.values():
layer.verify_and_update(self.plans)
### Access Points
def getAPs(self) -> None:
"Get APs and store internally in relevent floor objects"
aps = self.query_obj.pullAPs()
for mac,ap in aps.items():
if ap.floorPlanId in self.plans.keys():
self.plans[ap.floorPlanId].aps[mac] = ap
### Scanning API (SAPI)
def __validate_scanning(self,SAPI_packet:dict) -> None:
if type(SAPI_packet) != dict:
raise TypeError("JSON parsed a {}, expected a dict".format(str(type(SAPI_packet))))
try:
source_net_id = SAPI_packet["data"]["networkId"]
if SAPI_packet[SECRET_K] != self.secret:
raise Model.BadRequest("Request has bad authentication secret - rejecting data")
except KeyError as ke:
raise Model.BadRequest("Request is missing data: " + str(ke) )
if source_net_id != self.network_id:
raise Model.BadRequest("Request has data from wrong network: expected {} got {}".format(self.network_id,source_net_id))
    @staticmethod
    def get_type(SAPI_packet:dict) -> int:
"Get the Model layer constant for a given SAPI packet"
api_layer_val = APIQuery.get_SAPI_type(SAPI_packet)
if api_layer_val == "WiFi":
return Model.LAYER_SNAP_WIFI
elif api_layer_val == "Bluetooth":
return Model.LAYER_SNAP_BT
else:
raise Model.ModelException(Model.__BAD_LAYER.format(api_layer_val))
def __generate_person_obs(self) -> dict:
"Indexes observed person objects with an arbitrary key"
obs = self.query_obj.get_camera_observations()
# Zip [0,n) with n obs objects, converting to dictionary
return dict(zip(range(len(obs)),obs))
def provide_scanning(self,SAPI_packet:dict) -> None:
"Update model with SAPI data"
# Raise a racket if theres something wrong
self.__validate_scanning(SAPI_packet)
dest_layer = Model.get_type(SAPI_packet)
observations = self.query_obj.extract_SAPI_observations(SAPI_packet)
self.data_layers[dest_layer].set_observations(observations)
### Camera and MVSense
def setFOVs(self,mac:str,coords:set)->None:
"""
Set the FOV coords from given camera (by mac).
Coords should be iterable of shape (n,2).
Coords pertain to sqm pixels on internal datamap.
Pass len(iterable)==0 to unset mask
"""
#Check if Layer exists
if Model.LAYER_MVSENSE in self.data_layers.keys():
#Check camera with mac exists
if mac in self.query_obj.getCameras().keys():
#Check coords of correct shape and iterable, or of len 0 to unset
try:
if len(coords) == 0 or False not in [ len(coord)==2 for coord in coords ]:
cam = self.query_obj.cameras[mac]
shape = self.plans[cam.floorPlanId].overlay_dimensions
cam.set_FOV(shape,coords)
else:
raise ValueError
except (ValueError, TypeError) as err:
raise err.__class__("Coordinates supplied of incorrect shape or type, should be iterable shape (n,2)")
else:
raise Model.ModelException("Camera with mac {} not found".format(mac))
else:
raise Model.ModelException("Model not configured for LAYER_MVSENSE")
def pull_mvsense_data(self):
"Pull live MVSense data from cameras and feed into data layer"
self.query_obj.updateCameraMVSenseData()
observations = self.__generate_person_obs()
self.data_layers[Model.LAYER_MVSENSE].set_observations(observations)
    def spike(self, layer, threshold)->dict: #TODO: add floorplan ID into params, camera/wifi/bluetooth/all into params? threshold into params?
        dims = ( (len(layer)//3)+1, (len(layer[0])//3)+1 ) #splits floorplan into 3x3m areas
clusters = np.zeros(dims, dtype="float32")
for x in range(len(clusters)):
for y in range(len(clusters[0])):
clusters[x,y] = layer[3*x:3*(x+1),3*y:3*(y+1)].sum()
busiest = 0
busiest_location=None
for x in range(len(clusters)):
for y in range(len(clusters[0])):
if clusters[x][y] > busiest:
busiest = clusters[x][y]
busiest_location = (3*(x+0.5), 3*(y+0.5))
        return {'spike':busiest > threshold, 'location':busiest_location}
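    # Illustrative usage sketch (hypothetical values, not part of the original module):
    #   delta = model.comp_historical(fpid)   # per-cell delta-from-average overlay
    #   result = model.spike(delta, 0.35)     # -> {'spike': bool, 'location': (x, y) in metres or None}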
def nearestCameras(self, n:int, floor:Floor,spikeDict:dict)->tuple: #returns a list of camera objects
# They call me the comprehension king
# Maybe after we spend 30 minutes fixing it ;)
event = spikeDict["location"]
event_root = tuple([ int(d) for d in event ])
cameras = { cam for cam in self.query_obj.getCameras().values() if cam.floorPlanId == floor.floorplan.id }
FOVcams = { cam for cam in cameras if cam.has_FOV() }
nonFOVcams = cameras - FOVcams
hasView = { cam for cam in FOVcams if cam.get_FOV()[event_root]==True }
if len(hasView)>0:
return ("Covered", list(hasView) )
distances = dict()
for cam in FOVcams:
fov = cam.get_FOV()
            mindist = float('inf')
for x,row in enumerate(fov):
for y, cell in enumerate(row):
if cell:
dist = np.hypot( (x+0.5)-event[0], (y+0.5)-event[1] )
if dist < mindist:
mindist = dist
distances[cam] = mindist
for cam in nonFOVcams:
distances[cam] = np.hypot( cam.x-event[0], cam.y-event[1] )
top_n = [ cam[0] for cam in sorted(distances.items(),key=lambda x: x[1])[:n] ]
return ("Best Effort", top_n)
    def getCameraImage(self, camera) -> dict:
        "Returns a dictionary containing a link to the image"
response = self.query_obj.getCameraSnap(camera)
return response
def snapshotWebhook(self, snapshot:dict):
for address in self.webhook_addresses:
response = requests.post(address, json = snapshot)
print(response)
def addWebhookAddress(self, webhookAddress:str):
self.webhook_addresses.append(webhookAddress)
### Historical
def put_historical(self) -> None:
"Updates the average data for the current TimeSlotAvg object"
self.update_timeslot() # Get the current timeslot object
self.timeslot.update_avg_data( self.data_layers )
def comp_historical(self, floorPlanId:str):
"Get the relative busyness of a floorplan using all layers"
self.update_timeslot() # Get the current timeslot object
hist_fp_data = self.timeslot.get_floor_avgs(floorPlanId)
collective = np.zeros( self.plans[floorPlanId].overlay_dimensions )
for lid in self.data_layers.keys():
mask_enabled = self.plans[floorPlanId].mask_enabled
current = self.data_layers[lid].overlays[floorPlanId].get_delta(masked=mask_enabled)
historical = hist_fp_data[lid].get_delta(masked=mask_enabled,exposure=1)
collective += (current - historical)
collective /= len(self.data_layers)
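        # Sign convention of the result: positive cells are busier than the
        # historical average for this timeslot, negative cells are quieter.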
return collective
def update_timeslot(self):
"Calls the factory if the current TimeSlotAvg object is not current"
if not (self.timeslot.is_current_time()):
self.timeslot.write()
self.timeslot = TimeSlotAvg.load( self.data_layers, self.plans )
self.timeslot.verify_and_update_struct( self.data_layers, self.plans )
### Providers
def poll_layer(self,layer:int,exposure:int) -> dict:
return self.data_layers[layer].get_full(exposure=exposure)
def render_delta(self,floorPlanId:str)->Image:
"Get the current datamap in terms of absolute delta from mean"
datamap = self.comp_historical(floorPlanId)
return self.plans[floorPlanId].render_overlay(datamap)
def render_abs(self,floorPlanId:str)->Image:
"Get latest frame of WiFi layer rendered on the floor plan"
return self.plans[floorPlanId].render_overlay(self.data_layers[Model.LAYER_SNAP_WIFI].overlays[floorPlanId].get_delta(exposure=1))
def debug_render(self,fpid)->Image:
import datetime
dm = self.plans[fpid]
dims = dm.overlay_dimensions
testarr = np.zeros(dims).ravel()
n = (datetime.datetime.now().second / 60) * len(testarr)
testarr[:int(n)] = 1
return dm.render_overlay(testarr.reshape(dims))
def update(self)->None:
"Update non-webhook (non-SAPI) layers, write history"
self.pull_mvsense_data()
self.put_historical()
#spike detect
POST_data = {}
for fpid, floor in self.plans.items():
spikedict = self.spike(self.comp_historical(fpid), self.webhook_threshold)
if spikedict['spike'] == True:
                ideality, cameras = self.nearestCameras(2, floor, spikedict)
                POST_data[fpid] = {"type" : "SnapshotData", "is_ideal" : ideality}
for i, cam in enumerate(cameras):
response = self.getCameraImage(cam)
POST_data[fpid]["camera_data_" + str(i)] = response
if POST_data != {}:
self.snapshotWebhook(POST_data)
### Configuration
STORE_WEBHOOK = "webhooklist"
STORE_SELECTED = "selectednet"
STORE_LAYERS = "layers"
STORE_FOVCOORDS = "fov_coords"
STORE_FOVMASK = "fov_mask"
STORE_BMBOXES = "bm_boxes"
STORE_BDENABLED = "bd_enabled"
STORE_SECRET = "sapisecret"
STORE_TOKEN = "<PASSWORD>_token"
STORE_PASSWORD = "<PASSWORD>"
STORE_WHTHRESHOLD = "webhook_threshold"
def update_model_config(self, netid, conf_dict):
layers = conf_dict.get(Model.STORE_LAYERS,set())
try:
if netid != self.network_id:
raise AttributeError
except AttributeError:
self.query_obj = APIQuery(netid)
self.populate(layers)
self.secret = conf_dict.get(Model.STORE_SECRET)
self.validator_token = conf_dict.get(Model.STORE_TOKEN)
self.webhook_addresses = conf_dict.get(Model.STORE_WEBHOOK,list())
self.password = conf_dict.get(Model.STORE_PASSWORD,Model.DEFAULT_PASSWORD)
self.webhook_threshold = conf_dict.get(Model.STORE_WHTHRESHOLD,self.webhook_threshold)
for mac, coords in conf_dict.get(Model.STORE_FOVCOORDS,dict()).items():
self.setFOVs( mac, coords )
for fpid, boxes in conf_dict.get(Model.STORE_BMBOXES, dict()).items():
on = conf_dict.get(Model.STORE_BDENABLED,{fpid:False})[fpid]
self.setBoundsMask(fpid, on, boxes)
def serialize(self):
conf = dict()
conf[Model.STORE_SECRET] = self.secret
conf[Model.STORE_TOKEN] = self.validator_token
conf[Model.STORE_LAYERS] = set(self.data_layers.keys())
conf[Model.STORE_WEBHOOK] = self.webhook_addresses
conf[Model.STORE_WHTHRESHOLD] = self.webhook_threshold
conf[Model.STORE_PASSWORD] = self.password
conf[Model.STORE_FOVCOORDS] = { cam.mac: cam.get_fov_coords() for cam in self.query_obj.cameras.values() }
conf[Model.STORE_BMBOXES] = { fpid: fp.bm_boxes for fpid,fp in self.plans.items() }
conf[Model.STORE_BDENABLED] = { fpid: fp.mask_enabled for fpid,fp in self.plans.items() }
return conf
def write_config_data(self):
config_data = {Model.STORE_SELECTED:self.network_id}
config_data[self.network_id] = self.serialize()
with open( self.CONFIG_PATH, 'wb' ) as f:
pickle.dump(config_data, f)
def read_config_data(self):
if os.path.isfile(Model.CONFIG_PATH):
with open( Model.CONFIG_PATH, 'rb' ) as f:
config_data = pickle.load(f)
selected_id = config_data[Model.STORE_SELECTED]
select_data = config_data[selected_id]
self.update_model_config(selected_id,select_data)
else:
print("Warning: config file not found")
try:
self.update_model_config(None, {Model.STORE_LAYERS: Model.LAYERS_ALL} )
except APIQuery.APIException:
raise Model.ModelException("Could not get network from config file")
class TimeSlotAvg:
class TimeSlotAvgException( Exception ):
pass
DATA_DIR = "historical_data"
def __init__(self, data_layers:dict, day:int, hour:int):
self.day = day
self.hour = hour
self.data_layers = dict()
self.count = dict()
for l_id, layer in data_layers.items():
# Copy the layer structure but clear the transient data
# Also we only need 1 frame to store average so flatten
self.data_layers[l_id] = layer.copy(flatten=True)
self.data_layers[l_id].clear()
# Set a count for each overlay in each layer stored
self.count[l_id] = { fpid:0 for fpid in layer.overlays.keys() }
@staticmethod
def load( data_layers:dict, floors:dict, day=None, hour=None ):
"Static factory method; Load TimeSlotAvg object from compressed pickle file or create new"
#TODO remove day hour params - used for unit tests
if day==None or hour==None:
day, hour = TimeSlotAvg.get_time()
try:
tsa = bz2.BZ2File(os.path.join(TimeSlotAvg.DATA_DIR,'{}_{}.pbz2'.format(day, hour)), 'rb')
tsa = pickle.load(tsa)
except FileNotFoundError:
tsa = TimeSlotAvg( data_layers, day, hour )
tsa.verify_and_update_struct(data_layers, floors)
tsa.write()
else:
if __name__!="__main__": assert isinstance(tsa,TimeSlotAvg)
tsa.verify_and_update_struct(data_layers, floors)
return tsa
def is_current_time(self, debug=None) -> bool:
"Returns True iff timeslot is for current time"
if debug != None:
return debug
curr_day, curr_hour = TimeSlotAvg.get_time()
return curr_day == self.day and curr_hour == self.hour
def update_avg_data(self, current_data:dict, debug:bool=None) -> None:
"Updates an average model for a timeslot using the current model data, if valid time"
if self.is_current_time(debug): # it is valid to update with the current model
# For each layer in the new data
for l_key, layer in current_data.items():
# Get the respective average layer
avg_layer = self.data_layers[l_key]
# For each overlay in the respective new data layer
for o_key, over in layer.overlays.items():
# Get masked and unmasked deltas, and unfixed count from new data
# Get full available exposure by default
new_um_overlay = over.get_delta(masked=False)
new_m_overlay = over.get_delta(masked=True)
new_unfixed_obs = over.get_unfixed_observations()
# Similar from averages
# avg layers are already flat so exposure of 1
avg_um_overlay = avg_layer.overlays[o_key].get_delta(masked=False,exposure=1)
avg_m_overlay = avg_layer.overlays[o_key].get_delta(masked=True,exposure=1)
                    avg_unfixed_obs = avg_layer.overlays[o_key].get_unfixed_observations(exposure=1)
# Count
c = self.count[l_key][o_key]
# update the average by adding current values to sum total and dividing by new count
upd_um_overlay = ( avg_um_overlay * c + new_um_overlay ) / (c+1)
upd_m_overlay = ( avg_m_overlay * c + new_m_overlay ) / (c+1)
upd_unfixed_obs = ( avg_unfixed_obs * c + new_unfixed_obs ) / (c+1)
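                    # Incremental mean identity: mean_{c+1} = (mean_c * c + x_{c+1}) / (c + 1),
                    # so the running average is updated without storing past samples.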
                    # save the updated averages back to this timeslot's model (historical overlays hold a single flattened exposure frame)
self.data_layers[l_key].overlays[o_key].set( upd_unfixed_obs[None,], upd_m_overlay[None,], upd_um_overlay[None,] )
# Update the count
self.count[l_key][o_key] += 1
#self.write()
else:
raise TimeSlotAvg.TimeSlotAvgException(f"Cannot update with current model as it is not currently day:{self.day}, hour:{self.hour}")
def write(self):
"Save TimeSlotAvg object to a compressed file"
filepath = os.path.join(TimeSlotAvg.DATA_DIR,'{}_{}.pbz2'.format(self.day, self.hour))
if not os.path.exists(TimeSlotAvg.DATA_DIR):
os.makedirs(TimeSlotAvg.DATA_DIR)
with bz2.BZ2File(filepath, 'wb') as f:
pickle.dump(self, f)
def get_floor_avgs(self, fpid:str)->dict:
"Return the flat average Overlay object indexed by each layer stored"
return { layer_id: layer.overlays[fpid] for layer_id, layer in self.data_layers.items() }
@staticmethod
def get_time()->tuple:
"Get the current time values needed for reading and writing data files"
curr_dt = datetime.datetime.now( datetime.timezone( offset=datetime.timedelta(hours=0) ) )
curr_day = curr_dt.weekday()
curr_hour = curr_dt.hour
return curr_day, curr_hour
def verify_and_update_struct(self, data_layers:dict, floors:dict)->None:
"""
Verifies that the data in the TimeSlotAvg is compatible with the current Model.
If layers or overlays are not represented in TSA, those are created, infos are printed
Throws ModelException if dimensions do not match
"""
for l_id in set(data_layers.keys()).difference(self.data_layers.keys()):
# For layers in data_layers not in self
self.data_layers[l_id] = data_layers[l_id].copy()
self.count[l_id] = dict()
print("Info: Layer implicitly created for Layer ID {}".format(l_id))
for l_id, layer in self.data_layers.items():
# Add any missing overlays
layer.verify_and_update(floors)
for l_id in data_layers.keys():
# Get count if exists, else set to 1
self.count[l_id] = { ov_id:self.count[l_id].get(ov_id,1) for ov_id in data_layers[l_id].overlays.keys() }
def sha256(inpt:str) -> str:
m = hashlib.sha256()
m.update(inpt.encode())
return m.hexdigest() | 2.09375 | 2 |
53. Maximum Subarray/main.py | Competitive-Programmers-Community/LeetCode | 2 | 12790111 | <filename>53. Maximum Subarray/main.py
class Solution:
def maxSubArray(self, A):
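        # Kadane's algorithm: curSum is the best subarray sum ending at the
        # current element; maxSum is the best sum seen anywhere. O(n) time, O(1) space.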
if not A:
return 0
curSum = maxSum = A[0]
for num in A[1:]:
curSum = max(num, curSum + num)
maxSum = max(maxSum, curSum)
return maxSum
| 3.65625 | 4 |
esociallib/v2_04/evtCdBenPrRP.py | akretion/esociallib | 6 | 12790112 | <filename>esociallib/v2_04/evtCdBenPrRP.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated Tue Oct 10 00:42:21 2017 by generateDS.py version 2.28b.
# Python 2.7.12 (default, Nov 19 2016, 06:48:10) [GCC 5.4.0 20160609]
#
# Command line options:
# ('--no-process-includes', '')
# ('-o', 'esociallib/v2_04/evtCdBenPrRP.py')
#
# Command line arguments:
# schemas/v2_04/evtCdBenPrRP.xsd
#
# Command line:
# /usr/local/bin/generateDS --no-process-includes -o "esociallib/v2_04/evtCdBenPrRP.py" schemas/v2_04/evtCdBenPrRP.xsd
#
# Current working directory (os.getcwd()):
# esociallib
#
import sys
import re as re_
import base64
import datetime as datetime_
import warnings as warnings_
try:
from lxml import etree as etree_
except ImportError:
from xml.etree import ElementTree as etree_
Validate_simpletypes_ = True
if sys.version_info.major == 2:
BaseStrType_ = basestring
else:
BaseStrType_ = str
def parsexml_(infile, parser=None, **kwargs):
if parser is None:
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
try:
parser = etree_.ETCompatXMLParser()
except AttributeError:
# fallback to xml.etree
parser = etree_.XMLParser()
doc = etree_.parse(infile, parser=parser, **kwargs)
return doc
#
# Namespace prefix definition table (and other attributes, too)
#
# The module generatedsnamespaces, if it is importable, must contain
# a dictionary named GeneratedsNamespaceDefs. This Python dictionary
# should map element type names (strings) to XML schema namespace prefix
# definitions. The export method for any class for which there is
# a namespace prefix definition, will export that definition in the
# XML representation of that element. See the export method of
# any generated element type class for a example of the use of this
# table.
# A sample table is:
#
# # File: generatedsnamespaces.py
#
# GenerateDSNamespaceDefs = {
# "ElementtypeA": "http://www.xxx.com/namespaceA",
# "ElementtypeB": "http://www.xxx.com/namespaceB",
# }
#
try:
from generatedsnamespaces import GenerateDSNamespaceDefs as GenerateDSNamespaceDefs_
except ImportError:
GenerateDSNamespaceDefs_ = {}
#
# The root super-class for element type classes
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ImportError as exp:
class GeneratedsSuper(object):
tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$')
class _FixedOffsetTZ(datetime_.tzinfo):
def __init__(self, offset, name):
self.__offset = datetime_.timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return None
def gds_format_string(self, input_data, input_name=''):
return input_data
def gds_validate_string(self, input_data, node=None, input_name=''):
if not input_data:
return ''
else:
return input_data
def gds_format_base64(self, input_data, input_name=''):
return base64.b64encode(input_data)
def gds_validate_base64(self, input_data, node=None, input_name=''):
return input_data
def gds_format_integer(self, input_data, input_name=''):
return '%d' % input_data
def gds_validate_integer(self, input_data, node=None, input_name=''):
return input_data
def gds_format_integer_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_integer_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
int(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of integers')
return values
def gds_format_float(self, input_data, input_name=''):
return ('%.15f' % input_data).rstrip('0')
def gds_validate_float(self, input_data, node=None, input_name=''):
return input_data
def gds_format_float_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_float_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of floats')
return values
def gds_format_double(self, input_data, input_name=''):
return '%e' % input_data
def gds_validate_double(self, input_data, node=None, input_name=''):
return input_data
def gds_format_double_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_double_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of doubles')
return values
def gds_format_boolean(self, input_data, input_name=''):
return ('%s' % input_data).lower()
def gds_validate_boolean(self, input_data, node=None, input_name=''):
return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_boolean_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
if value not in ('true', '1', 'false', '0', ):
raise_parse_error(
node,
'Requires sequence of booleans '
'("true", "1", "false", "0")')
return values
def gds_validate_datetime(self, input_data, node=None, input_name=''):
return input_data
def gds_format_datetime(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%04d-%02d-%02dT%02d:%02d:%02d' % (
input_data.year,
input_data.month,
input_data.day,
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%04d-%02d-%02dT%02d:%02d:%02d.%s' % (
input_data.year,
input_data.month,
input_data.day,
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
@classmethod
def gds_parse_datetime(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
time_parts = input_data.split('.')
if len(time_parts) > 1:
micro_seconds = int(float('0.' + time_parts[1]) * 1000000)
input_data = '%s.%s' % (time_parts[0], micro_seconds, )
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt
def gds_validate_date(self, input_data, node=None, input_name=''):
return input_data
def gds_format_date(self, input_data, input_name=''):
_svalue = '%04d-%02d-%02d' % (
input_data.year,
input_data.month,
input_data.day,
)
try:
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(
hours, minutes)
except AttributeError:
pass
return _svalue
@classmethod
def gds_parse_date(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
dt = datetime_.datetime.strptime(input_data, '%Y-%m-%d')
dt = dt.replace(tzinfo=tz)
return dt.date()
def gds_validate_time(self, input_data, node=None, input_name=''):
return input_data
def gds_format_time(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%02d:%02d:%02d' % (
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%02d:%02d:%02d.%s' % (
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
def gds_validate_simple_patterns(self, patterns, target):
# pat is a list of lists of strings/patterns. We should:
# - AND the outer elements
# - OR the inner elements
found1 = True
for patterns1 in patterns:
found2 = False
for patterns2 in patterns1:
if re_.search(patterns2, target) is not None:
found2 = True
break
if not found2:
found1 = False
break
return found1
@classmethod
def gds_parse_time(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
if len(input_data.split('.')) > 1:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt.time()
def gds_str_lower(self, instring):
return instring.lower()
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
Tag_strip_pattern_ = re_.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
def get_class_obj_(self, node, default_class=None):
class_obj1 = default_class
if 'xsi' in node.nsmap:
classname = node.get('{%s}type' % node.nsmap['xsi'])
if classname is not None:
names = classname.split(':')
if len(names) == 2:
classname = names[1]
class_obj2 = globals().get(classname)
if class_obj2 is not None:
class_obj1 = class_obj2
return class_obj1
def gds_build_any(self, node, type_name=None):
return None
@classmethod
def gds_reverse_node_mapping(cls, mapping):
return dict(((v, k) for k, v in mapping.iteritems()))
@staticmethod
def gds_encode(instring):
if sys.version_info.major == 2:
return instring.encode(ExternalEncoding)
else:
return instring
@staticmethod
def convert_unicode(instring):
if isinstance(instring, str):
result = quote_xml(instring)
elif sys.version_info.major == 2 and isinstance(instring, unicode):
result = quote_xml(instring).encode('utf8')
else:
result = GeneratedsSuper.gds_encode(str(instring))
return result
def __eq__(self, other):
if type(self) != type(other):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
def getSubclassFromModule_(module, class_):
'''Get the subclass of a class from a specific module.'''
name = class_.__name__ + 'Sub'
if hasattr(module, name):
return getattr(module, name)
else:
return None
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = 'ascii'
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
CDATA_pattern_ = re_.compile(r"<!\[CDATA\[.*?\]\]>", re_.DOTALL)
# Change this to redirect the generated superclass module to use a
# specific subclass module.
CurrentSubclassModule_ = None
#
# Support/utility functions.
#
def showIndent(outfile, level, pretty_print=True):
if pretty_print:
for idx in range(level):
outfile.write(' ')
def quote_xml(inStr):
"Escape markup chars, but do not modify CDATA sections."
if not inStr:
return ''
s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr)
s2 = ''
pos = 0
matchobjects = CDATA_pattern_.finditer(s1)
for mo in matchobjects:
s3 = s1[pos:mo.start()]
s2 += quote_xml_aux(s3)
s2 += s1[mo.start():mo.end()]
pos = mo.end()
s3 = s1[pos:]
s2 += quote_xml_aux(s3)
return s2
def quote_xml_aux(inStr):
s1 = inStr.replace('&', '&')
s1 = s1.replace('<', '<')
s1 = s1.replace('>', '>')
return s1
def quote_attrib(inStr):
s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr)
s1 = s1.replace('&', '&')
s1 = s1.replace('<', '<')
s1 = s1.replace('>', '>')
if '"' in s1:
if "'" in s1:
s1 = '"%s"' % s1.replace('"', """)
else:
s1 = "'%s'" % s1
else:
s1 = '"%s"' % s1
return s1
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
if s1.find('\n') == -1:
return "'%s'" % s1
else:
return "'''%s'''" % s1
else:
if s1.find('"') != -1:
s1 = s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
def get_all_text_(node):
if node.text is not None:
text = node.text
else:
text = ''
for child in node:
if child.tail is not None:
text += child.tail
return text
def find_attr_value_(attr_name, node):
attrs = node.attrib
attr_parts = attr_name.split(':')
value = None
if len(attr_parts) == 1:
value = attrs.get(attr_name)
elif len(attr_parts) == 2:
prefix, name = attr_parts
namespace = node.nsmap.get(prefix)
if namespace is not None:
value = attrs.get('{%s}%s' % (namespace, name, ))
return value
class GDSParseError(Exception):
pass
def raise_parse_error(node, msg):
msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, )
raise GDSParseError(msg)
class MixedContainer:
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
# Constants for content_type:
TypeNone = 0
TypeText = 1
TypeString = 2
TypeInteger = 3
TypeFloat = 4
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
TypeBase64 = 8
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
def getCategory(self):
return self.category
def getContenttype(self, content_type):
return self.content_type
def getValue(self):
return self.value
def getName(self):
return self.name
def export(self, outfile, level, name, namespace,
pretty_print=True):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
outfile.write(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(outfile, level, name)
else: # category == MixedContainer.CategoryComplex
self.value.export(
outfile, level, namespace, name,
pretty_print=pretty_print)
def exportSimple(self, outfile, level, name):
if self.content_type == MixedContainer.TypeString:
outfile.write('<%s>%s</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeInteger or \
self.content_type == MixedContainer.TypeBoolean:
outfile.write('<%s>%d</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeFloat or \
self.content_type == MixedContainer.TypeDecimal:
outfile.write('<%s>%f</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
outfile.write('<%s>%g</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeBase64:
outfile.write('<%s>%s</%s>' % (
self.name,
base64.b64encode(self.value),
self.name))
def to_etree(self, element):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
if len(element) > 0:
if element[-1].tail is None:
element[-1].tail = self.value
else:
element[-1].tail += self.value
else:
if element.text is None:
element.text = self.value
else:
element.text += self.value
elif self.category == MixedContainer.CategorySimple:
subelement = etree_.SubElement(
element, '%s' % self.name)
subelement.text = self.to_etree_simple()
else: # category == MixedContainer.CategoryComplex
self.value.to_etree(element)
def to_etree_simple(self):
if self.content_type == MixedContainer.TypeString:
text = self.value
elif (self.content_type == MixedContainer.TypeInteger or
self.content_type == MixedContainer.TypeBoolean):
text = '%d' % self.value
elif (self.content_type == MixedContainer.TypeFloat or
self.content_type == MixedContainer.TypeDecimal):
text = '%f' % self.value
elif self.content_type == MixedContainer.TypeDouble:
text = '%g' % self.value
elif self.content_type == MixedContainer.TypeBase64:
text = '%s' % base64.b64encode(self.value)
return text
def exportLiteral(self, outfile, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type,
self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type,
self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s",\n' % (
self.category, self.content_type, self.name,))
self.value.exportLiteral(outfile, level + 1)
showIndent(outfile, level)
outfile.write(')\n')
class MemberSpec_(object):
def __init__(self, name='', data_type='', container=0,
optional=0, child_attrs=None, choice=None):
self.name = name
self.data_type = data_type
self.container = container
self.child_attrs = child_attrs
self.choice = choice
self.optional = optional
def set_name(self, name): self.name = name
def get_name(self): return self.name
def set_data_type(self, data_type): self.data_type = data_type
def get_data_type_chain(self): return self.data_type
def get_data_type(self):
if isinstance(self.data_type, list):
if len(self.data_type) > 0:
return self.data_type[-1]
else:
return 'xs:string'
else:
return self.data_type
def set_container(self, container): self.container = container
def get_container(self): return self.container
def set_child_attrs(self, child_attrs): self.child_attrs = child_attrs
def get_child_attrs(self): return self.child_attrs
def set_choice(self, choice): self.choice = choice
def get_choice(self): return self.choice
def set_optional(self, optional): self.optional = optional
def get_optional(self): return self.optional
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class eSocial(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, evtCdBenPrRP=None, Signature=None):
self.original_tagname_ = None
self.evtCdBenPrRP = evtCdBenPrRP
self.Signature = Signature
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, eSocial)
if subclass is not None:
return subclass(*args_, **kwargs_)
if eSocial.subclass:
return eSocial.subclass(*args_, **kwargs_)
else:
return eSocial(*args_, **kwargs_)
factory = staticmethod(factory)
def get_evtCdBenPrRP(self): return self.evtCdBenPrRP
def set_evtCdBenPrRP(self, evtCdBenPrRP): self.evtCdBenPrRP = evtCdBenPrRP
def get_Signature(self): return self.Signature
def set_Signature(self, Signature): self.Signature = Signature
def hasContent_(self):
if (
self.evtCdBenPrRP is not None or
self.Signature is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='eSocial', namespacedef_=' xmlns:ds="http://www.w3.org/2000/09/xmldsig#" ', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('eSocial')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='eSocial')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='eSocial', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='eSocial'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='eSocial', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.evtCdBenPrRP is not None:
self.evtCdBenPrRP.export(outfile, level, namespace_, name_='evtCdBenPrRP', pretty_print=pretty_print)
if self.Signature is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sSignature>%s</%sSignature>%s' % ('ds:', self.gds_encode(self.gds_format_string(quote_xml(self.Signature), input_name='Signature')), 'ds:', eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'evtCdBenPrRP':
obj_ = evtCdBenPrRP.factory()
obj_.build(child_)
self.evtCdBenPrRP = obj_
obj_.original_tagname_ = 'evtCdBenPrRP'
elif nodeName_ == 'Signature':
Signature_ = child_.text
Signature_ = self.gds_validate_string(Signature_, node, 'Signature')
self.Signature = Signature_
# end class eSocial
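# Illustrative usage sketch (hand-written, not generated): build the root
# element and serialize it with the export() method defined above. The Id
# value is a placeholder, not a valid eSocial identifier.
#
#     import sys
#     root = eSocial()
#     root.set_evtCdBenPrRP(evtCdBenPrRP(Id='ID1'))
#     root.export(sys.stdout, 0, pretty_print=True)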
class evtCdBenPrRP(GeneratedsSuper):
"""Evento de cadastro de benefícios previdenciários de Regimes Próprios"""
subclass = None
superclass = None
def __init__(self, Id=None, ideEvento=None, ideEmpregador=None, ideBenef=None, infoBeneficio=None):
self.original_tagname_ = None
self.Id = _cast(None, Id)
self.ideEvento = ideEvento
self.ideEmpregador = ideEmpregador
self.ideBenef = ideBenef
self.infoBeneficio = infoBeneficio
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, evtCdBenPrRP)
if subclass is not None:
return subclass(*args_, **kwargs_)
if evtCdBenPrRP.subclass:
return evtCdBenPrRP.subclass(*args_, **kwargs_)
else:
return evtCdBenPrRP(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ideEvento(self): return self.ideEvento
def set_ideEvento(self, ideEvento): self.ideEvento = ideEvento
def get_ideEmpregador(self): return self.ideEmpregador
def set_ideEmpregador(self, ideEmpregador): self.ideEmpregador = ideEmpregador
def get_ideBenef(self): return self.ideBenef
def set_ideBenef(self, ideBenef): self.ideBenef = ideBenef
def get_infoBeneficio(self): return self.infoBeneficio
def set_infoBeneficio(self, infoBeneficio): self.infoBeneficio = infoBeneficio
def get_Id(self): return self.Id
def set_Id(self, Id): self.Id = Id
def hasContent_(self):
if (
self.ideEvento is not None or
self.ideEmpregador is not None or
self.ideBenef is not None or
self.infoBeneficio is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='evtCdBenPrRP', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('evtCdBenPrRP')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='evtCdBenPrRP')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='evtCdBenPrRP', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='evtCdBenPrRP'):
if self.Id is not None and 'Id' not in already_processed:
already_processed.add('Id')
outfile.write(' Id=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.Id), input_name='Id')), ))
def exportChildren(self, outfile, level, namespace_='', name_='evtCdBenPrRP', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.ideEvento is not None:
self.ideEvento.export(outfile, level, namespace_, name_='ideEvento', pretty_print=pretty_print)
if self.ideEmpregador is not None:
self.ideEmpregador.export(outfile, level, namespace_, name_='ideEmpregador', pretty_print=pretty_print)
if self.ideBenef is not None:
self.ideBenef.export(outfile, level, namespace_, name_='ideBenef', pretty_print=pretty_print)
if self.infoBeneficio is not None:
self.infoBeneficio.export(outfile, level, namespace_, name_='infoBeneficio', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('Id', node)
if value is not None and 'Id' not in already_processed:
already_processed.add('Id')
self.Id = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'ideEvento':
obj_ = TIdeEveTrab.factory()
obj_.build(child_)
self.ideEvento = obj_
obj_.original_tagname_ = 'ideEvento'
elif nodeName_ == 'ideEmpregador':
obj_ = TEmprPJ.factory()
obj_.build(child_)
self.ideEmpregador = obj_
obj_.original_tagname_ = 'ideEmpregador'
elif nodeName_ == 'ideBenef':
obj_ = ideBenef.factory()
obj_.build(child_)
self.ideBenef = obj_
obj_.original_tagname_ = 'ideBenef'
elif nodeName_ == 'infoBeneficio':
obj_ = infoBeneficio.factory()
obj_.build(child_)
self.infoBeneficio = obj_
obj_.original_tagname_ = 'infoBeneficio'
# end class evtCdBenPrRP
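# Sketch of assembling a complete evtCdBenPrRP event from classes in this
# module; every value below is a hypothetical example and is not validated
# against the eSocial schema here.
#
#     evento = evtCdBenPrRP(
#         Id='ID1',
#         ideEvento=TIdeEveTrab(indRetif=1, tpAmb=2, procEmi=1, verProc='1.0'),
#         ideEmpregador=TEmprPJ(tpInsc=1, nrInsc='00000000000000'),
#         ideBenef=ideBenef(cpfBenef='00000000000', nmBenefic='Beneficiary Name'),
#     )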
class ideBenef(GeneratedsSuper):
"""Identificação do beneficiário"""
subclass = None
superclass = None
def __init__(self, cpfBenef=None, nmBenefic=None, dadosBenef=None):
self.original_tagname_ = None
self.cpfBenef = cpfBenef
self.nmBenefic = nmBenefic
self.dadosBenef = dadosBenef
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, ideBenef)
if subclass is not None:
return subclass(*args_, **kwargs_)
if ideBenef.subclass:
return ideBenef.subclass(*args_, **kwargs_)
else:
return ideBenef(*args_, **kwargs_)
factory = staticmethod(factory)
def get_cpfBenef(self): return self.cpfBenef
def set_cpfBenef(self, cpfBenef): self.cpfBenef = cpfBenef
def get_nmBenefic(self): return self.nmBenefic
def set_nmBenefic(self, nmBenefic): self.nmBenefic = nmBenefic
def get_dadosBenef(self): return self.dadosBenef
def set_dadosBenef(self, dadosBenef): self.dadosBenef = dadosBenef
def hasContent_(self):
if (
self.cpfBenef is not None or
self.nmBenefic is not None or
self.dadosBenef is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='ideBenef', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('ideBenef')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ideBenef')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='ideBenef', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ideBenef'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='ideBenef', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.cpfBenef is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%scpfBenef>%s</%scpfBenef>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.cpfBenef), input_name='cpfBenef')), namespace_, eol_))
if self.nmBenefic is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%snmBenefic>%s</%snmBenefic>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.nmBenefic), input_name='nmBenefic')), namespace_, eol_))
if self.dadosBenef is not None:
self.dadosBenef.export(outfile, level, namespace_, name_='dadosBenef', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'cpfBenef':
cpfBenef_ = child_.text
cpfBenef_ = self.gds_validate_string(cpfBenef_, node, 'cpfBenef')
self.cpfBenef = cpfBenef_
elif nodeName_ == 'nmBenefic':
nmBenefic_ = child_.text
nmBenefic_ = self.gds_validate_string(nmBenefic_, node, 'nmBenefic')
self.nmBenefic = nmBenefic_
elif nodeName_ == 'dadosBenef':
obj_ = TDadosBenef.factory()
obj_.build(child_)
self.dadosBenef = obj_
obj_.original_tagname_ = 'dadosBenef'
# end class ideBenef
class cpfBenef(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, cpfBenef)
if subclass is not None:
return subclass(*args_, **kwargs_)
if cpfBenef.subclass:
return cpfBenef.subclass(*args_, **kwargs_)
else:
return cpfBenef(*args_, **kwargs_)
factory = staticmethod(factory)
    def hasContent_(self):
        return False
def export(self, outfile, level, namespace_='', name_='cpfBenef', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('cpfBenef')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='cpfBenef')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='cpfBenef', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='cpfBenef'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='cpfBenef', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class cpfBenef
class nmBenefic(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, nmBenefic)
if subclass is not None:
return subclass(*args_, **kwargs_)
if nmBenefic.subclass:
return nmBenefic.subclass(*args_, **kwargs_)
else:
return nmBenefic(*args_, **kwargs_)
factory = staticmethod(factory)
    def hasContent_(self):
        return False
def export(self, outfile, level, namespace_='', name_='nmBenefic', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('nmBenefic')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='nmBenefic')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='nmBenefic', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='nmBenefic'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='nmBenefic', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class nmBenefic
class infoBeneficio(GeneratedsSuper):
"""Informações relacionadas ao benefício previdenciário concedido ao
servidor"""
subclass = None
superclass = None
def __init__(self, tpPlanRP=None, iniBeneficio=None, altBeneficio=None, fimBeneficio=None):
self.original_tagname_ = None
self.tpPlanRP = tpPlanRP
self.iniBeneficio = iniBeneficio
self.altBeneficio = altBeneficio
self.fimBeneficio = fimBeneficio
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, infoBeneficio)
if subclass is not None:
return subclass(*args_, **kwargs_)
if infoBeneficio.subclass:
return infoBeneficio.subclass(*args_, **kwargs_)
else:
return infoBeneficio(*args_, **kwargs_)
factory = staticmethod(factory)
def get_tpPlanRP(self): return self.tpPlanRP
def set_tpPlanRP(self, tpPlanRP): self.tpPlanRP = tpPlanRP
def get_iniBeneficio(self): return self.iniBeneficio
def set_iniBeneficio(self, iniBeneficio): self.iniBeneficio = iniBeneficio
def get_altBeneficio(self): return self.altBeneficio
def set_altBeneficio(self, altBeneficio): self.altBeneficio = altBeneficio
def get_fimBeneficio(self): return self.fimBeneficio
def set_fimBeneficio(self, fimBeneficio): self.fimBeneficio = fimBeneficio
def hasContent_(self):
if (
self.tpPlanRP is not None or
self.iniBeneficio is not None or
self.altBeneficio is not None or
self.fimBeneficio is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='infoBeneficio', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('infoBeneficio')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='infoBeneficio')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='infoBeneficio', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='infoBeneficio'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='infoBeneficio', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.tpPlanRP is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%stpPlanRP>%s</%stpPlanRP>%s' % (namespace_, self.gds_format_integer(self.tpPlanRP, input_name='tpPlanRP'), namespace_, eol_))
if self.iniBeneficio is not None:
self.iniBeneficio.export(outfile, level, namespace_, name_='iniBeneficio', pretty_print=pretty_print)
if self.altBeneficio is not None:
self.altBeneficio.export(outfile, level, namespace_, name_='altBeneficio', pretty_print=pretty_print)
if self.fimBeneficio is not None:
self.fimBeneficio.export(outfile, level, namespace_, name_='fimBeneficio', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'tpPlanRP':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'tpPlanRP')
self.tpPlanRP = ival_
elif nodeName_ == 'iniBeneficio':
obj_ = TDadosBeneficio.factory()
obj_.build(child_)
self.iniBeneficio = obj_
obj_.original_tagname_ = 'iniBeneficio'
elif nodeName_ == 'altBeneficio':
obj_ = TDadosBeneficio.factory()
obj_.build(child_)
self.altBeneficio = obj_
obj_.original_tagname_ = 'altBeneficio'
elif nodeName_ == 'fimBeneficio':
obj_ = fimBeneficio.factory()
obj_.build(child_)
self.fimBeneficio = obj_
obj_.original_tagname_ = 'fimBeneficio'
# end class infoBeneficio
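# Note (assumption based on the eSocial layout family): iniBeneficio,
# altBeneficio and fimBeneficio form a schema choice, so a document should
# carry only one of them; this generated class does not enforce that.
# Sketch with hypothetical values:
#
#     info = infoBeneficio(tpPlanRP=1)
#     info.set_fimBeneficio(fimBeneficio(tpBenef=1, nrBenefic='123',
#                                        dtFimBenef='2018-05-18', mtvFim=1))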
class tpPlanRP(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, tpPlanRP)
if subclass is not None:
return subclass(*args_, **kwargs_)
if tpPlanRP.subclass:
return tpPlanRP.subclass(*args_, **kwargs_)
else:
return tpPlanRP(*args_, **kwargs_)
factory = staticmethod(factory)
    def hasContent_(self):
        return False
def export(self, outfile, level, namespace_='', name_='tpPlanRP', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('tpPlanRP')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='tpPlanRP')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='tpPlanRP', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='tpPlanRP'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='tpPlanRP', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class tpPlanRP
class fimBeneficio(GeneratedsSuper):
"""Informações relativas a benefícios previdenciários - Término.
Validação: Só pode ser informado se já houver informação
anterior de benefícios para o beneficiário identificado em
{ideBenef} e para o qual não tenha havido ainda informação de
término de benefícios."""
subclass = None
superclass = None
def __init__(self, tpBenef=None, nrBenefic=None, dtFimBenef=None, mtvFim=None):
self.original_tagname_ = None
self.tpBenef = tpBenef
self.nrBenefic = nrBenefic
if isinstance(dtFimBenef, BaseStrType_):
initvalue_ = datetime_.datetime.strptime(dtFimBenef, '%Y-%m-%d').date()
else:
initvalue_ = dtFimBenef
self.dtFimBenef = initvalue_
self.mtvFim = mtvFim
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, fimBeneficio)
if subclass is not None:
return subclass(*args_, **kwargs_)
if fimBeneficio.subclass:
return fimBeneficio.subclass(*args_, **kwargs_)
else:
return fimBeneficio(*args_, **kwargs_)
factory = staticmethod(factory)
def get_tpBenef(self): return self.tpBenef
def set_tpBenef(self, tpBenef): self.tpBenef = tpBenef
def get_nrBenefic(self): return self.nrBenefic
def set_nrBenefic(self, nrBenefic): self.nrBenefic = nrBenefic
def get_dtFimBenef(self): return self.dtFimBenef
def set_dtFimBenef(self, dtFimBenef): self.dtFimBenef = dtFimBenef
def get_mtvFim(self): return self.mtvFim
def set_mtvFim(self, mtvFim): self.mtvFim = mtvFim
def hasContent_(self):
if (
self.tpBenef is not None or
self.nrBenefic is not None or
self.dtFimBenef is not None or
self.mtvFim is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='fimBeneficio', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('fimBeneficio')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='fimBeneficio')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='fimBeneficio', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='fimBeneficio'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='fimBeneficio', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.tpBenef is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%stpBenef>%s</%stpBenef>%s' % (namespace_, self.gds_format_integer(self.tpBenef, input_name='tpBenef'), namespace_, eol_))
if self.nrBenefic is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%snrBenefic>%s</%snrBenefic>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.nrBenefic), input_name='nrBenefic')), namespace_, eol_))
if self.dtFimBenef is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sdtFimBenef>%s</%sdtFimBenef>%s' % (namespace_, self.gds_format_date(self.dtFimBenef, input_name='dtFimBenef'), namespace_, eol_))
if self.mtvFim is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%smtvFim>%s</%smtvFim>%s' % (namespace_, self.gds_format_integer(self.mtvFim, input_name='mtvFim'), namespace_, eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'tpBenef':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'tpBenef')
self.tpBenef = ival_
elif nodeName_ == 'nrBenefic':
nrBenefic_ = child_.text
nrBenefic_ = self.gds_validate_string(nrBenefic_, node, 'nrBenefic')
self.nrBenefic = nrBenefic_
elif nodeName_ == 'dtFimBenef':
sval_ = child_.text
dval_ = self.gds_parse_date(sval_)
self.dtFimBenef = dval_
elif nodeName_ == 'mtvFim':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'mtvFim')
self.mtvFim = ival_
# end class fimBeneficio
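# Date handling sketch: the constructor above accepts either a datetime.date
# or an ISO 'YYYY-MM-DD' string for dtFimBenef (see the strptime branch in
# __init__). Both lines below are equivalent:
#
#     import datetime
#     fim = fimBeneficio(dtFimBenef=datetime.date(2018, 5, 18))
#     fim = fimBeneficio(dtFimBenef='2018-05-18')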
class tpBenef(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, tpBenef)
if subclass is not None:
return subclass(*args_, **kwargs_)
if tpBenef.subclass:
return tpBenef.subclass(*args_, **kwargs_)
else:
return tpBenef(*args_, **kwargs_)
factory = staticmethod(factory)
    def hasContent_(self):
        return False
def export(self, outfile, level, namespace_='', name_='tpBenef', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('tpBenef')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='tpBenef')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='tpBenef', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='tpBenef'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='tpBenef', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class tpBenef
class nrBenefic(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, nrBenefic)
if subclass is not None:
return subclass(*args_, **kwargs_)
if nrBenefic.subclass:
return nrBenefic.subclass(*args_, **kwargs_)
else:
return nrBenefic(*args_, **kwargs_)
factory = staticmethod(factory)
    def hasContent_(self):
        return False
def export(self, outfile, level, namespace_='', name_='nrBenefic', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('nrBenefic')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='nrBenefic')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='nrBenefic', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='nrBenefic'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='nrBenefic', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class nrBenefic
class dtFimBenef(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, dtFimBenef)
if subclass is not None:
return subclass(*args_, **kwargs_)
if dtFimBenef.subclass:
return dtFimBenef.subclass(*args_, **kwargs_)
else:
return dtFimBenef(*args_, **kwargs_)
factory = staticmethod(factory)
    def hasContent_(self):
        return False
def export(self, outfile, level, namespace_='', name_='dtFimBenef', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('dtFimBenef')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='dtFimBenef')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='dtFimBenef', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='dtFimBenef'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='dtFimBenef', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class dtFimBenef
class mtvFim(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, mtvFim)
if subclass is not None:
return subclass(*args_, **kwargs_)
if mtvFim.subclass:
return mtvFim.subclass(*args_, **kwargs_)
else:
return mtvFim(*args_, **kwargs_)
factory = staticmethod(factory)
    def hasContent_(self):
        return False
def export(self, outfile, level, namespace_='', name_='mtvFim', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('mtvFim')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='mtvFim')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='mtvFim', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='mtvFim'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='mtvFim', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class mtvFim
class TIdeEveTrab(GeneratedsSuper):
"""Identificação do evento"""
subclass = None
superclass = None
def __init__(self, indRetif=None, nrRecibo=None, tpAmb=None, procEmi=None, verProc=None):
self.original_tagname_ = None
self.indRetif = indRetif
self.nrRecibo = nrRecibo
self.tpAmb = tpAmb
self.procEmi = procEmi
self.verProc = verProc
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, TIdeEveTrab)
if subclass is not None:
return subclass(*args_, **kwargs_)
if TIdeEveTrab.subclass:
return TIdeEveTrab.subclass(*args_, **kwargs_)
else:
return TIdeEveTrab(*args_, **kwargs_)
factory = staticmethod(factory)
def get_indRetif(self): return self.indRetif
def set_indRetif(self, indRetif): self.indRetif = indRetif
def get_nrRecibo(self): return self.nrRecibo
def set_nrRecibo(self, nrRecibo): self.nrRecibo = nrRecibo
def get_tpAmb(self): return self.tpAmb
def set_tpAmb(self, tpAmb): self.tpAmb = tpAmb
def get_procEmi(self): return self.procEmi
def set_procEmi(self, procEmi): self.procEmi = procEmi
def get_verProc(self): return self.verProc
def set_verProc(self, verProc): self.verProc = verProc
def hasContent_(self):
if (
self.indRetif is not None or
self.nrRecibo is not None or
self.tpAmb is not None or
self.procEmi is not None or
self.verProc is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='TIdeEveTrab', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('TIdeEveTrab')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='TIdeEveTrab')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='TIdeEveTrab', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='TIdeEveTrab'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='TIdeEveTrab', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.indRetif is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sindRetif>%s</%sindRetif>%s' % (namespace_, self.gds_format_integer(self.indRetif, input_name='indRetif'), namespace_, eol_))
if self.nrRecibo is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%snrRecibo>%s</%snrRecibo>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.nrRecibo), input_name='nrRecibo')), namespace_, eol_))
if self.tpAmb is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%stpAmb>%s</%stpAmb>%s' % (namespace_, self.gds_format_integer(self.tpAmb, input_name='tpAmb'), namespace_, eol_))
if self.procEmi is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sprocEmi>%s</%sprocEmi>%s' % (namespace_, self.gds_format_integer(self.procEmi, input_name='procEmi'), namespace_, eol_))
if self.verProc is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sverProc>%s</%sverProc>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.verProc), input_name='verProc')), namespace_, eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'indRetif':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'indRetif')
self.indRetif = ival_
elif nodeName_ == 'nrRecibo':
nrRecibo_ = child_.text
nrRecibo_ = self.gds_validate_string(nrRecibo_, node, 'nrRecibo')
self.nrRecibo = nrRecibo_
elif nodeName_ == 'tpAmb':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'tpAmb')
self.tpAmb = ival_
elif nodeName_ == 'procEmi':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'procEmi')
self.procEmi = ival_
elif nodeName_ == 'verProc':
verProc_ = child_.text
verProc_ = self.gds_validate_string(verProc_, node, 'verProc')
self.verProc = verProc_
# end class TIdeEveTrab
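# Field semantics (assumption; the exact code tables depend on the layout
# version): indRetif 1 = original file, 2 = rectification, in which case
# nrRecibo must carry the receipt number of the event being rectified; tpAmb
# distinguishes production from restricted-production environments. Sketch
# with hypothetical values:
#
#     ide = TIdeEveTrab(indRetif=2, nrRecibo='1.2.0000000000000000001',
#                       tpAmb=2, procEmi=1, verProc='app-1.0')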
class indRetif(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, indRetif)
if subclass is not None:
return subclass(*args_, **kwargs_)
if indRetif.subclass:
return indRetif.subclass(*args_, **kwargs_)
else:
return indRetif(*args_, **kwargs_)
factory = staticmethod(factory)
    def hasContent_(self):
        return False
def export(self, outfile, level, namespace_='', name_='indRetif', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('indRetif')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='indRetif')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='indRetif', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='indRetif'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='indRetif', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class indRetif
class nrRecibo(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, nrRecibo)
if subclass is not None:
return subclass(*args_, **kwargs_)
if nrRecibo.subclass:
return nrRecibo.subclass(*args_, **kwargs_)
else:
return nrRecibo(*args_, **kwargs_)
factory = staticmethod(factory)
    def hasContent_(self):
        return False
def export(self, outfile, level, namespace_='', name_='nrRecibo', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('nrRecibo')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='nrRecibo')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='nrRecibo', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='nrRecibo'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='nrRecibo', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class nrRecibo
class tpAmb(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, tpAmb)
if subclass is not None:
return subclass(*args_, **kwargs_)
if tpAmb.subclass:
return tpAmb.subclass(*args_, **kwargs_)
else:
return tpAmb(*args_, **kwargs_)
factory = staticmethod(factory)
    def hasContent_(self):
        return False
def export(self, outfile, level, namespace_='', name_='tpAmb', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('tpAmb')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='tpAmb')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='tpAmb', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='tpAmb'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='tpAmb', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class tpAmb
class procEmi(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, procEmi)
if subclass is not None:
return subclass(*args_, **kwargs_)
if procEmi.subclass:
return procEmi.subclass(*args_, **kwargs_)
else:
return procEmi(*args_, **kwargs_)
factory = staticmethod(factory)
    def hasContent_(self):
        return False
def export(self, outfile, level, namespace_='', name_='procEmi', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('procEmi')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='procEmi')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='procEmi', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='procEmi'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='procEmi', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class procEmi
class verProc(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, verProc)
if subclass is not None:
return subclass(*args_, **kwargs_)
if verProc.subclass:
return verProc.subclass(*args_, **kwargs_)
else:
return verProc(*args_, **kwargs_)
factory = staticmethod(factory)
    def hasContent_(self):
        return False
def export(self, outfile, level, namespace_='', name_='verProc', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('verProc')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='verProc')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='verProc', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='verProc'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='verProc', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class verProc
class TEmprPJ(GeneratedsSuper):
"""Informações do Empregador PJ"""
subclass = None
superclass = None
def __init__(self, tpInsc=None, nrInsc=None):
self.original_tagname_ = None
self.tpInsc = tpInsc
self.nrInsc = nrInsc
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, TEmprPJ)
if subclass is not None:
return subclass(*args_, **kwargs_)
if TEmprPJ.subclass:
return TEmprPJ.subclass(*args_, **kwargs_)
else:
return TEmprPJ(*args_, **kwargs_)
factory = staticmethod(factory)
def get_tpInsc(self): return self.tpInsc
def set_tpInsc(self, tpInsc): self.tpInsc = tpInsc
def get_nrInsc(self): return self.nrInsc
def set_nrInsc(self, nrInsc): self.nrInsc = nrInsc
def hasContent_(self):
if (
self.tpInsc is not None or
self.nrInsc is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='TEmprPJ', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('TEmprPJ')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='TEmprPJ')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='TEmprPJ', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='TEmprPJ'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='TEmprPJ', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.tpInsc is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%stpInsc>%s</%stpInsc>%s' % (namespace_, self.gds_format_integer(self.tpInsc, input_name='tpInsc'), namespace_, eol_))
if self.nrInsc is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%snrInsc>%s</%snrInsc>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.nrInsc), input_name='nrInsc')), namespace_, eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'tpInsc':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'tpInsc')
self.tpInsc = ival_
elif nodeName_ == 'nrInsc':
nrInsc_ = child_.text
nrInsc_ = self.gds_validate_string(nrInsc_, node, 'nrInsc')
self.nrInsc = nrInsc_
# end class TEmprPJ
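# Sketch (assumption about the eSocial code table): tpInsc = 1 denotes a CNPJ
# registration; nrInsc carries the number as a digits-only string.
#
#     emp = TEmprPJ(tpInsc=1, nrInsc='00000000000191')  # hypothetical CNPJ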
class tpInsc(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, tpInsc)
if subclass is not None:
return subclass(*args_, **kwargs_)
if tpInsc.subclass:
return tpInsc.subclass(*args_, **kwargs_)
else:
return tpInsc(*args_, **kwargs_)
factory = staticmethod(factory)
    def hasContent_(self):
        return False
def export(self, outfile, level, namespace_='', name_='tpInsc', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('tpInsc')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='tpInsc')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='tpInsc', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='tpInsc'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='tpInsc', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class tpInsc
class nrInsc(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, nrInsc)
if subclass is not None:
return subclass(*args_, **kwargs_)
if nrInsc.subclass:
return nrInsc.subclass(*args_, **kwargs_)
else:
return nrInsc(*args_, **kwargs_)
factory = staticmethod(factory)
    def hasContent_(self):
        return False
def export(self, outfile, level, namespace_='', name_='nrInsc', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('nrInsc')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='nrInsc')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='nrInsc', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='nrInsc'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='nrInsc', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class nrInsc
class TDadosBenef(GeneratedsSuper):
"""Dados de beneficiário"""
subclass = None
superclass = None
def __init__(self, dadosNasc=None, endereco=None):
self.original_tagname_ = None
self.dadosNasc = dadosNasc
self.endereco = endereco
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, TDadosBenef)
if subclass is not None:
return subclass(*args_, **kwargs_)
if TDadosBenef.subclass:
return TDadosBenef.subclass(*args_, **kwargs_)
else:
return TDadosBenef(*args_, **kwargs_)
factory = staticmethod(factory)
def get_dadosNasc(self): return self.dadosNasc
def set_dadosNasc(self, dadosNasc): self.dadosNasc = dadosNasc
def get_endereco(self): return self.endereco
def set_endereco(self, endereco): self.endereco = endereco
def hasContent_(self):
if (
self.dadosNasc is not None or
self.endereco is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='TDadosBenef', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('TDadosBenef')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='TDadosBenef')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='TDadosBenef', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='TDadosBenef'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='TDadosBenef', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.dadosNasc is not None:
self.dadosNasc.export(outfile, level, namespace_, name_='dadosNasc', pretty_print=pretty_print)
if self.endereco is not None:
self.endereco.export(outfile, level, namespace_, name_='endereco', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'dadosNasc':
obj_ = dadosNasc.factory()
obj_.build(child_)
self.dadosNasc = obj_
obj_.original_tagname_ = 'dadosNasc'
elif nodeName_ == 'endereco':
obj_ = endereco.factory()
obj_.build(child_)
self.endereco = obj_
obj_.original_tagname_ = 'endereco'
# end class TDadosBenef
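# Hand-written usage sketch (not emitted by generateDS; the field
# values are invented for illustration). A TDadosBenef can be built
# programmatically from the element classes defined below and
# serialized with export():
#
#   import sys
#   nasc = dadosNasc(dtNascto='1980-05-10', codMunic=3550308, uf='SP')
#   benef = TDadosBenef(dadosNasc=nasc)
#   benef.export(sys.stdout, 0, name_='dadosBenef')
#
# dadosNasc accepts dtNascto either as a datetime.date or as an ISO
# 'YYYY-MM-DD' string, which its __init__ parses with strptime.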
class dadosNasc(GeneratedsSuper):
"""Informações de nascimento do beneficiário"""
subclass = None
superclass = None
def __init__(self, dtNascto=None, codMunic=None, uf=None, paisNascto=None, paisNac=None, nmMae=None, nmPai=None):
self.original_tagname_ = None
if isinstance(dtNascto, BaseStrType_):
initvalue_ = datetime_.datetime.strptime(dtNascto, '%Y-%m-%d').date()
else:
initvalue_ = dtNascto
self.dtNascto = initvalue_
self.codMunic = codMunic
self.uf = uf
self.paisNascto = paisNascto
self.paisNac = paisNac
self.nmMae = nmMae
self.nmPai = nmPai
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, dadosNasc)
if subclass is not None:
return subclass(*args_, **kwargs_)
if dadosNasc.subclass:
return dadosNasc.subclass(*args_, **kwargs_)
else:
return dadosNasc(*args_, **kwargs_)
factory = staticmethod(factory)
def get_dtNascto(self): return self.dtNascto
def set_dtNascto(self, dtNascto): self.dtNascto = dtNascto
def get_codMunic(self): return self.codMunic
def set_codMunic(self, codMunic): self.codMunic = codMunic
def get_uf(self): return self.uf
def set_uf(self, uf): self.uf = uf
def get_paisNascto(self): return self.paisNascto
def set_paisNascto(self, paisNascto): self.paisNascto = paisNascto
def get_paisNac(self): return self.paisNac
def set_paisNac(self, paisNac): self.paisNac = paisNac
def get_nmMae(self): return self.nmMae
def set_nmMae(self, nmMae): self.nmMae = nmMae
def get_nmPai(self): return self.nmPai
def set_nmPai(self, nmPai): self.nmPai = nmPai
def hasContent_(self):
if (
self.dtNascto is not None or
self.codMunic is not None or
self.uf is not None or
self.paisNascto is not None or
self.paisNac is not None or
self.nmMae is not None or
self.nmPai is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='dadosNasc', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('dadosNasc')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='dadosNasc')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='dadosNasc', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='dadosNasc'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='dadosNasc', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.dtNascto is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sdtNascto>%s</%sdtNascto>%s' % (namespace_, self.gds_format_date(self.dtNascto, input_name='dtNascto'), namespace_, eol_))
if self.codMunic is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%scodMunic>%s</%scodMunic>%s' % (namespace_, self.gds_format_integer(self.codMunic, input_name='codMunic'), namespace_, eol_))
if self.uf is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%suf>%s</%suf>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.uf), input_name='uf')), namespace_, eol_))
if self.paisNascto is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%spaisNascto>%s</%spaisNascto>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.paisNascto), input_name='paisNascto')), namespace_, eol_))
if self.paisNac is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%spaisNac>%s</%spaisNac>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.paisNac), input_name='paisNac')), namespace_, eol_))
if self.nmMae is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%snmMae>%s</%snmMae>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.nmMae), input_name='nmMae')), namespace_, eol_))
if self.nmPai is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%snmPai>%s</%snmPai>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.nmPai), input_name='nmPai')), namespace_, eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'dtNascto':
sval_ = child_.text
dval_ = self.gds_parse_date(sval_)
self.dtNascto = dval_
elif nodeName_ == 'codMunic':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'codMunic')
self.codMunic = ival_
elif nodeName_ == 'uf':
uf_ = child_.text
uf_ = self.gds_validate_string(uf_, node, 'uf')
self.uf = uf_
elif nodeName_ == 'paisNascto':
paisNascto_ = child_.text
paisNascto_ = self.gds_validate_string(paisNascto_, node, 'paisNascto')
self.paisNascto = paisNascto_
elif nodeName_ == 'paisNac':
paisNac_ = child_.text
paisNac_ = self.gds_validate_string(paisNac_, node, 'paisNac')
self.paisNac = paisNac_
elif nodeName_ == 'nmMae':
nmMae_ = child_.text
nmMae_ = self.gds_validate_string(nmMae_, node, 'nmMae')
self.nmMae = nmMae_
elif nodeName_ == 'nmPai':
nmPai_ = child_.text
nmPai_ = self.gds_validate_string(nmPai_, node, 'nmPai')
self.nmPai = nmPai_
# end class dadosNasc
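# The empty classes that follow (dtNascto, codMunic, uf, paisNascto,
# paisNac, nmMae, nmPai, and the address fields further down) are
# generateDS placeholders for simple-content elements: they carry no
# fields of their own, their hasContent_() condition is an empty tuple
# (always falsy), and the parent classes above serialize these values
# directly instead of delegating to them.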
class dtNascto(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, dtNascto)
if subclass is not None:
return subclass(*args_, **kwargs_)
if dtNascto.subclass:
return dtNascto.subclass(*args_, **kwargs_)
else:
return dtNascto(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='dtNascto', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('dtNascto')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='dtNascto')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='dtNascto', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='dtNascto'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='dtNascto', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class dtNascto
class codMunic(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, codMunic)
if subclass is not None:
return subclass(*args_, **kwargs_)
if codMunic.subclass:
return codMunic.subclass(*args_, **kwargs_)
else:
return codMunic(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='codMunic', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('codMunic')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='codMunic')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='codMunic', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='codMunic'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='codMunic', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class codMunic
class uf(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, uf)
if subclass is not None:
return subclass(*args_, **kwargs_)
if uf.subclass:
return uf.subclass(*args_, **kwargs_)
else:
return uf(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='uf', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('uf')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='uf')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='uf', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='uf'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='uf', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class uf
class paisNascto(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, paisNascto)
if subclass is not None:
return subclass(*args_, **kwargs_)
if paisNascto.subclass:
return paisNascto.subclass(*args_, **kwargs_)
else:
return paisNascto(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='paisNascto', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('paisNascto')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='paisNascto')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='paisNascto', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='paisNascto'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='paisNascto', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class paisNascto
class paisNac(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, paisNac)
if subclass is not None:
return subclass(*args_, **kwargs_)
if paisNac.subclass:
return paisNac.subclass(*args_, **kwargs_)
else:
return paisNac(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='paisNac', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('paisNac')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='paisNac')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='paisNac', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='paisNac'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='paisNac', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class paisNac
class nmMae(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, nmMae)
if subclass is not None:
return subclass(*args_, **kwargs_)
if nmMae.subclass:
return nmMae.subclass(*args_, **kwargs_)
else:
return nmMae(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='nmMae', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('nmMae')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='nmMae')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='nmMae', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='nmMae'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='nmMae', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class nmMae
class nmPai(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, nmPai)
if subclass is not None:
return subclass(*args_, **kwargs_)
if nmPai.subclass:
return nmPai.subclass(*args_, **kwargs_)
else:
return nmPai(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='nmPai', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('nmPai')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='nmPai')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='nmPai', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='nmPai'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='nmPai', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class nmPai
class endereco(GeneratedsSuper):
"""Grupo de informações do endereço do Trabalhador"""
subclass = None
superclass = None
def __init__(self, brasil=None, exterior=None):
self.original_tagname_ = None
self.brasil = brasil
self.exterior = exterior
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, endereco)
if subclass is not None:
return subclass(*args_, **kwargs_)
if endereco.subclass:
return endereco.subclass(*args_, **kwargs_)
else:
return endereco(*args_, **kwargs_)
factory = staticmethod(factory)
def get_brasil(self): return self.brasil
def set_brasil(self, brasil): self.brasil = brasil
def get_exterior(self): return self.exterior
def set_exterior(self, exterior): self.exterior = exterior
def hasContent_(self):
if (
self.brasil is not None or
self.exterior is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='endereco', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('endereco')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='endereco')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='endereco', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='endereco'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='endereco', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.brasil is not None:
self.brasil.export(outfile, level, namespace_, name_='brasil', pretty_print=pretty_print)
if self.exterior is not None:
self.exterior.export(outfile, level, namespace_, name_='exterior', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'brasil':
obj_ = TEnderecoBrasil.factory()
obj_.build(child_)
self.brasil = obj_
obj_.original_tagname_ = 'brasil'
elif nodeName_ == 'exterior':
obj_ = TEnderecoExterior.factory()
obj_.build(child_)
self.exterior = obj_
obj_.original_tagname_ = 'exterior'
# end class endereco
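# Note: endereco holds one slot for a Brazilian address (brasil, type
# TEnderecoBrasil below) and one for a foreign address (exterior, type
# TEnderecoExterior). In the source eSocial schema this is presumably
# an xs:choice, so callers are expected to populate only one of the
# two; the generated code itself does not enforce that exclusivity and
# will export both if both are set.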
class TEnderecoBrasil(GeneratedsSuper):
"""Informações do Endereço no Brasil"""
subclass = None
superclass = None
def __init__(self, tpLograd=None, dscLograd=None, nrLograd=None, complemento=None, bairro=None, cep=None, codMunic=None, uf=None):
self.original_tagname_ = None
self.tpLograd = tpLograd
self.dscLograd = dscLograd
self.nrLograd = nrLograd
self.complemento = complemento
self.bairro = bairro
self.cep = cep
self.codMunic = codMunic
self.uf = uf
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, TEnderecoBrasil)
if subclass is not None:
return subclass(*args_, **kwargs_)
if TEnderecoBrasil.subclass:
return TEnderecoBrasil.subclass(*args_, **kwargs_)
else:
return TEnderecoBrasil(*args_, **kwargs_)
factory = staticmethod(factory)
def get_tpLograd(self): return self.tpLograd
def set_tpLograd(self, tpLograd): self.tpLograd = tpLograd
def get_dscLograd(self): return self.dscLograd
def set_dscLograd(self, dscLograd): self.dscLograd = dscLograd
def get_nrLograd(self): return self.nrLograd
def set_nrLograd(self, nrLograd): self.nrLograd = nrLograd
def get_complemento(self): return self.complemento
def set_complemento(self, complemento): self.complemento = complemento
def get_bairro(self): return self.bairro
def set_bairro(self, bairro): self.bairro = bairro
def get_cep(self): return self.cep
def set_cep(self, cep): self.cep = cep
def get_codMunic(self): return self.codMunic
def set_codMunic(self, codMunic): self.codMunic = codMunic
def get_uf(self): return self.uf
def set_uf(self, uf): self.uf = uf
def hasContent_(self):
if (
self.tpLograd is not None or
self.dscLograd is not None or
self.nrLograd is not None or
self.complemento is not None or
self.bairro is not None or
self.cep is not None or
self.codMunic is not None or
self.uf is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='TEnderecoBrasil', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('TEnderecoBrasil')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='TEnderecoBrasil')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='TEnderecoBrasil', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='TEnderecoBrasil'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='TEnderecoBrasil', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.tpLograd is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%stpLograd>%s</%stpLograd>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.tpLograd), input_name='tpLograd')), namespace_, eol_))
if self.dscLograd is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sdscLograd>%s</%sdscLograd>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.dscLograd), input_name='dscLograd')), namespace_, eol_))
if self.nrLograd is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%snrLograd>%s</%snrLograd>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.nrLograd), input_name='nrLograd')), namespace_, eol_))
if self.complemento is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%scomplemento>%s</%scomplemento>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.complemento), input_name='complemento')), namespace_, eol_))
if self.bairro is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sbairro>%s</%sbairro>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.bairro), input_name='bairro')), namespace_, eol_))
if self.cep is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%scep>%s</%scep>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.cep), input_name='cep')), namespace_, eol_))
if self.codMunic is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%scodMunic>%s</%scodMunic>%s' % (namespace_, self.gds_format_integer(self.codMunic, input_name='codMunic'), namespace_, eol_))
if self.uf is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%suf>%s</%suf>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.uf), input_name='uf')), namespace_, eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'tpLograd':
tpLograd_ = child_.text
tpLograd_ = self.gds_validate_string(tpLograd_, node, 'tpLograd')
self.tpLograd = tpLograd_
elif nodeName_ == 'dscLograd':
dscLograd_ = child_.text
dscLograd_ = self.gds_validate_string(dscLograd_, node, 'dscLograd')
self.dscLograd = dscLograd_
elif nodeName_ == 'nrLograd':
nrLograd_ = child_.text
nrLograd_ = self.gds_validate_string(nrLograd_, node, 'nrLograd')
self.nrLograd = nrLograd_
elif nodeName_ == 'complemento':
complemento_ = child_.text
complemento_ = self.gds_validate_string(complemento_, node, 'complemento')
self.complemento = complemento_
elif nodeName_ == 'bairro':
bairro_ = child_.text
bairro_ = self.gds_validate_string(bairro_, node, 'bairro')
self.bairro = bairro_
elif nodeName_ == 'cep':
cep_ = child_.text
cep_ = self.gds_validate_string(cep_, node, 'cep')
self.cep = cep_
elif nodeName_ == 'codMunic':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'codMunic')
self.codMunic = ival_
elif nodeName_ == 'uf':
uf_ = child_.text
uf_ = self.gds_validate_string(uf_, node, 'uf')
self.uf = uf_
# end class TEnderecoBrasil
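# Serialization sketch for a Brazilian address (illustrative values,
# not generated output):
#
#   import io
#   end_br = TEnderecoBrasil(dscLograd='Rua Exemplo', nrLograd='100',
#                            bairro='Centro', cep='01001000',
#                            codMunic=3550308, uf='SP')
#   buf = io.StringIO()
#   end_br.export(buf, 0, name_='brasil')
#   # buf.getvalue() -> '<brasil>\n  <dscLograd>Rua Exemplo</dscLograd>...'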
class tpLograd(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, tpLograd)
if subclass is not None:
return subclass(*args_, **kwargs_)
if tpLograd.subclass:
return tpLograd.subclass(*args_, **kwargs_)
else:
return tpLograd(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='tpLograd', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('tpLograd')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='tpLograd')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='tpLograd', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='tpLograd'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='tpLograd', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class tpLograd
class dscLograd(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, dscLograd)
if subclass is not None:
return subclass(*args_, **kwargs_)
if dscLograd.subclass:
return dscLograd.subclass(*args_, **kwargs_)
else:
return dscLograd(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='dscLograd', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('dscLograd')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='dscLograd')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='dscLograd', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='dscLograd'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='dscLograd', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class dscLograd
class nrLograd(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, nrLograd)
if subclass is not None:
return subclass(*args_, **kwargs_)
if nrLograd.subclass:
return nrLograd.subclass(*args_, **kwargs_)
else:
return nrLograd(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='nrLograd', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('nrLograd')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='nrLograd')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='nrLograd', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='nrLograd'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='nrLograd', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class nrLograd
class complemento(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, complemento)
if subclass is not None:
return subclass(*args_, **kwargs_)
if complemento.subclass:
return complemento.subclass(*args_, **kwargs_)
else:
return complemento(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='complemento', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('complemento')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='complemento')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='complemento', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='complemento'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='complemento', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class complemento
class bairro(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, bairro)
if subclass is not None:
return subclass(*args_, **kwargs_)
if bairro.subclass:
return bairro.subclass(*args_, **kwargs_)
else:
return bairro(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='bairro', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('bairro')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='bairro')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='bairro', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='bairro'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='bairro', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class bairro
class cep(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, cep)
if subclass is not None:
return subclass(*args_, **kwargs_)
if cep.subclass:
return cep.subclass(*args_, **kwargs_)
else:
return cep(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='cep', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('cep')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='cep')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='cep', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='cep'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='cep', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class cep
class TEnderecoExterior(GeneratedsSuper):
"""Informações do Endereço no Exterior"""
subclass = None
superclass = None
def __init__(self, paisResid=None, dscLograd=None, nrLograd=None, complemento=None, bairro=None, nmCid=None, codPostal=None):
self.original_tagname_ = None
self.paisResid = paisResid
self.dscLograd = dscLograd
self.nrLograd = nrLograd
self.complemento = complemento
self.bairro = bairro
self.nmCid = nmCid
self.codPostal = codPostal
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, TEnderecoExterior)
if subclass is not None:
return subclass(*args_, **kwargs_)
if TEnderecoExterior.subclass:
return TEnderecoExterior.subclass(*args_, **kwargs_)
else:
return TEnderecoExterior(*args_, **kwargs_)
factory = staticmethod(factory)
def get_paisResid(self): return self.paisResid
def set_paisResid(self, paisResid): self.paisResid = paisResid
def get_dscLograd(self): return self.dscLograd
def set_dscLograd(self, dscLograd): self.dscLograd = dscLograd
def get_nrLograd(self): return self.nrLograd
def set_nrLograd(self, nrLograd): self.nrLograd = nrLograd
def get_complemento(self): return self.complemento
def set_complemento(self, complemento): self.complemento = complemento
def get_bairro(self): return self.bairro
def set_bairro(self, bairro): self.bairro = bairro
def get_nmCid(self): return self.nmCid
def set_nmCid(self, nmCid): self.nmCid = nmCid
def get_codPostal(self): return self.codPostal
def set_codPostal(self, codPostal): self.codPostal = codPostal
def hasContent_(self):
if (
self.paisResid is not None or
self.dscLograd is not None or
self.nrLograd is not None or
self.complemento is not None or
self.bairro is not None or
self.nmCid is not None or
self.codPostal is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='TEnderecoExterior', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('TEnderecoExterior')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='TEnderecoExterior')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='TEnderecoExterior', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='TEnderecoExterior'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='TEnderecoExterior', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.paisResid is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%spaisResid>%s</%spaisResid>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.paisResid), input_name='paisResid')), namespace_, eol_))
if self.dscLograd is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sdscLograd>%s</%sdscLograd>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.dscLograd), input_name='dscLograd')), namespace_, eol_))
if self.nrLograd is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%snrLograd>%s</%snrLograd>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.nrLograd), input_name='nrLograd')), namespace_, eol_))
if self.complemento is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%scomplemento>%s</%scomplemento>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.complemento), input_name='complemento')), namespace_, eol_))
if self.bairro is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sbairro>%s</%sbairro>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.bairro), input_name='bairro')), namespace_, eol_))
if self.nmCid is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%snmCid>%s</%snmCid>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.nmCid), input_name='nmCid')), namespace_, eol_))
if self.codPostal is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%scodPostal>%s</%scodPostal>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.codPostal), input_name='codPostal')), namespace_, eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'paisResid':
paisResid_ = child_.text
paisResid_ = self.gds_validate_string(paisResid_, node, 'paisResid')
self.paisResid = paisResid_
elif nodeName_ == 'dscLograd':
dscLograd_ = child_.text
dscLograd_ = self.gds_validate_string(dscLograd_, node, 'dscLograd')
self.dscLograd = dscLograd_
elif nodeName_ == 'nrLograd':
nrLograd_ = child_.text
nrLograd_ = self.gds_validate_string(nrLograd_, node, 'nrLograd')
self.nrLograd = nrLograd_
elif nodeName_ == 'complemento':
complemento_ = child_.text
complemento_ = self.gds_validate_string(complemento_, node, 'complemento')
self.complemento = complemento_
elif nodeName_ == 'bairro':
bairro_ = child_.text
bairro_ = self.gds_validate_string(bairro_, node, 'bairro')
self.bairro = bairro_
elif nodeName_ == 'nmCid':
nmCid_ = child_.text
nmCid_ = self.gds_validate_string(nmCid_, node, 'nmCid')
self.nmCid = nmCid_
elif nodeName_ == 'codPostal':
codPostal_ = child_.text
codPostal_ = self.gds_validate_string(codPostal_, node, 'codPostal')
self.codPostal = codPostal_
# end class TEnderecoExterior
class paisResid(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, paisResid)
if subclass is not None:
return subclass(*args_, **kwargs_)
if paisResid.subclass:
return paisResid.subclass(*args_, **kwargs_)
else:
return paisResid(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='paisResid', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('paisResid')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='paisResid')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='paisResid', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='paisResid'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='paisResid', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class paisResid
class nmCid(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, nmCid)
if subclass is not None:
return subclass(*args_, **kwargs_)
if nmCid.subclass:
return nmCid.subclass(*args_, **kwargs_)
else:
return nmCid(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='nmCid', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('nmCid')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='nmCid')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='nmCid', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='nmCid'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='nmCid', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class nmCid
class codPostal(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, codPostal)
if subclass is not None:
return subclass(*args_, **kwargs_)
if codPostal.subclass:
return codPostal.subclass(*args_, **kwargs_)
else:
return codPostal(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='codPostal', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('codPostal')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='codPostal')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='codPostal', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='codPostal'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='codPostal', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class codPostal
class TDadosBeneficio(GeneratedsSuper):
"""Dados do benefício previdenciário"""
subclass = None
superclass = None
def __init__(self, tpBenef=None, nrBenefic=None, dtIniBenef=None, vrBenef=None, infoPenMorte=None):
self.original_tagname_ = None
self.tpBenef = tpBenef
self.nrBenefic = nrBenefic
if isinstance(dtIniBenef, BaseStrType_):
initvalue_ = datetime_.datetime.strptime(dtIniBenef, '%Y-%m-%d').date()
else:
initvalue_ = dtIniBenef
self.dtIniBenef = initvalue_
self.vrBenef = vrBenef
self.infoPenMorte = infoPenMorte
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, TDadosBeneficio)
if subclass is not None:
return subclass(*args_, **kwargs_)
if TDadosBeneficio.subclass:
return TDadosBeneficio.subclass(*args_, **kwargs_)
else:
return TDadosBeneficio(*args_, **kwargs_)
factory = staticmethod(factory)
def get_tpBenef(self): return self.tpBenef
def set_tpBenef(self, tpBenef): self.tpBenef = tpBenef
def get_nrBenefic(self): return self.nrBenefic
def set_nrBenefic(self, nrBenefic): self.nrBenefic = nrBenefic
def get_dtIniBenef(self): return self.dtIniBenef
def set_dtIniBenef(self, dtIniBenef): self.dtIniBenef = dtIniBenef
def get_vrBenef(self): return self.vrBenef
def set_vrBenef(self, vrBenef): self.vrBenef = vrBenef
def get_infoPenMorte(self): return self.infoPenMorte
def set_infoPenMorte(self, infoPenMorte): self.infoPenMorte = infoPenMorte
def hasContent_(self):
if (
self.tpBenef is not None or
self.nrBenefic is not None or
self.dtIniBenef is not None or
self.vrBenef is not None or
self.infoPenMorte is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='TDadosBeneficio', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('TDadosBeneficio')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='TDadosBeneficio')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='TDadosBeneficio', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='TDadosBeneficio'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='TDadosBeneficio', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.tpBenef is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%stpBenef>%s</%stpBenef>%s' % (namespace_, self.gds_format_integer(self.tpBenef, input_name='tpBenef'), namespace_, eol_))
if self.nrBenefic is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%snrBenefic>%s</%snrBenefic>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.nrBenefic), input_name='nrBenefic')), namespace_, eol_))
if self.dtIniBenef is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sdtIniBenef>%s</%sdtIniBenef>%s' % (namespace_, self.gds_format_date(self.dtIniBenef, input_name='dtIniBenef'), namespace_, eol_))
if self.vrBenef is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%svrBenef>%s</%svrBenef>%s' % (namespace_, self.gds_format_float(self.vrBenef, input_name='vrBenef'), namespace_, eol_))
if self.infoPenMorte is not None:
self.infoPenMorte.export(outfile, level, namespace_, name_='infoPenMorte', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'tpBenef':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'tpBenef')
self.tpBenef = ival_
elif nodeName_ == 'nrBenefic':
nrBenefic_ = child_.text
nrBenefic_ = self.gds_validate_string(nrBenefic_, node, 'nrBenefic')
self.nrBenefic = nrBenefic_
elif nodeName_ == 'dtIniBenef':
sval_ = child_.text
dval_ = self.gds_parse_date(sval_)
self.dtIniBenef = dval_
elif nodeName_ == 'vrBenef':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'vrBenef')
self.vrBenef = fval_
elif nodeName_ == 'infoPenMorte':
obj_ = infoPenMorte.factory()
obj_.build(child_)
self.infoPenMorte = obj_
obj_.original_tagname_ = 'infoPenMorte'
# end class TDadosBeneficio
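# For orientation, a sketch of the XML fragment this class maps (element
# names come from exportChildren/buildChildren above; the values and the
# <iniBeneficio> tag are illustrative only, see GDSClassesMapping below):
#
#   <iniBeneficio>
#     <tpBenef>1</tpBenef>
#     <nrBenefic>12345</nrBenefic>
#     <dtIniBenef>2018-01-01</dtIniBenef>
#     <vrBenef>1000.0</vrBenef>
#     <infoPenMorte>
#       <idQuota>1</idQuota>
#       <cpfInst>00000000000</cpfInst>
#     </infoPenMorte>
#   </iniBeneficio>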
class dtIniBenef(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, dtIniBenef)
if subclass is not None:
return subclass(*args_, **kwargs_)
if dtIniBenef.subclass:
return dtIniBenef.subclass(*args_, **kwargs_)
else:
return dtIniBenef(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='dtIniBenef', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('dtIniBenef')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='dtIniBenef')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='dtIniBenef', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='dtIniBenef'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='dtIniBenef', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class dtIniBenef
class vrBenef(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, vrBenef)
if subclass is not None:
return subclass(*args_, **kwargs_)
if vrBenef.subclass:
return vrBenef.subclass(*args_, **kwargs_)
else:
return vrBenef(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='vrBenef', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('vrBenef')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='vrBenef')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='vrBenef', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='vrBenef'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='vrBenef', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class vrBenef
class infoPenMorte(GeneratedsSuper):
"""Informações relativas a pensão por morte"""
subclass = None
superclass = None
def __init__(self, idQuota=None, cpfInst=None):
self.original_tagname_ = None
self.idQuota = idQuota
self.cpfInst = cpfInst
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, infoPenMorte)
if subclass is not None:
return subclass(*args_, **kwargs_)
if infoPenMorte.subclass:
return infoPenMorte.subclass(*args_, **kwargs_)
else:
return infoPenMorte(*args_, **kwargs_)
factory = staticmethod(factory)
def get_idQuota(self): return self.idQuota
def set_idQuota(self, idQuota): self.idQuota = idQuota
def get_cpfInst(self): return self.cpfInst
def set_cpfInst(self, cpfInst): self.cpfInst = cpfInst
def hasContent_(self):
if (
self.idQuota is not None or
self.cpfInst is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='infoPenMorte', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('infoPenMorte')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='infoPenMorte')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='infoPenMorte', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='infoPenMorte'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='infoPenMorte', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.idQuota is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sidQuota>%s</%sidQuota>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.idQuota), input_name='idQuota')), namespace_, eol_))
if self.cpfInst is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%scpfInst>%s</%scpfInst>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.cpfInst), input_name='cpfInst')), namespace_, eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'idQuota':
idQuota_ = child_.text
idQuota_ = self.gds_validate_string(idQuota_, node, 'idQuota')
self.idQuota = idQuota_
elif nodeName_ == 'cpfInst':
cpfInst_ = child_.text
cpfInst_ = self.gds_validate_string(cpfInst_, node, 'cpfInst')
self.cpfInst = cpfInst_
# end class infoPenMorte
class idQuota(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, idQuota)
if subclass is not None:
return subclass(*args_, **kwargs_)
if idQuota.subclass:
return idQuota.subclass(*args_, **kwargs_)
else:
return idQuota(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='idQuota', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('idQuota')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='idQuota')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='idQuota', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='idQuota'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='idQuota', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class idQuota
class cpfInst(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, cpfInst)
if subclass is not None:
return subclass(*args_, **kwargs_)
if cpfInst.subclass:
return cpfInst.subclass(*args_, **kwargs_)
else:
return cpfInst(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='cpfInst', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('cpfInst')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='cpfInst')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='cpfInst', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='cpfInst'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='cpfInst', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class cpfInst
GDSClassesMapping = {
'altBeneficio': TDadosBeneficio,
'brasil': TEnderecoBrasil,
'dadosBenef': TDadosBenef,
'exterior': TEnderecoExterior,
'ideEmpregador': TEmprPJ,
'ideEvento': TIdeEveTrab,
'iniBeneficio': TDadosBeneficio,
}
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
print(USAGE_TEXT)
sys.exit(1)
def get_root_tag(node):
tag = Tag_pattern_.match(node.tag).groups()[-1]
rootClass = GDSClassesMapping.get(tag)
if rootClass is None:
rootClass = globals().get(tag)
return tag, rootClass
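# For example, a document whose root element is <iniBeneficio> resolves
# through GDSClassesMapping to TDadosBeneficio; tags missing from the mapping
# fall back to a module-level class of the same name via globals().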
def parse(inFileName, silence=False):
parser = None
doc = parsexml_(inFileName, parser)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'eSocial'
rootClass = eSocial
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
if not silence:
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(
sys.stdout, 0, name_=rootTag,
namespacedef_='',
pretty_print=True)
return rootObj
def parseEtree(inFileName, silence=False):
parser = None
doc = parsexml_(inFileName, parser)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'eSocial'
rootClass = eSocial
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
mapping = {}
rootElement = rootObj.to_etree(None, name_=rootTag, mapping_=mapping)
reverse_mapping = rootObj.gds_reverse_node_mapping(mapping)
if not silence:
content = etree_.tostring(
rootElement, pretty_print=True,
xml_declaration=True, encoding="utf-8")
sys.stdout.write(content)
sys.stdout.write('\n')
return rootObj, rootElement, mapping, reverse_mapping
def parseString(inString, silence=False):
if sys.version_info.major == 2:
from StringIO import StringIO as IOBuffer
else:
from io import BytesIO as IOBuffer
parser = None
doc = parsexml_(IOBuffer(inString), parser)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'eSocial'
rootClass = eSocial
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
if not silence:
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(
sys.stdout, 0, name_=rootTag,
namespacedef_='')
return rootObj
def parseLiteral(inFileName, silence=False):
parser = None
doc = parsexml_(inFileName, parser)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'eSocial'
rootClass = eSocial
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
if not silence:
sys.stdout.write('#from evtCdBenPrRP import *\n\n')
sys.stdout.write('import evtCdBenPrRP as model_\n\n')
sys.stdout.write('rootObj = model_.rootClass(\n')
rootObj.exportLiteral(sys.stdout, 0, name_=rootTag)
sys.stdout.write(')\n')
return rootObj
def main():
args = sys.argv[1:]
if len(args) == 1:
parse(args[0])
else:
usage()
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
__all__ = [
"TDadosBenef",
"TDadosBeneficio",
"TEmprPJ",
"TEnderecoBrasil",
"TEnderecoExterior",
"TIdeEveTrab",
"eSocial"
]
| 1.984375 | 2 |
pointcloudset/diff/__init__.py | hugoledoux/pointcloudset | 23 | 12790113 | <reponame>hugoledoux/pointcloudset
"""
Functions to calculate differences and distances between entities.
"""
from pointcloudset.diff.origin import calculate_distance_to_origin
from pointcloudset.diff.plane import calculate_distance_to_plane
from pointcloudset.diff.point import calculate_distance_to_point
from pointcloudset.diff.pointcloud import calculate_distance_to_pointcloud
ALL_DIFFS = {
"pointcloud": calculate_distance_to_pointcloud,
"plane": calculate_distance_to_plane,
"point": calculate_distance_to_point,
"origin": calculate_distance_to_origin,
}
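# A minimal dispatch sketch (the arguments are hypothetical; the real
# signatures live in the pointcloudset.diff submodules imported above):
#
#   diff_func = ALL_DIFFS["plane"]
#   distances = diff_func(pointcloud, plane=(0.0, 0.0, 1.0, 0.0))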
| 2.21875 | 2 |
acq4/devices/ThorlabsFilterWheel/__init__.py | aleonlein/acq4 | 1 | 12790114 | <reponame>aleonlein/acq4
from FilterWheel import *
| 0.976563 | 1 |
sdk/python/pulumi_cloudamqp/get_plugins.py | pulumi/pulumi-cloudamqp | 2 | 12790115 | <filename>sdk/python/pulumi_cloudamqp/get_plugins.py<gh_stars>1-10
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
__all__ = [
'GetPluginsResult',
'AwaitableGetPluginsResult',
'get_plugins',
]
@pulumi.output_type
class GetPluginsResult:
"""
A collection of values returned by getPlugins.
"""
def __init__(__self__, id=None, instance_id=None, plugins=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if instance_id and not isinstance(instance_id, int):
raise TypeError("Expected argument 'instance_id' to be a int")
pulumi.set(__self__, "instance_id", instance_id)
if plugins and not isinstance(plugins, list):
raise TypeError("Expected argument 'plugins' to be a list")
pulumi.set(__self__, "plugins", plugins)
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="instanceId")
def instance_id(self) -> int:
return pulumi.get(self, "instance_id")
@property
@pulumi.getter
def plugins(self) -> Sequence['outputs.GetPluginsPluginResult']:
return pulumi.get(self, "plugins")
class AwaitableGetPluginsResult(GetPluginsResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetPluginsResult(
id=self.id,
instance_id=self.instance_id,
plugins=self.plugins)
def get_plugins(instance_id: Optional[int] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPluginsResult:
"""
Use this data source to retrieve information about installed and available plugins for the CloudAMQP instance.
## Example Usage
```python
import pulumi
import pulumi_cloudamqp as cloudamqp
plugins = cloudamqp.get_plugins(instance_id=cloudamqp_instance["instance"]["id"])
```
## Argument reference
* `instance_id` - (Required) The CloudAMQP instance identifier.
## Attributes reference
All attributes reference are computed
* `id` - The identifier for this resource.
* `plugins` - An array of plugins. Each `plugins` block consists of the fields documented below.
***
The `plugins` block consist of
* `name` - The type of the recipient.
* `version` - Rabbit MQ version that the plugins are shipped with.
* `description` - Description of what the plugin does.
* `enabled` - Enable or disable information for the plugin.
## Dependency
This data source depends on CloudAMQP instance identifier, `cloudamqp_instance.instance.id`.
"""
__args__ = dict()
__args__['instanceId'] = instance_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('cloudamqp:index/getPlugins:getPlugins', __args__, opts=opts, typ=GetPluginsResult).value
return AwaitableGetPluginsResult(
id=__ret__.id,
instance_id=__ret__.instance_id,
plugins=__ret__.plugins)
| 1.929688 | 2 |
mathrepl/evaluator.py | lpozo/mathrepl | 3 | 12790116 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""MathREPL, a math expression evaluator using Python eval() and the math module."""
from . import ALLOWED_NAMES
def evaluate(expression):
"""Evaluate a math expression."""
# Compile the expression eventually raising a SyntaxError
# when the user enters an invalid expression
code = compile(expression, "<string>", "eval")
# Validate allowed names
for name in code.co_names:
if name not in ALLOWED_NAMES:
raise NameError(f"The use of '{name}' is not allowed")
# Evaluate the expression eventually raising a ValueError
# when the user uses a math function with a wrong input value
# e.g. math.sqrt(-10)
return eval(code, {"__builtins__": {}}, ALLOWED_NAMES)
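# A usage sketch, assuming ALLOWED_NAMES exposes the math module's names
# (e.g. "sqrt"):
#
#   >>> evaluate("sqrt(16) + 2")
#   6.0
#   >>> evaluate("__import__('os')")
#   Traceback (most recent call last):
#     ...
#   NameError: The use of '__import__' is not allowed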
| 2.625 | 3 |
measure_mate/migrations/0010_measurement_target_rating.py | niche-tester/measure-mate | 15 | 12790117 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-30 00:57
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('measure_mate', '0009_auto_20160124_1245'),
]
operations = [
migrations.AddField(
model_name='measurement',
name='target_rating',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='target_measurements', to='measure_mate.Rating'),
),
]
| 1.429688 | 1 |
jnpr/openclos/tests/unit/test_writer.py | sysbot/OpenClos | 1 | 12790118 | '''
Created on Aug 26, 2014
@author: preethi
'''
import os
import sys
import shutil
sys.path.insert(0,os.path.abspath(os.path.dirname(__file__) + '/' + '../..')) #trick to make it run from CLI
import unittest
import sqlalchemy
from sqlalchemy.orm import sessionmaker
import pydot
from jnpr.openclos.model import Pod, Device, InterfaceDefinition, InterfaceLogical, Interface, Base
from jnpr.openclos.writer import WriterBase, ConfigWriter, CablingPlanWriter
from jnpr.openclos.util import configLocation
from jnpr.openclos.dao import Dao
from test_model import createPod, createPodDevice
from flexmock import flexmock
class TestWriterBase(unittest.TestCase):
def setUp(self):
self.conf = {}
self.conf['outputDir'] = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'out')
self.conf['dbUrl'] = 'sqlite:///'
self.conf['DOT'] = {'ranksep' : '5 equally', 'colors': ['red', 'green', 'blue']}
self.conf['deviceFamily'] = {
"QFX5100-24Q": {
"ports": 'et-0/0/[0-23]'
},
"QFX5100-48S": {
"uplinkPorts": 'et-0/0/[48-53]',
"downlinkPorts": 'xe-0/0/[0-47]'
}
}
self.dao = Dao(self.conf)
        # Delete the 'out' folder under the test dir
shutil.rmtree(self.conf['outputDir'], ignore_errors=True)
def tearDown(self):
''' Deletes 'out' folder under test dir'''
shutil.rmtree(self.conf['outputDir'], ignore_errors=True)
class TestConfigWriter(TestWriterBase):
def testWrite(self):
pod = createPod('pod1', self.dao.Session())
device = Device('test_device', "",'admin', 'admin', 'spine', "", "", pod)
configWriter = ConfigWriter(self.conf, pod, self.dao)
configWriter.write(device, "dummy config")
self.assertTrue(os.path.exists(configWriter.outputDir + '/test_device.conf'))
class TestCablingPlanWriter(TestWriterBase):
def testInitWithTemplate(self):
from jinja2 import TemplateNotFound
pod = createPod('pod1', self.dao.Session())
cablingPlanWriter = CablingPlanWriter(self.conf, pod, self.dao)
self.assertIsNotNone(cablingPlanWriter.template)
with self.assertRaises(TemplateNotFound) as e:
cablingPlanWriter.templateEnv.get_template('unknown-template')
self.assertTrue('unknown-template' in e.exception.message)
def testCreateDeviceInGraph(self):
testDeviceTopology = pydot.Dot(graph_type='graph', )
pod = createPod('pod1', self.dao.Session())
cablingPlanWriter = CablingPlanWriter(self.conf, pod, self.dao)
device = createPodDevice(self.dao.Session(), 'Preethi', pod)
device.id = 'preethi-1'
cablingPlanWriter.createDeviceInGraph(device.name, device, testDeviceTopology)
path = cablingPlanWriter.outputDir + '/testDevicelabel.dot'
testDeviceTopology.write_raw(path)
data = open(path, 'r').read()
#check the generated label for device
self.assertTrue('"preethi-1" [shape=record, label=Preethi];' in data)
def testcreateLinksInGraph(self):
testLinksInTopology = pydot.Dot(graph_type='graph')
pod = createPod('pod1', self.dao.Session())
cablingPlanWriter = CablingPlanWriter(self.conf, pod, self.dao)
deviceOne = Device('spine01',"", 'admin', 'admin', 'spine', "", "", pod)
deviceOne.id = 'spine01'
IF1 = InterfaceDefinition('IF1', deviceOne, 'downlink')
IF1.id = 'IF1'
deviceTwo = Device('leaf01',"", 'admin', 'admin', 'leaf', "", "", pod)
deviceTwo.id = 'leaf01'
IF21 = InterfaceDefinition('IF1', deviceTwo, 'uplink')
IF21.id = 'IF21'
IF1.peer = IF21
IF21.peer = IF1
linkLabel = {deviceOne.id + ':' + IF1.id : deviceTwo.id + ':' + IF21.id}
cablingPlanWriter.createLinksInGraph(linkLabel, testLinksInTopology, 'red')
path = cablingPlanWriter.outputDir + '/testLinklabel.dot'
testLinksInTopology.write_raw(path)
data = open(path, 'r').read()
#check generated label for links
self.assertTrue('spine01:IF1 -- leaf01:IF21 [color=red];' in data)
def testcreateDOTFile(self):
# create pod
# create device
#create interface
session = self.dao.Session()
pod = createPod('pod1', session)
cablingPlanWriter = CablingPlanWriter(self.conf, pod, self.dao)
deviceOne = Device('spine01',"", 'admin', 'admin', 'spine', "", "", pod)
session.add(deviceOne)
IF1 = InterfaceDefinition('IF1', deviceOne, 'downlink')
session.add(IF1)
IF2 = InterfaceDefinition('IF2', deviceOne, 'downlink')
session.add(IF2)
deviceTwo = Device('leaf01',"", 'admin', 'admin', 'leaf', "", "", pod)
session.add(deviceTwo)
IF21 = InterfaceDefinition('IF1', deviceTwo, 'uplink')
session.add(IF21)
IF22 = InterfaceDefinition('IF2', deviceTwo, 'uplink')
session.add(IF22)
IF23 = InterfaceDefinition('IF3', deviceTwo, 'downlink')
session.add(IF23)
IF24 = InterfaceDefinition('IF3', deviceTwo, 'downlink')
session.add(IF24)
deviceThree = Device('Access01', "",'admin', 'admin', 'leaf', "", "", pod)
session.add(deviceThree)
IF31 = InterfaceDefinition('IF1', deviceThree, 'uplink')
session.add(IF31)
IF32 = InterfaceDefinition('IF2', deviceThree, 'uplink')
session.add(IF32)
IF1.peer = IF21
IF2.peer = IF22
IF21.peer = IF1
IF22.peer = IF2
IF23.peer = IF31
IF31.peer = IF23
IF24.peer = IF32
IF32.peer = IF24
session.commit()
devices = session.query(Device).all()
#check the DOT file is generated
cablingPlanWriter.writeDOT()
data = open(cablingPlanWriter.outputDir + '/cablingPlan.dot', 'r').read()
#check generated label for links
self.assertTrue('splines=polyline;' in data)
| 1.96875 | 2 |
py_particle_processor_qt/tools/OrbitTool/__init__.py | DanielWinklehner/py_particle_processor | 0 | 12790119 | from py_particle_processor_qt.tools.OrbitTool.OrbitTool import *
| 1.023438 | 1 |
hyquest/verifiers/timemap.py | Edmonton-Public-Library/centennial | 0 | 12790120 | from hyquest.verifiers.common import getTaskResultSet, getUserAction
from hyquest.constants import TASK_TIMEMAP
# This handles matching up TimeMap state to associated TimeMap Tasks
def matchingTimeMapTasks(user, timeMapState):
tasks = getTaskResultSet(user).filter(type=TASK_TIMEMAP)
activeTasks = []
otherTasks = []
for task in tasks:
if timeMapMatches(task, timeMapState):
action = getUserAction(user, task)
if action is None:
otherTasks.append(task)
elif action.complete != True:
activeTasks.append(task)
return (activeTasks, otherTasks)
def timeMapMatches(task, timeMapState):
reqs = task.getInfoReqs()
if 'minYear' in reqs and ('year' not in timeMapState or int(timeMapState['year'])+5 < int(reqs['minYear'])):
print "year before minYear"
return False
if 'maxYear' in reqs and ('year' not in timeMapState or int(timeMapState['year'])-5 > int(reqs['maxYear'])):
print "year after maxYear"
return False
if 'branch' in reqs and ('branch' not in timeMapState or reqs['branch'] != str(timeMapState['branch'])):
print "incorrect branch"
return False
if 'story' in reqs and ('story' not in timeMapState or reqs['story'] != str(timeMapState['story'])):
print "incorrect story"
return False
if 'onMap' in reqs and ('onMap' not in timeMapState or reqs['onMap'] != str(timeMapState['onMap'])):
print "On Map does not match"
return False
return True
def getTimeMapReqs(task):
taskReqs = task.taskinfo.split(';')
requirements = {}
for req in taskReqs:
if '=' in req:
reqSplit = req.split('=')
requirements[reqSplit[0]] = reqSplit[1]
return requirements
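# Illustration with a hypothetical taskinfo string:
#   task.taskinfo = "minYear=1950;maxYear=1960;branch=7"
#   getTimeMapReqs(task) -> {'minYear': '1950', 'maxYear': '1960', 'branch': '7'}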
| 2.671875 | 3 |
app.py | 15281029/translation | 0 | 12790121 | # -*- coding: utf-8 -*-
from flask import Flask, request, make_response
import requests
import json
from core import Translation, RequestJson, PBMT
from bean import log
app = Flask(__name__)
def buildResponse(code, msg):
json_data = dict()
json_data['code'] = code
json_data['message'] = msg
response = make_response(json.dumps(json_data, sort_keys=True))
response.headers['Content-type'] = 'application/json; charset=utf-8'
return response
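# For example, buildResponse(200, "ok") yields an HTTP response whose body is
# {"code": 200, "message": "ok"} served as application/json.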
'''
=================Translation=====================
method: POST
headers: Authorization: [your api key]
type: json
{
"text":[text],
"taget":[target language]
}
return: json
{
"code":[status code],
"message":[translation text]
}
'''
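# A minimal client sketch (assumes the server runs on localhost:81 as in the
# __main__ block below and that "demo-key" is an API key known to the backend):
#
#   import requests
#   resp = requests.post('http://localhost:81/languages/api/translate',
#                        headers={'Authorization': 'demo-key'},
#                        json={'text': 'hello', 'target': 'zh'})
#   print(resp.json())  # e.g. {"code": 200, "message": "<translated text>"}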
@app.route('/languages/api/translate', methods=['GET', 'POST'])
def translate():
ip = request.remote_addr
if request.method != 'POST':
return buildResponse(403, "Method Not Allowed. ")
else:
try:
token = request.headers['Authorization']
except Exception:
return buildResponse(403, "API key not valid. Please pass a valid API key. ")
tobj = Translation(token)
jsondict = request.get_json()
try:
rjson = RequestJson(**jsondict)
except Exception:
log.writelogs(token, ip, '[Failed] Required field error. ')
return buildResponse(400, "Required field error. ")
rlist = tobj.translate(text=rjson.text, target=rjson.target)
if rlist[0] == 200:
log.writelogs(token, ip, '[Succeed]')
else:
log.writelogs(token, ip, '[Failed] '+rlist[1])
return buildResponse(code=rlist[0], msg=rlist[1])
'''
=================Logs=====================
method: GET
headers: Authorization: [your api key]
type: NULL
return: json
{
"code":[status code],
"message":[calling log]
}
'''
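# A matching client sketch (same assumptions as the translate example above):
#
#   resp = requests.get('http://localhost:81/languages/api/logs',
#                       headers={'Authorization': 'demo-key'})
#   print(resp.json())  # {"code": 200, "message": [<calling log entries>]}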
@app.route('/languages/api/logs', methods=['GET', 'POST'])
def getlog():
if request.method != 'GET':
return buildResponse(403, "Method Not Allowed. ")
else:
try:
token = request.headers['Authorization']
logs = log.getlogs(token)
if logs:
logs = [(str(lo[0]), lo[1], lo[2], lo[3]) for lo in logs]
return buildResponse(200, logs)
elif logs == []:
return buildResponse(200, [])
elif logs is None:
return buildResponse(403, "API key not valid. Please pass a valid API key. ")
except Exception:
return buildResponse(500, "Query log exception. ")
@app.route('/languages/support', methods=['GET', 'POST'])
def support_languages():
if request.method != 'GET':
return buildResponse(403, "Method Not Allowed. ")
else:
return buildResponse(200, PBMT)
if __name__ == '__main__':
app.run('0.0.0.0', 81, debug=True)
| 2.421875 | 2 |
html_parsing/get_price_game/from_gama-gama.py | DazEB2/SimplePyScripts | 117 | 12790122 | <gh_stars>100-1000
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# Based on http://stackoverflow.com/a/37755811/5909792
def get_html(url):
from PyQt5.QtCore import QUrl
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWebEngineWidgets import QWebEnginePage
class ExtractorHtml:
def __init__(self, url):
_app = QApplication([])
self._page = QWebEnginePage()
self._page.loadFinished.connect(self._load_finished_handler)
self.html = None
            # A small workaround to fetch the content of http://gama-gama.ru:
            # the page loads twice, first a tricky piece of JavaScript code,
            # then the site page with the actual content
self._counter_finished = 0
self._page.load(QUrl(url))
            # Wait for the page to load and for its content to arrive.
            # This loop turns the asynchronous code into synchronous code
while self.html is None:
_app.processEvents()
_app.quit()
            # To avoid script crashes
self._page = None
def _callable(self, data):
self.html = data
def _load_finished_handler(self, _):
self._counter_finished += 1
if self._counter_finished == 2:
self._page.toHtml(self._callable)
return ExtractorHtml(url).html
text = 'mad'
url = 'http://gama-gama.ru/search/?searchField=' + text
html = get_html(url)
from bs4 import BeautifulSoup
root = BeautifulSoup(html, 'lxml')
for game in root.select('.catalog-content > a'):
name = game['title'].strip()
name = name.replace('Купить ', '')
price = None
price_holder = game.select_one('.catalog_price_holder')
price_1 = price_holder.select_one('.price_1')
if price_1:
price = price_1.text.strip()
else:
        # Contains the discounted-price markup; extract the discounted price
price_2 = price_holder.select_one('.price_2')
if price_2:
price = price_2.select_one('.price_group > .promo_price').text
            # Collapse runs of whitespace into a single space
import re
price = re.sub(r'\s+', ' ', price)
price = price.strip()
print(name, price)
| 2.421875 | 2 |
tests/test_kmeans.py | joezuntz/TreeCorr | 0 | 12790123 | # Copyright (c) 2003-2019 by <NAME>
#
# TreeCorr is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
from __future__ import print_function
import numpy as np
import os
import time
import coord
import warnings
import treecorr
from test_helper import get_from_wiki, CaptureLog, assert_raises, profile, timer
@timer
def test_dessv():
try:
import fitsio
except ImportError:
print('Skipping dessv test, since fitsio is not installed')
return
#treecorr.set_omp_threads(1);
get_from_wiki('des_sv.fits')
file_name = os.path.join('data','des_sv.fits')
cat = treecorr.Catalog(file_name, ra_col='ra', dec_col='dec', ra_units='deg', dec_units='deg')
# Use an odd number to make sure we force some of the shuffle bits in InitializeCenters
# to happen.
npatch = 43
field = cat.getNField(max_top=5)
t0 = time.time()
patches, cen = field.run_kmeans(npatch)
t1 = time.time()
print('patches = ',np.unique(patches))
assert len(patches) == cat.ntot
assert min(patches) == 0
assert max(patches) == npatch-1
# Check the returned center to a direct calculation.
xyz = np.array([cat.x, cat.y, cat.z]).T
direct_cen = np.array([xyz[patches==i].mean(axis=0) for i in range(npatch)])
direct_cen /= np.sqrt(np.sum(direct_cen**2,axis=1)[:,np.newaxis])
np.testing.assert_allclose(cen, direct_cen, atol=1.e-3)
# KMeans minimizes the total inertia.
# Check this value and the rms size, which should also be quite small.
inertia = np.array([np.sum((xyz[patches==i] - cen[i])**2) for i in range(npatch)])
sizes = np.array([np.mean((xyz[patches==i] - cen[i])**2) for i in range(npatch)])**0.5
sizes *= 180. / np.pi * 60. # convert to arcmin
counts = np.array([np.sum(patches==i) for i in range(npatch)])
print('With standard algorithm:')
print('time = ',t1-t0)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
print('mean size = ',np.mean(sizes))
print('rms size = ',np.std(sizes))
assert np.sum(inertia) < 200. # This is specific to this particular field and npatch.
assert np.std(inertia) < 0.3 * np.mean(inertia) # rms is usually < 0.2 * mean
assert np.std(sizes) < 0.1 * np.mean(sizes) # sizes have even less spread usually.
# Should all have similar number of points. Nothing is required here though.
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
# Check the alternate algorithm. rms inertia should be lower.
t0 = time.time()
patches, cen = field.run_kmeans(npatch, alt=True)
t1 = time.time()
assert len(patches) == cat.ntot
assert min(patches) == 0
assert max(patches) == npatch-1
inertia = np.array([np.sum((xyz[patches==i] - cen[i])**2) for i in range(npatch)])
sizes = np.array([np.mean((xyz[patches==i] - cen[i])**2) for i in range(npatch)])**0.5
sizes *= 180. / np.pi * 60. # convert to arcmin
counts = np.array([np.sum(patches==i) for i in range(npatch)])
print('With alternate algorithm:')
print('time = ',t1-t0)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
print('mean size = ',np.mean(sizes))
print('rms size = ',np.std(sizes))
assert np.sum(inertia) < 200. # Total shouldn't increase much. (And often decreases.)
assert np.std(inertia) < 0.15 * np.mean(inertia) # rms should be even smaller here.
assert np.std(sizes) < 0.1 * np.mean(sizes) # This is only a little bit smaller.
# This doesn't keep the counts as equal as the standard algorithm.
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
# Finally, use a field with lots of top level cells to check the other branch in
# InitializeCenters.
field = cat.getNField(min_top=10)
t0 = time.time()
patches, cen = field.run_kmeans(npatch)
t1 = time.time()
assert len(patches) == cat.ntot
assert min(patches) == 0
assert max(patches) == npatch-1
inertia = np.array([np.sum((xyz[patches==i] - cen[i])**2) for i in range(npatch)])
sizes = np.array([np.mean((xyz[patches==i] - cen[i])**2) for i in range(npatch)])**0.5
sizes *= 180. / np.pi * 60. # convert to arcmin
counts = np.array([np.sum(patches==i) for i in range(npatch)])
# This doesn't give as good an initialization, so these are a bit worse usually.
print('With min_top=10:')
print('time = ',t1-t0)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
print('mean size = ',np.mean(sizes))
print('rms size = ',np.std(sizes))
assert np.sum(inertia) < 210.
assert np.std(inertia) < 0.4 * np.mean(inertia) # I've seen over 0.3 x mean here.
assert np.std(sizes) < 0.15 * np.mean(sizes)
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
@timer
def test_radec():
# Very similar to the above, but with a random set of points, so it will run even
# if the user doesn't have fitsio installed.
# In addition, we add weights to make sure that works.
ngal = 100000
s = 10.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) ) + 100 # Put everything at large y, so smallish angle on sky
z = rng.normal(0,s, (ngal,) )
w = rng.random_sample(ngal)
ra, dec = coord.CelestialCoord.xyz_to_radec(x,y,z)
print('minra = ',np.min(ra) * coord.radians / coord.degrees)
print('maxra = ',np.max(ra) * coord.radians / coord.degrees)
print('mindec = ',np.min(dec) * coord.radians / coord.degrees)
print('maxdec = ',np.max(dec) * coord.radians / coord.degrees)
cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', w=w)
npatch = 111
field = cat.getNField()
t0 = time.time()
p, cen = field.run_kmeans(npatch)
t1 = time.time()
print('patches = ',np.unique(p))
assert len(p) == cat.ntot
assert min(p) == 0
assert max(p) == npatch-1
# Check the returned center to a direct calculation.
xyz = np.array([cat.x, cat.y, cat.z]).T
direct_cen = np.array([np.average(xyz[p==i], axis=0, weights=w[p==i]) for i in range(npatch)])
direct_cen /= np.sqrt(np.sum(direct_cen**2,axis=1)[:,np.newaxis])
np.testing.assert_allclose(cen, direct_cen, atol=2.e-3)
inertia = np.array([np.sum(w[p==i][:,None] * (xyz[p==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
print('With standard algorithm:')
print('time = ',t1-t0)
print('inertia = ',inertia)
print('counts = ',counts)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 200. # This is specific to this particular field and npatch.
    assert np.std(inertia) < 0.3 * np.mean(inertia)  # rms is usually small compared to the mean
# With weights, these aren't actually all that similar. The range is more than a
# factor of 10. I think because it varies whether high weight points happen to be near the
# edges or middles of patches, so the total weight varies when you target having the
# inertias be relatively similar.
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
# Check the alternate algorithm. rms inertia should be lower.
t0 = time.time()
p, cen = field.run_kmeans(npatch, alt=True)
t1 = time.time()
assert len(p) == cat.ntot
assert min(p) == 0
assert max(p) == npatch-1
inertia = np.array([np.sum(w[p==i][:,None] * (xyz[p==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
print('With alternate algorithm:')
print('time = ',t1-t0)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 200. # Total shouldn't increase much. (And often decreases.)
assert np.std(inertia) < 0.15 * np.mean(inertia) # rms should be even smaller here.
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
# Finally, use a field with lots of top level cells to check the other branch in
# InitializeCenters.
field = cat.getNField(min_top=10)
t0 = time.time()
p, cen = field.run_kmeans(npatch)
t1 = time.time()
assert len(p) == cat.ntot
assert min(p) == 0
assert max(p) == npatch-1
inertia = np.array([np.sum(w[p==i][:,None] * (xyz[p==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
# This doesn't give as good an initialization, so these are a bit worse usually.
print('With min_top=10:')
print('time = ',t1-t0)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 210.
assert np.std(inertia) < 0.4 * np.mean(inertia) # I've seen over 0.3 x mean here.
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
@timer
def test_3d():
# Like the above, but using x,y,z positions.
ngal = 100000
s = 1.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) )
z = rng.normal(0,s, (ngal,) )
w = rng.random_sample(ngal) + 1
cat = treecorr.Catalog(x=x, y=y, z=z, w=w)
npatch = 111
field = cat.getNField()
t0 = time.time()
p, cen = field.run_kmeans(npatch)
t1 = time.time()
print('patches = ',np.unique(p))
assert len(p) == cat.ntot
assert min(p) == 0
assert max(p) == npatch-1
xyz = np.array([x, y, z]).T
inertia = np.array([np.sum(w[p==i][:,None] * (xyz[p==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
print('With standard algorithm:')
print('time = ',t1-t0)
print('inertia = ',inertia)
print('counts = ',counts)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 33000.
    assert np.std(inertia) < 0.3 * np.mean(inertia)  # rms is usually small compared to the mean
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
# Should be the same thing with ra, dec, ra
ra, dec = coord.CelestialCoord.xyz_to_radec(x,y,z)
r = (x**2 + y**2 + z**2)**0.5
cat2 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', r=r, w=w)
field = cat2.getNField()
t0 = time.time()
p2, cen = field.run_kmeans(npatch)
t1 = time.time()
inertia = np.array([np.sum(w[p2==i][:,None] * (xyz[p2==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p2==i]) for i in range(npatch)])
print('time = ',t1-t0)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 33000.
    assert np.std(inertia) < 0.3 * np.mean(inertia)  # rms is usually small compared to the mean
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
# Check the alternate algorithm. rms inertia should be lower.
t0 = time.time()
p, cen = field.run_kmeans(npatch, alt=True)
t1 = time.time()
assert len(p) == cat.ntot
assert min(p) == 0
assert max(p) == npatch-1
inertia = np.array([np.sum(w[p==i][:,None] * (xyz[p==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
print('With alternate algorithm:')
print('time = ',t1-t0)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 33000.
assert np.std(inertia) < 0.1 * np.mean(inertia) # rms should be even smaller here.
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
# Finally, use a field with lots of top level cells to check the other branch in
# InitializeCenters.
field = cat.getNField(min_top=10)
t0 = time.time()
p, cen = field.run_kmeans(npatch)
t1 = time.time()
assert len(p) == cat.ntot
assert min(p) == 0
assert max(p) == npatch-1
inertia = np.array([np.sum(w[p==i][:,None] * (xyz[p==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
# This doesn't give as good an initialization, so these are a bit worse usually.
print('With min_top=10:')
print('time = ',t1-t0)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 33000.
assert np.std(inertia) < 0.4 * np.mean(inertia) # I've seen over 0.3 x mean here.
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
@timer
def test_2d():
# Like the above, but using x,y positions.
# An additional check here is that this works with other fields besides NField, even though
    # in practice NField will almost always be the kind of Field used.
ngal = 100000
s = 1.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) )
w = rng.random_sample(ngal) + 1
g1 = rng.normal(0,s, (ngal,) )
g2 = rng.normal(0,s, (ngal,) )
k = rng.normal(0,s, (ngal,) )
cat = treecorr.Catalog(x=x, y=y, w=w, g1=g1, g2=g2, k=k)
npatch = 111
field = cat.getGField()
t0 = time.time()
p, cen = field.run_kmeans(npatch)
t1 = time.time()
print('patches = ',np.unique(p))
assert len(p) == cat.ntot
assert min(p) == 0
assert max(p) == npatch-1
xy = np.array([x, y]).T
inertia = np.array([np.sum(w[p==i][:,None] * (xy[p==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
print('With standard algorithm:')
print('time = ',t1-t0)
print('inertia = ',inertia)
print('counts = ',counts)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 5300.
    assert np.std(inertia) < 0.3 * np.mean(inertia)  # rms is usually small compared to the mean
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
# Check the alternate algorithm. rms inertia should be lower.
t0 = time.time()
p, cen = field.run_kmeans(npatch, alt=True)
t1 = time.time()
assert len(p) == cat.ntot
assert min(p) == 0
assert max(p) == npatch-1
inertia = np.array([np.sum(w[p==i][:,None] * (xy[p==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
print('With alternate algorithm:')
print('time = ',t1-t0)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 5300.
assert np.std(inertia) < 0.1 * np.mean(inertia) # rms should be even smaller here.
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
# Finally, use a field with lots of top level cells to check the other branch in
# InitializeCenters.
field = cat.getKField(min_top=10)
t0 = time.time()
p, cen = field.run_kmeans(npatch)
t1 = time.time()
assert len(p) == cat.ntot
assert min(p) == 0
assert max(p) == npatch-1
inertia = np.array([np.sum(w[p==i][:,None] * (xy[p==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
# This doesn't give as good an initialization, so these are a bit worse usually.
print('With min_top=10:')
print('time = ',t1-t0)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 5300.
assert np.std(inertia) < 0.4 * np.mean(inertia) # I've seen over 0.3 x mean here.
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
@timer
def test_init_random():
# Test the init=random option
ngal = 100000
s = 1.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) )
z = rng.normal(0,s, (ngal,) )
cat = treecorr.Catalog(x=x, y=y, z=z)
xyz = np.array([x, y, z]).T
# Skip the refine_centers step.
print('3d with init=random')
npatch = 10
field = cat.getNField()
cen1 = field.kmeans_initialize_centers(npatch, 'random')
assert cen1.shape == (npatch, 3)
p1 = field.kmeans_assign_patches(cen1)
print('patches = ',np.unique(p1))
assert len(p1) == cat.ntot
assert min(p1) == 0
assert max(p1) == npatch-1
inertia1 = np.array([np.sum((xyz[p1==i] - cen1[i])**2) for i in range(npatch)])
counts1 = np.array([np.sum(p1==i) for i in range(npatch)])
print('counts = ',counts1)
print('rms counts = ',np.std(counts1))
print('total inertia = ',np.sum(inertia1))
# Now run the normal way
# Use higher max_iter, since random isn't a great initialization.
p2, cen2 = field.run_kmeans(npatch, init='random', max_iter=1000)
inertia2 = np.array([np.sum((xyz[p2==i] - cen2[i])**2) for i in range(npatch)])
counts2 = np.array([np.sum(p2==i) for i in range(npatch)])
print('rms counts => ',np.std(counts2))
print('total inertia => ',np.sum(inertia2))
assert np.sum(inertia2) < np.sum(inertia1)
# Use a field with lots of top level cells
print('3d with init=random, min_top=10')
field = cat.getNField(min_top=10)
cen1 = field.kmeans_initialize_centers(npatch, 'random')
assert cen1.shape == (npatch, 3)
p1 = field.kmeans_assign_patches(cen1)
print('patches = ',np.unique(p1))
assert len(p1) == cat.ntot
assert min(p1) == 0
assert max(p1) == npatch-1
inertia1 = np.array([np.sum((xyz[p1==i] - cen1[i])**2) for i in range(npatch)])
counts1 = np.array([np.sum(p1==i) for i in range(npatch)])
print('counts = ',counts1)
print('rms counts = ',np.std(counts1))
print('total inertia = ',np.sum(inertia1))
# Now run the normal way
p2, cen2 = field.run_kmeans(npatch, init='random', max_iter=1000)
inertia2 = np.array([np.sum((xyz[p2==i] - cen2[i])**2) for i in range(npatch)])
counts2 = np.array([np.sum(p2==i) for i in range(npatch)])
print('rms counts => ',np.std(counts2))
print('total inertia => ',np.sum(inertia2))
assert np.sum(inertia2) < np.sum(inertia1)
# Repeat in 2d
print('2d with init=random')
cat = treecorr.Catalog(x=x, y=y)
xy = np.array([x, y]).T
field = cat.getNField()
cen1 = field.kmeans_initialize_centers(npatch, 'random')
assert cen1.shape == (npatch, 2)
p1 = field.kmeans_assign_patches(cen1)
print('patches = ',np.unique(p1))
assert len(p1) == cat.ntot
assert min(p1) == 0
assert max(p1) == npatch-1
inertia1 = np.array([np.sum((xy[p1==i] - cen1[i])**2) for i in range(npatch)])
counts1 = np.array([np.sum(p1==i) for i in range(npatch)])
print('counts = ',counts1)
print('rms counts = ',np.std(counts1))
print('total inertia = ',np.sum(inertia1))
# Now run the normal way
p2, cen2 = field.run_kmeans(npatch, init='random', max_iter=1000)
inertia2 = np.array([np.sum((xy[p2==i] - cen2[i])**2) for i in range(npatch)])
counts2 = np.array([np.sum(p2==i) for i in range(npatch)])
print('rms counts => ',np.std(counts2))
print('total inertia => ',np.sum(inertia2))
assert np.sum(inertia2) < np.sum(inertia1)
# Repeat in spherical
print('spher with init=random')
ra, dec = coord.CelestialCoord.xyz_to_radec(x,y,z)
cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad')
xyz = np.array([cat.x, cat.y, cat.z]).T
field = cat.getNField()
cen1 = field.kmeans_initialize_centers(npatch, 'random')
assert cen1.shape == (npatch, 3)
p1 = field.kmeans_assign_patches(cen1)
print('patches = ',np.unique(p1))
assert len(p1) == cat.ntot
assert min(p1) == 0
assert max(p1) == npatch-1
inertia1 = np.array([np.sum((xyz[p1==i] - cen1[i])**2) for i in range(npatch)])
counts1 = np.array([np.sum(p1==i) for i in range(npatch)])
print('counts = ',counts1)
print('rms counts = ',np.std(counts1))
print('total inertia = ',np.sum(inertia1))
# Now run the normal way
p2, cen2 = field.run_kmeans(npatch, init='random', max_iter=1000)
inertia2 = np.array([np.sum((xyz[p2==i] - cen2[i])**2) for i in range(npatch)])
counts2 = np.array([np.sum(p2==i) for i in range(npatch)])
print('rms counts => ',np.std(counts2))
print('total inertia => ',np.sum(inertia2))
assert np.sum(inertia2) < np.sum(inertia1)
with assert_raises(ValueError):
field.run_kmeans(npatch, init='invalid')
with assert_raises(ValueError):
field.kmeans_initialize_centers(npatch, init='invalid')
with assert_raises(ValueError):
field.kmeans_initialize_centers(npatch=ngal*2, init='random')
with assert_raises(ValueError):
field.kmeans_initialize_centers(npatch=ngal+1, init='random')
with assert_raises(ValueError):
field.kmeans_initialize_centers(npatch=0, init='random')
with assert_raises(ValueError):
field.kmeans_initialize_centers(npatch=-100, init='random')
# Should be valid to give npatch = 1, although not particularly useful.
cen_1 = field.kmeans_initialize_centers(npatch=1, init='random')
p_1 = field.kmeans_assign_patches(cen_1)
np.testing.assert_equal(p_1, np.zeros(ngal))
# If same number of patches as galaxies, each galaxy gets a patch.
# (This is stupid of course, but check that it doesn't fail.)
# Do this with fewer points though, since it's not particularly fast with N=10^5.
n = 100
cat = treecorr.Catalog(ra=ra[:n], dec=dec[:n], ra_units='rad', dec_units='rad')
field = cat.getNField()
cen_n = field.kmeans_initialize_centers(npatch=n, init='random')
p_n = field.kmeans_assign_patches(cen_n)
np.testing.assert_equal(sorted(p_n), list(range(n)))
@timer
def test_init_kmpp():
    # Test the init=kmeans++ option
ngal = 100000
s = 1.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) )
z = rng.normal(0,s, (ngal,) )
cat = treecorr.Catalog(x=x, y=y, z=z)
xyz = np.array([x, y, z]).T
# Skip the refine_centers step.
print('3d with init=kmeans++')
npatch = 10
field = cat.getNField()
cen1 = field.kmeans_initialize_centers(npatch, 'kmeans++')
assert cen1.shape == (npatch, 3)
p1 = field.kmeans_assign_patches(cen1)
print('patches = ',np.unique(p1))
assert len(p1) == cat.ntot
assert min(p1) == 0
assert max(p1) == npatch-1
inertia1 = np.array([np.sum((xyz[p1==i] - cen1[i])**2) for i in range(npatch)])
counts1 = np.array([np.sum(p1==i) for i in range(npatch)])
print('counts = ',counts1)
print('rms counts = ',np.std(counts1))
print('total inertia = ',np.sum(inertia1))
# Now run the normal way
# Use higher max_iter, since random isn't a great initialization.
p2, cen2 = field.run_kmeans(npatch, init='kmeans++', max_iter=1000)
inertia2 = np.array([np.sum((xyz[p2==i] - cen2[i])**2) for i in range(npatch)])
counts2 = np.array([np.sum(p2==i) for i in range(npatch)])
print('rms counts => ',np.std(counts2))
print('total inertia => ',np.sum(inertia2))
assert np.sum(inertia2) < np.sum(inertia1)
# Use a field with lots of top level cells
print('3d with init=kmeans++, min_top=10')
field = cat.getNField(min_top=10)
cen1 = field.kmeans_initialize_centers(npatch, 'kmeans++')
assert cen1.shape == (npatch, 3)
p1 = field.kmeans_assign_patches(cen1)
print('patches = ',np.unique(p1))
assert len(p1) == cat.ntot
assert min(p1) == 0
assert max(p1) == npatch-1
inertia1 = np.array([np.sum((xyz[p1==i] - cen1[i])**2) for i in range(npatch)])
counts1 = np.array([np.sum(p1==i) for i in range(npatch)])
print('counts = ',counts1)
print('rms counts = ',np.std(counts1))
print('total inertia = ',np.sum(inertia1))
# Now run the normal way
p2, cen2 = field.run_kmeans(npatch, init='kmeans++', max_iter=1000)
inertia2 = np.array([np.sum((xyz[p2==i] - cen2[i])**2) for i in range(npatch)])
counts2 = np.array([np.sum(p2==i) for i in range(npatch)])
print('rms counts => ',np.std(counts2))
print('total inertia => ',np.sum(inertia2))
assert np.sum(inertia2) < np.sum(inertia1)
# Repeat in 2d
print('2d with init=kmeans++')
cat = treecorr.Catalog(x=x, y=y)
xy = np.array([x, y]).T
field = cat.getNField()
cen1 = field.kmeans_initialize_centers(npatch, 'kmeans++')
assert cen1.shape == (npatch, 2)
p1 = field.kmeans_assign_patches(cen1)
print('patches = ',np.unique(p1))
assert len(p1) == cat.ntot
assert min(p1) == 0
assert max(p1) == npatch-1
inertia1 = np.array([np.sum((xy[p1==i] - cen1[i])**2) for i in range(npatch)])
counts1 = np.array([np.sum(p1==i) for i in range(npatch)])
print('counts = ',counts1)
print('rms counts = ',np.std(counts1))
print('total inertia = ',np.sum(inertia1))
# Now run the normal way
p2, cen2 = field.run_kmeans(npatch, init='kmeans++', max_iter=1000)
inertia2 = np.array([np.sum((xy[p2==i] - cen2[i])**2) for i in range(npatch)])
counts2 = np.array([np.sum(p2==i) for i in range(npatch)])
print('rms counts => ',np.std(counts2))
print('total inertia => ',np.sum(inertia2))
assert np.sum(inertia2) < np.sum(inertia1)
# Repeat in spherical
print('spher with init=kmeans++')
ra, dec = coord.CelestialCoord.xyz_to_radec(x,y,z)
cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad')
xyz = np.array([cat.x, cat.y, cat.z]).T
field = cat.getNField()
cen1 = field.kmeans_initialize_centers(npatch, 'kmeans++')
assert cen1.shape == (npatch, 3)
p1 = field.kmeans_assign_patches(cen1)
print('patches = ',np.unique(p1))
assert len(p1) == cat.ntot
assert min(p1) == 0
assert max(p1) == npatch-1
inertia1 = np.array([np.sum((xyz[p1==i] - cen1[i])**2) for i in range(npatch)])
counts1 = np.array([np.sum(p1==i) for i in range(npatch)])
print('counts = ',counts1)
print('rms counts = ',np.std(counts1))
print('total inertia = ',np.sum(inertia1))
# Now run the normal way
p2, cen2 = field.run_kmeans(npatch, init='kmeans++', max_iter=1000)
inertia2 = np.array([np.sum((xyz[p2==i] - cen2[i])**2) for i in range(npatch)])
counts2 = np.array([np.sum(p2==i) for i in range(npatch)])
print('rms counts => ',np.std(counts2))
print('total inertia => ',np.sum(inertia2))
assert np.sum(inertia2) < np.sum(inertia1)
with assert_raises(ValueError):
field.kmeans_initialize_centers(npatch=ngal*2, init='kmeans++')
with assert_raises(ValueError):
field.kmeans_initialize_centers(npatch=ngal+1, init='kmeans++')
with assert_raises(ValueError):
field.kmeans_initialize_centers(npatch=0, init='kmeans++')
with assert_raises(ValueError):
field.kmeans_initialize_centers(npatch=-100, init='kmeans++')
# Should be valid to give npatch = 1, although not particularly useful.
cen_1 = field.kmeans_initialize_centers(npatch=1, init='kmeans++')
p_1 = field.kmeans_assign_patches(cen_1)
np.testing.assert_equal(p_1, np.zeros(ngal))
# If same number of patches as galaxies, each galaxy gets a patch.
# (This is stupid of course, but check that it doesn't fail.)
# Do this with fewer points though, since it's not particularly fast with N=10^5.
n = 100
cat = treecorr.Catalog(ra=ra[:n], dec=dec[:n], ra_units='rad', dec_units='rad')
field = cat.getNField()
cen_n = field.kmeans_initialize_centers(npatch=n, init='kmeans++')
p_n = field.kmeans_assign_patches(cen_n)
np.testing.assert_equal(sorted(p_n), list(range(n)))
@timer
def test_zero_weight():
# Based on test_ra_dec, but where many galaxies have w=0.
# There used to be a bug where w=0 objects were not assigned to any patch.
ngal = 10000
s = 10.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) ) + 100 # Put everything at large y, so smallish angle on sky
z = rng.normal(0,s, (ngal,) )
w = np.zeros(ngal)
    w[rng.choice(range(ngal), ngal//10, replace=False)] = 1.0
ra, dec = coord.CelestialCoord.xyz_to_radec(x,y,z)
print('minra = ',np.min(ra) * coord.radians / coord.degrees)
print('maxra = ',np.max(ra) * coord.radians / coord.degrees)
print('mindec = ',np.min(dec) * coord.radians / coord.degrees)
print('maxdec = ',np.max(dec) * coord.radians / coord.degrees)
cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', w=w,
keep_zero_weight=True)
treecorr.set_omp_threads(1)
npatch = 16
field = cat.getNField()
t0 = time.time()
p, c = field.run_kmeans(npatch)
t1 = time.time()
print('patches = ',np.unique(p))
assert len(p) == cat.ntot
assert min(p) == 0
assert max(p) == npatch-1
print('w>0 patches = ',np.unique(p[w>0]))
print('w==0 patches = ',np.unique(p[w==0]))
assert set(p[w>0]) == set(p[w==0])
@timer
def test_catalog_sphere():
# This follows the same path as test_radec, but using the Catalog API to run kmeans.
ngal = 100000
s = 10.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) ) + 100 # Put everything at large y, so smallish angle on sky
z = rng.normal(0,s, (ngal,) )
w = rng.random_sample(ngal)
ra, dec, r = coord.CelestialCoord.xyz_to_radec(x,y,z, return_r=True)
print('minra = ',np.min(ra) * coord.radians / coord.degrees)
print('maxra = ',np.max(ra) * coord.radians / coord.degrees)
print('mindec = ',np.min(dec) * coord.radians / coord.degrees)
print('maxdec = ',np.max(dec) * coord.radians / coord.degrees)
npatch = 111
cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', w=w, npatch=npatch)
t0 = time.time()
p = cat.patch
cen = cat.patch_centers
t1 = time.time()
print('patches = ',np.unique(p))
assert len(p) == cat.ntot
assert min(p) == 0
assert max(p) == npatch-1
# Check the returned center to a direct calculation.
xyz = np.array([cat.x, cat.y, cat.z]).T
direct_cen = np.array([np.average(xyz[p==i], axis=0, weights=w[p==i]) for i in range(npatch)])
direct_cen /= np.sqrt(np.sum(direct_cen**2,axis=1)[:,np.newaxis])
np.testing.assert_allclose(cen, direct_cen, atol=2.e-3)
inertia = np.array([np.sum(w[p==i][:,None] * (xyz[p==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
print('With standard algorithm:')
print('time = ',t1-t0)
print('inertia = ',inertia)
print('counts = ',counts)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 200. # This is specific to this particular field and npatch.
    assert np.std(inertia) < 0.3 * np.mean(inertia)  # rms is usually small compared to the mean
# With weights, these aren't actually all that similar. The range is more than a
# factor of 10. I think because it varies whether high weight points happen to be near the
# edges or middles of patches, so the total weight varies when you target having the
# inertias be relatively similar.
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
# Check the alternate algorithm. rms inertia should be lower.
cat2 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', w=w,
npatch=npatch, kmeans_alt=True)
t0 = time.time()
p = cat2.patch
cen = cat2.patch_centers
t1 = time.time()
assert len(p) == cat2.ntot
assert min(p) == 0
assert max(p) == npatch-1
inertia = np.array([np.sum(w[p==i][:,None] * (xyz[p==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
print('With alternate algorithm:')
print('time = ',t1-t0)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 200. # Total shouldn't increase much. (And often decreases.)
assert np.std(inertia) < 0.15 * np.mean(inertia) # rms should be even smaller here.
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
# Check using patch_centers from (ra,dec) -> (ra,dec,r)
cat3 = treecorr.Catalog(ra=ra, dec=dec, r=r, ra_units='rad', dec_units='rad', w=w,
patch_centers=cat2.patch_centers)
np.testing.assert_array_equal(cat2.patch, cat3.patch)
np.testing.assert_array_equal(cat2.patch_centers, cat3.patch_centers)
@timer
def test_catalog_3d():
# With ra, dec, r, the Catalog API should only do patches using RA, Dec.
ngal = 100000
s = 10.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) ) + 100 # Put everything at large y, so smallish angle on sky
z = rng.normal(0,s, (ngal,) )
w = rng.random_sample(ngal)
ra, dec, r = coord.CelestialCoord.xyz_to_radec(x,y,z, return_r=True)
print('minra = ',np.min(ra) * coord.radians / coord.degrees)
print('maxra = ',np.max(ra) * coord.radians / coord.degrees)
print('mindec = ',np.min(dec) * coord.radians / coord.degrees)
print('maxdec = ',np.max(dec) * coord.radians / coord.degrees)
npatch = 111
cat = treecorr.Catalog(ra=ra, dec=dec, r=r, ra_units='rad', dec_units='rad', w=w,
npatch=npatch)
t0 = time.time()
p = cat.patch
cen = cat.patch_centers
t1 = time.time()
print('patches = ',np.unique(p))
assert len(p) == cat.ntot
assert min(p) == 0
assert max(p) == npatch-1
# Check the returned center to a direct calculation.
xyz = np.array([cat.x/cat.r, cat.y/cat.r, cat.z/cat.r]).T
print('cen = ',cen)
print('xyz = ',xyz)
direct_cen = np.array([np.average(xyz[p==i], axis=0, weights=w[p==i]) for i in range(npatch)])
direct_cen /= np.sqrt(np.sum(direct_cen**2,axis=1)[:,np.newaxis])
np.testing.assert_allclose(cen, direct_cen, atol=2.e-3)
inertia = np.array([np.sum(w[p==i][:,None] * (xyz[p==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
print('With standard algorithm:')
print('time = ',t1-t0)
print('inertia = ',inertia)
print('counts = ',counts)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 200. # This is specific to this particular field and npatch.
    assert np.std(inertia) < 0.3 * np.mean(inertia)  # rms is usually small compared to the mean
# With weights, these aren't actually all that similar. The range is more than a
# factor of 10. I think because it varies whether high weight points happen to be near the
# edges or middles of patches, so the total weight varies when you target having the
# inertias be relatively similar.
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
# Check the alternate algorithm. rms inertia should be lower.
cat2 = treecorr.Catalog(ra=ra, dec=dec, r=r, ra_units='rad', dec_units='rad', w=w,
npatch=npatch, kmeans_alt=True)
t0 = time.time()
p = cat2.patch
cen = cat2.patch_centers
t1 = time.time()
assert len(p) == cat2.ntot
assert min(p) == 0
assert max(p) == npatch-1
inertia = np.array([np.sum(w[p==i][:,None] * (xyz[p==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
print('With alternate algorithm:')
print('time = ',t1-t0)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 200. # Total shouldn't increase much. (And often decreases.)
assert np.std(inertia) < 0.15 * np.mean(inertia) # rms should be even smaller here.
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
# Check using patch_centers from (ra,dec,r) -> (ra,dec)
cat3 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', w=w,
patch_centers=cat2.patch_centers)
np.testing.assert_array_equal(cat2.patch, cat3.patch)
np.testing.assert_array_equal(cat2.patch_centers, cat3.patch_centers)
if __name__ == '__main__':
test_dessv()
test_radec()
test_3d()
test_2d()
test_init_random()
test_init_kmpp()
test_zero_weight()
test_catalog_sphere()
test_catalog_3d()
| 1.945313 | 2 |
ws2122-lspm/Lib/site-packages/pm4py/objects/log/util/sorting.py | Malekhy/ws2122-lspm | 1 | 12790124 | '''
This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
PM4Py is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PM4Py is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PM4Py. If not, see <https://www.gnu.org/licenses/>.
'''
from pm4py.objects.log.obj import EventLog, Trace, EventStream
from pm4py.util import xes_constants as xes
def sort_timestamp_trace(trace, timestamp_key=xes.DEFAULT_TIMESTAMP_KEY, reverse_sort=False):
"""
Sort a trace based on timestamp key
Parameters
-----------
trace
Trace
timestamp_key
Timestamp key
reverse_sort
If true, reverses the direction in which the sort is done (ascending)
Returns
-----------
trace
Sorted trace
"""
events = sorted(trace._list, key=lambda x: x[timestamp_key], reverse=reverse_sort)
new_trace = Trace(events, attributes=trace.attributes)
return new_trace
def sort_timestamp_stream(event_log, timestamp_key=xes.DEFAULT_TIMESTAMP_KEY, reverse_sort=False):
"""
    Sort an event stream based on timestamp key
    Parameters
    -----------
    event_log
        Event stream
    timestamp_key
        Timestamp key
    reverse_sort
        If true, reverses the direction in which the sort is done (ascending)
    Returns
    -----------
    stream
        Sorted event stream
"""
events = sorted(event_log._list, key=lambda x: x[timestamp_key], reverse=reverse_sort)
new_stream = EventStream(events, attributes=event_log.attributes, extensions=event_log.extensions,
omni_present=event_log.omni_present, classifiers=event_log.classifiers,
properties=event_log.properties)
return new_stream
def sort_timestamp_log(event_log, timestamp_key=xes.DEFAULT_TIMESTAMP_KEY, reverse_sort=False):
"""
Sort a log based on timestamp key
Parameters
-----------
event_log
Log
timestamp_key
Timestamp key
reverse_sort
If true, reverses the direction in which the sort is done (ascending)
Returns
-----------
log
Sorted log
"""
new_log = EventLog(attributes=event_log.attributes, extensions=event_log.extensions,
omni_present=event_log.omni_present, classifiers=event_log.classifiers,
properties=event_log.properties)
for trace in event_log:
if trace:
new_log.append(sort_timestamp_trace(trace, timestamp_key=timestamp_key, reverse_sort=reverse_sort))
new_log._list.sort(key=lambda x: x[0][timestamp_key], reverse=reverse_sort)
return new_log
def sort_timestamp(log, timestamp_key=xes.DEFAULT_TIMESTAMP_KEY, reverse_sort=False):
"""
Sort a log based on timestamp key
Parameters
-----------
log
Trace/Event log
timestamp_key
Timestamp key
reverse_sort
If true, reverses the direction in which the sort is done (ascending)
Returns
-----------
log
Sorted Trace/Event log
"""
if type(log) is EventLog:
return sort_timestamp_log(log, timestamp_key=timestamp_key, reverse_sort=reverse_sort)
return sort_timestamp_stream(log, timestamp_key=timestamp_key, reverse_sort=reverse_sort)
def sort_lambda_log(event_log, sort_function, reverse=False):
"""
Sort a log based on a lambda expression
Parameters
------------
event_log
Log
sort_function
Sort function
reverse
Boolean (sort by reverse order)
Returns
------------
new_log
Sorted log
"""
traces = sorted(event_log._list, key=sort_function, reverse=reverse)
new_log = EventLog(traces, attributes=event_log.attributes, extensions=event_log.extensions,
omni_present=event_log.omni_present, classifiers=event_log.classifiers,
properties=event_log.properties)
return new_log
def sort_lambda_stream(event_log, sort_function, reverse=False):
"""
Sort a stream based on a lambda expression
Parameters
------------
event_log
Stream
sort_function
Sort function
reverse
Boolean (sort by reverse order)
Returns
------------
stream
Sorted stream
"""
events = sorted(event_log._list, key=sort_function, reverse=reverse)
new_stream = EventStream(events, attributes=event_log.attributes, extensions=event_log.extensions,
omni_present=event_log.omni_present, classifiers=event_log.classifiers,
properties=event_log.properties)
return new_stream
def sort_lambda(log, sort_function, reverse=False):
"""
Sort a log based on lambda expression
Parameters
-------------
log
Log
sort_function
Sort function
reverse
Boolean (sort by reverse order)
Returns
-------------
log
Sorted log
"""
if type(log) is EventLog:
return sort_lambda_log(log, sort_function, reverse=reverse)
return sort_lambda_stream(log, sort_function, reverse=reverse)
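# Usage sketch (illustrative; Event comes from pm4py.objects.log.obj, and the
# attribute keys follow the XES standard used throughout this module):
#
#     from datetime import datetime
#     from pm4py.objects.log.obj import Event
#
#     trace = Trace([
#         Event({"concept:name": "B", "time:timestamp": datetime(2021, 1, 2)}),
#         Event({"concept:name": "A", "time:timestamp": datetime(2021, 1, 1)}),
#     ])
#     log = sort_timestamp(EventLog([trace]))
#     # events inside each trace are now in ascending timestamp order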
| 2.25 | 2 |
kuconnect/dropout.py | ozanarkancan/KuConnect | 0 | 12790125 | import theano
from utils import srng
def dropout(input, dropout_rate=0):
    # Inverted dropout: keep each unit with probability `retain` and scale the
    # survivors by 1/retain so the expected activation stays the same.
    if dropout_rate > 0:
        retain = 1 - dropout_rate
        d_output = (input / retain) * srng.binomial(input.shape, p=retain,
                                                    dtype='int32').astype('float32')
    else:
        d_output = input
    return d_output
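# Usage sketch (assumes `srng` in utils is a Theano RandomStreams instance):
#
#     import theano.tensor as T
#     x = T.matrix('x')
#     x_drop = dropout(x, dropout_rate=0.5)  # typically applied at training time only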
| 2.515625 | 3 |
pytglib/api/types/push_message_content_location.py | iTeam-co/pytglib | 6 | 12790126 | <filename>pytglib/api/types/push_message_content_location.py<gh_stars>1-10
from ..utils import Object
class PushMessageContentLocation(Object):
"""
A message with a location
Attributes:
ID (:obj:`str`): ``PushMessageContentLocation``
Args:
is_live (:obj:`bool`):
True, if the location is live
is_pinned (:obj:`bool`):
True, if the message is a pinned message with the specified content
Returns:
PushMessageContent
Raises:
:class:`telegram.Error`
"""
ID = "pushMessageContentLocation"
def __init__(self, is_live, is_pinned, **kwargs):
self.is_live = is_live # bool
self.is_pinned = is_pinned # bool
@staticmethod
def read(q: dict, *args) -> "PushMessageContentLocation":
is_live = q.get('is_live')
is_pinned = q.get('is_pinned')
return PushMessageContentLocation(is_live, is_pinned)
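# Usage sketch (illustrative): deserializing from a TDLib-style result dict.
#
#     content = PushMessageContentLocation.read({"is_live": True, "is_pinned": False})
#     assert content.is_live and not content.is_pinned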
| 2.859375 | 3 |
03. Tuples and Sets - Lab/05_softuni_party.py | elenaborisova/Python-Advanced | 2 | 12790127 | def input_to_list(guests_count):
return [input() for _ in range(guests_count)]
def input_to_list_until_command(command):
result = []
line = input()
while not line == command:
result.append(line)
line = input()
return result
def get_not_arrived_guests(guests, guests_arrived):
return set(guests) - set(guests_arrived)
def print_result(result):
result = sorted(result)
print(len(result))
    for guest in result:
        if guest[0].isdigit():
            print(guest)
    for guest in result:
        if not guest[0].isdigit():
            print(guest)
guests = input_to_list(int(input()))
guests_arrived = input_to_list_until_command("END")
print_result(
get_not_arrived_guests(guests, guests_arrived)
)
| 3.765625 | 4 |
nmtwizard/config.py | OpenNMT/nmt-wizard-docker | 44 | 12790128 | <filename>nmtwizard/config.py
"""Functions to manipulate and validate configurations."""
import collections
import jsonschema
import copy
def merge_config(a, b):
"""Merges config b in a."""
for key, b_value in b.items():
if not isinstance(b_value, dict):
a[key] = b_value
else:
a_value = a.get(key)
if a_value is not None and isinstance(a_value, dict):
merge_config(a_value, b_value)
else:
a[key] = b_value
return a
def replace_config(a, b):
"""Updates fields in a by fields in b."""
a.update(b)
return a
_non_user_fields = {"model", "modelType", "imageTag", "build", "parent_model"}
def update_config(a, b, mode="default"):
"""Update the configuration a with b."""
if not b:
return a
from_version = get_config_version(a)
to_version = get_config_version(b)
if from_version == 1 and to_version == 2:
# When updating the configuration to a newer version, we clear all user fields.
a = {k: v for k, v in a.items() if k in _non_user_fields}
return replace_config(a, b)
if mode == "default" or mode == "merge":
return merge_config(a, b)
if mode == "replace":
return replace_config(a, b)
raise ValueError("Invalid configuration update mode: %s" % mode)
def index_config(config, path, index_structure=True):
"""Index a configuration with a path-like string."""
key = None
sections = path.split("/")
if not index_structure:
key = sections[-1]
sections = sections[:-1]
for section in sections:
if isinstance(config, dict):
if section not in config:
raise ValueError("Invalid path %s in config" % path)
config = config[section]
elif isinstance(config, list):
section_index = None
try:
section_index = int(section)
except ValueError:
for i, block in enumerate(config):
if isinstance(block, dict) and block.get("name") == section:
section_index = i
break
if section_index is None:
raise ValueError(
"Expected an array index in path, but got %s instead" % section
)
config = config[section_index]
else:
raise ValueError(
"Paths in config can only represent object and array structures"
)
if index_structure:
return config
else:
return config, key
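# Example (sketch): list elements can be addressed by index or by their "name".
#
#     config = {"options": {"model": [{"name": "base", "dim": 512}]}}
#     index_config(config, "options/model/base/dim")  # -> 512
#     index_config(config, "options/model/0/dim")     # -> 512
#     # With index_structure=False the last path element is returned as a key:
#     index_config(config, "options/model/base/dim", index_structure=False)
#     # -> ({"name": "base", "dim": 512}, "dim")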
def build_override(config, path, value):
"""Builds a configuration override to update the value at path."""
if not path:
return value
sections = path.split("/")
section = sections[0]
inner_path = "/".join(sections[1:])
if isinstance(config, dict):
return {section: build_override(config.get(section), inner_path, value)}
if isinstance(config, list):
index = int(sections[0])
override = build_override(config[index], inner_path, value)
# Since lists can't be merged, the override should contain the full list content.
config = list(config)
if isinstance(override, dict):
config[index] = merge_config(copy.deepcopy(config[index]), override)
else:
config[index] = override
return config
raise TypeError("Paths in config can only represent object and array structures")
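# Example (sketch): the override only spells out the fields along the path,
# except for lists, which are copied in full since they cannot be merged.
#
#     config = {"options": {"model": [{"name": "base", "dim": 512}]}}
#     build_override(config, "options/model/0/dim", 1024)
#     # -> {"options": {"model": [{"name": "base", "dim": 1024}]}}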
def index_schema(schema, path):
"""Index a JSON schema with a path-like string."""
for section in path.split("/"):
if schema["type"] != "object":
raise ValueError(
"Only object types are supported in the schema structure, "
"but saw type %s" % schema["type"]
)
properties = schema["properties"]
if section not in properties:
raise ValueError("Invalid path %s in user options" % path)
schema = properties[section]
return schema
def validate_inference_options(inference_options, config):
"""Validate the inference options, raising ValueError on error."""
json_schema = inference_options.get("json_schema")
if json_schema is None:
raise ValueError('Missing "json_schema" in "inference_options"')
jsonschema.Draft7Validator.check_schema(json_schema)
options = inference_options.get("options")
if options is None:
raise ValueError('Missing "options" in "inference_options"')
validate_mapping(json_schema, options, config)
return json_schema
def validate_mapping(schema, options, config):
"""Validate the mapping between inference options and configuration fields,
raising ValueError on error.
"""
for i, mapping in enumerate(options):
config_path = mapping.get("config_path")
if config_path is None:
raise ValueError('Missing "config_path" in option mapping %d' % i)
if isinstance(config_path, str):
config_path = [config_path]
for cp in config_path:
dst_config, _ = index_config(config, cp, index_structure=False)
if not isinstance(dst_config, dict):
raise ValueError("Paths in config can only index object structures")
option_path = mapping.get("option_path")
if option_path is None:
raise ValueError('Missing "option_path" in option mapping %d' % i)
_ = index_schema(schema, option_path)
def read_options(config, options):
"""Reads the inference options.
For V1 configurations, this function returns a configuration override.
For V2 configurations, this function returns a dict mapping operator names to their options.
Raises:
ValueError: if inference options were not expected or the value is not accepted.
"""
inference_options = config.get("inference_options")
if inference_options is None:
raise ValueError("This model does not expect inference options")
try:
jsonschema.validate(options, inference_options["json_schema"])
except jsonschema.ValidationError as e:
raise ValueError("Options validation error: %s" % e.message)
v2_config = is_v2_config(config)
operators_options = collections.defaultdict(dict)
config_override = {}
for mapping in inference_options["options"]:
try:
option_value = index_config(options, mapping["option_path"])
except ValueError:
continue # Option not passed for this request.
config_path = mapping["config_path"]
if isinstance(config_path, str):
config_path = [config_path]
if v2_config:
for cp in config_path:
dst_config, dst_key = index_config(config, cp, index_structure=False)
operators_options[dst_config["name"]].update({dst_key: option_value})
else:
for cp in config_path:
merge_config(
config_override,
build_override(config, cp, option_value),
)
if v2_config:
return operators_options
return config_override
def is_v2_config(config):
"""Returns True if config is a V2 configuration."""
preprocess = config.get("preprocess")
return (
"tokenization" not in config
and preprocess is not None
and isinstance(preprocess, list)
)
def is_v1_config(config):
"""Returns True if config is a V1 configuration."""
return not is_v2_config(config)
def get_config_version(config):
"""Returns the version of the configuration."""
return 2 if is_v2_config(config) else 1
def ensure_operators_name(config):
"""Make sure all operators in model configuration have a unique name."""
if is_v1_config(config):
return
i = 1
for process in ["preprocess", "postprocess"]:
process_config = config.get(process)
if process_config:
for op_config in process_config:
op_type = op_config.get("op")
if op_type:
op_config.setdefault("name", "%s_%d" % (op_type, i))
i += 1
def old_to_new_config(config):
"""Locally update old configuration with 'tokenization' field to include new 'vocabulary' and 'preprocess" fields."""
if not config:
return
tok_config = config.get("tokenization")
new_config = config
if tok_config:
if "vocabulary" not in config:
new_config = copy.deepcopy(config)
vocab_src = tok_config["source"].get("vocabulary", None)
vocab_tgt = tok_config["target"].get("vocabulary", None)
replace_src = tok_config["source"].get("replace_vocab", False)
replace_tgt = tok_config["target"].get("replace_vocab", False)
prev_vocab_src = tok_config["source"].get("previous_vocabulary", None)
prev_vocab_tgt = tok_config["target"].get("previous_vocabulary", None)
if vocab_src or vocab_tgt:
new_config["vocabulary"] = {}
if vocab_src:
new_config["vocabulary"]["source"] = {
"path": vocab_src,
"replace_vocab": replace_src,
}
if vocab_tgt:
new_config["vocabulary"]["target"] = {
"path": vocab_tgt,
"replace_vocab": replace_tgt,
}
if prev_vocab_src:
new_config["vocabulary"]["source"][
"previous_vocabulary"
] = prev_vocab_src
if prev_vocab_tgt:
new_config["vocabulary"]["target"][
"previous_vocabulary"
] = prev_vocab_tgt
if "preprocess" not in config:
new_tok_config = copy.deepcopy(tok_config)
new_tok_config["source"].pop("vocabulary", None)
new_tok_config["target"].pop("vocabulary", None)
new_tok_config["source"].pop("replace_vocab", None)
new_tok_config["target"].pop("replace_vocab", None)
new_config["preprocess"] = [
{
"op": "tokenization",
"source": new_tok_config["source"],
"target": new_tok_config["target"],
}
]
return new_config
def _ensure_params_order(params):
params = collections.OrderedDict(sorted(params.items(), key=lambda x: x[0]))
preferred_first = ["op", "name"]
preferred_last = ["overrides"]
for field in reversed(preferred_first):
if field in params:
params.move_to_end(field, last=False)
for field in preferred_last:
if field in params:
params.move_to_end(field, last=True)
return params
def prepare_config_for_save(config):
"""Prepares the configuration before saving it in the model directory."""
if is_v2_config(config):
# In V2 operators, we prefer that some fields appear first (or last) for readability.
config = config.copy()
for section_name in ("preprocess", "postprocess"):
section = config.get(section_name)
if section is None:
continue
config[section_name] = [_ensure_params_order(params) for params in section]
return config
| 2.640625 | 3 |
shap/plots/_utils.py | willianfco/shap | 16,097 | 12790129 | from .. import Explanation
from ..utils import OpChain
from . import colors
import numpy as np
import matplotlib.pyplot as pl
def convert_color(color):
try:
color = pl.get_cmap(color)
except:
pass
if color == "shap_red":
color = colors.red_rgb
elif color == "shap_blue":
color = colors.blue_rgb
return color
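# Usage sketch:
#
#     convert_color("viridis")   # -> a matplotlib Colormap instance
#     convert_color("shap_red")  # -> the RGB triple used for SHAP's red
#     convert_color((1, 0, 0))   # -> unrecognized values pass through unchanged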
def convert_ordering(ordering, shap_values):
if issubclass(type(ordering), OpChain):
ordering = ordering.apply(Explanation(shap_values))
if issubclass(type(ordering), Explanation):
if "argsort" in [op["name"] for op in ordering.op_history]:
ordering = ordering.values
else:
ordering = ordering.argsort.flip.values
return ordering
def get_sort_order(dist, clust_order, cluster_threshold, feature_order):
""" Returns a sorted order of the values where we respect the clustering order when dist[i,j] < cluster_threshold
"""
#feature_imp = np.abs(values)
# if partition_tree is not None:
# new_tree = fill_internal_max_values(partition_tree, shap_values)
# clust_order = sort_inds(new_tree, np.abs(shap_values))
clust_inds = np.argsort(clust_order)
feature_order = feature_order.copy()#order.apply(Explanation(shap_values))
# print("feature_order", feature_order)
for i in range(len(feature_order)-1):
ind1 = feature_order[i]
next_ind = feature_order[i+1]
next_ind_pos = i + 1
for j in range(i+1,len(feature_order)):
ind2 = feature_order[j]
#if feature_imp[ind] >
# if ind1 == 2:
# print(ind1, ind2, dist[ind1,ind2])
if dist[ind1,ind2] <= cluster_threshold:
# if ind1 == 2:
# print(clust_inds)
# print(ind1, ind2, next_ind, dist[ind1,ind2], clust_inds[ind2], clust_inds[next_ind])
if dist[ind1,next_ind] > cluster_threshold or clust_inds[ind2] < clust_inds[next_ind]:
next_ind = ind2
next_ind_pos = j
# print("next_ind", next_ind)
# print("next_ind_pos", next_ind_pos)
# insert the next_ind next
for j in range(next_ind_pos, i+1, -1):
#print("j", j)
feature_order[j] = feature_order[j-1]
feature_order[i+1] = next_ind
#print(feature_order)
return feature_order
def merge_nodes(values, partition_tree):
""" This merges the two clustered leaf nodes with the smallest total value.
"""
M = partition_tree.shape[0] + 1
ptind = 0
min_val = np.inf
for i in range(partition_tree.shape[0]):
ind1 = int(partition_tree[i,0])
ind2 = int(partition_tree[i,1])
if ind1 < M and ind2 < M:
val = np.abs(values[ind1]) + np.abs(values[ind2])
if val < min_val:
min_val = val
ptind = i
#print("ptind", ptind, min_val)
ind1 = int(partition_tree[ptind,0])
ind2 = int(partition_tree[ptind,1])
if ind1 > ind2:
tmp = ind1
ind1 = ind2
ind2 = tmp
partition_tree_new = partition_tree.copy()
for i in range(partition_tree_new.shape[0]):
i0 = int(partition_tree_new[i,0])
i1 = int(partition_tree_new[i,1])
if i0 == ind2:
partition_tree_new[i,0] = ind1
elif i0 > ind2:
partition_tree_new[i,0] -= 1
if i0 == ptind + M:
partition_tree_new[i,0] = ind1
elif i0 > ptind + M:
partition_tree_new[i,0] -= 1
if i1 == ind2:
partition_tree_new[i,1] = ind1
elif i1 > ind2:
partition_tree_new[i,1] -= 1
if i1 == ptind + M:
partition_tree_new[i,1] = ind1
elif i1 > ptind + M:
partition_tree_new[i,1] -= 1
partition_tree_new = np.delete(partition_tree_new, ptind, axis=0)
# update the counts to be correct
fill_counts(partition_tree_new)
return partition_tree_new, ind1, ind2
def dendrogram_coords(leaf_positions, partition_tree):
""" Returns the x and y coords of the lines of a dendrogram where the leaf order is given.
Note that scipy can compute these coords as well, but it does not allow you to easily specify
a specific leaf order, hence this reimplementation.
"""
xout = []
yout = []
_dendrogram_coords_rec(partition_tree.shape[0]-1, leaf_positions, partition_tree, xout, yout)
return np.array(xout), np.array(yout)
def _dendrogram_coords_rec(pos, leaf_positions, partition_tree, xout, yout):
M = partition_tree.shape[0] + 1
if pos < 0:
return leaf_positions[pos + M], 0
left = int(partition_tree[pos, 0]) - M
right = int(partition_tree[pos, 1]) - M
x_left, y_left = _dendrogram_coords_rec(left, leaf_positions, partition_tree, xout, yout)
x_right, y_right = _dendrogram_coords_rec(right, leaf_positions, partition_tree, xout, yout)
y_curr = partition_tree[pos, 2]
xout.append([x_left, x_left, x_right, x_right])
yout.append([y_left, y_curr, y_curr, y_right])
return (x_left + x_right) / 2, y_curr
def fill_internal_max_values(partition_tree, leaf_values):
""" This fills the forth column of the partition tree matrix with the max leaf value in that cluster.
"""
M = partition_tree.shape[0] + 1
new_tree = partition_tree.copy()
for i in range(new_tree.shape[0]):
val = 0
if new_tree[i,0] < M:
ind = int(new_tree[i,0])
val = max(val, np.abs(leaf_values[ind]))
else:
ind = int(new_tree[i,0])-M
val = max(val, np.abs(new_tree[ind,3])) # / partition_tree[ind,2])
if new_tree[i,1] < M:
ind = int(new_tree[i,1])
val = max(val, np.abs(leaf_values[ind]))
else:
ind = int(new_tree[i,1])-M
val = max(val, np.abs(new_tree[ind,3])) # / partition_tree[ind,2])
new_tree[i,3] = val
return new_tree
def fill_counts(partition_tree):
""" This updates the
"""
M = partition_tree.shape[0] + 1
for i in range(partition_tree.shape[0]):
val = 0
if partition_tree[i,0] < M:
ind = int(partition_tree[i,0])
val += 1
else:
ind = int(partition_tree[i,0])-M
val += partition_tree[ind,3]
if partition_tree[i,1] < M:
ind = int(partition_tree[i,1])
val += 1
else:
ind = int(partition_tree[i,1])-M
val += partition_tree[ind,3]
partition_tree[i,3] = val
def sort_inds(partition_tree, leaf_values, pos=None, inds=None):
if inds is None:
inds = []
if pos is None:
partition_tree = fill_internal_max_values(partition_tree, leaf_values)
pos = partition_tree.shape[0]-1
M = partition_tree.shape[0] + 1
if pos < 0:
inds.append(pos + M)
return
left = int(partition_tree[pos, 0]) - M
right = int(partition_tree[pos, 1]) - M
left_val = partition_tree[left,3] if left >= 0 else leaf_values[left + M]
right_val = partition_tree[right,3] if right >= 0 else leaf_values[right + M]
if left_val < right_val:
tmp = right
right = left
left = tmp
sort_inds(partition_tree, leaf_values, left, inds)
sort_inds(partition_tree, leaf_values, right, inds)
return inds | 2.75 | 3 |
samples/pose_estimation/solver.py | SushmaDG/MaskRCNN | 1 | 12790130 | <filename>samples/pose_estimation/solver.py
import cv2
import os
import trimesh
import numpy as np
from paz.core import Pose6D
from paz.core.ops import Camera
import paz.processors as pr
from paz.core import ops
import matplotlib.pyplot as plt
MESH_DIR = '/home/incendio/Documents/Thesis/YCBVideo_detector/color_meshes'
GREEN = (0, 255, 0)
class PnPSolver():
""" Implements PnP RANSAC algorithm to compute rotation and
translation vector for a given RGB mask of an object
# Arguments:
rgb_mask: RGB mask of object
true_id: Int
class_name: class name of object. String
dimension: (width, height) for draw_cube
size: size of the mask
"""
def __init__(self, rgb_mask, true_id, class_name, color=GREEN, dimension=[.1, .1], size=(320, 320)):
self.rgb_mask = rgb_mask
self.size = size
self.dimension = dimension
self.camera = self.compute_camera_matrix()
self.id = true_id
self.class_name = class_name
self.vertex_colors = self.get_vertex_colors()
self.color = color
self.world_to_camera = np.array([[ 0.70710678, 0., -0.70710678, 0.01674194],
[-0.40824829, 0.81649658, -0.40824829, -0.01203142],
[ 0.57735027, 0.57735027, 0.57735027, -1.73205081],
[ 0., 0., 0., 1.]])
def get_vertex_colors(self):
for name in os.listdir(MESH_DIR):
class_id = name.split('_')[0]
if int(class_id) == self.id:
mesh_path = os.path.join(MESH_DIR, name)
self.mesh = trimesh.load(mesh_path)
vertex_colors = self.mesh.visual.vertex_colors[:, :3]
return vertex_colors
def solve_PnP(self):
points3d, image2D = self.get_points()
assert image2D.shape[0] == points3d.shape[0]
(_, rotation, translation, inliers) = ops.solve_PNP(points3d, image2D, self.camera, ops.UPNP)
pose6D = Pose6D.from_rotation_vector(rotation, translation, self.class_name)
return pose6D
def visualize_3D_boxes(self, image, pose6D):
dimensions = {self.class_name: self.dimension}
pose = {'pose6D': pose6D, 'image': image, 'color': self.color}
draw = pr.DrawBoxes3D(self.camera, dimensions)
args, projected_points = draw(pose)
return args, projected_points
def get_points(self):
points3d, image2d = [], []
rows, cols, channels = np.where(self.rgb_mask > 0)
for index in range(len(rows)):
x, y = rows[index], cols[index]
R, G, B = self.rgb_mask[x, y, :]
matches = np.unique(np.array(self.get_matches(x, y)))
if len(matches) == 1:
image2d.append([y, x])
vertex = self.mesh.vertices[matches[0], :]
points3d.append(vertex)
# x_index = np.where(self.vertex_colors == np.stack([R, G, B]))[0]
# mid_index = int(len(x_index) / 2)
# points3d.append(self.mesh.vertices[x_index[mid_index], :])
image2d = np.array(image2d).astype(np.float32) #(N, 2)
points3d = np.array(points3d).astype(np.float32) #(N, 3)
return points3d, image2d
def get_matches(self, x, y):
R, G, B = self.rgb_mask[x, y, :]
r_index = np.where(self.vertex_colors[:, 0] == R)[0]
g_index = np.where(self.vertex_colors[:, 1] == G)[0]
b_index = np.where(self.vertex_colors[:, 2] == B)[0]
matches = [r_index, g_index, b_index]
intersection = list(set(matches[0]).intersection(*matches))
return intersection
def get_model_point(self):
rows, cols, channels = np.where(self.rgb_mask > 0)
x, y = int(np.mean(rows)), int(np.mean(cols))
R, G, B = self.rgb_mask[x, y, 0], self.rgb_mask[x, y, 1], self.rgb_mask[x, y, 2]
x_index = np.where(self.vertex_colors == np.stack([R, G, B]))[0]
mid_index = int(len(x_index) / 2)
return self.mesh.vertices[x_index[mid_index], :]
def compute_camera_matrix(self):
focal_length = self.size[1]
camera_center = (self.size[1] / 2, self.size[0] / 2)
camera_matrix = np.array([[focal_length, 0, camera_center[0]],
[0, focal_length, camera_center[1]],
[0, 0, 1]], dtype='double')
camera = Camera(0)
camera.intrinsics = camera_matrix
camera.distortion = np.zeros((4, 1))
return camera
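    # For the default size of (320, 320) this yields focal_length = 320 and a
    # principal point at (160, 160): a simple pinhole model with zero
    # distortion, not a calibrated camera.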
def draw_axis(self, mask, projected_points, thickness=2):
rows, cols, channels = np.where(mask > 0)
x, y = (int(np.mean(rows)), int(np.mean(cols)))
center = (y, x)
image = mask.copy()
R, G, B = (255, 0, 0), (0, 255, 0), (0, 0, 255)
projected_points = projected_points.astype(np.int32)
image = cv2.line(image, center, tuple(projected_points[0].ravel()), R, thickness)
image = cv2.line(image, center, tuple(projected_points[1].ravel()), G, thickness)
image = cv2.line(image, center, tuple(projected_points[2].ravel()), B, thickness)
return image
def get_neighbors(self, image, row, col, window=1):
neighbor = image[row - window : row + window + 1, col - window : col + window + 1]
color_values = np.reshape(neighbor, (9, 3))
return color_values
| 2.3125 | 2 |
MLSD/Transformers/Text_Transformers.py | HaoranXue/Machine_Learning_For_Structured_Data | 4 | 12790131 | <filename>MLSD/Transformers/Text_Transformers.py
import numpy as np
import pandas as pd
from sklearn.base import TransformerMixin
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
class BasicText(TransformerMixin):
    def __init__(self, Dreduction=None, *args, **kwargs):
        self.Dreduction = Dreduction
        # store CountVectorizer options so fit() can use them later
        self.args = args
        self.kwargs = kwargs

    def fit(self, X, y=None):
        self.trans = CountVectorizer(*self.args, **self.kwargs)
        self.TFid = TfidfTransformer()
        counts = self.trans.fit_transform(X.values)
        self.TFid.fit(counts)
        return self

    def transform(self, X):
        # TfidfTransformer expects the count matrix, not the raw text
        counts = self.trans.transform(X.values)
        self.features = pd.DataFrame(self.TFid.transform(counts).toarray(),
                                     index=X.index)
        return self.features
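# Usage sketch:
#
#     import pandas as pd
#     docs = pd.Series(["the cat sat", "the dog barked"])
#     features = BasicText().fit(docs).transform(docs)  # TF-IDF matrix, one row per doc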
| 2.90625 | 3 |
glycan_profiling/output/annotate_spectra.py | mstim/glycresoft | 0 | 12790132 | <gh_stars>0
import os
import logging
import string
import platform
from glycan_profiling import serialize
from glycan_profiling.serialize import (
Protein, Glycopeptide, IdentifiedGlycopeptide,
func, MSScan, GlycopeptideSpectrumMatch)
from glycan_profiling.task import TaskBase
from glycan_profiling.serialize import DatabaseBoundOperation
from glycan_profiling.chromatogram_tree import Unmodified
from glycan_profiling.tandem.ref import SpectrumReference
from glycan_profiling.tandem.glycopeptide.scoring import CoverageWeightedBinomialModelTree
from glycan_profiling.plotting import figure
from glycan_profiling.plotting.sequence_fragment_logo import glycopeptide_match_logo
from glycan_profiling.plotting.spectral_annotation import TidySpectrumMatchAnnotator
from ms_deisotope.output.mzml import ProcessedMzMLDeserializer
from matplotlib import pyplot as plt, style
from matplotlib import rcParams as mpl_params
status_logger = logging.getLogger("glycresoft.status")
def format_filename(s):
"""Take a string and return a valid filename constructed from the string.
Uses a whitelist approach: any characters not present in valid_chars are
removed. Also spaces are replaced with underscores.
"""
valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
filename = ''.join(c for c in s if c in valid_chars)
filename = filename.replace(' ', '_')
return filename
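# Example: format_filename("scan=1204 PEPTIDE{Hex:2}") -> "scan1204_PEPTIDEHex2"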
class SpectrumAnnotatorExport(TaskBase, DatabaseBoundOperation):
def __init__(self, database_connection, analysis_id, output_path, mzml_path=None):
DatabaseBoundOperation.__init__(self, database_connection)
self.analysis_id = analysis_id
self.mzml_path = mzml_path
self.output_path = output_path
self.analysis = self.session.query(serialize.Analysis).get(self.analysis_id)
self.scan_loader = None
self._mpl_style = {
'figure.facecolor': 'white',
'figure.edgecolor': 'white',
'font.size': 10,
'savefig.dpi': 72,
'figure.subplot.bottom': .125
}
def _make_scan_loader(self):
if self.mzml_path is not None:
if not os.path.exists(self.mzml_path):
raise IOError("No such file {}".format(self.mzml_path))
self.scan_loader = ProcessedMzMLDeserializer(self.mzml_path)
else:
self.mzml_path = self.analysis.parameters['sample_path']
if not os.path.exists(self.mzml_path):
raise IOError((
"No such file {}. If {} was relocated, you may need to explicily pass the"
" corrected file path.").format(
self.mzml_path,
self.database_connection._original_connection))
self.scan_loader = ProcessedMzMLDeserializer(self.mzml_path)
return self.scan_loader
def _load_spectrum_matches(self):
query = self.query(GlycopeptideSpectrumMatch).join(
GlycopeptideSpectrumMatch.scan).filter(
GlycopeptideSpectrumMatch.analysis_id == self.analysis_id).order_by(
MSScan.index)
return query.all()
def run(self):
scan_loader = self._make_scan_loader()
gpsms = self._load_spectrum_matches()
if not os.path.exists(self.output_path):
os.makedirs(self.output_path)
n = len(gpsms)
self.log("%d Spectrum Matches" % (n,))
for i, gpsm in enumerate(gpsms):
scan = scan_loader.get_scan_by_id(gpsm.scan.scan_id)
gpep = gpsm.structure.convert()
if i % 10 == 0:
self.log("... %0.2f%%: %s @ %s" % (((i + 1) / float(n) * 100.0), gpep, scan.id))
with style.context(self._mpl_style):
fig = figure()
grid = plt.GridSpec(nrows=5, ncols=1)
ax1 = fig.add_subplot(grid[1, 0])
ax2 = fig.add_subplot(grid[2:, 0])
ax3 = fig.add_subplot(grid[0, 0])
match = CoverageWeightedBinomialModelTree.evaluate(scan, gpep)
ax3.text(0, 0.5, (
str(match.target) + '\n' + scan.id +
'\nscore=%0.3f q value=%0.3g' % (gpsm.score, gpsm.q_value)), va='center')
ax3.axis('off')
match.plot(ax=ax2)
glycopeptide_match_logo(match, ax=ax1)
fname = format_filename("%s_%s.pdf" % (scan.id, gpep))
path = os.path.join(self.output_path, fname)
abspath = os.path.abspath(path)
if len(abspath) > 259 and platform.system().lower() == 'windows':
abspath = '\\\\?\\' + abspath
fig.savefig(abspath, bbox_inches='tight')
plt.close(fig)
| 2.203125 | 2 |
payloadcode/phototest.py | debragail/n3m0 | 21 | 12790133 | print "Here we go!"
# Import DroneKit-Python
from dronekit import connect, VehicleMode, LocationGlobalRelative, LocationGlobal
from pymavlink import mavutil # Needed for command message definitions
from picamera import PiCamera
import time
import math
import requests
# Connect to the Vehicle.
print("\nConnecting to vehicle")
#vehicle = connect(/dev/ttyACM0, wait_ready=True) # pixhawk usb
#vehicle = connect("/dev/ttyUSB0", wait_ready='armed', baud=57600) # telemetry usb
#vehicle = connect("/dev/ttyUSB0", baud=57600) # telemetry usb
vehicle = connect("/dev/ttyS0", baud=57600) # telemetry usb
# Using the ``wait_ready(True)`` waits on :py:attr:`parameters`, :py:attr:`gps_0`,
# :py:attr:`armed`, :py:attr:`mode`, and :py:attr:`attitude`. In practice this usually
# means that all supported attributes will be populated.
# 'parameters'
#vehicle.wait_ready('gps_0','armed','mode','attitude')
vehicle.wait_ready('gps_0')
# Get some vehicle attributes (state)
print "Get some vehicle attribute values:"
print " GPS: %s" % vehicle.gps_0
print " Battery: %s" % vehicle.battery
print " Last Heartbeat: %s" % vehicle.last_heartbeat
print " Is Armable?: %s" % vehicle.is_armable
print " System status: %s" % vehicle.system_status.state
print " Mode: %s" % vehicle.mode.name # settable
# class to hold info for courses, states, etc.
class PhotoStuff:
## Photo point (where to take photo)
point1 = LocationGlobalRelative(38.0, -122.0, 0)
## current location
lat = 38.06807841429639
lon = -122.23280310630798
mode='none'
message = 'no msg'
Photoing = False
time_to_quit = False
## picture data
getpix = False
plat=0
plon=0
pmode='none'
pmsg = 'no message'
camera = PiCamera()
def update_n3m0_location(self):
## update the boat location
r=requests.post('http://sailbot.holdentechnology.com/postlatlon.php',data={'b_no':1,'lat':myPhoto.lat,'lon':myPhoto.lon,'mode':myPhoto.mode,'debug':myPhoto.message})
#print(r.text)
def get_pic_requests(self):
## get data
r2 = requests.get('http://sailbot.holdentechnology.com/getbuoys.php')
thedata=r2.text.split(';')
myPhoto.plat=float(thedata[8])
myPhoto.plon=float(thedata[9])
myPhoto.pmode = thedata[10]
myPhoto.pmsg = thedata[11]
if (str(myPhoto.pmode).find("REQUESTED") >= 0): # new request, acknowledge.
r=requests.post('http://sailbot.holdentechnology.com/postlatlon.php',data={
'b_no':2,'lat':myPhoto.plat,'lon':myPhoto.plon,'mode':"n3m0 Received",'debug':"need auth"})
def take_photo(self, w,h, filename):
print('taking photo')
print filename
self.camera.resolution = (w, h)
#self.camera.resolution = (1920, 1080)
#self.camera.resolution = (640, 480)
self.camera.start_preview()
#time.sleep(2)
self.camera.capture(filename)
self.camera.stop_preview()
print('photo taken')
def post_photo(self,filename, newname):
print ('posting photo')
url = 'http://sailbot.holdentechnology.com/upload.php'
#url = 'http://httpbin.org/post'
data={'submit':'Submit','name':'fileToUpload','id':'fileToUpload'}
files = {'fileToUpload': (newname, open(filename, 'rb'))}
rr = requests.post(url, data=data, files=files)
print rr.text
print('photo posted')
def deliver_photo(self,filename):
myPhoto.pmode = "Finished<br>" + time.strftime("%Y-%m-%d %H:%M:%S")
#myPhoto.pmsg = "<a href=\"uploads/"+filename+"\"><img src=\"uploads/" + filename + "\" height=50 ></a>"
myPhoto.pmsg = "uploads/" + filename
r=requests.post('http://sailbot.holdentechnology.com/insertlatlon.php',data={'b_no':2,'lat':myPhoto.plat,'lon':myPhoto.plon,'mode':myPhoto.pmode,'debug':myPhoto.pmsg})
print "should be guided now",myPhoto.get_distance_meters(myPhoto.point1,vehicle.location.global_relative_frame)
print myPhoto.pmsg
#myPhoto.time_to_quit=True
myPhoto.pmode = "DONE"
myPhoto.pmsg = "Ready for new request" + time.strftime(" %Y-%m-%d %H:%M:%S")
r=requests.post('http://sailbot.holdentechnology.com/postlatlon.php',data={'b_no':2,'lat':myPhoto.plat,'lon':myPhoto.plon,'mode':myPhoto.pmode,'debug':myPhoto.pmsg})
def get_location_meters(self,original_location, dNorth, dEast):
"""
Returns a LocationGlobal object containing the latitude/longitude `dNorth` and `dEast` metres from the
specified `original_location`. The returned LocationGlobal has the same `alt` value
as `original_location`.
The function is useful when you want to move the vehicle around specifying locations relative to
the current vehicle position.
The algorithm is relatively accurate over small distances (10m within 1km) except close to the poles.
For more information see:
http://gis.stackexchange.com/questions/2951/algorithm-for-offsetting-a-latitude-longitude-by-some-amount-of-meters
"""
earth_radius = 6378137.0 #Radius of "spherical" earth
#Coordinate offsets in radians
dLat = dNorth/earth_radius
dLon = dEast/(earth_radius*math.cos(math.pi*original_location.lat/180))
#New position in decimal degrees
newlat = original_location.lat + (dLat * 180/math.pi)
newlon = original_location.lon + (dLon * 180/math.pi)
print("llatlon" )
if type(original_location) is LocationGlobal:
targetlocation=LocationGlobal(newlat, newlon,original_location.alt)
elif type(original_location) is LocationGlobalRelative:
targetlocation=LocationGlobalRelative(newlat, newlon,original_location.alt)
else:
raise Exception("Invalid Location object passed")
        return targetlocation
def get_distance_meters(self, aLocation1, aLocation2):
"""
Returns the ground distance in metres between two LocationGlobal objects.
This method is an approximation, and will not be accurate over large distances and close to the
earth's poles. It comes from the ArduPilot test code:
https://github.com/diydrones/ardupilot/blob/master/Tools/autotest/common.py
"""
dlat = aLocation2.lat - aLocation1.lat
dlong = aLocation2.lon - aLocation1.lon
return math.sqrt((dlat*dlat) + (dlong*dlong)) * 1.113195e5
def get_bearing(self, aLocation1, aLocation2):
"""
Returns the bearing between the two LocationGlobal objects passed as parameters.
This method is an approximation, and may not be accurate over large distances and close to the
earth's poles. It comes from the ArduPilot test code:
https://github.com/diydrones/ardupilot/blob/master/Tools/autotest/common.py
"""
off_x = aLocation2.lon - aLocation1.lon
off_y = aLocation2.lat - aLocation1.lat
bearing = 90.00 + math.atan2(-off_y, off_x) * 57.2957795
if bearing < 0:
bearing += 360.00
        return bearing
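    # Illustrative: for two nearby points due east of each other, get_bearing()
    # returns ~90.0 and get_distance_meters() their separation in metres; both
    # are flat-earth approximations, accurate over short ranges away from the poles.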
myPhoto = PhotoStuff()
# Callback when location has changed. 'value' is the updated value
# Mode changing done here.
# myPhoto.Photoing is True when we are heading for a picture point
# also saves current location into myPhoto variables.
def location_callback(self, attr_name, value):
#print "Location: ", value
## store data
myPhoto.lat = vehicle.location.global_relative_frame.lat
myPhoto.lon = vehicle.location.global_relative_frame.lon
myPhoto.mode = str(vehicle.mode.name)
## check for reaching picture waypoint
dist = myPhoto.get_distance_meters(myPhoto.point1,vehicle.location.global_relative_frame)
# if reached photo point: take photo, return to auto mode.
if (dist <= 3.0) and (myPhoto.Photoing): # waits until we reach photo point, takes photo
#if (myPhoto.Photoing): # use for bench testing, immediately takes photo.
print "Picture!", dist
# take photo
myPhoto.take_photo(1920, 1080,'/home/pi/Desktop/cap.jpg')
# exit guided mode
myPhoto.Photoing = False
# post photo
fname='n3m0_' + time.strftime("%Y%m%d-%H%M%S") + '.jpg'
myPhoto.post_photo('/home/pi/Desktop/cap.jpg',fname)
myPhoto.deliver_photo(fname)
# continuously check to see if we need to change modes
# if guided flag set but not guided mode: do guided mode.
if myPhoto.Photoing:
myPhoto.mode = str(dist)
if (str(vehicle.mode.name).find("GUIDED") < 0): # not guided
myPhoto.point1.lat = myPhoto.plat
myPhoto.point1.lon = myPhoto.plon
vehicle.mode = VehicleMode("GUIDED")
vehicle.simple_goto(myPhoto.point1)
print "guided mode again: ", str(vehicle.mode.name)
else: # guided flag not set
if (str(vehicle.mode.name).find("GUIDED") >= 0): # guided, return to auto mode.
vehicle.mode = VehicleMode("AUTO")
print "End guided mode ", str(vehicle.mode.name)
#Callback to monitor mode changes. 'value' is the updated value
# If mode changes to "steering" start autonomous action (picture)
# any other mode change cancels autonomous function through this code
def mode_callback(self, attr_name, value):
print "Mode: ", value
if str(value).find("STEERING") >=0:
myPhoto.Photoing = True
myPhoto.pmode = "UNDERWAY"
myPhoto.pmsg = "n3m0 received request" + time.strftime(" %Y-%m-%d %H:%M:%S")
r=requests.post('http://sailbot.holdentechnology.com/postlatlon.php',data={'b_no':2,'lat':myPhoto.plat,'lon':myPhoto.plon,'mode':myPhoto.pmode,'debug':myPhoto.pmsg})
print "should be guided now",myPhoto.get_distance_meters(myPhoto.point1,vehicle.location.global_relative_frame)
else:
if str(value).find("GUIDED") < 0: #not changed to guided mode
myPhoto.Photoing = False #let us go back to manual or RTL etc
print("new mode set, Photoing off")
# Add a callback `location_callback` for the `global_frame` attribute.
vehicle.add_attribute_listener('location.global_frame', location_callback)
vehicle.add_attribute_listener('mode', mode_callback)
# Loop, interrupts are running things now.
while not myPhoto.time_to_quit:
time.sleep(4)
print myPhoto.get_distance_meters(myPhoto.point1,vehicle.location.global_relative_frame)
# getting parameters is a little buggy
#print "Param: %s" % vehicle.parameters['WP_RADIUS']
myPhoto.message = time.strftime("%Y-%m-%d %H:%M:%S ") + str(vehicle.battery) + " " + str(vehicle.gps_0)
myPhoto.mode = str(vehicle.mode.name)
myPhoto.update_n3m0_location()
myPhoto.message = " "
myPhoto.mode = " "
myPhoto.take_photo(640,480,'/home/pi/Desktop/testcap.jpg')
myPhoto.post_photo('/home/pi/Desktop/testcap.jpg','tphoto.jpg')
#fname='Tn3m0_' + time.strftime("%Y%m%d-%H%M%S") + '.jpg'
#myPhoto.post_photo('/home/pi/Desktop/testcap.jpg',fname)
#myPhoto.time_to_quit=True
myPhoto.get_pic_requests()
# Remove observer - specifying the attribute and previously registered callback function
vehicle.remove_message_listener('location.global_frame', location_callback)
vehicle.remove_message_listener('mode', mode_callback)
# Close vehicle object before exiting script
vehicle.close()
print("Completed")
| 2.953125 | 3 |
examples/entities/pdfunderlay.py | jpsantos-mf/ezdxf | 1 | 12790134 | # Copyright (c) 2016-2019 <NAME>
# License: MIT License
import ezdxf
dwg = ezdxf.new('R2000') # underlay requires the DXF R2000 format or newer
pdf_underlay_def = dwg.add_underlay_def(filename='underlay.pdf', name='1') # name = page to display
dwf_underlay_def = dwg.add_underlay_def(filename='underlay.dwf',
name="Underlay_R2013-Model") # don't know how to get this name
dgn_underlay_def = dwg.add_underlay_def(filename='underlay.dgn', name='default') # name = 'default' just works
# The (PDF)DEFINITION entity is like a block definition, it just defines the underlay
msp = dwg.modelspace()
# add first underlay
msp.add_underlay(pdf_underlay_def, insert=(0, 0, 0), scale=1.)
# The (PDF)UNDERLAY entity is like the INSERT entity, it creates an underlay reference,
# and there can be multiple references to the same underlay in a drawing.
msp.add_underlay(pdf_underlay_def, insert=(10, 0, 0), scale=.5, rotation=30)
# use dgn format
msp.add_underlay(dgn_underlay_def, insert=(0, 30, 0), scale=1.)
# use dwf format
msp.add_underlay(dwf_underlay_def, insert=(0, 15, 0), scale=1.)
# get existing underlay definitions, Important: UNDERLAYDEFs resides in the objects section
pdf_defs = dwg.objects.query('PDFDEFINITION') # get all pdf underlay defs in drawing
dwg.saveas("underlay.dxf")
| 2.765625 | 3 |
setup.py | krazybean/pybusy | 0 | 12790135 | <filename>setup.py
"""
Cursor glamor prettiness for bash
"""
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='pybusy',
version='0.0.1',
description='Bash progress decoration',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/krazybean/pybusy',
author='krazybean',
author_email='<EMAIL>',
classifiers=[
'Development Status :: 1 - Pre-Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords='python3 bash progress',
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
install_requires=['cursor', 'ansicolors']
)
| 1.5625 | 2 |
discovery-provider/src/queries/get_remix_track_parents.py | mikedotexe/audius-protocol | 1 | 12790136 | <reponame>mikedotexe/audius-protocol<filename>discovery-provider/src/queries/get_remix_track_parents.py
from sqlalchemy import desc, and_
from src.models import Track, Remix
from src.utils import helpers
from src.utils.db_session import get_db_read_replica
from src.queries.query_helpers import get_current_user_id, populate_track_metadata, \
paginate_query, add_users_to_tracks
def get_remix_track_parents(track_id, args):
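    """Return the parent tracks that the given track remixes.

    The join keeps every current, listed Track whose track_id appears as
    Remix.parent_track_id on a Remix row whose child_track_id == track_id.
    """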
db = get_db_read_replica()
with db.scoped_session() as session:
base_query = (
session.query(Track)
.join(
Remix,
and_(
Remix.parent_track_id == Track.track_id,
Remix.child_track_id == track_id
)
)
.filter(
Track.is_current == True,
Track.is_unlisted == False
)
.order_by(
desc(Track.created_at),
desc(Track.track_id)
)
)
tracks = paginate_query(base_query).all()
tracks = helpers.query_result_to_list(tracks)
track_ids = list(map(lambda track: track["track_id"], tracks))
current_user_id = get_current_user_id(required=False)
tracks = populate_track_metadata(session, track_ids, tracks, current_user_id)
if args.get("with_users", False):
add_users_to_tracks(session, tracks)
return tracks
| 2.125 | 2 |
answerer/query.py | apricis/erudite | 0 | 12790137 | import re
import string
class Reformulator(object):
def __init__(self, question, qclass, lang='en', stopwords=None):
self.__original_question = question
punctuation = re.sub(r"[-+/&']", '', string.punctuation)
self.__punctuation_re = r'[{}]'.format(punctuation)
question = question[0].lower() + question[1:]
question = re.sub(r'(?<=[A-Z])\.', 'QQQ', question)
question = re.sub(self.__punctuation_re, '', question)
self.__question = re.sub(r'QQQ', '.', question)
self.__stopwords = stopwords
self.__qclass = qclass.split(':')[1]
if lang == 'en':
question_words = ['what', 'which', 'who', 'whom', 'when', 'where', 'why', 'how']
conj_prep_words = ['of', 'not']
elif lang == 'sv':
question_words = ['vilket', 'vilken', 'vem', 'whom', 'när', 'var', 'varför', 'hur']
conj_prep_words = ['av', 'inte', 'ej']
else:
            raise NotImplementedError('This language is not available')
self.__exact_stop_words = set(stopwords) - set(conj_prep_words)
self.__expansion_rules = {
'dismed': 'disease',
'instru': 'instrument',
'lang': 'language',
'other': '',
'techmeth': 'technique',
'termeq': 'term',
'veh': 'vehicle',
'dist': 'distance',
'ord': 'order',
'perc': 'percentage',
'speed': 'speed',
'temp': 'temperature',
'volsize': 'size'
}
if qclass == 'ABBR:abb':
            try:
                self.__stopwords.append('abbreviation')
            except AttributeError:  # stopwords may be a set rather than a list
                self.__stopwords.add('abbreviation')
            self.__exact_stop_words.add('abbreviation')  # always a set
def question(self):
return self.__question
def reformulate(self):
without_stopwords = [w for w in self.__question.split()
if w not in self.__stopwords]
query = without_stopwords
query.append(self.__expansion_rules.get(self.__qclass, ''))
return " ".join(query)
def reformulate_exact(self):
without_exact_stopwords = [w for w in self.__question.split()
if w not in self.__exact_stop_words]
query = without_exact_stopwords
query.append(self.__expansion_rules.get(self.__qclass, ''))
return " ".join(query)
| 2.9375 | 3 |
example.py | ipconfiger/result2 | 4 | 12790138 | <filename>example.py
#coding=utf8
from result2 import Result, Ok, Err
def get_valid_user_by_email(email):
"""
Return user instance
"""
user = get_user(email)
if user:
if user.valid is False:
return Err("user not valid")
return Ok(user)
return Err("user not exists")
result = get_valid_user_by_email('<EMAIL>')
if result == Result.Ok:
    pass  # do something with the valid user ...
else:
    pass  # show the "create new user" page with the failure reason
| 3.125 | 3 |
restApi/helpers/ride_helpers.py | Kitingu/restplus | 0 | 12790139 | <reponame>Kitingu/restplus
import datetime
from flask_restplus import reqparse
class RideParser:
parser = reqparse.RequestParser()
parser.add_argument('start_point',
type=str,
required=True,
location='json',
help="Please enter a valid starting point")
parser.add_argument('destination',
type=str,
required=True,
location='json',
help="This field cannot be blank")
parser.add_argument('seats_available',
type=int,
required=True,
location='json',
help="This field cannot be blank")
parser.add_argument('date',
type=lambda x: datetime.datetime.strptime(x, '%d/%m/%Y').strftime('%d/%m/%Y'),
required=True,
location='json',
help="please enter a valid date using format")
parser.add_argument('time',
type=lambda x: datetime.datetime.strptime(x, '%H:%M').strftime('%H:%M'),
required=True,
location='json',
help="Use 24 hour clock system")
| 2.953125 | 3 |
scapy/scapy-arp_request.py | all3g/pieces | 34 | 12790140 | <reponame>all3g/pieces<gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf8 -*-
from scapy.all import *
import logging
import threading
import Queue
logging.basicConfig(level=logging.DEBUG,
format='[*] %(name)s - %(message)s')
logger = logging.getLogger('arpscanner')
# disable scapy verbose mode
conf.verb = 0
# disable scapy warning
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
def arpscanner(iplist, lock):
"""Scan internal mac addresses"""
while 1:
try:
lock.acquire()
ip = iplist.get_nowait()
lock.release()
# create a ether object
ether = Ether(type=0x0806)
# create a arp object
arp = ARP(op=1, hwdst='ff:ff:ff:ff:ff:ff', pdst=ip)
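            # op=1 is an ARP "who-has" request; the broadcast hwdst asks every
            # host on the segment, and srp1() below returns the first reply
            # (or None once the timeout expires).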
# send arp request and receive response
arpres = srp1(ether/arp, timeout=0.05)
if arpres and arpres.haslayer('ARP'):
logger.info('%s \t %s' % (ip, arpres['ARP'].hwsrc))
else:
logger.debug('%s \t %s' % (ip, None))
except Queue.Empty:
lock.release()
break
return
if __name__ == "__main__":
iplist = Queue.Queue()
lock = threading.Lock()
for i in range(1, 255, 1):
ip = "192.168.1.%s" % i
iplist.put(ip)
for n in range(30):
t = threading.Thread(target=arpscanner, args=(iplist, lock))
t.start()
| 2.328125 | 2 |
src/third_party/swiftshader/third_party/subzero/pydir/run-pnacl-sz.py | rhencke/engine | 2,151 | 12790141 | #!/usr/bin/env python2
import argparse
import itertools
import os
import re
import subprocess
import sys
import tempfile
from utils import FindBaseNaCl, GetObjdumpCmd, shellcmd
def TargetAssemblerFlags(target, sandboxed):
# TODO(reed kotler). Need to find out exactly we need to
# add here for Mips32.
flags = { 'x8632': ['-triple=%s' % ('i686-nacl' if sandboxed else 'i686')],
'x8664': ['-triple=%s' % (
'x86_64-nacl' if sandboxed else 'x86_64')],
'arm32': ['-triple=%s' % (
'armv7a-nacl' if sandboxed else 'armv7a'),
'-mcpu=cortex-a9', '-mattr=+neon'],
'mips32': ['-triple=%s' % (
'mipsel-nacl' if sandboxed else 'mipsel'),
'-mcpu=mips32'] }
return flags[target]
def TargetDisassemblerFlags(target):
flags = { 'x8632': ['-Mintel'],
'x8664': ['-Mintel'],
'arm32': [],
'mips32':[] }
return flags[target]
def main():
"""Run the pnacl-sz compiler on an llvm file.
Takes an llvm input file, freezes it into a pexe file, converts
it to a Subzero program, and finally compiles it.
"""
argparser = argparse.ArgumentParser(
description=' ' + main.__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
argparser.add_argument('--input', '-i', required=True,
help='LLVM source file to compile')
argparser.add_argument('--output', '-o', required=False,
help='Output file to write')
argparser.add_argument('--insts', required=False,
action='store_true',
help='Stop after translating to ' +
'Subzero instructions')
argparser.add_argument('--no-local-syms', required=False,
action='store_true',
help="Don't keep local symbols in the pexe file")
argparser.add_argument('--llvm', required=False,
action='store_true',
help='Parse pexe into llvm IR first, then ' +
'convert to Subzero')
argparser.add_argument('--llvm-source', required=False,
action='store_true',
help='Parse source directly into llvm IR ' +
'(without generating a pexe), then ' +
'convert to Subzero')
argparser.add_argument(
'--pnacl-sz', required=False, default='./pnacl-sz', metavar='PNACL-SZ',
help="Subzero translator 'pnacl-sz'")
argparser.add_argument('--pnacl-bin-path', required=False,
default=(
'{root}/toolchain/linux_x86/pnacl_newlib_raw/bin'
).format(root=FindBaseNaCl()),
metavar='PNACL_BIN_PATH',
help='Path to LLVM & Binutils executables ' +
'(e.g. for building PEXE files)')
argparser.add_argument('--assemble', required=False,
action='store_true',
help='Assemble the output')
argparser.add_argument('--disassemble', required=False,
action='store_true',
help='Disassemble the assembled output')
argparser.add_argument('--dis-flags', required=False,
action='append', default=[],
help='Add a disassembler flag')
argparser.add_argument('--filetype', default='iasm', dest='filetype',
choices=['obj', 'asm', 'iasm'],
help='Output file type. Default %(default)s')
argparser.add_argument('--forceasm', required=False, action='store_true',
help='Force --filetype=asm')
argparser.add_argument('--target', default='x8632', dest='target',
choices=['x8632','x8664','arm32','mips32'],
help='Target architecture. Default %(default)s')
argparser.add_argument('--echo-cmd', required=False,
action='store_true',
help='Trace command that generates ICE instructions')
argparser.add_argument('--tbc', required=False, action='store_true',
help='Input is textual bitcode (not .ll)')
argparser.add_argument('--expect-fail', required=False, action='store_true',
help='Negate success of run by using LLVM not')
argparser.add_argument('--allow-pnacl-reader-error-recovery',
action='store_true',
help='Continue parsing after first error')
argparser.add_argument('--args', '-a', nargs=argparse.REMAINDER,
default=[],
help='Remaining arguments are passed to pnacl-sz')
argparser.add_argument('--sandbox', required=False, action='store_true',
help='Sandboxes the generated code')
args = argparser.parse_args()
pnacl_bin_path = args.pnacl_bin_path
llfile = args.input
if args.llvm and args.llvm_source:
raise RuntimeError("Can't specify both '--llvm' and '--llvm-source'")
if args.llvm_source and args.no_local_syms:
raise RuntimeError("Can't specify both '--llvm-source' and " +
"'--no-local-syms'")
if args.llvm_source and args.tbc:
raise RuntimeError("Can't specify both '--tbc' and '--llvm-source'")
if args.llvm and args.tbc:
raise RuntimeError("Can't specify both '--tbc' and '--llvm'")
if args.forceasm:
if args.expect_fail:
args.forceasm = False
elif args.filetype == 'asm':
pass
elif args.filetype == 'iasm':
# TODO(sehr) implement forceasm for iasm.
pass
elif args.filetype == 'obj':
args.filetype = 'asm'
args.assemble = True
cmd = []
if args.tbc:
cmd = [os.path.join(pnacl_bin_path, 'pnacl-bcfuzz'), llfile,
'-bitcode-as-text', '-output', '-', '|']
elif not args.llvm_source:
cmd = [os.path.join(pnacl_bin_path, 'llvm-as'), llfile, '-o', '-', '|',
os.path.join(pnacl_bin_path, 'pnacl-freeze')]
if not args.no_local_syms:
cmd += ['--allow-local-symbol-tables']
cmd += ['|']
if args.expect_fail:
cmd += [os.path.join(pnacl_bin_path, 'not')]
cmd += [args.pnacl_sz]
cmd += ['--target', args.target]
if args.sandbox:
cmd += ['-sandbox']
if args.insts:
# If the tests are based on '-verbose inst' output, force
# single-threaded translation because dump output does not get
# reassembled into order.
cmd += ['-verbose', 'inst,global_init', '-notranslate', '-threads=0']
elif args.allow_pnacl_reader_error_recovery:
cmd += ['-allow-pnacl-reader-error-recovery', '-threads=0']
if not args.llvm_source:
cmd += ['--bitcode-format=pnacl']
if not args.no_local_syms:
cmd += ['--allow-local-symbol-tables']
if args.llvm or args.llvm_source:
cmd += ['--build-on-read=0']
else:
cmd += ['--build-on-read=1']
cmd += ['--filetype=' + args.filetype]
cmd += ['--emit-revision=0']
script_name = os.path.basename(sys.argv[0])
for _, arg in enumerate(args.args):
# Redirecting the output file needs to be done through the script
# because forceasm may introduce a new temporary file between pnacl-sz
# and llvm-mc. Similar issues could occur when setting filetype, target,
# or sandbox through --args. Filter and report an error.
if re.search('^-?-(o|output|filetype|target|sandbox)(=.+)?$', arg):
preferred_option = '--output' if re.search('^-?-o(=.+)?$', arg) else arg
print 'Option should be set using:'
print ' %s ... %s ... --args' % (script_name, preferred_option)
print 'rather than:'
print ' %s ... --args %s ...' % (script_name, arg)
exit(1)
asm_temp = None
output_file_name = None
keep_output_file = False
if args.output:
output_file_name = args.output
keep_output_file = True
cmd += args.args
if args.llvm_source:
cmd += [llfile]
if args.assemble or args.disassemble:
if not output_file_name:
# On windows we may need to close the file first before it can be
# re-opened by the other tools, so don't do delete-on-close,
# and instead manually delete.
asm_temp = tempfile.NamedTemporaryFile(delete=False)
asm_temp.close()
output_file_name = asm_temp.name
if args.assemble and args.filetype != 'obj':
cmd += (['|', os.path.join(pnacl_bin_path, 'llvm-mc')] +
TargetAssemblerFlags(args.target, args.sandbox) +
['-filetype=obj', '-o', output_file_name])
elif output_file_name:
cmd += ['-o', output_file_name]
if args.disassemble:
# Show wide instruction encodings, diassemble, show relocs and
# dissasemble zeros.
cmd += (['&&', os.path.join(pnacl_bin_path, GetObjdumpCmd(args.target))] +
args.dis_flags +
['-w', '-d', '-r', '-z'] + TargetDisassemblerFlags(args.target) +
[output_file_name])
stdout_result = shellcmd(cmd, echo=args.echo_cmd)
if not args.echo_cmd:
sys.stdout.write(stdout_result)
if asm_temp and not keep_output_file:
os.remove(output_file_name)
if __name__ == '__main__':
main()
| 2.609375 | 3 |
examples/svd.py | ravenSanstete/hako | 1 | 12790142 | <reponame>ravenSanstete/hako
from .context import monad
from .context import tangle
from .context import feeder
from .context import prototype
from .context import magica
from .context import hako
from connectors import ml_100k_conn as conn
ip = '10.141.246.29'
port = 27017
version = '100k'
batch_size = 100

connector = conn.MLConnector(ip, port, version)
print(connector.feed_train_batch(batch_size))
| 1.585938 | 2 |
project_euler/101.py | huangshenno1/project_euler | 0 | 12790143 | <reponame>huangshenno1/project_euler
from __future__ import division
def u(n):
ret = 0
for i in xrange(0, 11):
ret += (-1)**i * n**i
return ret
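# solve(k) fits the unique degree-(k-1) polynomial through the first k terms
# of u(n) by Gauss-Jordan elimination, then evaluates it at n = k + 1 -- by
# construction the first incorrect term (FIT) of that optimum polynomial.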
def solve(k):
a = []
for n in xrange(1, k+1):
x = [n**x for x in xrange(0, k)]
x.append(u(n))
a.append(x)
for j in xrange(0, k):
for i in xrange(0, k):
if i == j: continue
d = a[i][j] / a[j][j]
for l in xrange(j, k+1):
a[i][l] -= a[j][l] * d
ret = 0
for i in xrange(0, k):
ret += (a[i][k] / a[i][i]) * (k+1)**i
return ret
ans = sum(map(solve, xrange(1, 11)))
print ans
| 2.484375 | 2 |
vasp-validator/tests/test_vasp_proxy_hook.py | tanshuai/reference-wallet | 14 | 12790144 | <reponame>tanshuai/reference-wallet
# Copyright (c) The Diem Core Contributors
# SPDX-License-Identifier: Apache-2.0
def test_tautology():
...
| 1.070313 | 1 |
Part 2/Chapter 02/Programming projects/project_01.py | phuycke/Practice-of-computing-using-Python | 1 | 12790145 | <reponame>phuycke/Practice-of-computing-using-Python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: <NAME>
email: <EMAIL>
GitHub: phuycke
"""
#%%
total_grains = 0
multiplier = 1
for i in range(1, 65):
total_grains += multiplier
multiplier *= 2
print('Total amount of wheat: {}'.format(total_grains))
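# Closed form of the loop above: sum(2**i for i in range(64)) == 2**64 - 1
# == 18446744073709551615 grains.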
#%%
print('Weight of wheat (in tons): {}'.format(total_grains * 0.05 / 1000))
#%%
# I don't understand question c | 3.890625 | 4 |
nova/objects/volume_usage.py | bopopescu/nova-token | 0 | 12790146 | <filename>nova/objects/volume_usage.py
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from nova import db
from nova.objects import base
from nova.objects import fields


@base.NovaObjectRegistry.register
class VolumeUsage(base.NovaPersistentObject, base.NovaObject):
    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'id': fields.IntegerField(read_only=True),
        'volume_id': fields.UUIDField(),
        'instance_uuid': fields.UUIDField(nullable=True),
        'project_id': fields.StringField(nullable=True),
        'user_id': fields.StringField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'tot_last_refreshed': fields.DateTimeField(nullable=True,
                                                   read_only=True),
        'tot_reads': fields.IntegerField(read_only=True),
        'tot_read_bytes': fields.IntegerField(read_only=True),
        'tot_writes': fields.IntegerField(read_only=True),
        'tot_write_bytes': fields.IntegerField(read_only=True),
        'curr_last_refreshed': fields.DateTimeField(nullable=True,
                                                    read_only=True),
        'curr_reads': fields.IntegerField(),
        'curr_read_bytes': fields.IntegerField(),
        'curr_writes': fields.IntegerField(),
        'curr_write_bytes': fields.IntegerField()
    }

    @staticmethod
    def _from_db_object(context, vol_usage, db_vol_usage):
        for field in vol_usage.fields:
            setattr(vol_usage, field, db_vol_usage[field])
        vol_usage._context = context
        vol_usage.obj_reset_changes()
        return vol_usage

    @base.remotable
    def save(self, update_totals=False):
        db_vol_usage = db.vol_usage_update(
            self._context, self.volume_id, self.curr_reads,
            self.curr_read_bytes, self.curr_writes, self.curr_write_bytes,
            self.instance_uuid, self.project_id, self.user_id,
            self.availability_zone, update_totals=update_totals)
        self._from_db_object(self._context, self, db_vol_usage)
| 1.804688 | 2 |
misc/python/materialize/zippy/source_capabilities.py | bobbyiliev/materialize | 1 | 12790147 | # Copyright Materialize, Inc. and contributors. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.
from typing import Optional
from materialize.zippy.framework import Capability
from materialize.zippy.kafka_capabilities import TopicExists
from materialize.zippy.watermarks import Watermarks
class SourceExists(Capability):
def __init__(self, name: str, topic: Optional[TopicExists] = None) -> None:
self.name = name
self.topic = topic
def get_watermarks(self) -> Watermarks:
assert self.topic is not None
return self.topic.watermarks
| 1.867188 | 2 |
main.py | gbuenoandrade/Integralization-Simulator---Unicamp | 0 | 12790148 | <reponame>gbuenoandrade/Integralization-Simulator---Unicamp
import numpy as np
import matplotlib.pyplot as plt
class Course:
def __init__(self):
self.name = ''
self.type = ''
self.credits = 0
self.grade = 0.0
self.sem = ''
def __str__(self):
return self.name + ' ' + self.type + ' ' + str(self.credits) + ' ' + str(self.grade)
def parseFromFile(fname):
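    # Each line of the grade dump is parsed right-to-left: course name,
    # credits, type, grade, then semester; records whose type field is longer
    # than one character (or whose grade is missing) do not count and are
    # skipped.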
def parseRevClass(v):
c = Course()
c.name = v.pop()
if len(c.name) == 1:
c.name += ' ' + v.pop()
crd = v.pop()
if len(crd) == 1:
crd = v.pop()
c.credits = int(crd)
c.type = v.pop()
if len(c.type) != 1: #it doesn't count
for i in range(3):
v.pop()
if len(v) != 0:
return parseRevClass(v)
else:
return []
try:
grd = list(v.pop())
for i in range(len(grd)):
if grd[i] == ',':
grd[i] = '.'
break
grd = ''.join(grd)
c.grade = float(grd)
except ValueError:
for i in range(3):
v.pop()
if len(v) != 0:
return parseRevClass(v)
else:
return []
for i in range(2):
v.pop()
c.sem = v.pop()
if len(v) != 0:
return [c] + parseRevClass(v)
else:
return [c]
with open(fname) as f:
content = f.readlines()
content = [line.strip() for line in content]
ans = []
for line in content:
ret = parseRevClass(line.split()[::-1])
ans += ret
return ans
courses = []
def addNewCourse(name, credits, grade, sem):
c = Course()
c.name = name
c.credits = int(credits)
c.grade = float(grade)
c.sem = sem
courses.append(c)
def getCR():
return getCRUntil('2S3016')
def getCRUntil(sem):
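    # Credit-weighted grade average over all semesters up to and including
    # `sem`, where a semester is encoded as '<period>S<year>' (e.g. '1S2014'):
    #   CR = sum(grade * credits) / sum(credits) / 10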
num = 0.0
den = 0
pr = int(sem[0])
year = int(sem[2:])
for c in courses:
cpr = int(c.sem[0])
cyear = int(c.sem[2:])
if cyear < year or (cyear == year and cpr <= pr):
num += c.grade*c.credits
den += c.credits
cr = num/den/10
return cr
def getSemCR(sem):
num = 0.0
den = 0
pr = int(sem[0])
year = int(sem[2:])
for c in courses:
cpr = int(c.sem[0])
cyear = int(c.sem[2:])
if cyear == year and cpr == pr:
num += c.grade*c.credits
den += c.credits
cr = num/den/10
return cr
def plot_charts():
year = 2012
pr = 1
labels = []
acc = []
partial = []
ignored = {'2S2015', '1S2016', '2S2018'}
while year <= 2018:
sem = str(pr) + 'S' + str(year)
if sem not in ignored:
cacc = getCRUntil(sem)
cpartial = getSemCR(sem)
print('%s: %.4f' % (sem, cacc))
labels.append(sem)
acc.append(cacc)
partial.append(cpartial)
pr += 1
if pr == 3:
pr = 1
year += 1
print(acc)
print(partial)
plt.plot(labels, acc)
plt.plot(labels, partial)
plt.legend(('Accumulated', 'Partial'))
plt.xlabel('Semester')
plt.ylabel('CR')
plt.title('CR evolution')
plt.show()
def main():
global courses
courses += parseFromFile('grades.txt')
plot_charts()
if __name__ == "__main__":
main() | 3.25 | 3 |
database/test_code/test_scrape_wiki_mysql.py | Coslate/NBA_Win_Predictor | 0 | 12790149 | #! /usr/bin/env python3.6
import pymysql
import re
import random
import datetime
import sys
import argparse
import os
#########################
# Main-Routine #
#########################
def main():
#Initialization
print('> Crawler Initialization...')
iter_num = 0
crawler_nba.init()
#Argument Parser
(password, table, max_sql_store_num, unix_socket, database_name) = ArgumentParser()
#DB Initialization
print('> DB Initialization...')
crawler_nba.MySQLDBInitialize(password, table, unix_socket, database_name)
#Sideband Setting
current_time = datetime.datetime.now()
print(f'current_time = {current_time}')
random.seed(datetime.datetime.now())
starting_url = "https://en.wikipedia.org/wiki/Kevin_Bacon"
print(f'starting_url = {starting_url}')
# Scrape articles from Wikipedia and store into MySQl Database
choose_link = starting_url
skipping = 0
while(iter_num < max_sql_store_num):
print('iter_num = {}. Get Wiki Links and store the content to MySQL...'.format(iter_num))
print(f'choose_link = {choose_link}')
all_internal_links_loop, skipping = crawler_nba.GetWikiLinksContent(choose_link, crawler_nba.cur, table)
total_num_internal_links_loop = len(all_internal_links_loop)
if(total_num_internal_links_loop > 0):
choose_link = "http://en.wikipedia.org"+all_internal_links_loop[random.randint(0, total_num_internal_links_loop-1)].attrs['href']
if(skipping == 0):
iter_num += 1
# Test to read from MySQL Database
sql_ex = 'SELECT id, title, created, LEFT(content, 32) FROM {table_name} WHERE id=4;'.format(table_name=table)
crawler_nba.cur.execute(sql_ex)
results = crawler_nba.cur.fetchall()
print(f'-------------------Execution {sql_ex}-------------------')
print(f'table = {table}')
for row in results:
id_name = str(row[0])
title_name = row[1]
created_name = str(row[2])
content_name = row[3]
print('{x:<2s}, {y:<2s}, {z:<2s}, {k:<2s}'.format(x=id_name, y=title_name, z=created_name, k=content_name))
# Close the connection of MySQL Database
crawler_nba.MySQLDBClose(crawler_nba.cur, crawler_nba.conn)
#########################
# Sub-Routine #
#########################
def ArgumentParser():
password = ""
table = ""
database_name = ""
unix_socket = ""
max_sql_store_num = 10
parser = argparse.ArgumentParser()
parser.add_argument("--mysql_password", "-sql_p", help="The password to connect to MySQL server.", required=True)
parser.add_argument("--mysql_table_name", "-sql_tn", help="The table name that will be used to store data.", required=True)
parser.add_argument("--max_sql_store_num", "-sql_mx_sn", help="The maximum number that stores in MySQL table.", required=True)
parser.add_argument("--unix_socket", "-sql_un_sock", help="The unix_socket that is used to mypysql connection.", required=True)
parser.add_argument("--database_name", "-database_name", help="The unix_socket that is used to mypysql connection.", required=True)
args = parser.parse_args()
if args.mysql_password:
password = args.mysql_password
if args.mysql_table_name:
table = args.mysql_table_name
if args.max_sql_store_num:
max_sql_store_num = int(args.max_sql_store_num)
if args.unix_socket:
unix_socket = args.unix_socket
if args.database_name:
database_name = args.database_name
return(password, table, max_sql_store_num, unix_socket, database_name)
#-----------------Execution------------------#
if __name__ == '__main__':
import sys
this_script_path = os.path.realpath(__file__)
this_script_folder = os.path.dirname(this_script_path)
crawler_nba_pkg_path = this_script_folder+'/../../crawler'
print('Add to sys.path : {x}'.format(x=crawler_nba_pkg_path))
sys.path.append(crawler_nba_pkg_path)
import package_crawler_nba.crawler_nba as crawler_nba
print('Import package_crawler_nba successfully.')
main()
| 2.875 | 3 |
reina/iv/__init__.py | SoumilShekdar/Reina | 4 | 12790150 | <filename>reina/iv/__init__.py<gh_stars>1-10
# "2sls" starts with a digit, so it cannot be imported with a plain
# ``from .2sls import ...`` statement; load it through importlib instead.
from importlib import import_module

SieveTSLS = import_module(".2sls", package=__name__).SieveTSLS
| 1.125 | 1 |