max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5)
---|---|---|---|---|---|---|
language/python/Lib_beautifulsoup/beautifulsoup.py | LIU2016/Demo | 1 | 12798551 | <filename>language/python/Lib_beautifulsoup/beautifulsoup.py
'''
More convenient than XPath / lxml.
Tag (node) selectors:
if several tags share the same name, only the first one is returned; if the tag does not exist, None is returned: soup.title
Direct children:
contents, children
Descendant selector:
descendants
Method selectors:
find, find_all
CSS selector:
select
Getting text:
get_text, the string attribute
Getting attribute values:
['key'], attrs['key']
Selectors can also be copied directly from the browser developer tools.
'''
from bs4 import BeautifulSoup
from urllib3 import *
http=PoolManager()
disable_warnings()
response= http.request(url="https://www.cnhnb.com/supply/",method="GET").data
html=response.decode("utf-8")
#print(html)
soup=BeautifulSoup(html,'lxml')
#print(soup.title.string)
#print(soup.body.contents)
#print(soup.body.children)
for i,item in enumerate(soup.body.div.ul):
print(i,item)
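# A short illustration of the selectors listed in the docstring above, applied to the
# same parsed page. The 'a' and 'ul li' selectors are generic assumptions and do not
# target anything specific on this site.
first_link = soup.find('a')
if first_link is not None:
    print(first_link.get_text(), first_link.get('href'))
print(len(soup.find_all('a')), 'links found via find_all')
print(len(soup.select('ul li')), 'list items found via the CSS selector "ul li"')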
| 3.359375 | 3 |
dataflows_aws/__init__.py | frictionlessdata/dataflows-aws | 2 | 12798552 | <reponame>frictionlessdata/dataflows-aws
from .processors.change_acl_on_s3 import change_acl_on_s3
from .processors.dump_to_s3 import S3Dumper as dump_to_s3
| 1.148438 | 1 |
link_pred_tasker.py | sykailak/evolvegcn | 0 | 12798553 | <reponame>sykailak/evolvegcn<gh_stars>0
#FINAL NODE2VEC
import torch
import taskers_utils as tu
import utils as u
from stellargraph import StellarGraph
import pandas as pd
from stellargraph.data import BiasedRandomWalk
from gensim.models import Word2Vec
import numpy as np
import scipy.sparse as sp
import logging
logging.getLogger("gensim.models").setLevel(logging.WARNING)
import time
import random
class Link_Pred_Tasker():
def __init__(self, args, dataset):
self.data = dataset
# max_time for link pred should be one before
self.max_time = dataset.max_time - 1
self.args = args
self.num_classes = 2
if not (args.use_2_hot_node_feats or args.use_1_hot_node_feats):
self.feats_per_node = dataset.feats_per_node
self.prepare_node_feats = self.build_prepare_node_feats(args, dataset)
self.is_static = False
self.feats_per_node = 100
self.all_node_feats_dic = self.build_get_node_feats(args, dataset) ##should be a dic
def build_prepare_node_feats(self, args, dataset):
if args.use_2_hot_node_feats or args.use_1_hot_node_feats:
def prepare_node_feats(node_feats):
return u.sparse_prepare_tensor(node_feats,
torch_size=[dataset.num_nodes,
self.feats_per_node])
else:
prepare_node_feats = self.data.prepare_node_feats
return prepare_node_feats
def build_get_node_feats(self, args, dataset):
def get_node_feats(adj): # input is cur_adj
edgelist = adj['idx'].cpu().data.numpy()
source = edgelist[:, 0]
target = edgelist[:, 1]
weight = np.ones(len(source))
G = pd.DataFrame({'source': source, 'target': target, 'weight': weight})
G = StellarGraph(edges=G)
rw = BiasedRandomWalk(G)
weighted_walks = rw.run(
nodes=list(G.nodes()), # root nodes
length=2, # maximum length of a random walk
n=5, # number of random walks per root node
p=1, # Defines (unnormalised) probability, 1/p, of returning to source node
q=0.5, # Defines (unnormalised) probability, 1/q, for moving away from source node
weighted=True, # for weighted random walks
seed=42, # random seed fixed for reproducibility
)
str_walks = [[str(n) for n in walk] for walk in weighted_walks]
weighted_model = Word2Vec(str_walks, size=self.feats_per_node, window=5, min_count=0, sg=1, workers=1,
iter=1)
# Retrieve node embeddings and corresponding subjects
node_ids = weighted_model.wv.index2word # list of node IDs
# change to integer
for i in range(0, len(node_ids)):
node_ids[i] = int(node_ids[i])
weighted_node_embeddings = (
weighted_model.wv.vectors) # numpy.ndarray of size number of nodes times embeddings dimensionality
# create dic
dic = dict(zip(node_ids, weighted_node_embeddings.tolist()))
# ascending order
dic = dict(sorted(dic.items()))
# create matrix
adj_mat = sp.lil_matrix((self.data.num_nodes, self.feats_per_node))
for row_idx in node_ids:
adj_mat[row_idx, :] = dic[row_idx]
adj_mat = adj_mat.tocsr()
adj_mat = adj_mat.tocoo()
coords = np.vstack((adj_mat.row, adj_mat.col)).transpose()
values = adj_mat.data
row = list(coords[:, 0])
col = list(coords[:, 1])
indexx = torch.LongTensor([row, col])
tensor_size = torch.Size([self.data.num_nodes, self.feats_per_node])
degs_out = torch.sparse.FloatTensor(indexx, torch.FloatTensor(values), tensor_size)
hot_1 = {'idx': degs_out._indices().t(), 'vals': degs_out._values()}
return hot_1
# create dic
feats_dic = {}
for i in range(self.data.max_time):
if i%30 == 0:
print('current i to make embeddings:', i)
cur_adj = tu.get_sp_adj(edges=self.data.edges,
time=i,
weighted=True,
time_window=self.args.adj_mat_time_window)
feats_dic[i] = get_node_feats(cur_adj)
return feats_dic
def get_sample(self, idx, test, **kwargs):
hist_adj_list = []
hist_ndFeats_list = []
hist_mask_list = []
existing_nodes = []
for i in range(idx - self.args.num_hist_steps, idx + 1):
cur_adj = tu.get_sp_adj(edges=self.data.edges,
time=i,
weighted=True,
time_window=self.args.adj_mat_time_window)
if self.args.smart_neg_sampling:
existing_nodes.append(cur_adj['idx'].unique())
else:
existing_nodes = None
node_mask = tu.get_node_mask(cur_adj, self.data.num_nodes)
# get node features from the dictionary (already created)
node_feats = self.all_node_feats_dic[i]
cur_adj = tu.normalize_adj(adj=cur_adj, num_nodes=self.data.num_nodes)
hist_adj_list.append(cur_adj)
hist_ndFeats_list.append(node_feats)
hist_mask_list.append(node_mask)
# This would be if we were training on all the edges in the time_window
label_adj = tu.get_sp_adj(edges=self.data.edges,
time=idx + 1,
weighted=False,
time_window=self.args.adj_mat_time_window)
if test:
neg_mult = self.args.negative_mult_test
else:
neg_mult = self.args.negative_mult_training
if self.args.smart_neg_sampling:
existing_nodes = torch.cat(existing_nodes)
if 'all_edges' in kwargs.keys() and kwargs['all_edges'] == True:
non_exisiting_adj = tu.get_all_non_existing_edges(adj=label_adj, tot_nodes=self.data.num_nodes)
else:
non_exisiting_adj = tu.get_non_existing_edges(adj=label_adj,
number=label_adj['vals'].size(0) * neg_mult,
tot_nodes=self.data.num_nodes,
smart_sampling=self.args.smart_neg_sampling,
existing_nodes=existing_nodes)
# For football data, we need to sample due to memory constraints
if self.args.sport=='football':
# Sampling label_adj
num_sample = int(np.floor(len(label_adj['vals'])*0.02))
indice = random.sample(range(len(label_adj['vals'])), num_sample)
indice = torch.LongTensor(indice)
label_adj['idx'] = label_adj['idx'][indice,:]
label_adj['vals'] = label_adj['vals'][indice]
# Sampling non_exisiting_adj
num_sample = int(np.floor(len(non_exisiting_adj['vals'])*0.02))
indice = random.sample(range(len(non_exisiting_adj['vals'])), num_sample)
indice = torch.LongTensor(indice)
non_exisiting_adj['idx'] = non_exisiting_adj['idx'][indice,:]
non_exisiting_adj['vals'] = non_exisiting_adj['vals'][indice]
all_len = len(label_adj['vals']) + len(non_exisiting_adj['vals'])
pos = len(label_adj['vals'])/all_len
neg = len(non_exisiting_adj['vals'])/all_len
# if adapt, we use EXACT adaptive weights when contributing to the loss
if self.args.adapt:
weight = [pos,neg]
else:
weight = self.args.class_weights
label_adj['idx'] = torch.cat([label_adj['idx'], non_exisiting_adj['idx']])
label_adj['vals'] = torch.cat([label_adj['vals'], non_exisiting_adj['vals']])
return {'idx': idx,
'hist_adj_list': hist_adj_list,
'hist_ndFeats_list': hist_ndFeats_list,
'label_sp': label_adj,
'node_mask_list': hist_mask_list,
'weight': weight}
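# A minimal sketch of the sparse-feature construction used in build_get_node_feats
# above: a scipy COO matrix is turned into a torch sparse tensor and stored as an
# {'idx', 'vals'} dict. The shape and values here are made up for illustration only.
if __name__ == '__main__':
    demo_row = np.array([0, 0, 2])
    demo_col = np.array([0, 1, 2])
    demo_val = np.array([0.1, 0.2, 0.3], dtype=np.float32)
    demo = sp.coo_matrix((demo_val, (demo_row, demo_col)), shape=(4, 3))
    coords = np.vstack((demo.row, demo.col)).transpose()
    index = torch.LongTensor([list(coords[:, 0]), list(coords[:, 1])])
    demo_tensor = torch.sparse.FloatTensor(index, torch.FloatTensor(demo.data), torch.Size([4, 3]))
    print({'idx': demo_tensor._indices().t(), 'vals': demo_tensor._values()})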
| 1.9375 | 2 |
plugins/msg.py | popa222455/botTWO | 9 | 12798554 | <reponame>popa222455/botTWO
from database import *
from plugin_system import Plugin
plugin = Plugin("Отправка сообщения",
usage=["напиши [id] [сообщение] - написать сообщение пользователю",
"анонимно [id] [сообщение] - написать анонимное сообщение пользователю"
"(посылать можно только текст и/или фото)",
"не беспокоить - не получать сообщения",
"беспокоить - получать сообщения"],
need_db=True)
DISABLED = ('https', 'http', 'com', 'www', 'ftp', '://')
def check_links(string):
return any(x in string for x in DISABLED)
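# For example, check_links("see https://example.com") and check_links("welcome")
# are both True (the bare substring 'com' also matches), while
# check_links("hello world") is False.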
@plugin.on_init()
def init(vk):
pass
@plugin.on_command('анонимка', 'анонимно')
async def anonymously(msg, args):
text_required = True
for k, v in msg.brief_attaches.items():
if '_type' in k and v == "photo":
text_required = False
break
if len(args) < 2 and text_required:
return await msg.answer('Введите ID пользователя и сообщение для него.')
sender_id = msg.user_id
possible_id = args.pop(0)
if not possible_id.isdigit():
uid = await msg.vk.resolve_name(possible_id)
else:
uid = int(possible_id)
if not uid:
return await msg.answer('Проверьте правильность введёного ID пользователя.')
if sender_id == uid:
return await msg.answer('Зачем мне отправлять сообщение вам?!')
if await get_or_none(Ignore, ignored=sender_id, ignored_by=uid):
return await msg.answer('Вы находитесь в чёрном списке у этого пользователя!')
user = await get_or_none(User, uid=uid)
if user and user.do_not_disturb:
return await msg.answer('Этот пользователь попросил его не беспокоить!')
data = ' '.join(args)
if check_links(data):
return await msg.answer('В сообщении были обнаружены ссылки!')
message = "Вам анонимное сообщение!\n"
if data:
message += data + "\n"
if not text_required:
full = await msg.full_attaches
if full:
message += "Вложения:\n".join(m.link for m in full)
val = {
'peer_id': uid,
'message': message
}
result = await msg.vk.method('messages.send', val)
if not result:
return await msg.answer('Сообщение не удалось отправить!')
await msg.answer('Сообщение успешно отправлено!')
@plugin.on_command('админу')
async def to_admin(msg, args):
sender_id = msg.user_id
for role in await db.execute(Role.select().where(Role.role == "admin")):
if await get_or_none(Ignore, ignored=sender_id, ignored_by=role.user_id):
return await msg.answer('Вы находитесь в чёрном списке у этого пользователя!')
user = await get_or_none(User, uid=role.user_id)
if user and user.do_not_disturb:
return await msg.answer('Этот пользователь попросил его не беспокоить!')
data = ' '.join(args)
if check_links(data):
return await msg.answer('В сообщении были обнаружены ссылки!')
sender_data = await msg.vk.method('users.get', {'user_ids': msg.user_id, 'name_case': "gen"})
sender_data = sender_data[0]
val = {
'peer_id': role.user_id,
'message': f"Вам сообщение от {sender_data['first_name']} {sender_data['last_name']}!\n\"{data}\"",
}
if "attach1" in msg.brief_attaches:
val['attachment'] = ",".join(str(x) for x in await msg.full_attaches)
result = await msg.vk.method('messages.send', val)
if not result:
return await msg.answer('Сообщение не удалось отправить!')
await msg.answer('Сообщение успешно отправлено!')
@plugin.on_command('написать', 'напиши', 'лс', 'письмо')
async def write_msg(msg, args):
if (len(args) != 1 or not msg.brief_attaches) and len(args) < 2:
return await msg.answer('Введите ID пользователя и сообщение для него.')
sender_id = msg.user_id
possible_id = args.pop(0)
if not possible_id.isdigit():
uid = await msg.vk.resolve_name(possible_id)
else:
uid = int(possible_id)
if not uid:
return await msg.answer('Проверьте правильность введёного ID пользователя.')
if sender_id == uid:
return await msg.answer('Зачем мне отправлять сообщение самому себе?!')
if await get_or_none(Ignore, ignored=sender_id, ignored_by=uid):
return await msg.answer('Вы находитесь в чёрном списке у этого пользователя!')
user = await get_or_none(User, uid=uid)
if user and user.do_not_disturb:
return await msg.answer('Этот пользователь попросил его не беспокоить!')
data = ' '.join(args)
if check_links(data):
return await msg.answer('В сообщении были обнаружены ссылки!')
sender_data = await msg.vk.method('users.get', {'user_ids': msg.user_id, 'name_case': "gen"})
sender_data = sender_data[0]
val = {
'peer_id': uid,
'message': f"Вам сообщение от {sender_data['first_name']} {sender_data['last_name']}!\n\"{data}\"",
}
if "attach1" in msg.brief_attaches:
val['attachment'] = ",".join(str(x) for x in await msg.full_attaches)
result = await msg.vk.method('messages.send', val)
if not result:
return await msg.answer('Сообщение не удалось отправить!')
await msg.answer('Сообщение успешно отправлено!')
@plugin.on_command('скрыть')
async def hide(msg, args):
if len(args) < 1:
return await msg.answer('Введите ID пользователя для игнорирования.')
sender_id = msg.user_id
ignore_id = args.pop()
if not ignore_id.isdigit():
uid = await msg.vk.resolve_name(ignore_id)
else:
uid = int(ignore_id)
if not uid:
return await msg.answer('Проверьте правильность введёного ID пользователя.')
await db.create_or_get(Ignore, ignored=uid, ignored_by=sender_id)
await msg.answer(f'Вы не будете получать сообщения от {ignore_id}!')
@plugin.on_command('показать')
async def show(msg, unignore):
if len(unignore) < 1:
return await msg.answer('Введите ID пользователя, которого вы хотите убрать из игнора.')
sender_id = msg.user_id
unignore_id = unignore.pop()
if not unignore_id.isdigit():
uid = await msg.vk.resolve_name(unignore_id)
else:
uid = int(unignore_id)
if not uid:
return await msg.answer('Проверьте правильность введёного ID пользователя.')
await db.execute(Ignore.delete().where(
(Ignore.ignored == uid) & (Ignore.ignored_by == sender_id)
))
await msg.answer(f'Теперь {unignore_id} сможет отправлять вам сообщения!')
@plugin.on_command('не беспокоить')
async def do_not_disturb(msg, args):
user = await get_or_none(User, uid=msg.user_id)
if not user:
return await msg.answer('Вы не существуете!\n(или у бота проблемы с базой данных)')
user.do_not_disturb = True
await db.update(user)
await msg.answer('Вы не будете получать сообщения!')
@plugin.on_command('беспокоить')
async def do_disturb(msg, args):
user = await get_or_none(User, uid=msg.user_id)
if not user:
return await msg.answer('Вы не существуете!\n(или у бота проблемы с базой данных)')
user.do_not_disturb = False
await db.update(user)
await msg.answer('Вы будете получать все сообщения!')
| 2.359375 | 2 |
cryptonite/challenge/migrations/0010_challenge_users.py | pshrmn/cryptonite | 1 | 12798555 | <filename>cryptonite/challenge/migrations/0010_challenge_users.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-21 06:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cryptographer', '0001_cryptographer_user'),
('challenge', '0009_cryptographer_user'),
]
operations = [
migrations.AddField(
model_name='challenge',
name='users',
field=models.ManyToManyField(blank=True, through='challenge.CompletedChallenge', to='cryptographer.Cryptographer'),
),
]
| 1.632813 | 2 |
cap-3/13-Deletando_dados-DeleteFrom-Where.py | igoradriano/manipulacao-dados-python-bd | 0 | 12798556 | <filename>cap-3/13-Deletando_dados-DeleteFrom-Where.py<gh_stars>0
import sqlite3 as conector
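# Besides the literal-SQL and named-parameter styles used below, sqlite3 also supports
# positional "?" (qmark) placeholders. A minimal sketch of that style, run against a
# throwaway in-memory database so meu_banco.db is not touched (the demo table's columns
# are illustrative assumptions):
demo = conector.connect(":memory:")
demo.execute("CREATE TABLE Pessoa (cpf INTEGER PRIMARY KEY, nome TEXT)")
demo.execute("INSERT INTO Pessoa (cpf, nome) VALUES (?, ?)", (61846350263, "Exemplo"))
demo.execute("DELETE FROM Pessoa WHERE cpf=?", (61846350263,))
demo.close()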
try:
# Create a connection and a cursor
conexao = conector.connect("./meu_banco.db")
cursor = conexao.cursor()
print("Cursor e Conexao criados com sucesso")
# Delete using a literal SQL string, without "?" placeholders
comando = '''DELETE FROM Pessoa WHERE cpf=61846350263;'''
cursor.execute(comando)
print("Primeiro Comando Especificado com suscesso")
# Delete using a SQL string with named parameters
comando1 = '''DELETE FROM Pessoa WHERE cpf=:cpf;'''
cursor.execute(comando1,{"cpf":98712284220})
print("Segundo Comando Especificado com suscesso")
# Commit the change to the database
conexao.commit()
print("Conexao commitada com sucesso")
# Exception handling
except conector.IntegrityError as err:
print("Erro de Integridade", err)
except conector.DatabaseError as err:
print("Erro do Banco de Dados", err)
except conector.OperationalError as err:
print("Erro de Operação", err)
# Wrap up: close the cursor and the connection
finally:
if conexao:
cursor.close()
conexao.close()
print("Curso e conexao fechados com sucesso") | 3.109375 | 3 |
behave_webdriver/utils.py | stevegibson/behave-webdriver | 43 | 12798557 | <reponame>stevegibson/behave-webdriver
from os import getenv
import six
from behave_webdriver.driver import (Chrome,
Firefox,
Ie,
Edge,
Opera,
Safari,
BlackBerry,
PhantomJS,
Android,
Remote)
def _from_string(webdriver_string):
def get_driver_name(driver):
return driver.__name__.upper()
drivers = [Chrome, Firefox, Ie, Edge, Opera, Safari, BlackBerry, PhantomJS, Android, Remote]
driver_map = {get_driver_name(d): d for d in drivers}
driver_map['CHROME.HEADLESS'] = Chrome.headless
Driver = driver_map.get(webdriver_string.upper(), None)
if Driver is None:
raise ValueError('No such driver "{}". Valid options are: {}'.format(webdriver_string,
', '.join(driver_map.keys())))
return Driver
def from_string(webdriver_string, *args, **kwargs):
Driver = _from_string(webdriver_string)
return Driver(*args, **kwargs)
def _from_env(default_driver=None):
browser_env = getenv('BEHAVE_WEBDRIVER', default_driver)
if browser_env is None:
raise ValueError('No driver found in environment variables and no default driver selection')
if isinstance(browser_env, six.string_types):
Driver = _from_string(browser_env)
else:
# if not a string, assume we have a webdriver instance
Driver = browser_env
return Driver
def from_env(*args, **kwargs):
default_driver = kwargs.pop('default_driver', None)
if default_driver is None:
default_driver = 'Chrome.headless'
Driver = _from_env(default_driver=default_driver)
return Driver(*args, **kwargs)
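# A small usage sketch of the helpers above: _from_string resolves a driver class from
# a case-insensitive name, and unknown names raise a ValueError. No browser is started here.
if __name__ == '__main__':
    assert _from_string('chrome') is Chrome
    assert _from_string('FIREFOX') is Firefox
    try:
        _from_string('not-a-driver')
    except ValueError as error:
        print(error)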
| 2.75 | 3 |
patchlion/0000/DrawHeadImage.py | saurabh896/python-1 | 3,976 | 12798558 | <filename>patchlion/0000/DrawHeadImage.py<gh_stars>1000+
# -*- coding: utf-8 -*-
__author__ = 'PatchLion'
from PIL import Image, ImageDraw,ImageFont
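# Note: ImageFont.truetype below expects the font file 'Varela-Regular.otf' to be
# available (e.g. in the working directory); any other .ttf/.otf font path could be
# substituted.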
def drawNumberOnIcon(imgpath, number):
img = Image.open(imgpath)
if (None == img):
print('打开图片失败')
return
img = img.resize((160, 160))
print(imgpath, "->", img.format, img.size, img.mode)
draw = ImageDraw.Draw(img)
img_size = img.size
font = ImageFont.truetype("Varela-Regular.otf", size=int(img_size[1]/4))
text_size = font.getsize(str(number))
draw.text((img_size[0]-text_size[0], 0), str(number), font=font, fill=(255, 0, 0))
img.save('icon_withnumber.jpg')
print('生成图片成功')
drawNumberOnIcon("icon.jpg", 21) | 2.8125 | 3 |
Chapter_5_Mathematics/Combinatorics/kattis_anti11.py | BrandonTang89/CP4_Code | 2 | 12798559 | '''
Kattis - anti11
Let f(x) be the number of x length binary strings without 11 ending with 1
Let g(x) be the number of x length binary strings without 11 ending with 0
f(2) = 1 {01}
g(2) = 2 {00, 10}
f(x+1) = g(x) {...01}
g(x+1) = f(x) + g(x) {...00, ...10}
ans(x) = f(x) + g(x)
Time: O(10000), Space: O(10000)
'''
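# Worked check for x = 3: the length-3 strings without "11" are
# {000, 001, 010, 100, 101}, so ans(3) = 5; those ending in 1 give f(3) = 2 and
# those ending in 0 give g(3) = 3, matching f(3) = g(2) and g(3) = f(2) + g(2).
# A brute-force helper (not called by the submission) that can spot-check small x:
def _brute_force(x):
    return sum(1 for i in range(2 ** x) if '11' not in format(i, '0{}b'.format(x)))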
M = int(1e9 + 7)
f = [0, 1, 1]
g = [0, 1, 2]
for i in range(2,10001):
f.append(g[-1])
g.append((g[-1] + f[-2]) % M)
# print(f[:10])
# print(g[:10])
n = int(input())
for i in range(n):
x = int(input())
print((f[x] + g[x])%M) | 3.0625 | 3 |
Arrays/OddOneOut.py | d3xt3r0/Data-Structures-And-Algorithms | 4 | 12798560 | # Odd one out from hackerearth solution
def oddOneOut(arr : list) :
n = len(arr)
arr.sort()
summation = sum(arr)
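# Expected total of the full sequence, via the arithmetic-series sum
# S = k/2 * (2*a + (k-1)*d) with k = n + 1 terms, first term a = arr[0] and
# common difference d = 2 (the input is assumed to be such a progression with
# exactly one term missing).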
actual_sum = int((n+1)/2 * (2*arr[0] + (n*2)))
print(actual_sum)
return actual_sum - summation
if __name__ == '__main__' :
arr = list(map(int, input("Enter the elements into the array : ").split()))
print(oddOneOut(arr)) | 3.765625 | 4 |
src/drone_ai/scripts/helpers/tracking/objects.py | Tao-wecorp/drone_sim | 0 | 12798561 | #! /usr/bin/env python
import cv2 as cv
import numpy as np
| 1.195313 | 1 |
broker/src/ave/broker/profile.py | yiu31802/ave | 17 | 12798562 | # Copyright (C) 2013 Sony Mobile Communications AB.
# All rights, including trade secret rights, reserved.
from ave.profile import Profile
from ave.handset.profile import HandsetProfile
from ave.workspace import WorkspaceProfile
from ave.base_workspace import BaseWorkspaceProfile
from ave.relay.profile import RelayProfile
try: # prefer profile from full installation, if available
from ave.positioning.profile import TestDriveProfile
except: # use stub if positioning support is not installed
from positioning import TestDriveProfile
try: # prefer profile from full installation, if available
from ave.powermeter.profile import PowermeterProfile
except: # use stub if powermeter support is not installed
from powermeter import PowermeterProfile
try: # prefer profile from full installation, if available
from ave.beryllium.profile import BerylliumProfile
except: # use stub if beryllium support is not installed
from beryllium import BerylliumProfile
try: # prefer profile from full installation, if available
from ave.wlan.profile import WlanProfile
except: # use stub if beryllium support is not installed
from wlan import WlanProfile
class BrokerProfile(Profile):
def __init__(self, values):
try: del values['authkeys']
except: pass
try: del values['remote']['authkey']
except: pass
Profile.__init__(self, values)
self['type'] = 'broker'
def __hash__(self):
return hash(id(self))
def profile_factory(profile):
return factory(profile)
def factory(profile):
if 'type' not in profile:
raise Exception('profile "type" attribute is missing')
if profile['type'] == 'workspace':
return WorkspaceProfile(profile)
if profile['type'] == 'handset':
return HandsetProfile(profile)
if profile['type'] == 'relay':
return RelayProfile(profile)
if profile['type'] == 'beryllium':
return BerylliumProfile(profile)
if profile['type'] == 'broker':
return BrokerProfile(profile)
if profile['type'] == 'testdrive':
return TestDriveProfile(profile)
if profile['type'] == 'wlan':
return WlanProfile(profile)
if profile['type'] == 'powermeter':
return PowermeterProfile(profile)
raise Exception('type %s not supported in profiles' % profile['type'])
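# A small sketch of the dispatch above: factory() switches on the "type" key and raises
# for unknown types. Building a real profile needs whatever fields its class expects,
# so only the error path is exercised here.
if __name__ == '__main__':
    try:
        factory({'type': 'no-such-type'})
    except Exception as error:
        print(error)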
| 1.8125 | 2 |
evaluator.py | jmribeiro/pddpg-hfo | 0 | 12798563 | <gh_stars>0
from learner import Learner
import os
import argparse
import time
import numpy as np
import torch
from agents.random_agent import RandomAgent
from agents.pddpg_agent import PDDPGAgent
from agents.mapddpg_agent import MAPDDPGAgent
from envs import offense_mid_action
from utils.redis_manager import connect_redis, query_all_obs_actions, sync_agent_obs_actions, sync_agent_policy
import logging
class Evaluator(Learner):
def __init__(self, agent_type, tensorboard_dir, save_dir=".", player='offense', save_freq=500,
seed=1, episodes=50, server_port=6000, eval_episodes=1000, start=0):
super(Evaluator, self).__init__(agent_type=agent_type, tensorboard_dir=tensorboard_dir,
save_dir=save_dir, player=player,
seed=seed, episodes=episodes, start=start,
server_port=server_port, save_freq=save_freq)
self.player = player
self.eval_episodes = eval_episodes
def log_results(self, evaluation_results, i):
num_results = evaluation_results.shape[0]
total_returns = sum(evaluation_results[:, 0])
total_timesteps = sum(evaluation_results[:, 1])
goal_timesteps = evaluation_results[:, 1][evaluation_results[:, 2] == 1]
total_goal = sum(evaluation_results[:, 2])
total_captured = sum(evaluation_results[:, 3])
avg_returns = total_returns / num_results
avg_timesteps = total_timesteps / num_results
avg_goal_timesteps = sum(goal_timesteps) / num_results
avg_goal_prob = total_goal / num_results
avg_captured_prob = total_captured / num_results
print("Avg. evaluation return =", avg_returns)
print("Avg. timesteps =", avg_timesteps)
if self.player == 'offense':
print("Avg. goal prob. =", avg_goal_prob)
print("Avg. timesteps per goal =", avg_goal_timesteps)
print("Avg. captured prob. =", avg_captured_prob)
elif self.player == 'goalie':
print("Avg. lose prob. =", avg_goal_prob)
print("Avg. capture prob. =", avg_captured_prob / (1 + avg_goal_prob))
self.writer.add_scalar('Avg. evaluation return', avg_returns, i)
self.writer.add_scalar('Avg. timesteps', avg_timesteps, i)
if self.player == 'offense':
self.writer.add_scalar("Avg. goal prob.", avg_goal_prob, i)
self.writer.add_scalar("Avg. timesteps per goal", avg_goal_timesteps, i)
self.writer.add_scalar("Avg. captured prob.", avg_captured_prob, i)
elif self.player == 'goalie':
self.writer.add_scalar("Avg. lose prob.", avg_goal_prob, i)
self.writer.add_scalar("Avg. capture prob.", avg_captured_prob / (1 + avg_goal_prob), i)
return avg_returns, avg_timesteps, avg_goal_timesteps, avg_goal_prob, avg_captured_prob
def run(self):
# Random seed
# self.seed += 10000 * proc_id()
torch.manual_seed(self.seed)
np.random.seed(self.seed)
# later can sort by approximity (need to be same as in redis_manager)
self.all_agents = list(self.redis_instance.smembers('teammates'))
self.all_agents.sort()
start_time = time.time()
print("Evaluating self.agent over {} episodes".format(self.episodes))
self.agent.epsilon_final = 0.
self.agent.epsilon = 0.
self.agent.noise = None
# PDDPG and MAPDDPG evaluation
if isinstance(self.agent, PDDPGAgent):
# main loop
assert self.save_freq < self.episodes + 1
for i in range(self.start + self.save_freq, self.start + self.episodes + 1, self.save_freq):
# load model
returns = []
timesteps = []
goals = []
captureds = []
self.load_model(self.save_dir, i)
for j in range(self.eval_episodes):
info = {'status': "NOT_SET"}
# initialize environment and reward
obs = self.env.reset()
obs = np.array(obs, dtype=np.float32, copy=False)
episode_reward = 0.
terminal = False
t = 0
# get discrete action and continuous parameters
act, act_param, _, _ = self.agent.act(obs)
action = offense_mid_action(act, act_param)
while not terminal:
t += 1
next_obs, reward, terminal, info = self.env.step(action)
next_obs = np.array(next_obs, dtype=np.float32, copy=False)
# get discrete action and continuous parameters
next_act, next_act_param, _, _ = self.agent.act(next_obs)
next_action = offense_mid_action(next_act, next_act_param)
action = next_action
episode_reward += reward
goal = info['status'] == 'GOAL'
captured = info['status'] == 'CAPTURED_BY_DEFENSE'
timesteps.append(t)
returns.append(episode_reward)
goals.append(goal)
captureds.append(captured)
evaluation_results = np.column_stack((returns, timesteps, goals, captureds))
avg_returns, avg_timesteps, avg_goal_timesteps, avg_goal_prob, avg_captured_prob = \
self.log_results(evaluation_results, i)
end_time = time.time()
print("Evaluation time: %.2f seconds" % (end_time - start_time))
return avg_returns, avg_timesteps, avg_goal_timesteps, avg_goal_prob, avg_captured_prob
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--agent-type', type=str, default='PDDPG')
parser.add_argument('--player', type=str, default='offense')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--server-port', type=int, default=6000)
parser.add_argument('--episodes', type=int, default=20000)
parser.add_argument('--eval-episodes', type=int, default=1000)
parser.add_argument('--save-freq', type=int, default=500)
parser.add_argument('--tensorboard-dir', type=str, default=".")
parser.add_argument('--save-dir', type=str, default=".")
parser.add_argument('--start', type=int, default=0)
args = parser.parse_args()
evaluator = Evaluator(agent_type=args.agent_type, player=args.player, seed=args.seed, episodes=args.episodes,
server_port=args.server_port, tensorboard_dir=args.tensorboard_dir, start=args.start,
save_dir=args.save_dir, eval_episodes=args.eval_episodes, save_freq=args.save_freq)
evaluator.redis_instance.decr('not ready', amount=1)
# to act nearly synchronously
while int(evaluator.redis_instance.get('not ready')) > 0:
print('\rNumber of not ready learners:', evaluator.redis_instance.get('not ready'), end='')
print('======Start Evaluation======')
evaluator.run()
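# Example invocation (paths, ports and counts are placeholders):
#   python evaluator.py --agent-type PDDPG --player offense --server-port 6000 \
#       --episodes 20000 --eval-episodes 1000 --save-freq 500 \
#       --tensorboard-dir ./runs --save-dir ./checkpoints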
| 2.078125 | 2 |
finetuning/v1/evaluate.py | wietsedv/bertje | 104 | 12798564 | <gh_stars>100-1000
import argparse
import os
from collections import Counter
from sklearn.metrics import confusion_matrix, classification_report
def read_labels(filename):
labels = []
with open(filename) as f:
for line in f:
line = line.strip()
if len(line) == 0:
continue
_, label = line.split('\t')
labels.append(label)
return labels
def compare_labels(true_labels, pred_labels):
true_set = set(true_labels)
pred_set = set(pred_labels)
print('\n▶ Label usage:')
print(' ~ Used in both: {}'.format(true_set | pred_set))
print(' ~ Extra in true: {}'.format(true_set - pred_set))
print(' ~ Extra in pred: {}'.format(pred_set - true_set))
print('\n▶ Raw counts:')
true_counts = Counter(true_labels)
pred_counts = Counter(pred_labels)
sorted_labels = sorted(true_counts, key=true_counts.get, reverse=True) + sorted(pred_set - true_set)
print('\tTrue\tPred\tDiff')
for label in sorted_labels:
diff = pred_counts[label] - true_counts[label]
direction = '+' if diff > 0 else '-' if diff < 0 else ' '
if diff < 0:
diff = -diff
print('{}\t{}\t{}\t{}{:4}'.format(label, true_counts[label], pred_counts[label], direction, diff))
print('\n▶ Confusion matrix:')
sorted_labels = sorted(true_set | pred_set)
padded_labels = [lab + ' ' * (4 - len(lab)) if len(lab) < 8 else lab for lab in sorted_labels]
cm = confusion_matrix(true_labels, pred_labels, labels=sorted_labels)
print(' \tpredicted:')
print(' \t' + '\t'.join(padded_labels))
for i in range(len(cm)):
prefix = 'true: ' if i == 0 else ' ' * 6
prefix += padded_labels[i]
print(prefix + '\t' + '\t'.join([str(n) for n in cm[i]]))
print('\n▶ Classification report:')
print(classification_report(true_labels, pred_labels, digits=3))
print('\n▶ Classification report w/o O label:')
print(classification_report(true_labels, pred_labels, labels=list(true_set - {'O'}), digits=3))
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--path", default=None, type=str, required=True, help="Base path")
parser.add_argument("--name", default=None, type=str, required=True, help="File name [train,dev,test]")
args = parser.parse_args()
true_path = os.path.join(args.path, args.name + '.true.tsv')
pred_path = os.path.join(args.path, args.name + '.pred.tsv')
true_labels = read_labels(true_path)
print('▶ Read true labels from {}'.format(true_path))
pred_labels = read_labels(pred_path)
print('▶ Read pred labels from {}'.format(pred_path))
if len(true_labels) != len(pred_labels):
print('True and pred file do not have the same amount of labels ({} and {})'.format(
len(true_labels), len(pred_labels)))
exit(-1)
print('\nFull label comparison:')
compare_labels(true_labels, pred_labels)
if set([lab[0] for lab in true_labels]) == {'B', 'I', 'O'}:
true_label_cats = [lab if lab == 'O' else lab[2:] for lab in true_labels]
pred_label_cats = [lab if lab == 'O' else lab[2:] for lab in pred_labels]
print('\nBIO category comparison:')
compare_labels(true_label_cats, pred_label_cats)
if 'O' in true_labels:
true_label_binary = ['O' if lab == 'O' else 'X' for lab in true_labels]
pred_label_binary = ['O' if lab == 'O' else 'X' for lab in pred_labels]
print('\nBinary comparison:')
compare_labels(true_label_binary, pred_label_binary)
if __name__ == '__main__':
main()
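# Example invocation (path and name are placeholders): the script reads
# <path>/<name>.true.tsv and <path>/<name>.pred.tsv, each containing
# token<TAB>label lines:
#   python evaluate.py --path output/ner --name test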
| 2.671875 | 3 |
src/download_and_extract_zip.py | jufu/DSCI_522_Group_10 | 0 | 12798565 | <reponame>jufu/DSCI_522_Group_10
# author: <NAME>
# date: 2020-11-19
"""Downloads a zip file and extracts all to a specified path
Usage: download_and_extract_zip.py --url=<url> --out_file=<out_file>
Options:
<url> URL to download zip file from (must be a zip file with no password)
<out_file> Path of the directory where the zip file contents are extracted to
Example:
python download_and_extract_zip.py --url=https://archive.ics.uci.edu/ml/machine-learning-databases/00222/bank-additional.zip --out_file="../data/raw/"
"""
import zipfile
import requests
# from tqdm import tqdm
from zipfile import BadZipFile
from io import BytesIO
from docopt import docopt
opt = docopt(__doc__)
def main(url, out_path):
"""[summary]
Parameters
----------
url : string
URL to download zip file from (must be a zip file with no password)
out_path : string
Path to extract the zip file contents to
Example
----------
main(f"https://archive.ics.uci.edu/ml/machine-learning-databases/00222/bank-additional.zip", "../data/raw/"
"""
try:
request = requests.get(url)
zipdoc = zipfile.ZipFile(BytesIO(request.content))
for name in zipdoc.namelist():
print("Extracting... {0}{1}".format(out_path, name))
zipdoc.extract(name, out_path)
zipdoc.close()
print("Done extracting files from the ZipFile")
except BadZipFile as b:
print("Error: ", b)
except Exception as e:
print("Error: ", e)
if __name__ == "__main__":
main(opt["--url"], opt["--out_file"])
| 3.578125 | 4 |
tdcosim/test/der_test_manual_config.py | tdcosim/TDcoSim | 18 | 12798566 | <gh_stars>10-100
# -*- coding: utf-8 -*-
"""
Created on Thu May 7 16:21:41 2020
@author: splathottam
"""
import os
import tdcosim
dirlocation= os.path.dirname(tdcosim.__file__)
dirlocation = dirlocation[0:len(dirlocation)-8]
test_config = {
"nodenumber": 11,
"filePath": [os.path.join(dirlocation,"SampleData\\DNetworks\\123Bus\\case123ZIP.dss")],
"solarFlag":1,
"DERFilePath": os.path.join(dirlocation,"examples\\config_der.json"),
"initializeWithActual":True,
"DERSetting":"default",
"DERModelType":"ThreePhaseUnbalanced",
"DERParameters":{
"default":{
"solarPenetration":0.01,
"solarPenetrationUnit":'kw',
"derId":"50",
"powerRating":50,
"VrmsRating":177.0,
"steadyStateInitialization":True,
"pvderScale": 1,
"LVRT":{"0":{"V_threshold":0.5,"t_threshold":0.1,"mode":"momentary_cessation"},
"1":{"V_threshold":0.7,"t_threshold":1.0,"mode":"mandatory_operation"},
"2":{"V_threshold":0.88,"t_threshold":2.0,"mode":"mandatory_operation"}},
"HVRT":{"0":{"V_threshold":1.12,"t_threshold":0.016,"mode":"momentary_cessation"},
"1":{"V_threshold":1.06,"t_threshold":3.0,"mode":"momentary_cessation"}},
"output_restore_delay":0.4
},
"PVPlacement":{"50":{"derId":"250","powerRating":250,"pvderScale":1,"VrmsRating":230.0},
"25":{"derId":"50","powerRating":50,"pvderScale":1}
},
"avoidNodes": ['sourcebus','rg60'],
"dt":1/120.0
}
} | 1.65625 | 2 |
rlkit/rlkit/launchers/scalor_training.py | martius-lab/SMORL | 9 | 12798567 | import time
from multiworld.core.image_env import ImageEnv, unormalize_image, normalize_image
from rlkit.core import logger
import cv2
import numpy as np
import os.path as osp
from rlkit.samplers.data_collector.scalor_env import WrappedEnvPathCollector as SCALORWrappedEnvPathCollector
from rlkit.torch.scalor.scalor import SCALOR
from rlkit.util.video import dump_video
from rlkit.util.io import load_local_or_remote_file
import rlkit.torch.pytorch_util as ptu
import gym
import multiworld
def generate_scalor_dataset(variant):
env_kwargs = variant.get('env_kwargs', None)
env_id = variant.get('env_id', None)
N = variant.get('N', 100)
rollout_length = variant.get('rollout_length', 100)
test_p = variant.get('test_p', 0.9)
use_cached = variant.get('use_cached', True)
imsize = variant.get('imsize', 64)
num_channels = variant.get('num_channels', 3)
show = variant.get('show', False)
init_camera = variant.get('init_camera', None)
dataset_path = variant.get('dataset_path', None)
oracle_dataset_using_set_to_goal = variant.get(
'oracle_dataset_using_set_to_goal', False)
random_rollout_data = variant.get('random_rollout_data', False)
random_and_oracle_policy_data = variant.get('random_and_oracle_policy_data',
False)
random_and_oracle_policy_data_split = variant.get(
'random_and_oracle_policy_data_split', 0)
policy_file = variant.get('policy_file', None)
n_random_steps = 1
scalor_dataset_specific_env_kwargs = variant.get(
'scalor_dataset_specific_env_kwargs', None)
save_file_prefix = variant.get('save_file_prefix', None)
tag = variant.get('tag', '')
if env_kwargs is None:
env_kwargs = {}
if save_file_prefix is None:
save_file_prefix = env_id
filename = "./data/tmp/{}_N{}_rollout_length{}_imsize{}_{}{}.npz".format(
save_file_prefix,
str(N),
str(rollout_length),
init_camera.__name__ if init_camera else '',
imsize,
tag,
)
import os
if not osp.exists('./data/tmp/'):
os.makedirs('./data/tmp/')
info = {}
import os
if not os.path.exists("./data/tmp/"):
os.makedirs("./data/tmp/")
if use_cached and osp.isfile(filename):
dataset = np.load(filename)
print("loaded data from saved file", filename)
else:
now = time.time()
multiworld.register_all_envs()
env = gym.make(env_id)
if not isinstance(env, ImageEnv):
env = ImageEnv(
env,
imsize,
init_camera=init_camera,
transpose=True,
normalize=True,
non_presampled_goal_img_is_garbage=True,
)
env.reset()
act_dim = env.action_space.low.size
info['env'] = env
imgs = np.zeros((N, rollout_length, imsize * imsize * num_channels),
dtype=np.uint8)
actions = np.zeros((N, rollout_length, act_dim))
for i in range(N):
env.reset()
for j in range(rollout_length):
action = env.action_space.sample()
obs = env.step(action)[0]
img = obs['image_observation']
imgs[i, j, :] = unormalize_image(img)
actions[i,j, :] = action
if show:
img = img.reshape(3, imsize, imsize).transpose()
img = img[::-1, :, ::-1]
cv2.imshow('img', img)
cv2.waitKey(1)
print("done making training data", filename, time.time() - now)
dataset = {"imgs": imgs, "actions": actions}
print(imgs.shape)
# np.savez(filename, **dataset)
return dataset, info
def scalor_training(variant):
scalor_params = variant.get("scalor_params", dict())
scalor_params["logdir"] = logger.get_snapshot_dir()
scalor = SCALOR(**scalor_params)
data, info = generate_scalor_dataset(variant['generate_scalor_dataset_kwargs'])
imgs, actions = data["imgs"], data["actions"]
imgs = normalize_image(imgs)
scalor.train(imgs=imgs, actions=actions) | 1.84375 | 2 |
git_functions.py | psyonara/pylint-diff | 0 | 12798568 | import subprocess
def is_branch_merged(branch):
"""
Checks if given branch is merged into current branch.
:param branch: Name of branch
:return: True/False
"""
proc = subprocess.Popen(["git", "branch", "--merged"], stdout=subprocess.PIPE)
result = proc.stdout.read().decode()
return branch in result.strip().split("\n")
def get_file_contents_from_branch(filename, branch_name):
"""
Gets the contents of a file from a specific branch.
:param filename: Name of the file
:param branch_name: Name of the branch
:return: Contents of the file
"""
proc = subprocess.Popen(
["git", "show", "%s:%s" % (branch_name, filename)], stdout=subprocess.PIPE
)
return proc.stdout.read().decode()
def get_current_branch_name():
"""
Gets the name of the current git branch in the working directory.
:return: Name of the branch
"""
proc = subprocess.Popen(["git", "rev-parse", "--abbrev-ref", "HEAD"], stdout=subprocess.PIPE)
return proc.stdout.read().decode()
def get_changed_files(branch1, branch2):
"""
Gets a list of changed files between two branches.
:param branch1: name of first branch
:param branch2: name of second branch
:return: A list of changed files
"""
proc = subprocess.Popen(
["git", "diff", "--name-only", branch1, branch2], stdout=subprocess.PIPE
)
return proc.stdout.read().decode()
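# A small usage sketch; assumes it is run inside a git repository with at least two commits.
if __name__ == "__main__":
    print("Current branch:", get_current_branch_name().strip())
    print("Files changed between HEAD~1 and HEAD:")
    print(get_changed_files("HEAD~1", "HEAD"))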
| 3.53125 | 4 |
examples/tutorials/frames.py | yohaimagen/pygmt | 0 | 12798569 | """
Frames, ticks, titles, and labels
=================================
Setting the style of the map frames, ticks, etc., is handled by the ``frame``
argument that all plotting methods of :class:`pygmt.Figure` accept.
"""
import pygmt
########################################################################################
# Plot frame
# ----------
#
# By default, PyGMT does not add a frame to your plot. For example, we can plot the
# coastlines of the world with a Mercator projection:
fig = pygmt.Figure()
fig.coast(shorelines="1/0.5p", region=[-180, 180, -60, 60], projection="M25c")
fig.show()
########################################################################################
# To add the default GMT frame to the plot, use ``frame="f"`` in
# :meth:`pygmt.Figure.basemap` or any other plotting module:
fig = pygmt.Figure()
fig.coast(shorelines="1/0.5p", region=[-180, 180, -60, 60], projection="M25c")
fig.basemap(frame="f")
fig.show()
########################################################################################
# Ticks and grid lines
# --------------------
#
# The automatic frame (``frame=True`` or ``frame="a"``) sets the default GMT style frame
# and automatically determines tick labels from the plot region.
fig = pygmt.Figure()
fig.coast(shorelines="1/0.5p", region=[-180, 180, -60, 60], projection="M25c")
fig.basemap(frame="a")
fig.show()
########################################################################################
# Add automatic grid lines to the plot by adding a ``g`` to ``frame``:
fig = pygmt.Figure()
fig.coast(shorelines="1/0.5p", region=[-180, 180, -60, 60], projection="M25c")
fig.basemap(frame="ag")
fig.show()
########################################################################################
# Title
# -----
#
# The figure title can be set by passing **+t**\ *title* to the ``frame`` parameter of
# :meth:`pygmt.Figure.basemap`. Passing multiple arguments to ``frame`` can be done by
# using a list, as shown in the example below.
fig = pygmt.Figure()
# region="IS" specifies Iceland using the ISO country code
fig.coast(shorelines="1/0.5p", region="IS", projection="M25c")
fig.basemap(frame=["a", "+tIceland"])
fig.show()
########################################################################################
# To use a title with multiple words, the title must be placed inside another set of
# quotation marks. To prevent the quotation marks from appearing in the figure title,
# the frame argument can be passed in single quotation marks and the title can be
# passed in double quotation marks.
fig = pygmt.Figure()
# region="TT" specifies Trinidad and Tobago
fig.coast(shorelines="1/0.5p", region="TT", projection="M25c")
fig.basemap(frame=["a", '+t"Trinidad and Tobago"'])
fig.show()
########################################################################################
# Axis labels
# -----------
#
# Axis labels can be set by passing **x+l**\ *label* (or starting with y if
# labeling the y-axis) to the ``frame`` parameter of :meth:`pygmt.Figure.basemap`.
# Axis labels will be displayed on all primary axes, which by default are all
# sides of the figure. To designate only some of the axes as primary, pass an
# argument that capitalizes only the primary axes, such as ``"WSne"`` in the
# example below. The letters correspond to the west (left), south (bottom),
# north (top), and east (right) sides of a figure.
#
# The example below used a Cartesian projection, as GMT does not allow axis labels to
# be set for geographic maps.
fig = pygmt.Figure()
fig.basemap(
region=[0, 10, 0, 20],
projection="X10c/8c",
frame=["WSne", "x+lx-axis", "y+ly-axis"],
)
fig.show()
| 3.71875 | 4 |
bot/migrations/0006_auto_20171229_2354.py | CdecPGL/LBot | 1 | 12798570 | # Generated by Django 2.0 on 2017-12-29 14:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bot', '0005_auto_20171229_2354'),
]
operations = [
migrations.AlterField(
model_name='user',
name='authority',
field=models.IntegerField(choices=[(0, 'Master'), (1, 'Editor'), (2, 'Watcher')]),
),
]
| 1.726563 | 2 |
upf_queue.py | ckrybus/redis-unique-priority-queue | 0 | 12798571 | <reponame>ckrybus/redis-unique-priority-queue
import itertools
lua_insert_script = """
local key = KEYS[1]
for i=1, #ARGV, 2 do
local priority = ARGV[i]
local member = ARGV[i + 1]
local score = redis.call('zscore', key, member)
if not score then
-- add, because the element is not yet in the queue
local auto_increment = redis.call('incr', '_'..key..'_seq')
score = priority..string.format('%015d', auto_increment)
redis.call('ZADD', key, score, member)
else
-- update only if the priority is higher
local current_priority = string.sub(score, 1, 1)
if tonumber(priority) < tonumber(current_priority) then
score = priority..string.sub(score, 2)
redis.call('ZADD', key, score, member)
end
end
end
"""
HIGHEST_PRIORITY = 0
LOWEST_PRIORITY = 9
class UniquePriorityFifoQueue:
"""
A redis queue which:
* preserves ordering: first in, first out (FIFO)
* doesn't allow duplicates
When a duplicate item with a higher priority is inserted the priority
will be overwritten.
When a duplicate item with a lower priority is inserted nothing happens.
* has 10 priority classes
Items are returned from the highest to the lowest priority. Only after
all items with a high priority have been popped, items with a lower
priority will be returned.
The priority parameter is an integer in the range 0-9.
0 is the highest priority, 9 ist the lowest priority.
Default priority ist 9.
"""
def __init__(self, name, redis):
self.name = name
self._redis = redis
self.lua_special_zadd = self._redis.register_script(lua_insert_script)
def __len__(self):
return self._redis.zcard(self.name)
def count(self, priority=None):
if priority is None:
return len(self)
idx = '{}000000000000000'
if priority == 0:
score_range = ('-inf', idx.format('(1'))
elif priority < 9:
score_range = (idx.format(priority),
idx.format(f'({priority + 1}'))
elif priority == 9:
score_range = (idx.format('9'), '+inf')
else:
raise ValueError('TODO')
return self._redis.zcount(self.name, *score_range)
def insert(self, keys, priority=LOWEST_PRIORITY, chunk_size=5000):
for keys_chunk in chunks(keys, chunk_size):
# redis expects: priority1, member1, priority2, member2, ...
args = itertools.chain(*((priority, k) for k in keys_chunk))
self.lua_special_zadd(keys=[self.name], args=args)
def delete(self, keys):
if keys:
return self._redis.zrem(self.name, *keys)
def pop(self, count):
assert 0 < count <= 25000
with self._redis.pipeline() as pipe:
pipe.multi()
pipe.zrange(self.name, 0, count - 1)
pipe.zremrangebyrank(self.name, 0, count - 1)
items, _ = pipe.execute()
for value in items:
yield value.decode('utf-8')
def chunks(iterable, size):
"""
A generator which returns chunks of `size` elements until there are no
more items left in the iterable.
"""
it = iter(iterable)
item = list(itertools.islice(it, size))
while item:
yield item
item = list(itertools.islice(it, size))
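# A small usage sketch; assumes the redis-py package and a Redis server on
# localhost:6379 are available.
if __name__ == "__main__":
    import redis

    queue = UniquePriorityFifoQueue("demo-queue", redis.Redis())
    queue.insert(["job-a", "job-b"], priority=3)
    queue.insert(["job-b"], priority=0)  # duplicate with a higher priority: its priority is raised
    print(len(queue))                    # 2
    print(list(queue.pop(10)))           # "job-b" is returned before "job-a"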
| 3.15625 | 3 |
pstock/base.py | obendidi/pstock | 5 | 12798572 | <filename>pstock/base.py
from __future__ import annotations
import typing as tp
from abc import ABC, abstractmethod
from datetime import datetime
import pandas as pd
import pendulum
from pydantic import BaseModel as _BaseModel
from pydantic import PrivateAttr
class BaseModel(_BaseModel):
_created_at: datetime = PrivateAttr(default_factory=pendulum.now)
@property
def created_at(self) -> datetime:
return self._created_at
class BaseModelDf(BaseModel, ABC):
_df: tp.Optional[pd.DataFrame] = PrivateAttr(default=None)
@abstractmethod
def gen_df(self) -> pd.DataFrame:
...
@property
def df(self) -> pd.DataFrame:
if self._df is None:
self._df = self.gen_df()
return self._df
T = tp.TypeVar("T", bound=BaseModel)
class BaseModelSequence(tp.Generic[T], BaseModelDf):
__root__: tp.Sequence[T]
@tp.overload
def __getitem__(self, index: int) -> T:
"""Get single item from __root__ by idx."""
@tp.overload
def __getitem__(self, index: slice) -> tp.Sequence[T]:
"""Get slice of items from __root__ by idx."""
def __getitem__(self, index):
return self.__root__[index]
def __len__(self) -> int:
return len(self.__root__)
def __iter__(self) -> tp.Iterator[T]: # type: ignore
return iter(self.__root__)
def gen_df(self) -> pd.DataFrame:
return pd.DataFrame.from_dict(self.dict().get("__root__"), orient="columns")
U = tp.TypeVar("U", bound=BaseModelSequence)
class BaseModelMapping(tp.Generic[U], BaseModelDf):
__root__: tp.Mapping[str, U]
def __getitem__(self, index: str) -> U:
return self.__root__[index]
def __len__(self) -> int:
return len(self.__root__)
def __iter__(self) -> tp.Iterator[str]: # type: ignore
return iter(self.__root__)
def gen_df(self) -> pd.DataFrame:
keys, dfs = zip(*[(key, value.df) for key, value in self.__root__.items()])
return pd.concat(dfs, axis=1, keys=keys)
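# A small sketch (with a made-up model) of the created_at convenience that the
# BaseModel above provides through its private attribute and property.
if __name__ == "__main__":
    class Quote(BaseModel):
        symbol: str
        price: float

    quote = Quote(symbol="ABC", price=1.23)
    print(quote.symbol, quote.price, quote.created_at)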
| 2.5625 | 3 |
finicityapi/controllers/deprecated_controller.py | monarchmoney/finicity-python | 0 | 12798573 | <reponame>monarchmoney/finicity-python
# -*- coding: utf-8 -*-
from finicityapi.api_helper import APIHelper
from finicityapi.configuration import Configuration
from finicityapi.controllers.base_controller import BaseController
from finicityapi.http.auth.custom_header_auth import CustomHeaderAuth
from finicityapi.models.generate_connect_url_response import GenerateConnectURLResponse
from finicityapi.models.customer_accounts import CustomerAccounts
from finicityapi.models.auditable_report import AuditableReport
from finicityapi.models.add_customer_response import AddCustomerResponse
from finicityapi.models.voa_report_record import VOAReportRecord
from finicityapi.models.voie_paystub_with_txverify_report_record import VOIEPaystubWithTxverifyReportRecord
from finicityapi.models.voi_report_record import VOIReportRecord
from finicityapi.models.voa_with_income_report_record import VOAWithIncomeReportRecord
from finicityapi.models.prequalification_report_record import PrequalificationReportRecord
from finicityapi.models.pay_statement_report_record import PayStatementReportRecord
from finicityapi.models.generate_connect_email_response_multiple_borrowers import GenerateConnectEmailResponseMultipleBorrowers
from finicityapi.models.transactions_report_record import TransactionsReportRecord
from finicityapi.models.app_statuses_v_1 import AppStatusesV1
from finicityapi.models.statement_report_record import StatementReportRecord
from finicityapi.exceptions.error_1_error_exception import Error1ErrorException
class DeprecatedController(BaseController):
"""A Controller to access Endpoints in the finicityapi API."""
def generate_connect_url_all_types(self,
accept,
body):
"""Does a POST request to /connect/v1/generate.
No matter how you plan on implementing Finicity Connect, you’ll need
to generate and retrieve a Finicity Connect Link. You will need to
specify what type of Finicity Connect you need depending on what will
happen once the customer accounts and transaction data are gathered.
Below you’ll find how to generate the Connect link as well as where to
specify what type of Finicity Connect you need.
Once you have generated the link it will only last until the
authentication token under which it was generated expires. After that
you will need to regenerate the Connect link under a new
authentication token. We recommend generating a new authentication
token when you generate a Connect link, to guarantee a full two hour
life-span.
Several Finicity products utilize Finicity Connect, and most products
have their own type of Connect. The Connect type is controlled by the
“type” code in the call. Many times the type also corresponds to the
report that will be run upon completing the Connect flow.
It is best to use the documentation for the specific use case you are
interested in as the documentation here is a list of all the possible
parameters you can send for this endpoint depending on the use case.
See the following more specific documentation for your use
case.......
Generate Finicity Connect URL (Data and Payments)
Generate Finicity Connect URL (Lending)
Generate Finicity Connect URL (Lite)
Generate Finicity Connect URL (Fix)
Args:
accept (string): application/json, application/xml
body (GenerateConnectURLRequest): Expected body to be sent with
the request
Returns:
GenerateConnectURLResponse: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(accept=accept,
body=body)
# Prepare query URL
_url_path = '/connect/v1/generate'
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'content-type': 'application/json; charset=utf-8',
'Finicity-App-Key': Configuration.finicity_app_key,
'Accept': accept
}
# Prepare and execute request
_request = self.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, GenerateConnectURLResponse.from_dictionary)
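# Example call (all identifiers below are placeholders; the request body is the
# GenerateConnectURLRequest model referenced in the docstring above):
#   controller = DeprecatedController()
#   response = controller.generate_connect_url_all_types("application/json", body)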
def migrate_institution_login_accounts_v_1(self,
customer_id,
institution_login_id,
new_institution_id):
"""Does a PUT request to /aggregation/v1/customers/{customerId}/institutionLogins/{institutionLoginId}/institutions/{newInstitutionId}.
This service has been replaced by the version 2 call "Migrate
Institution Login Accounts".
This service migrates accounts from a legacy FI to a new OAuth FI.
A successful API response will return a list of accounts for the given
institution login id with an http status code as 200.
Args:
customer_id (long|int): Finicity’s ID of the customer for the
institutionLoginId of accounts
institution_login_id (long|int): Finicity's institutionLoginId for
the set of accounts to be migrated
new_institution_id (long|int): New OAuth FI ID where accounts
will be migrated
Returns:
CustomerAccounts: Response from the API. default response
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(customer_id=customer_id,
institution_login_id=institution_login_id,
new_institution_id=new_institution_id)
# Prepare query URL
_url_path = '/aggregation/v1/customers/{customerId}/institutionLogins/{institutionLoginId}/institutions/{newInstitutionId}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'customerId': customer_id,
'institutionLoginId': institution_login_id,
'newInstitutionId': new_institution_id
})
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'Finicity-App-Key': Configuration.finicity_app_key
}
# Prepare and execute request
_request = self.http_client.put(_query_url, headers=_headers)
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, CustomerAccounts.from_dictionary)
def get_report_by_customer(self,
customer_id,
report_id,
accept,
content_type,
on_behalf_of=None,
purpose=None):
"""Does a GET request to /decisioning/v1/customers/{customerId}/reports/{reportId}.
Get a report that has been generated by calling one of the Generate
Report services.
The report's status field will contain inProgress, failure, or
success. If the status shows inProgress, the client app should wait 20
seconds and then call again to see if the report is finished.
See Permissible Purpose Codes for a list of permissible purposes for
retrieving a report.
Args:
customer_id (long|int): Finicity’s ID of the customer
report_id (string): Finicity’s ID of the report
accept (string): Replace 'json' with 'xml' if preferred
content_type (string): Replace 'json' with 'xml' if preferred
on_behalf_of (string, optional): The name of the entity you are
retrieving the report on behalf of.
purpose (string, optional): 2-digit code from Permissible Purpose
Codes, specifying the reason for retrieving this report.
Returns:
AuditableReport: Response from the API. OK
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(customer_id=customer_id,
report_id=report_id,
accept=accept,
content_type=content_type)
# Prepare query URL
_url_path = '/decisioning/v1/customers/{customerId}/reports/{reportId}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'customerId': customer_id,
'reportId': report_id
})
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_parameters = {
'onBehalfOf': on_behalf_of,
'purpose': purpose
}
_query_builder = APIHelper.append_url_with_query_parameters(_query_builder,
_query_parameters, Configuration.array_serialization)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'Finicity-App-Key': Configuration.finicity_app_key,
'Accept': accept,
'Content-Type': content_type
}
# Prepare and execute request
_request = self.http_client.get(_query_url, headers=_headers)
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
# Endpoint and global error handling using HTTP status codes.
if _context.response.status_code == 400:
raise Error1ErrorException('Bad Request', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, AuditableReport.from_dictionary)
def get_report_by_consumer(self,
consumer_id,
report_id,
accept,
content_type,
on_behalf_of=None,
purpose=None):
"""Does a GET request to /decisioning/v1/consumers/{consumerId}/reports/{reportId}.
Get a report that has been generated by calling one of the Generate
Report services.
The report's status field will contain inProgress, failure, or
success. If the status shows inProgress, the client app should wait 20
seconds and then call again to see if the report is finished.
See Permissible Purpose Codes for a list of permissible purposes for
retrieving a report.
Args:
consumer_id (string): Finicity’s ID of the consumer (UUID with max
length 32 characters)
report_id (string): Finicity’s ID of the report (UUID with max
length 32 characters)
accept (string): Replace 'json' with 'xml' if preferred
content_type (string): Replace 'json' with 'xml' if preferred
on_behalf_of (string, optional): The name of the entity you are
retrieving the report on behalf of.
purpose (string, optional): 2-digit code from Permissible Purpose
Codes, specifying the reason for retrieving this report.
Returns:
AuditableReport: Response from the API. OK
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(consumer_id=consumer_id,
report_id=report_id,
accept=accept,
content_type=content_type)
# Prepare query URL
_url_path = '/decisioning/v1/consumers/{consumerId}/reports/{reportId}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'consumerId': consumer_id,
'reportId': report_id
})
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_parameters = {
'onBehalfOf': on_behalf_of,
'purpose': purpose
}
_query_builder = APIHelper.append_url_with_query_parameters(_query_builder,
_query_parameters, Configuration.array_serialization)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'Finicity-App-Key': Configuration.finicity_app_key,
'Accept': accept,
'Content-Type': content_type
}
# Prepare and execute request
_request = self.http_client.get(_query_url, headers=_headers)
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
# Endpoint and global error handling using HTTP status codes.
if _context.response.status_code == 400:
raise Error1ErrorException('Bad Request', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, AuditableReport.from_dictionary)
def add_testing_customer_v_1(self,
content_type,
accept,
body):
"""Does a POST request to /aggregation/v1/customers/testing.
Enroll a testing customer. A testing customer may only register
accounts with FinBank institutions.
Args:
content_type (string): application/json, application/xml
accept (string): application/json, application/xml
body (AddCustomerRequest): The Fields For The New Testing
Customer
Returns:
AddCustomerResponse: Response from the API. default response
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(content_type=content_type,
accept=accept,
body=body)
# Prepare query URL
_url_path = '/aggregation/v1/customers/testing'
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'Finicity-App-Key': Configuration.finicity_app_key,
'Content-Type': content_type,
'Accept': accept
}
# Prepare and execute request
_request = self.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, AddCustomerResponse.from_dictionary)
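# Usage sketch (comments only, nothing executes). Enrolling a testing
# customer; the AddCustomerRequest attribute names below are assumptions,
# so confirm them against the generated models package before use:
#
#   body = AddCustomerRequest()
#   body.username = 'customer_test_0001'  # hypothetical unique username
#   body.first_name = 'Jane'              # hypothetical
#   body.last_name = 'Doe'                # hypothetical
#   response = controller.add_testing_customer_v_1('application/json',
#                                                   'application/json',
#                                                   body)
#   print(response.id)  # assuming the response exposes the new customer ID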
def get_voa_report_by_consumer(self,
consumer_id,
report_id,
accept,
content_type,
on_behalf_of=None,
purpose=None):
"""Does a GET request to /decisioning/v1/consumers/{consumerId}/reports/{reportId}.
Get a report that has been generated by calling one of the Generate
Report services.
The report's status field will contain inProgress, failure, or
success. If the status shows inProgress, the client app should wait 20
seconds and then call again to see if the report is finished.
See Permissible Purpose Codes for a list of permissible purposes for
retrieving a report.
Args:
consumer_id (string): Finicity’s ID of the consumer (UUID with max
length 32 characters)
report_id (string): Finicity’s ID of the report
accept (string): Replace 'json' with 'xml' if preferred
content_type (string): Replace 'json' with 'xml' if preferred
on_behalf_of (string, optional): The name of the entity you are
retrieving the report on behalf of.
purpose (string, optional): 2-digit code from Permissible Purpose
Codes, specifying the reason for retrieving this report.
Returns:
VOAReportRecord: Response from the API. OK
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(consumer_id=consumer_id,
report_id=report_id,
accept=accept,
content_type=content_type)
# Prepare query URL
_url_path = '/decisioning/v1/consumers/{consumerId}/reports/{reportId}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'consumerId': consumer_id,
'reportId': report_id
})
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_parameters = {
'onBehalfOf': on_behalf_of,
'purpose': purpose
}
_query_builder = APIHelper.append_url_with_query_parameters(_query_builder,
_query_parameters, Configuration.array_serialization)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'Finicity-App-Key': Configuration.finicity_app_key,
'Accept': accept,
'Content-Type': content_type
}
# Prepare and execute request
_request = self.http_client.get(_query_url, headers=_headers)
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
# Endpoint and global error handling using HTTP status codes.
if _context.response.status_code == 400:
raise Error1ErrorException('Bad Request', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, VOAReportRecord.from_dictionary)
def get_voie_txverify_report_by_customer(self,
customer_id,
report_id,
accept,
content_type,
on_behalf_of=None,
purpose=None):
"""Does a GET request to /decisioning/v1/customers/{customerId}/reports/{reportId}.
Get a report that has been generated by calling one of the Generate
Report services.
The report's status field will contain inProgress, failure, or
success. If the status shows inProgress, the client app should wait 20
seconds and then call again to see if the report is finished.
See Permissible Purpose Codes for a list of permissible purposes for
retrieving a report.
Args:
customer_id (long|int): Finicity’s ID of the customer
report_id (string): Finicity’s ID of the report
accept (string): Replace 'json' with 'xml' if preferred
content_type (string): Replace 'json' with 'xml' if preferred
on_behalf_of (string, optional): The name of the entity you are
retrieving the report on behalf of.
purpose (string, optional): 2-digit code from Permissible Purpose
Codes, specifying the reason for retrieving this report.
Returns:
VOIEPaystubWithTxverifyReportRecord: Response from the API. OK
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(customer_id=customer_id,
report_id=report_id,
accept=accept,
content_type=content_type)
# Prepare query URL
_url_path = '/decisioning/v1/customers/{customerId}/reports/{reportId}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'customerId': customer_id,
'reportId': report_id
})
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_parameters = {
'onBehalfOf': on_behalf_of,
'purpose': purpose
}
_query_builder = APIHelper.append_url_with_query_parameters(_query_builder,
_query_parameters, Configuration.array_serialization)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'Finicity-App-Key': Configuration.finicity_app_key,
'Accept': accept,
'Content-Type': content_type
}
# Prepare and execute request
_request = self.http_client.get(_query_url, headers=_headers)
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
# Endpoint and global error handling using HTTP status codes.
if _context.response.status_code == 400:
raise Error1ErrorException('Bad Request', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, VOIEPaystubWithTxverifyReportRecord.from_dictionary)
def get_voi_report_by_consumer(self,
consumer_id,
report_id,
accept,
content_type,
on_behalf_of=None,
purpose=None):
"""Does a GET request to /decisioning/v1/consumers/{consumerId}/reports/{reportId}.
Get a report that has been generated by calling one of the Generate
Report services.
The report's status field will contain inProgress, failure, or
success. If the status shows inProgress, the client app should wait 20
seconds and then call again to see if the report is finished.
See Permissible Purpose Codes for a list of permissible purposes for
retrieving a report.
Args:
consumer_id (string): Finicity’s ID of the consumer (UUID with max
length 32 characters)
report_id (string): Finicity’s ID of the report
accept (string): Replace 'json' with 'xml' if preferred
content_type (string): Replace 'json' with 'xml' if preferred
on_behalf_of (string, optional): The name of the entity you are
retrieving the report on behalf of.
purpose (string, optional): 2-digit code from Permissible Purpose
Codes, specifying the reason for retrieving this report.
Returns:
VOIReportRecord: Response from the API. OK
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(consumer_id=consumer_id,
report_id=report_id,
accept=accept,
content_type=content_type)
# Prepare query URL
_url_path = '/decisioning/v1/consumers/{consumerId}/reports/{reportId}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'consumerId': consumer_id,
'reportId': report_id
})
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_parameters = {
'onBehalfOf': on_behalf_of,
'purpose': purpose
}
_query_builder = APIHelper.append_url_with_query_parameters(_query_builder,
_query_parameters, Configuration.array_serialization)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'Finicity-App-Key': Configuration.finicity_app_key,
'Accept': accept,
'Content-Type': content_type
}
# Prepare and execute request
_request = self.http_client.get(_query_url, headers=_headers)
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
# Endpoint and global error handling using HTTP status codes.
if _context.response.status_code == 400:
raise Error1ErrorException('Bad Request', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, VOIReportRecord.from_dictionary)
def get_voi_report_by_customer(self,
customer_id,
report_id,
accept,
content_type,
on_behalf_of=None,
purpose=None):
"""Does a GET request to /decisioning/v1/customers/{customerId}/reports/{reportId}.
Get a report that has been generated by calling one of the Generate
Report services.
The report's status field will contain inProgress, failure, or
success. If the status shows inProgress, the client app should wait 20
seconds and then call again to see if the report is finished.
See Permissible Purpose Codes for a list of permissible purposes for
retrieving a report.
Args:
customer_id (long|int): Finicity’s ID of the customer
report_id (string): Finicity’s ID of the report (UUID with max
length 32 characters)
accept (string): Replace 'json' with 'xml' if preferred
content_type (string): Replace 'json' with 'xml' if preferred
on_behalf_of (string, optional): The name of the entity you are
retrieving the report on behalf of.
purpose (string, optional): 2-digit code from Permissible Purpose
Codes, specifying the reason for retrieving this report.
Returns:
VOIReportRecord: Response from the API. OK
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(customer_id=customer_id,
report_id=report_id,
accept=accept,
content_type=content_type)
# Prepare query URL
_url_path = '/decisioning/v1/customers/{customerId}/reports/{reportId}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'customerId': customer_id,
'reportId': report_id
})
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_parameters = {
'onBehalfOf': on_behalf_of,
'purpose': purpose
}
_query_builder = APIHelper.append_url_with_query_parameters(_query_builder,
_query_parameters, Configuration.array_serialization)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'Finicity-App-Key': Configuration.finicity_app_key,
'Accept': accept,
'Content-Type': content_type
}
# Prepare and execute request
_request = self.http_client.get(_query_url, headers=_headers)
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
# Endpoint and global error handling using HTTP status codes.
if _context.response.status_code == 400:
raise Error1ErrorException('Bad Request', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, VOIReportRecord.from_dictionary)
def get_voa_with_income_report_by_consumer(self,
consumer_id,
report_id,
accept,
content_type,
on_behalf_of=None,
purpose=None):
"""Does a GET request to /decisioning/v1/consumers/{consumerId}/reports/{reportId}.
Get a report that has been generated by calling one of the Generate
Report services.
The report's status field will contain inProgress, failure, or
success. If the status shows inProgress, the client app should wait 20
seconds and then call again to see if the report is finished.
See Permissible Purpose Codes for a list of permissible purposes for
retrieving a report.
Args:
consumer_id (string): Finicity’s ID of the consumer (UUID with max
length 32 characters)
report_id (string): Finicity’s ID of the report
accept (string): Replace 'json' with 'xml' if preferred
content_type (string): Replace 'json' with 'xml' if preferred
on_behalf_of (string, optional): The name of the entity you are
retrieving the report on behalf of.
purpose (string, optional): 2-digit code from Permissible Purpose
Codes, specifying the reason for retrieving this report.
Returns:
VOAWithIncomeReportRecord: Response from the API. OK
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(consumer_id=consumer_id,
report_id=report_id,
accept=accept,
content_type=content_type)
# Prepare query URL
_url_path = '/decisioning/v1/consumers/{consumerId}/reports/{reportId}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'consumerId': consumer_id,
'reportId': report_id
})
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_parameters = {
'onBehalfOf': on_behalf_of,
'purpose': purpose
}
_query_builder = APIHelper.append_url_with_query_parameters(_query_builder,
_query_parameters, Configuration.array_serialization)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'Finicity-App-Key': Configuration.finicity_app_key,
'Accept': accept,
'Content-Type': content_type
}
# Prepare and execute request
_request = self.http_client.get(_query_url, headers=_headers)
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
# Endpoint and global error handling using HTTP status codes.
if _context.response.status_code == 400:
raise Error1ErrorException('Bad Request', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, VOAWithIncomeReportRecord.from_dictionary)
def get_voa_with_income_report_by_customer(self,
customer_id,
report_id,
accept,
content_type,
on_behalf_of=None,
purpose=None):
"""Does a GET request to /decisioning/v1/customers/{customerId}/reports/{reportId}.
Get a report that has been generated by calling one of the Generate
Report services.
The report's status field will contain inProgress, failure, or
success. If the status shows inProgress, the client app should wait 20
seconds and then call again to see if the report is finished.
See Permissible Purpose Codes for a list of permissible purposes for
retrieving a report.
Args:
customer_id (long|int): Finicity’s ID of the customer
report_id (string): Finicity’s ID of the report
accept (string): Replace 'json' with 'xml' if preferred
content_type (string): Replace 'json' with 'xml' if preferred
on_behalf_of (string, optional): The name of the entity you are
retrieving the report on behalf of.
purpose (string, optional): 2-digit code from Permissible Purpose
Codes, specifying the reason for retrieving this report.
Returns:
VOAWithIncomeReportRecord: Response from the API. OK
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(customer_id=customer_id,
report_id=report_id,
accept=accept,
content_type=content_type)
# Prepare query URL
_url_path = '/decisioning/v1/customers/{customerId}/reports/{reportId}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'customerId': customer_id,
'reportId': report_id
})
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_parameters = {
'onBehalfOf': on_behalf_of,
'purpose': purpose
}
_query_builder = APIHelper.append_url_with_query_parameters(_query_builder,
_query_parameters, Configuration.array_serialization)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'Finicity-App-Key': Configuration.finicity_app_key,
'Accept': accept,
'Content-Type': content_type
}
# Prepare and execute request
_request = self.http_client.get(_query_url, headers=_headers)
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
# Endpoint and global error handling using HTTP status codes.
if _context.response.status_code == 400:
raise Error1ErrorException('Bad Request', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, VOAWithIncomeReportRecord.from_dictionary)
def get_prequalification_voa_report_by_customer(self,
customer_id,
report_id,
accept,
content_type,
on_behalf_of=None,
purpose=None):
"""Does a GET request to /decisioning/v1/customers/{customerId}/reports/{reportId}.
Get a report that has been generated by calling one of the Generate
Report services.
The report's status field will contain inProgress, failure, or
success. If the status shows inProgress, the client app should wait 20
seconds and then call again to see if the report is finished.
See Permissible Purpose Codes for a list of permissible purposes for
retrieving a report.
Args:
customer_id (long|int): Finicity’s ID of the customer
report_id (string): Finicity’s ID of the report
accept (string): Replace 'json' with 'xml' if preferred
content_type (string): Replace 'json' with 'xml' if preferred
on_behalf_of (string, optional): The name of the entity you are
retrieving the report on behalf of.
purpose (string, optional): 2-digit code from Permissible Purpose
Codes, specifying the reason for retrieving this report.
Returns:
PrequalificationReportRecord: Response from the API. OK
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(customer_id=customer_id,
report_id=report_id,
accept=accept,
content_type=content_type)
# Prepare query URL
_url_path = '/decisioning/v1/customers/{customerId}/reports/{reportId}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'customerId': customer_id,
'reportId': report_id
})
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_parameters = {
'onBehalfOf': on_behalf_of,
'purpose': purpose
}
_query_builder = APIHelper.append_url_with_query_parameters(_query_builder,
_query_parameters, Configuration.array_serialization)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'Finicity-App-Key': Configuration.finicity_app_key,
'Accept': accept,
'Content-Type': content_type
}
# Prepare and execute request
_request = self.http_client.get(_query_url, headers=_headers)
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
# Endpoint and global error handling using HTTP status codes.
if _context.response.status_code == 400:
raise Error1ErrorException('Bad Request', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, PrequalificationReportRecord.from_dictionary)
def get_prequalification_report_by_consumer(self,
consumer_id,
report_id,
accept,
content_type,
on_behalf_of=None,
purpose=None):
"""Does a GET request to /decisioning/v1/consumers/{consumerId}/reports/{reportId}.
Get a report that has been generated by calling one of the Generate
Report services.
The report's status field will contain inProgress, failure, or
success. If the status shows inProgress, the client app should wait 20
seconds and then call again to see if the report is finished.
See Permissible Purpose Codes for a list of permissible purposes for
retrieving a report.
Args:
consumer_id (string): Finicity’s ID of the consumer (UUID with max
length 32 characters)
report_id (string): Finicity’s ID of the report
accept (string): Replace 'json' with 'xml' if preferred
content_type (string): Replace 'json' with 'xml' if preferred
on_behalf_of (string, optional): The name of the entity you are
retrieving the report on behalf of.
purpose (string, optional): 2-digit code from Permissible Purpose
Codes, specifying the reason for retrieving this report.
Returns:
PrequalificationReportRecord: Response from the API. OK
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(consumer_id=consumer_id,
report_id=report_id,
accept=accept,
content_type=content_type)
# Prepare query URL
_url_path = '/decisioning/v1/consumers/{consumerId}/reports/{reportId}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'consumerId': consumer_id,
'reportId': report_id
})
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_parameters = {
'onBehalfOf': on_behalf_of,
'purpose': purpose
}
_query_builder = APIHelper.append_url_with_query_parameters(_query_builder,
_query_parameters, Configuration.array_serialization)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'Finicity-App-Key': Configuration.finicity_app_key,
'Accept': accept,
'Content-Type': content_type
}
# Prepare and execute request
_request = self.http_client.get(_query_url, headers=_headers)
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
# Endpoint and global error handling using HTTP status codes.
if _context.response.status_code == 400:
raise Error1ErrorException('Bad Request', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, PrequalificationReportRecord.from_dictionary)
def get_pay_statement_by_consumer(self,
consumer_id,
report_id,
accept,
content_type,
on_behalf_of=None,
purpose=None):
"""Does a GET request to /decisioning/v1/consumers/{consumerId}/reports/{reportId}.
Get a report that has been generated by calling one of the Generate
Report services.
The report's status field will contain inProgress, failure, or
success. If the status shows inProgress, the client app should wait 20
seconds and then call again to see if the report is finished.
See Permissible Purpose Codes for a list of permissible purposes for
retrieving a report.
Args:
consumer_id (string): Finicity’s ID of the consumer (UUID with max
length 32 characters)
report_id (string): Finicity’s ID of the report
accept (string): Replace 'json' with 'xml' if preferred
content_type (string): Replace 'json' with 'xml' if preferred
on_behalf_of (string, optional): The name of the entity you are
retrieving the report on behalf of.
purpose (string, optional): 2-digit code from Permissible Purpose
Codes, specifying the reason for retrieving this report.
Returns:
PayStatementReportRecord: Response from the API. OK
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(consumer_id=consumer_id,
report_id=report_id,
accept=accept,
content_type=content_type)
# Prepare query URL
_url_path = '/decisioning/v1/consumers/{consumerId}/reports/{reportId}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'consumerId': consumer_id,
'reportId': report_id
})
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_parameters = {
'onBehalfOf': on_behalf_of,
'purpose': purpose
}
_query_builder = APIHelper.append_url_with_query_parameters(_query_builder,
_query_parameters, Configuration.array_serialization)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'Finicity-App-Key': Configuration.finicity_app_key,
'Accept': accept,
'Content-Type': content_type
}
# Prepare and execute request
_request = self.http_client.get(_query_url, headers=_headers)
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
# Endpoint and global error handling using HTTP status codes.
if _context.response.status_code == 400:
raise Error1ErrorException('Bad Request', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, PayStatementReportRecord.from_dictionary)
def get_voa_report_by_customer(self,
customer_id,
report_id,
accept,
content_type,
on_behalf_of=None,
purpose=None):
"""Does a GET request to /decisioning/v1/customers/{customerId}/reports/{reportId}.
Get a report that has been generated by calling one of the Generate
Report services.
The report's status field will contain inProgress, failure, or
success. If the status shows inProgress, the client app should wait 20
seconds and then call again to see if the report is finished.
See Permissible Purpose Codes for a list of permissible purposes for
retrieving a report.
Args:
customer_id (long|int): Finicity’s ID of the customer
report_id (string): Finicity’s ID of the report
accept (string): Replace 'json' with 'xml' if preferred
content_type (string): Replace 'json' with 'xml' if preferred
on_behalf_of (string, optional): The name of the entity you are
retrieving the report on behalf of.
purpose (string, optional): 2-digit code from Permissible Purpose
Codes, specifying the reason for retrieving this report.
Returns:
VOAReportRecord: Response from the API. OK
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(customer_id=customer_id,
report_id=report_id,
accept=accept,
content_type=content_type)
# Prepare query URL
_url_path = '/decisioning/v1/customers/{customerId}/reports/{reportId}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'customerId': customer_id,
'reportId': report_id
})
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_parameters = {
'onBehalfOf': on_behalf_of,
'purpose': purpose
}
_query_builder = APIHelper.append_url_with_query_parameters(_query_builder,
_query_parameters, Configuration.array_serialization)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'Finicity-App-Key': Configuration.finicity_app_key,
'Accept': accept,
'Content-Type': content_type
}
# Prepare and execute request
_request = self.http_client.get(_query_url, headers=_headers)
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
# Endpoint and global error handling using HTTP status codes.
if _context.response.status_code == 400:
raise Error1ErrorException('Bad Request', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, VOAReportRecord.from_dictionary)
def get_voie_txverify_report_by_consumer(self,
consumer_id,
report_id,
accept,
content_type,
on_behalf_of=None,
purpose=None):
"""Does a GET request to /decisioning/v1/consumers/{consumerId}/reports/{reportId}.
Get a report that has been generated by calling one of the Generate
Report services.
The report's status field will contain inProgress, failure, or
success. If the status shows inProgress, the client app should wait 20
seconds and then call again to see if the report is finished.
See Permissible Purpose Codes for a list of permissible purposes for
retrieving a report.
Args:
consumer_id (string): Finicity’s ID of the consumer (UUID with max
length 32 characters)
report_id (string): Finicity’s ID of the report
accept (string): Replace 'json' with 'xml' if preferred
content_type (string): Replace 'json' with 'xml' if preferred
on_behalf_of (string, optional): The name of the entity you are
retrieving the report on behalf of.
purpose (string, optional): 2-digit code from Permissible Purpose
Codes, specifying the reason for retrieving this report.
Returns:
VOIEPaystubWithTxverifyReportRecord: Response from the API. OK
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(consumer_id=consumer_id,
report_id=report_id,
accept=accept,
content_type=content_type)
# Prepare query URL
_url_path = '/decisioning/v1/consumers/{consumerId}/reports/{reportId}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'consumerId': consumer_id,
'reportId': report_id
})
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_parameters = {
'onBehalfOf': on_behalf_of,
'purpose': purpose
}
_query_builder = APIHelper.append_url_with_query_parameters(_query_builder,
_query_parameters, Configuration.array_serialization)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'Finicity-App-Key': Configuration.finicity_app_key,
'Accept': accept,
'Content-Type': content_type
}
# Prepare and execute request
_request = self.http_client.get(_query_url, headers=_headers)
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
# Endpoint and global error handling using HTTP status codes.
if _context.response.status_code == 400:
raise Error1ErrorException('Bad Request', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, VOIEPaystubWithTxverifyReportRecord.from_dictionary)
def get_pay_statement_extraction_by_customer(self,
customer_id,
report_id,
accept,
content_type,
on_behalf_of=None,
purpose=None):
"""Does a GET request to /decisioning/v1/customers/{customerId}/reports/{reportId}.
Get a report that has been generated by calling one of the Generate
Report services.
The report's status field will contain inProgress, failure, or
success. If the status shows inProgress, the client app should wait 20
seconds and then call again to see if the report is finished.
See Permissible Purpose Codes for a list of permissible purposes for
retrieving a report.
Args:
customer_id (long|int): Finicity’s ID of the customer
report_id (string): Finicity’s ID of the report
accept (string): Replace 'json' with 'xml' if preferred
content_type (string): Replace 'json' with 'xml' if preferred
on_behalf_of (string, optional): The name of the entity you are
retrieving the report on behalf of.
purpose (string, optional): 2-digit code from Permissible Purpose
Codes, specifying the reason for retrieving this report.
Returns:
PayStatementReportRecord: Response from the API. OK
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(customer_id=customer_id,
report_id=report_id,
accept=accept,
content_type=content_type)
# Prepare query URL
_url_path = '/decisioning/v1/customers/{customerId}/reports/{reportId}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'customerId': customer_id,
'reportId': report_id
})
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_parameters = {
'onBehalfOf': on_behalf_of,
'purpose': purpose
}
_query_builder = APIHelper.append_url_with_query_parameters(_query_builder,
_query_parameters, Configuration.array_serialization)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'Finicity-App-Key': Configuration.finicity_app_key,
'Accept': accept,
'Content-Type': content_type
}
# Prepare and execute request
_request = self.http_client.get(_query_url, headers=_headers)
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
# Endpoint and global error handling using HTTP status codes.
if _context.response.status_code == 400:
raise Error1ErrorException('Bad Request', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, PayStatementReportRecord.from_dictionary)
def add_customer_v_1(self,
accept,
content_type,
body):
"""Does a POST request to /aggregation/v1/customers/active.
This version 1 service has been replaced with version 2
Enroll an active customer, which is the actual owner of one or more
real-world accounts. This is a billable customer.
This service is not available from the Test Drive. Calls to this
service before enrolling in a paid plan will return HTTP 429 (Too Many
Requests).
Args:
accept (string): application/json, application/xml
content_type (string): application/json, application/xml
body (AddCustomerRequest): The Fields For The New Customer
Returns:
AddCustomerResponse: Response from the API. default response
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(accept=accept,
content_type=content_type,
body=body)
# Prepare query URL
_url_path = '/aggregation/v1/customers/active'
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'Finicity-App-Key': Configuration.finicity_app_key,
'Accept': accept,
'Content-Type': content_type
}
# Prepare and execute request
_request = self.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, AddCustomerResponse.from_dictionary)
def generate_connect_url_data_and_payments_connect(self,
accept,
body):
"""Does a POST request to /connect/v1/generate.
No matter how you plan on implementing Finicity Connect, you’ll need
to generate and retrieve a Finicity Connect Link. You will need to
specify what type of Finicity Connect you need depending on what will
happen once the customer accounts and transaction data are gathered.
Below you’ll find how to generate the Connect link as well as where to
specify what type of Finicity Connect you need.
Once you have generated the link it will only last until the
authentication token under which it was generated expires. After that
you will need to regenerate the Connect link under a new
authentication token. We recommend generating a new authentication
token when you generate a Connect link, to guarantee a full two hour
life-span.
Several Finicity products utilize Finicity Connect, and most products
have their own type of Connect. The Connect type is controlled by the
“type” code in the call.
See the specific documentation for the types to see more details on
the flow. This documentation gives the applicable implementation
details for the following types:
- ach
- aggregation
Args:
accept (string): application/json, application/xml
body (GenerateConnectURLRequestDataAndPayments): Expected body to
be sent with the request
Returns:
GenerateConnectURLResponse: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(accept=accept,
body=body)
# Prepare query URL
_url_path = '/connect/v1/generate'
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'content-type': 'application/json; charset=utf-8',
'Finicity-App-Key': Configuration.finicity_app_key,
'Accept': accept
}
# Prepare and execute request
_request = self.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, GenerateConnectURLResponse.from_dictionary)
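# Usage sketch (comments only, nothing executes). Generating a Connect URL
# for the data-and-payments flow; the request-model attribute names and the
# response's `link` attribute are assumptions, not verified against the
# generated models:
#
#   body = GenerateConnectURLRequestDataAndPayments()
#   body.partner_id = '2445581234567'   # hypothetical partner ID
#   body.customer_id = '1005061234'     # hypothetical customer ID
#   body.mtype = 'aggregation'          # or 'ach', per the docstring above
#   result = controller.generate_connect_url_data_and_payments_connect(
#       'application/json', body)
#   print(result.link)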
def generate_connect_url_lending(self,
accept,
body):
"""Does a POST request to /connect/v1/generate.
No matter how you plan on implementing Finicity Connect, you’ll need
to generate and retrieve a Finicity Connect Link. You will need to
specify what type of Finicity Connect you need depending on what will
happen once the customer accounts and transaction data are gathered.
Below you’ll find how to generate the Connect link as well as where to
specify what type of Finicity Connect you need.
Once you have generated the link it will only last until the
authentication token under which it was generated expires. After that
you will need to regenerate the Connect link under a new
authentication token. We recommend generating a new authentication
token when you generate a Connect link, to guarantee a full two hour
life-span.
Several Finicity products utilize Finicity Connect, and most products
have their own type of Connect. The Connect type is controlled by the
“type” code in the call. For lending, each type signifies a report
that will be generated as part of the connect flow unless otherwise
specified.
See the specific documentation for the types to see more details on
the flow. This documentation gives the applicable implementation
details for the following types:
- voa
- voahistory
- voi
- voieTxVerify
- voieStatement
- payStatement
- assetSummary
- preQualVoa
Args:
accept (string): application/json, application/xml
body (GenerateConnectURLRequestLending): Expected body to be sent
with the request
Returns:
GenerateConnectURLResponse: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(accept=accept,
body=body)
# Prepare query URL
_url_path = '/connect/v1/generate'
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'content-type': 'application/json; charset=utf-8',
'Finicity-App-Key': Configuration.finicity_app_key,
'Accept': accept
}
# Prepare and execute request
_request = self.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, GenerateConnectURLResponse.from_dictionary)
def generate_connect_url_lite(self,
accept,
body):
"""Does a POST request to /connect/v1/generate.
No matter how you plan on implementing Finicity Connect, you’ll need
to generate and retrieve a Finicity Connect Link. You will need to
specify what type of Finicity Connect you need depending on what will
happen once the customer accounts and transaction data are gathered.
Below you’ll find how to generate the Connect link as well as where to
specify what type of Finicity Connect you need.
Once you have generated the link it will only last until the
authentication token under which it was generated expires. After that
you will need to regenerate the Connect link under a new
authentication token. We recommend generating a new authentication
token when you generate a Connect link, to guarantee a full two hour
life-span.
Several Finicity products utilize Finicity Connect, and most products
have their own type of Connect. The Connect type is controlled by the
“type” code in the call.
See the specific documentation for the types to see more details on
the flow. This documentation gives the applicable implementation
details for the following types:
- lite
Args:
accept (string): application/json, application/xml
body (GenerateConnectURLRequestLite): Expected body to be sent
with the request
Returns:
GenerateConnectURLResponse: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(accept=accept,
body=body)
# Prepare query URL
_url_path = '/connect/v1/generate'
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'content-type': 'application/json; charset=utf-8',
'Finicity-App-Key': Configuration.finicity_app_key,
'Accept': accept
}
# Prepare and execute request
_request = self.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, GenerateConnectURLResponse.from_dictionary)
def generate_connect_url_fix(self,
accept,
body):
"""Does a POST request to /connect/v1/generate.
No matter how you plan on implementing Finicity Connect, you’ll need
to generate and retrieve a Finicity Connect Link. You will need to
specify what type of Finicity Connect you need depending on what will
happen once the customer accounts and transaction data are gathered.
Below you’ll find how to generate the Connect link as well as where to
specify what type of Finicity Connect you need.
Once you have generated the link it will only last until the
authentication token under which it was generated expires. After that
you will need to regenerate the Connect link under a new
authentication token. We recommend generating a new authentication
token when you generate a Connect link, to guarantee a full two hour
life-span.
Several Finicity products utilize Finicity Connect, and most products
have their own type of Connect. The Connect type is controlled by the
“type” code in the call.
See the specific documentation for the types to see more details on
the flow. This documentation gives the applicable implementation
details for the following types:
- fix
Args:
accept (string): application/json, application/xml
body (GenerateConnectURLRequestFix): Expected body to be sent with
the request
Returns:
GenerateConnectURLResponse: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(accept=accept,
body=body)
# Prepare query URL
_url_path = '/connect/v1/generate'
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'content-type': 'application/json; charset=utf-8',
'Finicity-App-Key': Configuration.finicity_app_key,
'Accept': accept
}
# Prepare and execute request
_request = self.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, GenerateConnectURLResponse.from_dictionary)
def send_connect_email(self,
accept,
body):
"""Does a POST request to /connect/v1/send/email.
A connect email sends an email to the customer which will contain a
link to the connect flow. You will need to specify what type of
Finicity Connect you need depending on what will happen once the
customer accounts and transaction data are gathered.
Several Finicity products utilize Finicity Connect, and most products
have their own type of Connect. The Connect type is controlled by the
“type” code in the call. Many times the type also corresponds to the
report that will be run upon completing the Connect flow.
The Send Connect Email service does not support the types aggregation,
lite, and fix.
See the endpoint Generate Finicity Connect URL (Lending) for
additional details on a non email implementation.
Args:
accept (string): application/json
body (GenerateConnectEmailRequest): Expected body to be sent with
the request
Returns:
GenerateConnectEmailResponseMultipleBorrowers: Response from the
API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(accept=accept,
body=body)
# Prepare query URL
_url_path = '/connect/v1/send/email'
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'content-type': 'application/json; charset=utf-8',
'Finicity-App-Key': Configuration.finicity_app_key,
'Accept': accept
}
# Prepare and execute request
_request = self.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, GenerateConnectEmailResponseMultipleBorrowers.from_dictionary)
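# Usage sketch (comments only, nothing executes). Sending the Connect flow
# by email; note the docstring above: the email variant does not support the
# aggregation, lite, or fix types. Attribute names on the request model are
# assumptions:
#
#   body = GenerateConnectEmailRequest()
#   body.partner_id = '2445581234567'   # hypothetical partner ID
#   body.customer_id = '1005061234'     # hypothetical customer ID
#   body.mtype = 'voa'                  # one of the lending report types
#   result = controller.send_connect_email('application/json', body)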
def get_transactions_report_by_customer(self,
customer_id,
report_id,
accept,
content_type,
on_behalf_of=None,
purpose=None):
"""Does a GET request to /decisioning/v1/customers/{customerId}/reports/{reportId}.
Get a report that has been generated by calling one of the Generate
Report services.
The report's status field will contain inProgress, failure, or
success. If the status shows inProgress, the client app should wait 20
seconds and then call again to see if the report is finished.
See Permissible Purpose Codes for a list of permissible purposes for
retrieving a report.
Args:
customer_id (long|int): Finicity’s ID of the customer
report_id (string): Finicity’s ID of the report (UUID with max
length 32 characters)
accept (string): Replace 'json' with 'xml' if preferred
content_type (string): Replace 'json' with 'xml' if preferred
on_behalf_of (string, optional): The name of the entity you are
retrieving the report on behalf of.
purpose (string, optional): 2-digit code from Permissible Purpose
Codes, specifying the reason for retrieving this report.
Returns:
TransactionsReportRecord: Response from the API. OK
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(customer_id=customer_id,
report_id=report_id,
accept=accept,
content_type=content_type)
# Prepare query URL
_url_path = '/decisioning/v1/customers/{customerId}/reports/{reportId}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'customerId': customer_id,
'reportId': report_id
})
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_parameters = {
'onBehalfOf': on_behalf_of,
'purpose': purpose
}
_query_builder = APIHelper.append_url_with_query_parameters(_query_builder,
_query_parameters, Configuration.array_serialization)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'Finicity-App-Key': Configuration.finicity_app_key,
'Accept': accept,
'Content-Type': content_type
}
# Prepare and execute request
_request = self.http_client.get(_query_url, headers=_headers)
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
# Endpoint and global error handling using HTTP status codes.
if _context.response.status_code == 400:
raise Error1ErrorException('Bad Request', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, TransactionsReportRecord.from_dictionary)
def get_transactions_report_by_consumer(self,
consumer_id,
report_id,
accept,
content_type,
on_behalf_of=None,
purpose=None):
"""Does a GET request to /decisioning/v1/consumers/{consumerId}/reports/{reportId}.
Get a report that has been generated by calling one of the Generate
Report services.
The report's status field will contain inProgress, failure, or
success. If the status shows inProgress, the client app should wait 20
seconds and then call again to see if the report is finished.
See Permissible Purpose Codes for a list of permissible purposes for
retrieving a report.
Args:
consumer_id (string): Finicity’s ID of the consumer (UUID with max
length 32 characters)
report_id (string): Finicity’s ID of the report
accept (string): Replace 'json' with 'xml' if preferred
content_type (string): Replace 'json' with 'xml' if preferred
on_behalf_of (string, optional): The name of the entity you are
retrieving the report on behalf of.
purpose (string, optional): 2-digit code from Permissible Purpose
Codes, specifying the reason for retrieving this report.
Returns:
TransactionsReportRecord: Response from the API. OK
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(consumer_id=consumer_id,
report_id=report_id,
accept=accept,
content_type=content_type)
# Prepare query URL
_url_path = '/decisioning/v1/consumers/{consumerId}/reports/{reportId}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'consumerId': consumer_id,
'reportId': report_id
})
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_parameters = {
'onBehalfOf': on_behalf_of,
'purpose': purpose
}
_query_builder = APIHelper.append_url_with_query_parameters(_query_builder,
_query_parameters, Configuration.array_serialization)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'Finicity-App-Key': Configuration.finicity_app_key,
'Accept': accept,
'Content-Type': content_type
}
# Prepare and execute request
_request = self.http_client.get(_query_url, headers=_headers)
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
# Endpoint and global error handling using HTTP status codes.
if _context.response.status_code == 400:
raise Error1ErrorException('Bad Request', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, TransactionsReportRecord.from_dictionary)
def get_app_registration_status_v_1(self):
"""Does a GET request to /aggregation/v1/partners/applications.
Get the status of your application registration to access financial
institutions (FIs) with OAuth connections.
Returns:
AppStatusesV1: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/aggregation/v1/partners/applications'
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'Finicity-App-Key': Configuration.finicity_app_key
}
# Prepare and execute request
_request = self.http_client.get(_query_url, headers=_headers)
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, AppStatusesV1.from_dictionary)
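# Usage sketch (comments only, nothing executes). This endpoint needs no
# arguments beyond the configured app key and token, so checking the OAuth
# application registration status is a single call on a controller instance:
#
#   statuses = controller.get_app_registration_status_v_1()
#   # `statuses` is an AppStatusesV1 instance built from the JSON response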
def get_statement_report_by_customer(self,
customer_id,
report_id,
accept,
content_type,
on_behalf_of=None,
purpose=None):
"""Does a GET request to /decisioning/v1/customers/{customerId}/reports/{reportId}.
Get a report that has been generated by calling one of the Generate
Report services.
The report's status field will contain inProgress, failure, or
success. If the status shows inProgress, the client app should wait 20
seconds and then call again to see if the report is finished.
See Permissible Purpose Codes for a list of permissible purposes for
retrieving a report.
Args:
customer_id (long|int): Finicity’s ID of the customer
report_id (string): Finicity’s ID of the report
accept (string): Replace 'json' with 'xml' if preferred
content_type (string): Replace 'json' with 'xml' if preferred
on_behalf_of (string, optional): The name of the entity you are
retrieving the report on behalf of.
purpose (string, optional): 2-digit code from Permissible Purpose
Codes, specifying the reason for retrieving this report.
Returns:
StatementReportRecord: Response from the API. OK
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(customer_id=customer_id,
report_id=report_id,
accept=accept,
content_type=content_type)
# Prepare query URL
_url_path = '/decisioning/v1/customers/{customerId}/reports/{reportId}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'customerId': customer_id,
'reportId': report_id
})
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_parameters = {
'onBehalfOf': on_behalf_of,
'purpose': purpose
}
_query_builder = APIHelper.append_url_with_query_parameters(_query_builder,
_query_parameters, Configuration.array_serialization)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'Finicity-App-Key': Configuration.finicity_app_key,
'Accept': accept,
'Content-Type': content_type
}
# Prepare and execute request
_request = self.http_client.get(_query_url, headers=_headers)
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
# Endpoint and global error handling using HTTP status codes.
if _context.response.status_code == 400:
raise Error1ErrorException('Bad Request', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, StatementReportRecord.from_dictionary)
def get_statement_report_by_consumer(self,
consumer_id,
report_id,
accept,
content_type,
on_behalf_of=None,
purpose=None):
"""Does a GET request to /decisioning/v1/consumers/{consumerId}/reports/{reportId}.
Get a report that has been generated by calling one of the Generate
Report services.
The report's status field will contain inProgress, failure, or
success. If the status shows inProgress, the client app should wait 20
seconds and then call again to see if the report is finished.
See Permissible Purpose Codes for a list of permissible purposes for
retrieving a report.
Args:
consumer_id (string): Finicity’s ID of the consumer (UUID with max
length 32 characters)
report_id (string): Finicity’s ID of the report
accept (string): Replace 'json' with 'xml' if preferred
content_type (string): Replace 'json' with 'xml' if preferred
on_behalf_of (string, optional): The name of the entity you are
retrieving the report on behalf of.
purpose (string, optional): 2-digit code from Permissible Purpose
Codes, specifying the reason for retrieving this report.
Returns:
StatementReportRecord: Response from the API. OK
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(consumer_id=consumer_id,
report_id=report_id,
accept=accept,
content_type=content_type)
# Prepare query URL
_url_path = '/decisioning/v1/consumers/{consumerId}/reports/{reportId}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'consumerId': consumer_id,
'reportId': report_id
})
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_parameters = {
'onBehalfOf': on_behalf_of,
'purpose': purpose
}
_query_builder = APIHelper.append_url_with_query_parameters(_query_builder,
_query_parameters, Configuration.array_serialization)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'Finicity-App-Key': Configuration.finicity_app_key,
'Accept': accept,
'Content-Type': content_type
}
# Prepare and execute request
_request = self.http_client.get(_query_url, headers=_headers)
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
# Endpoint and global error handling using HTTP status codes.
if _context.response.status_code == 400:
raise Error1ErrorException('Bad Request', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, StatementReportRecord.from_dictionary)
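# --- Added example (not part of the generated SDK) ---------------------------
# The docstrings above say a generated report's status field stays 'inProgress'
# until it becomes 'success' or 'failure', and that the client should wait about
# 20 seconds between checks. Below is a minimal polling sketch; it assumes a
# configured client instance and that the deserialized StatementReportRecord
# exposes the status field as a `status` attribute (an assumption about the
# model class, not something confirmed by this file).
def poll_statement_report(client, customer_id, report_id, interval=20, max_tries=15):
    """Poll get_statement_report_by_customer until the report leaves inProgress."""
    import time
    for _ in range(max_tries):
        report = client.get_statement_report_by_customer(
            customer_id, report_id,
            accept='application/json', content_type='application/json')
        if report.status != 'inProgress':  # 'success' or 'failure'
            return report
        time.sleep(interval)  # the docs suggest ~20 second waits
    raise RuntimeError('Report still inProgress after polling')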
| 2.015625 | 2 |
perceptron/digit_recognition.py | lstefanello/perceptron | 0 | 12798574 | import perceptron as pc
import numpy as np
def mnist_load(file, samples):
raw_data = np.array(np.genfromtxt(file, delimiter=',', max_rows=samples))
labels = raw_data[:,0]
data = np.delete(raw_data, 0, 1)/255.0
return (data, labels)
def main():
print("loading data...")
samples = 10000
batch_size = 20
train = mnist_load("mnist_train.csv", samples)
validate = mnist_load("mnist_test.csv", samples)
restart_params = (.0001, 0.01, 0.01, 2*samples/batch_size) #lower bound, upper bound, decay rate, cycle length.
structure = [784, 256, 128, 10, 10]
activation_functions = ("elu", "elu", "elu", "softmax")
network = pc.network(structure, activation_functions, train, validate)
network.train(dropout=[.5, .2, 0], beta=0.9, lr_func="warm restarts", lr_params=restart_params, batch_size=batch_size, epochs=10, cost_func="cross entropy")
main()
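# --- Added illustration ------------------------------------------------------
# The restart_params tuple above packs (lower bound, upper bound, decay rate,
# cycle length) for the "warm restarts" learning-rate schedule. The perceptron
# library's exact formula is not shown here; the sketch below is one common
# SGDR-style reading of those four numbers, included only to make the comment
# concrete.
import math

def warm_restart_lr(step, lo=0.0001, hi=0.01, decay=0.01, cycle_len=1000):
    """Cosine-annealed learning rate that restarts every `cycle_len` steps."""
    cycle, pos = divmod(step, cycle_len)
    peak = hi * (1.0 - decay) ** cycle  # shrink the peak a little after each restart
    return lo + 0.5 * (peak - lo) * (1.0 + math.cos(math.pi * pos / cycle_len))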
| 3.234375 | 3 |
tests/clients/git/test_git.py | zaibon/js-ng | 2 | 12798575 | <gh_stars>1-10
from jumpscale.loader import j
from tests.base_tests import BaseTests
class GitTests(BaseTests):
def setUp(self):
super().setUp()
self.instance_name = self.random_name()
self.repo_dir = j.sals.fs.join_paths("/tmp", self.random_name())
self.repo_name = "js-ng"
self.repo_url = "https://github.com/threefoldtech/js-ng"
j.sals.fs.mkdir(f"{self.repo_dir}")
j.sals.process.execute(f"git clone {self.repo_url}", cwd=f"{self.repo_dir}")
path = j.sals.fs.join_paths(self.repo_dir, self.repo_name)
self.git_client = j.clients.git.new(name=self.instance_name, path=path)
def tearDown(self):
j.clients.git.delete(self.instance_name)
j.sals.fs.rmtree(path=self.repo_dir)
def test01_check_git_config(self):
"""Test case for checking git config file.
**Test Scenario**
- Get a git client.
- Read the .git/config file.
- Check that remote_url equals repo_url.
- Check that the git config url equals remote_url.
"""
self.info("Read the git.config file")
path = j.sals.fs.join_paths(self.repo_dir, self.repo_name, ".git/config")
git_config = j.sals.fs.read_file(path)
self.info("Check that remote_url equal to repo_url")
self.assertEqual(self.repo_url, self.git_client.remote_url)
self.info("Check that git config url equal to remote_url")
self.assertIn(self.git_client.remote_url, git_config)
def test02_set_remote_ssh_url(self):
"""Test case for setting remote url.
**Test Scenario**
- Get a git client.
- Set remote url to repo ssh url.
- Read the git config file.
- Check that remote_url equals to repo ssh url.
"""
repo_ssh_url = "[email protected]:threefoldtech/js-ng.git"
self.info("Set remote url to repo ssh url")
self.git_client.set_remote_url(repo_ssh_url)
self.info("Read the git config file")
path = j.sals.fs.join_paths(self.repo_dir, self.repo_name, ".git/config")
git_config = j.sals.fs.read_file(path)
self.info("Check that remote_url equals to repo ssh url")
self.assertEqual(self.git_client.remote_url, repo_ssh_url)
self.assertIn(self.git_client.remote_url, git_config)
def test03_git_branch(self):
"""Test case for checking a branch name.
**Test Scenario**
- Get a git client.
- Get the branch name.
- Check branch name.
"""
self.info("Get the branch name")
path = j.sals.fs.join_paths(self.repo_dir, self.repo_name)
branch_name = j.sals.process.execute("git branch --show-current", cwd=path)
self.info("Check branch name")
self.assertIn(self.git_client.branch_name, branch_name[1])
def test04_git_modified_files(self):
"""Test case for getting the modified files.
**Test Scenario**
- Get a git client.
- Create a file in repository path.
- Commit the changes.
- Modify this file.
- Check if the file has been modified.
"""
self.info("Create a file in repository path")
file_name = self.random_name()
path = j.sals.fs.join_paths(self.repo_dir, self.repo_name, file_name)
j.sals.fs.touch(path)
self.info("Commit changes")
self.git_client.commit(f"add {file_name}")
self.info("modify this file")
path = j.sals.fs.join_paths(self.repo_dir, self.repo_name, file_name)
j.sals.fs.write_file(path, "test modify file")
self.info("Check if file has been modified")
modified_file = self.git_client.get_modified_files()
self.assertTrue(modified_file)
self.assertEqual(file_name, modified_file["M"][0])
def test05_git_add_new_file(self):
"""Test case for adding a new file with git.
**Test Scenario**
- Get a git client.
- Create a file in repository path.
- Check if a new file has been added
"""
self.info("Create a file in repository path")
file_name = self.random_name()
path = j.sals.fs.join_paths(self.repo_dir, self.repo_name, file_name)
j.sals.fs.touch(path)
self.info("Check if a new file has been added")
added_file = self.git_client.get_modified_files()
self.assertTrue(added_file)
self.assertEqual(file_name, added_file["N"][0])
def test06_git_commit(self):
"""Test case for committing a change.
**Test Scenario**
- Get a git client.
- Create a file in repository path.
- Commit the change of creating a new file.
- Get commit logs.
- Check if commit has been done.
"""
file_name = self.random_name()
commit_msg = self.random_name()
self.info("Create a file in repository path")
path = j.sals.fs.join_paths(self.repo_dir, self.repo_name, file_name)
j.sals.fs.touch(path)
self.info("Commit the change of creating a new file")
self.git_client.commit(commit_msg)
self.info("Get commit logs")
path = j.sals.fs.join_paths(self.repo_dir, self.repo_name)
last_commit = j.sals.process.execute("git log -1", cwd=path)
self.info("Check if commit has been done")
self.assertIn(commit_msg, str(last_commit))
def test07_git_commit_one_file(self):
"""Test case for checking a commit with add_all=False.
**Test Scenario**
- Get a git client.
- Create a two file in repository path.
- Check that two file has been added.
- Commit the file 1.
- Check if commit has been done for one file.
"""
file1_name = self.random_name()
file2_name = self.random_name()
self.info("Create a two file in repository path")
path_file1 = j.sals.fs.join_paths(self.repo_dir, self.repo_name, file1_name)
path_file2 = j.sals.fs.join_paths(self.repo_dir, self.repo_name, file2_name)
j.sals.fs.touch(path_file1)
j.sals.fs.touch(path_file2)
self.info("Check that two file has been added.")
self.assertEqual([file2_name, file1_name].sort(), self.git_client.get_modified_files()["N"].sort())
self.info("Commit the file 1")
path = j.sals.fs.join_paths(self.repo_dir, self.repo_name)
j.sals.process.execute(f"git add {file1_name}", cwd=path)
self.git_client.commit("commit file 1", add_all=False)
self.info("Check if commit has been done for one file")
self.assertNotIn(file1_name, self.git_client.get_modified_files()["N"])
def test08_git_pull(self):
"""Test case for pulling a repository
**Test Scenario**
- Get a git client.
- Create a file in repository path.
- Try pull before commit and should get error.
- Commit the change of creating a new file.
- Pull from remote repository.
"""
file_name = self.random_name()
commit_msg = self.random_name()
self.info("Create a file in repository path")
path = j.sals.fs.join_paths(self.repo_dir, self.repo_name, file_name)
j.sals.fs.touch(path)
self.info("Try pull before commit and should get error")
with self.assertRaises(j.exceptions.Input):
self.git_client.pull()
self.info("Commit the change of creating a new file")
self.git_client.commit(commit_msg)
self.info("Pull from remote repository")
self.git_client.pull()
| 2.125 | 2 |
UtilsPlot.py | felipegb94/ToFSim | 12 | 12798576 | <filename>UtilsPlot.py
"""UtilsPlot
Attributes:
colors (TYPE): Colors for plotting
plotParams (TYPE): Default plotting parameters
"""
#### Python imports
#### Library imports
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# from IPython.core import debugger
# breakpoint = debugger.set_trace
#### Local imports
import Utils
#### Default matplotlib preferences
plt.style.use('ggplot')
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
plotParams = {
'font.size': 16,
'figure.dpi': 80,
'figure.autolayout': True,
'figure.titleweight': 'bold',
'savefig.dpi': 200,
'axes.titlesize': 18, # main title
'axes.labelsize': 16, # x and y titles
'axes.titleweight': 'bold', # x and y titles
'axes.labelweight': 'bold', # x and y titles
'grid.linestyle': '--',
'grid.linewidth': 2,
'text.usetex': False,
'xtick.labelsize': 14,
'xtick.minor.visible': True,
'ytick.labelsize': 14,
'ytick.minor.visible': True,
'lines.linewidth': 2,
'lines.markersize': 8.0,
'legend.fontsize': 14,
'legend.shadow': True,
}
mpl.use('Qt4Agg', warn=False) ## Needed to allow drawing with matplotlib during debug mode
plt._INSTALL_FIG_OBSERVER = True
mpl.rcParams.update(plotParams)
plt.ion()
def PlotCodingScheme(ModFs, DemodFs):
"""PlotCodingScheme: Create a 1x3 figure with modulation, demodulation, and the correlation.
Args:
modF (numpy.ndarray): Modulation functions. N x K matrix.
demodF (numpy.ndarray): Demodulation functions. N x K matrix
Returns:
plt.figure: Figure handle
plt.axis: Axis handle
"""
#### Assume the following constants
totalEnergy = 1.
tau = 1.
averagePower = totalEnergy / tau
#### Reshape to ensure needed dimensions
## Assume that the number of elements is larger than the number of coding pairs, i.e. rows>cols
if(ModFs.shape[0] < ModFs.shape[1]): ModFs = ModFs.transpose()
if(DemodFs.shape[0] < DemodFs.shape[1]): DemodFs = DemodFs.transpose()
#### Verify Inputs
assert(ModFs.shape == DemodFs.shape), "Input Error - PlotCodingScheme: ModFs and \
DemodFs should be the same dimensions."
#### Set some parameters
(N,K) = ModFs.shape
avgPower = np.sum(ModFs[:,0])/N
#### Set default values
t = np.linspace(0, tau, N)
phase = np.linspace(0, 2*np.pi,N)
#### Reshape to ensure same dimensions
t = t.reshape((N,))
#### Get Correlation functions
CorrFs = Utils.GetCorrelationFunctions(ModFs=ModFs,DemodFs=DemodFs)
#### Plot Decomposition
## Clear current plot
plt.clf()
## Get current figure
fig = plt.gcf()
## Add subplots and get axis array
for i in range(K):
# breakpoint()
fig.add_subplot(K,3,3*i + 1)
fig.add_subplot(K,3,3*i + 2)
fig.add_subplot(K,3,3*i + 3)
axarr = fig.get_axes()
## Make all plots
## Calculate Avg power.
avgPower = np.sum(ModFs[:,0]) / N
avgPower = [avgPower for i in range(0, N)]
## Plot ObjCorrF first so that stars don't cover the corrFs.
for i in range(0, K):
labelInfo = str(i)
axarr[3*i + 0].plot(t, ModFs[:,i], label='Md-'+labelInfo,linewidth=2, color=colors[i])
axarr[3*i + 1].plot(t, DemodFs[:,i], label='Dmd-'+labelInfo,linewidth=2, color=colors[i])
axarr[3*i + 2].plot(phase, CorrFs[:,i], label='Crr-'+labelInfo,linewidth=2, color=colors[i])
axarr[3*i + 0].plot(t, avgPower, '--', label='AvgPower', linewidth=3, color=colors[i])
## Set axis labels
axarr[3*i + 0].set_xlabel('Time')
axarr[3*i + 1].set_xlabel('Time')
axarr[3*i + 2].set_xlabel('Phase')
axarr[3*i + 0].set_ylabel('Instant Power')
axarr[3*i + 1].set_ylabel('Exposure')
axarr[3*i + 2].set_ylabel('Magnitude')
## Set Titles
axarr[0].set_title('Modulation')
axarr[1].set_title('Demodulation')
axarr[2].set_title('Correlation')
# ## Set ylimit so that we can see the legend
# axarr[0].set_ylim([0,1.2*np.max(ModFs)])
# axarr[1].set_ylim([0,1.2*np.max(DemodFs)])
# axarr[2].set_ylim([0,1.2*np.max(CorrFs)])
return (fig, axarr)
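# --- Added usage example -----------------------------------------------------
# A short demonstration of PlotCodingScheme with a toy K=2 sinusoidal coding
# scheme. It assumes Utils.GetCorrelationFunctions (imported above) is available
# on the import path; the waveforms themselves are made up for illustration.
if __name__ == '__main__':
    N, K = 1000, 2
    tDemo = np.linspace(0., 1., N, endpoint=False)
    ModFsDemo = np.stack([0.5 + 0.5 * np.cos(2 * np.pi * (tDemo - k / K)) for k in range(K)], axis=1)
    DemodFsDemo = np.stack([0.5 + 0.5 * np.cos(2 * np.pi * (tDemo - k / K)) for k in range(K)], axis=1)
    figDemo, axarrDemo = PlotCodingScheme(ModFsDemo, DemodFsDemo)
    plt.show(block=True)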
| 2.3125 | 2 |
AutoAcademicCV/textfile.py | german-arroyo-moreno/AutoAcademia | 0 | 12798577 | <reponame>german-arroyo-moreno/AutoAcademia
# encoding: utf-8
"""
Copyright 2018 (c) <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
def openFile(fileName):
"""
" Open a file or exit of the program,
" return the handler of the file
"""
try:
finput = open(fileName, 'r')
except IOError:
print("Error loading file '" + fileName + "'. ABORT.")
sys.exit(-1)
return finput
def openTxtFile(fileName):
"""
" Open a file or exit of the program,
" return the text of the file
"""
try:
finput = open(fileName, 'r')
except IOError:
print("Error loading text file '" + fileName + "'. ABORT.")
sys.exit(-1)
text = finput.read()
finput.close()
return text
def openLinesTxtFile(fileName):
"""
" Open a file or exit of the program,
" return a list of lines of text of the file
"""
try:
finput = open(fileName, 'r')
except IOError:
print("Error loading text file '" + fileName + "'. ABORT.")
sys.exit(-1)
text = finput.readlines()
finput.close()
return text
def saveTxtFile(fileName, text, append=False):
"""
" Open a file or exit of the program,
" return the text of the file
"""
try:
if append:
foutput = open(fileName, 'a')
else:
foutput = open(fileName, 'w')
except IOError:
print("Error loading text file '" + fileName + "'. ABORT.")
sys.exit(-1)
foutput.write(text)
foutput.close()
return
def openListFile(fileName, delim=','):
"""
" Open a file or exit of the program,
" return a list separated by delim
"""
try:
finput = open(fileName, 'r')
except IOError:
print("Error loading text file '" + fileName + "'. ABORT.")
sys.exit(-1)
text = finput.read()
listT = text.split(delim)
listT = [item.replace('\n', '').replace('\r','').strip() for item in listT]
finput.close()
return listT
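# --- Added usage example -----------------------------------------------------
# Round-trip a small comma-separated list through the helpers above. The path
# below is purely illustrative.
if __name__ == "__main__":
    demoPath = "/tmp/autoacademic_demo.txt"
    saveTxtFile(demoPath, "alpha, beta,\n gamma")
    print(openListFile(demoPath, delim=','))  # -> ['alpha', 'beta', 'gamma']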
| 2.453125 | 2 |
guess.py | Olayinka2020/ds_wkday_class | 0 | 12798578 | <reponame>Olayinka2020/ds_wkday_class
# the idea is that we'll have a secret word that we store inside of our program and then the user
# will interact with the program to try and guess the secret word
# we want the user to be able to keep guessing what the secret word is until they finally get the word.
secret_word = "hello"
guess = ""
guess_count = 0
guess_limit = 3
out_of_guesses = False
# while (guess != secret_word):
# guess = input("Enter guess: ")
# guess_count += 1
# print("You win!!!")
while guess != secret_word and not(out_of_guesses):
if guess_count < guess_limit:
guess = input("Enter guess: ")
guess_count += 1
else:
out_of_guesses = True
# when we break this loop, there's going to be 2 possible scenarios. it's either the user guesses
# the word correctly or the user runs out of guesses
if out_of_guesses:
print("You re out of guesses and you lost the game")
else:
print("You win!!") | 4.4375 | 4 |
gym_graph_coloring/envs/graph_coloring.py | dankiy/gym-graph-coloring | 0 | 12798579 | import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
from gym import spaces, Env
class NXColoringEnv(Env):
def __init__(self, generator=nx.barabasi_albert_graph, **kwargs):
'''
generator — networkx graph generator,
kwargs — generator named arguments
'''
self.G = generator(**kwargs)
self.pos = nx.spring_layout(self.G, iterations=1000) #determine by n and m (?)
self.edges = np.array(self.G.edges())
self.n = len(self.G.nodes())
self.m = len(self.edges)
self.action_space = spaces.Box(low=0, high=self.n-1, shape=(self.n,2), dtype=np.uint32)
self.used_colors = []
self.current_state = np.full(self.n, self.n, dtype=np.uint32)
self.done = False
self.total_reward = 0
def get_graph(self):
return self.G.copy()
def step(self, action):
def is_action_available(action):
node, color = action
adjacent_nodes = np.unique(self.edges[np.sum(np.isin(self.edges, node), axis=1, dtype=bool)])
return ~np.any(self.current_state[adjacent_nodes]==color)
reward = 0
if is_action_available(action):
node, color = action
self.current_state[node] = color
if color not in self.used_colors:
reward = -1
self.total_reward -= 1
self.used_colors.append(color)
if self.n not in np.unique(self.current_state):
self.done = True
info = {}
return self.current_state, reward, self.done, info
def reset(self):
self.used_colors = []
self.current_state = np.full(self.n, self.n, dtype=np.uint32)
self.done = False
self.total_reward = 0
def render(self, mode='human', close=False):
nx.draw(self.G, self.pos, node_color=self.current_state, cmap=plt.cm.tab20) | 2.734375 | 3 |
backend/wmg/config.py | chanzuckerberg/dcp-prototype | 2 | 12798580 | import os
from backend.corpora.common.utils.secret_config import SecretConfig
class WmgConfig(SecretConfig):
def __init__(self, *args, **kwargs):
super().__init__("backend", secret_name="wmg_config", **kwargs)
# TODO: promote this impl to parent class, if new behavior works universally
def __getattr__(self, name):
# Environment variables intentionally override config file.
if not self.config_is_loaded():
self.load()
if (value := self.value_from_env(name)) is not None:
return value
if (value := self.value_from_config(name)) is not None:
return value
if (value := self.value_from_defaults(name)) is not None:
return value
self.raise_error(name)
def get_defaults_template(self):
deployment_stage = os.getenv("DEPLOYMENT_STAGE", "test")
defaults_template = {"bucket": f"wmg-{deployment_stage}", "data_path_prefix": "", "tiledb_config_overrides": {}}
return defaults_template
| 2.140625 | 2 |
contact_sort.py | onelharrison/labs | 0 | 12798581 | <gh_stars>0
"""
The following code explores the presentation of contacts
in an on-screen phone book in which a contact's
first name xor last name may be missing and contacts can
be sorted by first name xor last name.
A true sort by first name and last name is implemented as well as
correspending sort procedures with a presentation bias. In effect,
the presentation bias makes the contacts in the on-screen phone book
appear to be sorted by their intended sort key but in really this may
not be true. However, because of how the contacts are displayed, there
is no visual consequence in the case of contacts that are missing a
first name xor last name.
A presentation-biased sort may be preferable to a true sort as the
presentation-biased sort is visually within expectation while a true
sort may betray visual expectation. See the outputs of the sorts by last name.
"""
from collections import namedtuple
Contact = namedtuple("Contact", "fname lname")
contacts = [
Contact("Lex", "Luther"),
Contact("Firestorm", "Lobo"),
Contact("Green", "Lantern"),
Contact("Aquaman", None),
Contact(None, "Diner"),
Contact(None, "Metropolis"),
]
def coalesce(*values):
"""Returns the first not-None arguement or None"""
return next((v for v in values if v is not None), None)
def display_contacts(contacts):
"""Displays contacts"""
for contact in contacts:
print(f"{(contact.fname or '')} {(contact.lname or '')}".strip())
def presentation_sort_by_first_name(contacts):
"""Returns a sorted sequence of contacts with a presentation bias.
Tries to sort by first name, but if the first name isn't present, the last name is used instead
"""
return sorted(contacts, key=lambda c: coalesce(c.fname, c.lname))
def presentation_sort_by_last_name(contacts):
"""Returns a sorted sequence of contacts with a presentation bias.
Tries to sort by last name, but if the last name isn't present, the first name is used instead
"""
return sorted(contacts, key=lambda c: coalesce(c.lname, c.fname))
def sort_by_first_name(contacts):
"""Returns a sorted sequence of contacts with a presentation bias.
Truly sorts by first name
"""
return sorted(contacts, key=lambda c: coalesce(c.fname, ""))
def sort_by_last_name(contacts):
"""Returns a sorted sequence of contacts with a presentation bias.
Truly sorts by last name
"""
return sorted(contacts, key=lambda c: coalesce(c.lname, ""))
print("True Sort by First Name")
display_contacts(sort_by_first_name(contacts))
print()
print("Sort by First Name (with presentation bias)")
display_contacts(presentation_sort_by_first_name(contacts))
print()
print("True Sort by Last Name")
display_contacts(sort_by_last_name(contacts))
print()
print("Sort by Last Name (with presentation bias)")
display_contacts(presentation_sort_by_last_name(contacts))
print()
| 3.890625 | 4 |
bin/download-prebuilt-firmware.py | baldurmen/HDMI2USB-mode-switch | 7 | 12798582 | <reponame>baldurmen/HDMI2USB-mode-switch<gh_stars>1-10
#!/usr/bin/env python
# FIXME: Make this work under Python 2
import argparse
import csv
import doctest
import json
import os
import pickle
import sys
import time
import urllib.request
from collections import namedtuple
from datetime import datetime
class TargetNotFound(Exception):
pass
def ls_github(url, cache_ttl=None):
# FIXME: Move cache to a class or find a package.
cache_name = "github.pickle"
def load_cache():
try:
cache = pickle.load(open(cache_name, 'rb'))
except IOError:
cache = {}
return cache
def save_cache(cache):
pickle.dump(cache, open(cache_name, 'wb'))
cache = load_cache()
if url in cache and (cache_ttl is None or (
(datetime.now()-cache[url]['timestamp']).total_seconds() <
cache_ttl)):
data = cache[url]['data']
else:
while True:
data = json.loads(urllib.request.urlopen(url).read().decode())
if "message" in data:
print("Warning: {}".format(data["message"]))
time.sleep(1)
continue
else:
break
cache[url] = {
'timestamp': datetime.now(),
'data': data
}
save_cache(cache)
return data
_Version = namedtuple("Version", ("version", "commits", "hash"))
class Version(_Version):
"""
>>> v = Version("v0.0.4-44-g0cd842f")
>>> v
Version(version='v0.0.4', commits=44, hash='0cd842f')
>>> str(v)
'v0.0.4-44-g0cd842f'
"""
def __new__(cls, value):
version, commits, githash = value.split('-')
commits = int(commits)
assert githash[0] == 'g'
return _Version.__new__(cls, version, commits, githash[1:])
def __str__(self):
return "%s-%i-g%s" % self
doctest.testmod()
def parse_args():
parser = argparse.ArgumentParser(
description='Download prebuilt firmware')
parser.add_argument('--rev',
help='Get a specific version.')
parser.add_argument('--platform',
help='Get for a specific platform (board + expansion boards configuration).')
parser.add_argument('--board',
help='Alias for --platform.', dest="platform")
parser.add_argument('--channel',
help="Get latest version from in a specific channel ().",
default="unstable")
parser.add_argument('--tag',
help='Alias for --channel.', dest="channel")
parser.add_argument('--latest', dest="channel", action="store_const",
help="Get the latest version.",
const="unstable")
parser.add_argument('--target',
help="Target to download from.", default="hdmi2usb")
parser.add_argument('--firmware',
help="Firmware to download from.", default="firmware")
parser.add_argument('--arch', default="lm32",
help="Soft-CPU architecture to download from.")
parser.add_argument('--user',
help='Github user to download from.', default="timvideos")
parser.add_argument('--branch',
help="Branch to download from.", default="master")
parser.add_argument('-o', '--output',
help="Output filename.", )
args = parser.parse_args()
assert args.platform
assert args.rev or args.channel
assert args.target
return args
def mk_url(user, branch):
details = {
"owner": user,
"repo": "HDMI2USB-firmware-prebuilt",
"branch": branch,
}
archive_url = "https://api.github.com/repos/{owner}/{repo}/contents/archive/{branch}/".format(
**details)
return archive_url
def get_revs(archive_url):
# this changes as revs are added.
# builds take over 20 min, so refresh every 20 min.
print("revs = ls_github(archive_url) {}".format(archive_url))
revs = ls_github(archive_url, cache_ttl=60*20)
possible_revs = [Version(d['name']) for d in revs if d['type'] == 'dir']
possible_revs.sort()
return possible_revs
def get_goog_sheet():
data = urllib.request.urlopen(
"https://docs.google.com/spreadsheets/d/e/2PACX-1vTmqEM-XXPW4oHrJMD7QrCeKOiq1CPng9skQravspmEmaCt04Kz4lTlQLFTyQyJhcjqzCc--eO2f11x/pub?output=csv"
).read().decode('utf-8')
rev_names = {}
for i in csv.reader(data.splitlines(), dialect='excel'):
print(len(i),i)
if not i:
continue
if i[0] == "Link":
continue
if i[0] != "GitHub":
pass
# continue
if len(i) != 7:
print("Skipping row %s" % i)
continue
_, _, rev_str, name, conf, notes, more_notes = i
if not rev_str:
continue
print(rev_str, name)
rev = Version(rev_str)
# assert rev in possible_revs
# assert name not in rev_names, "{} is listed multiple times!".format(name)
rev_names[name] = rev
return rev_names
def get_rev(possible_revs,rev=None, channel="unstable"):
if not rev:
if channel == "unstable":
rev = possible_revs[-1]
else:
rev_names = get_goog_sheet()
if channel not in rev_names:
print("Did not find {} in {}".format(channel, rev_names))
sys.exit(1)
rev = rev_names[channel]
print("Channel {} is at rev {}".format(channel, rev))
else:
rev = Version(rev)
assert rev in possible_revs, "{} is not found in {}".format(
rev, possible_revs)
print("rev: {}".format(rev))
return rev
def get_rev_url(archive_url, rev):
rev_url = "{}{:s}/".format(archive_url, str(rev))
return rev_url
def get_platforms(args, rev_url):
platforms = ls_github(rev_url)
possible_platforms = [d['name'] for d in platforms if d['type'] == 'dir']
print("Found platforms: {}".format(", ".join(possible_platforms)))
if args.platform not in possible_platforms:
print("Did not find platform {} at rev {} (found {})".format(
args.platform, rev, ", ".join(possible_platforms)))
sys.exit(1)
return possible_platforms
def get_targets_url(args, rev_url):
targets_url = "{}{:s}/".format(rev_url, args.platform)
return targets_url
def get_targets(args, rev, targets_url):
targets = ls_github(targets_url)
possible_targets = [d['name'] for d in targets if d['type'] == 'dir']
print("Found targets: {}".format(", ".join(possible_targets)))
if args.target not in possible_targets:
print("Did not find target {} for platform {} at rev {} (found {})".
format(args.target, args.platform, rev,
", ".join(possible_targets)))
raise TargetNotFound()
return possible_targets
def find_last_rev(args, possible_revs):
possible_revs.reverse()
archive_url = mk_url(args.user, args.branch)
for rev in possible_revs:
rev_url = get_rev_url(archive_url, rev)
possible_platforms = get_platforms(args, rev_url)
targets_url = get_targets_url(args, rev_url)
try:
possible_targets = get_targets(args, rev, targets_url)
print("found at rev {}".format(rev))
return rev
except TargetNotFound:
continue
def get_archs_url(args, targets_url):
archs_url = "{}{:s}/".format(targets_url, args.target)
return archs_url
def get_archs(args, archs_url):
archs = ls_github(archs_url)
possible_archs = [d['name'] for d in archs if d['type'] == 'dir']
print("Found archs: {}".format(", ".join(possible_archs)))
if args.arch not in possible_archs:
print(
"Did not find arch {} for target {} for platform {} at rev {} (found {})".
format(args.arch, args.target, args.platform, rev,
", ".join(possible_archs)))
sys.exit(1)
return possible_archs
def get_firmwares_url(args, archs_url):
firmwares_url = "{}{:s}/".format(archs_url, args.arch)
return firmwares_url
def get_firmwares(args, firmwares_url):
firmwares = ls_github(firmwares_url)
possible_firmwares = [
d['name'] for d in firmwares
if d['type'] == 'file' and d['name'].endswith('.bin')
]
print("Found firmwares: {}".format(", ".join(possible_firmwares)))
return possible_firmwares
def get_filename(args, possible_firmwares):
filename = None
for f in possible_firmwares:
if f.endswith("{}.bin".format(args.firmware)):
filename = f
break
if not filename:
print(
"Did not find firmware {} for target {} for platform {} at rev {} (found {})".
format(args.firmware, args.target, args.platform, rev,
", ".join(possible_firmwares)))
sys.exit(1)
return filename
def get_image_url(args, rev, filename):
image_url = "https://github.com/{user}/HDMI2USB-firmware-prebuilt/raw/master/archive/{branch}/{rev}/{platform}/{target}/{arch}/{filename}".format(
user=args.user,
branch=args.branch,
rev=rev,
platform=args.platform,
target=args.target,
arch=args.arch,
filename=filename)
print("Image URL: {}".format(image_url))
return image_url
def download(args, rev, filename, image_url):
if args.output:
out_filename = args.output
else:
parts = os.path.splitext(filename)
out_filename = ".".join(
list(parts[:-1]) +
[str(rev), args.platform, args.target, args.arch, parts[-1][1:]])
print("Downloading to: {}".format(out_filename))
urllib.request.urlretrieve(image_url, out_filename)
return True
def main():
args = parse_args()
archive_url = mk_url(args.user, args.branch)
possible_revs = get_revs(archive_url)
rev = get_rev(possible_revs, args.rev, args.channel)
rev_url = get_rev_url(archive_url, rev)
possible_platforms = get_platforms(args, rev_url)
targets_url = get_targets_url(args, rev_url)
try:
possible_targets = get_targets(args, rev, targets_url)
except TargetNotFound:
rev = find_last_rev(args, possible_revs)
# TODO: use this rev instead.
sys.exit(1)
archs_url = get_archs_url(args, targets_url)
possible_archs = get_archs(args, archs_url)
firmwares_url = get_firmwares_url(args, archs_url)
possible_firmwares = get_firmwares(args, firmwares_url)
filename = get_filename(args, possible_firmwares)
image_url = get_image_url(args, rev, filename)
ret = download(args, rev, filename, image_url)
print("Done!")
return
if __name__ == "__main__":
main()
| 2.53125 | 3 |
src/test_quick_sort.py | han8909227/data-structures | 1 | 12798583 | """Test my quick sort algorithm tests."""
from quick_sort import quick_sort, _quicksort
import pytest
from random import randint
@pytest.fixture(scope='function')
def list_ten():
"""Make a list of 10 vals."""
return [x for x in range(10)]
@pytest.fixture(scope='function')
def rand_ten():
"""Make a random list of length 10."""
return [randint(0, 1000) for _ in range(10)]
@pytest.fixture(scope='function')
def rand_neg():
"""Make a random list of neg value list len 100."""
return [randint(-1000, 0) for _ in range(100)]
def test_sort_list_with_neg_values(rand_neg):
"""Test if sorting method sorts negative values."""
key = sorted(rand_neg)
result = quick_sort(rand_neg)
assert key == result
def test_sort_nums_in_list_random_case(rand_ten):
"""Test quick sort function."""
result = quick_sort(rand_ten)
key = sorted(rand_ten)
assert result == key
def test_sort_nums_in_tuple_random_case(rand_ten):
"""Test quick sort function."""
rand = tuple(rand_ten)
result = quick_sort(rand)
key = sorted(rand)
assert result == key
def test_sort_nums_in_list_wrose_case(list_ten):
"""Test quick sort function."""
reverse = list(reversed(list_ten))
result = quick_sort(reverse)
assert result == list_ten
def test_sort_nums_in_tuple_wrose_case(list_ten):
"""Test quick sort function."""
reverse = tuple(reversed(list_ten))
result = quick_sort(reverse)
assert result == list_ten
def test_sort_method_raises_error():
"""Test if error gets raised for invalid type."""
with pytest.raises(ValueError):
quick_sort('12345')
def test_sort_method_raises_error_val():
"""Test if error gets raised for invalid type."""
with pytest.raises(ValueError):
quick_sort([1, 2, '3'])
def test_sort_method_raise_error_dic():
"""Test if error gets raised for invalid type."""
with pytest.raises(ValueError):
quick_sort({1, 2, 3})
def test_sort_method_raise_error_fun():
"""Test if error gets raised for invalid type."""
with pytest.raises(ValueError):
quick_sort([1, 2, 3, 'p'])
def test_sort_nums_in_list_random_case_helper(rand_ten):
"""Test quick sort function."""
result = quick_sort(rand_ten)
key = sorted(rand_ten)
assert result == key
def test_sort_nums_in_list_wrose_case_helper(list_ten):
"""Test _quick sort function."""
reverse = list(reversed(list_ten))
result = _quicksort(reverse)
assert result == list_ten
| 3.375 | 3 |
homework4/problem1.py | jojonium/CS-539-Machine-Learning | 0 | 12798584 | import numpy as np
# Note: please don't import any new package. You should solve this problem using only the package(s) above.
#-------------------------------------------------------------------------
'''
Problem 1: Multi-Armed Bandit Problem (15 points)
In this problem, you will implement the epsilon-greedy method for Multi-armed bandit problem.
A list of all variables being used in this problem is provided at the end of this file.
'''
#--------------------------
def Terms_and_Conditions():
'''
By submitting this homework or changing this function, you agree with the following terms:
(1) Not sharing your code/solution with any student before and after the homework due. For example, sending your code segment to another student, putting your solution online or lending your laptop (if your laptop contains your solution or your Dropbox automatically copied your solution from your desktop computer and your laptop) to another student to work on this homework will violate this term.
(2) Not using anyone's code in this homework and building your own solution. For example, using some code segments from another student or online resources due to any reason (like too busy recently) will violate this term. Changing other's code as your solution (such as changing the variable names) will also violate this term.
(3) When discussing with any other students about this homework, only discuss high-level ideas or use pseudo-code. Don't discuss about the solution at the code level. For example, two students discuss about the solution of a function (which needs 5 lines of code to solve) and they then work on the solution "independently", however the code of the two solutions are exactly the same, or only with minor differences (variable names are different). In this case, the two students violate this term.
All violations of (1),(2) or (3) will be handled in accordance with the WPI Academic Honesty Policy. For more details, please visit: https://www.wpi.edu/about/policies/academic-integrity/dishonesty
Note: we may use the Stanford Moss system to check your code for code similarity. https://theory.stanford.edu/~aiken/moss/
Historical Data: in one year, we ended up finding 25% of the students in that class violating this term in their homework submissions and we handled ALL of these violations according to the WPI Academic Honesty Policy.
'''
#*******************************************
# CHANGE HERE: if you have read and agree with the term above, change "False" to "True".
Read_and_Agree = True
#*******************************************
return Read_and_Agree
#----------------------------------------------------
'''
Given the player's memory about the previous results in the game and the action chosen and reward received at the current time step, update the player's memory.
---- Inputs: --------
* a: the index of the action being chosen by the player, an integer scalar between 0 and c-1.
* r: the reward received at the current time step, a float scalar.
* Rt: (player's memory) the total rewards (i.e., sum of rewards) collected for each action, a numpy float vector of length c. Rt_1[i] represents the sum of total rewards collected on the i-th action.
* Ct: (player's memory) the counts on how many times each action has been tried, a numpy integer vector of length c. Ct_1[i] represents the total number of samples collected on the i-th action, i.e., how many times the i-th action has been tried before".
---- Hints: --------
* This problem can be solved using 2 line(s) of code.
'''
#---------------------
def update_memory(a, r, Rt, Ct):
#########################################
## INSERT YOUR CODE HERE (3 points)
Rt[a] = Rt[a] + r
Ct[a] = Ct[a] + 1
#########################################
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test1.py:test_update_memory
--- OR ----
python3 -m nose -v test1.py:test_update_memory
--- OR ----
python -m nose -v test1.py:test_update_memory
---------------------------------------------------
'''
#----------------------------------------------------
'''
(Explore-only) Given a multi-armed bandit game, choose an action at the current time step using explore-only strategy. Randomly pick an action with uniform distribution: equal probability for all actions.
---- Inputs: --------
* c: the number of possible actions in a multi-armed bandit problem, an integer scalar.
---- Outputs: --------
* a: the index of the action being chosen by the player, an integer scalar between 0 and c-1.
---- Hints: --------
* This problem can be solved using 1 line(s) of code.
'''
#---------------------
def choose_action_explore(c):
#########################################
## INSERT YOUR CODE HERE (3 points)
a = np.random.randint(0, c)
#########################################
return a
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test1.py:test_choose_action_explore
--- OR ----
python3 -m nose -v test1.py:test_choose_action_explore
--- OR ----
python -m nose -v test1.py:test_choose_action_explore
---------------------------------------------------
'''
#----------------------------------------------------
'''
(Exploit-only) Given a multi-armed bandit game and the player's memory about the previous results, choose an action at the current time step using exploit-only strategy: choose the action with the highest average reward.
---- Inputs: --------
* Rt: (player's memory) the total rewards (i.e., sum of rewards) collected for each action, a numpy float vector of length c. Rt_1[i] represents the sum of total rewards collected on the i-th action.
* Ct: (player's memory) the counts on how many times each action has been tried, a numpy integer vector of length c. Ct_1[i] represents the total number of samples collected on the i-th action, i.e., how many times the i-th action has been tried before".
---- Outputs: --------
* a: the index of the action being chosen by the player, an integer scalar between 0 and c-1.
---- Hints: --------
* If the count in Ct[i] for the i-th action is 0, we can assume the average reward for the i-th action is 0. For example, if the count Ct for 3 actions are [0,1,1], we can assume the average reward for the first action is 0.
* You could use the argmax() function in numpy to return the index of the largest value in a vector.
* This problem can be solved using 1 line(s) of code.
'''
#---------------------
def choose_action_exploit(Rt, Ct):
#########################################
## INSERT YOUR CODE HERE (3 points)
a = np.argmax([0 if Ct[i] == 0 else Rt[i] / Ct[i] for i in range(Rt.size)])
#########################################
return a
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test1.py:test_choose_action_exploit
--- OR ----
python3 -m nose -v test1.py:test_choose_action_exploit
--- OR ----
python -m nose -v test1.py:test_choose_action_exploit
---------------------------------------------------
'''
#----------------------------------------------------
'''
Given a multi-armed bandit game and the player's memory about the previous results, choose an action at the current step of the game using epsilon-greedy method: with a small probability (epsilon) to follow explore-only method (randomly choose an action) and with a large probability (1-epsilon) to follow exploit-only method (choose the action with the highest average reward).
---- Inputs: --------
* Rt: (player's memory) the total rewards (i.e., sum of rewards) collected for each action, a numpy float vector of length c. Rt_1[i] represents the sum of total rewards collected on the i-th action.
* Ct: (player's memory) the counts on how many times each action has been tried, a numpy integer vector of length c. Ct_1[i] represents the total number of samples collected on the i-th action, i.e., how many times the i-th action has been tried before".
* e: (epsilon) the probability of the player to follow the exploration-only strategy. e is a float scalar between 0 and 1. The player has 1-e probability in each time step to follow the exploitation-only strategy.
---- Outputs: --------
* a: the index of the action being chosen by the player, an integer scalar between 0 and c-1.
---- Hints: --------
* You could use the random.rand() function in numpy to sample a number randomly using uniform distribution between 0 and 1.
* This problem can be solved using 1 line(s) of code.
'''
#---------------------
def choose_action(Rt, Ct, e=0.05):
#########################################
## INSERT YOUR CODE HERE (6 points)
a = choose_action_explore(Ct.size) if np.random.random() < e else choose_action_exploit(Rt, Ct)
#########################################
return a
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test1.py:test_choose_action
--- OR ----
python3 -m nose -v test1.py:test_choose_action
--- OR ----
python -m nose -v test1.py:test_choose_action
---------------------------------------------------
'''
#--------------------------------------------
'''
TEST problem 1:
Now you can test the correctness of all the above functions by typing the following in the terminal:
---------------------------------------------------
nosetests -v test1.py
--- OR ----
python3 -m nose -v test1.py
--- OR ----
python -m nose -v test1.py
---------------------------------------------------
If your code passed all the tests, you will see the following message in the terminal:
----------- Problem 1 (15 points in total)--------------------- ... ok
* (3 points) update_memory ... ok
* (3 points) choose_action_explore ... ok
* (3 points) choose_action_exploit ... ok
* (6 points) choose_action ... ok
----------------------------------------------------------------------
Ran 4 tests in 0.586s
OK
'''
#--------------------------------------------
#--------------------------------------------
'''
List of All Variables
* c: the number of possible actions in a multi-armed bandit problem, an integer scalar.
* e: (epsilon) the probability of the player to follow the exploration-only strategy. e is a float scalar between 0 and 1. The player has 1-e probability in each time step to follow the exploitation-only strategy.
* Rt: (player's memory) the total rewards (i.e., sum of rewards) collected for each action, a numpy float vector of length c. Rt_1[i] represents the sum of total rewards collected on the i-th action.
* Ct: (player's memory) the counts on how many times each action has been tried, a numpy integer vector of length c. Ct_1[i] represents the total number of samples collected on the i-th action, i.e., how many times the i-th action has been tried before".
* a: the index of the action being chosen by the player, an integer scalar between 0 and c-1.
* r: the reward received at the current time step, a float scalar.
'''
#-------------------------------------------- | 3.328125 | 3 |
src/photometer.py | yeutterg/beautiful-photometry | 1 | 12798585 | """Photometer
These functions handle data files from spectrophotometers for easy and direct import
The functions are:
* uprtek_import_spectrum - Imports the spectrum from a UPRtek spectrophotometer
* uprtek_import_r_vals - Imports the R values generated by a UPRtek spectrophotometer
* uprtek_file_import - Imports the UPRtek file and extracts the selected data
"""
import csv
import itertools
"""Imports a UPRtek data file and outputs a dictionary with the intensities for each wavelength
Note: UPRtek names these files as .xls, but they are actually formatted as tab-delimited text files
Note2: This has only been tested with the UPRtek CV600 and MK350N. Others may have a different file format
Parameters
----------
filename : String
The filename to import
Returns
-------
dict
A dictionary with the wavelengths and intensities, e.g.:
{380: 0.048, 381: 0.051, ...}
"""
def uprtek_import_spectrum(filename: str):
return uprtek_file_import(filename, 'spd')
"""Imports a UPRtek data file and outputs a dictionary with the R-Values
Note: UPRtek names these files as .xls, but they are actually formatted as tab-delimited text files
Note2: This has only been tested with the UPRtek CV600 and MK350N. Others may have a different file format
Parameters
----------
filename : String
The filename to import
Returns
-------
dict
A dictionary with the R-Values, e.g.:
{'R1': 98.887482, 'R2': 99.234245, ...}
"""
def uprtek_import_r_vals(filename: str):
return uprtek_file_import(filename, 'r_vals')
"""Imports a UPRtek data file and outputs a dictionary with the selected data
Note: UPRtek names these files as .xls, but they are actually formatted as tab-delimited text files
Note2: This has only been tested with the UPRtek CV600 and MK350N. Others may have a different file format
Parameters
----------
filename : String
The filename to import
returntype: dict
The type of data to return. Currently, either 'spd' or 'r_vals'
Returns
-------
dict
A dictionary with the selected data
"""
def uprtek_file_import(filename: str, returntype: dict):
with open(filename, mode='r', encoding='us-ascii') as csvFile:
reader = csv.reader(csvFile, delimiter='\t')
# Get UPRtek model from the first line, then set rows for reading data
model = next(reader)[1]
if model == 'CV600':
spd_start = 40
r_start = 18
r_end = 33
elif model == 'MK350NPLUS':
spd_start = 46
r_start = 26
r_end = 41
else:
print('UPRtek model not available. Using the MK350N format, which could result in errors!')
spd_start = 46
r_start = 26
r_end = 41
# Extract the data and return
if returntype == 'spd':
spd = {}
for row in itertools.islice(reader, spd_start, None):
spd[int(row[0][0:3])] = float(row[1])
return spd
elif returntype == 'r_vals':
r_vals = {}
for row in itertools.islice(reader, r_start, r_end):
r_vals[row[0]] = float(row[1])
return r_vals
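# --- Added usage example -----------------------------------------------------
# Load a UPRtek export and inspect a few values. The filename is illustrative;
# point it at a real tab-delimited UPRtek .xls export.
if __name__ == "__main__":
    demo_file = "measurements/cv600_sample.xls"
    spd = uprtek_import_spectrum(demo_file)
    r_vals = uprtek_import_r_vals(demo_file)
    print({wl: spd[wl] for wl in sorted(spd)[:5]})  # first few wavelengths
    print(r_vals.get("R9"))                         # e.g. the R9 value, if present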
| 3.046875 | 3 |
twitter_explorer/views.py | jrstarke/TweetSeeker | 2 | 12798586 | '''
Copyright 2011-2012 <NAME>, The CHISEL group and contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contributors
<NAME>
'''
from django.shortcuts import render_to_response
from django.template.loader import render_to_string
from twitter.models import Person, Status
from twitter_explorer.models import Group
from django.http import HttpResponse, HttpResponseRedirect
from auth.decorators import wants_account, needs_account
from django.utils import simplejson
from twitter_explorer import settings
from twitter import collector
from datetime import datetime
from django.template.context import RequestContext
from twitter_explorer import tasks
import time
import sys, traceback
import re
import urllib2
def encodeURLs(statuses,account):
r = re.compile(r"(http://[^, ]+[^,. ])")
s = re.compile(r'(\A|\s)@(\w+)')
if isinstance(statuses,Status):
temp = []
temp.append(statuses)
statuses = temp
for status in statuses:
status.text = r.sub(r'<a href="\1" target="_blank" rel="nofollow">\1</a>', status.text)
status.text = s.sub(r'\1@<a href="/user/\2" target="_blank" rel="nofollow">\2</a>', status.text)
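# --- Added example -----------------------------------------------------------
# What encodeURLs does to a status' text: plain http:// links become anchor tags
# and @mentions become links to /user/<name>. The stand-in object below is used
# instead of a real Status model instance purely for illustration.
def _encode_urls_example():
    fake = type("FakeStatus", (), {})()
    fake.text = "reading http://example.com with @jrstarke"
    encodeURLs([fake], account=None)
    # fake.text is now (wrapped here for readability):
    # 'reading <a href="http://example.com" target="_blank" rel="nofollow">http://example.com</a>
    #  with @<a href="/user/jrstarke" target="_blank" rel="nofollow">jrstarke</a>'
    return fake.text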
@wants_account
def MainPage(request,account=None):
return render_to_response('index.html',{'user':account})
@wants_account
def PersonPage(request,screen_name,account=None):
if account:
person = collector.person(screen_name,account)
else:
person = Person.by_screen_name(screen_name)
if person:
tasks.updatePersonTweets(person,account)
num_friends = person.following_count
else:
num_friends = None
return render_to_response('person.html',{'person':person,
'num_friends':num_friends,
'user':account})
@wants_account
def PersonTweets(request,screen_name,account=None):
person = Person.by_screen_name(screen_name)
filters = {}
terms=None
if request.REQUEST.get('start'):
filters['created_at__gte'] = datetime.strptime(request.REQUEST.get('start'),"%Y-%m-%d")
if request.REQUEST.get('end'):
filters['created_at__lt'] = datetime.strptime(request.REQUEST.get('end'),"%Y-%m-%d")
if request.REQUEST.get('q'):
terms = request.REQUEST.get('q').split()
statuses = person.statuses(20,0,query=terms,**filters)
encodeURLs(statuses, account)
return render_to_response('person-tweets.html', {'screen_name':screen_name, 'statuses':statuses})
@wants_account
def PersonTweetsAdditional(request,screen_name,loadCount=0,account=None):
person = Person.by_screen_name(screen_name)
filters = {}
tweetCount = 20
loadCount = tweetCount * int(loadCount)
terms = None
if request.REQUEST.get('start'):
filters['created_at__gte'] = datetime.strptime(request.REQUEST.get('start'),"%Y-%m-%d")
if request.REQUEST.get('end'):
filters['created_at__lt'] = datetime.strptime(request.REQUEST.get('end'),"%Y-%m-%d")
if request.REQUEST.get('q'):
terms = request.REQUEST.get('q').split()
statuses = person.statuses(tweetCount,loadCount,query=terms,**filters)
encodeURLs(statuses, account)
return render_to_response('person-tweets-additional.html', {'screen_name':screen_name, 'statuses':statuses})
@wants_account
def PersonTweetsBackground(request,screen_name,account=None):
try:
output = {}
person = Person.by_screen_name(screen_name)
count = 0
while person.statusCount() == 0 and person.isUpdating():
time.sleep(2**count)
count = count + 1
complete = not person.isUpdating()
output['num_statuses'] = person.statusCount()
output['complete'] = complete
if person.oldestStatus():
output['oldest_date'] = person.oldestStatus().status_date()
if person.latestStatus():
output['latest_date'] = person.latestStatus().status_date()
if account:
output['api_calls'] = account.rate_remaining
response = HttpResponse()
response.write(simplejson.dumps(output))
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
print "*** print_tb:"
traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
print "*** print_exception:"
traceback.print_exception(exc_type, exc_value, exc_traceback,
limit=2, file=sys.stdout)
print "*** print_exc:"
traceback.print_exc()
print "*** format_exc, first and last line:"
return response
@wants_account
def AboutPage(request,account=None):
return render_to_response('about.html', {'user':account})
@needs_account('/login')
def GroupListPage(request,account=None):
groups = Group.by_user(account)
return render_to_response('group-list.html', {'user':account,
'groups':groups,
})
@needs_account('/login')
def GroupAddPage(request,account=None):
name = request.REQUEST.get('name','')
short_name = name.replace(' ','_').lower()
member_names = request.REQUEST.get('member_names','')
auto_gen = request.REQUEST.get('auto_gen',None)
errors = []
if 'POST' == request.method:
if auto_gen:
return render_to_response('group-add.html', {
"user": account,
"member_names":member_names,
}, context_instance=RequestContext(request))
group = Group.by_short_name_and_user(short_name, account)
if group:
return render_to_response('group-add.html', {
"errors":['A group by the short_name \'' + short_name + '\' already exists for you. Please try another.'],
"user": account,
"short_name":short_name,
"name":name,
"member_names":member_names,
}, context_instance=RequestContext(request))
people = []
if len(short_name) < 1:
errors.append("Please ensure the group name is atleast 1 character long.")
for member in member_names.strip().split(','):
person = Person.by_screen_name(member)
if not person:
person = collector.person(member,account)
if person:
people.append(person)
tasks.updatePersonTweets(person,account)
else:
errors.append("Could not find a user named: " + member)
if len(errors) > 0:
return render_to_response('group-add.html', {
"errors":errors,
"user": account,
"short_name":short_name,
"name":name,
"member_names":member_names,
}, context_instance=RequestContext(request))
else:
group = Group.objects.create(short_name=short_name, user=account, name=name)
for person in people:
group.members.add(person)
group.save()
return HttpResponseRedirect('/group')
return render_to_response('group-add.html', {
'user': account,
},context_instance=RequestContext(request))
@needs_account('/login')
def GroupGenAddPage(request,account=None):
errors = []
if 'POST' == request.method:
url = request.REQUEST.get('url','')
try:
conn = urllib2.urlopen(url)
data = conn.read()
conn.close()
except:
errors.append('We were unable to get any data from the url provided. Please make sure it is correct and includes the http://')
if len(errors) > 0:
return render_to_response('group-gen-add.html', {
'url':url,
'errors':errors,
'user':account,
}, context_instance=RequestContext(request))
r = re.compile(r"(twitter.com/[^, /\"]+[^,. /\"])")
links = r.findall(data)
members = []
for link in links:
link = link.replace('twitter.com/','')
if not link in members:
person = collector.person(link, account)
if person:
members.append(link)
member_names = ""
for member in members:
if len(member_names) > 0:
member_names = member_names + ","
member_names = member_names + member
return render_to_response('group-gen-add.html', {
'user':account,
'member_names':member_names}, context_instance=RequestContext(request))
return render_to_response('group-gen-add.html', {
'user':account,
}, context_instance=RequestContext(request))
@needs_account('/login')
def GroupPage(request,short_name,account=None):
group = Group.by_short_name_and_user(short_name, account)
for member in group.members.all():
tasks.updatePersonTweets(member, account)
return render_to_response('group.html',{'group':group,
'user':account})
@wants_account
def GroupTweets(request,short_name,account=None):
group = Group.by_short_name_and_user(short_name, account)
terms = None
filters = {}
if request.REQUEST.get('start'):
filters['created_at__gte'] = datetime.strptime(request.REQUEST.get('start'),"%Y-%m-%d")
if request.REQUEST.get('end'):
filters['created_at__lt'] = datetime.strptime(request.REQUEST.get('end'),"%Y-%m-%d")
if request.REQUEST.get('q'):
terms = request.REQUEST.get('q').split()
statuses = group.statuses(20,0,query=terms,**filters)
encodeURLs(statuses, account)
return render_to_response('group-tweets.html', {'name':group.name, 'statuses':statuses})
@wants_account
def GroupTweetsAdditional(request,short_name,loadCount=0,account=None):
group = Group.by_short_name_and_user(short_name, account)
filters = {}
terms = None
tweetCount = 20
loadCount = tweetCount * int(loadCount)
if request.REQUEST.get('start'):
filters['created_at__gte'] = datetime.strptime(request.REQUEST.get('start'),"%Y-%m-%d")
if request.REQUEST.get('end'):
filters['created_at__lt'] = datetime.strptime(request.REQUEST.get('end'),"%Y-%m-%d")
if request.REQUEST.get('q'):
terms = request.REQUEST.get('q').split()
statuses = group.statuses(tweetCount,loadCount,query=terms,**filters)
encodeURLs(statuses, account)
return render_to_response('group-tweets-additional.html', {'statuses':statuses})
@wants_account
def GroupTweetsBackground(request,short_name,account=None):
output = {}
group = Group.by_short_name_and_user(short_name, account)
output['num_statuses'] = group.status_count()
complete = not group.isUpdating()
output['complete'] = complete
if group.oldestStatus():
output['oldest_date'] = group.oldestStatus().status_date()
if group.latestStatus():
output['latest_date'] = group.latestStatus().status_date()
if account:
output['api_calls'] = account.rate_remaining
response = HttpResponse()
response.write(simplejson.dumps(output))
return response
@wants_account
def Tweet(request,status_id,account=None):
output = {}
status = Status.by_id(status_id)
if not status:
status = collector.status(status_id,account)
if status:
encodeURLs(status, account)
output['status'] = render_to_string('status.html', {'status':status})
output['next_status'] = str(status.in_reply_to_status_id)
else:
output['status'] = '<li>The next tweet no longer exists</li>'
output['next_status'] = str(None)
if account:
output['api_calls'] = account.rate_remaining
response = HttpResponse()
response.write(simplejson.dumps(output))
return response
def UserRedirect(request):
screen_name = request.REQUEST.get('screen_name')
return HttpResponseRedirect('/user/'+screen_name) | 1.921875 | 2 |
share/dht/script/run.py | tspoon4/codenote | 0 | 12798587 | <filename>share/dht/script/run.py
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import gc
import sys
import json
import time
import signal
import subprocess
# In[ ]:
CONFIG = '/mnt/data/script/config.json'
DHT = '/mnt/data/script/dht_crawl.py'
META = '/mnt/data/script/dht_metadata.py'
GEOIP = '/mnt/data/script/dht_geoloc.py'
# In[ ]:
dht = list()
meta = list()
geoip = list()
# In[ ]:
def __tick_list(processes, script, count):
    # reap finished workers and respawn until `count` copies of `script` are running
for p in list(processes):
ret = p.poll()
if ret is not None:
processes.remove(p)
spawncount = count - len(processes)
for i in range(0, spawncount):
p = subprocess.Popen([sys.executable, script])
processes.append(p)
# In[ ]:
def __kill_list(processes):
for p in processes: p.terminate()
for p in processes: p.wait()
# In[ ]:
def __term_handler(signum, frame):
__kill_list(dht)
__kill_list(meta)
__kill_list(geoip)
sys.exit(0)
# In[ ]:
def main():
signal.signal(signal.SIGTERM, __term_handler)
with open(CONFIG, 'r') as f:
config = json.load(f)
crawlers = config['crawlers']
while True:
__tick_list(dht, DHT, crawlers['dht'])
__tick_list(meta, META, crawlers['metadata'])
__tick_list(geoip, GEOIP, crawlers['geoip'])
time.sleep(crawlers['polltime'])
gc.collect()
return 1
# In[ ]:
if __name__ == '__main__':
sys.exit(main())
| 2.21875 | 2 |
GeekforGeeks Array practice/check_sort.py | gurusabarish/python-programs | 2 | 12798588 | def check(lst, l):
    # a list counts as sorted (non-decreasing) if every element is >= its predecessor
    sort = 1
    for i in range(1, l):
        if lst[i] < lst[i - 1]:
            sort = 0
            break
    print(sort)
T = int(input())
for i in range(T):
n = int(input())
arr = list(map(int, input().split()))
check(arr, n)
| 3.34375 | 3 |
examples/overnight_hold.py | turingdata/alpaca-trade-api-python | 1 | 12798589 | <gh_stars>1-10
import alpaca_trade_api as tradeapi
from alpaca_trade_api.rest import TimeFrame
from alpaca_trade_api.rest_async import gather_with_concurrency, AsyncRest
from alpaca_trade_api.entity_v2 import BarsV2
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
import pandas as pd
import statistics
import sys
import time
import asyncio
from enum import Enum
import pytz
from datetime import datetime, timedelta
from pytz import timezone
stocks_to_hold = 20 # Max 200
# Only stocks with prices in this range will be considered.
max_stock_price = 26
min_stock_price = 6
# API datetimes will match this format. (-04:00 represents the market's TZ.)
api_time_format = '%Y-%m-%dT%H:%M:%S.%f-04:00'
loop = asyncio.get_event_loop()
if sys.argv[1] == 'backtest':
# so the backtests go faster
executor = ProcessPoolExecutor(10)
else:
executor = ProcessPoolExecutor(1)
class DataType(str, Enum):
Bars = "Bars"
Trades = "Trades"
Quotes = "Quotes"
def get_data_method(data_type: DataType):
if data_type == DataType.Bars:
return rest.get_bars_async
elif data_type == DataType.Trades:
return rest.get_trades_async
elif data_type == DataType.Quotes:
return rest.get_quotes_async
else:
        raise Exception(f"Unsupported data type: {data_type}")
async def get_historic_data_base(symbols, data_type: DataType, start, end,
timeframe: TimeFrame = None):
major = sys.version_info.major
minor = sys.version_info.minor
if major < 3 or minor < 6:
        raise Exception('asyncio is not supported in your python version')
msg = f"Getting {data_type} data for {len(symbols)} symbols"
msg += f", timeframe: {timeframe}" if timeframe else ""
msg += f" between dates: start={start}, end={end}"
print(msg)
step_size = 1000
results = []
for i in range(0, len(symbols), step_size):
tasks = []
for symbol in symbols[i:i + step_size]:
args = [symbol, start, end, timeframe.value] if timeframe else \
[symbol, start, end]
tasks.append(get_data_method(data_type)(*args))
if minor >= 8:
results.extend(
await asyncio.gather(*tasks, return_exceptions=True))
else:
results.extend(await gather_with_concurrency(500, *tasks))
bad_requests = 0
for response in results:
if isinstance(response, Exception):
print(f"Got an error: {response}")
elif not len(response[1]):
bad_requests += 1
print(f"Total of {len(results)} {data_type}, and {bad_requests} "
f"empty responses.")
return results
async def get_historic_bars(symbols, start, end, timeframe: TimeFrame):
return await get_historic_data_base(symbols, DataType.Bars, start, end,
timeframe)
def _process_dataset(dataset, algo_time, start, end, window_size, index):
if isinstance(dataset, Exception):
return
symbol = dataset[0]
data = dataset[1].truncate(after=end + timedelta(days=1))[
-window_size:]
if data.empty or len(data) < window_size:
return
# Make sure we aren't missing the most recent data.
latest_bar = data.iloc[-1].name.to_pydatetime().astimezone(
timezone('EST'))
if algo_time:
gap_from_present = algo_time - latest_bar
if gap_from_present.days > 1:
return
# Now, if the stock is within our target range, rate it.
price = data.iloc[-1].close
if price <= max_stock_price and price >= min_stock_price:
price_change = price - data.iloc[0].close
# Calculate standard deviation of previous volumes
volume_stdev = data.iloc[:-1].volume.std()
if volume_stdev == 0:
# The data for the stock might be low quality.
return
# Then, compare it to the change in volume since yesterday.
volume_change = data.iloc[-1].volume - data.iloc[-2].volume
volume_factor = volume_change / volume_stdev
# Rating = Number of volume standard deviations * momentum.
rating = price_change / data.iloc[0].close * volume_factor
if rating > 0:
return {
'symbol': symbol,
'rating': price_change / data.iloc[
0].close * volume_factor,
'price': price
}
# Rate stocks based on the volume's deviation from the previous 5 days and
# momentum. Returns a dataframe mapping stock symbols to ratings and prices.
# Note: If algo_time is None, the API's default behavior of the current time
# as `end` will be used. We use this for live trading.
def get_ratings(api, algo_time, datasets=None):
ratings = pd.DataFrame(columns=['symbol', 'rating', 'price'])
index = 0
window_size = 5 # The number of days of data to consider
formatted_time = None
if algo_time is not None:
        # Convert the time to something compatible with the Alpaca API.
start_time = (algo_time.date() -
timedelta(days=window_size)).strftime(
api_time_format)
formatted_time = algo_time.date().strftime(api_time_format)
end = pd.Timestamp(formatted_time)
else:
end = pytz.timezone("America/New_York").localize(pd.Timestamp('now'))
start = end - timedelta(
days=window_size + 10) # make sure we don't hit weekends
if not datasets:
assets = api.list_assets(status="active")
assets = [asset for asset in assets if asset.tradable]
symbols = [s.symbol for s in assets]
snapshot = api.get_snapshots(symbols)
symbols = list(filter(lambda x: max_stock_price >= snapshot[
x].latest_trade.p >= min_stock_price if snapshot[x] and snapshot[
x].latest_trade else False, snapshot))
datasets = loop.run_until_complete(
get_historic_bars(symbols, start.isoformat(), end.isoformat(),
TimeFrame.Day))
futures = []
for dataset in datasets:
futures.append(executor.submit(_process_dataset, *(
dataset, algo_time, start, end, window_size, index)))
done = False
while not done:
done = True
for f in futures:
if not f.done():
done = False
break
time.sleep(0.1)
for f in futures:
res = f.result()
if res:
ratings = ratings.append(res, ignore_index=True)
ratings = ratings.sort_values('rating', ascending=False)
ratings = ratings.reset_index(drop=True)
return ratings[:stocks_to_hold]
def get_shares_to_buy(ratings_df, portfolio):
total_rating = ratings_df['rating'].sum()
shares = {}
for _, row in ratings_df.iterrows():
shares[row['symbol']] = int(
row['rating'] / total_rating * portfolio / row['price']
)
return shares
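# Rough worked example of the proportional allocation above (hypothetical
# numbers, not real ratings): with a $10,000 portfolio, a stock rated 2.0 out
# of a total rating of 4.0 and priced at $20 gets int(2.0 / 4.0 * 10000 / 20)
# = 250 shares.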
# Returns a string version of a timestamp compatible with the Alpaca API.
def api_format(dt):
return dt.strftime(api_time_format)
def backtest(api, days_to_test, portfolio_amount):
# This is the collection of stocks that will be used for backtesting.
assets = api.list_assets()
now = datetime.now(timezone('EST'))
beginning = now - timedelta(days=days_to_test)
# The calendars API will let us skip over market holidays and handle early
# market closures during our backtesting window.
calendars = api.get_calendar(
start=beginning.strftime("%Y-%m-%d"),
end=now.strftime("%Y-%m-%d")
)
shares = {}
cal_index = 0
assets = api.list_assets(status="active")
assets = [asset for asset in assets if asset.tradable]
symbols = [s.symbol for s in assets]
snapshot = api.get_snapshots(symbols)
symbols = list(filter(lambda x: max_stock_price >= snapshot[
x].latest_trade.p >= min_stock_price if snapshot[x] and snapshot[
x].latest_trade else False, snapshot))
data = loop.run_until_complete(
get_historic_bars(
symbols[:],
pytz.timezone("America/New_York").localize(
calendars[0].date - timedelta(days=10)).isoformat(),
pytz.timezone("America/New_York").localize(
calendars[-1].date).isoformat(),
TimeFrame.Day))
for calendar in calendars:
# See how much we got back by holding the last day's picks overnight
portfolio_amount += get_value_of_assets(api, shares, calendar.date)
print('Portfolio value on {}: {:0.2f} $'.format(calendar.date.strftime(
'%Y-%m-%d'), portfolio_amount)
)
if cal_index == len(calendars) - 2:
# -2 because we don't have today's data yet
# We've reached the end of the backtesting window.
break
# Get the ratings for a particular day
ratings = \
get_ratings(api, timezone('EST').localize(calendar.date),
datasets=data)
shares = get_shares_to_buy(ratings, portfolio_amount)
for _, row in ratings.iterrows():
# "Buy" our shares on that day and subtract the cost.
shares_to_buy = shares[row['symbol']]
cost = row['price'] * shares_to_buy
portfolio_amount -= cost
cal_index += 1
# Print market (S&P500) return for the time period
results = loop.run_until_complete(
get_historic_bars(['SPY'], api_format(calendars[0].date),
api_format(calendars[-1].date),
TimeFrame.Day))
sp500_change = (results[0][1].iloc[-1].close - results[0][1].iloc[
0].close) / results[0][1].iloc[0].close
print('S&P 500 change during backtesting window: {:.4f}%'.format(
sp500_change * 100)
)
return portfolio_amount
# Used while backtesting to find out how much our portfolio would have been
# worth the day after we bought it.
def get_value_of_assets(api, shares_bought, on_date):
if len(shares_bought.keys()) == 0:
return 0
total_value = 0
formatted_date = api_format(on_date)
num_tries = 3
while num_tries > 0:
# sometimes it fails so give it a shot few more times
try:
barset = loop.run_until_complete(
get_historic_bars(list(shares_bought.keys()),
pytz.timezone("America/New_York").localize(
on_date).isoformat(),
pytz.timezone("America/New_York").localize(
on_date).isoformat(), TimeFrame.Day))
barset = dict(barset)
break
except Exception as e:
num_tries -= 1
if num_tries <= 0:
print("Error trying to get data")
sys.exit(-1)
for symbol in shares_bought:
df = barset[symbol]
if not df.empty:
total_value += shares_bought[symbol] * df.iloc[0].open
return total_value
def run_live(api):
# See if we've already bought or sold positions today. If so, we don't want to do it again.
# Useful in case the script is restarted during market hours.
bought_today = False
sold_today = False
try:
# The max stocks_to_hold is 200, so we shouldn't see more than 400
# orders on a given day.
orders = api.list_orders(
after=api_format(datetime.today() - timedelta(days=1)),
limit=400,
status='all'
)
for order in orders:
if order.side == 'buy':
bought_today = True
# This handles an edge case where the script is restarted
# right before the market closes.
sold_today = True
break
else:
sold_today = True
except:
# We don't have any orders, so we've obviously not done anything today.
pass
clock = api.get_clock()
next_market_time = clock.next_open
bought_today = False
sold_today = False
print_waiting = False
while True:
# We'll wait until the market's open to do anything.
clock = api.get_clock()
if clock.is_open and not bought_today:
if sold_today:
# Wait to buy
time_until_close = clock.next_close - clock.timestamp
# We'll buy our shares a couple minutes before market close.
if time_until_close.seconds <= 120:
print('Buying positions...')
portfolio_cash = float(api.get_account().cash)
ratings = get_ratings(
api, None
)
shares_to_buy = get_shares_to_buy(ratings, portfolio_cash)
for symbol in shares_to_buy:
if shares_to_buy[symbol] > 0:
api.submit_order(
symbol=symbol,
qty=shares_to_buy[symbol],
side='buy',
type='market',
time_in_force='day'
)
print('Positions bought.')
bought_today = True
else:
# We need to sell our old positions before buying new ones.
time_after_open = pd.Timestamp(
clock.timestamp.time().isoformat()) - pd.Timestamp(
clock.next_open.time().isoformat())
# We'll sell our shares just a minute after the market opens.
if time_after_open.seconds >= 60:
print('Liquidating positions.')
api.close_all_positions()
sold_today = True
else:
sold_today = False
if clock.timestamp > next_market_time:
next_market_time = clock.next_open
bought_today = False
sold_today = False
print("Market Open")
print_waiting = False
if not print_waiting:
print_waiting = True
print("Waiting for next market day...")
time.sleep(30)
if __name__ == '__main__':
api = tradeapi.REST()
rest = AsyncRest()
if len(sys.argv) < 2:
print(
'Error: please specify a command; either "run" or "backtest '
'<cash balance> <number of days to test>".')
else:
if sys.argv[1] == 'backtest':
# Run a backtesting session using the provided parameters
start_value = float(sys.argv[2])
testing_days = int(sys.argv[3])
portfolio_value = backtest(api, testing_days, start_value)
portfolio_change = (portfolio_value - start_value) / start_value
print('Portfolio change: {:.4f}%'.format(portfolio_change * 100))
elif sys.argv[1] == 'run':
run_live(api)
else:
print('Error: Unrecognized command ' + sys.argv[1]) | 2.4375 | 2 |
alfacoins/__init__.py | iRhonin/alfacoins | 0 | 12798590 | <filename>alfacoins/__init__.py
from .gateway import ALFACoins
from .exceptions import APIException, ServerException
__version__ = '0.1.0a2'
| 1.25 | 1 |
build-tools/scripts/update_gpu_list.py | daniel-falk/nnabla-ext-cuda | 103 | 12798591 | <gh_stars>100-1000
# Copyright 2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib.request as request
from html.parser import HTMLParser
import re
from mako.template import Template
import os
from gpu_info import incompatible_arcs, gpu_compute_capability_to_arc
basedir = os.path.dirname(os.path.abspath(__file__))
r = request.urlopen('https://developer.nvidia.com/cuda-gpus')
class GetGpuListFromNvidiaSite(HTMLParser):
def __init__(self):
super().__init__()
self.td = False
self.last_value = None
self.last_data = ''
self.gpu_data = {}
def handle_starttag(self, tag, attrs):
if tag == 'td':
self.td = True
def handle_endtag(self, tag):
if tag == 'td':
if self.td:
m = re.match(r'((\d+)\.(\d+))', self.last_data.strip())
if m:
cap = m.group(1)
cap_major = int(m.group(2))
cap_minor = int(m.group(3))
arch = gpu_compute_capability_to_arc.get(cap_major)
if arch is None:
arch = gpu_compute_capability_to_arc.get(
(cap_major, cap_minor))
if arch is None:
print(f'Error: unknown capability [{cap}]')
arch = ''
name = self.last_value.lower().replace(
'nvidia ', '').replace('tesla ', '') # remove prefix
self.gpu_data[name] = (arch, cap)
self.last_value = self.last_data.strip()
self.last_data = ''
self.td = False
def handle_data(self, data):
if self.td:
self.last_data += data
parser = GetGpuListFromNvidiaSite()
parser.feed(r.read().decode())
gpus_info = parser.gpu_data
incompatible_gpus = {}
for k in incompatible_arcs:
if not incompatible_gpus.get(k):
incompatible_gpus[k] = []
iarc = incompatible_arcs[k]
for gpu_name in gpus_info.keys():
if gpus_info[gpu_name][0] in iarc:
incompatible_gpus[k].append(gpu_name)
fname = os.path.join(basedir, 'skel', 'incompatibale_gpu_list.py.tmpl')
tmpl = Template(filename=fname)
lines = tmpl.render(args=incompatible_gpus)
with open("./python/src/nnabla_ext/cuda/incompatible_gpu_list.py", 'w') as f:
    f.write(lines)
| 2.296875 | 2 |
3test.py | zerovm/zpython2 | 4 | 12798592 | #!/usr/bin/python
import os
import sys
import subprocess
import socket
import tempfile
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', help='file containing tests list')
args = parser.parse_args()
# will use it as return code for script
test_result = 0
devnull = open(os.devnull, "w")
PATH = os.path.abspath(os.path.dirname(__file__))
TEST_DIR = os.path.join(PATH, 'Lib', 'test')
NVRAM_TMPLT = """[args]
args = python /dev/1.test.py
[fstab]
channel=/dev/1.python.tar,mountpoint=/,access=ro,removable=no
"""
MANIFEST_TMPLT = """Job = %(socket)s
Node = 1
Version = 20130611
Timeout = 50
Memory = 4294967296,0
Program = %(path)s/python
Channel = /dev/stdin,/dev/stdin,0,0,4294967296,4294967296,0,0
Channel = /dev/null,/dev/stdout,0,0,0,0,4294967296,4294967296
Channel = /dev/null,/dev/stderr,0,0,0,0,4294967296,4294967296
Channel = %(path)s/python.tar,/dev/1.python.tar,3,0,4294967296,4294967296,4294967296,4294967296
Channel = %(path)s/nvram.1,/dev/nvram,3,0,4294967296,4294967296,4294967296,4294967296
Channel = %(test_path)s/%(test)s.py,/dev/1.test.py,3,0,4294967296,4294967296,4294967296,4294967296
"""
# predefined tests
tests = [
'test_grammar',
'test_opcodes',
'test_dict',
'test_builtin',
'test_exceptions',
'test_types',
'test_unittest',
'test_doctest',
'test_doctest2',
]
if args.file:
tests = [l for l in open(args.file, 'r').readlines()]
def client(server_address, input):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
data = ''
try:
sock.connect(server_address)
sdata = input
size = '0x%06x' % (len(sdata))
sock.sendall(size + sdata)
resp = sock.makefile()
sdata = resp.read(8)
size = int(sdata, 0)
data = resp.read(size)
except IOError, e:
print str(e)
raise
finally:
sock.close()
return data
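# Wire format used by client() in both directions: an 8-character ASCII size
# header such as '0x00002a', followed by that many bytes of payload.
# Illustrative call (assumes a daemonized ZeroVM session is already listening
# on the given socket path):
#
#   reply = client('/tmp/tmpXXXXXX', manifest_text)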
class Daemon(object):
def __enter__(self):
# self.socket = os.path.join(PATH, 'tmp1234')
# self.fd, self.socket = 0, '/tmp/tmp.Mkba0cwcdk'
self.fd, self.socket = tempfile.mkstemp()
self._start_daemon()
return self
def __exit__(self, type, value, traceback):
self._stop_daemon()
os.remove(self.socket)
return False
def send(self, test):
params = {'socket': self.socket, 'path': PATH, 'test_path': TEST_DIR,
'test': test}
self.manifest = MANIFEST_TMPLT % params
return client(self.socket, self.manifest)
def _start_daemon(self):
with open(os.path.join(PATH, 'manifest.1'), 'w') as mfile:
params = {'socket': self.socket, 'path': PATH,
'test_path': TEST_DIR, 'test': ''}
self.manifest = MANIFEST_TMPLT % params
mfile.write(self.manifest)
with open(os.path.join(PATH, 'nvram.1'), 'w') as nfile:
params = {'test': ''}
nfile.write(NVRAM_TMPLT % params)
subprocess.call(['zerovm', os.path.join(PATH, 'manifest.1')],
stdout=devnull, stderr=devnull)
def _stop_daemon(self):
subprocess.call(['pkill', 'zvm'])
with Daemon() as daemon:
for test in tests:
print("%s.." % test.strip()[5:]),
sys.stdout.flush()
try:
ret = daemon.send(test.strip())
retcode = int(ret.splitlines()[2])
if retcode:
test_result = 1
print('\033[1;31mfail\033[1;m')
else:
print('\033[1;32mok\033[1;m')
except KeyboardInterrupt:
break
devnull.close()
sys.exit(test_result)
| 2.203125 | 2 |
plot.py | Neuromancer43/Physics | 0 | 12798593 | <filename>plot.py
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pylab as plt
data=np.loadtxt('./data/u2_sweep.csv')[:,:11]
#plt.figure().dpi=120
#plt.plot(data[0],data[1],'co-',label='Simulation')
#plt.plot(data[0],180/3.14*np.arctan(data[0]),'lightsalmon',linestyle='--',label='Experience')
#plt.ylabel('Repose Angle [deg]')
#plt.xlabel('Energy loss coefficient u1 [1]')
#plt.legend()
#plt.show()
plt.figure().dpi=120
plt.plot(data[0],data[1],'co-',label='Simulation')
#plt.plot(data[0],180/3.14*np.arctan(data[0]),'lightsalmon',linestyle='--',label='Experience')
plt.ylabel('Repose Angle [deg]')
plt.xlabel('Energy diffusion coefficient u2 [1]')
plt.legend()
plt.show() | 2.84375 | 3 |
cephalus/modules/input_prediction.py | TrueAGI/Cephalus | 0 | 12798594 | <gh_stars>0
from typing import Optional, Tuple, Callable
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
from cephalus.frame import StateFrame
from cephalus.kernel import StateKernel
from cephalus.modules.interface import StateKernelModule
__all__ = [
'InputPrediction'
]
class InputPrediction(StateKernelModule):
"""A state kernel module which adds a prediction loss for the next input to the kernel's
state predictions."""
_model = None
_loss_function: Callable = None
def configure(self, kernel: StateKernel) -> None:
super().configure(kernel)
self._model = Sequential([
Dense(self.input_width + self.state_width, activation='tanh'),
Dense(self.input_width)
])
def build(self) -> None:
self._model.build(input_shape=(None, self.state_width))
@tf.function
def loss_function(state, attended_inputs):
prediction = self._model(state[tf.newaxis, :])
target = tf.stop_gradient(attended_inputs)[tf.newaxis, :]
return 0.5 * tf.reduce_mean(tf.square(target - prediction))
self._loss_function = loss_function
super().build()
def get_trainable_weights(self) -> Tuple[tf.Variable, ...]:
return tuple(self._model.trainable_weights)
def get_loss(self, previous_frame: 'StateFrame',
current_frame: 'StateFrame') -> Optional[tf.Tensor]:
return self._loss_function(previous_frame.current_state,
current_frame.attended_input_tensor)
| 2.390625 | 2 |
src/helpers/flags.py | betoSolares/minicomp | 1 | 12798595 | import getopt
import os
import sys
# Help message to show
def help_message():
print("usage: minij [OPTIONS] [FILE]\n")
print("OPTIONS:")
print(" -h, --help Show help for the command")
print(" -o, --output Specify the output file")
# Try to get all the values passed to the program
def parse_flags(args_list):
shorts = "ho:"
longs = ["help", "output="]
try:
opts, vals = getopt.getopt(args_list, shorts, longs)
except getopt.error as e:
print("ERROR: %s" % e)
print("Try doing minij -h or minij --help to get more information")
sys.exit(1)
# Default values
args = {
"input": None,
"output": None,
}
for opt, val in opts:
# Print help message
if opt in ("-h", "--help"):
args["help"] = True
help_message()
sys.exit(0)
# Get specific output file
elif opt in ("-o", "--output"):
if os.path.isdir(val):
print("ERROR: The output file is a directory")
sys.exit(1)
args["output"] = val
# Get the input file
if len(vals) > 1:
print("ERROR: only one file is allowed")
sys.exit(1)
elif len(vals) < 1:
print("ERROR: no file provided")
sys.exit(1)
args["input"] = vals[0]
# Set the output if not specified
if args["output"] is None:
filename = os.path.splitext(os.path.basename(args["input"]))[0]
output = filename
count = 0
while os.path.isfile(output + ".table"):
count += 1
output = filename + "(" + str(count) + ")"
args["output"] = output + ".table"
return args
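# Illustrative usage (hypothetical file names; shows the shape of the returned
# dict):
#
#   >>> parse_flags(["-o", "out.table", "program.mj"])
#   {'input': 'program.mj', 'output': 'out.table'}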
| 2.984375 | 3 |
contact_test.py | BrianNgeno/python_contacts | 0 | 12798596 | import unittest
import pyperclip
from module_contact import Contact
class TestContact(unittest.TestCase):
def setUp(self):
self.new_contact = Contact("James","Muriuki","0712345678","<EMAIL>")
def test_init(self):
self.assertEqual(self.new_contact.first_name,"James")
self.assertEqual(self.new_contact.last_name,"Muriuki")
self.assertEqual(self.new_contact.phone_number,"0712345678")
self.assertEqual(self.new_contact.email,"<EMAIL>")
def test_save_contact(self):
self.new_contact.save_contact()
self.assertEqual(len(Contact.contact_list), 1)
def test_save_multiple_contact(self):
self.new_contact.save_contact()
test_contact=Contact("Test","user","0712345678","<EMAIL>")
test_contact.save_contact()
self.assertEqual(len(Contact.contact_list),2)
def tearDown(self):
Contact.contact_list=[]
def test_delete_contact(self):
self.new_contact.save_contact()
test_contact = Contact("Test","user","0712345678","<EMAIL>")
test_contact.save_contact()
self.new_contact.delete_contact()
self.assertEqual(len(Contact.contact_list),1)
def test_find_contact_by_number(self):
self.new_contact.save_contact()
test_contact = Contact("Test","user","0711223344","<EMAIL>")
test_contact.save_contact()
found_contact = Contact.find_by_number("0711223344")
self.assertEqual(found_contact.email,test_contact.email)
def test_contact_exists(self):
self.new_contact.save_contact()
test_contact = Contact("Test","user","0711223344","<EMAIL>")
test_contact.save_contact()
contact_exists = Contact.contact_exist("0711223344")
self.assertTrue(contact_exists)
def test_display_all_contacts(self):
self.assertEqual(Contact.display_contacts(),Contact.contact_list)
def test_copy_email(self):
self.new_contact.save_contact()
Contact.copy_email("0712345678")
self.assertEqual(self.new_contact.email,pyperclip.paste())
if __name__ == '__main__':
unittest.main()
| 3.21875 | 3 |
vize/150401032/client.py | hasan-se/blm304 | 2 | 12798597 | <gh_stars>1-10
# Enes TEKİN 150401032
import socket
import sys
import os
import pickle
IP = input('Server IP address to connect to: ')
PORT = 42
buf = 2048
ADDR = (IP,PORT)
client = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
while True:
    cmd = input('\n1. Type liste to list the files on the server\n2. Type PUT filename to upload a file to the server\n3. Type GET filename to download a file from the server\n') # e.g. GET transferfile.txt or PUT transferfile.txt
client.sendto(cmd.encode('UTF-8'),ADDR)
if(cmd[:3] == 'lis'):
ldata,addr = client.recvfrom(buf)
data_arr = pickle.loads(ldata)
print(data_arr)
elif(cmd[:3] == 'PUT'):
os.chdir('c:/Users/<NAME>/Desktop/client')
        dosyalar = os.listdir('c:/Users/<NAME>IN/Desktop/client') # directory of the file we are going to send
if cmd[4:] in dosyalar:
dosyaAdi = cmd[4:]
f= open(dosyaAdi,'rb')
l = f.read(buf)
while (l):
client.sendto(l,ADDR)
                print('Sending\n ',repr(l))
                l = f.read(buf)
            print('Done')
            print('Closing connection')
sys.exit()
else:
            print('No such file was found')
elif(cmd[:3] == 'GET'):
with open('gelendosya.txt', 'wb') as f:
            print('Receiving file...')
gdata,addr = client.recvfrom(buf)
if not gdata:
break
f.write(gdata)
        print('Done')
        print('Closing connection')
sys.exit()
else:
        print('You entered an invalid command')
| 2.734375 | 3 |
test_IGV.py | toniher/igv.js-flask | 25 | 12798598 | import unittest
import struct
from igvjs import app
class TestIGV(unittest.TestCase):
def setUp(self):
app.config['TESTING'] = True
app.config['ALLOWED_EMAILS'] = 'test_emails.txt'
app.config['USES_OAUTH'] = True
app.config['PUBLIC_DIR'] = None
self.app = app.test_client()
def test_page_loads(self):
response = self.app.get('/')
self.assertEqual(response.status_code, 200)
self.assertIn(b'<title>IGV - Integrative Genomics Viewer</title>', response.data)
def test_get_data_not_auth(self):
response = self.app.get('static/data/public/gstt1_sample.bam')
self.assertNotEqual(response, None)
self.assertEqual(response.status_code, 401)
def test_get_data_auth_disabled(self):
app.config['USES_OAUTH'] = False
response = self.app.get('static/data/public/gstt1_sample.bam')
self.assertEqual(response.status_code, 200)
def test_get_data_from_private_dir(self):
app.config['PUBLIC_DIR'] = '/static/js'
response = self.app.get('static/data/public/gstt1_sample.bam')
self.assertEqual(response.status_code, 401)
self.assertIn(b'Unauthorized', response.data)
def test_get_data_range_header(self):
start = 25
size = 100
response = self.app.get('../test/BufferedReaderTest.bin',
headers={"Range": "bytes={}-{}".format(start, start+size)})
for i in range(size):
expected_value = -128 + start + i
value = int(struct.unpack('b', response.data[i])[0])
self.assertEqual(value, expected_value)
if __name__ == "__main__":
unittest.main()
| 2.4375 | 2 |
cave/com.raytheon.viz.gfe/localization/gfe/userPython/smartTools/CheckSkyWithPoP.py | srcarter3/awips2 | 0 | 12798599 | <reponame>srcarter3/awips2
##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: <NAME>
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
# ----------------------------------------------------------------------------
# This software is in the public domain, furnished "as is", without technical
# support, and with no warranty, express or implied, as to its usefulness for
# any purpose.
#
# CheckSkyWithPoP
#
# Author:
# ----------------------------------------------------------------------------
##
# This is an absolute override file, indicating that a higher priority version
# of the file will completely replace a lower priority version of the file.
##
ToolType = "numeric"
WeatherElementEdited = "Sky"
from numpy import *
HideTool = 1
# You can screen the elements for which your tool will appear by using
# a ScreenList. For example:
#
#ScreenList = ["T","Td"]
#ScreenList = ["SCALAR","VECTOR","WEATHER","DISCRETE"]
### If desired, Set up variables to be solicited from the user:
VariableList = [
## ("Variable name1" , defaultValue1, "numeric"),
## ("Variable name2" , "default value2", "alphaNumeric"),
("Sky vs PoP Relationship:" , "add", "radio",
["add", "multiply", "Sky Limit"]),
("For add, multiply (smaller factor), by how much ?" , "20", "numeric"),
("For Sky Limit, only Sky less than Limit affected; it is raised to the Limit:", "", "label"),
("Enter Sky Limit: the minimum Sky cover needed to support Wx:" , 60, "numeric"),
# ("Enter minimum PoP for measurable precip:", 15, "numeric"),
("Enter Sky cover for 5% PoP:" , 30, "numeric"),
## ("Variable name3" , ["default value1", "default value2"], "check",
## ["value1", "value2", "value3"]),
## ("Variable name4" , "default value4", "radio",
## ["value1", "value2", "value3"]),
## ("Variable name5" , defaultValue, "scale",
## [minValue, maxValue], resolution),
## ("Variable name6" , "", "model"),
## ("Variable name7" , "", "D2D_model"),
## ("Label contents" , "", "label"),
## ("", dialogHeight, "scrollbar"),
]
# Set up Class
import SmartScript
## For available commands, see SmartScript
class Tool (SmartScript.SmartScript):
def __init__(self, dbss):
SmartScript.SmartScript.__init__(self, dbss)
# Required Method: Execute
# Called once for each grid
# Fill in the arguments you want to use -- WeatherElement1, WeatherElement2...
def execute(self, Sky, PoP, GridTimeRange, varDict):
"Creates highlight grids showing when and where there is not enough cloud \
        to support the corresponding PoP. The user decides by how much, or by what \
factor, the cloud should be greater."
## # Set up Variables from the varDict (see VariableList below)
## var1 = varDict["Variable name1"]
## var2 = varDict["Variable name2"]
# Determine new value
#############################################################################
#
# Configuration section
#
# Here is where to change the active edit area to your local forecast area
#
        localForecastArea = "ISC_Send_Area"
#
# End Configuration section
#
#############################################################################
        area = self.getEditArea(localForecastArea)
areaMask = self.encodeEditArea(area)
if (self._operator == "add"):
mainAddMask = logical_and(greater_equal(PoP, 5), less(Sky, PoP + self._factor))
InvalidSkyMask1 = logical_and(less(PoP, 5), less_equal(Sky, PoP * self._lofactor))
InvalidSkyMask2 = logical_and(mainAddMask, less_equal(PoP, self._breathingRoom))
InvalidSkyMask3 = logical_and(greater(PoP, self._breathingRoom), less(Sky, 99.5))
InvalidSkyMask4 = logical_or(InvalidSkyMask1, InvalidSkyMask2)
InvalidSkyMask5 = logical_or(InvalidSkyMask3, InvalidSkyMask4)
elif self._operator == "multiply":
InvalidSkyMask1 = logical_and(less(Sky, PoP * self._factor), less_equal(PoP * self._factor, 100))
InvalidSkyMask2 = logical_and(less(Sky, PoP * self._factor), greater(PoP * self._factor, 100))
InvalidSkyMask5 = logical_or(InvalidSkyMask1, InvalidSkyMask2)
else:
lowSkyMask = less(Sky, self._SkyLimit)
InvalidSkyMask1 = logical_and(lowSkyMask, greater_equal(PoP, self._PoPLimit))
InvalidSkyMask2 = logical_and(lowSkyMask, less_equal(Sky, self._SkyLimit - (self._PoPLimit - PoP) * self._slope))
InvalidSkyMask5 = logical_or(InvalidSkyMask1, InvalidSkyMask2)
# InvalidSkyMask = InvalidSkyMask5
InvalidSkyMask = logical_and(InvalidSkyMask5, areaMask)
if any(InvalidSkyMask):
self.createGrid("TemporaryData", "InvalidSkyForPoP", "SCALAR",
InvalidSkyMask, GridTimeRange)
self.setActiveElement("TemporaryData", "InvalidSkyForPoP", "SFC",
GridTimeRange, fitToData=1)
# Return the new value
# return Sky
# Optional Methods
## # These methods can have the additional argument:
## # ToolTimeRange -- selected time range over which we are running the tool
def preProcessTool(self, varDict):
# Called once at beginning of Tool
# Cannot have WeatherElement or Grid arguments
# Get thresholds for Sky cover
# Get method
self._operator = varDict["Sky vs PoP Relationship:"]
# For Add or Multiply:
# Idea is Sky is greater than PoP, either by a fixed amount
# (add), or by a factor. The value for sky, of course, cannot
# be greater than 100; this is where 'breathingRoom' comes in
# Sky is left alone if it is already large enough.
# For Muultply:
self._factor = varDict["For add, multiply (smaller factor), by how much ?"]
# For Add:
self._lofactor = float(self._factor + 5) / 5.
self._breathingRoom = 100 - self._factor
# For Sky Limit:
# self._PoPLimit = varDict["Enter minimum PoP for measurable precip:"] ## 15
self._PoPLimit = 15
self._SkyLimit = varDict["Enter Sky Limit: the minimum Sky cover needed to support Wx:"] ## 60
self._SkyMin = varDict["Enter Sky cover for 5% PoP:"] ## 30
# Make sure minimum PoP for measurable precip is 15-25%
if self._PoPLimit < 15.0:
self._PoPLimit = 15.0
elif self._PoPLimit > 25.0:
self._PoPLimit = 25.0
# Make sure minimum Sky cover for Wx is 50-100%
if self._SkyLimit < 50:
self._SkyLimit = 50
elif self._SkyLimit > 100:
self._SkyLimit = 100
# Make sure Sky cover for 5% PoP is 10-100%
if self._SkyMin < 10:
self._SkyMin = 10
elif self._SkyMin > 100:
self._SkyMin = 100
# Make sure Sky cover for 5% PoP < minimum Sky cover for Wx
if self._SkyMin > self._SkyLimit - 15:
self._SkyMin = self._SkyLimit - 15
# Compute slope to use for this line
self._slope = (self._SkyLimit - self._SkyMin) / (self._PoPLimit - 5.0)
## def preProcessTool(self, varDict):
## # Called once at beginning of Tool
## # Cannot have WeatherElement or Grid arguments
## pass
## def postProcessTool(self, varDict):
## # Called once at end of Tool
## # Cannot have WeatherElement or Grid arguments
## pass
## What is "self"????
## "Self" refers to this Tool class instance. Don't worry much about it.
## All you need to do is:
## -- Make sure to list "self" as the first argument of
## method Definitions:
## def _myMethod(self, arg1, arg2)
## -- When calling your methods, use self._methodName omitting
## "self" as the first argument:
## x = self._myMethod(arg1, arg2)
##
## Error Handling
## Call self.abort(errorString) to stop execution of your tool and
## display a message to the user.
## For example:
## if x > 1000:
## self.abort("x is too large")
##
## Call self.noData(messageString) to stop execution of your tool
## and return a "NoData" error which can be checked by a Procedure.
| 1.539063 | 2 |
proliantutils/ilo/constants.py | anta-nok/proliantutils | 0 | 12798600 | <reponame>anta-nok/proliantutils
# Copyright 2017 Hewlett Packard Enterprise Development LP
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# SUPPORTED_BOOT_MODE constants
SUPPORTED_BOOT_MODE_LEGACY_BIOS_ONLY = 'legacy bios only'
SUPPORTED_BOOT_MODE_UEFI_ONLY = 'uefi only'
SUPPORTED_BOOT_MODE_LEGACY_BIOS_AND_UEFI = 'legacy bios and uefi'
SUPPORTED_BIOS_PROPERTIES = [
"AdvancedMemProtection",
"AutoPowerOn",
"BootMode",
"BootOrderPolicy",
"CollabPowerControl",
"DynamicPowerCapping",
"DynamicPowerResponse",
"IntelligentProvisioning",
"IntelPerfMonitoring",
"IntelProcVtd",
"IntelQpiFreq",
"IntelTxt",
"PowerProfile",
"PowerRegulator",
"ProcAes",
"ProcCoreDisable",
"ProcHyperthreading",
"ProcNoExecute",
"ProcTurbo",
"ProcVirtualization",
"SecureBootStatus",
"Sriov",
"ThermalConfig",
"ThermalShutdown",
"TpmState",
"TpmType",
"UefiOptimizedBoot"
]
SUPPORTED_REDFISH_BIOS_PROPERTIES = SUPPORTED_BIOS_PROPERTIES + [
"WorkloadProfile"
]
| 1.03125 | 1 |
set-1/level-6.py | tritoke/matasano-crypto-challenges | 0 | 12798601 | <reponame>tritoke/matasano-crypto-challenges
import base64
import string
import itertools
with open("6.txt", "r") as the_file:
b64data = the_file.read().replace("\n", "")
data = base64.standard_b64decode(b64data)
print(data)
def hamming_distance(x, y):
# finds the xor of the numbers in the two lists
list_diff = ["{0:b}".format(i ^ j) for i, j in zip(x, y)]
# finds the total number of 1's in the XOR's
return sum([1 for i in "".join(list_diff) if int(i) == 1])
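# Quick illustrative sanity check (doctest style, not executed automatically):
# the challenge text gives the edit distance between these two strings as 37.
#
#   >>> hamming_distance(b"this is a test", b"wokka wokka!!!")
#   37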
L = []
for KEYSIZE in range(2, 40):
    # average the per-block normalised edit distance over five block pairs
    t = hamming_distance(data[:KEYSIZE], data[KEYSIZE:2 * KEYSIZE]) // KEYSIZE
    for x in range(2, 6):
        t += hamming_distance(data[x * KEYSIZE:(x + 1) * KEYSIZE],
                              data[(x + 1) * KEYSIZE:(x + 2) * KEYSIZE]) // KEYSIZE
    t //= 5
L.append((KEYSIZE, t))
KEYSIZES = [x[0] for x in L if x[1] <= 2]
decrypted_strings = {}
for KEYSIZE in KEYSIZES:
blocks = [data[i:i+KEYSIZE] for i in range(0, len(data), KEYSIZE)]
key = ""
for z in range(KEYSIZE):
answer = []
strings = {}
chars = string.ascii_letters + " "
for i in chars:
out_arr = []
for j in [x[z] for x in blocks if len(x) > z]:
out_arr.append(ord(i) ^ j)
strings[i] = "".join([chr(x) for x in out_arr])
answer.append((sum([1 for x in out_arr if chr(x) in chars]), ord(i)))
key += chr(max(answer)[1])
# prints out all of the keys
print(key, KEYSIZE)
key_index = 0
out = []
decrypted_strings[KEYSIZE] = [chr(int(a) ^ ord(b)) for a, b in zip(data, itertools.cycle(key))]
chars = string.ascii_letters
answer = []
for KEYSIZE in KEYSIZES:
answer.append((sum([1 for x in decrypted_strings[KEYSIZE] if x in chars]), KEYSIZE))
print(max(answer)[1])
print("".join(decrypted_strings[max(answer)[1]]))
| 3.203125 | 3 |
slideshow.py | recursethenreverse/numerouter | 0 | 12798602 | from urllib.request import urlopen
from io import BytesIO
import time
import tkinter as tk
from PIL import Image, ImageTk
import json
from rmq import RMQiface
urls = [
'https://cdn.revjet.com/s3/csp/1578955925683/shine.png',
'https://cdn.revjet.com/s3/csp/1578955925683/logo.svg',
'https://tpc.googlesyndication.com/daca_images/simgad/13865403217536204307',
'https://tpc.googlesyndication.com/daca_images/simgad/1948022358329940732?sqp=4sqPyQSWAUKTAQgAEhQNzczMPhUAAABAHQAAAAAlAAAAABgAIgoNAACAPxUAAIA_Kk8IWhABHQAAtEIgASgBMAY4A0CAwtcvSABQAFgAYFpwAngAgAEAiAEAkAEAnQEAAIA_oAEAqAEAsAGAreIEuAH___________8BxQEtsp0-MhoIvwMQ6gEYASABLQAAAD8wvwM46gFFAACAPw&rs=AOga4qmwNN2g28c_J8ehXFAoY4bOr7naGQ',
'https://tpc.googlesyndication.com/simgad/12366423408132574325',
'https://tpc.googlesyndication.com/simgad/3767484695346986263'
]
class HiddenRoot(tk.Tk):
def __init__(self):
tk.Tk.__init__(self)
#hackish way, essentially makes root window
#as small as possible but still "focused"
#enabling us to use the binding on <esc>
self.wm_geometry("0x0+0+0")
self.window = MySlideShow(self)
self.window.cycle()
class MySlideShow(tk.Toplevel):
def __init__(self, *args, **kwargs):
tk.Toplevel.__init__(self, *args, **kwargs)
with open('reader_config.json', 'r') as f:
config = json.load(f)
host = config['host']
usr = config['user']
pwd = config['password']
queue = config['filtered_images_queue_name']
self.mq = RMQiface(host, queue, usr, pwd)
self.img_error = Image.open('error.png')
self.img_none = Image.open('none.png')
#remove window decorations
# self.overrideredirect(True)
#save reference to photo so that garbage collection
#does not clear image variable in show_image()
self.persistent_image = None
self.imageList = []
self.pixNum = 0
#used to display as background image
self.label = tk.Label(self)
self.label.pack(side="top", fill="both", expand=True)
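        # The reader_config.json consumed above is expected to provide these
        # keys (values here are illustrative only):
        #
        #   {
        #     "host": "localhost",
        #     "user": "guest",
        #     "password": "guest",
        #     "filtered_images_queue_name": "filtered_images"
        #   }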
def cycle(self):
while True:
self.nexti()
time.sleep(0.01)
def nexti(self):
# import random
# url = random.choice(urls)
url = self.mq.read()
if url:
try:
img = Image.open(BytesIO(urlopen(url).read()))
self.showImage(img)
print(f'INFO:\tshowing {url}')
except Exception:
print(f'ERROR:\tnot a valid image: {url}')
else:
print('INFO:\tQueue is empty')
time.sleep(1.0)
def showImage(self, image):
img_w, img_h = image.size
scr_w, scr_h = self.winfo_screenwidth(), self.winfo_screenheight()
width, height = min(scr_w, img_w), min(scr_h, img_h)
image.thumbnail((width, height), Image.ANTIALIAS)
#set window size after scaling the original image up/down to fit screen
#removes the border on the image
scaled_w, scaled_h = image.size
self.wm_geometry("{}x{}+{}+{}".format(scaled_w,scaled_h,0,0))
# create new image
self.persistent_image = ImageTk.PhotoImage(image)
self.label.configure(image=self.persistent_image)
self.update()
slideShow = HiddenRoot()
# slideShow.window.attributes('-fullscreen', True)
# slideShow.window.attributes('-topmost', True)
slideShow.bind_all("<Escape>", lambda e: slideShow.destroy())
# slideShow.bind_all("<Return>", lambda e: slideShow.window.nexti()) # exit on esc
slideShow.update()
slideShow.mainloop()
| 2.34375 | 2 |
src/core/actions/create_branch.py | victoraugustofd/git-phoenix | 5 | 12798603 | <filename>src/core/actions/create_branch.py
from dataclasses import dataclass
from src.core.actions.executable import Executable
from src.core.actions.executable import _validate_pattern
from src.core.models import ActionExecution
from src.core.px_git import checkout_new_branch
from src.core.px_questionary import confirm
from src.core.template_models import Branch, Affix, Pattern
@dataclass
class CreateBranchParameters:
name: str
source: Branch
affix: Affix
pattern: Pattern
class CreateBranch(Executable):
parameters: CreateBranchParameters
def __init__(self, action_execution: ActionExecution):
super().__init__(action_execution)
action_parameters = self.action_execution.parameters
self.parameters = CreateBranchParameters(
name=action_parameters.get("name", ""),
source=Branch(action_parameters.get("source", {})),
affix=Affix(action_parameters.get("affix", {})),
pattern=Pattern(action_parameters.get("pattern", {})),
)
def execute(self):
name = self.parameters.name
source = self.parameters.source
affix = self.parameters.affix
pattern = self.parameters.pattern
_validate_pattern(source.pattern, source.name, "Source name invalid")
_validate_pattern(pattern, name, "Name invalid")
if affix:
final_name = [name]
if affix.prefix:
final_name = affix.prefix + final_name
name = affix.join_char.join(final_name)
if affix.suffix:
final_name.extend(affix.suffix)
name = affix.join_char.join(final_name)
confirmed = confirm(
            msg=f"Do you confirm the creation of "
            f"branch {name} based on "
            f"branch {source.name}?"
)
if confirmed:
checkout_new_branch(source=source.name, branch=name)
| 2.171875 | 2 |
01_mysite/blog/forms.py | victordomingos/Learning_Django | 1 | 12798604 | <filename>01_mysite/blog/forms.py
from django import forms
from .models import Artigo, Comentario
class ArtigoForm(forms.ModelForm):
class Meta:
model = Artigo
fields = ('titulo', 'texto',)
class ComentarioForm(forms.ModelForm):
class Meta:
model = Comentario
fields = ('autor', 'texto',)
| 2.15625 | 2 |
invenio_app_ils/records/resolver/resolver.py | lauren-d/invenio-app-ils | 0 | 12798605 | <filename>invenio_app_ils/records/resolver/resolver.py
# -*- coding: utf-8 -*-
#
# Copyright (C) 2018-2019 CERN.
#
# invenio-app-ils is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Resolvers common."""
def get_field_value_for_record(record_cls, record_pid, field_name):
"""Return the given field value for a given record PID."""
record = record_cls.get_record_by_pid(record_pid)
if not record or field_name not in record:
message = "{0} not found in record {1}".format(field_name, record_pid)
raise KeyError(message)
return record[field_name]
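# Minimal illustration (hypothetical record class and PID):
#
#   title = get_field_value_for_record(Document, "docid-1", "title")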
| 2.109375 | 2 |
src/ralph/networks/migrations/0008_auto_20160808_0719.py | DoNnMyTh/ralph | 1,668 | 12798606 | <reponame>DoNnMyTh/ralph<gh_stars>1000+
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import ipaddress
from itertools import chain
from django.db import migrations, models
IPADDRESS_STATUS_RESERVED = 2
def _reserve_margin_addresses(network, bottom_count, top_count, IPAddress):
ips = []
ips_query = IPAddress.objects.filter(
models.Q(
number__gte=network.min_ip + 1,
number__lte=network.min_ip + bottom_count + 1
) |
models.Q(
number__gte=network.max_ip - top_count,
number__lte=network.max_ip
)
)
existing_ips = set(ips_query.values_list('number', flat=True))
to_create = set(chain.from_iterable([
range(int(network.min_ip + 1), int(network.min_ip + bottom_count + 1)), # noqa
range(int(network.max_ip - top_count), int(network.max_ip))
]))
to_create = to_create - existing_ips
for ip_as_int in to_create:
ips.append(IPAddress(
address=str(ipaddress.ip_address(ip_as_int)),
number=ip_as_int,
network=network,
status=IPADDRESS_STATUS_RESERVED
))
print('Creating {} ips for {}'.format(len(ips), network))
IPAddress.objects.bulk_create(ips)
ips_query.update(status=IPADDRESS_STATUS_RESERVED)
def create_reserved_ips(apps, schema_editor):
IPAddress = apps.get_model('networks', 'IPAddress')
Network = apps.get_model('networks', 'Network')
for network in Network.objects.all():
_reserve_margin_addresses(
network,
network.reserved_from_beginning,
network.reserved_from_end,
IPAddress
)
def remove_reserved_ips(apps, schema_editor):
IPAddress = apps.get_model('networks', 'IPAddress')
ips = IPAddress.objects.filter(
models.Q(ethernet__isnull=True) | (
models.Q(ethernet__base_object__isnull=True) &
models.Q(ethernet__mac__isnull=False)
),
status=IPADDRESS_STATUS_RESERVED,
gateway_network__isnull=True,
)
print('Removing {} reserved IPs'.format(ips.count()))
ips.delete()
class Migration(migrations.Migration):
dependencies = [
('networks', '0007_auto_20160804_1409'),
]
operations = [
migrations.AddField(
model_name='network',
name='reserved_from_beginning',
            field=models.PositiveIntegerField(help_text='Number of addresses to be omitted in DHCP automatic assignment counted from the first IP in range (excluding network address)', default=10),
),
migrations.AddField(
model_name='network',
name='reserved_from_end',
            field=models.PositiveIntegerField(help_text='Number of addresses to be omitted in DHCP automatic assignment counted from the last IP in range (excluding broadcast address)', default=0),
),
migrations.RunPython(
remove_reserved_ips,
reverse_code=create_reserved_ips
),
]
| 2.296875 | 2 |
sysrev/models.py | iliawnek/SystematicReview | 0 | 12798607 | from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.db import models
from django.template.defaultfilters import slugify
from django.utils.html import escape
from django.db import transaction
from sysrev.api.PubMed import _get_authors, _get_date, url_from_id, read_papers_from_ids
from sysrev.api import PubMed
class Review(models.Model):
participants = models.ManyToManyField(User)
title = models.CharField(max_length=128, unique=False)
slug = models.SlugField()
description = models.TextField(default="")
date_created = models.DateTimeField(auto_now_add=True)
last_modified = models.DateTimeField(auto_now=True)
completed = models.BooleanField(default=False)
date_completed = models.DateTimeField(default=None, null=True)
query = models.TextField(default="")
def perform_query(self):
# TODO: discard existing papers if there are any
ids_from_query = PubMed.get_ids_from_query(self.query)
if self.paper_pool_counts()["abstract"] == 0:
Paper.create_papers_from_pubmed_ids(ids_from_query, self)
else:
papers = Paper.objects.filter(review=self)
existing_ids = []
for paper in papers:
existing_ids += [paper.pubmed_id]
existing_abstract_ids = []
for paper in papers.filter(pool="A"):
existing_abstract_ids += [paper.pubmed_id]
with transaction.atomic():
Paper.objects\
.filter(pubmed_id__in=existing_abstract_ids)\
.exclude(pubmed_id__in=ids_from_query)\
.delete()
ids_to_add = list(set(ids_from_query).difference(existing_ids))
if ids_to_add:
Paper.create_papers_from_pubmed_ids(ids_to_add, self)
def paper_pool_percentages(self):
# TODO: Typically, paper_pool_counts() gets called then this gets called.
# Seems a bit wasteful, as it ends up running multiple times and querying counts repeatedly
counts = self.paper_pool_counts()
total = float(counts["total"])
if total is not 0:
progress = ((counts["final"] + counts["rejected"]) / total) * 100.0
# minimum display percentage
min_percent = 5.0
for key in counts:
if key == "total":
continue
old = counts[key] = float(counts[key])
result = (counts[key] / total) * 100.0
if result != 0.0 and result < min_percent:
counts[key] = new = (min_percent * total) / 100.0
total += new - old
abstract = (counts["abstract"] / total) * 100.0
document = (counts["document"] / total) * 100.0
final = (counts["final"] / total) * 100.0
rejected = (counts["rejected"] / total) * 100.0
return {"abstract": abstract,
"document": document,
"final": final,
"rejected": rejected,
"progress": progress}
else:
return
def paper_pool_counts(self):
relevant_papers = Paper.objects.filter(review=self)
abstract_count = relevant_papers.filter(pool="A").count()
document_count = relevant_papers.filter(pool="D").count()
final_count = relevant_papers.filter(pool="F").count()
rejected_count = relevant_papers.filter(pool="R").count()
return {"abstract": abstract_count,
"document": document_count,
"final": final_count,
"rejected": rejected_count,
"remaining": abstract_count + document_count,
"total": abstract_count + document_count + final_count + rejected_count}
def invite(self, invitees):
for invitee in invitees:
user = None
if invitee.find("@") == -1:
user = User.objects.get(username=invitee)
else:
user = User.objects.get(email=invitee)
self.participants.add(user)
def clean(self):
if (not self.participants) or self.participants.count() < 1:
raise ValidationError('Need at least one participant')
def save(self, *args, **kwargs):
self.slug = slugify(self.title)
super(Review, self).save()
def get_absolute_url(self):
return reverse('review_detail', args=[str(self.pk)])[:-1] + "-" + self.slug
def __unicode__(self):
return str(self.pk) + ": " + self.title
class Paper(models.Model):
ABSTRACT_POOL = 'A'
DOCUMENT_POOL = 'D'
FINAL_POOL = 'F'
REJECTED = 'R'
POOLS = (
(ABSTRACT_POOL, 'Abstract pool'),
(DOCUMENT_POOL, 'Document pool'),
(FINAL_POOL, 'Final pool'),
(REJECTED, 'Rejected')
)
review = models.ForeignKey(Review)
title = models.CharField(max_length=128)
authors = models.CharField(max_length=128)
abstract = models.TextField(default="")
publish_date = models.DateField(null=True)
url = models.URLField(default="")
pubmed_id = models.CharField(max_length=16)
notes = models.TextField(default="")
pool = models.CharField(max_length=1, choices=POOLS, default=ABSTRACT_POOL)
@staticmethod
def create_paper_from_data(data, review, pool):
"""Creates Paper model from given data, review and pool"""
medlineCitation = data[u'MedlineCitation']
article = medlineCitation[u'Article']
title = article[u'ArticleTitle'].lstrip("[").rstrip("].")
pubmed_id = medlineCitation[u'PMID']
paper = Paper.objects.get_or_create(review=review, title=title, pubmed_id=pubmed_id)[0]
paper.review = review
paper.authors = _get_authors(article)
abstractText = ""
try:
for stringElement in article[u'Abstract'][u'AbstractText']:
try:
abstractText += "<h4>" + escape(stringElement.attributes[u'Label']) + "</h4>"
except AttributeError:
pass
abstractText += escape(stringElement) + "\n\n"
except KeyError:
pass
paper.abstract = abstractText
paper.publish_date = _get_date(medlineCitation)
paper.url = url_from_id(pubmed_id)
paper.notes = ""
paper.pool = pool
paper.save()
return paper
@staticmethod
def create_papers_from_pubmed_ids(ids, review, pool='A'):
"""Creates papers from all of the given ids, in the given review and pool"""
papers = read_papers_from_ids(ids)
# Commit all papers in single transaction
# Improves performance, as django won't automatically commit after every save call when creating lots of papers
with transaction.atomic():
return map(lambda data: Paper.create_paper_from_data(data, review, pool), papers)
def get_absolute_url(self):
return self.review.get_absolute_url() + "/" + str(self.pk)
def __unicode__(self):
return str(self.review) + " - " + self.title
| 2.046875 | 2 |
tools/bpy_smooth_tiles.py | Avnerus/3d-tiles-validator | 1 | 12798608 | import sys
import bpy
if __name__ == "__main__":
args = sys.argv[sys.argv.index('--'):]
print(args)
bpy.ops.import_scene.gltf(filepath=args[1])
obj = bpy.context.active_object
mod = obj.modifiers.new("CorrectiveSmooth", 'CORRECTIVE_SMOOTH')
mod.factor = 0.1
mod.scale = 1.5
bpy.ops.object.modifier_apply(modifier="CorrectiveSmooth")
bpy.ops.export_scene.gltf(
filepath=args[2],
export_normals=False,
export_colors=False,
use_selection=True
)
| 1.945313 | 2 |
APIs/samples/VehiclePlayer/ConvertFromXML/convert.py | AVSimulation/SCANeR-Samples-Pack | 0 | 12798609 | import os
import sys
import xml.etree.ElementTree as ET
from pyproj import Proj,Transformer
def createSCANeRMDL(fileName,type):
    #Create the MDL file
templateFileName = "template/template_"+type+".mdl"
templateFile = open(templateFileName, "r")
template= templateFile.read()
content = template.replace('template_vehicle_name', os.path.basename(fileName))
f = open(fileName + ".mdl", "w")
f.write(content)
templateFile.close()
f.close()
def createSCANePlayer(fileName):
    #Create the Player file
templateFileName = "template/template.vhplayer"
templateFile = open(templateFileName, "r")
template= templateFile.read()
content = template.replace('template_vehicle_name', os.path.basename(fileName))
f = open(fileName + ".vhplayer", "w")
f.write(content)
templateFile.close()
f.close()
def createGPX(fileName, root, startNode):
    #Create the GPX file
f = open(fileName + ".gpx", "w")
f.write( "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")
f.write( "<gpx version=\"1.1\" creator=\"GpxTraceNet6.2\"\n")
f.write( "xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns=\"http://www.topografix.com/GPX/1/1\"\n")
f.write( "xsi:schemaLocation=\"http://www.topografix.com/GPX/1/1 http://www.topografix.com/GPX/1/1/gpx.xsd\">\n")
f.write( "<trk>\n")
f.write( "<name> RDE </name>\n")
f.write( "<trkseg>\n")
for geo in root.findall(startNode):
#print geo.attrib
strLine = "<trkpt lat=\""+geo.get('lat')+"\" lon=\"" + geo.get('long') + "\"></trkpt>"
f.write( strLine )
f.write( "\n" )
f.write( "</trkseg>\n")
f.write( "</trk>\n")
f.write( "</gpx>\n")
f.close()
def writeCSVLine(f, strLine, col):
for icol in col[:-1]:
strLine += str(icol)+";"
strLine+=str(col[-1])+"\n"
f.write( strLine )
def createCSV(fileName, root, startNode, inProj, outProj):
offsetX = 171338.11
offsetY = 388410.20
f = open(fileName+ ".csv", "w")
col = [0.0]*36
strLine=""
writeCSVLine(f, strLine, col)
strLine=""
writeCSVLine(f, strLine, col)
for geo in root.findall(startNode):
transformer=Transformer.from_proj(inProj,outProj)
X,Y=transformer.transform(geo.get('long'),geo.get('lat'))
x=X-offsetX
y=Y-offsetY
yaw = 0.017453*float(geo.get('course'))
col = [0.0]*36
col[0]=geo.get('secs')
col[1]=col[7]=x
col[2]=col[8]=y
col[6]=col[12]=yaw
strLine=""
writeCSVLine(f, strLine, col)
f.close()
def main():
#Parameters
if len(sys.argv) < 2:
print('argument missing : name of the file to import')
exit()
infilename = sys.argv[1]
outfoldername = os.path.splitext(infilename)[0] + "_vhlplayer/"
try:
os.mkdir(outfoldername)
except:
pass
#Projections
inProj=Proj(proj='latlong',datum='WGS84')
outProj=Proj(init='epsg:28992', towgs84='565.417,50.3319,465.552,-0.398957,0.343988,-1.8774,4.0725')
offsetX = 171338.11
offsetY = 388410.20
tree = ET.parse(infilename)
root = tree.getroot()
    #Create the output files (GPX, CSV, MDL, player)
createGPX(outfoldername + "hostvehicle", root, ".//hostvehicle/traj/geo")
createCSV(outfoldername + "hostvehicle", root, ".//hostvehicle/traj/geo", inProj, outProj)
createSCANeRMDL(outfoldername + "hostvehicle","car")
createSCANePlayer(outfoldername + "hostvehicle")
if __name__ == '__main__':
sys.exit(main()) | 2.59375 | 3 |
meiduo_mall/utils/viewsMixin.py | joinik/meiduo_mall | 0 | 12798610 | from django.contrib.auth.mixins import LoginRequiredMixin
from django import http
class LoginRequiredJSONMixin(LoginRequiredMixin):
"""Verify that the current user is authenticated."""
def handle_no_permission(self):
        return http.JsonResponse({'code': 400, 'errmsg': 'User not logged in'}) | 2.046875 | 2 |
python/hello_world.py | kayabe/deadfrog-lib | 7 | 12798611 | from deadfroglib import *
# set up the window
screen_w = 800
screen_h = 600
win = CreateWin(50, 50, screen_w, screen_h, True, 'Hello World Example')
# Choose a font
font = CreateTextRenderer("Courier", 8, True)
# set up the colors
BLACK = 0x00000000
WHITE = 0xffffffff
while not win.contents.windowClosed:
bmp = AdvanceWin(win)
ClearBitmap(bmp, WHITE)
DrawTextCentre(font, BLACK, bmp, screen_w/2, screen_h/2, "Hello World!")
| 3.21875 | 3 |
thelma/repositories/rdb/mappers/worklistseriesmember.py | fogathmann/TheLMA | 1 | 12798612 | """
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Worklist series member mapper.
"""
from sqlalchemy.orm import mapper
from sqlalchemy.orm import relationship
from thelma.entities.liquidtransfer import PlannedWorklist
from thelma.entities.liquidtransfer import WorklistSeries
from thelma.entities.liquidtransfer import WorklistSeriesMember
__docformat__ = 'reStructuredText en'
__all__ = ['create_mapper']
def create_mapper(worklist_series_member_tbl):
"Mapper factory."
m = mapper(WorklistSeriesMember, worklist_series_member_tbl,
properties=dict(
worklist_series=relationship(WorklistSeries,
uselist=False,
back_populates='worklist_series_members'),
planned_worklist=relationship(PlannedWorklist,
uselist=False,
back_populates='worklist_series_member',
cascade='all,delete,delete-orphan',
single_parent=True),
)
)
return m
| 2.1875 | 2 |
msumastro/header_processing/astrometry.py | mwcraig/msumastro | 4 | 12798613 | import logging
import subprocess
from os import path, remove, rename
import tempfile
from textwrap import dedent
__all__ = ['call_astrometry', 'add_astrometry']
logger = logging.getLogger(__name__)
def call_astrometry(filename, sextractor=False,
custom_sextractor_config=False, feder_settings=True,
no_plots=True, minimal_output=True,
save_wcs=False, verify=None,
ra_dec=None, overwrite=False,
wcs_reference_image_center=True,
odds_ratio=None,
astrometry_config=None,
additional_args=None):
"""
Wrapper around astrometry.net solve-field.
Parameters
----------
sextractor : bool or str, optional
``True`` to use `sextractor`, or a ``str`` with the
path to sextractor.
custom_sextractor_config : bool, optional
        If ``True``, use a sextractor configuration file customized for Feder
images.
feder_settings : bool, optional
Set True if you want to use plate scale appropriate for Feder
Observatory Apogee Alta U9 camera.
no_plots : bool, optional
``True`` to suppress astrometry.net generation of
plots (pngs showing object location and more)
minimal_output : bool, optional
        If ``True``, suppress output of the WCS header, RA/Dec object
        list and matching objects list as separate files, but see
        also `save_wcs`
save_wcs : bool, optional
If ``True``, save WCS header even if other output is suppressed
        with `minimal_output`
verify : str, optional
Name of a WCS header to be used as a first guess
for the astrometry fit; if this plate solution does not work
the solution is found as though `verify` had not been specified.
ra_dec : list or tuple of float
        (RA, Dec); also limits the search radius to 0.5 degrees.
overwrite : bool, optional
If ``True``, perform astrometry even if astrometry.net files from a
previous run are present.
    wcs_reference_image_center : bool, optional
If ``True``, force the WCS reference point in the image to be the
image center.
odds_ratio : float, optional
The odds ratio to use for a successful solve. Default is to use the
default in `solve-field`.
astrometry_config : str, optional
Name of configuration file to use for SExtractor.
additional_args : str or list of str, optional
Additional arguments to pass to `solve-field`
"""
solve_field = ["solve-field"]
option_list = []
option_list.append("--obj 100")
if feder_settings:
option_list.append(
"--scale-low 0.5 --scale-high 0.6 --scale-units arcsecperpix")
if additional_args is not None:
if isinstance(additional_args, str):
add_ons = [additional_args]
else:
add_ons = additional_args
option_list.extend(add_ons)
if isinstance(sextractor, str):
option_list.append("--source-extractor-path " + sextractor)
elif sextractor:
option_list.append("--use-source-extractor")
if no_plots:
option_list.append("--no-plot")
if minimal_output:
option_list.append("--corr none --rdls none --match none")
if not save_wcs:
option_list.append("--wcs none")
if ra_dec is not None:
option_list.append("--ra %s --dec %s --radius 0.5" % ra_dec)
if overwrite:
option_list.append("--overwrite")
if wcs_reference_image_center:
option_list.append("--crpix-center")
options = " ".join(option_list)
solve_field.extend(options.split())
if custom_sextractor_config:
tmp_location = tempfile.mkdtemp()
param_location = path.join(tmp_location, 'default.param')
config_location = path.join(tmp_location, 'feder.config')
config_contents = SExtractor_config.format(param_file=param_location)
with open(config_location, 'w') as f:
f.write(config_contents)
with open(param_location, 'w') as f:
contents = """
X_IMAGE
Y_IMAGE
MAG_AUTO
FLUX_AUTO
"""
f.write(dedent(contents))
additional_solve_args = [
'--source-extractor-config', config_location,
'--x-column', 'X_IMAGE',
'--y-column', 'Y_IMAGE',
'--sort-column', 'MAG_AUTO',
'--sort-ascending'
]
solve_field.extend(additional_solve_args)
if odds_ratio is not None:
solve_field.append('--odds-to-solve')
solve_field.append(odds_ratio)
if astrometry_config is not None:
solve_field.append('--config')
solve_field.append(astrometry_config)
# kludge to handle case when path of verify file contains a space--split
# above does not work for that case.
if verify is not None:
if verify:
solve_field.append("--verify")
solve_field.append("%s" % verify)
else:
solve_field.append("--no-verify")
solve_field.extend([filename])
print(' '.join(solve_field))
logger.info(' '.join(solve_field))
try:
solve_field_output = subprocess.check_output(solve_field,
stderr=subprocess.STDOUT)
return_status = 0
log_level = logging.DEBUG
except subprocess.CalledProcessError as e:
return_status = e.returncode
solve_field_output = 'Output from astrometry.net:\n' + str(e.output)
log_level = logging.WARN
logger.warning('Adding astrometry failed for %s', filename)
raise e
logger.log(log_level, solve_field_output)
return return_status
def add_astrometry(filename, overwrite=False, ra_dec=None,
note_failure=False, save_wcs=False,
verify=None, try_builtin_source_finder=False,
custom_sextractor=False,
odds_ratio=None,
astrometry_config=None,
camera='',
avoid_pyfits=False,
no_source_extractor=False,
solve_field_args=None):
"""Add WCS headers to FITS file using astrometry.net
Parameters
----------
overwrite : bool, optional
Set ``True`` to overwrite the original file. If `False`,
the file astrometry.net generates is kept.
ra_dec : list or tuple of float or str
(RA, Dec) of field center as either decimal or sexagesimal; also
        limits the search radius to 0.5 degrees.
note_failure : bool, optional
If ``True``, create a file with extension "failed" if astrometry.net
fails. The "failed" file contains the error messages genreated by
astrometry.net.
try_builtin_source_finder : bool
If true, try using astrometry.net's built-in source extractor if
sextractor fails.
save_wcs :
verify :
See :func:`call_astrometry`
camera : str, one of ['celestron', 'u9', 'cp16'], optional
        Name of camera; determines the pixel scale used in the solve. Default
is to use `'u9'`.
avoid_pyfits : bool
Add arguments to solve-field to avoid calls to pyfits.BinTableHDU.
See https://groups.google.com/forum/#!topic/astrometry/AT21x6zVAJo
Returns
-------
bool
``True`` on success.
Notes
-----
    Tries a couple of strategies before giving up: first sextractor,
then, if that fails, astrometry.net's built-in source extractor.
It also cleans up after astrometry.net, keeping only the new FITS
file it generates, the .solved file, and, if desired, a ".failed" file
for fields which it fails to solve.
For more flexible invocation of astrometry.net, see :func:`call_astrometry`
"""
base, ext = path.splitext(filename)
# All are in arcsec per pixel, values are approximate
camera_pixel_scales = {
'celestron': 0.3,
'u9': 0.55,
'cp16': 0.55
}
if camera:
use_feder = False
scale = camera_pixel_scales[camera]
scale_options = ("--scale-low {low} --scale-high {high} "
"--scale-units arcsecperpix".format(low=0.8*scale, high=1.2 * scale))
else:
use_feder = True
scale_options = ''
if avoid_pyfits:
pyfits_options = '--no-remove-lines --uniformize 0'
else:
pyfits_options = ''
additional_opts = ' '.join([scale_options,
pyfits_options])
if solve_field_args is not None:
additional_opts = additional_opts.split()
additional_opts.extend(solve_field_args)
logger.info('BEGIN ADDING ASTROMETRY on {0}'.format(filename))
try:
logger.debug('About to call call_astrometry')
solved_field = (call_astrometry(filename,
sextractor=not no_source_extractor,
ra_dec=ra_dec,
save_wcs=save_wcs, verify=verify,
custom_sextractor_config=custom_sextractor,
odds_ratio=odds_ratio,
astrometry_config=astrometry_config,
feder_settings=use_feder,
additional_args=additional_opts)
== 0)
except subprocess.CalledProcessError as e:
logger.debug('Failed with error')
failed_details = e.output
solved_field = False
if (not solved_field) and try_builtin_source_finder:
log_msg = 'Astrometry failed using sextractor, trying built-in '
log_msg += 'source finder'
logger.info(log_msg)
try:
solved_field = (call_astrometry(filename, ra_dec=ra_dec,
overwrite=True,
save_wcs=save_wcs, verify=verify)
== 0)
except subprocess.CalledProcessError as e:
failed_details = e.output
solved_field = False
if solved_field:
logger.info('Adding astrometry succeeded')
else:
logger.warning('Adding astrometry failed for file %s', filename)
if overwrite and solved_field:
logger.info('Overwriting original file with image with astrometry')
try:
rename(base + '.new', filename)
except OSError as e:
logger.error(e)
return False
# whether we succeeded or failed, clean up
try:
remove(base + '.axy')
except OSError:
pass
if solved_field:
try:
remove(base + '-indx.xyls')
remove(base + '.solved')
except OSError:
pass
if note_failure and not solved_field:
try:
f = open(base + '.failed', 'wb')
f.write(failed_details)
f.close()
except IOError as e:
logger.error('Unable to save output of astrometry.net %s', e)
pass
logger.info('END ADDING ASTROMETRY for %s', filename)
return solved_field
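
# Example usage (an illustrative sketch, not part of the original module): the
# FITS filename and coordinates below are hypothetical placeholders, and running
# this requires a local astrometry.net ``solve-field`` installation.
#
#     from msumastro.header_processing.astrometry import add_astrometry
#     ok = add_astrometry('night1/object_0042.fit',
#                         ra_dec=('12:34:56', '+12:34:56'),
#                         note_failure=True)
#     if not ok:
#         print('plate solve failed; see the .failed file for details')
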
SExtractor_config = """
# Configuration file for SExtractor 2.19.5 based on default by EB 2014-11-26
#
# modification was to change DETECT_MINAREA and turn of filter convolution
#-------------------------------- Catalog ------------------------------------
PARAMETERS_NAME {param_file} # name of the file containing catalog contents
#------------------------------- Extraction ----------------------------------
DETECT_TYPE CCD # CCD (linear) or PHOTO (with gamma correction)
DETECT_MINAREA 15 # min. # of pixels above threshold
DETECT_THRESH 1.5 # <sigmas> or <threshold>,<ZP> in mag.arcsec-2
ANALYSIS_THRESH 1.5 # <sigmas> or <threshold>,<ZP> in mag.arcsec-2
FILTER N # apply filter for detection (Y or N)?
FILTER_NAME default.conv # name of the file containing the filter
DEBLEND_NTHRESH 32 # Number of deblending sub-thresholds
DEBLEND_MINCONT 0.005 # Minimum contrast parameter for deblending
CLEAN Y # Clean spurious detections? (Y or N)?
CLEAN_PARAM 1.0 # Cleaning efficiency
MASK_TYPE CORRECT # type of detection MASKing: can be one of
# NONE, BLANK or CORRECT
#------------------------------ Photometry -----------------------------------
PHOT_APERTURES 10 # MAG_APER aperture diameter(s) in pixels
PHOT_AUTOPARAMS 2.5, 3.5 # MAG_AUTO parameters: <Kron_fact>,<min_radius>
PHOT_PETROPARAMS 2.0, 3.5 # MAG_PETRO parameters: <Petrosian_fact>,
# <min_radius>
SATUR_LEVEL 50000.0 # level (in ADUs) at which arises saturation
SATUR_KEY SATURATE # keyword for saturation level (in ADUs)
MAG_ZEROPOINT 0.0 # magnitude zero-point
MAG_GAMMA 4.0 # gamma of emulsion (for photographic scans)
GAIN 0.0 # detector gain in e-/ADU
GAIN_KEY GAIN # keyword for detector gain in e-/ADU
PIXEL_SCALE 1.0 # size of pixel in arcsec (0=use FITS WCS info)
#------------------------- Star/Galaxy Separation ----------------------------
SEEING_FWHM 1.2 # stellar FWHM in arcsec
STARNNW_NAME default.nnw # Neural-Network_Weight table filename
#------------------------------ Background -----------------------------------
BACK_SIZE 64 # Background mesh: <size> or <width>,<height>
BACK_FILTERSIZE 3 # Background filter: <size> or <width>,<height>
BACKPHOTO_TYPE GLOBAL # can be GLOBAL or LOCAL
#------------------------------ Check Image ----------------------------------
CHECKIMAGE_TYPE NONE # can be NONE, BACKGROUND, BACKGROUND_RMS,
# MINIBACKGROUND, MINIBACK_RMS, -BACKGROUND,
# FILTERED, OBJECTS, -OBJECTS, SEGMENTATION,
# or APERTURES
CHECKIMAGE_NAME check.fits # Filename for the check-image
#--------------------- Memory (change with caution!) -------------------------
MEMORY_OBJSTACK 3000 # number of objects in stack
MEMORY_PIXSTACK 300000 # number of pixels in stack
MEMORY_BUFSIZE 1024 # number of lines in buffer
#----------------------------- Miscellaneous ---------------------------------
VERBOSE_TYPE NORMAL # can be QUIET, NORMAL or FULL
HEADER_SUFFIX .head # Filename extension for additional headers
WRITE_XML N # Write XML file (Y/N)?
XML_NAME sex.xml # Filename for XML output
"""
| 2.234375 | 2 |
punica/deploy/deploy_contract.py | lucas7788/punica-python | 0 | 12798614 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import time
import getpass
from ontology.common.address import Address
from ontology.ont_sdk import OntologySdk
from punica.utils.file_system import (
read_avm,
read_wallet
)
from punica.utils.handle_config import (
handle_network_config,
handle_deploy_config
)
from punica.exception.punica_exception import PunicaException, PunicaError
class Deploy:
@staticmethod
def generate_signed_deploy_transaction(hex_avm_code: str, project_path: str = '', wallet_file_name: str = ''):
wallet_dir_path = os.path.join(project_path, 'wallet')
wallet_manager = read_wallet(wallet_dir_path, wallet_file_name)
deploy_dir_path = os.path.join(project_path, 'contracts')
deploy_information = handle_deploy_config(deploy_dir_path)
need_storage = deploy_information.get('needStorage', True)
name = deploy_information.get('name', os.path.split(project_path)[1])
version = deploy_information.get('version', '0.0.1')
author = deploy_information.get('author', '')
email = deploy_information.get('email', '')
desc = deploy_information.get('desc', '')
b58_payer_address = deploy_information.get('payer', wallet_manager.get_default_account().get_address())
gas_limit = deploy_information.get('gasLimit', 21000000)
gas_price = deploy_information.get('gasPrice', 500)
ontology = OntologySdk()
tx = ontology.neo_vm().make_deploy_transaction(hex_avm_code, need_storage, name, version, author, email,
desc, b58_payer_address, gas_limit, gas_price)
        password = getpass.getpass('\tPlease input payer account password: ')
payer_acct = wallet_manager.get_account(b58_payer_address, password)
ontology.sign_transaction(tx, payer_acct)
return tx
@staticmethod
def generate_contract_address(avm_dir_path: str = '', avm_file_name: str = '') -> str:
if avm_dir_path == '':
avm_dir_path = os.path.join(os.getcwd(), 'build', 'contracts')
if not os.path.isdir(avm_dir_path):
raise PunicaException(PunicaError.dir_path_error)
hex_avm_code = read_avm(avm_dir_path, avm_file_name)[0]
hex_contract_address = Address.address_from_vm_code(hex_avm_code).to_reverse_hex_str()
return hex_contract_address
@staticmethod
def check_deploy_state(tx_hash, project_path: str = '', network: str = ''):
if project_path == '':
project_path = os.getcwd()
if not os.path.isdir(project_path):
raise PunicaException(PunicaError.dir_path_error)
rpc_address = handle_network_config(project_path, network, False)
ontology = OntologySdk()
ontology.rpc.set_address(rpc_address)
time.sleep(6)
tx = ontology.rpc.get_raw_transaction(tx_hash)
if tx == 'unknown transaction':
return False
else:
return True
@staticmethod
def deploy_smart_contract(project_dir: str = '', network: str = '', avm_file_name: str = '',
wallet_file_name: str = ''):
if project_dir == '':
project_dir = os.getcwd()
avm_dir_path = os.path.join(project_dir, 'contracts', 'build')
rpc_address = handle_network_config(project_dir, network)
hex_avm_code, avm_file_name = read_avm(avm_dir_path, avm_file_name)
if hex_avm_code == '':
raise PunicaException(PunicaError.avm_file_empty)
hex_contract_address = Deploy.generate_contract_address(avm_dir_path, avm_file_name)
ontology = OntologySdk()
ontology.rpc.set_address(rpc_address)
contract = ontology.rpc.get_smart_contract(hex_contract_address)
print('Running deployment: {}'.format(avm_file_name))
if contract == 'unknow contracts':
print('\tDeploying...')
print('\t... 0x{}'.format(hex_avm_code[:64]))
tx = Deploy.generate_signed_deploy_transaction(hex_avm_code, project_dir, wallet_file_name)
ontology.rpc.set_address(rpc_address)
tx_hash = ontology.rpc.send_raw_transaction(tx)
return tx_hash
else:
print('\tDeploy failed...')
print('\tContract has been deployed...')
print('\tContract address is 0x{}...'.format(hex_contract_address))
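
    # Usage sketch (illustrative only): the project path and network name below
    # are placeholders; the project is assumed to follow the punica layout with
    # a contracts/build/*.avm file and a wallet/ directory.
    #
    #     tx_hash = Deploy.deploy_smart_contract(project_dir='/path/to/project',
    #                                            network='testNet')
    #     if Deploy.check_deploy_state(tx_hash, project_path='/path/to/project',
    #                                  network='testNet'):
    #         print('contract deployed successfully')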
| 1.6875 | 2 |
img/nwcod.py | Heccubernny/web-scraping | 0 | 12798615 | <gh_stars>0
#-------------------------------------------------------------------------------
# Name: Image Downloader
# Purpose: To download images and save links of any image from any website link
#
# Author: Heccubernny
#
# Created: 17/04/2020
# Copyright: (c) Heccubernny 2020
# Licence: CRSPIP licence
#-------------------------------------------------------------------------------
import requests
from bs4 import BeautifulSoup
import urllib.request
url = input("Paste the website url here: ")
#To prevent the website from blocking you
#Visit https://curl.trillworks.com/
headers = {
'Accept-Encoding': 'gzip, deflate, sdch',
'Accept-Language': 'en-US,en;q=0.8',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Referer': 'http://www.wikipedia.org/',
'Connection': 'keep-alive',
}
#Now let's make our request to the website
res = requests.get(url=url, headers=headers)
#To confirm that the website won't block you,
#type print(res)
#Now we are going to use beautiful soup
soup = BeautifulSoup(res.text, 'html.parser')
index = 0
for img in soup.findAll('img'):
index += 1
    #To fetch all the image source tags on the website, type print(img)
    img_t = img.get('src')
    #To fetch the link for each image, type print(img_t)
if img_t[:1] == '/':
img_path = url + img_t
else:
img_path = img_t
print(img_path)
if '.png' in img_path:
with open("{}.png".format(index), 'wb') as saveImg:
saveImg.write(requests.get(url = img_path).content)
print(img_path)
else:
pass
#Later on I will work on where to save the images and also on saving all the image links in a txt file
#In a later version I will produce an application which will make it cross-platform
process_data/all/code/split_voxel_then_img.py | hailieqh/3D-Object-Primitive-Graph | 2 | 12798616 | import scipy.io
import numpy as np
import os
import random
import json
import pdb
def check_image_voxel_match(cls):
root = os.path.abspath('.')
out_dir = os.path.join(root, '../output', cls)
# out_dir = '/Users/heqian/Research/projects/primitive-based_3d/data/all_classes/chair'
voxel_txt_dir = os.path.join(out_dir, 'voxeltxt')
voxel_dirs = {x: os.path.join(voxel_txt_dir, 'voxel_{}.txt'.format(x))
for x in ['train', 'val', 'test']}
img_dirs = {x: os.path.join(voxel_txt_dir, '{}.txt'.format(x))
for x in ['train', 'val', 'test']}
with open(os.path.join(out_dir, 'voxels_dir_{}.txt'.format(cls)), 'r') as f:
voxel_all = f.readlines()
voxel_names = {}
img_names = {}
for phase in ['train', 'val', 'test']:
with open(os.path.join(voxel_dirs[phase]), 'r') as f:
voxel_names[phase] = f.readlines()
with open(os.path.join(img_dirs[phase]), 'r') as f:
img_names[phase] = f.readlines()
# pix3d_dir = os.path.join(root, '../input/pix3d.json')
# pix3d = json.load(open(pix3d_dir, 'r'))
match_id = scipy.io.loadmat(os.path.join(out_dir, 'img_voxel_idxs.mat'))
img_match_vox = {x: [] for x in ['train', 'val', 'test']}
for phase in ['train', 'val', 'test']:
for img in img_names[phase]:
id_img_ori = int(img.split('.')[0]) # 1-3839
img_id_real = list(match_id['img_idxs'][0]).index(id_img_ori) # 0-3493
voxel_id_ori = match_id['voxel_idxs'][0, img_id_real] # 1-216
vox = voxel_all[voxel_id_ori - 1]
img_match_vox[phase].append(vox)
# img_match_vox[phase].append('model/'+vox)
img_match_vox = {x: sorted(set(img_match_vox[x])) for x in ['train', 'val', 'test']}
# pdb.set_trace()
for phase in ['train', 'val', 'test']:
if len(set(voxel_names[phase]).difference(set(img_match_vox[phase]))) > 0:
print('error')
if len(set(img_match_vox[phase]).difference(set(voxel_names[phase]))) > 0:
print('error')
for name in voxel_names[phase]:
if name not in img_match_vox[phase]:
print(name)
for name in img_match_vox[phase]:
if name not in voxel_names[phase]:
print(name)
def split_voxel_then_image(cls):
# data_dir = '/Users/heqian/Research/projects/3dprnn/data/pix3d'
split_by_model = True # True-split by 216 models, False-split by 34 images
## split voxels into train, val, test
root = os.path.abspath('.')
out_dir = os.path.join(root, '../output', cls)
voxel_txt_dir = os.path.join(out_dir, 'voxeltxt')
if not os.path.exists(voxel_txt_dir):
os.makedirs(voxel_txt_dir)
voxel_train_txtpath = os.path.join(voxel_txt_dir, 'voxel_train.txt')
voxel_val_txtpath = os.path.join(voxel_txt_dir, 'voxel_val.txt')
voxel_test_txtpath = os.path.join(voxel_txt_dir, 'voxel_test.txt')
voxel_ftrain = open(voxel_train_txtpath, 'w')
voxel_fval = open(voxel_val_txtpath, 'w')
voxel_ftest = open(voxel_test_txtpath, 'w')
voxel_ltrain = []
voxel_lval = []
voxel_ltest = []
voxel_ctrain = 0
voxel_cval = 0
voxel_ctest = 0
with open(os.path.join(out_dir, 'voxels_dir_{}.txt'.format(cls)), 'r') as f:
voxel_dirs = f.readlines()
for i in range(len(voxel_dirs)):
        voxel_dirs[i] = voxel_dirs[i].strip()
tmp = random.random()
if tmp < 0.65:
voxel_ftrain.write(voxel_dirs[i]+'\n')
voxel_ltrain.append(voxel_dirs[i])
voxel_ctrain += 1
elif tmp >= 0.65 and tmp < 0.8:
voxel_fval.write(voxel_dirs[i]+'\n')
voxel_lval.append(voxel_dirs[i])
voxel_cval += 1
else:
voxel_ftest.write(voxel_dirs[i]+'\n')
voxel_ltest.append(voxel_dirs[i])
voxel_ctest += 1
voxel_ftrain.close()
voxel_fval.close()
voxel_ftest.close()
## split images into train, val, test, according to voxels
# img_voxel_idxs = []
img_idxs = []
voxel_idxs = []
train_txtpath = os.path.join(voxel_txt_dir, 'train.txt')
val_txtpath = os.path.join(voxel_txt_dir, 'val.txt')
test_txtpath = os.path.join(voxel_txt_dir, 'test.txt')
ftrain = open(train_txtpath, 'w')
fval = open(val_txtpath, 'w')
ftest = open(test_txtpath, 'w')
ctrain = 0
cval = 0
ctest = 0
pix3d_dir = os.path.join(root, '../input/pix3d.json')
pix3d = json.load(open(pix3d_dir, 'r'))
for i in range(len(pix3d)):
# if json_file[i]['img'][4:9] == 'chair' and json_file[i]['voxel'] not in voxel_dirs:
# print(json_file[i]['img'], json_file[i]['voxel'])
voxel_dir = pix3d[i]['voxel'][6:]
if voxel_dir in voxel_dirs:
# pdb.set_trace()
img_file = pix3d[i]['img'].split('/')[-1] #[10:]
img_id = int(img_file.split('.')[0]) #int(pix3d[i]['img'][10:14])
img_idxs.append(img_id)
voxel_idxs.append(voxel_dirs.index(voxel_dir) + 1)
# img_voxel_idxs.append(voxel_dirs.index(voxel_dir))
# if img_id != len(img_voxel_idxs):
# print('Error!!!=======', img_id)
if split_by_model:
if voxel_dir in voxel_ltrain:
ftrain.write(img_file+'\n')
ctrain += 1
elif voxel_dir in voxel_lval:
fval.write(img_file+'\n')
cval += 1
elif voxel_dir in voxel_ltest:
ftest.write(img_file+'\n')
ctest += 1
else:
tmp = random.random()
if tmp < 0.65:
ftrain.write(img_file+'\n')
ctrain += 1
elif tmp >= 0.65 and tmp < 0.8:
fval.write(img_file+'\n')
cval += 1
else:
ftest.write(img_file+'\n')
ctest += 1
ftrain.close()
fval.close()
ftest.close()
# scipy.io.savemat(os.path.join(out_dir, 'img_voxel_idxs.mat'),
# {'img_voxel_idxs': np.array(img_voxel_idxs)})
scipy.io.savemat(os.path.join(out_dir, 'img_voxel_idxs.mat'),
{'img_idxs': np.array(img_idxs), 'voxel_idxs': np.array(voxel_idxs)})
print(voxel_ctrain+voxel_cval+voxel_ctest, voxel_ctrain, voxel_cval, voxel_ctest)
print(ctrain+cval+ctest, ctrain, cval, ctest)
print(len(img_idxs))
if __name__ == '__main__':
cls_all = ['chair', 'bed', 'bookcase', 'desk', 'misc', 'sofa', 'table', 'tool', 'wardrobe']
cls = 'table'
# for cls in cls_all:
split_voxel_then_image(cls)
check_image_voxel_match(cls)
| 2.234375 | 2 |
poe_client/client.py | moowiz/poe-client | 5 | 12798617 | import logging
from types import TracebackType
from typing import Callable, Dict, List, Optional, Type, TypeVar
import aiohttp
from yarl import URL
from poe_client.rate_limiter import RateLimiter
from poe_client.schemas.account import Account, Realm
from poe_client.schemas.character import Character
from poe_client.schemas.filter import ItemFilter
from poe_client.schemas.league import Ladder, League, LeagueType
from poe_client.schemas.pvp import PvPMatch, PvPMatchLadder, PvPMatchType
from poe_client.schemas.stash import PublicStash, StashTab
Model = TypeVar("Model") # the variable return type
class Client(object):
"""Aiohttp class for interacting with the Path of Exile API."""
_token: Optional[str]
_base_url: URL = URL("https://api.pathofexile.com")
_client: aiohttp.ClientSession
_user_agent: str
_limiter: RateLimiter
# Maps "generic" paths to rate limiting policy names.
# Generic paths are paths with no IDs or unique numbers.
# For example, "/character/moowiz" has an account name, so it's not a base path.
# "/character/" is the equivalent "generic" path.
_path_to_policy_names: Dict[str, str]
def __init__(
self,
user_agent: str,
token: Optional[str] = None,
) -> None:
"""Initialize a new PoE client.
Args:
user_agent: An OAuth user agent. Used when making HTTP requests to the API.
token: Authorization token to pass to the PoE API. If unset, no auth token is used.
"""
self._token = token
self._user_agent = user_agent
self._limiter = RateLimiter()
self._path_to_policy_names = {}
async def __aenter__(self) -> "Client":
"""Runs on entering `async with`."""
self._client = aiohttp.ClientSession(raise_for_status=True)
return self
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> Optional[bool]:
"""Runs on exiting `async with`."""
await self._client.close()
if exc_val:
raise exc_val
return True
# Type ignore is for args and kwargs, which have unknown types we pass to _get_json
async def _get( # type: ignore
self,
model: Callable[..., Model],
result_field: Optional[str] = None,
*args,
**kwargs,
) -> Model:
"""Make a get request and returns the data as an APIType subclass.
Args:
model: The object which contains data retrieved from the API. Must
be a sublclass of APIType.
result_field: If present, returns the data in this field from the request,
rather than the request itself.
See _get_json for other args.
Returns:
The result, parsed into an instance of the `model` type.
"""
json_result = await self._get_json(*args, **kwargs)
assert isinstance(json_result, dict) # noqa: S101
if result_field:
json_result = json_result[result_field]
return model(**json_result)
# Type ignore is for args and kwargs, which have unknown types we pass to _get_json
async def _get_list( # type: ignore
self,
model: Callable[..., Model],
result_field: Optional[str] = None,
*args,
**kwargs,
) -> List[Model]:
"""Make a get request and returns the data as a list of APIType subclass.
Args:
model: The object which contains data retrieved from the API. Must
be a sublclass of APIType.
result_field: If present, returns the data in this field from the request,
rather than the request itself.
See _get_json for other args.
Returns:
The result, parsed into a list of the `model` type.
"""
json_result = await self._get_json(*args, **kwargs)
if result_field:
assert isinstance(json_result, dict) # noqa: S101
json_result = json_result[result_field]
assert isinstance(json_result, list) # noqa: S101
return [model(**objitem) for objitem in json_result]
async def _get_json(
self,
path: str,
path_format_args: Optional[List[str]] = None,
query: Optional[Dict[str, str]] = None,
):
"""Fetches data from the POE API.
Args:
path:
The URL path to use. Appended to the POE API base URL.
If certain parts of the path are non-static (account ID),
those should be encoded as format args ("{0}") in the path,
and the values for those args should be passed into path_format_args.
path_format_args:
Values which should be encoded in the path when the HTTP request gets
made.
query:
An optional dict of query params to add to the HTTP request.
Returns:
The result of the API request, parsed as JSON.
"""
if not path_format_args:
path_format_args = []
        path_with_no_args = path.format(*("" for _ in range(len(path_format_args))))
policy_name = self._path_to_policy_names.get(path_with_no_args, "")
kwargs = {
"headers": {
"User-Agent": self._user_agent,
},
"params": query,
}
if self._token:
headers = kwargs["headers"]
assert headers # noqa: S101
headers["Authorization"] = "Bearer {0}".format(self._token)
# We key the policy name off the path with no format args. This presumes that
# different requests to the same endpoints with different specific args use the
# same rate limiting. For example, /characters/moowiz and /characters/chris
# presumably use the same rate limiting policy name.
if await self._limiter.get_semaphore(policy_name):
# We ignore typing in the dict assignment. kwargs only has dicts as values,
# but we're assigning booleans here. We can't set the typing inline without
# flake8 complaining about overly complex annotation.
logging.info("NOT BLOCKING")
kwargs["raise_for_status"] = True # type: ignore
else:
logging.info("BLOCKING")
kwargs["raise_for_status"] = False # type: ignore
# The types are ignored because for some reason it can't understand
# that kwargs isn't a positional arg and won't override a different
# positional argument in the function.
async with await self._client.get(
"{0}/{1}".format(self._base_url, path.format(*path_format_args)),
**kwargs, # type: ignore
) as resp:
self._path_to_policy_names[
path_with_no_args
] = await self._limiter.parse_headers(resp.headers)
if resp.status != 200:
raise ValueError(
"Invalid request: status code {0}, expected 200".format(
resp.status,
),
)
return await resp.json()
class _PvPMixin(Client):
"""PVP related methods for the POE API.
CURRENTLY UNTESTED. HAS NOT BEEN USED IN PRODUCTION.
"""
async def get_pvp_matches(
self,
realm: Optional[Realm] = None,
match_type: Optional[PvPMatchType] = None,
season: str = "",
league: str = "",
) -> List[PvPMatch]:
"""Get a list of all pvp matches based on filters."""
if match_type == PvPMatchType.season and not season:
raise ValueError("season cannot be empty if league_type is season.")
if match_type == PvPMatchType.league and not league:
raise ValueError("league cannot be empty if league_type is league.")
# We construct this via a dict so that the linter doesn't complain about
# complexity.
query = {
"type": match_type.value if match_type else None,
"realm": realm.value if realm else None,
"season": season if season else None,
"league": league if league else None,
}
# Removed unset query params
query = {key: query_val for key, query_val in query.items() if query_val}
return await self._get_list(
path="pvp-match",
model=PvPMatch,
result_field="matches",
query=query,
)
async def get_pvp_match(
self,
match: str,
realm: Optional[Realm] = None,
) -> PvPMatch:
"""Get a pvp match based on id."""
query = {}
if realm:
query["realm"] = realm.value
return await self._get(
path="pvp-match/{0}",
path_format_args=(match,),
model=PvPMatch,
result_field="match",
query=query,
)
async def get_pvp_match_ladder(
self,
match: str,
realm: Optional[Realm] = None,
) -> PvPMatchLadder:
"""Get a pvp match based on id."""
query = {}
if realm:
query["realm"] = realm.value
return await self._get(
path="pvp-match/{0}/ladder",
path_format_args=(match,),
model=PvPMatchLadder,
result_field="match",
query=query,
)
class _LeagueMixin(Client):
"""League related methods for the POE API.
CURRENTLY UNTESTED. HAS NOT BEEN USED IN PRODUCTION.
"""
async def list_leagues( # noqa: WPS211
self,
realm: Optional[Realm] = None,
league_type: Optional[LeagueType] = None,
offset: int = 0,
season: str = "",
limit: int = 50,
) -> List[League]:
"""Get a list of all leagues based on filters."""
if league_type == LeagueType.season and not season:
raise ValueError("season cannot be empty if league_type is season.")
# We construct this via a dict so that the linter doesn't complain about
# complexity.
query = {
"realm": realm.value if realm else None,
"type": league_type.value if league_type else None,
"season": season if season else None,
"offset": str(offset) if offset else None,
"limit": str(limit) if limit else None,
}
# Remove unset values
query = {key: query_val for key, query_val in query.items() if query_val}
return await self._get_list(
path="league",
model=League,
result_field="leagues",
query=query,
)
async def get_league(
self,
league: str,
realm: Optional[Realm] = None,
) -> League:
"""Get a league based on league id."""
query = {}
if realm:
query["realm"] = realm.value
return await self._get(
path="league/{0}",
path_format_args=(league,),
model=League,
result_field="league",
query=query,
)
async def get_league_ladder(
self,
league: str,
realm: Optional[Realm] = None,
) -> Ladder:
"""Get the ladder of a league based on id."""
query = {}
if realm:
query["realm"] = realm.value
return await self._get(
path="league/{0}/ladder",
path_format_args=(league,),
model=Ladder,
result_field="ladder",
query=query,
)
class _AccountMixin(Client):
"""User account methods for the POE API.
CURRENTLY UNTESTED. HAS NOT BEEN USED IN PRODUCTION.
"""
async def get_profile(
self,
) -> Account:
"""Get the account beloning to the token."""
return await self._get(path="league", model=Account)
async def get_characters(
self,
) -> List[Character]:
"""Get all characters belonging to token."""
return await self._get_list(
path="character",
model=Character,
result_field="characters",
)
async def get_character(
self,
name: str,
) -> Character:
"""Get a character based on id and account of token."""
return await self._get(
path="character/{0}",
path_format_args=(name,),
model=Character,
result_field="character",
)
async def get_stashes(
self,
league: str,
) -> List[StashTab]:
"""Get all stash tabs belonging to token."""
return await self._get_list(
path="stash/{0}",
path_format_args=(league,),
model=StashTab,
result_field="stashes",
)
async def get_stash(
self,
league: str,
stash_id: str,
substash_id: Optional[str],
) -> StashTab:
"""Get a stash tab based on id."""
path = "stash/{0}/{1}".format(league, stash_id)
path_format_args = [league, stash_id]
if substash_id:
path += "/{2}" # noqa: WPS336
path_format_args.append(substash_id)
return await self._get(
path=path,
path_format_args=path_format_args,
model=StashTab,
result_field="stash",
)
class _FilterMixin(Client):
"""Item Filter methods for the POE API.
CURRENTLY UNTESTED. HAS NOT BEEN USED IN PRODUCTION.
"""
async def get_item_filters(
self,
) -> List[ItemFilter]:
"""Get all item filters."""
return await self._get_list(
path="item-filter",
model=ItemFilter,
result_field="filters",
)
async def get_item_filter(
self,
filterid: str,
) -> ItemFilter:
"""Get a ItemFilter based on id."""
return await self._get(
path="item-filter/{0}",
path_format_args=(filterid,),
model=ItemFilter,
result_field="filter",
)
class _PublicStashMixin(Client):
"""Public stash tab methods for the POE API.
CURRENTLY UNTESTED. HAS NOT BEEN USED IN PRODUCTION.
"""
async def get_public_stash_tabs(
self,
next_change_id: Optional[str] = None,
) -> PublicStash:
"""Get the latest public stash tabs.
Args:
next_change_id: If set, returns the next set of stash tabs, starting
at this change_id. While this is technically optional,
in practice this is required; not setting this value
fetches stash tabs from the beginning of the API's
availability which is several years in the past.
Returns:
A dict representing a public stash change.
"""
query = {}
if next_change_id:
query["id"] = next_change_id
return await self._get_json(
path="public-stash-tabs",
query=query,
)
# Ignore WPS215, error about too many base classes. We use multiple to better split up
# the different APIs to simplify reading. There isn't any complicated inheritance
# going on here.
class PoEClient( # noqa: WPS215
_PvPMixin,
_LeagueMixin,
_AccountMixin,
_FilterMixin,
_PublicStashMixin,
Client,
):
"""Client for PoE API.
This technically has support for every API GGG has exposed. None of these
APIs have been tested in production, so use at your own risk.
"""
| 2.171875 | 2 |
Ex.15-Numpy.py | aguinaldolorandi/100-exercicios-Numpy | 0 | 12798618 | #Numpy Exercises-15
#*******************
import numpy as np
arr=np.ones((10,10))
arr[1:-1,1:-1]=0
print(arr)
print()
arr_zero=np.zeros((8,8))
arr_zero=np.pad(arr_zero,pad_width=1,mode='constant',constant_values=1)
print(arr_zero) | 3.328125 | 3 |
parser-stories.py | labsletemps/barometre-parite | 1 | 12798619 | <filename>parser-stories.py
# coding: utf-8
import requests
from bs4 import BeautifulSoup
import pandas as pd
import re
from math import nan
# config
from define import config
# NLP
import spacy
import gender_guesser.detector as gender
# time
import datetime
import time
# to send MQTT message
import paho.mqtt.client as mqtt
import struct
# Get the stories as json
response = requests.get(config['get-stories'])
df = pd.DataFrame(response.json())
nlp = spacy.load('fr')
d = gender.Detector()
# countGenders:
# searches for people names with spacy (as nlp)
# count genders with gender.Detector (as d)
# TODO get wikidata gender data?
def countGenders(text):
people = []
male_names = []
female_names = []
unknown_names = []
males = 0
females = 0
unknown = 0
doc = BeautifulSoup(text, 'html.parser')
paragraphs = doc.find_all('p')
paragraphs = [p.text for p in paragraphs]
# or everything at once:
# ptext = "\n".join(paragraphs)
# sp = nlp(ptext)
for p in paragraphs:
sp = nlp(p)
people.extend('{}'.format(i) for i in sp.ents if i.label_ == 'PER')
people = [i.strip() for i in people]
people = [i for i in people if re.search('[A-Z].*[A-Z]', i)]
people = [i for i in people if not re.search('Monsieur|Madame', i)]
people = list(set(people))
for personne in people:
## TODO
# here we could query wikidata
# and if we get a result, check the “gender” property, P21
firstname = personne.split()[0]
# Dirty fix for compound nouns like François-Xavier: we use the “first” first name
if firstname.count('-') > 0:
firstname = firstname.split('-')[0]
result = d.get_gender(firstname)
if result.find('female') >= 0:
female_names.append(personne)
females += 1
elif result.find('male') >= 0:
male_names.append(personne)
males += 1
else:
unknown_names.append(personne)
unknown += 1
return {'male': males, 'female': females, 'total': males + females, 'unknown': unknown, 'male_names': male_names, 'female_names': female_names, 'unknown_names': unknown_names}
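
# Illustrative sketch (not executed by the script): countGenders takes the HTML
# body of a story and returns raw counts plus the detected names. The call below
# only shows the shape of the result; the actual values depend on the spacy NER
# model and the gender_guesser heuristics.
#
#     counts = countGenders('<p>Article text here...</p>')
#     # -> {'male': ..., 'female': ..., 'total': ..., 'unknown': ...,
#     #     'male_names': [...], 'female_names': [...], 'unknown_names': [...]}
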
df['score'] = df['contenu'].apply(countGenders)
def computeRatio(counts):
if counts['total'] > 0:
return counts['female'] / counts['total']
else:
return nan
df['ratio'] = df['score'].apply(computeRatio)
df['percentage'] = df['ratio'].apply(lambda x: "{0:.2f}%".format(x*100) if x == x else 'n/a')
################
# MQTT
#
# take the last 25
mean = df[25:]['ratio'].mean() * 100
client = mqtt.Client("xoxox")
client.connect(config['mqtt-broker'], port=1883)
# change the ratio (from inMin to inMax) to an angle (from outMin to outMax)
def remap( x, inMin, inMax, outMin, outMax ):
portion = (x-inMin)*(outMax-outMin)/(inMax-inMin)
result = portion + outMin # add the new minimal value
return int(round(result))
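# Worked example for the mapping above: remap(0, 0, 100, 12, 181) == 12 and
# remap(100, 0, 100, 12, 181) == 181, so a 50/50 gender ratio lands roughly in
# the middle of the servo's travel.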
# let’s remap the ratio, from percentange to servomotor angle
remapped = remap(mean, 0, 100, 12, 181)
print('Mean: {}, angle: {}'.format(mean, remapped))
x = struct.pack('i', remapped)
response = client.publish(config['mqtt-topic'], x, qos=0, retain=True)
print("MQTT message published:", response.is_published())
ts = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')
data = {'mean': mean, 'update': ts}
print(data)
################
# Send data to SQLITE database
#
df['male'] = df['score'].apply(lambda x: x['male'])
df['female'] = df['score'].apply(lambda x: x['female'])
df['unknown'] = df['score'].apply(lambda x: x['unknown'])
df['id'] = df['guid'].apply(lambda x: int(x.split('/')[-1]))
df['auteur_seul'] = df['auteur'].apply(lambda x: x.split(',')[0])
df['auteur_uni'] = df.apply(lambda row: row['auteur_int'] if row['auteur_int'] != '' else row['auteur_seul'], axis=1)
df['male_names'] = df['score'].apply(lambda x: ", ".join(x['male_names']))
df['female_names'] = df['score'].apply(lambda x: ", ".join(x['female_names']))
df['unknown_names'] = df['score'].apply(lambda x: ", ".join(x['unknown_names']))
df['ratio'] = df['ratio'].apply(lambda x: 'NULL' if x != x else x)
def sendJson(_url, _json):
r = False
counter = 1
while counter < 4:
try:
r = requests.post(_url, json=_json)
except requests.exceptions.RequestException as e: # This is the correct syntax
print ('Attempt', counter, '>', e)
        if r is not False:
if r.status_code == 200:
print('JSON sent at attempt', counter)
break
else:
print('Attempt', counter, '>', r.status_code)
counter += 1
return r
payload = {
'records': df[['id', 'section', 'auteur_uni', 'dte_publication', 'titre',
'male', 'female', 'unknown',
'male_names', 'female_names', 'unknown_names',
'ratio']].to_dict(orient='records'),
}
response = sendJson(config['send-sql-stories'], payload)
print('SQLite result:', response.text)
print('End')
| 3 | 3 |
crazyflie_driver/src/test_follower.py | xqgex/CrazyFlie_ros | 2 | 12798620 | <filename>crazyflie_driver/src/test_follower.py
#!/usr/bin/env python
import rospy
import crazyflie
import time
import uav_trajectory
if __name__ == '__main__':
rospy.init_node('test_high_level')
cf = crazyflie.Crazyflie("crazyflie", "crazyflie")
wand = crazyflie.Crazyflie("wand", "wand")
cf.setParam("commander/enHighLevel", 1)
wand.setParam("commander/enHighLevel", 2)
    cf.takeoff(targetHeight = 1.0, duration = 2.0)
time.sleep(5.0)
try:
while True:
wandPosition = wand.wandPosition()
cfPosition = cf.position()
if (cfPosition.position.x > wandPosition.position.x):
cf.goTo(goal = [wandPosition.position.x + 0.2, wandPosition.position.y, wandPosition.position.z], yaw=0.2, duration = 2.0, relative = False)
else:
cf.goTo(goal = [wandPosition.position.x - 0.2, wandPosition.position.y, wandPosition.position.z], yaw=0.2, duration = 2.0, relative = False)
finally:
        cf.land(targetHeight = 0.0, duration = 2.0)
        time.sleep(5.0)
        cf.stop()
| 2.4375 | 2 |
acceleration_sensor.py | therealjtgill/slam | 0 | 12798621 | import numpy as np
class AccelerationSensor:
def __init__(self, measurement_covariance):
self.R_meas = measurement_covariance
def getMeasurements(self, true_accel):
return np.random.multivariate_normal(true_accel, self.R_meas)
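

if __name__ == '__main__':
    # Small self-test sketch: simulate one noisy reading of a 3-axis
    # accelerometer at rest. The covariance below is an arbitrary example,
    # not a calibrated value.
    true_accel = np.array([0.0, 0.0, -9.81])
    sensor = AccelerationSensor(measurement_covariance=0.01 * np.eye(3))
    print(sensor.getMeasurements(true_accel))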
| 2.859375 | 3 |
eds/openmtc-gevent/server/openmtc-server/src/openmtc_server/plugins/transport_android_intent/IntentError.py | piyush82/elastest-device-emulator-service | 0 | 12798622 | <reponame>piyush82/elastest-device-emulator-service<filename>eds/openmtc-gevent/server/openmtc-server/src/openmtc_server/plugins/transport_android_intent/IntentError.py
#from client import IntentClient
class IntentError(Exception):
pass
# def sendIntent(self, context, action, issuer):
# client = IntentClient(action, issuer)
| 1.65625 | 2 |
iamheadless_projects/lookups/pagination.py | plain-ie/iamheadless_projects | 0 | 12798623 | import json
import math
from django.core.serializers import serialize
from django.db.models.query import QuerySet
ALLOWED_FORMATS = [
'dict',
'json',
'queryset'
]
class Pagination:
def __init__(
self,
page=1,
pages=1,
queryset=QuerySet(),
total=0,
pydantic_model=None,
):
self.page = page
self.pages = pages
self.queryset = queryset
self.total = total
self.pydantic_model = pydantic_model
@property
def data(self):
return {
'page': self.page,
'pages': self.pages,
'results': self.queryset,
'total': self.total
}
@property
def dict(self):
data = self.data
results = data['results']
if self.pydantic_model is None:
data['results'] = self.queryset_to_dict(results)
else:
_results = []
for x in self.pydantic_model.from_django(results, many=True):
_results.append(x.dict())
data['results'] = _results
return data
@property
def json(self):
return json.dumps(self.dict)
def queryset_to_dict(self, queryset):
data = serialize(
'json',
queryset
)
data = json.loads(data)
_d = []
for x in data:
id = x.pop('pk')
field_data = x.pop('fields')
field_data['id'] = id
_d.append(field_data)
return _d
def pagination(queryset, page, count, pydantic_model=None):
if isinstance(queryset, QuerySet) is False:
raise TypeError('"queryset" must be Queryset')
if isinstance(page, int) is False:
raise TypeError('"page" must be int')
if isinstance(count, int) is False:
raise TypeError('"count" must be int')
total = queryset.count()
pages = 1
if count != '__all__':
pages = math.ceil(total/count)
start_index = (page - 1) * count
end_index = start_index + count
queryset = queryset[start_index : end_index]
return Pagination(
page=page,
pages=pages,
queryset=queryset,
total=total,
pydantic_model=pydantic_model
)
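

# Usage sketch (illustrative only; assumes a configured Django project and a
# hypothetical Project model):
#
#     paginated = pagination(Project.objects.all(), page=2, count=10)
#     paginated.dict   # {'page': 2, 'pages': ..., 'results': [...], 'total': ...}
#     paginated.json   # the same structure serialised to a JSON string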
| 2.359375 | 2 |
src/petronia/core/platform/api/locale/__init__.py | groboclown/petronia | 19 | 12798624 | <filename>src/petronia/core/platform/api/locale/__init__.py
"""
Information regarding the current user locale.
TODO should this be its own extension? It seems like it should, but that
would mean asking for a translation would need to go through the event bus,
and that doesn't seem right.
"""
| 1.21875 | 1 |
compareSamples.py | golharam/bam-matcher | 0 | 12798625 | <reponame>golharam/bam-matcher
#!/usr/bin/env python
'''
Script to compare genotypes
Not every sample is going to have a meltedResults.txt. For a list of samples (sample1, ..., sampleN),
a naive all-vs-all approach would require O(n^2) comparisons, but since the matrix is symmetric we only need roughly half of them.
For example, 5 samples, sample1, ..., sample 5:
s1 s2 s3 s4 s5
s1
s2 x
s3 x x
s4 x x x
s5 x x x x
Instead of visiting every cell, we only need to visit the ones with a X because the matrix is symmetrical
about the axis
'''
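# Illustrative sketch of the scheme described above (not used by the script
# itself): only the lower-triangle pairs are generated, so the last sample never
# needs to start a comparison of its own.
#
#     samples = ['s1', 's2', 's3', 's4', 's5']
#     pairs = [(a, b) for i, a in enumerate(samples) for b in samples[i + 1:]]
#     # 10 pairs instead of 25 all-vs-all comparisons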
import argparse
import boto3
import logging
import os
import time
import subprocess
import sys
from common import find_bucket_key, listFiles, readSamples, uploadFile
__appname__ = 'compareSamples'
__version__ = "0.2"
logger = logging.getLogger(__appname__)
def _run(cmd):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
p.wait()
if p.returncode != 0:
return err
return out
def main(argv):
''' Main Entry Point '''
args = parseArguments(argv)
logging.basicConfig(level=args.log_level)
logger.info("%s v%s" % (__appname__, __version__))
logger.info(args)
batch = boto3.client('batch')
samples = readSamples(args.bamsheet)
if samples is False:
return -1
# We don't need the last sample in the list so let's remove it
samples.pop()
# Get a list of meltedResults files
meltedResultsFiles = listFiles(args.s3_cache_folder, suffix='.meltedResults.txt')
# Upload the bamsheet to the cache directory (because each job will need to determine what samples
    # to compare against based on its order in the bamsheet)
if not args.dry_run:
uploadFile(args.bamsheet, "%s/bamsheet.txt" % args.s3_cache_folder)
jobs = []
for sample in samples:
meltedResults = "%s/%s.meltedResults.txt" % (args.s3_cache_folder, sample['name'])
if meltedResults not in meltedResultsFiles:
logger.info("Comparing genotype of %s to other samples", sample['name'])
if args.local:
cmd = ["%s/compareGenotypes.py" % os.path.dirname(__file__),
"-s", sample['name'],
"--s3_cache_folder", args.s3_cache_folder]
if args.dry_run:
logger.info("Would call %s", cmd)
else:
response = _run(cmd)
else:
if args.dry_run:
logger.info("Would call batch.submit_job: compareGenotypes.py -s %s --s3_cache_folder %s",
sample['name'], args.s3_cache_folder)
else:
response = batch.submit_job(jobName='compareGenotypes-%s' % sample['name'],
jobQueue=args.job_queue,
jobDefinition=args.job_definition,
containerOverrides={'vcpus': 1,
'command': ['/compareGenotypes.py',
'-s', sample['name'],
'--s3_cache_folder',
args.s3_cache_folder]})
jobId = response['jobId']
jobs.append(jobId)
logger.debug(response)
logger.info("Submitted %s jobs", len(jobs))
completed_jobs = []
failed_jobs = []
while jobs:
logger.info("Sleeping 60 secs")
time.sleep(60)
logger.info("Checking job %s", jobs[0])
response = batch.describe_jobs(jobs=[jobs[0]])
logger.info("Job %s state is %s", jobs[0], response['jobs'][0]['status'])
        if response['jobs'][0]['status'] == 'SUCCEEDED':
            completed_jobs.append(jobs.pop(0))
        elif response['jobs'][0]['status'] == 'FAILED':
            failed_jobs.append(jobs.pop(0))
logger.info("Successed: %s", len(completed_jobs))
logger.info("Failed: %s", len(failed_jobs))
def parseArguments(argv):
''' Parse arguments '''
parser = argparse.ArgumentParser(description='Compare a set of samples')
parser.add_argument('-l', '--log-level', help="Prints warnings to console by default",
default="INFO", choices=["DEBUG", "INFO", "WARNING", "ERROR"])
parser.add_argument('-d', '--dry-run', default=False, action="store_true",
help="Simulate job submission")
required_args = parser.add_argument_group("Required")
required_args.add_argument('-b', '--bamsheet', required=True, help="Bamsheet")
required_args.add_argument("-CD", "--s3_cache_folder", required=True,
help="Specify S3 path for cached VCF/TSV files")
job_args = parser.add_argument_group("AWS Batch Job Settings")
job_args.add_argument('--local', action="store_true", default=False,
help="Run locally instead of in AWS Batch")
job_args.add_argument('-q', "--job-queue", action="store", default="ngs-job-queue",
help="AWS Batch Job Queue")
job_args.add_argument('-j', '--job-definition', action="store", default="cohort-matcher:2",
help="AWS Batch Job Definition")
args = parser.parse_args(argv)
return args
if __name__ == '__main__':
main(sys.argv[1:])
| 2.640625 | 3 |
pkpdapp/pkpdapp/wsgi.py | pkpdapp-team/pkpdapp | 4 | 12798626 | #
# This file is part of PKPDApp (https://github.com/pkpdapp-team/pkpdapp) which
# is released under the BSD 3-clause license. See accompanying LICENSE.md for
# copyright notice and full license details.
#
"""
WSGI config for pkpdapp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pkpdapp.settings')
application = get_wsgi_application()
| 1.421875 | 1 |
generation/SGAN/models.py | nyw-pathfinder/Deep-Learning-Bootcamp-with-PyTorch | 15 | 12798627 | <filename>generation/SGAN/models.py
import torch.nn as nn
class AuxiliaryClassifier(nn.Module):
def __init__(self, in_features, n_classes, kernel_size=1, stride=1, padding=0, bias=False, softmax=False,
cnn=False):
super(AuxiliaryClassifier, self).__init__()
if cnn:
classes = [nn.Conv2d(in_features, n_classes, kernel_size, stride, padding, bias=bias)]
validity = [nn.Conv2d(in_features, n_classes, kernel_size, stride, padding, bias=bias), nn.Sigmoid()]
else:
classes = [nn.Linear(in_features, n_classes)]
validity = [nn.Linear(in_features, 1), nn.Sigmoid()]
        self.classes = nn.Sequential(*classes, nn.Softmax(dim=1) if softmax else Identity())
self.validity = nn.Sequential(*validity)
def forward(self, x):
return self.classes(x), self.validity(x)
class Generator(nn.Module):
def __init__(self, cnn=False):
super(Generator, self).__init__()
act = nn.ReLU(inplace=True)
if not cnn:
model = [nn.Linear(in_features=100, out_features=512), act, nn.Dropout(p=0.5)]
model += [nn.Linear(in_features=512, out_features=256), act, nn.Dropout(p=0.5)]
model += [nn.Linear(in_features=256, out_features=28 * 28), nn.Tanh()]
else:
norm = nn.BatchNorm2d
model = [nn.Linear(100, 512 * 4 * 4), View([512, 4, 4]), norm(512), act] # 4x4
model += [nn.ConvTranspose2d(512, 256, 5, stride=2, padding=2), norm(256), act] # 7x7
model += [nn.ConvTranspose2d(256, 128, 5, stride=2, padding=2, output_padding=1), norm(128), act] # 14x14
model += [nn.ConvTranspose2d(128, 1, 5, stride=2, padding=2, output_padding=1), nn.Tanh()] # 28x28
self.model = nn.Sequential(*model)
def forward(self, x):
return self.model(x)
class Identity(nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
class Discriminator(nn.Module):
def __init__(self, cnn=False):
super(Discriminator, self).__init__()
act = nn.LeakyReLU(inplace=True, negative_slope=0.2)
if not cnn:
model = [nn.Linear(in_features=28 * 28, out_features=512), act]
model += [nn.Linear(in_features=512, out_features=256), act]
model += [AuxiliaryClassifier(256, 10)]
else:
norm = nn.BatchNorm2d
model = [nn.Conv2d(1, 128, 5, stride=2, padding=2, bias=False), act] # 14x14
model += [nn.Conv2d(128, 256, 5, stride=2, padding=2, bias=False), norm(256), act] # 7x7
model += [nn.Conv2d(256, 512, 5, stride=2, padding=2, bias=False), norm(512), act] # 4x4
model += [AuxiliaryClassifier(512, 10, kernel_size=4, bias=False, cnn=True)]
self.model = nn.Sequential(*model)
def forward(self, x):
return self.model(x)
class View(nn.Module):
def __init__(self, output_shape):
super(View, self).__init__()
self.output_shape = output_shape
def forward(self, x):
return x.view(x.shape[0], *self.output_shape)
def weights_init(module):
if isinstance(module, nn.Conv2d) or isinstance(module, nn.ConvTranspose2d):
module.weight.detach().normal_(mean=0., std=0.02)
elif isinstance(module, nn.BatchNorm2d):
module.weight.detach().normal_(1., 0.02)
module.bias.detach().zero_()
else:
pass
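
# --- Quick shape sanity check (not part of the original training code). ---
# A minimal, illustrative smoke test of the CNN variants defined above; the
# batch size of 2 and the use of a __main__ guard are arbitrary choices.
if __name__ == '__main__':
    import torch

    generator = Generator(cnn=True)
    discriminator = Discriminator(cnn=True)
    generator.apply(weights_init)
    discriminator.apply(weights_init)

    noise = torch.randn(2, 100)
    fake = generator(noise)                        # expected shape: (2, 1, 28, 28)
    class_scores, validity = discriminator(fake)   # (2, 10, 1, 1) and (2, 1, 1, 1)
    print(fake.shape, class_scores.shape, validity.shape)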
| 2.625 | 3 |
instance_based/tagvote.py | vohoaiviet/tag-image-retrieval | 50 | 12798628 | <reponame>vohoaiviet/tag-image-retrieval
import sys, os, time, random
from basic.constant import ROOT_PATH
from basic.common import printStatus, readRankingResults
from basic.util import readImageSet
from basic.annotationtable import readConcepts
from util.simpleknn import simpleknn
#from sandbox.pquan.pqsearch import load_model
INFO = __file__
DEFAULT_K=1000
DEFAULT_TPP = 'lemm'
DEFAULT_DISTANCE = 'l1'
DEFAULT_BLOCK_SIZE = 1000
DEFAULT_TAGGER = 'tagvote'
class TagVoteTagger:
def __init__(self, collection, annotationName, feature, distance, tpp=DEFAULT_TPP, rootpath=ROOT_PATH):
self.concepts = readConcepts(collection, annotationName, rootpath)
self.nr_of_concepts = len(self.concepts)
self.concept2index = dict(zip(self.concepts, range(self.nr_of_concepts)))
feat_dir = os.path.join(rootpath, collection, "FeatureData", feature)
id_file = os.path.join(feat_dir, 'id.txt')
shape_file = os.path.join(feat_dir, 'shape.txt')
self.nr_of_images, feat_dim = map(int, open(shape_file).readline().split())
self.searcher = simpleknn.load_model(os.path.join(feat_dir, 'feature.bin'), feat_dim, self.nr_of_images, id_file)
self.searcher.set_distance(distance)
self.k = DEFAULT_K
self._load_tag_data(collection, tpp, rootpath)
printStatus(INFO, "%s, %d images, %d unique tags, %s %d neighbours for voting" % (self.__class__.__name__, self.nr_of_images, len(self.tag2freq), distance, self.k))
def _load_tag_data(self, collection, tpp, rootpath):
tagfile = os.path.join(rootpath, collection, "TextData", "id.userid.%stags.txt" % tpp)
self.textstore = {}
self.tag2freq = {}
for line in open(tagfile):
imageid, userid, tags = line.split('\t')
tags = tags.lower()
self.textstore[imageid] = (userid, tags)
tagset = set(tags.split())
for tag in tagset:
self.tag2freq[tag] = self.tag2freq.get(tag,0) + 1
def tagprior(self, tag):
return float(self.k) * self.tag2freq.get(tag,0) / self.nr_of_images
def _get_neighbors(self, content, context):
return self.searcher.search_knn(content, max_hits=max(3000,self.k*3))
def _compute(self, content, context=None):
users_voted = set()
vote = [0-self.tagprior(c) for c in self.concepts] # vote only on the given concept list
voted = 0
skip = 0
neighbors = self._get_neighbors(content, context)
for (name, dist) in neighbors:
(userid,tags) = self.textstore.get(name, (None, None))
if tags is None or userid in users_voted:
skip += 1
continue
users_voted.add(userid)
tagset = set(tags.split())
for tag in tagset:
c_idx = self.concept2index.get(tag, -1)
if c_idx >= 0:
vote[c_idx] += 1
voted += 1
if voted >= self.k:
break
#assert(voted >= self.k), 'too many skips (%d) in %d neighbors' % (skip, len(neighbors))
return vote
def predict(self, content, context=None):
scores = self._compute(content, context)
return sorted(zip(self.concepts, scores), key=lambda v:v[1], reverse=True)
class PreTagVoteTagger (TagVoteTagger):
def __init__(self, collection, annotationName, feature, distance, tpp=DEFAULT_TPP, rootpath=ROOT_PATH):
self.rootpath = rootpath
self.concepts = readConcepts(collection, annotationName, rootpath)
self.nr_of_concepts = len(self.concepts)
self.concept2index = dict(zip(self.concepts, range(self.nr_of_concepts)))
self.imset = readImageSet(collection, collection, rootpath)
self.nr_of_images = len(self.imset)
self.knndir = os.path.join(collection, '%s,%sknn,uu,1500' % (feature, distance))
self.k = DEFAULT_K
self.noise = 0
self._load_tag_data(collection, tpp, rootpath)
printStatus(INFO, "%s, %d images, %d unique tags, %s %d neighbours for voting" % (self.__class__.__name__, self.nr_of_images, len(self.tag2freq), distance, self.k))
def set_noise(self, noise):
self.noise = noise
def _get_neighbors(self, content, context):
testCollection,testid = context.split(',')
knnfile = os.path.join(self.rootpath, testCollection, 'SimilarityIndex', testCollection, self.knndir, testid[-2:], '%s.txt' % testid)
knn = readRankingResults(knnfile)
knn = knn[:self.k]
if self.noise > 1e-3:
n = int(len(knn) * self.noise)
hits = random.sample(xrange(len(knn)), n)
random_set = random.sample(self.imset, n)
for i in range(n):
idx = hits[i]
knn[idx] = (random_set[i], 1000)
return knn
class PreKnnTagger (PreTagVoteTagger):
def __init__(self, collection, annotationName, feature, distance, tpp=DEFAULT_TPP, rootpath=ROOT_PATH, k = DEFAULT_K):
self.rootpath = rootpath
self.concepts = readConcepts(collection, annotationName, rootpath)
self.nr_of_concepts = len(self.concepts)
self.concept2index = dict(zip(self.concepts, range(self.nr_of_concepts)))
self.imset = readImageSet(collection, collection, rootpath)
self.nr_of_images = len(self.imset)
self.knndir = os.path.join(collection, '%s,%sknn,1500' % (feature, distance))
self.k = k
self.noise = 0
self._load_tag_data(collection, tpp, rootpath)
printStatus(INFO, "%s, %d images, %d unique tags, %s %d neighbours for voting" % (self.__class__.__name__, self.nr_of_images, len(self.tag2freq), distance, self.k))
def _compute(self, content, context=None):
vote = [0] * self.nr_of_concepts # vote only on the given concept list
voted = 0
skip = 0
neighbors = self._get_neighbors(content, context)
for (name, dist) in neighbors:
(userid,tags) = self.textstore.get(name, (None, None))
if tags is None:
skip += 1
continue
tagset = set(tags.split())
for tag in tagset:
c_idx = self.concept2index.get(tag, -1)
if c_idx >= 0:
vote[c_idx] += 1
voted += 1
if voted >= self.k:
break
#assert(voted >= self.k), 'too many skips (%d) in %d neighbors' % (skip, len(neighbors))
return vote
class PqTagVoteTagger (TagVoteTagger):
def __init__(self, collection, annotationName, feature, distance, tpp=DEFAULT_TPP, rootpath=ROOT_PATH):
self.rootpath = rootpath
self.concepts = readConcepts(collection, annotationName, rootpath)
self.nr_of_concepts = len(self.concepts)
self.concept2index = dict(zip(self.concepts, range(self.nr_of_concepts)))
featuredir = os.path.join(rootpath,collection,'FeatureData',feature)
id_file = os.path.join(featuredir, "id.txt")
        shape_file = os.path.join(featuredir, 'shape.txt')
self.nr_of_images, feat_dim = map(int, open(shape_file).readline().split())
self.searcher = load_model(featuredir, self.nr_of_images, feat_dim,nr_of_segments=512,segmentk=256,coarsek=4096)
self.k = DEFAULT_K
self._load_tag_data(collection, tpp, rootpath)
printStatus(INFO, "%s, %d images, %d unique tags, %s %d neighbours for voting" % (self.__class__.__name__, self.nr_of_images, len(self.tag2freq), distance, self.k))
def _get_neighbors(self, content, context):
return self.searcher.search_knn(content, requested=max(3000, self.k*3))
NAME_TO_TAGGER = {'tagvote':TagVoteTagger, 'pretagvote':PreTagVoteTagger, 'preknn':PreKnnTagger, 'pqtagvote':PqTagVoteTagger}
if __name__ == '__main__':
feature = 'vgg-verydeep-16-fc7relu'
tagger = TagVoteTagger('train10k', 'concepts81.txt', feature, 'cosine')
tagger = PreTagVoteTagger('train10k', 'concepts81.txt', feature, 'cosine')
tagger = PreKnnTagger('train10k', 'conceptsmir14.txt', feature, 'cosine')
| 2.421875 | 2 |
awe/parser.py | dankilman/pages | 97 | 12798629 | import inspect
import six
import yaml
from . import view
SPECIAL_KWARGS_KEYS = {'id', 'cols', 'updater'}
_init_cache = {}
class ParserContext(object):
def __init__(self, inputs):
self.inputs = inputs or {}
class Parser(object):
def __init__(self, registry):
self.registry = registry
def parse(self, obj, context):
obj = _prepare(obj)
obj = self._normalize_element(obj)
obj = self._process_intrinsic_functions(obj, context)
element_configuration = self._parse_dict(obj)
return element_configuration
def _parse_dict(self, obj):
assert isinstance(obj, dict)
assert len(obj) == 1
element_configuration = {
'kwargs': {
'props': {}
},
'kwargs_children': set(),
'prop_children': {},
'children': [],
'field': None
}
key, value = list(obj.items())[0]
element_type, additional_kwargs = self._parse_str(key)
element_configuration['element_type'] = element_type
element_configuration['kwargs'].update(additional_kwargs)
if isinstance(value, six.string_types):
if issubclass(element_type, view.Raw):
value = [{'Inline': value}]
else:
element_configuration['kwargs']['_awe_arg'] = value
value = []
value = value or []
if not isinstance(value, list):
raise ValueError('Value should be a string or a list, got: {}'.format(value))
if value and isinstance(value[0], list):
self._parse_element_configuration(element_configuration, element_type, value[0])
value = value[1:]
for item in value:
if isinstance(item, six.string_types) and not self._is_element_type(item):
item = {'Inline': item}
else:
item = self._normalize_element(item)
child_element_configuration = self._parse_dict(item)
element_configuration['children'].append(child_element_configuration)
return element_configuration
def _parse_element_configuration(self, result, element_type, configuration_items):
if not configuration_items:
return
if not isinstance(configuration_items, list):
raise ValueError('Element configuration should be passed as a list, got: {}'.format(configuration_items))
if isinstance(configuration_items[0], six.string_types):
result['field'] = configuration_items[0]
configuration_items = configuration_items[1:]
for item in configuration_items:
assert isinstance(item, dict)
assert len(item) == 1
key, value = list(item.items())[0]
is_element_value = self._is_intrinsic(value, '_')
if is_element_value:
value = value['_']
value = self._normalize_element(value)
value = self._parse_dict(value)
if key in SPECIAL_KWARGS_KEYS or key in self._get_init_args(element_type):
result['kwargs'][key] = value
if is_element_value:
result['kwargs_children'].add(key)
elif is_element_value:
result['prop_children'][key] = value
else:
result['kwargs']['props'][key] = value
def _parse_str(self, obj_str):
assert obj_str
if obj_str[0].islower():
return view.Raw, {'tag': obj_str}
elif obj_str in view.builtin_element_types:
return view.builtin_element_types[obj_str], {}
elif obj_str in self.registry.element_types:
return self.registry.element_types[obj_str], {}
raise ValueError('No such element: {}'.format(obj_str))
def _is_element_type(self, str_obj):
return (
str_obj in self.registry.element_types or
str_obj in view.builtin_element_types
)
@staticmethod
def _is_intrinsic(obj, key):
return isinstance(obj, dict) and len(obj) == 1 and bool(obj.get(key))
def _process_input(self, node, context):
input_node = self._process_intrinsic_functions(node['$'], context)
if isinstance(input_node, six.string_types):
input_node = [input_node]
input_name = input_node[0]
input_node = input_node[1:]
default_value = None
for entry in input_node:
assert isinstance(entry, dict)
assert len(entry) == 1
key, value = list(entry.items())[0]
if key == 'default':
default_value = value
else:
raise ValueError('Unknown config option: {}'.format(key))
if default_value:
return context.inputs.get(input_name, default_value)
else:
return context.inputs[input_name]
def _process_intrinsic_functions(self, obj, context):
def process(node):
if isinstance(node, dict):
if self._is_intrinsic(node, '$'):
return self._process_input(node, context)
return {k: process(v) for k, v in node.items()}
elif isinstance(node, list):
return [process(item) for item in node]
return node
return process(obj)
@staticmethod
def _normalize_element(obj):
if isinstance(obj, six.string_types):
obj = {obj: None}
elif isinstance(obj, list):
obj = {'div': obj}
return obj
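
    # For illustration (behaviour follows directly from the code above):
    #   _normalize_element('Button')   -> {'Button': None}
    #   _normalize_element(['a', 'b']) -> {'div': ['a', 'b']}
    # i.e. bare strings become single-key dicts and bare lists are wrapped in a div.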
@staticmethod
def _get_init_args(element_type):
if element_type in _init_cache:
return _init_cache[element_type]
result = set()
getargspec_impl = inspect.getargspec if six.PY2 else inspect.getfullargspec
spec = getargspec_impl(element_type._init)
result |= set(spec.args)
if six.PY3:
result |= set(spec.kwonlyargs)
_init_cache[element_type] = result
return result
def is_parsable(obj):
return isinstance(obj, six.string_types + (list, dict))
def _prepare(obj):
if isinstance(obj, six.string_types):
        obj = yaml.safe_load(obj)
return obj
| 2.359375 | 2 |
users/management/commands/utils/init_models.py | GolamMullick/HR_PROJECT | 0 | 12798630 | from users.models import Model
def init_models(license):
Model.load_on_migrate(license)
print("models worked!!") | 1.75 | 2 |
pydefect/analyzer/band_edge_states.py | kumagai-group/pydefect | 20 | 12798631 | <filename>pydefect/analyzer/band_edge_states.py<gh_stars>10-100
# -*- coding: utf-8 -*-
# Copyright (c) 2020. Distributed under the terms of the MIT License.
from dataclasses import dataclass
from typing import List, Dict, Tuple, Optional, Set
import numpy as np
from monty.json import MSONable
from pydefect.defaults import defaults
from pydefect.util.coords import pretty_coords
from tabulate import tabulate
from vise.util.mix_in import ToJsonFileMixIn
from vise.util.typing import Coords, GenCoords
printed_orbital_weight_threshold = 0.1
@dataclass
class BandEdgeEigenvalues(MSONable, ToJsonFileMixIn):
# [spin, k-idx, band-idx] = energy, occupation
energies_and_occupations: List[List[List[List[float]]]]
kpt_coords: List[Tuple[float, float, float]]
lowest_band_index: int
def pretty_orbital(orbitals: Dict[str, List[float]]):
"""
:param orbitals: An example is {"Mn": [0.5, 0.4, 0.01]} (no f-orbital)
:return: "Mn-s: 0.50, Mn-p: 0.40"
"""
orbital_infos = []
for elem, orbs in orbitals.items():
for orb_name, weight in zip(["s", "p", "d", "f"], orbs):
if weight > printed_orbital_weight_threshold:
orbital_infos.append(f"{elem}-{orb_name}: {weight:.2f}")
return ", ".join(orbital_infos)
@dataclass
class OrbitalInfo(MSONable):
"""Note that this is code and its version dependent quantities. """
energy: float # max eigenvalue
# {"Mn": [0.01, ..], "O": [0.03, 0.5]},
# where lists contain s, p, d, (f) orbital components.
orbitals: Dict[str, List[float]]
occupation: float
participation_ratio: float = None
@dataclass
class BandEdgeOrbitalInfos(MSONable, ToJsonFileMixIn):
orbital_infos: List[List[List["OrbitalInfo"]]] # [spin, k-idx, band-idx]
kpt_coords: List[Coords]
kpt_weights: List[float]
lowest_band_index: int
fermi_level: float
def kpt_idx(self, kpt_coord):
for i, orig_kpt in enumerate(self.kpt_coords):
if sum(abs(k - l) for k, l in zip(orig_kpt, kpt_coord)) < 1e-5:
return i
raise ValueError(f"{kpt_coord} not in kpt_coords {self.kpt_coords}.")
@property
def energies_and_occupations(self) -> List[List[List[List[float]]]]:
result = np.zeros(np.shape(self.orbital_infos) + (2,))
for i, x in enumerate(self.orbital_infos):
for j, y in enumerate(x):
for k, z in enumerate(y):
result[i][j][k] = [z.energy, z.occupation]
return result.tolist()
def __str__(self):
return "\n".join([" -- band-edge orbitals info",
"K-points info",
self._kpt_block,
"",
"Band info near band edges",
self._band_block])
@property
def _band_block(self):
band_block = [["Index", "Kpoint index", "Energy", "Occupation",
"P-ratio", "Orbital"]]
for orbital_info in self.orbital_infos:
max_idx, min_idx, t_orb_info = self._band_idx_range(orbital_info)
for band_idx in range(min_idx, max_idx):
actual_band_idx = band_idx + self.lowest_band_index + 1
for kpt_idx, orb_info in enumerate(t_orb_info[band_idx], 1):
energy = f"{orb_info.energy :5.2f}"
occupation = f"{orb_info.occupation:4.1f}"
p_ratio = f"{orb_info.participation_ratio:4.1f}"
orbs = pretty_orbital(orb_info.orbitals)
band_block.append([actual_band_idx, kpt_idx, energy,
occupation, p_ratio, orbs])
band_block.append(["--"])
band_block.append("")
return tabulate(band_block, tablefmt="plain")
@property
def _kpt_block(self):
kpt_block = [["Index", "Coords", "Weight"]]
for index, (kpt_coord, kpt_weight) in enumerate(
zip(self.kpt_coords, self.kpt_weights), 1):
coord = pretty_coords(kpt_coord)
weight = f"{kpt_weight:4.3f}"
kpt_block.append([index, coord, weight])
return tabulate(kpt_block, tablefmt="plain")
@staticmethod
def _band_idx_range(orbital_info: List[List[OrbitalInfo]]
) -> Tuple[int, int, List[List[OrbitalInfo]]]:
# swap [kpt_idx][band_idx] -> [bane_idx][kpt_idx]
t_orbital_info = np.array(orbital_info).T.tolist()
middle_idx = int(len(t_orbital_info) / 2)
for band_idx, (upper, lower) in enumerate(zip(t_orbital_info[1:],
t_orbital_info[:-1])):
# determine the band_idx where the occupation changes largely.
if lower[0].occupation - upper[0].occupation > 0.1:
middle_idx = band_idx + 1
break
max_idx = min(middle_idx + 3, len(t_orbital_info))
min_idx = max(middle_idx - 3, 0)
return max_idx, min_idx, t_orbital_info
@dataclass
class LocalizedOrbital(MSONable):
band_idx: int
ave_energy: float
occupation: float
orbitals: Dict[str, List[float]]
participation_ratio: Optional[float] = None
radius: Optional[float] = None
center: Optional[GenCoords] = None
@dataclass
class EdgeInfo(MSONable):
band_idx: int
kpt_coord: Coords
orbital_info: "OrbitalInfo"
@property
def orbitals(self):
return self.orbital_info.orbitals
@property
def energy(self):
return self.orbital_info.energy
@property
def occupation(self):
return self.orbital_info.occupation
@property
def p_ratio(self):
return self.orbital_info.participation_ratio
@dataclass
class PerfectBandEdgeState(MSONable, ToJsonFileMixIn):
vbm_info: EdgeInfo
cbm_info: EdgeInfo
def __str__(self):
def show_edge_info(edge_info: EdgeInfo):
return [edge_info.band_idx,
edge_info.energy,
f"{edge_info.occupation:5.2f}",
pretty_orbital(edge_info.orbital_info.orbitals),
pretty_coords(edge_info.kpt_coord)]
return tabulate([
["", "Index", "Energy", "Occupation", "Orbitals", "K-point coords"],
["VBM"] + show_edge_info(self.vbm_info),
["CBM"] + show_edge_info(self.cbm_info)], tablefmt="plain")
@dataclass
class BandEdgeState(MSONable):
vbm_info: EdgeInfo
cbm_info: EdgeInfo
vbm_orbital_diff: float
cbm_orbital_diff: float
localized_orbitals: List[LocalizedOrbital]
vbm_hole_occupation: float = None
cbm_electron_occupation: float = None
@property
def is_shallow(self):
return self.has_donor_phs or self.has_acceptor_phs
@property
def has_donor_phs(self):
return self.cbm_electron_occupation > defaults.state_occupied_threshold
@property
def has_acceptor_phs(self):
return self.vbm_hole_occupation > defaults.state_occupied_threshold
@property
def has_unoccupied_localized_state(self):
return any([lo.occupation < defaults.state_unoccupied_threshold
for lo in self.localized_orbitals])
@property
def has_occupied_localized_state(self):
return any([lo.occupation > defaults.state_occupied_threshold
for lo in self.localized_orbitals])
def __str__(self):
return "\n".join([self._edge_info,
"---", "Localized Orbital(s)",
self._orbital_info])
@property
def _orbital_info(self):
inner_table = [["Index", "Energy", "P-ratio", "Occupation", "Orbitals"]]
w_radius = self.localized_orbitals and self.localized_orbitals[0].radius
if w_radius:
inner_table[0].extend(["Radius", "Center"])
for lo in self.localized_orbitals:
participation_ratio = f"{lo.participation_ratio:5.2f}" \
if lo.participation_ratio else "None"
inner = [lo.band_idx + 1,
f"{lo.ave_energy:7.3f}",
participation_ratio,
f"{lo.occupation:5.2f}",
pretty_orbital(lo.orbitals)]
if w_radius:
inner.extend([f"{lo.radius:5.2f}", pretty_coords(lo.center)])
inner_table.append(inner)
return tabulate(inner_table, tablefmt="plain")
@property
def _edge_info(self):
inner_table = [["", "Index", "Energy", "P-ratio", "Occupation",
"OrbDiff", "Orbitals", "K-point coords"],
["VBM"] + self._show_edge_info(
self.vbm_info, self.vbm_orbital_diff),
["CBM"] + self._show_edge_info(
self.cbm_info, self.cbm_orbital_diff)]
table = tabulate(inner_table, tablefmt="plain")
vbm_phs = f"vbm has acceptor phs: {self.has_acceptor_phs} " \
f"({self.vbm_hole_occupation:5.3f} vs. {defaults.state_occupied_threshold})"
cbm_phs = f"cbm has donor phs: {self.has_donor_phs} " \
f"({self.cbm_electron_occupation:5.3f} vs. {defaults.state_occupied_threshold})"
return "\n".join([table, vbm_phs, cbm_phs])
@staticmethod
def _show_edge_info(edge_info: EdgeInfo, orb_diff: float):
return [edge_info.band_idx + 1,
f"{edge_info.energy:7.3f}",
f"{edge_info.p_ratio:5.2f}",
f"{edge_info.occupation:5.2f}",
f"{orb_diff:5.2f}",
pretty_orbital(edge_info.orbital_info.orbitals),
pretty_coords(edge_info.kpt_coord)]
@dataclass
class BandEdgeStates(MSONable, ToJsonFileMixIn):
states: List[BandEdgeState] # by spin.
@property
def is_shallow(self):
return any([i.is_shallow for i in self.states])
@property
def has_donor_phs(self):
return any([i.has_donor_phs for i in self.states])
@property
def has_acceptor_phs(self):
return any([i.has_acceptor_phs for i in self.states])
@property
def has_unoccupied_localized_state(self):
return any([i.has_unoccupied_localized_state for i in self.states])
@property
def has_occupied_localized_state(self):
return any([i.has_occupied_localized_state for i in self.states])
@property
def band_indices_from_vbm_to_cbm(self) -> List[int]:
indices_set = set()
for state in self.states:
indices_set.add(state.vbm_info.band_idx)
for lo in state.localized_orbitals:
indices_set.add(lo.band_idx)
indices_set.add(state.cbm_info.band_idx)
return sorted([i for i in indices_set])
def __str__(self):
lines = [" -- band-edge states info"]
for spin, state in zip(["up", "down"], self.states):
lines.append(f"Spin-{spin}")
lines.append(state.__str__())
lines.append("")
return "\n".join(lines)
| 2.21875 | 2 |
tests/create_table_test.py | aescwork/sqlitemgr | 1 | 12798632 | <reponame>aescwork/sqlitemgr
import unittest
import os
import sys
sys.path.append("../sqlitemgr/")
import sqlitemgr as sqm
class CreateTableTest(unittest.TestCase):
"""
The way the create_table function is being tested is to have the SQLiteMgr object compose and execute an SQL statement to create a table in fruit.db,
then get the cursor from the object, execute a SELECT statement against the table, then get the name of the columns in the table (in a list),
and compare with what should be the same list assigned to self.comp_names. If they match, the object successfully created the nut table in fruit.db.
"""
def setUp(self):
self.sm = sqm.SQLiteMgr("../fixtures/fruit.db")
self.sm.new_table("nuts").add_table_column("Nmbr", "INT", "PRIMARY KEY").add_table_column("Called", "TEXT", "UNIQUE").add_table_column("Description", "TEXT").create_table()
self.cursor = self.sm.get_cursor()
self.cursor.execute("SELECT * FROM nuts")
self.col_names = [description[0] for description in self.cursor.description] # gets the column names from nuts table because self.sm.get_cursor().execute() is selecting from nuts table
self.comp_names = ['Nmbr', 'Called', 'Description']
def test_create_table(self):
self.assertEqual(self.col_names, self.comp_names)
def test_result(self):
self.assertEqual(self.sm.result, "OK")
def tearDown(self):
self.sm.__del__()
if __name__ == '__main__':
unittest.main()
| 3.75 | 4 |
tests/data/queue_report_test.py | seoss/scs_core | 3 | 12798633 | <reponame>seoss/scs_core
#!/usr/bin/env python3
"""
Created on 27 Aug 2019
@author: <NAME> (<EMAIL>)
"""
from scs_core.data.queue_report import QueueReport, ClientStatus
# --------------------------------------------------------------------------------------------------------------------
filename = '/tmp/southcoastscience/queue_report.json'
report = QueueReport(23, ClientStatus.CONNECTED, True)
print(report)
print(report.as_json())
report.save(filename)
print("-")
report = QueueReport.load(filename)
print(report)
| 1.679688 | 2 |
websocket_server/quick.py | CylonicRaider/websocket-server | 1 | 12798634 | <filename>websocket_server/quick.py
# websocket_server -- WebSocket/HTTP server/client library
# https://github.com/CylonicRaider/websocket-server
"""
Convenience functions for quick usage.
"""
import sys
import argparse
from .server import WebSocketMixIn
from .httpserver import WSSHTTPServer, RoutingRequestHandler
from .httpserver import validate_origin, parse_origin
__all__ = ['DEFAULT_ADDRESS', 'RoutingWebSocketRequestHandler', 'tls_flags',
'resolve_listen_address', 'run']
DEFAULT_ADDRESS = ('', 8080)
class RoutingWebSocketRequestHandler(RoutingRequestHandler, WebSocketMixIn):
"""
An HTTP request handler combining all the package's functionality.
"""
def tls_flags(s):
"""
Parse a comma-separated key-value list as used for command-line TLS
configuration.
Returns a dictionary of the key-value pairs recovered from s.
"""
ret = {}
for item in s.split(','):
if not item: continue
key, sep, value = item.partition('=')
if not sep: raise ValueError('Invalid key-value pair %r' % (item,))
if key in ret: raise ValueError('Duplicate key %r' % (key,))
ret[key] = value
return ret
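
# For example (illustrative file names only):
#
#     >>> tls_flags('cert=server.pem,key=server.key')
#     {'cert': 'server.pem', 'key': 'server.key'}
#
# Duplicate keys and non-empty entries without an '=' raise ValueError, as implemented above.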
def resolve_listen_address(addr, origin, default_addr=None):
"""
resolve_listen_address(addr, origin, default_addr=None)
-> (host, port)
Fill in default host and port values into an address an HTTP server should
ultimately listen at.
addr is a (host, port) tuple with explicit values. If any of host
or port is None, a default value is derived for it from
origin or default_addr.
origin is a Web origin that is consulted for default host/port
values (if not None; if this is None, default_addr is used
instead).
default_addr is the ultimate fallback (host, port) tuple, and defaults to
the module-level DEFAULT_ADDRESS constant, viz. ('', 8080).
Returns the (host, port) tuple with defaults filled in.
"""
if default_addr is None: default_addr = DEFAULT_ADDRESS
host, port = addr
if origin: default_addr = parse_origin(origin)[1:]
if host is None: host = default_addr[0]
if port is None: port = default_addr[1]
return (host, port)
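
# Example (illustrative; assumes parse_origin('http://example.com:9000') yields
# the host/port pair ('example.com', 9000)):
#
#     resolve_listen_address((None, None), 'http://example.com:9000')
#         -> ('example.com', 9000)
#     resolve_listen_address((None, 8443), None)
#         -> ('', 8443)    # host falls back to DEFAULT_ADDRESS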
def run(handler, server=WSSHTTPServer, prepare=None, postparse=None,
premain=None):
"""
run(handler, server=WSSHTTPServer, prepare=None, postparse=None,
premain=None) -> None
Actually run a WebSocket server instance.
handler is the handler class to use.
server is a callable taking two arguments that creates the server
instance; the arguments are:
bindaddr: A (host, port) tuple containing the address to bind
to. Constructed from command-line arguments.
handler : The request handler. Passed through from the same-
named argument of run().
prepare is a callable that is invoked with the ArgumentParser (from
argparse) instance used to parse options as the only argument.
Can be used to specify additional options.
postparse is a callable invoked after parsing arguments (and resolving
complex default values) with the resulting arguments object as
the only positional argument. It can do things like complex
validation, and prevent the creation and running of a server by
raising an exception.
premain is called immediately before entering the main loop of the
internally created server object with two arguments:
httpd : The server object created; an instance of server.
arguments: The arguments as returned by argparse.ArgumentParser.
It can be used to pass on the values of the options configured
using prepare to the server object and the handler class.
"""
# Named function for better argparse output.
def origin(s): return validate_origin(s)
# Parse command-line arguments.
p = argparse.ArgumentParser()
p.add_argument('--port', '-p', metavar='PORT', type=int,
help='The TCP port to run on (defaults to the port from '
'the origin, or 8080).')
p.add_argument('--host', '-s', metavar='IP',
help='The network interface to bind to (defaults to the '
'host from the origin, or all interfaces).')
p.add_argument('--origin', '-O', type=origin,
help='A SCHEME://HOST[:PORT] string indicating how '
'clients should access this server. If omitted, '
'an attempt is made to guess the value from the '
'--host and --port parameters; if that fails, this '
'remains unset.')
p.add_argument('--tls', '-T', metavar='PARAM=VALUE[,...]', type=tls_flags,
help='Enable (mandatory) TLS, and configure it. The '
'following parameters are defined: "cert": A file '
'containing X.509 certificate in PEM format, along '
'with a CA certificate chain as necessary, to be '
'used by the server; "key": The private key file '
'belonging to cert (if omitted, the private key is '
'taken from the certificate file); "ca": Require '
'clients to be authenticated by one of the '
'certificates in this file.')
# Call preparation callback.
if prepare: prepare(p)
# Actually parse arguments.
arguments = p.parse_args()
# Resolve complex defaults.
arguments.host, arguments.port = resolve_listen_address(
(arguments.host, arguments.port), arguments.origin)
# Call next preparation callback.
if postparse: postparse(arguments)
# Create server.
httpd = server((arguments.host, arguments.port), handler)
if arguments.origin: httpd.origin = arguments.origin
if arguments.tls: httpd.setup_ssl(arguments.tls)
# Print header message.
    # Since the server has bound itself when it was constructed above, we can
# insert the final origin value.
if arguments.host:
address = '%s:%s' % (arguments.host, arguments.port)
else:
address = '*:%s' % arguments.port
origin_string = 'N/A' if httpd.origin is None else httpd.origin
sys.stderr.write('Serving HTTP on %s (origin %s)...\n' % (address,
origin_string))
sys.stderr.flush()
# Call final preparation hook.
if premain: premain(httpd, arguments)
# Run it.
try:
httpd.serve_forever()
except KeyboardInterrupt:
# Don't print a noisy stack trace if Ctrl+C'ed.
sys.stderr.write('\n')
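
# Minimal usage sketch (illustrative; the handler subclass and its route setup
# are application-specific and not prescribed here):
#
#     class MyHandler(RoutingWebSocketRequestHandler):
#         ...  # define routes / WebSocket callbacks as the application requires
#
#     if __name__ == '__main__':
#         run(MyHandler)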
| 2.828125 | 3 |
test/test_del_contact.py | vyacheslavmarkov/python_training | 0 | 12798635 | <gh_stars>0
from model.contact import Contact
import random
def test_delete_first_contact(app, db, check_ui):
if len(db.get_contact_list()) == 0:
app.contact.create(Contact(firstname="Tester", middlename="Something", lastname="Trump",
photo="picture.jpg", nickname="super nickname", title="QA engineer",
company="Google", address="Kremlin", homephone="1111111",
mobilephone="2222222", workphone="3333333", fax="4444444",
email="<EMAIL>", email2="<EMAIL>", email3="<EMAIL>",
homepage="google.com", bday="29", bmonth="April", byear="1991", aday="22",
amonth="August", ayear="2015", address_2="Moscow", secondaryphone="5555555",
notes="Cool guy"))
old_contacts = db.get_contact_list()
contact = random.choice(old_contacts)
app.contact.delete_contact_by_id(contact.id)
new_contacts = db.get_contact_list()
assert len(old_contacts) - 1 == len(new_contacts)
old_contacts.remove(contact)
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
if check_ui:
# tune db contact data to be appropriate for the homepage representation
old_contacts = app.contact.make_contacts_like_on_homepage(old_contacts)
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contacts_list(), key=Contact.id_or_max)
| 2.40625 | 2 |
IMLearn/metalearners/adaboost.py | dani3lwinter/IML.HUJI | 0 | 12798636 | <reponame>dani3lwinter/IML.HUJI<filename>IMLearn/metalearners/adaboost.py
import numpy as np
# from ...base import BaseEstimator
from IMLearn.base import BaseEstimator
from typing import Callable, NoReturn
from IMLearn.metrics import misclassification_error
class AdaBoost(BaseEstimator):
"""
AdaBoost class for boosting a specified weak learner
Attributes
----------
self.wl_: Callable[[], BaseEstimator]
Callable for obtaining an instance of type BaseEstimator
self.iterations_: int
Number of boosting iterations to perform
self.models_: List[BaseEstimator]
List of fitted estimators, fitted along the boosting iterations
"""
def __init__(self, wl: Callable[[], BaseEstimator], iterations: int):
"""
Instantiate an AdaBoost class over the specified base estimator
Parameters
----------
wl: Callable[[], BaseEstimator]
Callable for obtaining an instance of type BaseEstimator
iterations: int
Number of boosting iterations to perform
"""
super().__init__()
self.wl_ = wl
self.iterations_ = iterations
self.models_, self.weights_, self.D_ = None, None, None
@staticmethod
def __resample(cur_X, cur_y, sample_weights):
"""
Resample the current dataset according to sample_weights
"""
new_indices = np.random.choice(cur_y.size, size=cur_y.size, p=sample_weights)
return cur_X[new_indices, :], cur_y[new_indices]
@staticmethod
def weighted_loss(y_true, y_pred, sample_weights):
"""
Calculate the weighted loss for a given set of predictions
"""
misses = np.where(y_true != y_pred, 1, 0)
return np.sum(misses * sample_weights)
def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:
"""
Fit an AdaBoost classifier over given samples
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to fit an estimator for
y : ndarray of shape (n_samples, )
Responses of input data to fit to
"""
if len(X.shape) == 1:
X = X.reshape((-1, 1))
n_samples = X.shape[0]
self.models_ = [self.wl_() for _ in range(self.iterations_)]
self.D_ = np.full(n_samples, 1 / n_samples)
self.weights_ = np.zeros(self.iterations_)
for i in range(self.iterations_):
self.models_[i].fit(X, y * self.D_)
y_pred = self.models_[i].predict(X)
epsilon = self.weighted_loss(y, y_pred, self.D_)
# if loss is 0, then we have a perfect classifier
if epsilon == 0:
self.weights_ = np.zeros(self.iterations_)
self.weights_[i] = 1
self.iterations_ = i + 1
break
self.weights_[i] = np.log(1 / epsilon - 1) / 2 # if cur_loss > 0 else 1
self.D_ = self.D_ * np.exp(-y * self.weights_[i] * y_pred)
self.D_ = self.D_ / np.sum(self.D_)
def _predict(self, X):
"""
Predict responses for given samples using fitted estimator
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to predict responses for
Returns
-------
responses : ndarray of shape (n_samples, )
Predicted responses of given samples
"""
return self.partial_predict(X, self.iterations_)
def _loss(self, X: np.ndarray, y: np.ndarray) -> float:
"""
Evaluate performance under misclassification loss function
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Test samples
y : ndarray of shape (n_samples, )
True labels of test samples
Returns
-------
loss : float
            Performance under misclassification loss function
"""
return misclassification_error(y, self._predict(X), normalize=True)
def partial_predict(self, X: np.ndarray, T: int) -> np.ndarray:
"""
Predict responses for given samples using fitted estimators
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to predict responses for
T: int
The number of classifiers (from 1,...,T) to be used for prediction
Returns
-------
responses : ndarray of shape (n_samples, )
Predicted responses of given samples
"""
T = min(T, self.iterations_)
selected_models = self.models_[:T]
all_learners_pred = np.array([m.predict(X) for m in selected_models]).T
weighted_pred = all_learners_pred @ self.weights_[:T]
if len(weighted_pred.shape) == 1:
return np.sign(weighted_pred)
else:
return np.sign(weighted_pred.sum(axis=1))
def partial_loss(self, X: np.ndarray, y: np.ndarray, T: int) -> float:
"""
Evaluate performance under misclassification loss function
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Test samples
y : ndarray of shape (n_samples, )
True labels of test samples
T: int
The number of classifiers (from 1,...,T) to be used for prediction
Returns
-------
loss : float
            Performance under misclassification loss function
"""
y_pred = self.partial_predict(X, T)
return misclassification_error(y, y_pred, normalize=True)
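
# Minimal usage sketch (illustrative only). ``WeakLearner`` stands for a
# hypothetical BaseEstimator factory compatible with the interface used in
# ``_fit`` above (its ``fit`` receives labels already scaled by the sample
# weights, and labels are assumed to be in {-1, +1}):
#
#     model = AdaBoost(wl=WeakLearner, iterations=50)
#     model.fit(X_train, y_train)
#     y_hat = model.predict(X_test)
#     err_10 = model.partial_loss(X_test, y_test, T=10)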
| 2.9375 | 3 |
montydb/engine/core/__init__.py | gitter-badger/MontyDB | 0 | 12798637 | <gh_stars>0
from .field_walker import (
FieldWalker,
FieldWriteError,
_no_val,
)
from .weighted import (
Weighted,
gravity,
_cmp_decimal,
_decimal128_INF,
_decimal128_NaN_ls,
)
__all__ = [
"FieldWalker",
"FieldWriteError",
"_no_val",
"Weighted",
"gravity",
"_cmp_decimal",
"_decimal128_INF",
"_decimal128_NaN_ls",
]
| 1.5 | 2 |
pydigree/__init__.py | jameshicks/pydigree | 18 | 12798638 | #!/usr/bin/env python
import sys
if sys.version_info < (3,3):
raise ImportError('pydigree requires Python 3')
# Common functions (cumsum, table, etc)
import pydigree.common
from pydigree.ibs import ibs
from pydigree.rand import set_seed
# Functions for navigating pedigree structures
from pydigree.paths import path_downward, paths, paths_through_ancestor
from pydigree.paths import common_ancestors, kinship
# Reading and writing files
import pydigree.io
# Population growth models
from pydigree.population import exponential_growth, logistic_growth
# Classes
from pydigree.genotypes import ChromosomeTemplate
from pydigree.population import Population
from pydigree.pedigreecollection import PedigreeCollection
from pydigree.pedigree import Pedigree
from pydigree.individual import Individual
# Functions and classes for doing statistics
import pydigree.stats
# Functions for identifying shared genomic segments (SGS)
import pydigree.sgs
| 1.734375 | 2 |
sandbox/python/speed test/basicTest.py | atabulog/rpi-dashboard | 0 | 12798639 | <reponame>atabulog/rpi-dashboard<gh_stars>0
"""
author(s): <NAME>
date: 12/30/21
email: <EMAIL>
=========================================================
This file is subject to the MIT license copyright notice.
=========================================================
Script to test speedtest related objects.
"""
#imports
import pdb
from mysql.connector.errors import DataError
import dbIntfObj
import SpeedTestObj
if __name__ == "__main__":
#create connection to database
dbTable = dbIntfObj.networkDataTable()
#run a speed test
test = SpeedTestObj.SpeedTest()
test.run_bestTest()
#get data as formatted dictionary, sanitize, and insert into db
results = test.get_testData()
try:
dbTable.record_newEntry(server=results['server'],
ping=results['ping'],
upload=results['upload'],
download=results['download'])
    except Exception as e:
        raise DataError("Could not record new entry.") from e | 1.882813 | 2 |
main.py | szabolcsdombi/heightmap-multitexture-terrain | 1 | 12798640 | <filename>main.py
import math
import struct
import GLWindow
import ModernGL
from PIL import Image
from pyrr import Matrix44
wnd = GLWindow.create_window()
ctx = ModernGL.create_context()
prog = ctx.program([
ctx.vertex_shader('''
#version 330
uniform mat4 Mvp;
uniform sampler2D Heightmap;
in vec2 vert;
out vec2 v_text;
void main() {
vec4 vertex = vec4(vert - 0.5, texture(Heightmap, vert).r * 0.2, 1.0);
gl_Position = Mvp * vertex;
v_text = vert;
}
'''),
ctx.fragment_shader('''
#version 330
uniform sampler2D Heightmap;
uniform sampler2D Color1;
uniform sampler2D Color2;
uniform sampler2D Cracks;
uniform sampler2D Darken;
in vec2 v_text;
out vec4 f_color;
void main() {
float height = texture(Heightmap, v_text).r;
float border = smoothstep(0.5, 0.7, height);
vec3 color1 = texture(Color1, v_text * 7.0).rgb;
vec3 color2 = texture(Color2, v_text * 6.0).rgb;
vec3 color = color1 * (1.0 - border) + color2 * border;
color *= 0.8 + 0.2 * texture(Darken, v_text * 3.0).r;
color *= 0.5 + 0.5 * texture(Cracks, v_text * 5.0).r;
color *= 0.5 + 0.5 * height;
f_color = vec4(color, 1.0);
}
'''),
])
img0 = Image.open('data/heightmap.jpg').convert('L').transpose(Image.FLIP_TOP_BOTTOM)
img1 = Image.open('data/grass.jpg').convert('RGB').transpose(Image.FLIP_TOP_BOTTOM)
img2 = Image.open('data/rock.jpg').convert('RGB').transpose(Image.FLIP_TOP_BOTTOM)
img3 = Image.open('data/cracks.jpg').convert('L').transpose(Image.FLIP_TOP_BOTTOM)
img4 = Image.open('data/checked.jpg').convert('L').transpose(Image.FLIP_TOP_BOTTOM)
tex0 = ctx.texture(img0.size, 1, img0.tobytes())
tex1 = ctx.texture(img1.size, 3, img1.tobytes())
tex2 = ctx.texture(img2.size, 3, img2.tobytes())
tex3 = ctx.texture(img3.size, 1, img3.tobytes())
tex4 = ctx.texture(img4.size, 1, img4.tobytes())
tex0.build_mipmaps()
tex1.build_mipmaps()
tex2.build_mipmaps()
tex3.build_mipmaps()
tex4.build_mipmaps()
tex0.use(0)
tex1.use(1)
tex2.use(2)
tex3.use(3)
tex4.use(4)
prog.uniforms['Heightmap'].value = 0
prog.uniforms['Color1'].value = 1
prog.uniforms['Color2'].value = 2
prog.uniforms['Cracks'].value = 3
prog.uniforms['Darken'].value = 4
index = 0
vertices = bytearray()
indices = bytearray()
for i in range(64 - 1):
for j in range(64):
vertices += struct.pack('2f', i / 64, j / 64)
indices += struct.pack('i', index)
index += 1
vertices += struct.pack('2f', (i + 1) / 64, j / 64)
indices += struct.pack('i', index)
index += 1
    indices += struct.pack('i', -1)  # -1 is the primitive restart index: it ends this row's triangle strip
vbo = ctx.buffer(vertices)
ibo = ctx.buffer(indices)
vao = ctx.vertex_array(prog, [(vbo, '2f', ['vert'])], ibo)
while wnd.update():
angle = wnd.time * 0.5
width, height = wnd.size
proj = Matrix44.perspective_projection(45.0, width / height, 0.01, 10.0)
look = Matrix44.look_at((math.cos(angle), math.sin(angle), 0.8), (0.0, 0.0, 0.1), (0.0, 0.0, 1.0))
prog.uniforms['Mvp'].write((proj * look).astype('float32').tobytes())
ctx.enable(ModernGL.DEPTH_TEST)
ctx.viewport = wnd.viewport
ctx.clear(1.0, 1.0, 1.0)
vao.render(ModernGL.TRIANGLE_STRIP)
| 2.15625 | 2 |
pubmedmetrics/db.py | wenwei-dev/PubMedMetrics | 3 | 12798641 | import os
import logging
from contextlib import contextmanager
from sqlite3 import dbapi2 as sqlite
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy import Column, String, Integer, Sequence
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.dialects.sqlite import (BLOB, BOOLEAN, CHAR, DATE, DATETIME,
DECIMAL, FLOAT, INTEGER, NUMERIC, SMALLINT, TEXT, TIME, TIMESTAMP,
VARCHAR)
logger = logging.getLogger('db')
engine = create_engine("sqlite+pysqlite:///pubmed.db",
execution_options={"sqlite_raw_colnames": True}, module=sqlite)
Session = sessionmaker(bind=engine)
@contextmanager
def session_scope():
"""Provide a transactional scope around a series of operations."""
session = Session()
try:
yield session
session.commit()
except Exception as ex:
logger.error(ex)
session.rollback()
raise
finally:
session.close()
Base = declarative_base()
class PubMed(Base):
__tablename__ = 'pubmed'
pmid = Column(String(64), primary_key=True)
title = Column(String(256), nullable=False)
authors = Column(String(256))
summary = Column(String(256))
summary_detail = Column(String(256))
link = Column(String(256))
tags = Column(String(256))
key = Column(String(256))
create_dt = Column(DATETIME)
def __repr__(self):
return "<PubMed(pmid=%s,title=%s)>" % (self.pmid, self.title)
class Metric(Base):
__tablename__ = 'metric'
id = Column(Integer, Sequence('user_id_seq'), primary_key=True)
pmid = Column(String(64))
altmetric = Column(FLOAT)
create_dt = Column(DATETIME)
def __repr__(self):
return "<Metric(pmid=%s, metric=%s)>" % (self.pmid, self.altmetric)
Base.metadata.create_all(engine)
if __name__ == '__main__':
import datetime as dt
Base.metadata.create_all(engine)
with session_scope() as session:
pubmed = PubMed(
pmid='000000',
title='title',
authors='authors',
create_dt=dt.datetime.now()
)
session.merge(pubmed)
pubmed = PubMed(
pmid='000000',
title='title',
authors='authors2',
create_dt=dt.datetime.now()
)
session.merge(pubmed)
| 2.75 | 3 |
scripts/pyvision/gui/SimpleLiveDemo.py | wolfram2012/ros_track_ssd | 0 | 12798642 | '''
This file provides a basic framework for a live demo. In this case the
demo is for face and eye detection.
Copyright <NAME>
Created on Jul 9, 2011
@author: bolme
'''
import pyvision as pv
import cv
from pyvision.face.CascadeDetector import CascadeDetector
from pyvision.face.FilterEyeLocator import FilterEyeLocator
def mouseCallback(event, x, y, flags, param):
if event in [cv.CV_EVENT_LBUTTONDOWN,cv.CV_EVENT_LBUTTONUP]:
print "Mouse Event:",event,x,y
if __name__ == '__main__':
# Setup the webcam
webcam = pv.Webcam()
# Setup the face and eye detectors
cd = CascadeDetector(min_size=(100,100))
el = FilterEyeLocator()
# Setup the mouse callback to handle mause events (optional)
cv.NamedWindow("PyVision Live Demo")
cv.SetMouseCallback("PyVision Live Demo", mouseCallback)
while True:
# Grab a frame from the webcam
frame = webcam.query()
# Run Face and Eye Detection
rects = cd(frame)
eyes = el(frame,rects)
# Annotate the result
for rect,leye,reye in eyes:
frame.annotateThickRect(rect, 'green', width=3)
frame.annotatePoint(leye, color='green')
frame.annotatePoint(reye, color='green')
# Annotate instructions
frame.annotateLabel(pv.Point(10,10), "Press 'q' to quit.")
# Show the frame (uses opencv highgui)
key_press = frame.show("PyVision Live Demo")
# Handle key press events.
if key_press == ord('q'):
break | 2.828125 | 3 |
src/Loan Classification.py | Sami-ul/Loan-Classification | 2 | 12798643 | #!/usr/bin/env python
# coding: utf-8
# # Loan Classification Project
# In[1]:
# Libraries we need
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
from sklearn.metrics import confusion_matrix, classification_report, precision_recall_curve,recall_score
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
# In[2]:
df = pd.read_csv("Dataset.csv")
# In[3]:
df.head()
# In[4]:
df.info()
# In[5]:
df.nunique()
# - Above we can see that Reason and Bad are binary variables
# - Nothing needs to be dropped
# In[6]:
df.describe()
# In[7]:
plt.hist(df['BAD'], bins=3)
plt.show()
# In[8]:
df['LOAN'].plot(kind='density')
plt.show()
# In[9]:
plt.pie(df['REASON'].value_counts(), labels=['DebtCon', 'HomeImp'], autopct='%.1f')
plt.show()
df['REASON'].value_counts()
# In[10]:
correlation = df.corr()
sns.heatmap(correlation)
plt.show()
# In[11]:
df['BAD'].value_counts(normalize=True)
# In[12]:
df.fillna(df.mean(), inplace=True)
# In[13]:
one_hot_encoding = pd.get_dummies(df['REASON'])
df = df.drop('REASON', axis=1)
df = df.join(one_hot_encoding)
df
# In[14]:
one_hot_encoding2 = pd.get_dummies(df['JOB'])
df = df.drop('JOB', axis=1)
df = df.join(one_hot_encoding2)
df
# In[15]:
dependent = df['BAD']
independent = df.drop(['BAD'], axis=1)
x_train, x_test, y_train, y_test = train_test_split(independent, dependent, test_size=0.3, random_state=1)
# In[16]:
def metrics_score(actual, predicted):
print(classification_report(actual, predicted))
cm = confusion_matrix(actual, predicted)
plt.figure(figsize=(8,5))
sns.heatmap(cm, annot=True, fmt='.2f', xticklabels=['Not Default', 'Default'], yticklabels=['Not Default', 'Default'])
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show()
# In[17]:
dtree = DecisionTreeClassifier(class_weight={0:0.20, 1:0.80}, random_state=1)
# In[18]:
dtree.fit(x_train, y_train)
# In[19]:
dependent_performance_dt = dtree.predict(x_train)
metrics_score(y_train, dependent_performance_dt)
# - The above is perfect because we are using the training values, not the test values
# - Let's test on the test data
# In[20]:
dependent_test_performance_dt = dtree.predict(x_test)
metrics_score(y_test,dependent_test_performance_dt)
# - As we can see, we got decent performance from this model; let's see if we can do better
# - Self-note: do feature importances next
# In[21]:
important = dtree.feature_importances_
columns = independent.columns
important_items_df = pd.DataFrame(important, index=columns, columns=['Importance']).sort_values(by='Importance', ascending=False)
plt.figure(figsize=(13,13))
sns.barplot(important_items_df.Importance, important_items_df.index)
plt.show()
# - I followed this from a previous project to see the most important features
# - We can see that the most important features are DEBTINC, CLAGE and CLNO
# In[22]:
tree_estimator = DecisionTreeClassifier(class_weight={0:0.20, 1:0.80}, random_state=1)
parameters = {
'max_depth':np.arange(2,7),
'criterion':['gini', 'entropy'],
'min_samples_leaf':[5,10,20,25]
}
score = metrics.make_scorer(recall_score, pos_label=1)
gridCV= GridSearchCV(tree_estimator, parameters, scoring=score,cv=10)
gridCV = gridCV.fit(x_train, y_train)
tree_estimator = gridCV.best_estimator_
tree_estimator.fit(x_train, y_train)
# In[23]:
dependent_performance_dt = tree_estimator.predict(x_train)
metrics_score(y_train, dependent_performance_dt)
# - We increased the less harmful error but decreased the harmful error
# In[24]:
dependent_test_performance_dt = tree_estimator.predict(x_test)
metrics_score(y_test, dependent_test_performance_dt)
# - Although the performance is slightly worse, we still reduce harmful error
# In[25]:
important = tree_estimator.feature_importances_
columns=independent.columns
importance_df=pd.DataFrame(important,index=columns,columns=['Importance']).sort_values(by='Importance',ascending=False)
plt.figure(figsize=(13,13))
sns.barplot(importance_df.Importance,importance_df.index)
plt.show()
# In[26]:
features = list(independent.columns)
plt.figure(figsize=(30,20))
tree.plot_tree(dtree,max_depth=4,feature_names=features,filled=True,fontsize=12,node_ids=True,class_names=True)
plt.show()
# - A visualization is one of the advantages that decision trees offer; we can show this to the client to show the thought process
# In[27]:
forest_estimator = RandomForestClassifier(class_weight={0:0.20, 1:0.80}, random_state=1)
forest_estimator.fit(x_train, y_train)
# In[28]:
y_predict_training_forest = forest_estimator.predict(x_train)
metrics_score(y_train, y_predict_training_forest)
# - A perfect classification
# - This implies overfitting
# In[29]:
y_predict_test_forest = forest_estimator.predict(x_test)
metrics_score(y_test, y_predict_test_forest)
# - The performance is a lot better than the original single tree
# - Lets fix overfitting
# In[30]:
forest_estimator_tuned = RandomForestClassifier(class_weight={0:0.20,1:0.80}, random_state=1)
parameters_rf = {
"n_estimators": [100,250,500],
"min_samples_leaf": np.arange(1, 4,1),
"max_features": [0.7,0.9,'auto'],
}
score = metrics.make_scorer(recall_score, pos_label=1)
# Run the grid search
grid_obj = GridSearchCV(forest_estimator_tuned, parameters_rf, scoring=score, cv=5)
grid_obj = grid_obj.fit(x_train, y_train)
# Set the clf to the best combination of parameters
forest_estimator_tuned = grid_obj.best_estimator_
# In[31]:
forest_estimator_tuned.fit(x_train, y_train)
# In[32]:
y_predict_train_forest_tuned = forest_estimator_tuned.predict(x_train)
metrics_score(y_train, y_predict_train_forest_tuned)
# In[33]:
y_predict_test_forest_tuned = forest_estimator_tuned.predict(x_test)
metrics_score(y_test, y_predict_test_forest_tuned)
# - We now have very good performance
# - We can submit this to the company
# ### Conclusion
# - I made many models to get the best results.
# - The first one I made was a decision tree; it is not as good as a random forest, but it is transparent because it lets us visualize it. This first model had decent performance.
# - To improve its performance, we tuned the model, which reduced the harmful error.
# - Then, to improve even more, I created a random forest model; it had excellent performance once we created a second version which removed the overfitting.
# ### Recommendations
# - The biggest thing that affects defaulting on a loan is the debt-to-income ratio. If someone has a lot of debt and a lower income, they may have a harder time paying back a loan.
# - Something else that affects defaulting on a loan is the number of delinquent credit lines. This means that someone who cannot make their credit card payments will have a hard time paying back a loan.
# - Years at a job is also a driver of a loan's outcome. A large number of years at a job could indicate financial stability.
# - DEROG, or a history of delinquent payments, is also a warning sign of not being able to pay back a loan.
# - Those are some warning signs/good signs that should be looked out for when looking for candidates to give loans to.
#
# I will now apply SHAP to look more into this model.
# In[34]:
get_ipython().system('pip install shap')
import shap
# In[35]:
shap.initjs()
# In[36]:
explain = shap.TreeExplainer(forest_estimator_tuned)
shap_vals = explain(x_train)
# In[37]:
type(shap_vals)
# In[38]:
shap.plots.bar(shap_vals[:, :, 0])
# In[39]:
shap.plots.heatmap(shap_vals[:, :, 0])
# In[40]:
shap.summary_plot(shap_vals[:, :, 0], x_train)
# In[53]:
print(forest_estimator_tuned.predict(x_test.iloc[107].to_numpy().reshape(1,-1))) # This predicts for one row, 0 means approved, 1 means no.
| 3.046875 | 3 |
InBuiltExcptionHandling.py | ShanmukhaSrinivas/python-75-hackathon | 0 | 12798644 | <filename>InBuiltExcptionHandling.py
#Handling Built-in Exceptions
try:
a=[int(i) for i in input('Enter a list:').split()]
except Exception as e:
print(e)
else:
print(sum(a))
| 2.9375 | 3 |
gerador_de_senha.py | marcelo-py/gerador_de_senhas | 1 | 12798645 | from random import randint, choice, shuffle
"""gerador de senha simples
ele basicamente pega todo tipo de caractere digitado pelo usuário
e depois de cada um deles é colocado um número de 1 a 10 e mais um símbolo"""
letras = list()
senha = list()
chave = input('Digite a base da sua senha: ')
while len(chave) > 8:
chave = input('A base só pode ter até 8 caracteres, digite a base da sua senha novamente: ')
for c in range(0, len(chave)):
letras.append(chave[c])
senha.append(letras[:])
letras.pop()
caracteres = (')', '*', '/', '%', '!')
print('a senha gerada é: ')
shuffle(senha)
cores = ('\033[1;31m', '\033[1;32m', '\033[1;33m')
for i, l in enumerate(senha):
adcionais = randint(0, 10)
p = l[:][0]
p += str(adcionais) + choice(caracteres)
print(p,end='')
print('\n\033[32;1mBoa sorte em decorar ela!')
| 3.90625 | 4 |
3vennsvg.py | rwst/wikidata-molbio | 2 | 12798646 | from sys import *
from matplotlib_venn import venn3, venn3_circles
from matplotlib import pyplot as plt
s1 = set(open('wd-inst-of-prot', 'r').readlines())
s2 = set(open('wd-subc-of-prot', 'r').readlines())
s3 = set(open('wd-refseqp', 'r').readlines())
#s4 = set(open('t4', 'r').readlines())
venn3([s3,s2,s1], ('RefSeq', 'subc', 'inst of protein'))
c = venn3_circles([s3,s2,s1])
c[0].set_lw(1.0)
plt.show()
#plt.savefig('venn.svg')
| 2.171875 | 2 |
test_plot.py | EntropyF/ia-flood-risk-project | 0 | 12798647 | import datetime
from floodsystem.datafetcher import fetch_measure_levels
from floodsystem.stationdata import build_station_list
from floodsystem.plot import plot_water_levels, plot_water_level_with_fit
stations = build_station_list()
import numpy as np
def test_plot_water_level_with_fit():
x = np.linspace(1, 1000, 100000)
y = []
for i in x:
y.append(3*i**2 + 5)
p_coeff = np.polyfit(x, y, 2)
poly = np.poly1d(p_coeff)
    assert round(p_coeff[0]) == 3 | 2.65625 | 3 |
src/OTLMOW/OTLModel/Datatypes/KlLEMarkeringSoort.py | davidvlaminck/OTLClassPython | 2 | 12798648 | <gh_stars>1-10
# coding=utf-8
from OTLMOW.OTLModel.Datatypes.KeuzelijstField import KeuzelijstField
from OTLMOW.OTLModel.Datatypes.KeuzelijstWaarde import KeuzelijstWaarde
# Generated with OTLEnumerationCreator. To modify: extend, do not edit
class KlLEMarkeringSoort(KeuzelijstField):
"""Mogelijke markeringsoorten op een lijvormig element."""
naam = 'KlLEMarkeringSoort'
label = 'Soort markering van lijnvormig element'
objectUri = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#KlLEMarkeringSoort'
definition = 'Mogelijke markeringsoorten op een lijvormig element.'
codelist = 'https://wegenenverkeer.data.vlaanderen.be/id/conceptscheme/KlLEMarkeringSoort'
options = {
'biggenrug': KeuzelijstWaarde(invulwaarde='biggenrug',
label='biggenrug',
definitie='Een betonnen obstakel dat meestal een infrastructurele en beschermende functie heeft',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlLEMarkeringSoort/biggenrug'),
'boordsteen': KeuzelijstWaarde(invulwaarde='boordsteen',
label='boordsteen',
definitie='Een lijnvormig element dat de scheiding verzorgt tussen een rijbaan en het meestal hoger gelegen trottoir',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlLEMarkeringSoort/boordsteen'),
'boordsteen-parkeerverbod': KeuzelijstWaarde(invulwaarde='boordsteen-parkeerverbod',
label='boordsteen parkeerverbod',
definitie='Een lijnvormig element dat de scheiding verzorgt tussen een rijbaan en het meestal hoger gelegen trottoir met als functie het aanduiden van parkeerverbod',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlLEMarkeringSoort/boordsteen-parkeerverbod'),
'new-Jersey': KeuzelijstWaarde(invulwaarde='new-Jersey',
label='new Jersey',
definitie='Een afschermende constructie uit kunststof, beton of metaal dat naast wegen wordt geplaatst om te voorkomen dat voertuigen de weg in zijdelingse richting verlaten, kantelen of de middenberm doorkruisen.',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlLEMarkeringSoort/new-Jersey'),
'vangrail': KeuzelijstWaarde(invulwaarde='vangrail',
label='vangrail',
definitie='Een afschermende constructie die naast wegen wordt geplaatst om te voorkomen dat voertuigen de weg in zijdelingse richting verlaten, kantelen of de middenberm doorkruisen.',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlLEMarkeringSoort/vangrail')
}
| 1.84375 | 2 |
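A hedged usage sketch for the generated codelist above (it assumes KeuzelijstWaarde exposes its constructor arguments as attributes; that class is defined elsewhere in OTLMOW and not shown here):

from OTLMOW.OTLModel.Datatypes.KlLEMarkeringSoort import KlLEMarkeringSoort  # import path inferred from the file location

keuze = KlLEMarkeringSoort.options['vangrail']
print(keuze.invulwaarde, '-', keuze.objectUri)  # assumes these attributes exist on KeuzelijstWaarde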
CSIKit/csi/frames/esp.py | serrhini/CSIKit | 0 | 12798649 | from CSIKit.csi import CSIFrame
import ast
import numpy as np
class ESP32CSIFrame(CSIFrame):
# https://docs.espressif.com/projects/esp-idf/en/latest/esp32/api-reference/network/esp_wifi.html#_CPPv418wifi_pkt_rx_ctrl_t
__slots__ = ["type", "role", "mac", "rssi", "rate", "sig_mode", "mcs", "bandwidth", "smoothing", "not_sounding",
"aggregation", "stbc", "fec_coding", "sgi", "noise_floor", "ampdu_cnt", "channel", "secondary_channel",
"local_timestamp", "ant", "sig_len", "rx_state", "real_time_set", "real_timestamp", "len", "CSI_DATA"]
def __init__(self, csv_line: list):
self.type = csv_line[0]
self.role = csv_line[1]
self.mac = csv_line[2]
self.rssi = csv_line[3]
self.rate = csv_line[4]
self.sig_mode = csv_line[5]
self.mcs = csv_line[6]
self.bandwidth = 20 if csv_line[7] == "0" else 40
self.smoothing = csv_line[8]
self.not_sounding = csv_line[9]
self.aggregation = csv_line[10]
self.stbc = csv_line[11]
self.fec_coding = csv_line[12]
self.sgi = csv_line[13]
self.noise_floor = csv_line[14]
self.ampdu_cnt = csv_line[15]
self.channel = csv_line[16]
self.secondary_channel = csv_line[17]
self.local_timestamp = csv_line[18]
self.ant = csv_line[19]
self.sig_len = csv_line[20]
self.rx_state = csv_line[21]
self.real_time_set = csv_line[22]
self.real_timestamp = csv_line[23]
self.len = csv_line[24]
string_data = csv_line[25]
        self.csi_matrix = ESP32CSIFrame.parse_matrix(string_data, self.bandwidth)  # use the frame's bandwidth so padding targets the right length
@staticmethod
def parse_matrix(string_data, bandwidth=20):
array_string = string_data.replace(" ", ", ")
array_string_asarray = ast.literal_eval(array_string)
if bandwidth == 20 and len(array_string_asarray) < 128:
ESP32CSIFrame.fill_missing(array_string_asarray, 128)
elif bandwidth == 40 and len(array_string_asarray) < 256:
ESP32CSIFrame.fill_missing(array_string_asarray, 256)
int8_matrix = np.array(array_string_asarray)
int8_matrix = int8_matrix.reshape(-1, 2)
complex_matrix = int8_matrix.astype(np.float32).view(np.complex64)
return complex_matrix
# Seems some CSI lines are missing a value.
# Very rare, I assume weird dropped behaviour.
# Probably not the best way to fill the gap.
@staticmethod
def fill_missing(array, expected_length):
remainder = expected_length - len(array)
for _ in range(remainder):
array.append(0) | 2.3125 | 2 |
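A short usage sketch of the parser above (the input string is synthetic, not captured ESP32 output): parse_matrix reads the space-separated int8 values as consecutive pairs and views each pair as one complex sample, zero-padding short 20 MHz payloads to 128 values (64 subcarriers):

from CSIKit.csi.frames.esp import ESP32CSIFrame  # import path inferred from the file location

demo = ESP32CSIFrame.parse_matrix("[1 2 3 4]")
print(demo.shape)        # (64, 1) after zero-padding
print(demo[0], demo[1])  # [1.+2.j] [3.+4.j]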
examples/simple_expression/exp_autoencoder.py | m-colombo/tf_tree | 0 | 12798650 | from tensorflow_trees.encoder import Encoder, EncoderCellsBuilder
from tensorflow_trees.decoder import Decoder, DecoderCellsBuilder
from examples.simple_expression.exp_definition import BinaryExpressionTreeGen, NaryExpressionTreeGen
from tensorflow_trees.definition import Tree
from examples.simple_expression.flags_definition import *
import tensorflow as tf
import tensorflow.contrib.eager as tfe
import tensorflow.contrib.summary as tfs
import os
import json
FLAGS = tf.flags.FLAGS
def main(argv=None):
#########
# Checkpoints and Summaries
#########
if tf.gfile.Exists(FLAGS.model_dir):
if FLAGS.overwrite:
tf.logging.warn("Deleting old log directory at {}".format(FLAGS.model_dir))
tf.gfile.DeleteRecursively(FLAGS.model_dir)
tf.gfile.MakeDirs(FLAGS.model_dir)
else:
raise ValueError("Log dir already exists!")
else:
tf.gfile.MakeDirs(FLAGS.model_dir)
summary_writer = tfs.create_file_writer(FLAGS.model_dir, flush_millis=1000)
summary_writer.set_as_default()
print("Summaries in " + FLAGS.model_dir)
with open(os.path.join(FLAGS.model_dir, "flags.json"), 'w') as f:
json.dump(FLAGS.flag_values_dict(), f)
#########
# DATA
#########
if FLAGS.fixed_arity:
tree_gen = BinaryExpressionTreeGen(9)
else:
tree_gen = NaryExpressionTreeGen(9, FLAGS.max_arity)
def get_batch():
return [tree_gen.generate(FLAGS.max_depth) for _ in range(FLAGS.batch_size)]
#########
# MODEL
#########
activation = getattr(tf.nn, FLAGS.activation)
encoder = Encoder(tree_def=tree_gen.tree_def,
embedding_size=FLAGS.embedding_size,
cut_arity=FLAGS.cut_arity, max_arity=FLAGS.max_arity,
variable_arity_strategy=FLAGS.enc_variable_arity_strategy,
cellsbuilder=EncoderCellsBuilder(
EncoderCellsBuilder.simple_cell_builder(hidden_coef=FLAGS.hidden_cell_coef,
activation=activation,
gate=FLAGS.encoder_gate),
EncoderCellsBuilder.simple_dense_embedder_builder(activation=activation)),
name='encoder')
decoder = Decoder(tree_def=tree_gen.tree_def,
embedding_size=FLAGS.embedding_size,
max_node_count=FLAGS.max_node_count,
max_depth=FLAGS.max_depth,
max_arity=FLAGS.max_arity,
cut_arity=FLAGS.cut_arity,
cellsbuilder=DecoderCellsBuilder(
distrib_builder=
DecoderCellsBuilder.simple_distrib_cell_builder(FLAGS.hidden_cell_coef,
activation=activation),
categorical_value_inflater_builder=
DecoderCellsBuilder.simple_1ofk_value_inflater_builder(FLAGS.hidden_cell_coef,
activation=activation),
dense_value_inflater_builder=None, # unused
node_inflater_builder=
DecoderCellsBuilder.simple_node_inflater_builder(FLAGS.hidden_cell_coef,
activation=activation,
gate=FLAGS.decoder_gate)),
variable_arity_strategy=FLAGS.dec_variable_arity_strategy)
###########
# TRAINING
###########
optimizer = tf.train.AdamOptimizer()
with tfs.always_record_summaries():
for i in range(FLAGS.max_iter):
with tfe.GradientTape() as tape:
xs = get_batch()
batch_enc = encoder(xs)
batch_dec = decoder(encodings=batch_enc.get_root_embeddings(), targets=xs)
loss_struct, loss_val = batch_dec.reconstruction_loss()
loss = loss_struct + loss_val
variables = encoder.variables + decoder.variables
grad = tape.gradient(loss, variables)
gnorm = tf.global_norm(grad)
grad, _ = tf.clip_by_global_norm(grad, 0.02, gnorm)
tfs.scalar("norms/grad", gnorm)
optimizer.apply_gradients(zip(grad, variables), global_step=tf.train.get_or_create_global_step())
if i % FLAGS.check_every == 0:
batch_unsuperv = decoder(encodings=batch_enc.get_root_embeddings())
_, _, v_avg_sup, v_acc_sup = Tree.compare_trees(xs, batch_dec.decoded_trees)
s_avg, s_acc, v_avg, v_acc = Tree.compare_trees(xs, batch_unsuperv.decoded_trees)
print("{0}:\t{1:.3f}".format(i, loss))
tfs.scalar("loss/struct", loss_struct)
tfs.scalar("loss/val", loss_val)
tfs.scalar("loss/loss", loss)
tfs.scalar("overlaps/supervised/value_avg", v_avg_sup)
tfs.scalar("overlaps/supervised/value_acc", v_acc_sup)
tfs.scalar("overlaps/unsupervised/struct_avg", s_avg)
tfs.scalar("overlaps/unsupervised/struct_acc", s_acc)
tfs.scalar("overlaps/unsupervised/value_avg", v_avg)
tfs.scalar("overlaps/unsupervised/value_acc", v_acc)
if __name__ == "__main__":
define_common_flags()
define_encoder_flags()
define_decoder_flags()
tfe.run()
| 2.171875 | 2 |
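A minimal sketch of the gradient-clipping pattern used in the training loop above (values are illustrative; it relies only on tf.global_norm and tf.clip_by_global_norm, the TF 1.x calls the script itself uses): computing the global norm once and passing it via use_norm avoids a second pass over the gradients.

import tensorflow as tf

grads = [tf.constant([3.0, 4.0])]  # global norm = 5.0
gnorm = tf.global_norm(grads)
clipped, _ = tf.clip_by_global_norm(grads, clip_norm=0.02, use_norm=gnorm)
# each gradient is rescaled by clip_norm / gnorm = 0.004 because gnorm > clip_norm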