max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5)
---|---|---|---|---|---|---
model/contact.py | karolinahalawin/python_training | 0 | 12795851 |
from sys import maxsize
class Contact:
def __init__(self, first_name=None, middle_name=None, last_name=None, nickname=None, title=None, company=None,
address=None, phone_home=None, phone_mobile=None, phone_work=None, fax=None, email_1=None,
email_2=None, email_3=None, homepage=None, birthday_day=None, birthday_month=None, birthday_year=None,
anniversary_day=None, anniversary_month=None, anniversary_year=None, address_2=None, phone_2=None,
notes=None, id=None, all_phones_from_home_page=None, all_emails_from_home_page=None):
self.first_name = first_name
self.middle_name = middle_name
self.last_name = last_name
self.nickname = nickname
self.title = title
self.company = company
self.address = address
self.phone_home = phone_home
self.phone_mobile = phone_mobile
self.phone_work = phone_work
self.fax = fax
self.email_1 = email_1
self.email_2 = email_2
self.email_3 = email_3
self.homepage = homepage
self.birthday_day = birthday_day
self.birthday_month = birthday_month
self.birthday_year = birthday_year
self.anniversary_day = anniversary_day
self.anniversary_month = anniversary_month
self.anniversary_year = anniversary_year
self.address_2 = address_2
self.phone_2 = phone_2
self.notes = notes
self.id = id
self.all_phones_from_home_page = all_phones_from_home_page
self.all_emails_from_home_page = all_emails_from_home_page
def __repr__(self):
return "%s:%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s" % (
self.first_name, self.last_name, self.middle_name, self.nickname, self.title, self.company, self.address,
self.phone_home, self.phone_mobile, self.phone_work, self.fax, self.email_1, self.email_2, self.email_3,
self.homepage, self.address_2, self.phone_2, self.notes)
def __eq__(self, other):
return (
self.id is None or other.id is None or self.id == other.id) and self.first_name == other.first_name and (
self.last_name is None or other.last_name is None or self.last_name == other.last_name)
def id_or_max(self):
if self.id:
return int(self.id)
else:
return maxsize
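# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# id_or_max sorts contacts without an id to the end, and __eq__ treats a missing id or
# last name as a wildcard. The sample data below is made up purely for illustration.
if __name__ == "__main__":
    contacts = [Contact(first_name="Ann", id="7"), Contact(first_name="Bob")]
    contacts.sort(key=Contact.id_or_max)  # "Bob" (no id) sorts after "Ann"
    assert Contact(first_name="Ann", id="7") == Contact(first_name="Ann")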
| 2.65625 | 3 |
src/test/tests/databases/xform_precision.py | visit-dav/vis | 226 | 12795852 |
# ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: xform_precision.py
#
# Tests: Transform manager's conversion to float
#
# Programmer: <NAME>
# Date: September 24, 2006
#
# Modifications:
#
# <NAME>, Wed Jan 20 07:37:11 PST 2010
#    Added ability to switch between Silo's HDF5 and PDB data.
# ----------------------------------------------------------------------------
OpenDatabase(silo_data_path("quad_disk.silo"))
#
# Turn off force single precision for this test
#
readOptions=GetDefaultFileOpenOptions("Silo")
readOptions["Force Single"] = 0
SetDefaultFileOpenOptions("Silo", readOptions)
#
# Test ordinary float data (no conversion) first
#
AddPlot("Mesh","mesh")
DrawPlots()
Test("float_xform_01")
DeleteAllPlots()
#
# Ok, now read a mesh with double coords
#
AddPlot("Mesh","meshD")
DrawPlots()
Test("float_xform_02")
DeleteAllPlots()
CloseDatabase(silo_data_path("quad_disk.silo"))
OpenDatabase(silo_data_path("quad_disk.silo"))
#
# test float data on a float mesh
#
AddPlot("Pseudocolor","sphElev_on_mesh")
DrawPlots()
Test("float_xform_03")
DeleteAllPlots()
#
# test float data on a double mesh
#
AddPlot("Pseudocolor","sphElev_on_meshD")
DrawPlots()
Test("float_xform_04")
DeleteAllPlots()
#
# test double data on a float mesh
#
AddPlot("Pseudocolor","sphElevD_on_mesh")
DrawPlots()
Test("float_xform_05")
DeleteAllPlots()
CloseDatabase(silo_data_path("quad_disk.silo"))
OpenDatabase(silo_data_path("quad_disk.silo"))
#
# test double data on a double mesh
#
AddPlot("Pseudocolor","sphElevD_on_meshD")
DrawPlots()
Test("float_xform_06")
DeleteAllPlots()
Exit()
| 1.765625 | 2 |
rustiql/view/tree_hook.py | pyrustic/rustiql | 1 | 12795853 |
import tkinter as tk
from megawidget.tree import Hook
class TreeHook(Hook):
def __init__(self, parent_view, nodebar_builder, host):
self._parent_view = parent_view
self._nodebar_builder = nodebar_builder
self._host = host
self._stringvar_expander = tk.StringVar()
self._stringvar_title = tk.StringVar()
self._collapsable_frame = None
self._nodebar = None
self._formatter = None
def on_change_database(self, path):
self._parent_view.open_database(path)
def on_click_truncate(self, table):
sql = "DELETE FROM {}".format(table)
formatter = "inline"
self._parent_view.push_sql(sql, formatter, execute=True)
def on_click_drop(self, table):
sql = "DROP TABLE {}".format(table)
formatter = "inline"
self._parent_view.push_sql(sql, formatter, execute=True)
def on_click_explore(self, table):
sql = "SELECT * FROM {}".format(table)
formatter = "inline"
self._parent_view.push_sql(sql, formatter, execute=True)
def build_node(self, tree, node, frame):
node_id = node["node_id"]
if node_id == 0:
return
# some vars
title = node["title"]
result = node["data"]["result"]
datatype = node["data"]["type"]
description = node["data"]["description"]
file = node["data"]["file"]
path = node["data"]["path"]
real_path = node["data"]["realpath"]
self._formatter = node["data"]["formatter"]
# Populate stringvars
self._stringvar_expander.set("-" if node["expanded"] else "+")
self._stringvar_title.set(title)
# config header frame
frame.columnconfigure(0, weight=0)
frame.columnconfigure(1, weight=0)
frame.columnconfigure(2, weight=1)
# Fill titlebar
# - button expander
command = (lambda tree=tree, node_id=node_id:
tree.collexp(node_id))
button_expander = tk.Button(frame,
name="treeExpanderButton",
textvariable=self._stringvar_expander,
command=command)
# - button edit
button_edit = tk.Button(frame,
text="edit",
name="buttonEdit",
command=lambda self=self,
node_id=node_id,
tree=tree:
self._on_click_edit(tree, node_id))
# - entry title
entry_title = tk.Entry(frame, name="treeTitle",
state="readonly",
textvariable=self._stringvar_title)
entry_title.bind("<Button-1>",
lambda e, self=self,
node_id=node_id,
tree=tree:
self._on_click_sql(tree, node_id))
# - install
button_expander.grid(row=0, column=0, padx=(0, 5), sticky="w")
button_edit.grid(row=0, column=1, padx=(0, 5), sticky="w")
entry_title.grid(row=0, column=2, sticky="nswe")
# collapsable_frame
self._collapsable_frame = tk.Frame(frame, class_="CollapsableFrame")
self._collapsable_frame.columnconfigure(0, weight=1)
# - install
self._collapsable_frame.grid(row=1, column=2, sticky="w", padx=(0, 20))
# Fill collapsable frame
self._nodebar = self._nodebar_builder.build(self, node_id,
self._collapsable_frame,
file, path, real_path, result,
datatype, description)
def on_map_node(self, tree, node):
pass
def on_destroy_node(self, tree, node):
pass
def on_feed_node(self, tree, node, *args, **kwargs):
pass
def on_expand_node(self, tree, node):
node_id = node["node_id"]
if node_id == 0:
return
self._stringvar_expander.set("-")
self._collapsable_frame.grid()
def on_collapse_node(self, tree, node):
node_id = node["node_id"]
if node_id == 0:
return
self._stringvar_expander.set("+")
self._collapsable_frame.grid_remove()
def _on_click_sql(self, tree, node_id):
tree.collexp(node_id)
def _on_click_edit(self, tree, node_id):
sql = self._stringvar_title.get()
self._parent_view.push_sql(sql, self._formatter)
| 2.65625 | 3 |
tests/test_services.py | lycantropos/monty | 0 | 12795854 |
import pytest
from hypothesis import given
from monty import monty
from tests import strategies
from tests.utils import Secured
@given(strategies.dockerhub_logins,
strategies.invalid_dockerhub_logins)
def test_load_dockerhub_user(dockerhub_login: str,
invalid_dockerhub_login: str) -> None:
user = monty.load_dockerhub_user(dockerhub_login)
assert user['username'] == dockerhub_login
with pytest.raises(ValueError):
monty.load_dockerhub_user(invalid_dockerhub_login)
@given(strategies.github_logins,
strategies.github_access_tokens,
strategies.invalid_github_logins)
def test_load_github_user(github_login: str,
github_access_token: Secured,
invalid_github_login: str) -> None:
user = monty.load_github_user(github_login,
access_token=github_access_token.value)
assert user['login'] == github_login
with pytest.raises(ValueError):
monty.load_github_user(invalid_github_login,
access_token=github_access_token.value)
| 2.21875 | 2 |
code/solverADMMmodel7.py | qingzheli/partial-correlation-based-contrast-pattern-mining | 1 | 12795855 |
#For ICDM review only, please do not distribute
#ALL RIGHTS RESERVED
#ADMM solver for CPM-C model with partial correlation based translation function
import numpy as np
import copy
import time
class myADMMSolver:
def __init__(self,lamb, nw, rho,x,hat_x,rho_update_func=None):
self.rho = rho
self.lamb = lamb
        self.length = nw*(nw+1)/2 #vector length of triangular matrix
self.S = np.cov(np.transpose(x))
self.hatS = np.cov(np.transpose(hat_x))
self.nw = nw;
self.m = x.shape[0]
self.hatm = hat_x.shape[0]
self.theta = np.zeros((nw,nw))
self.hat_theta = np.zeros((nw,nw))
self.Q= np.zeros((nw,nw))
self.hatQ = np.zeros((nw,nw))
self.T =np.zeros((nw,nw)) # \Gamma in the paper
self.hatT = np.zeros((nw,nw)) # \hat \Gamma
self.P= np.zeros((nw,nw))
self.hatP = np.zeros((nw,nw))
self.V = np.zeros((nw,nw))
self.Y = np.zeros((nw,nw))
self.hatY = np.zeros((nw,nw))
self.Z = np.zeros((nw,nw)) # theta & T
self.hatZ = np.zeros((nw,nw))
self.U = np.zeros((nw,nw)) # P & V
self.plist = []
self.dlist = []
self.eprilist = []
self.edualist = []
self.objlist = []
# In[] help/debug methods
def computePartialCorrelation(self, T):
P = np.ones(T.shape)
nw = T.shape[0]
for i in range(nw):
for j in range(i+1,nw):
P[i,j] = -T[i,j]/np.sqrt(T[i,i]*T[j,j])
P[j,i] = P[i,j]
return P
def h(self,theta,S,m):
return 0.5*m*(np.trace(np.dot(S,theta))-np.log(np.linalg.det(theta)))
def obj_overall(self):
V = self.computePartialCorrelation(self.T)-self.computePartialCorrelation(self.hatT)
return self.h(self.T,self.S,self.m)+self.h(self.hatT,self.hatS,self.hatm)+self.lamb*(np.linalg.norm(V)**2)
def check_symmetric(self,a, tol=1e-3):
return np.allclose(a, np.transpose(a), atol=tol)
# In[] update variables
def update_T(self):
LAMBDA,D = np.linalg.eigh(2*self.rho*(self.theta-self.Z)-self.m*self.S)
D = np.matrix(D)
theii = (LAMBDA+np.sqrt(LAMBDA**2+8*self.rho*self.m))/(4*self.rho)
self.T = np.dot(np.dot(D,np.diag(theii)),D.T)
# self.objT.append(self.objective_T())
def update_hatT(self):
LAMBDA,D = np.linalg.eigh(2*self.rho*(self.hat_theta-self.hatZ)-self.hatm*self.hatS)
D = np.matrix(D)
theii = (LAMBDA+np.sqrt(LAMBDA**2+ 8*self.rho*self.hatm))/(4*self.rho)
self.hatT = np.dot(np.dot(D,np.diag(theii)),D.T)
# self.objhatT.append(self.objective_hatT())
def update_theta(self):
theta = np.eye(self.nw)
for i in range(self.nw):
theta[i,i] = self.T[i,i]+self.Z[i,i]
for i in range(self.nw):
for j in range(i+1,self.nw):
c = 1/np.sqrt(theta[i,i]*theta[j,j])
theta[i,j] = (self.T[i,j]+self.Z[i,j]- c*self.P[i,j]-c*self.Y[i,j])/(c**2+1)
theta[j,i] = theta[i,j]
self.theta = theta
assert self.check_symmetric(self.theta)
self.Q = self.computePartialCorrelation(self.theta)
def update_hat_theta(self):
hat_theta = np.eye(self.nw)
for i in range(self.nw):
hat_theta[i,i] = self.hatT[i,i]+self.hatZ[i,i]
for i in range(self.nw):
for j in range(i+1,self.nw):
c = 1/np.sqrt(hat_theta[i,i]*hat_theta[j,j])
hat_theta[i,j] = (self.hatT[i,j]+self.hatZ[i,j]-c*self.hatP[i,j]-c*self.hatY[i,j])/(1+c**2)
hat_theta[j,i] = hat_theta[i,j]
self.hat_theta = hat_theta
assert self.check_symmetric(self.hat_theta)
self.hatQ = self.computePartialCorrelation(self.hat_theta)
def update_V(self):
self.V = self.rho*(self.P-self.hatP-self.U)/(2*self.lamb+self.rho)
assert self.check_symmetric(self.V)
assert np.linalg.norm(np.diag(self.V))==0
def proj2Symmetric(self,A):
n = A.shape[0]
for i in xrange(n):
for j in xrange(i+1,n):
mean = (A[i,j]+A[j,i])/2
# if mean<0:
# mean = 0
A[i,j] = mean
A[j,i] = mean
return A
def update_P(self):
self.P = (self.V+self.hatP+self.U+self.Q-self.Y)/2
for i in range(self.nw):
self.P[i,i] = 1
assert self.check_symmetric(self.P)
def update_hatP(self):
self.hatP = (self.P-self.V-self.U+self.hatQ-self.hatY)/2
for i in range(self.nw):
self.hatP[i,i] = 1
assert self.check_symmetric(self.hatP)
#
def update_duals(self):
self.Y = self.P-self.Q+self.Y
self.hatY = self.hatP-self.hatQ+self.hatY
self.Z = self.T-self.theta+self.Z
self.hatZ = self.hatT-self.hat_theta+self.hatZ
self.U = self.V-self.P+self.hatP+self.U
def CheckConvergence(self,Q_pre,hatQ_pre, theta_pre,hat_theta_pre,V_pre,P_pre,hatP_pre, e_abs, e_rel, verbose):
r1 = self.T -self.theta
r2 = self.hatT-self.hat_theta
r3 = self.V-self.P+self.hatP
r4 = self.P-self.Q
r5 = self.hatP-self.hatQ
allR = np.concatenate((r1,r2,r3,r4,r5))
norm = np.linalg.norm
r = norm(allR)
s1 = self.Q-Q_pre
s2 = self.hatQ-hatQ_pre
s3 = self.V-V_pre
s4 = self.P-P_pre
s5 = self.hatP - hatP_pre
allS = np.concatenate((s1,s2,s3,s4,s5))*self.rho
s = norm(allS)
e_pri = self.nw * e_abs + e_rel * max(norm(self.theta),norm(self.hat_theta), norm(self.T)+norm(self.hatT),norm(self.P),norm(self.hatP),norm(self.V))
e_dual = np.sqrt((self.nw**2)) * e_abs + e_rel * (np.sqrt(self.rho *( norm(self.Z)**2+norm(self.hatZ)**2+norm(self.Y)**2+norm(self.hatY)**2+norm(self.U)**2)))
res_pri = r
res_dual = s
self.plist.append(r)
self.dlist.append(s)
self.eprilist.append(e_pri)
self.edualist.append(e_dual)
stop = (res_pri <= e_pri) and (res_dual <= e_dual)
return (stop, res_pri, e_pri, res_dual, e_dual)
# solve
def __call__(self, eps_abs, eps_rel, verbose,admmMaxIters=1000):
# print '\n solver ADMM model 7: lambdaADMM = ',self.lamb
self.status = 'Incomplete: max iterations reached'
t1 = time.time()
for i in range(admmMaxIters):
self.iter = i
Q_pre = copy.deepcopy(self.Q)
hatQ_pre = copy.deepcopy(self.hatQ)
theta_pre = copy.deepcopy(self.theta)
hat_theta_pre = copy.deepcopy(self.hat_theta)
P_pre = copy.deepcopy(self.P)
hatP_pre = copy.deepcopy(self.hatP)
V_pre = copy.deepcopy(self.V)
try:
self.update_T()
self.update_hatT()
self.update_theta()
self.update_hat_theta()
self.update_V()
self.update_P()
self.update_hatP()
self.update_duals()
self.objlist.append(self.obj_overall())
except np.linalg.LinAlgError as err:
if 'Singular matrix' in str(err):
print 'Encounter LinAlgError: Singular matrix, exit ADMM'
break
else:
raise
#
if i>=admmMaxIters-1:
print 'Incomplete: max iterations reached', i
if i != 0:
stop, res_pri, e_pri, res_dual, e_dual = self.CheckConvergence(Q_pre,hatQ_pre, theta_pre,hat_theta_pre,V_pre,P_pre,hatP_pre, eps_abs, eps_rel, verbose)
if stop:
self.status = 'Optimal'
if verbose:
print "Admm stop early at Iteration ",i
print ' r:', res_pri
print ' e_pri:', e_pri
print ' s:', res_dual
print ' e_dual:', e_dual
break
#
new_rho = self.rho
threshold = 10
if (res_pri>threshold*res_dual):
new_rho = 2*self.rho
elif (threshold*res_pri<res_dual):
new_rho = self.rho/2.0
scale = self.rho / new_rho
self.rho = new_rho
self.U = scale*self.U
self.Y = scale*self.Y
self.hatY = scale*self.hatY
self.Z = scale*self.Z
self.hatZ = scale*self.hatZ
t2 = time.time()
avgIterTime = (t2-t1)/(self.iter+1)
# print " avgPerADMMIterTime",avgIterTime
if self.nw>=50:
saveTime = open('data/KDD/efficiency_nw/model6Time_nw'+str(self.nw)+'.txt','a')
saveTime.write(str(avgIterTime)+' ')
saveTime.close()
retVal = np.zeros([self.length,2])
result = np.asmatrix(self.T[np.triu_indices(self.nw)]).T.getA()
hatresult = np.asmatrix(self.hatT[np.triu_indices(self.nw)]).T.getA()
retVal[:,0] = np.reshape(result,(self.length,))
retVal[:,1] = np.reshape(hatresult,(self.length,))
return retVal
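# --- Illustrative usage sketch (editor addition, not part of the original file) ---
# x and hat_x are (observations x nw) sample matrices, lamb weights the partial-correlation
# difference term and rho is the ADMM penalty; the call returns the upper triangles of the
# two estimated precision matrices stacked as columns. The data below is random, purely
# for illustration.
if __name__ == '__main__':
    nw = 5
    x = np.random.randn(200, nw)
    hat_x = np.random.randn(200, nw)
    solver = myADMMSolver(lamb=0.1, nw=nw, rho=1.0, x=x, hat_x=hat_x)
    upper_triangles = solver(1e-4, 1e-3, verbose=False, admmMaxIters=200)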
| 2 | 2 |
cdd/doctrans.py | SamuelMarks/docstring2class | 0 | 12795856 |
"""
Helper to traverse the AST of the input file, extract the docstring out, parse and format to intended style, and emit
"""
from ast import fix_missing_locations
from copy import deepcopy
from operator import attrgetter
from cdd.ast_utils import cmp_ast
from cdd.cst import cst_parse
from cdd.doctrans_utils import DocTrans, doctransify_cst, has_type_annotations
from cdd.source_transformer import ast_parse
def doctrans(filename, docstring_format, type_annotations, no_word_wrap):
"""
Transform the docstrings found within provided filename to intended docstring_format
:param filename: Python file to convert docstrings within. Edited in place.
:type filename: ```str```
:param docstring_format: Format of docstring
:type docstring_format: ```Literal['rest', 'numpydoc', 'google']```
:param type_annotations: True to have type annotations (3.6+), False to place in docstring
:type type_annotations: ```bool```
:param no_word_wrap: Whether word-wrap is disabled (on emission).
:type no_word_wrap: ```Optional[Literal[True]]```
"""
with open(filename, "rt") as f:
original_source = f.read()
node = ast_parse(original_source, skip_docstring_remit=False)
original_module = deepcopy(node)
node = fix_missing_locations(
DocTrans(
docstring_format=docstring_format,
word_wrap=no_word_wrap is None,
type_annotations=type_annotations,
existing_type_annotations=has_type_annotations(node),
whole_ast=original_module,
).visit(node)
)
if not cmp_ast(node, original_module):
cst_list = list(cst_parse(original_source))
# Carefully replace only docstrings, function return annotations, assignment and annotation assignments.
# Maintaining all other existing whitespace, comments, &etc.
doctransify_cst(cst_list, node)
with open(filename, "wt") as f:
f.write("".join(map(attrgetter("value"), cst_list)))
__all__ = ["doctrans"]
| 2.90625 | 3 |
flikrWallpaper.py | DaemonF/Flikr-Wallpaper-Downloader | 0 | 12795857 |
import urllib, signal, os
import simplejson # install with pip
try:
import config
except:
print ("You must configure config.py.template and rename\n"
" it to config.py to use this tool.")
exit()
flikrApiUrl = "http://api.flickr.com/services/rest/?method=flickr.photos.search&api_key=2a5a2148ffc21588facb65536aa91b7d&extras=dims_o%2Curl_o%2Cviews&per_page=500&media=photos&format=json&nojsoncallback=1"
for search in config.searches:
page = 1
while True:
print "Grabbing new page from Flikr API"
recentPhotos = simplejson.loads(urllib.urlopen("%s&text=%s&page=%s"%(flikrApiUrl, search, page)).read())
for photo in recentPhotos['photos']['photo']:
if 'url_o' in photo and 'id' in photo and config.matchesCriteria(photo):
filename = config.downloadAs(photo)
if os.path.exists(filename):
print "Photo '%s' has already been downloaded. Ignoring."%filename
else:
try:
print "Downloading %s"%filename
image = urllib.urlopen(photo['url_o']).read()
with open(filename, 'w+') as f:
f.write(image)
print "Done."
except KeyboardInterrupt:
raise
except:
print "Failed to download %s"%filename
page += 1
if page > recentPhotos['photos']['pages']:
break
print "Downloaded all new photos from the recent photo feed that match your criteria."
| 2.59375 | 3 |
hardhat/recipes/python/attrs.py | stangelandcl/hardhat | 0 | 12795858 |
from .base import SetupPyRecipe
class AttrsRecipe(SetupPyRecipe):
def __init__(self, *args, **kwargs):
super(AttrsRecipe, self).__init__(*args, **kwargs)
self.sha256 = 'e0d0eb91441a3b53dab4d9b743eafc1a' \
'c44476296a2053b6ca3af0b139faf87b'
self.pythons = ['python3']
self.name = 'attrs'
self.version = '18.1.0'
self.url = 'https://files.pythonhosted.org/packages/e4/ac/a04671e118b57bee87dabca1e0f2d3bda816b7a551036012d0ca24190e71/attrs-18.1.0.tar.gz'
| 1.820313 | 2 |
BOJ/Q4949.py | hyungilk/ProblemSolving | 0 | 12795859 |
# text = 'So when I die (the [first] I will see in (heaven) is a score list).'
# Define a Stack class
class Stack():
def __init__(self):
self.stack = []
def push(self, data):
self.stack.append(data)
def top(self):
if len(self.stack) == 0:
return -1
else:
return self.stack[-1]
def pop(self):
if len(self.stack) == 0:
return -1
else:
tmp = self.stack.pop()
return tmp
# Main input loop
while True:
    # Read and store the input line
text = input().rstrip()
    # Break condition: a line containing only '.'
if len(text) == 1 and text[0] == '.':
break
    # Instantiate the stack
stacks = Stack()
    # Flag used to end the for loop early
flag_true = True
    # Plan for the for loop:
    # 1. Iterate over every character in the text
    # 2. Push '(' or '[' onto the stack
    # 3. For ')' or ']', check (using top) whether the matching opening symbol is on the stack and pop it if so
    # 4. Otherwise, break out of the for loop and use the flag to decide the answer
for ch in text:
if ch == '(':
stacks.push(ch)
continue
if ch == '[':
stacks.push(ch)
continue
if ch == ')' and stacks.top() != '(':
flag_true = False
break
if ch == ']' and stacks.top() != '[':
flag_true = False
break
if ch == ')' and stacks.top() == '(':
stacks.pop()
continue
if ch == ']' and stacks.top() == '[':
stacks.pop()
continue
if flag_true == True and len(stacks.stack) == 0:
print('yes')
else:
print('no')
| 3.515625 | 4 |
Python Datascience/chapter8/app.py | Haji-Fuji/iGEM2018 | 0 | 12795860 |
import flask
import pandas as pd
from sklearn.linear_model import LinearRegression
train_X = pd.read_csv("blood_fat.csv")
train_y = train_X.pop("blood fat")
model = LinearRegression().fit(train_X, train_y)
app = flask.Flask(__name__)
@app.route("/")
def index():
return app.send_static_file("index.html")
@app.route("/bloodfat")
def bloodfat():
age = flask.request.args.get("age", default=None, type=int)
weight = flask.request.args.get("weight", default=None, type=float)
if age is None or weight is None:
return flask.jsonify({
"code": 400,
"msg": "Bad Request"
})
x = [age, weight]
    blood_fat = model.predict([x])[0]  # predict expects a 2-D array of samples
return flask.jsonify({
"code": 200,
"msg": "OK",
"result": blood_fat
})
if __name__ == "__main__":
app.run(debug=True)
| 3.125 | 3 |
Sparrow/action/action_action.py | eleme/Sparrow | 75 | 12795861 |
from django.forms.models import model_to_dict
from Sparrow.action.common_action import *
from dal.dao.action_dao import ActionDao
from django.contrib.auth.decorators import login_required
from Sparrow.action.response import *
from django.http.request import *
from Sparrow.action.track import track
from dal.models import *
class ActionAction:
@track(ActionType.ActionDailyActiveInfo)
def daily_active_info(request: HttpRequest):
daily_active_info = ActionDao.get_daily_active_info(14)
response = Response(Success, 'Success', daily_active_info)
return HttpResponse(response.to_json_with_mm_dd(), content_type='application/json')
@track(ActionType.ActionTopActiveUserInfo)
def top_active_users_info(request: HttpRequest):
top_active_users_info = ActionDao.get_top_active_users_info(10)
response = Response(Success, 'Success', top_active_users_info)
return HttpResponse(response.toJson(), content_type='application/json')
@track(ActionType.ActionTopActiveApisInfo)
def top_active_apis_info(request: HttpRequest):
top_active_apis_info = ActionDao.get_top_active_apis_info(10)
response = Response(Success, 'Success', top_active_apis_info)
        return HttpResponse(response.toJson(), content_type='application/json')
| 1.960938 | 2 |
dashboard/utils/browse/book.py | TheBoringDude/zeta | 0 | 12795862 |
from dashboard.utils.finder import Finder
import requests
class Book(Finder):
def search_book(self, query):
# search and get the response
resp = requests.get(self.open_library.replace("[query]", query)).json()["docs"]
        return resp
| 2.46875 | 2 |
monypy/__init__.py | ybibaev/monypy | 6 | 12795863 |
from .doc import Doc
from .exceptions import DocumentDoesNotExist, DocumentInitDataError
from .manager import Manager
__version__ = '2.0.1'
__all__ = (
'Doc',
'Manager',
'DocumentInitDataError',
'DocumentDoesNotExist',
)
| 1.609375 | 2 |
car_core/tests/test_py_common.py | vstucar/vstucar | 0 | 12795864 |
#!/usr/bin/python
# This file is licensed under MIT license.
# See the LICENSE file in the project root for more information.
import unittest
import rostest
import rosunit
import numpy as np
from numpy.testing import assert_almost_equal
from std_msgs.msg import Header
from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion
from nav_msgs.msg import Path
from car_core.common import msgs_helpers, geom_helpers
def get_poses_helper(points):
poses = []
for p in points:
pose = PoseStamped()
pose.pose.position = Point(p[0], p[1], p[2])
poses.append(pose)
return poses
class TestMsgsHelpers(unittest.TestCase):
def test_quaterion_to_array_ok(self):
q = Quaternion(1,2,3,4)
arr = msgs_helpers.quaterion_to_array(q)
assert_almost_equal(arr, np.array([1,2, 3, 4]))
self.assertTrue(True)
def test_point_to_array_ok(self):
p = Point(1,2,3)
arr = msgs_helpers.point_to_array(p)
assert_almost_equal(arr, np.array([1,2]))
self.assertTrue(True)
def test_path_poses_to_array_ok(self):
poses = get_poses_helper([[1,2,3],
[4,5,6],
[7,8,9]])
arr = msgs_helpers.path_poses_to_array(poses)
assert_almost_equal(arr, np.array([[1,2],
[4,5],
[7,8]]))
self.assertTrue(True)
def test_array_to_point_ok(self):
arr = np.array([1,2])
point = msgs_helpers.array_to_point(arr)
self.assertEqual(point, Point(1,2,0))
def test_array_to_path_poses_ok(self):
arr = np.array([[1,2],
[4,5],
[6,7]])
poses = msgs_helpers.array_to_path_poses(arr)
poses_true = get_poses_helper([[1,2,0],
[4,5,0],
[6,7,0]])
        self.assertEqual(poses, poses_true)
class TestGeomHelpers(unittest.TestCase):
def test_get_closest_path_point_regular(self):
poses = np.array([[0,0],
[1,1],
[2,2],
[3,3]])
point = np.array([0.9, 0.9])
index = geom_helpers.get_closest_path_point(poses, point)
self.assertEqual(index, 1)
def test_get_closest_path_point_far(self):
poses = np.array([[0,0],
[1,1],
[2,2],
[3,3]])
point = np.array([-1, 3])
index = geom_helpers.get_closest_path_point(poses, point)
self.assertEqual(index, 1)
def test_get_closest_path_point_first(self):
poses = np.array([[0,0],
[1,1],
[2,2],
[3,3]])
point = np.array([-1, 1])
index = geom_helpers.get_closest_path_point(poses, point)
self.assertEqual(index, 0)
def test_get_closest_path_point_last(self):
poses = np.array([[0,0],
[1,1],
[2,2],
[3,3]])
point = np.array([4, 4])
index = geom_helpers.get_closest_path_point(poses, point)
self.assertEqual(index, 3)
def test_get_closest_path_point_single_point(self):
poses = np.array([[0,0]])
point = np.array([4, 4])
index = geom_helpers.get_closest_path_point(poses, point)
self.assertEqual(index, 0)
def test_get_closest_path_point_matching_points(self):
poses = np.array([[0,0],
[1,1],
[1,1],
[3,3]])
point = np.array([1.1, 1.1])
index = geom_helpers.get_closest_path_point(poses, point)
self.assertEqual(index, 1)
if __name__ == '__main__':
import rosunit
rosunit.unitrun("car_core", 'test_msgs_helpers', TestMsgsHelpers)
rosunit.unitrun("car_core", 'test_geom_helpers', TestGeomHelpers) | 2.5 | 2 |
QaA/ask/test/FriendSearchViewTest.py | jedrzejkozal/QuestionsAndAnswers | 0 | 12795865 |
from django.shortcuts import reverse
from django.test import TestCase
from ..test.FriendsMixIn import *
from ..test.LoginMixIn import *
class FriendSearchViewTest(TestCase, FriendsMixIn, LoginMixIn):
def setUp(self):
self.create_users()
self.make_friends()
self.create_invitations()
def test_search_for_user_with_no_friends_returns_empty_list(self):
self.login_user(username="IhaveNoFriends")
form = {'search_text': 'TestUser'}
response = self.client.post(reverse('ask:friends.search'), data=form)
self.assertEqual(list(response.context['friends']), [])
def test_search_for_user2_returns_matching_users(self):
self.login_user(username="TestUser2")
form = {'search_text': 'TestUser'}
response = self.client.post(reverse('ask:friends.search'), data=form)
self.assertEqual(list(response.context['friends']), [
self.user4, self.user1, self.user5])
def test_search_for_user2_with_one_matching_user_returns_one_user(self):
self.login_user(username="TestUser2")
form = {'search_text': 'TestUser4'}
response = self.client.post(reverse('ask:friends.search'), data=form)
self.assertEqual(list(response.context['friends']), [self.user4])
def test_search_for_user2_invalid_searchtext_returns_empty_list(self):
self.login_user(username="TestUser2")
form = {'search_text': 'TestUser3'}
response = self.client.post(reverse('ask:friends.search'), data=form)
self.assertEqual(list(response.context['friends']), [])
| 2.59375 | 3 |
qt__pyqt__pyside__pyqode/pyqt5__draw_text_with_word_wrap.py | DazEB2/SimplePyScripts | 117 | 12795866 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from PyQt5.QtGui import QPixmap, QPainter, QFont
from PyQt5.QtWidgets import QApplication, QLabel
from PyQt5.QtCore import Qt, QRect
app = QApplication([])
text = "Hello World!"
pixmap = QPixmap(180, 130)
pixmap.fill(Qt.white)
painter = QPainter(pixmap)
painter.setFont(QFont('Arial', 12))
rect = QRect(0, 0, 70, 50)
painter.drawRect(rect)
painter.drawText(rect, Qt.TextWordWrap, text)
rect = QRect(0, 60, 70, 50)
painter.drawRect(rect)
painter.drawText(rect, Qt.AlignLeft, text)
painter.end()  # finish painting before the pixmap is used by the label
w = QLabel()
w.setPixmap(pixmap)
w.show()
app.exec()
| 2.765625 | 3 |
SOMEChecker/some_finder.py | richard-clifford/Useful-Scripts | 0 | 12795867 |
import requests
import re
import ssl
targets = open("targets.txt", 'r').readlines()
for target in targets:
target = target.rstrip()
headers = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1)'}
response = requests.get(target.split('|')[0], headers=headers).text
scanjs = re.findall(r'src="([^"]+\.js|json)?"',response)
for i in scanjs:
new_target = target + i
if(re.match(r'(http|https)\:\/\/',i)):
new_target = i
js_file_request = requests.get(new_target, headers=headers).text
callback_possibru = re.findall(r'(callback|jsonp)', js_file_request)
for x in callback_possibru:
print " --- VULN --- \n"
print "["+target+"] " + new_target + " " + x | 3 | 3 |
userena/contrib/umessages/signals.py | jdavidagudelo/django-userena-ce | 86 | 12795868 |
from django.dispatch import Signal
# Arguments: "msg"
email_sent = Signal()
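# Illustrative sketch (editor addition): the signal is expected to be sent with a "msg"
# keyword and received by handlers that accept **kwargs, e.g.:
#   email_sent.send(sender=None, msg=message)
#   def on_email_sent(sender, msg, **kwargs): ...
#   email_sent.connect(on_email_sent)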
| 1.234375 | 1 |
simple_repr/__init__.py | mr-strawberry66/python-repr-generation | 1 | 12795869 |
"""Expose public methods of simple_repr module."""
from .simple_repr import SimpleRepr
__all__ = ["SimpleRepr"]
| 1.28125 | 1 |
data/real/so/dump_data.py | destinyyzy/tf_rmtpp | 40 | 12795870 |
from __future__ import print_function
import psycopg2 as pg
import getpass as G
from collections import namedtuple
output_events = 'events.txt'
output_time = 'time.txt'
output_userids = 'userids.txt'
output_badge_labels = 'badges.csv'
SO_events = namedtuple('SO_events', ['times', 'events', 'badge_map', 'userids'])
def write():
try:
        with pg.connect(database='stackexchange', user='utkarsh', password=G.getpass('DB password: '), host='psql-science') as conn:
cur = conn.cursor()
# Ordering is important for mapping results back to the data, if needed.
cur.execute('''SELECT userid, BadgeNames, Timestamp FROM so_data ORDER BY userid''')
badge_map = {}
badge_count = 1
userids = []
with open(output_events, 'w') as f_events, open(output_time, 'w') as f_time:
for row in cur:
userid, events, times = row[0], row[1], row[2]
if len(set(times)) != len(times):
# If there are any repeated events, just skip the user.
continue
userids.append(userid)
event_ids = []
for badge in events:
if badge not in badge_map:
badge_map[badge] = badge_count
badge_count += 1
event_ids.append(badge_map[badge])
f_events.write(' '.join(str(x) for x in event_ids) + '\n')
# Can change times to something more granular than seconds.
f_time.write(' '.join(str(x) for x in times) + '\n')
with open(output_userids, 'w') as f_userids:
f_userids.write('userid\n')
f_userids.writelines([str(x) + '\n' for x in userids])
with open(output_badge_labels, 'w') as f_badges:
f_badges.write('id, badge\n')
for badge in badge_map:
f_badges.write('{}, {}\n'.format(badge_map[badge], badge))
except pg.OperationalError:
print('Not running on DB.')
def read_events():
with open(output_events) as f_events:
events = [[int(y) for y in x.split()] for x in f_events]
with open(output_time) as f_times:
times = [[float(y) for y in x.split()] for x in f_times]
with open(output_userids) as f_userids:
next(f_userids)
userids = [int(x) for x in f_userids]
badge_map = {}
with open(output_badge_labels) as f_badge_labels:
next(f_badge_labels)
for row in f_badge_labels:
id, name = row.split(',')
badge_map[int(id)] = name.strip()
return SO_events(events=events, times=times, badge_map=badge_map, userids=userids)
if __name__ == '__main__':
write()
| 2.859375 | 3 |
src/oauth2_routes.py | CSIRO-enviro-informatics/cosmoz-rest-wrapper | 0 | 12795871 |
# -*- coding: utf-8 -*-
"""
Copyright 2019 CSIRO Land and Water
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from inspect import isawaitable
from os import getenv
from sanic.response import redirect, text
from spf import SanicPluginsFramework
from spf.plugins.contextualize import contextualize
from sanic_oauthlib.client import oauthclient
from sanic_session_spf import session as session_plugin
from filesystem_session_interface import FilesystemSessionInterface
from util import load_env
#having these in a module-local _hopefully_ shouldn't be a problem
#using them async might be an issue, but maybe not
OAUTH2_REMOTES = {}
def add_oauth_plugin(app):
spf = SanicPluginsFramework(app)
try:
oauth = spf.register_plugin(oauthclient)
except ValueError as v:
_, oauth = v.args
return oauth
def create_oauth2_remote(app, oauth=None):
if not oauth:
oauth = add_oauth_plugin(app)
consumer_key = getenv("OAUTH2_CSIRO_LDAP_CONSUMER_KEY", "example1")
consumer_secret = getenv("OAUTH2_CSIRO_LDAP_CONSUMER_SECRET", "password1")
remote = oauth.remote_app(
'csiro-to-ldap2',
consumer_key=consumer_key,
consumer_secret=consumer_secret,
request_token_params={'scope': 'profile'},
base_url='https://oauth.esoil.io/api/',
access_token_method='POST',
access_token_url='https://oauth.esoil.io/oauth2/token',
authorize_url='https://oauth.esoil.io/oauth2/authorize'
)
OAUTH2_REMOTES['csiro-to-ldap2'] = remote
return remote
def add_to_app(app, oauth=None, remote=None):
load_env()
if not oauth:
oauth = add_oauth_plugin(app)
if not remote:
remote = create_oauth2_remote(app, oauth)
spf = SanicPluginsFramework(app)
try:
session_interface = FilesystemSessionInterface()
spf.register_plugin(session_plugin, interface=session_interface)
except ValueError:
pass
try:
ctx = spf.register_plugin(contextualize)
except ValueError as v:
_, ctx = v.args
# @app.route('/')
# async def index(request):
# if 'csiro-to-ldap_oauth' in session:
# ret = await oauth.get('email')
# if isinstance(ret.data, dict):
# return json(ret.data)
# return str(ret.data)
# return redirect(app.url_for('login'))
@app.route('/create_oauth2')
@remote.autoauthorize
async def create_oauth2(request, context):
override_server_name = getenv("SANIC_OVERRIDE_SERVER_NAME", "localhost:9001")
callback = request.app.url_for('oauth2_auth', _external=True, _scheme='http', _server=override_server_name)
proxy_route_base = getenv("SANIC_PROXY_ROUTE_BASE", "")
if len(proxy_route_base):
callback = callback.replace("/oauth2/auth", "/{}oauth2/auth".format(proxy_route_base))
print("In AutoAuthorize. Asking for request_token using callback: {}".format(callback))
after_this = request.args.get("after_authorized", "/apikey")
state = {"remote_app": 'csiro-to-ldap2', "oauth_version": "2.0", "after_authorized": after_this}
#Oauth1 cannot put state in the request, we need to put it in the session
shared_context = context.shared
shared_request_context = shared_context.request[id(request)]
session = shared_request_context.get('session', {})
session['oauth_state'] = state
return {'callback': callback}
@ctx.route('/oauth2/logout')
def logout(request, context):
shared_context = context.shared
shared_request_context = shared_context.request[id(request)]
session = shared_request_context.get('session', {})
session.pop('csiro-to-ldap2_oauth', None)
return redirect(app.url_for('index'))
@app.route('/oauth2/auth')
@remote.authorized_handler
async def oauth2_auth(request, data, context):
if data is None:
return 'Access denied: error=%s' % (
request.args['error']
)
resp = {k: v[0] if isinstance(v, (tuple, list)) else v for k, v in data.items()}
shared_context = context.shared
shared_request_context = shared_context.request[id(request)]
session = shared_request_context.get('session', {})
state = session.get('oauth_state', None)
after_authorized = state.get('after_authorized', "/apikey") if state else "/apikey"
if 'access_token' in resp:
session['csiro-to-ldap2_oauth'] = resp
if state:
state['access_token_session_key'] = "csiro-to-ldap2_oauth"
session['oauth_state'] = state
return redirect(after_authorized)
@app.route('/oauth2/method/<name>')
async def oauth2_method(request, name):
func = getattr(remote, name)
ret = func('method')
if isawaitable(ret):
ret = await ret
return text(ret.raw_data)
def make_token_getter(_remote):
context = oauth.context
shared_context = context.shared
@_remote.tokengetter
async def get_oauth_token():
nonlocal context, shared_context
raise NotImplementedError("Out-of-order token getter is not implemented. Pass the token to the requester when its required.")
# if 'dev_oauth' in session:
# resp = session['dev_oauth']
# return resp['oauth_token'], resp['oauth_token_secret']
make_token_getter(remote)
return remote
#TODO: maybe cache this to prevent repeated hits to the api?
async def test_oauth2_token(client_name, access_token):
if client_name is None or client_name.startswith("_") or \
client_name.lower() == "none":
# use the first one. This is a bit hacky.
client_name = next(iter(OAUTH2_REMOTES.keys()))
remote = OAUTH2_REMOTES.get(client_name, None)
if remote is None:
raise RuntimeError("Cannot get oauth2 remote with name \"{}\"".format(client_name))
resp = await remote.get("/api/method", token=access_token)
if resp.status in (200, 201):
if resp.data is not None and isinstance(resp.data, dict):
method = str(resp.data.get("method")).upper()
if method == "GET":
return True
return False
| 1.851563 | 2 |
topologic/embedding/tsne.py | microsoft/topologic | 24 | 12795872 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import numpy as np
from typing import Union
from sklearn.manifold import TSNE
def tsne(
embedding: np.ndarray,
num_components: int = 2,
perplexity: float = 30.0,
early_exaggeration: float = 12.0,
learning_rate: float = 200.0,
num_iterations: int = 1000,
num_iterations_without_progress: int = 300,
min_grad_norm: float = 1e-7,
metric: str = "euclidean",
init: str = "random",
verbose: int = 1,
random_state: Union[int, np.random.RandomState, None] = None,
method: str = 'barnes_hut',
angle: float = 0.5
) -> np.ndarray:
"""
t-distributed Stochastic Neighbor Embedding.
t-SNE is a tool to visualize high-dimensional data. It converts
similarities between data points to joint probabilities and tries
to minimize the Kullback-Leibler divergence between the joint
probabilities of the low-dimensional embedding and the
high-dimensional data. t-SNE has a cost function that is not convex,
i.e. with different initializations we can get different results.
It is highly recommended to use another dimensionality reduction
method (e.g. PCA for dense data or TruncatedSVD for sparse data)
to reduce the number of dimensions to a reasonable amount (e.g. 50)
if the number of features is very high. This will suppress some
noise and speed up the computation of pairwise distances between
samples.
    :param numpy.ndarray embedding: The embedding to which t-SNE will be applied
:param int num_components: Dimension of the embedded space. Default 2
:param float perplexity: The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
usually require a larger perplexity. Consider selecting a value
between 5 and 50. The choice is not extremely critical since t-SNE
is quite insensitive to this parameter. Default 30.0
:param float early_exaggeration: Controls how tight natural clusters in the original space are in
the embedded space and how much space will be between them. For
larger values, the space between natural clusters will be larger
in the embedded space. Again, the choice of this parameter is not
very critical. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high. Default 12.0
:param float learning_rate: The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
the learning rate is too high, the data may look like a 'ball' with any
point approximately equidistant from its nearest neighbours. If the
learning rate is too low, most points may look compressed in a dense
cloud with few outliers. If the cost function gets stuck in a bad local
minimum increasing the learning rate may help. Default 200.0
:param int num_iterations: Maximum number of iterations for the optimization. Should be at
least 250. Default 1000
:param int num_iterations_without_progress: Maximum number of iterations without progress before we abort the
optimization, used after 250 initial iterations with early
exaggeration. Note that progress is only checked every 50 iterations so
this value is rounded to the next multiple of 50. Default 300
:param float min_grad_norm: If the gradient norm is below this threshold, the optimization will
be stopped. Default 1e-7
:param metric: The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them. The default is "euclidean" which is
interpreted as squared euclidean distance. Default 'euclidean'
:type metric: Union[str, Callable]
:param init: Initialization of embedding. Possible options are 'random', 'pca',
and a numpy array of shape (n_samples, num_components).
PCA initialization cannot be used with precomputed distances and is
usually more globally stable than random initialization. Default 'random'
:type init: Union[string, numpy.ndarray]
:param int verbose: Verbosity level. Default 1
:param random_state: If int, random_state is the seed used by the random number
generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`. Note that different initializations might result in
different local minima of the cost function.
:type random_state: Optional[Union[int, numpy.random.RandomState]]
:param str method: By default the gradient calculation algorithm uses Barnes-Hut
approximation running in O(NlogN) time. method='exact'
will run on the slower, but exact, algorithm in O(N^2) time. The
exact algorithm should be used when nearest-neighbor errors need
to be better than 3%. However, the exact method cannot scale to
millions of examples. Default 'barnes_hut'
:param float angle: Only used if method='barnes_hut'
        This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
computation time and angle greater 0.8 has quickly increasing error. Default 0.5
    :return: The embedding of the input data in the low-dimensional space, an array of
        shape (n_samples, num_components)
:rtype: numpy.ndarray
"""
if embedding is None:
raise ValueError('embedding must be specified but was None')
if not num_components:
raise ValueError('num_components must be specified but was None')
model = TSNE(
n_components=num_components,
perplexity=perplexity,
early_exaggeration=early_exaggeration,
learning_rate=learning_rate,
n_iter=num_iterations,
n_iter_without_progress=num_iterations_without_progress,
min_grad_norm=min_grad_norm,
metric=metric,
init=init,
verbose=verbose,
random_state=random_state,
method=method,
angle=angle
)
return model.fit_transform(embedding)
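# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# Random data stands in for a real embedding; 100 points in 50 dimensions are reduced to 2.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    high_dim = rng.randn(100, 50)
    low_dim = tsne(high_dim, num_components=2, perplexity=15.0, random_state=0)
    print(low_dim.shape)  # (100, 2)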
| 3.203125 | 3 |
tests/test.py | sokolegg/titanoboa | 0 | 12795873 |
import unittest
import os
import sequentia as seq
import pandas as pd
class TestProjector(unittest.TestCase):
def setUp(self):
demo = {'date': ['01-01-2019', '01-03-2019', '01-04-2019'],
'temperature': [1, 3, 4]}
df = pd.DataFrame(demo)
df['date'] = pd.to_datetime(df['date'])
df = df.set_index('date')
self.df = df
def test_demo_missed(self):
# 2nd january is lacked
h = seq.Historical(self.df)
daily = h.interpolate('linear')['12-31-2018','01-06-2019',1]
missed_must_be = 2
print(daily.head())
self.assertEqual(daily['temperature']['01-02-2019'], missed_must_be)
def test_expand(self):
h = seq.Historical(self.df).expand('month', 'year')
print(h.head())
# def test_fragmentation(self):
# demo = {'date': ['01-01-2019', '01-03-2019', '01-04-2019'],
# 'temperature': [1, 3, 4]}
# h = tnb.Historical(df)
# f = h.fragmentate(['28days','1day'], names=['past', 'target'])[::'1day']
# past_aggr = {
# 'weekly': lambda x: x[::7],
# 'max': lambda x: x.max(),
# 'std': lambda x: x.std()
# }
# result = f.apply(past=past_aggr, target=lambda x: x[0])
# result =
if __name__ == '__main__':
    unittest.main()
| 2.703125 | 3 |
part_2/week_14/bubbleSort.py | eduardovivi/Intro-to-Computer-Science-with-Python-Part-1-and-2-IME-USP-Coursera | 16 | 12795874 |
def bubble_sort(lista):
for passnum in range(len(lista)-1,0,-1):
for i in range(passnum):
if lista[i]>lista[i+1]:
temp = lista[i]
lista[i] = lista[i+1]
lista[i+1] = temp
print(lista)
    return lista
| 4.03125 | 4 |
src/main.py | anonymousicml2021/paper2888 | 85 | 12795875 |
import pathlib
import sys
from torchvision.utils import save_image
curr_path = pathlib.Path(__file__).parent.absolute()
sys.path.insert(0, str(curr_path / 'better_corruptions'))
import argparse
import os
from pathlib import Path
import cox.store
import cox.utils
import dill
import json
import numpy as np
import torch as ch
from robustness import datasets, defaults, loaders, model_utils, train
from robustness.tools import breeds_helpers
from torch import nn
from torchvision import models
from torchvision.datasets import CIFAR10
from . import boosters, constants
from .utils import custom_datasets, LinearModel
from uuid import uuid4
# ch.set_default_tensor_type(ch.cuda.FloatTensor)
BOOSTING_FP = 'boosting.ch'
parser = argparse.ArgumentParser(conflict_handler='resolve')
parser = defaults.add_args_to_parser(defaults.CONFIG_ARGS, parser)
parser = defaults.add_args_to_parser(defaults.MODEL_LOADER_ARGS, parser)
parser = defaults.add_args_to_parser(defaults.TRAINING_ARGS, parser)
parser = defaults.add_args_to_parser(defaults.PGD_ARGS, parser)
# Custom arguments
parser.add_argument('--boosting', choices=['none', 'class_consistent', '3d'],
default='class_consistent',
                    help='Type of boosting to apply (patch, 3D texture, or none)')
parser.add_argument('--no-tqdm', type=int, default=1, choices=[0, 1],
help='Do not use tqdm.')
parser.add_argument('--exp-name', type=str, required=False)
parser.add_argument('--augmentations', type=str, default=None, help='e.g. fog,gaussian_noise')
parser.add_argument('--dataset', choices=['cifar', 'imagenet', 'entity13', 'living17', 'solids', 'city'],
default='imagenet')
parser.add_argument('--info-dir', type=str, help='Where to find (or download) info files for breeds')
parser.add_argument('--patch-size', type=int, default=70)
parser.add_argument('--training-mode', type=str, choices=['joint','model','booster'])
parser.add_argument('--arch', type=str, default='resnet18')
parser.add_argument('--lr', type=float, default=0.005)
parser.add_argument('--patch-lr', type=float, default=0.005)
parser.add_argument('--pytorch-pretrained', action='store_true')
parser.add_argument('--save-freq', type=int, default=50,
help="How frequently we should save images")
parser.add_argument('--save-only-last', action='store_true',
help="Only keep the last visualizations instead of all")
parser.add_argument('--resume', action='store_true',
help='Whether to resume training the DataAugmentedModel or not.'
'Useful to continue training if job is pre-empted.'
'(Overrides the one in robustness.defaults)')
parser.add_argument('--model-path', type=str, default=None,
help='Path to a checkpoint to load (useful for training a patch using a pretrained model).')
parser.add_argument('--zipped', action='store_true')
parser.add_argument('--apply-booster-transforms', type=int, default=1, choices=[0, 1],
help='Apply random transforms to the booster.')
parser.add_argument('--debug', action='store_true', help='Print debug stuff')
parser.add_argument('--json-config', help='Path to a JSON config file **that will override argparse args**')
## Arguments for 3D boosters:
parser.add_argument('--single-class', type=int, help="Whether to act "
"in single-class mode. If given, will be used as a fixed "
"target class (only optimize ONE texture across all images)")
parser.add_argument('--num-texcoord-renderers', default=1, type=int)
parser.add_argument('--forward-render', action='store_true',
help="Use blender rendering on forward pass instead of matmul")
parser.add_argument('--add-corruptions', action='store_true',
help="Add corruptions in the loop (see constants.py for details)")
# Render configuration
parser.add_argument('--render-samples', type=int, default=1)
parser.add_argument('--custom-file', help='If given, use object from file instead of Cube')
# Zoom (bigger = more zoomed out)
parser.add_argument('--min-zoom', type=int, default=20, help="Minimum zoom (i.e., most zoomed in)")
parser.add_argument('--max-zoom', type=int, default=40, help="Maximum zoom (i.e., most zoomed out)")
# Lighting
parser.add_argument('--min-light', type=float, default=0.5, help="Minimum lighting (darkest)")
parser.add_argument('--max-light', type=float, default=0.5, help="Maximum lighting (lightest)")
"""
Example usage:
python main.py --arch resnet50 --dataset cifar --batch-size 64 --out-dir outdir
--exp-name tmp --patch-size 10 --patch-lr 0.01 --training-mode joint
"""
def get_dataset_and_loaders(args):
if args.dataset == 'solids':
ds = datasets.ImageNet(args.data,
custom_class=custom_datasets.SolidColors,
custom_class_args={'image_size': constants.DS_TO_DIM[args.dataset]})
elif args.dataset == 'city':
ds = datasets.ImageNet(args.data)
elif args.dataset == 'cifar':
ds = datasets.CIFAR('/tmp')
elif args.dataset == 'imagenet':
ds = datasets.ImageNet(args.data)
if args.zipped:
ds.custom_class = 'Zipped'
elif args.dataset == 'entity13':
split = breeds_helpers.make_entity13(args.info_dir)[1][0]
ds = datasets.CustomImageNet(args.data, split)
elif args.dataset == 'living17':
split = breeds_helpers.make_living17(args.info_dir)[1][0]
ds = datasets.CustomImageNet(args.data, split)
else:
raise NotImplementedError
# TODO: with_index
train_loader, val_loader = ds.make_loaders(batch_size=args.batch_size,
val_batch_size=args.batch_size,
workers=args.workers,
data_aug=True)
return ds, (train_loader, val_loader)
def get_boosted_model(args, ds):
is_pt_model = args.arch in constants.NAME_TO_ARCH and args.dataset == 'imagenet'
arch = constants.NAME_TO_ARCH[args.arch](args.pytorch_pretrained) if is_pt_model else args.arch
num_classes = 1 if args.single_class else ds.num_classes
if arch == 'linear':
arch = LinearModel(num_classes, constants.DS_TO_DIM[args.dataset])
kwargs = {'arch': arch, 'dataset': ds, 'resume_path': args.model_path,
'add_custom_forward': is_pt_model or args.arch=='linear',
'pytorch_pretrained': args.pytorch_pretrained}
model, _ = model_utils.make_and_restore_model(**kwargs)
    # Wrap the model with DataAugmentedModel even if there are no corruptions,
    # for consistency when loading from checkpoints.
model = boosters.DataAugmentedModel(model, ds.ds_name,
args.augmentations.split(',') if args.augmentations else [])
    # don't pass checkpoint to train_model to avoid resuming epoch, optimizers, etc.
if args.boosting == 'class_consistent':
boosting_path = Path(args.out_dir) / BOOSTING_FP
if boosting_path.exists():
booster = ch.load(boosting_path)
else:
dim = constants.DS_TO_DIM[args.dataset]
booster = boosters.ClassConsistentBooster(ds.num_classes, dim,
constants.PATCH_TRANSFORMS,
args.patch_size,
model,
apply_transforms=args.apply_booster_transforms)
model = boosters.BoostedModel(model, booster, args.training_mode)
elif args.boosting == '3d':
boosting_path = Path(args.out_dir) / BOOSTING_FP
if boosting_path.exists():
booster = ch.load(boosting_path)
else:
dim = constants.DS_TO_DIM[args.dataset]
render_options = {
'min_zoom': args.min_zoom,
'max_zoom': args.max_zoom,
'min_light': args.min_light,
'max_light': args.max_light,
'samples': args.render_samples
}
corruptions = constants.THREE_D_CORRUPTIONS if args.add_corruptions else None
booster = boosters.ThreeDBooster(num_classes=num_classes,
tex_size=args.patch_size,
image_size=dim,
batch_size=args.batch_size,
render_options=render_options,
num_texcoords=args.num_texcoord_renderers,
num_gpus=ch.cuda.device_count(),
debug=args.debug,
forward_render=args.forward_render,
custom_file=args.custom_file,
corruptions=corruptions)
model = boosters.BoostedModel(model, booster, args.training_mode)
elif args.boosting == 'none':
# assert args.eval_only
model = boosters.BoostedModel(model, None, args.training_mode)
else:
raise ValueError(f'boosting not found: {args.boosting}')
return model.cuda()
def main_trainer(args, store):
ds, (train_loader, val_loader) = get_dataset_and_loaders(args)
if args.single_class is not None:
print(f"Boosting towards a single class {args.single_class}")
# Transform everything to have the same label
class_tx = lambda x, y: (x, ch.ones_like(y) * args.single_class)
train_loader = loaders.LambdaLoader(train_loader, class_tx)
val_loader = loaders.LambdaLoader(val_loader, class_tx)
model = get_boosted_model(args, ds)
# Resume traing the boosted model from a checkpoint
resume_path = os.path.join(args.out_dir, args.exp_name, 'checkpoint.pt.latest')
checkpoint = None
if args.resume and os.path.isfile(resume_path):
print('[Resuming training BoostedModel from a checkpoint...]')
checkpoint = ch.load(resume_path, pickle_module=dill)
sd = checkpoint['model']
sd = {k[len('module.'):]:v for k,v in sd.items()}
model.load_state_dict(sd)
print("=> loaded checkpoint of BoostedModel'{}' (epoch {})".format(resume_path, checkpoint['epoch']))
print(f"Dataset: {args.dataset} | Model: {args.arch}")
if args.eval_only:
print('==>[Evaluating the model]')
return train.eval_model(args, model, val_loader, store=store)
parameters = [model.dummy] # avoids empty param list to optimizer when optimizing the booster alone
if args.training_mode in ['joint', 'model']:
parameters = model.boosted_model.parameters()
def iteration_hook(model, i, loop_type, inp, target):
if loop_type == 'val' or model.module.booster is None:
return
if args.training_mode in ['booster', 'joint']:
model.module.booster.step_booster(lr=args.patch_lr)
if i % args.save_freq == 0:
save_dir = Path(store.save_dir)
            #TODO: Move this part inside the 2D boosters. It is
            # a bit tricky because if we do that, we cannot save the "corrupted"
            # boosted images, only the boosted images
if args.boosting != '3d':
inp, target = inp.cuda(), target.cuda()
example_boosted = model.module.booster(inp, target)
bs_path = save_dir / f'boosted_{i}.jpg'
save_image(example_boosted[:4], bs_path)
example_adversaried = model.module.boosted_model.apply(example_boosted)
inp_path = save_dir / f'inp_{i}.jpg'
adv_path = save_dir / f'adv_{i}.jpg'
save_image(inp[:4], inp_path)
save_image(example_adversaried[:4], adv_path)
else:
if not args.save_only_last:
save_dir = save_dir / f'iteration_{i}'
os.makedirs(save_dir)
with ch.no_grad():
model(inp, target, save_dir=save_dir)
if i == 0:
print(f'Saved in {store.save_dir}')
args.iteration_hook = iteration_hook
return train.train_model(args, model, (train_loader, val_loader),
store=store, checkpoint=checkpoint,
update_params=parameters)
if __name__ == "__main__":
args = parser.parse_args()
if args.json_config is not None:
print("Overriding args with JSON...")
new_args = json.load(open(args.json_config))
assert all(hasattr(args, k) for k in new_args), set(new_args.keys()) - set(vars(args).keys())
for k in new_args: setattr(args, k, new_args[k])
assert not args.adv_train, 'not supported yet slatta dog'
assert args.training_mode is not None, "training_mode is required"
    # Important for automatic job retries on the cluster in case of preemptions. Avoid uuids.
if args.exp_name == 'random':
args.exp_name = str(uuid4())
print(f"Experiment name: {args.exp_name}")
    assert args.exp_name is not None
# Preprocess args
default_ds = args.dataset if args.dataset in datasets.DATASETS else "imagenet"
args = defaults.check_and_fill_args(
args, defaults.CONFIG_ARGS, datasets.DATASETS[default_ds])
if not args.eval_only:
args = defaults.check_and_fill_args(
args, defaults.TRAINING_ARGS, datasets.DATASETS[default_ds])
if False and (args.adv_train or args.adv_eval):
args = defaults.check_and_fill_args(
args, defaults.PGD_ARGS, datasets.DATASETS[default_ds])
args = defaults.check_and_fill_args(
args, defaults.MODEL_LOADER_ARGS, datasets.DATASETS[default_ds])
store = cox.store.Store(args.out_dir, args.exp_name)
if 'metadata' not in store.keys:
args_dict = args.__dict__
schema = cox.store.schema_from_dict(args_dict)
store.add_table('metadata', schema)
store['metadata'].append_row(args_dict)
else:
print('[Found existing metadata in store. Skipping this part.]')
print(args)
main_trainer(args, store)
| 2 | 2 |
scato/ui/splash.py | michurin/scato | 1 | 12795876 | splash='''bgcolor 0 0 0
color 0 1 0
jump .45 .43
left 35
scale .32
width .15
jump 0 1
iterate 60 begin
draw 0 .7
iterate 7 begin
draw 0.096441350201699999 -0.013433702204987999
scale 0.99371847379 right 3.27272727273 mixcolor 1 1 0 .012
end
draw 0 -.7
iterate 3 begin
draw 0.056730206001 -0.00790217776764
scale 0.99371847379 right 3.27272727273 mixcolor 1 1 0 .012
end
end'''
| 0.949219 | 1 |
upload.py | hydrogen18/fairywren | 39 | 12795877 | import sys
import json
import os
import math
import subprocess
import urllib
import urllib2
import MultipartPostHandler
import cookielib
import hashlib
import base64
import types
import xml.dom.minidom
def mktorrent(target,announce,pieceLength,private):
cmd = ['/usr/bin/mktorrent']
cmd.append('--announce=' + announce)
cmd.append('--piece-length=' + str(pieceLength))
if private:
cmd.append('--private')
outfile = '%s.%i.torrent' % (os.tempnam(),os.getpid(),)
cmd.append('--output=' + outfile)
cmd.append(target)
if 0!= subprocess.call(cmd):
raise EnvironmentError("mktorrent failed")
return outfile
def mediainfo(*files):
cmd = ['/usr/bin/mediainfo','--output=XML']
cmd += files
proc = subprocess.Popen(cmd,stdout=subprocess.PIPE)
#Read all the output
stdout, stderr = proc.communicate()
#Check for failure
if 0!=proc.returncode:
print stdout
print stderr
raise SystemError('mediainfo failed')
retval = {}
#Parse the output
doc = xml.dom.minidom.parseString(stdout)
#Ignore anything not in the first Mediainfo tag
doc = doc.getElementsByTagName('Mediainfo')[0]
#Extract the mediainfo version
retval['version'] = doc.getAttribute('version').strip()
retval['files'] = {}
#For each file, extract the information about the tracks
for f in doc.getElementsByTagName('File'):
f_ = {}
f_['tracks'] = []
name = None
for track in f.getElementsByTagName('track'):
t = {}
t['type'] = str(track.getAttribute('type'))
for tag in track.childNodes:
if len(tag.childNodes)==1 and 'text' in tag.childNodes[0].nodeName:
key = tag.tagName.strip()
value = tag.childNodes[0].nodeValue.strip()
#Mediainfo shows the name of the file in the
#General track
if t['type'] == 'General' and 'Complete_name' == key:
name = value
else:
t[key] = value
f_['tracks'].append(t)
name = name.strip().split(os.sep)[-1]
retval['files'][name] = f_
return retval
def listFiles(filesPath):
try:
files = os.listdir(filesPath)
except OSError as e:
if e.errno!=20:
raise e
files = [filesPath]
return files
files = [os.path.join(filesPath,f) for f in files]
return files
def buildOpener(url,username,password):
url = str(url)
def hashPassword(pw):
h = hashlib.sha512()
h.update(pw)
return base64.urlsafe_b64encode(h.digest()).replace('=','')
	qp=urllib.urlencode({"username":username,"password":hashPassword(password)})
request = urllib2.Request('%s/api/session' % url,data=qp)
response = urllib2.urlopen(request)
body = json.load(response)
if 'error' in body:
raise Exception(body['error'])
cookies = cookielib.CookieJar()
cookies.extract_cookies(response,request)
return urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies),MultipartPostHandler.MultipartPostHandler)
if __name__ == "__main__":
with open(sys.argv[1],'r') as fin:
conf = json.load(fin)
#Login to the fairywren instance
fairywren = buildOpener(**conf['fairywren'])
fwurl = str(conf['fairywren']['url'])
#Retrieve the announce url
account = json.loads(fairywren.open('%s/api/session' % fwurl ).read())
announceUrl = json.loads(fairywren.open('%s/%s' % ( fwurl, account['my']['href'] ) ).read())['announce']['href']
#Get the current piece size as a power of 2
pieceLength = 18
filesPath = sys.argv[2]
#Create a new torrent
newTorrentPath = mktorrent(filesPath,announceUrl,pieceLength,True)
files = listFiles(filesPath)
extendedInfo = {}
try:
minfo = mediainfo(*files)
extendedInfo['mediainfo'] = minfo
except SystemError as e:
print 'No mediainfo on upload...'
if len(sys.argv) == 4:
title = sys.argv[3]
else:
title = os.path.split(filesPath)[-1]
#Upload the torrent to fairywren
fairywren.open('%s/api/torrents' % fwurl ,data={"extended": json.dumps(extendedInfo) , "title":str(title),"torrent":open(newTorrentPath,'rb')})
os.unlink(newTorrentPath)
| 2.296875 | 2 |
sciencebeam_judge/evaluation_config.py | elifesciences/sciencebeam-judge | 0 | 12795878 | <reponame>elifesciences/sciencebeam-judge<gh_stars>0
from typing import Dict, List, NamedTuple, Optional
import yaml
from sciencebeam_utils.utils.string import parse_list
from .utils.string import parse_dict
from .utils.config import parse_config_as_dict
DEFAULT_EVALUATION_YAML_FILENAME = 'evaluation.yml'
class CustomEvaluationFieldSourceConfig(NamedTuple):
field_names: List[str]
@staticmethod
def from_json(data: dict):
return CustomEvaluationFieldSourceConfig(
field_names=data['field_names']
)
class CustomEvaluationFieldConfig(NamedTuple):
name: str
evaluation_type: str
expected: CustomEvaluationFieldSourceConfig
actual: CustomEvaluationFieldSourceConfig
evaluation_type_config: Optional[dict] = None
@staticmethod
def from_json(data: dict):
return CustomEvaluationFieldConfig(
name=data['name'],
evaluation_type=data['evaluation_type'],
expected=CustomEvaluationFieldSourceConfig.from_json(data['expected']),
actual=CustomEvaluationFieldSourceConfig.from_json(data['actual']),
evaluation_type_config=data.get('evaluation_type_config')
)
class CustomEvaluationConfig(NamedTuple):
fields: List[CustomEvaluationFieldConfig]
@staticmethod
def from_json(data: Optional[dict]):
if not data:
return None
return CustomEvaluationConfig(
fields=[
CustomEvaluationFieldConfig.from_json(field_data)
for field_data in data['fields']
]
)
class EvaluationConfig(NamedTuple):
custom: CustomEvaluationConfig = CustomEvaluationConfig(fields=[])
@staticmethod
def from_json(data: dict):
return EvaluationConfig(
custom=CustomEvaluationConfig.from_json(
data.get('custom')
)
)
def parse_evaluation_config(filename_or_fp) -> Dict[str, Dict[str, str]]:
return parse_config_as_dict(filename_or_fp)
def parse_evaluation_yaml_config(filename_or_fp) -> dict:
if isinstance(filename_or_fp, str):
with open(filename_or_fp, 'r') as fp:
return yaml.safe_load(fp)
return yaml.safe_load(filename_or_fp)
def get_evaluation_config_object(evaluation_json: dict) -> EvaluationConfig:
return EvaluationConfig.from_json(evaluation_json)
def parse_scoring_type_overrides(
scoring_type_overrides_str: str) -> Dict[str, List[str]]:
return {
key: parse_list(value)
for key, value in parse_dict(scoring_type_overrides_str).items()
}
def get_scoring_types_by_field_map_from_config(
config_map: Dict[str, Dict[str, str]]) -> Dict[str, List[str]]:
scoring_type_config = config_map.get('scoring_type', {})
return {
key: parse_list(value)
for key, value in scoring_type_config.items()
}
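
# Example usage (a sketch, not part of sciencebeam-judge itself): the field names and
# evaluation_type value below are illustrative assumptions that simply match the
# NamedTuple classes defined above.
if __name__ == '__main__':
    example_json = {
        'custom': {
            'fields': [{
                'name': 'title',
                'evaluation_type': 'string',
                'expected': {'field_names': ['expected_title']},
                'actual': {'field_names': ['actual_title']}
            }]
        }
    }
    config = get_evaluation_config_object(example_json)
    print(config.custom.fields[0].name)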
| 2.34375 | 2 |
Programs/table.py | jatiinyadav/Python | 0 | 12795879 | <filename>Programs/table.py
l1 = ['1','2','3']
l2 = ['4','5','6']
for i in l1:
for j in l2:
print(i,j)
print("--------")
i = 1
n = 2
while(i<=5):
print(n," * ", i , " = " , n*i)
i +=1
print("--------") | 3.46875 | 3 |
app.py | OPERANDOH2020/op-web-crawler | 0 | 12795880 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from time import sleep
import ConfigParser
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from flask import Flask
import atexit
import json
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
user_agent = ("Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/49.0.2623.108 Chrome/49.0.2623.108 Safari/537.36")
dcap = dict(DesiredCapabilities.PHANTOMJS)
dcap["phantomjs.page.settings.userAgent"] = user_agent
driver = webdriver.PhantomJS(desired_capabilities=dcap)
app = Flask(__name__)
Config = ConfigParser.ConfigParser()
Config.read("credentials.ini")
def bye():
driver.quit()
def save2file(name, content):
fout = open(name, "w")
fout.write(content.encode('utf8'))
    fout.close()
def linkedin(driver, uname, pwd):
# login to linkedin
driver.get("http://linkedin.com/uas/login")
emailelement = driver.find_element_by_id("session_key-login")
passwordelement = driver.find_element_by_id("session_password-login")
emailelement.send_keys(uname)
    passwordelement.send_keys(pwd)
passwordelement.submit()
sleep(2)
driver.get("https://www.linkedin.com/psettings/")
settings = driver.find_element_by_xpath(
"//div[contains(@class, 'settings-grid')]").text
return settings
def twitter(driver, uname, pwd):
# login to twitter
driver.get("http://twitter.com/login")
emailelement = driver.find_element_by_xpath(
"//div[@class='signin-wrapper']//input[@name='session[username_or_email]']")
passwordelement = driver.find_element_by_xpath(
"//div[@class='signin-wrapper']//input[@name='session[password]']")
emailelement.send_keys(uname)
    passwordelement.send_keys(pwd)
passwordelement.submit()
# get the security & privacy settings
driver.get("https://twitter.com/settings/security")
settings = driver.find_element_by_xpath(
"//div[contains(@class, 'content-main')]").text
return settings
def fb(driver, uname, pwd):
# login to facebook
driver.get("https://facebook.com")
login = "loginbutton"
emailelement = driver.find_element_by_name("email")
passwordelement = driver.find_element_by_name("pass")
emailelement.send_keys(uname)
    passwordelement.send_keys(pwd)
loginelement = driver.find_element_by_id(login)
loginelement.click()
# get the privacy page
driver.get("https://www.facebook.com/settings?tab=privacy")
settings = driver.find_element_by_id('contentArea').text
return settings
def google(driver, uname, pwd):
# login to google
url = 'https://accounts.google.com/Login'
driver.get(url)
driver.find_element_by_id("Email").send_keys(uname)
driver.find_element_by_id("next").click()
# needs to sleep otherwise it will not find the element
sleep(1)
driver.find_element_by_id("Passwd").send_keys(<PASSWORD>)
driver.find_element_by_id("signIn").click()
# get the privacy page
driver.get("https://myaccount.google.com/privacy?pli=1")
settings = driver.find_element_by_xpath("//div[contains(@class, 'lc-mc')]")
return settings.text
def googlePT(driver):
url = 'https://www.google.com/policies/privacy/'
driver.get(url)
terms = driver.find_element_by_xpath("//div[contains(@class, 'maia-article')]").text
return terms
def InstagramPT(driver):
url = 'http://instagram.com/legal/privacy/'
driver.get(url)
terms = driver.find_element_by_id('hc2content').text
return terms
def TwitterPT(driver):
url = 'https://twitter.com/privacy?lang=en'
driver.get(url)
terms = driver.find_element_by_xpath("//div[contains(@class, 'UserPolicy-content')]").text
return terms
def LinkedInPT(driver):
url = 'https://www.linkedin.com/legal/privacy-policy'
driver.get(url)
terms = driver.find_element_by_xpath("//div[contains(@class, 'legal')]").text
return terms
def FBPT(driver):
url = 'https://www.facebook.com/legal/terms/update'
driver.get(url)
terms = driver.find_element_by_id('content').text
return terms
@app.route("/GetPrivacyTerms", methods=['GET'])
def GetPrivacyTerms():
res = {}
res['fb'] = FBPT(driver)
res['g'] = googlePT(driver)
res['tw'] = TwitterPT(driver)
res['l'] = LinkedInPT(driver)
res['i'] = InstagramPT(driver)
json_data = json.dumps(res)
return str(json_data)
@app.route("/OSPSettings", methods=['GET'])
def GetGlobalSettings():
res={}
res['fb'] = fb(driver, Config.get("facebook", "user"),Config.get("facebook", "password"))
res['g'] = google(driver, Config.get("google", "user"),Config.get("google", "password"))
res['tw'] = twitter(driver, Config.get("twitter", "user"),Config.get("twitter", "password"))
res['l'] = linkedin(driver, Config.get("linkedin", "user"),Config.get("linkedin", "password"))
json_data = json.dumps(res)
return str(json_data)
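
# Example calls (a sketch): once the app below is running, the two endpoints defined above
# can be queried with curl; the host/port assume Flask's default development setup.
#   curl http://127.0.0.1:5000/GetPrivacyTerms
#   curl http://127.0.0.1:5000/OSPSettings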
if __name__ == "__main__":
app.run(debug=True)
atexit.register(bye)
| 2.640625 | 3 |
voxel_globe/download/forms.py | ngageoint/voxel-globe | 28 | 12795881 | from django import forms
import voxel_globe.meta.models as models
class TiePointForm(forms.Form):
image_set = forms.ModelChoiceField(label="Image Set",
queryset=models.ImageSet.objects.all().order_by('name'))
class PointCloudForm(forms.Form):
point_cloud = forms.ModelChoiceField(label="Point Cloud",
queryset=models.PointCloud.objects.all().order_by('name'))
class ImageForm(forms.Form):
image = forms.ModelChoiceField(label="Image",
queryset=models.Image.objects.all().order_by('name'))
class CameraForm(forms.Form):
image_set = forms.ModelChoiceField(label="Image Set",
queryset=models.ImageSet.objects.all().order_by('name'))
camera_set = forms.ModelChoiceField(label="Camera Set",
queryset=models.CameraSet.objects.all().order_by('name'))
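
# Usage sketch (hypothetical helper, not part of voxel_globe): bind submitted data to one
# of the forms above and read the selected model instances after validation.
def _example_handle_camera_form(post_data):
    form = CameraForm(post_data)
    if form.is_valid():
        return form.cleaned_data['image_set'], form.cleaned_data['camera_set']
    return None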
| 2.25 | 2 |
borax/ui/aiotk.py | kinegratii/borax | 51 | 12795882 | <gh_stars>10-100
# coding=utf8
import tkinter as tk
import asyncio
__all__ = ['run_loop']
async def run_loop(app, interval=0.05):
try:
while True:
app.update()
await asyncio.sleep(interval)
except tk.TclError as e:
if "application has been destroyed" not in e.args[0]:
raise
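

# Minimal usage sketch (assumes a local display is available; the window contents are
# illustrative, only run_loop above comes from this module).
if __name__ == '__main__':
    root = tk.Tk()
    tk.Label(root, text='hello from run_loop').pack()
    asyncio.run(run_loop(root))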
| 2.765625 | 3 |
ejemplo1.py | 030701Ivan/mirepositorio | 0 | 12795883 | import cv2
import numpy as np
imagen = cv2.imread('imagen.jpg')
imagen = cv2.cvtColor(imagen,cv2.COLOR_BGR2RGB)
print(imagen.shape)
print(imagen[0][0][0])
imagen = cv2.resize(imagen,(256, 256))
imagen = cv2.imread('imagen.jpg')
imagen = cv2.cvtColor(imagen,cv2.COLOR_BGR2GRAY)
print(imagen.shape)
print(imagen[0][0])
imagen[0][0] = 0
imagen[0][1] = 0
imagen[0][2] = 0
cv2.imwrite('grayimagen.jpg',imagen)
matriz = np.zeros((256,256),np.float32)
print(matriz.shape)
cv2.imwrite('matrizImagen.jpg',matriz)
imagen = cv2.cvtColor(matriz,cv2.COLOR_GRAY2BGR)
print(imagen.shape)
cv2.imwrite('matrizColorImagen.jpg',imagen)
#cv2.imwrite('resizeImagen.jpg',imagen)
#cv2.imshow('image',imagen)
#cv2.waitKey(0)
| 2.984375 | 3 |
foobar 2.2.py | SambhavG/Google-foobar | 0 | 12795884 | def solution(h, q):
returnList = []
for i in q:
if (i == pow(2, h)-1):
returnList.append(-1)
else:
currentLevel = h
currentTop = pow(2, h)-1
currentLower = 1
currentUpper = pow(2, h)-2
topList = []
while currentLevel > 1:
topList.append(currentTop)
#prune to left tree
if (i <= (currentLower+currentUpper-1)/2):
currentLevel-=1
currentTop = (currentLower+currentUpper-1)/2
#currentLower stays the same
currentUpper = currentTop-1
#prune to the right tree
elif (i > (currentLower+currentUpper-1)/2 and i < currentTop):
currentLevel-=1
currentTop-=1
currentLower = (currentLower+currentUpper-1)/2+1
currentUpper = currentTop-1
if (i == currentTop):
returnList.append(int(topList[len(topList)-1]))
currentLevel = 0
return returnList
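
# Quick check (a sketch using the commonly cited sample input for this puzzle;
# for h=3 and q=[7, 3, 5, 1] this implementation returns [-1, 7, 6, 3]).
if __name__ == '__main__':
    print(solution(3, [7, 3, 5, 1]))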
| 3.28125 | 3 |
Mundo 2/Ex066.py | FelipeDreissig/Prog-em-Py---CursoEmVideo | 0 | 12795885 | #soma sem considerar o flag
l = j = 0
print('Digite 999 para parar.')
while True:
c = int(input('Digite um número inteiro:\n'))
if c != 999:
j = j + c
l = l + 1
else:
break
print(f'Fim do programa.\nA soma é {j} e foram digitados {l} números.') | 3.625 | 4 |
mlmodels/model_tch/nbeats.py | gitter-badger/mlmodels | 1 | 12795886 | <reponame>gitter-badger/mlmodels
import os
import pandas as pd
import numpy as np
import torch
from torch import optim
from torch.nn import functional as F
####################################################################################################
from mlmodels.model_tch.nbeats.model import NBeatsNet
VERBOSE = False
####################################################################################################
# Helper functions
def os_package_root_path(filepath, sublevel=0, path_add=""):
"""
get the module package root folder
"""
from pathlib import Path
path = Path(filepath).parent
for i in range(1, sublevel + 1):
path = path.parent
path = os.path.join(path.absolute(), path_add)
return path
def log(*s, n=0, m=1):
sspace = "#" * n
sjump = "\n" * m
print(sjump, sspace, s, sspace, flush=True)
####################################################################################################
# Model
Model = NBeatsNet
####################################################################################################
# Dataaset
def get_dataset(**kw):
data_path = kw['data_path']
train_split_ratio = kw.get("train_split_ratio", 1)
df = pd.read_csv(data_path, index_col=0, parse_dates=True)
if VERBOSE: print(df.head(5))
#### Preprocess
df = df.values # just keep np array here for simplicity.
norm_constant = np.max(df)
df = df / norm_constant # small leak to the test set here.
x_train_batch, y = [], []
backcast_length = kw['backcast_length']
forecast_length = kw['forecast_length']
for i in range(backcast_length, len(df) - forecast_length):
x_train_batch.append(df[i - backcast_length:i])
y.append(df[i:i + forecast_length])
x_train_batch = np.array(x_train_batch)[..., 0]
y = np.array(y)[..., 0]
#### Split
c = int(len(x_train_batch) * train_split_ratio)
x_train, y_train = x_train_batch[:c], y[:c]
x_test, y_test = x_train_batch[c:], y[c:]
return x_train, y_train, x_test, y_test, norm_constant
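
# Example call (a sketch; the CSV path and window sizes below are assumptions that mirror
# how data_pars is built in get_params() further down):
#   x_train, y_train, x_test, y_test, norm_constant = get_dataset(
#       data_path="dataset/milk.csv", backcast_length=10, forecast_length=5,
#       train_split_ratio=0.8)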
def data_generator(x_full, y_full, bs):
def split(arr, size):
arrays = []
while len(arr) > size:
slice_ = arr[:size]
arrays.append(slice_)
arr = arr[size:]
arrays.append(arr)
return arrays
while True:
for rr in split((x_full, y_full), bs):
yield rr
######################################################################################################
# Model fit
def fit(model, data_pars, compute_pars=None, out_pars=None, **kw):
device = torch.device('cpu')
forecast_length = data_pars["forecast_length"]
backcast_length = data_pars["backcast_length"]
batch_size = compute_pars["batch_size"] # greater than 4 for viz
disable_plot = compute_pars["disable_plot"]
### Get Data
x_train, y_train, x_test, y_test, _ = get_dataset(**data_pars)
data_gen = data_generator(x_train, y_train, batch_size)
### Setup session
optimiser = optim.Adam(model.parameters())
### fit model
net, optimiser= fit_simple(model, optimiser, data_gen, plot_model, device, data_pars)
return net, optimiser
def fit_simple(net, optimiser, data_generator, on_save_callback, device, data_pars, max_grad_steps=500):
print('--- fiting ---')
initial_grad_step = load(net, optimiser)
for grad_step, (x, target) in enumerate(data_generator):
grad_step += initial_grad_step
optimiser.zero_grad()
net.train()
backcast, forecast = net(torch.tensor(x, dtype=torch.float).to(device))
loss = F.mse_loss(forecast, torch.tensor(target, dtype=torch.float).to(device))
loss.backward()
optimiser.step()
print(f'grad_step = {str(grad_step).zfill(6)}, loss = {loss.item():.6f}')
if grad_step % 100 == 0 or (grad_step < 100 and grad_step % 100 == 0):
with torch.no_grad():
save(net, optimiser, grad_step)
if on_save_callback is not None:
on_save_callback(net, x, target, grad_step, data_pars)
if grad_step > max_grad_steps:
print('Finished.')
break
return net, optimiser
def predict(model, data_pars, compute_pars=None, out_pars=None, **kw):
data_pars["train_split_ratio"] = 1
x_test, y_test, _, _, _ = get_dataset(**data_pars)
test_losses = []
model.eval()
_, f = model(torch.tensor(x_test, dtype=torch.float))
test_losses.append(F.mse_loss(f, torch.tensor(y_test, dtype=torch.float)).item())
p = f.detach().numpy()
return p
###############################################################################################################
def plot(net, x, target, backcast_length, forecast_length, grad_step, out_path="./"):
import matplotlib.pyplot as plt
net.eval()
_, f = net(torch.tensor(x, dtype=torch.float))
subplots = [221, 222, 223, 224]
plt.figure(1)
plt.subplots_adjust(top=0.88)
for i in range(4):
ff, xx, yy = f.cpu().numpy()[i], x[i], target[i]
plt.subplot(subplots[i])
plt.plot(range(0, backcast_length), xx, color='b')
plt.plot(range(backcast_length, backcast_length + forecast_length), yy, color='g')
plt.plot(range(backcast_length, backcast_length + forecast_length), ff, color='r')
# plt.title(f'step #{grad_step} ({i})')
output = f'{out_path}/n_beats_{grad_step}.png'
plt.savefig(output)
plt.clf()
print('Saved image to {}.'.format(output))
def plot_model(net, x, target, grad_step, data_pars, disable_plot=False):
forecast_length = data_pars["forecast_length"]
backcast_length = data_pars["backcast_length"]
# batch_size = compute_pars["batch_size"] # greater than 4 for viz
# disable_plot = compute_pars.get("disable_plot", False)
if not disable_plot:
print('plot()')
plot(net, x, target, backcast_length, forecast_length, grad_step)
def plot_predict(x_test, y_test, p, data_pars, compute_pars, out_pars):
import matplotlib.pyplot as plt
forecast_length = data_pars["forecast_length"]
backcast_length = data_pars["backcast_length"]
norm_constant = compute_pars["norm_contsant"]
out_path = out_pars['out_path']
output = f'{out_path}/n_beats_test.png'
subplots = [221, 222, 223, 224]
plt.figure(1)
plt.subplots_adjust(top=0.88)
for plot_id, i in enumerate(np.random.choice(range(len(p)), size=4, replace=False)):
ff, xx, yy = p[i] * norm_constant, x_test[i] * norm_constant, y_test[i] * norm_constant
plt.subplot(subplots[plot_id])
plt.grid()
plt.plot(range(0, backcast_length), xx, color='b')
plt.plot(range(backcast_length, backcast_length + forecast_length), yy, color='g')
plt.plot(range(backcast_length, backcast_length + forecast_length), ff, color='r')
plt.savefig(output)
plt.clf()
print('Saved image to {}.'.format(output))
###############################################################################################################
# save and load model helper function
def save(model, optimiser, grad_step,CHECKPOINT_NAME="mycheckpoint"):
torch.save({
'grad_step': grad_step,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimiser.state_dict(),
}, CHECKPOINT_NAME)
def load(model, optimiser, CHECKPOINT_NAME = 'nbeats-fiting-checkpoint.th'):
if os.path.exists(CHECKPOINT_NAME):
checkpoint = torch.load(CHECKPOINT_NAME)
model.load_state_dict(checkpoint['model_state_dict'])
optimiser.load_state_dict(checkpoint['optimizer_state_dict'])
grad_step = checkpoint['grad_step']
print(f'Restored checkpoint from {CHECKPOINT_NAME}.')
return grad_step
return 0
#############################################################################################################
def get_params(choice=0, data_path="dataset/", **kw):
if choice == 0:
log("#### Path params ################################################")
data_path = os_package_root_path(__file__, sublevel=1, path_add=data_path)
        out_path = os.getcwd() + "/nbeats_test/"
        os.makedirs(out_path, exist_ok=True)
log(data_path, out_path)
data_pars = {"data_path": data_path, "forecast_length": 5, "backcast_length": 10}
log("## Model params #########################################")
device = torch.device('cpu')
model_pars = {"stack_types": [NBeatsNet.GENERIC_BLOCK, NBeatsNet.GENERIC_BLOCK],
"device": device,
"nb_blocks_per_stack": 3, "forecast_length": 5, "backcast_length": 10,
"thetas_dims": [7, 8], "share_weights_in_stack": False, "hidden_layer_units": 256}
compute_pars = {"batch_size": 100, "disable_plot": False,
"norm_contsant": 1.0,
"result_path": 'n_beats_test{}.png',
"model_path": "mycheckpoint"}
out_pars = {"out_path": out_path + "/"}
return model_pars, data_pars, compute_pars, out_pars
def test2(data_path="dataset/milk.csv", out_path="n_beats_test{}.png", reset=True):
###loading the command line arguments
# arg = load_arguments()
model_uri = "model_tch/nbeats.py"
log("#### Loading params #######################################")
model_pars, data_pars, compute_pars, out_pars = get_params(choice=0, data_path=data_path)
log("############ Model preparation #########################")
from mlmodels.models import module_load_full, fit, predict
module, model = module_load_full(model_uri, model_pars)
print(module, model)
log("############ Model fit ##################################")
model, sess= fit(model, module, data_pars=data_pars, out_pars=out_pars, compute_pars={})
print("fit success", sess)
log("############ Prediction ##################################")
preds = predict(model, module, sess, data_pars=data_pars,
out_pars=out_pars, compute_pars=compute_pars)
print(preds)
def test(data_path="dataset/milk.csv"):
###loading the command line arguments
log("#### Loading params #######################################")
model_pars, data_pars, compute_pars, out_pars = get_params(choice=0, data_path=data_path)
log("#### Loading dataset #######################################")
x_train, y_train, x_test, y_test, norm_const = get_dataset(**data_pars)
log("#### Model setup ##########################################")
model = NBeatsNet(**model_pars)
log("#### Model fit ############################################")
model, optimiser = fit(model, data_pars, compute_pars)
log("#### Predict #############################################")
ypred = predict(model, data_pars, compute_pars, out_pars)
print(ypred)
log("#### Plot ###############################################")
    # with the default train_split_ratio of 1, x_train/y_train cover the full series that
    # predict() scored, so they are passed as the reference values here
    plot_predict(x_train, y_train, ypred, data_pars, compute_pars, out_pars)
if __name__ == '__main__':
VERBOSE = True
test()
| 2.09375 | 2 |
lib/model/repn/relpn_target_layer.py | brbzjl/my_graph_rcnn | 4 | 12795887 | # --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME> and <NAME>
# --------------------------------------------------------
# --------------------------------------------------------
# Reorganized and modified by <NAME> and <NAME>
# --------------------------------------------------------
import torch
import torch.nn as nn
import numpy as np
import numpy.random as npr
from ..utils.config import cfg
from bbox_transform import bbox_transform, bbox_overlaps, co_bbox_overlaps_batch2, bbox_transform_batch2, bbox_overlaps_batch2
import pdb
DEBUG = False
class _RelProposalTargetLayer(nn.Module):
"""
Assign object detection proposals to ground-truth targets. Produces proposal
classification labels and bounding-box regression targets.
"""
def __init__(self, nclasses_rel):
super(_RelProposalTargetLayer, self).__init__()
self._num_classes_rel = nclasses_rel
self.BBOX_NORMALIZE_MEANS = torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS)
self.BBOX_NORMALIZE_STDS = torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS)
self.BBOX_INSIDE_WEIGHTS = torch.FloatTensor(cfg.TRAIN.BBOX_INSIDE_WEIGHTS)
def forward(self, roi_pairs, gt_boxes, num_boxes):
batch_size = gt_boxes.size(0)
# compute overlap between gt rel pairs and all roi pairs
gt_box_pairs = roi_pairs.new(batch_size, cfg.MAX_ROI_PAIR_NUMBER, 9).zero_()
for i in range(batch_size):
if (gt_boxes[i, :, 21:] > 0).sum() == 0: # no relation
continue
gt_pairs_i = (gt_boxes[i, :, 21:] > 0).nonzero()
n_rel = min(gt_box_pairs[i].size(0), gt_pairs_i.size(0))
gt_box_pairs[i][:n_rel, 0:4] = gt_boxes[i][gt_pairs_i[:n_rel, 0]][:, :4]
gt_box_pairs[i][:n_rel, 4:8] = gt_boxes[i][gt_pairs_i[:n_rel, 1]][:, :4]
gt_box_pairs[i][:n_rel, 8] = gt_boxes[i][gt_pairs_i[:n_rel, 0], 21 + gt_pairs_i[:n_rel, 1]]
# Include ground-truth boxes in the set of candidate rois
# gt_box_pairs_append = roi_pairs.new(batch_size, gt_box_pairs.size(1), roi_pairs.size(2)).zero_()
# gt_box_pairs_append[:,:,1:9] = gt_box_pairs[:,:,:8]
# for i in range(batch_size):
# gt_box_pairs_append[i, :, 0] = i
#
# roi_pairs = torch.cat([roi_pairs, gt_box_pairs_append], 1)
roi_pairs = roi_pairs.contiguous()
num_images = 1
rois_per_image = int(cfg.TRAIN.BATCH_SIZE / num_images)
fg_rois_per_image = int(np.round(cfg.TRAIN.FG_FRACTION * rois_per_image))
labels, rois, keeps = self._sample_roi_pairs_pytorch(roi_pairs, gt_box_pairs, fg_rois_per_image,
rois_per_image, self._num_classes_rel)
return rois, labels, keeps
def backward(self, top, propagate_down, bottom):
"""This layer does not propagate gradients."""
pass
def _sample_roi_pairs_pytorch(self, all_roi_pairs, gt_box_pairs, fg_rois_per_image, rois_per_image, num_classes):
"""Generate a random sample of RoIs comprising foreground and background
examples.
"""
# overlaps: (rois x gt_boxes)
overlaps = co_bbox_overlaps_batch2(all_roi_pairs[:,:,1:].contiguous(),
gt_box_pairs[:,:,:8].contiguous())
max_overlaps, gt_assignment = torch.max(overlaps, 2)
batch_size = overlaps.size(0)
num_proposal = overlaps.size(1)
num_boxes_per_img = overlaps.size(2)
offset = torch.arange(0, batch_size) * gt_box_pairs.size(1)
offset = offset.view(-1, 1).type_as(gt_assignment) + gt_assignment
labels = gt_box_pairs[:,:,8].contiguous().view(-1).index(offset.view(-1))\
.view(batch_size, -1)
fg_mask = max_overlaps >= cfg.TRAIN.RELPN_FG_THRESH
keep_inds_batch = labels.new(batch_size, rois_per_image).zero_()
labels_rel_batch = labels.new(batch_size, rois_per_image).zero_()
roi_pairs_batch = all_roi_pairs.new(batch_size, rois_per_image, 9).zero_()
# Guard against the case when an image has fewer than max_fg_rois_per_image
# foreground RoIs
for i in range(batch_size):
fg_inds = torch.nonzero(max_overlaps[i] >= cfg.TRAIN.RELPN_FG_THRESH).view(-1)
fg_num_rois = fg_inds.numel()
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = torch.nonzero((max_overlaps[i] < cfg.TRAIN.RELPN_BG_THRESH_HI) &
(max_overlaps[i] >= cfg.TRAIN.RELPN_BG_THRESH_LO)).view(-1)
bg_num_rois = bg_inds.numel()
# print(fg_num_rois, bg_num_rois)
# pdb.set_trace()
if fg_num_rois > 0 and bg_num_rois > 0:
# sampling fg
fg_rois_per_this_image = min(fg_rois_per_image, fg_num_rois)
# rand_num = torch.randperm(fg_num_rois).long().cuda()
rand_num = torch.from_numpy(np.random.permutation(fg_num_rois)).long().cuda()
fg_inds = fg_inds[rand_num[:fg_rois_per_this_image]]
# sampling bg
bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
# Seems torch.rand has a bug, it will generate very large number and make an error.
# We use numpy rand instead.
#rand_num = (torch.rand(bg_rois_per_this_image) * bg_num_rois).long().cuda()
rand_num = np.floor(np.random.rand(bg_rois_per_this_image) * bg_num_rois)
rand_num = torch.from_numpy(rand_num).long().cuda()
bg_inds = bg_inds[rand_num]
elif fg_num_rois > 0 and bg_num_rois == 0:
# sampling fg
#rand_num = torch.floor(torch.rand(rois_per_image) * fg_num_rois).long().cuda()
rand_num = np.floor(np.random.rand(rois_per_image) * fg_num_rois)
rand_num = torch.from_numpy(rand_num).long().cuda()
fg_inds = fg_inds[rand_num]
fg_rois_per_this_image = rois_per_image
bg_rois_per_this_image = 0
elif bg_num_rois > 0 and fg_num_rois == 0:
# sampling bg
#rand_num = torch.floor(torch.rand(rois_per_image) * bg_num_rois).long().cuda()
rand_num = np.floor(np.random.rand(rois_per_image) * bg_num_rois)
rand_num = torch.from_numpy(rand_num).long().cuda()
bg_inds = bg_inds[rand_num]
bg_rois_per_this_image = rois_per_image
fg_rois_per_this_image = 0
else:
print("relpn: bg_num_rois = 0 and fg_num_rois = 0, this should not happen!")
# The indices that we're selecting (both fg and bg)
keep_inds = torch.cat([fg_inds, bg_inds], 0)
keep_inds_batch[i].copy_(keep_inds)
# Select sampled values from various arrays:
labels_rel_batch[i].copy_(labels[i][keep_inds])
# Clamp relation labels for the background RoIs to 0
labels_rel_batch[i][fg_rois_per_this_image:] = 0
roi_pairs_batch[i].copy_(all_roi_pairs[i][keep_inds])
roi_pairs_batch[i,:,0] = i
return labels_rel_batch, roi_pairs_batch, keep_inds_batch
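
# Shape notes (inferred from the indexing above; stated as assumptions rather than a spec):
#   roi_pairs: (batch, num_pairs, 9) -> col 0 batch index, cols 1:5 subject box, cols 5:9 object box
#   gt_boxes:  (batch, num_gt, 21 + num_gt) -> cols 0:4 box coords, col 21+j relation label towards gt box j
#   returns:   labels (batch, rois_per_image), sampled roi pairs (batch, rois_per_image, 9),
#              and the kept indices (batch, rois_per_image)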
| 1.960938 | 2 |
adminclass.py | desuraj/LMS | 1 | 12795888 | <filename>adminclass.py
import pickle
from function import*
###############################################################################
###################################################################
class Admin:
sdist={}
fdist={}
bdist={}
# def Main(self): ############## Admin Menu ###################
# main(self)
def getbook(self): ############ book reg. #################
Add_Book(self,Admin.bdist)
def showbook(self): ############### book detail ##########
Display_Book(self)
def getstudent(self): ############### student reg. ################
Register_Student(self,Admin.sdist)
def showstudent(self): ############### student detail #############
Display_Student(self)
def getfaculty(self): ############## faculty reg. ###############
Register_Faculty(self,Admin.fdist)
def showfaculty(self): ############# faculty detail ############
Display_Faculty(self)
def adminlogin(self): ############## Admin login ##############
if Admin_LogIn(self):
main()
def sbookissue(self): ############## Book Issue #################
SBook_Issue(self)
def bookremove(self): ################ book remove ##################
Book_Remove(self)
def sbookreturn(self): ################# student book return ###############
SBook_Return(self)
def fbookissue(self): ##################### faculty book issue ##############
FBook_Issue(self)
def fbookreturn(self): ######################## faculty book return ###########
FBook_Return(self)
def dayfine(self): #################### fine cal ######################
DayFine_cal(self)
################################################################################################
#from adminclass import*
#import pickle
# from studentclass import*
# from facultyclass import*
########################## BOOK regs. start ##########################################
def br():
l=[]
n=int(input("Enter No of Book to Add: "))
for _ in range(n):
print("\n_______________________________________________")
print(f"Enter Detail for Book: {_+1}")
l.append(Admin())
l[_].getbook()
with open("bookdetail.pkl","wb") as fp:
for _ in l:
pickle.dump(_,fp)
print("\nTHANK YOU_!!!!\nBooks Are Added Successfully")
######################## BOOK regs. End ######################################
########################### Student reg. start ####################################
def sr():
l=[]
n = int(input("Enter No of Student To Add: "))
for _ in range(n):
print("\n_______________________________________________")
print(f"Enter Detail for Student: {_+1}\n")
l.append(Admin())
l[_].getstudent()
with open("studentdetail.pkl","wb") as fp:
for _ in l:
pickle.dump(_,fp)
print("\nTHANK YOU_!!!!\nStudents Are Added Successfully")
########################### student regs. End ####################################
########################## faculti reg. start ##################################
def fr():
l=[]
n=int(input("How many Faculty you want to ADD: "))
for _ in range(n):
print("\n_______________________________________________")
print(f"Enter Detail for Faculty: {_+1}\n")
l.append(Admin())
l[_].getfaculty()
with open("facultydetail.pkl","wb") as fp:
for _ in l:
pickle.dump(_,fp)
print("\nTHANK YOU_!!!!\nFaculties Are Added Successfully")
######################## faculti register end #############################
################################# show book detail ##############################z
def bs():
with open("bookdetail.pkl","rb") as fp:
while True:
try:
obj = pickle.load(fp)
obj.showbook()
except EOFError:
print("\n___________________Data Finish")
break
print("_"*55)
with open("bdist.pkl","rb") as p:
objdist=pickle.load(p)
print("Book code with Remaining Copies and Name :")
print("'{Book code : [no. of Copies , Book Name]}'\n")
print(objdist)
############################### show book detail end ###########################
####################### show student detail ##############################
def ss():
with open("studentdetail.pkl","rb") as fp:
while True:
try:
obj = pickle.load(fp)
# print(type(obj))
# print(obj)
obj.showstudent()
except EOFError:
print("\n______________________Data Finish")
break
print("_"*55)
with open("sdist.pkl","rb") as p:
objdist=pickle.load(p)
print(objdist)
############################ show student detail end #################################
############################ show faculty detail start ##########################
def fs():
with open("facultydetail.pkl","rb") as fp:
while True:
try:
obj = pickle.load(fp)
obj.showfaculty()
except EOFError:
print("\nData Finish")
break
print("_"*55)
with open("fdist.pkl","rb") as p:
objdist=pickle.load(p)
print(objdist)
############################### show faculty detail end ##################################
################################# Admin LogIn ################################
# def al():
# a1=Admin()
# a1.adminlogin()
############################# Admin LogIn End ##################################
############################ Student Log start ##################################
# def sl():
# s1=Student()
# s1.studentlogin()
############################## student log end ##############################
############################## faculty login start ######################
# def fl():
# f1=Faculty()
# f1.facultylogin()
############################ faculty login End #######################
############################ studentbook Issue ###################
def sbi():
a1=Admin()
a1.sbookissue()
#################### book remove ###################
def rb():
a2=Admin()
a2.bookremove()
def sbr(): ############# student book return #########################
a3=Admin()
a3.sbookreturn()
def fbi(): ############### faculty book issue ###################
a4=Admin()
a4.fbookissue()
def fbr(): ############### faculty book return ######################
a5=Admin()
a5.fbookreturn()
def fc(): #################### fine calculate ###################
a5=Admin()
a5.dayfine()
def sb():
Search_Book()
def main():
while True:
print("\n************************************* Admin Menu ***************************************")
op = int(input("\nEnter Your Opetion\n1 Book Register\n2 Book Deatil\n3 Student Reg.\n4 Student Detail\n5 Faculty Reg.\n6 Faculty Detail\n7 Student Book Issue \n8 Remove Book \n9 Student Book Return \n10 Faculty Book Issue\n11 Faculty Book Return\n12 To Calculate Fine\n13 for Search Book \n0 for Exit\n"))
if op == 1:
br()
elif op == 2:
bs()
elif op == 3:
sr()
elif op == 4:
ss()
elif op == 5:
fr()
elif op == 6:
fs()
elif op == 7:
sbi()
elif op == 8 :
rb()
elif op == 9 :
sbr()
elif op == 10:
fbi()
elif op == 11:
fbr()
elif op == 12:
fc()
elif op == 13:
sb()
elif op == 0:
break
| 2.875 | 3 |
server/edd/campaign/urls.py | trussworks/edd | 13 | 12795889 | from django.contrib.auth.decorators import login_required
from django.urls import path
from . import views
app_name = "edd.campaign"
urlpatterns = [
path("campaign/", login_required(views.CampaignIndexView.as_view()), name="index"),
path(
"campaign/<int:page>/",
login_required(views.CampaignIndexView.as_view()),
name="index-paged",
),
path(
"c/<str:slug>/",
login_required(views.CampaignDetailView.as_view()),
name="detail",
),
path(
"c/<str:slug>/permissions/",
login_required(views.CampaignPermissionView.as_view()),
name="permission",
),
path(
"c/<str:slug>/page/<int:page>/",
login_required(views.CampaignDetailView.as_view()),
name="detail-paged",
),
]
| 1.96875 | 2 |
hknweb/course_surveys/views.py | erictang000/hknweb | 20 | 12795890 | <reponame>erictang000/hknweb<filename>hknweb/course_surveys/views.py
from django.views.generic import TemplateView
from hknweb.markdown_pages.models import MarkdownPage
from hknweb.academics.models import Course, Instructor
from hknweb.course_surveys.constants import (
Attr,
COURSE_SURVEY_PREFIX,
COURSE_SURVEY_TRANSPARENCY_PAGE_PATHS,
)
class IndexView(TemplateView):
template_name = "course_surveys/index.html"
def get_context_data(self, **kwargs):
context = {
Attr.PAGES: self._get_pages(),
Attr.COURSES: self._get_courses(),
Attr.INSTRUCTORS: self._get_instructors(),
}
return context
@staticmethod
def _get_courses():
courses = []
seen_courses = set()
for course in Course.objects.all():
if not course.icsr_course.exists():
continue
most_recent_icsr = course.icsr_course.latest(
"icsr_semester__year",
"-icsr_semester__year_section",
)
key = "{dept} {number}".format(
dept=most_recent_icsr.icsr_department.abbr,
number=most_recent_icsr.course_number,
)
if key in seen_courses:
continue
seen_courses.add(key)
courses.append(
{
Attr.DEPT: most_recent_icsr.icsr_department.abbr,
Attr.NUMBER: most_recent_icsr.course_number,
}
)
return courses
@staticmethod
def _get_instructors():
instructors = []
seen_instructors = set()
for instructor in Instructor.objects.all():
if not instructor.icsr_instructor.exists():
continue
most_recent_icsr = instructor.icsr_instructor.latest(
"icsr_semester__year",
"-icsr_semester__year_section",
)
name = "{first_name} {last_name}".format(
first_name=most_recent_icsr.first_name,
last_name=most_recent_icsr.last_name,
)
key = name
if key in seen_instructors:
continue
seen_instructors.add(key)
instructors.append(
{
Attr.NAME: name,
}
)
return instructors
@staticmethod
def _get_pages():
pages = []
for page_path in COURSE_SURVEY_TRANSPARENCY_PAGE_PATHS:
page = MarkdownPage.objects.filter(path=page_path).first()
if page is not None:
page_name = page.name
if page_name.startswith(COURSE_SURVEY_PREFIX):
page_name = page_name[len(COURSE_SURVEY_PREFIX) :]
pages.append(
{
Attr.NAME: page_name,
Attr.PATH: "/pages/" + page_path,
}
)
return pages
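
# Wiring sketch (hypothetical URLconf entry, not taken from hknweb itself):
#   from django.urls import path
#   from hknweb.course_surveys.views import IndexView
#   urlpatterns = [path("course-surveys/", IndexView.as_view(), name="course_surveys")]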
| 2.28125 | 2 |
python codes/triangle.py | doom3007/Hacktoberfest2020 | 1 | 12795891 | <filename>python codes/triangle.py
# Lines count, changable.
PYRAMID_HEIGHT = 6
WALL = "|"
for line in range(1, PYRAMID_HEIGHT+1):
print(WALL + "*" * line + " " * (PYRAMID_HEIGHT - line) + WALL) | 3.5625 | 4 |
peach_invasion/ui/stats.py | tealc-indeed/peach_invasion | 2 | 12795892 | from peach_invasion.settings import Settings
from peach_invasion.ui.scoreboard import Scoreboard
class Stats:
""" Track gaming statistics """
def __init__(self, settings: Settings, scoreboard: Scoreboard):
self._settings = settings
self._scoreboard = scoreboard
# High score never resets throughout the game
self._high_score = 0
self._set_initial_values()
def reset(self):
self._set_initial_values()
def _set_initial_values(self):
self._score = 0
self._level = 1
self._health = self._settings.health_limit
self._ammo = self._settings.ammo_limit
# Rerender all displaying statistics after it has been changed
self._scoreboard.render_all(self._score, self._high_score, self._level, self._health, self._ammo)
@property
def score(self):
return self._score
@property
def high_score(self):
return self._high_score
def score_up(self, kills):
""" Adds user scores based on the number of kills and the level """
self._score += kills * self._level * 2
self._scoreboard.render_score(self._score)
# Update high score
if self._score > self._high_score:
self._high_score = self._score
self._scoreboard.render_high_score(self._high_score)
@property
def health(self):
return self._health
def player_lost_life(self):
self._health -= 1
self._scoreboard.render_health(self._health)
@property
def level(self):
return self._level
def level_up(self):
self._level += 1
self._scoreboard.render_level(self._level)
@property
def player_speed(self):
return self._settings.player_speed * self._game_speed
@property
def enemy_speed(self):
return self._settings.enemy_x_speed * self._game_speed, \
self._settings.enemy_y_speed * self._game_speed
@property
def bullet_speed(self):
return 0, -self._settings.bullet_speed * self._game_speed
@property
def _game_speed(self):
return self._settings.speedup_rate ** self._level
@property
def ammo(self):
return self._ammo
@ammo.setter
def ammo(self, ammo):
if ammo != self._ammo:
self._scoreboard.render_ammo(ammo)
self._ammo = ammo
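

# Usage sketch (the Settings/Scoreboard construction is assumed; only the Stats API below
# comes from this module):
#   stats = Stats(settings, scoreboard)
#   stats.score_up(kills=3)      # add score scaled by the current level
#   stats.player_lost_life()     # decrement health and re-render it
#   stats.level_up()             # bump the level, which also speeds the game up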
| 2.9375 | 3 |
model.py | stanleefdz/Predict-Flight-Delays | 0 | 12795893 | # -*- coding: utf-8 -*-
"""Model.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1QPnK5YOh8kRYPOOue6txwrgUqwKOMS0I
"""
# # Use seaborn for pairplot
# !pip install -q seaborn
# !pip install tensorflow==2.0.0
# # Use some functions from tensorflow_docs
# !pip install -q git+https://github.com/tensorflow/docs
# !pip install h5py pyyaml
from __future__ import absolute_import, division, print_function, unicode_literals
import pathlib
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf
from tensorflow import keras
import tensorflow_docs as tfdocs
import tensorflow_docs.plots
import tensorflow_docs.modeling
# Commented out IPython magic to ensure Python compatibility.
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.patches import ConnectionPatch
from collections import OrderedDict
from matplotlib.gridspec import GridSpec
from sklearn import metrics, linear_model
from sklearn.preprocessing import PolynomialFeatures, StandardScaler, normalize
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict
from scipy.optimize import curve_fit
import warnings
plt.rcParams["patch.force_edgecolor"] = True
plt.style.use('fivethirtyeight')
mpl.rc('patch', edgecolor = 'dimgray', linewidth=1)
# from IPython.core.interactiveshell import InteractiveShell
# InteractiveShell.ast_node_interactivity = "last_expr"
pd.options.display.max_columns = 50
# %matplotlib inline
warnings.filterwarnings("ignore")
# import pickle
# create and save all the models
airlines = pd.read_csv('airlines.csv')
carriers = list(airlines['IATA_CODE'])
# print(carriers)
global train_stats
def norm(x):
global train_stats
return (x - train_stats['mean']) / train_stats['std']
def ret_stats():
return train_stats
def build_model(train_ds):
model = keras.Sequential([
tf.keras.layers.Dense(64, activation='relu', input_shape=[len(train_ds.keys())]),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(1)
])
optimizer = tf.keras.optimizers.RMSprop(0.001)
model.compile(loss='mse',
optimizer=optimizer,
metrics=['mae', 'mse'])
return model
def do_create_models():
for carrier in carriers:
# create a model and save it for each carrier
global train_stats
df = pd.read_csv('carriers/carrier' + str(carrier) + 'data.csv')
df.drop(['Unnamed: 0'], axis=1, inplace=True)
# encode the origin
encoder = LabelEncoder()
encoder.fit(df['ORIGIN_AIRPORT'])
encoded_data_map = dict(zip(encoder.classes_, encoder.transform(encoder.classes_)))
df['ORIGIN_AIRPORT'] = encoder.fit_transform(df['ORIGIN_AIRPORT'])
# create the train and test dataset
train_dataset = df.sample(frac=0.8,random_state=0)
test_dataset = df.drop(train_dataset.index)
# getting the stats
train_stats = train_dataset.describe()
train_stats.pop("ARRIVAL_DELAY")
train_stats = train_stats.transpose()
train_stats.to_csv('stats/train_stats' + str(carrier) + '.csv')
# defining the train and test labels
train_labels = train_dataset.pop('ARRIVAL_DELAY')
test_labels = test_dataset.pop('ARRIVAL_DELAY')
# normalize the data
normed_train_data = norm(train_dataset)
normed_test_data = norm(test_dataset)
# # define the model
# model = build_model(train_dataset)
# # train the model
# EPOCHS = 100
# # The patience parameter is the amount of epochs to check for improvement
# early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
# early_history = model.fit(normed_train_data, train_labels,
# epochs=EPOCHS, validation_split = 0.2, verbose=0,
# callbacks=[early_stop, tfdocs.modeling.EpochDots()])
# # calculating the loss
# loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=2)
# # weights = model.get_weights()
# # fpkl = open('drive/My Drive/pickle_models/model-' + str(carrier) + '-weights.pkl', 'wb')
# # pickle.dump(weights, fpkl, protocol=pickle.HIGHEST_PROTOCOL)
# print("Testing set Mean Abs Error: {:5.2f} minutes".format(mae))
# model.save('models/model-' + str(carrier) + '.h5')
print('OK ' + str(carrier))
# let's create the input pipeline
from datetime import datetime
def conv_to_datetime(str_):
return datetime.strptime(str_, '%Y-%m-%d %H:%M:%S')
def conv_to_time(str_):
return datetime.strptime(str_, '%H:%M:%S')
import datetime
def string_to_time(time_string):
if pd.isnull(time_string):
return np.nan
else:
if time_string == 2400:
time_string = 0
time_string = "{0:04d}".format(int(time_string))
time_ = datetime.time(int(time_string[0:2]), int(time_string[2:4]))
return time_
def func(x):
return x.hour * 3600 + x.minute * 60 + x.second
dayOfWeek = 6
airline = 'AA'
origin = 'LAX'
dest = 'SEA'
sd = 200
ddelay = -10
sa = 800
dist = 1200
do_create_models()
# global train_stats
# stats = ret_stats()
# print(stats)
def processInput(input_):
global train_stats
processed = []
time_sd = string_to_time(np.int64(input_["sd"]))
time_sa = string_to_time(np.int64(input_["sa"]))
time_sd = func(time_sd)
time_sa = func(time_sa)
# encode airlines to their numbers
df = pd.read_csv('carriers/carrier' + str(input_["carrier"]) + 'data.csv')
df.drop(['Unnamed: 0'], axis=1, inplace=True)
encoder = LabelEncoder()
encoder.fit(df['ORIGIN_AIRPORT'])
encoded_data_map = dict(zip(encoder.classes_, encoder.transform(encoder.classes_)))
carrier = input_["carrier"]
for carr_ in carriers:
# create a model and save it for each carrier
if carr_ == carrier:
df = pd.read_csv('carriers/carrier' + str(carr_) + 'data.csv')
df.drop(['Unnamed: 0'], axis=1, inplace=True)
# encode the origin
encoder = LabelEncoder()
encoder.fit(df['ORIGIN_AIRPORT'])
encoded_data_map = dict(zip(encoder.classes_, encoder.transform(encoder.classes_)))
# print(encoded_data_map)
df['ORIGIN_AIRPORT'] = encoder.fit_transform(df['ORIGIN_AIRPORT'])
# # create the train and test dataset
# train_dataset = df.sample(frac=0.8,random_state=0)
# test_dataset = df.drop(train_dataset.index)
# # getting the stats
# train_stats = train_dataset.describe()
# train_stats.pop("ARRIVAL_DELAY")
# train_stats = train_stats.transpose()
# # defining the train and test labels
# train_labels = train_dataset.pop('ARRIVAL_DELAY')
# test_labels = test_dataset.pop('ARRIVAL_DELAY')
# # normalize the data
# normed_train_data = norm(train_dataset)
# normed_test_data = norm(test_dataset)
# # define the model
# model = build_model(train_dataset)
# # train the model
# EPOCHS = 100
# # The patience parameter is the amount of epochs to check for improvement
# early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
# early_history = model.fit(normed_train_data, train_labels,
# epochs=EPOCHS, validation_split = 0.2, verbose=0,
# callbacks=[early_stop, tfdocs.modeling.EpochDots()])
# # calculating the loss
# loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=2)
# print("Testing set Mean Abs Error: {:5.2f} minutes".format(mae))
# model.save('models/model-' + str(carrier) + '.h5')
# weights = model.get_weights()
# fpkl = open('model-' + str(carrier) + '-weights.pkl', 'wb')
# pickle.dump(weights, fpkl, protocol=pickle.HIGHEST_PROTOCOL)
# print('OK ' + str(carrier))
origin = input_["origin"]
ddelay = input_["ddelay"]
origin_ = encoded_data_map[origin]
dist = input_["dist"]
weekday = input_["dayOfWeek"]
input_ = {"time_insec_dep" : time_sd, "time_insec_arr": time_sa,
"ORIGIN_AIRPORT": origin_, "DEPARTURE_DELAY": ddelay,
"DISTANCE": dist, "weekday": weekday }
df = pd.DataFrame([input_])
df = norm(df)
model = keras.models.load_model('models/model-' + str(carrier) +'.h5')
print("OK")
return df, model
# input_ = {
# "dayOfWeek": dayOfWeek,
# "carrier": airline,
# "origin": origin,
# "sd": sd,
# "ddelay": ddelay,
# "sa": sa,
# "dist": dist
# }
# test_input, model = processInput(input_)
# from google.colab import drive
# drive.mount('/content/drive')
# !ls
# test_predictions_input = model.predict(test_input).flatten()
# print("The delay is: ", test_predictions_input[0], " minutes")
| 2.140625 | 2 |
video/migrations/0001_initial.py | Sinchard/myvideo | 0 | 12795894 | <reponame>Sinchard/myvideo
# Generated by Django 3.1.3 on 2020-12-03 07:17
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
import wagtail.core.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
('wagtailcore', '0059_apply_collection_ordering'),
('wagtailimages', '0022_uploadedimage'),
]
operations = [
migrations.CreateModel(
name='VideoIndexPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.page')),
('intro', wagtail.core.fields.RichTextField(blank=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='VideoPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.page')),
('date', models.DateField(verbose_name='Post date')),
('intro', models.CharField(max_length=250)),
('body', wagtail.core.fields.RichTextField(blank=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='VideoPageGalleryImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('caption', models.CharField(blank=True, max_length=250)),
('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailimages.image')),
('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='gallery_images', to='video.videopage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
]
| 1.789063 | 2 |
ConfigRouter.py | Dogblack/click_ddos | 0 | 12795895 | <filename>ConfigRouter.py
import re
from define import *
'''
'rst_attack'
'echo_attack'
'smuf_attack'
'land_attack'
'red'
'''
class ConfigWriter(object):
def __init__(self,ControlPort,Ip,IpDst,IpBrodCast,GateWay,Mac):
#basic
self.Out_default = 'out :: Queue(1024) -> ToDevice('+GateWay+')\n'
self.Out_red = 'out :: RED(768,1024,0.02)->Queue(1024) -> ToDevice('+GateWay+')\n'
self.dropLog ='dropLog :: ToIPSummaryDump(/root/log/droplog,CONTENTS timestamp ip_src ip_dst ip_len ip_proto count)\n'
self.passLog ='passLog :: ToIPSummaryDump(/root/log/passlog,CONTENTS timestamp ip_src ip_dst ip_len ip_proto count)\n'
self.Classifier ='FromDevice('+GateWay+')-> cl :: Classifier(12/0806 20/0001,12/0806 20/0002,12/0800)\n'
self.arpr ='-> arpr :: ARPResponder('+Ip+' '+Mac+')\n->out;\n'
self.arpq ='cl[1] -> [1]arpq :: ARPQuerier('+Ip+','+Mac+')\n->out;\n'
self.Set_IPAddr ='SetIPAddress('+IpDst+')'
self.Ip_strip = 'cl[2]->Strip(14)\n-> CheckIPHeader(CHECKSUM false)\n->CheckLength(65535)\n'
self.IpPrintR ='-> IPPrint("recv IP detail")\n'
self.IpRewriter ='rw :: IPAddrPairRewriter(pattern - '+IpDst+' 0 0)\n'
self.DecIpTTL ='-> dt :: DecIPTTL\n'
self.IpFragment ='-> fr :: IPFragmenter(300)\n'
self.IpPrintS ='-> IPPrint("send IP detail")\n'
self.IpOut ='-> arpq;\n'
self.red_flag =0
#strategy
self.rst_attack = 'rst,'
self.echo_attack ='dst udp port 7 or 19,'
self.smuf_attack ='src host '+IpBrodCast+' and icmp,'
self.land_attack = 'dst '+Ip+' and src '+Ip+' and syn,'
# def ChangePort(self,newPort):
# self.Control = 'CONTROL :: ControlSocket(tcp,'+newPort+')\n'
def strategy_init(self,Strategy:list,IpBanList:list,IpPassList:list):
self.Strategy_build=''
self.length =len(Strategy)+len(IpBanList)+len(IpPassList)
for i in Strategy:
if i == 'rst_attack':
self.Strategy_build+= self.rst_attack
elif i =='echo_attack':
self.Strategy_build += self.echo_attack
elif i =='smuf_attack':
self.Strategy_build += self.smuf_attack
elif i =='land_attack':
self.Strategy_build += self.land_attack
elif i =='red':
self.red_flag = 1
self.length=self.length-1
else:
print('STRATEGY ERROR')
if IpBanList:
for i in IpBanList:
self.Strategy_build+='src '+i+','
if IpPassList:
for i in IpPassList:
self.Strategy_build+='src '+i+','
#IpClassfier
self.Ip_Classfier = '->ic :: IPClassifier( '+self.Strategy_build+ '-)\n'
        # classifier output ports follow the pattern order built above: drop rules
        # (strategies minus 'red', then banned IPs), then whitelisted IPs, then the
        # default port for normal traffic
        drop_list = [s for s in Strategy if s != 'red'] + IpBanList
        port = ''
        out_index = 0
        for name in drop_list:
            port += 'ic['+str(out_index)+']->dropLog\n->Print("['+name+' dropped]")\n->Discard\n'
            out_index += 1
        for ip in IpPassList:
            port += 'ic['+str(out_index)+']->passLog\n->Print("['+ip+' passed]")\n->out\n'
            out_index += 1
        port += 'ic['+str(out_index)+']->'+self.IpRewriter+self.DecIpTTL+self.IpFragment+self.IpPrintS+'->passLog'+self.IpOut+'\n'
if self.red_flag == 0:
basic =self.Control + self.Out_default + self.dropLog + self.passLog + self.Classifier + self.arpr + self.arpq + self.Ip_strip
basic+=self.IpPrintR
self.basic = basic
else:
basic = self.Control + self.Out_red + self.dropLog + self.passLog + self.Classifier + self.arpr + self.arpq + self.Ip_strip
basic += self.IpPrintR
self.basic =basic
self.port = port
    '''Added a whitelist (IpPassList). A senior student's thesis suggested that a Queue's input port can accept multiple connections, and the change is based on that; see the Queue element for details.'''
def NewConfig(self,controlPort,Strategy,IpBanList,IpPassList,id):
self.Control = 'CONTROL :: ControlSocket(tcp,'+str(controlPort)+')\n'
self.strategy_init(Strategy,IpBanList,IpPassList)
config =self.basic+self.Ip_Classfier+self.port
# try:
# file = open('click_'+str(id)+'.click', 'w',encoding='UTF-8')
# file.write(config)
# except IOError:
# print('FILE WRITE ERROR')
# file.close()
# else:
# print('FILE WRITE SUCCESS')
# file.close()
return config
'''
def ConfigDefine(self,conf,id):
try:
file = open('click_'+id+'.click','w')
file.write(conf)
except IOError:
print('FILE WRITE ERROR')
file.close()
else:
print('FILE WRITE SUCCESS')
file.close()
'''
if __name__ == '__main__':
    writer = ConfigWriter(22222,'192.168.3.128','192.168.3.129','192.168.3.255','ens34','00:0c:29:44:f4:4c')
    writer.NewConfig(999,('smuf_attack','land_attack','red'),('10.1.1.2','10.1.1.3'),'',1124)
    '''The parameter values here were changed arbitrarily while debugging.'''
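    # Illustrative note (added, not part of the original script): for the call
    # above, NewConfig returns a Click configuration string roughly shaped like
    # the sketch below; element arguments come from the attributes built in
    # __init__, and the excerpt is hand-written rather than captured output:
    #
    #   CONTROL :: ControlSocket(tcp,999)
    #   out :: RED(768,1024,0.02)->Queue(1024) -> ToDevice(ens34)
    #   FromDevice(ens34)-> cl :: Classifier(12/0806 20/0001,12/0806 20/0002,12/0800)
    #   ...
    #   ->ic :: IPClassifier( src host 192.168.3.255 and icmp,dst 192.168.3.128 and src 192.168.3.128 and syn,src 10.1.1.2,src 10.1.1.3,-)
    #   ic[0]->dropLog ... ->Discard
    #   ic[4]->rw :: IPAddrPairRewriter(...) ... -> arpq;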
main.py | dsymbol/reddit-nft-freebies | 8 | 12795896 | from datetime import datetime
from utils.api import API
from time import sleep
from config import *
import random
def load_file(file):
try:
l = []
with open(file, 'r') as f:
for line in f:
l.append(line.rstrip())
return l
except FileNotFoundError:
with open('comment.db', 'w') as f:
pass
return []
def get_nft():
account = API(REDDIT_CLIENT_ID, REDDIT_CLIENT_SECRET, REDDIT_USERNAME, REDDIT_PASSWORD)
account.shadowban_check()
reddit = account.authorize()
account.authorized(reddit)
reddit.read_only = False
commented = load_file("comment.db")
subreddit = reddit.subreddit("NFTsMarketplace")
keywords = ["wallet", "address"]
sleep(1)
while True:
try:
for post in subreddit.hot(limit=25):
if (post not in commented and any(x in post.title.lower() for x in keywords)
or post not in commented and keywords[1] in post.link_flair_text):
commented.append(post)
with open('comment.db', 'a') as f:
f.write(f"{str(post)}\n")
post.reply(ETH_ADDRESS)
post.upvote()
print(f'{post.title}')
                    rndm_sleep = random.randint(300, 600)
                    to_mins = round(rndm_sleep / 60, 1)
print(f"zZz for {str(to_mins)} minutes")
sleep(rndm_sleep)
except:
print("Error occurred, retrying.")
sleep(500)
print("+")
print(f"[{datetime.now().replace(microsecond=0)}] zZz for 6 hours")
sleep(21600)
if __name__ == '__main__':
get_nft()
| 3.046875 | 3 |
fbcrawl/items.py | JoshuaKissoon/fbcrawl | 1 | 12795897 | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
from scrapy.loader.processors import TakeFirst, Join, MapCompose
from datetime import datetime, timedelta
def comments_strip(string,loader_context):
lang = loader_context['lang']
if lang == 'it':
if string[0].rfind('Commenta') != -1:
return
else:
return string[0].rstrip(' commenti')
elif lang == 'en':
if(string[0] == 'Share'):
return '0'
new_string = string[0].rstrip(' Comments')
while new_string.rfind(',') != -1:
new_string = new_string[0:new_string.rfind(',')] + new_string[new_string.rfind(',')+1:]
return new_string
else:
return string
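# Illustrative examples for comments_strip (added; the inputs are made up but the
# behaviour follows the code above):
#   comments_strip(['1,234 Comments'], {'lang': 'en'})  ->  '1234'
#   comments_strip(['Share'], {'lang': 'en'})           ->  '0'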
def reactions_strip(string,loader_context):
lang = loader_context['lang']
if lang == 'it':
newstring = string[0]
#19.298.873
if len(newstring.split()) == 1:
while newstring.rfind('.') != -1:
newstring = newstring[0:newstring.rfind('.')] + newstring[newstring.rfind('.')+1:]
return newstring
#Pamela, Luigi e altri 4
else:
return string
            # unreachable after the return above; mirrors the commented-out
            # 'en' handling further down
            # friends = newstring.count(' e ') + newstring.count(',')
            # newstring = newstring.split()[::-1][0]
            # while newstring.rfind('.') != -1:
            #     newstring = newstring[0:newstring.rfind('.')] + newstring[newstring.rfind('.')+1:]
            # return int(newstring) + friends
elif lang == 'en':
newstring = string[0]
#19,298,873
if len(newstring.split()) == 1:
while newstring.rfind(',') != -1:
newstring = newstring[0:newstring.rfind(',')] + newstring[newstring.rfind(',')+1:]
return newstring
# #Mark and other 254,134
# elif newstring.split()[::-1][1].isdigit():
# friends = newstring.count(' and ') + newstring.count(',')
# newstring = newstring.split()[::-1][1]
# while newstring.rfind(',') != -1:
# newstring = newstring[0:newstring.rfind(',')] + newstring[newstring.rfind(',')+1:]
# return int(newstring) + friends
# #Philip and 1K others
else:
return newstring
else:
return string
def url_strip(url):
fullurl = url[0]
#catchin '&id=' is enough to identify the post
i = fullurl.find('&id=')
if i != -1:
return fullurl[:i+4] + fullurl[i+4:].split('&')[0]
else: #catch photos
i = fullurl.find('/photos/')
if i != -1:
return fullurl[:i+8] + fullurl[i+8:].split('/?')[0]
else: #catch albums
i = fullurl.find('/albums/')
if i != -1:
return fullurl[:i+8] + fullurl[i+8:].split('/?')[0]
else:
return fullurl
def parse_date(date):
import json
d = json.loads(date[0]) #nested dict of features
flat_d = dict() #only retain 'leaves' of d tree
def recursive_items(dictionary):
'''
Get most nested key:value pair of nested dict
'''
for key, value in dictionary.items():
if type(value) is dict:
yield from recursive_items(value)
else:
yield (key, value)
for key, value in recursive_items(d):
flat_d[key] = value
#returns timestamp in localtime conversion from linux timestamp UTC
return str(datetime.fromtimestamp(flat_d['publish_time']))
def id_strip(post_id):
import json
d = json.loads(post_id[::-1][0]) #nested dict of features
return str(d['top_level_post_id'])
class FbcrawlItem(scrapy.Item):
source = scrapy.Field()
date = scrapy.Field()
text = scrapy.Field(
output_processor=Join(separator=u'')
) # full text of the post
comments = scrapy.Field(
output_processor=comments_strip
)
reactions = scrapy.Field(
output_processor=reactions_strip
) # num of reactions
likes = scrapy.Field(
output_processor=reactions_strip
)
ahah = scrapy.Field()
love = scrapy.Field()
wow = scrapy.Field()
sigh = scrapy.Field()
grrr = scrapy.Field()
share = scrapy.Field() # num of shares
url = scrapy.Field(
output_processor=url_strip
)
post_id = scrapy.Field(
output_processor=id_strip
)
shared_from = scrapy.Field()
class CommentsItem(scrapy.Item):
source = scrapy.Field()
reply_to=scrapy.Field()
date = scrapy.Field( # when was the post published
output_processor=parse_date
)
text = scrapy.Field(
output_processor=Join(separator=u'')
) # full text of the post
reactions = scrapy.Field(
output_processor=reactions_strip
) # num of reactions
likes = scrapy.Field(
output_processor=reactions_strip
)
source_url = scrapy.Field()
url = scrapy.Field()
#ahah = scrapy.Field()
#love = scrapy.Field()
#wow = scrapy.Field()
#sigh = scrapy.Field()
#grrr = scrapy.Field()
#share = scrapy.Field() # num of shares
| 2.46875 | 2 |
messagebird/webhook.py | JornEngelbart/python-rest-api | 0 | 12795898 | from messagebird.base import Base
class Webhook(Base):
def __init__(self):
self.url = None
self.token = None
| 1.640625 | 2 |
bluedonkey.py | jadonk/BlueDonkey | 9 | 12795899 | #!/usr/bin/env python3
import os, sys, subprocess, socket
#import cgroups
def start_mjpg_streamer():
print("Starting up mjpg_streamer.")
# TODO: Add notification if either mjpg-streamer or
# cvfilter_py.so aren't installed
# TODO: Detect any error if process exits,
# such as the uvcvideo crash I'm seeing
subprocess.run(["mjpg_streamer", "-i",
"input_opencv.so -r 640x480 --filter /usr/lib/mjpg-streamer/cvfilter_py.so --fargs " + os.path.realpath(__file__),
"-o",
"output_http.so -p 8090 -w /usr/share/mjpg-streamer/www"],
stdin=subprocess.PIPE
#, stdout=subprocess.PIPE #Commented to allow visibility of
#, stderr=subprocess.PIPE #responses from the system on commandline
)
if __name__ == "__main__":
start_mjpg_streamer()
# This method is called by the mjpg_streamer command run above.
# This is what calls and executes the running code
def init_filter():
## Socket streams that were here previously are
## now moved to multiple sockets where they are used.
import line_follower
dc = dummy_car_control()
f = line_follower.mjs_filter(dc)
print("Returning process")
return f.process
# This class houses the car_control class
class dummy_car_control():
def __init__(self):
## Commented per jkridner's advice
import car_control
self.c = car_control.car_control()
#Output for the status in update method below
self.status_port = 3004
self.status_out = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.status_out.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.status_out.connect(("", self.status_port))
# This filehandle sends the data to the socket broadcast
self.status_file = self.status_out.makefile('w', buffering=None)
def tick(self):
self.c.tick()
return
def update(self, line, threshold):
(self.paused, self.throttle, self.steering, self.fps) = \
self.c.update(line)
# Code has been reworked to output to a separate filehandle pointing
# to the socket 3004, output to the dashboard under 'Status'
# Replaced the Status output below to be a JSON string
stri = "{"
if self.paused:
stri += '"Status":"Paused"'
else:
stri += '"Status":"Unpaused"'
if line:
stri += ', "Line_X":' + str(line[2]) + ', "Line_Y":' + str(line[3])
else:
stri += ', "Line_X":"No Line", "Line_Y":"No Line"'
stri += ',"Throttle":' + str(self.throttle) + ',"Steering":' + \
str(self.steering)
stri += ',"FPS":' + str(self.fps) + ',"Min_Threshold":' + \
str(threshold) + '}'
print(stri, "\r", end="", flush=True, file=self.status_file)
return ""
| 2.375 | 2 |
tests/ccxt_test.py | longniao/pointer_worker | 0 | 12795900 | <filename>tests/ccxt_test.py
# -*- coding: utf-8 -*-
import time
import ccxt
gateio = ccxt.gateio({
'proxies': {
'http': 'socks5://127.0.0.1:1080',
'https': 'socks5h://127.0.0.1:1080'
},
})
'''
symbol = 'ETH/USD'
timeframe = '5m'
limit = 300
since = bitmex.milliseconds() - limit * 60 * 1000
params = {'partial': False}
ret = bitmex.fetch_ohlcv(symbol, timeframe, since, 12*24, params)
print(ret)
'''
# print(huobi.id, huobi.load_markets())
'''
print(gateio.id)
markets = gateio.load_markets()
if markets:
for market, detail in markets.items():
if 'USD' in market:
print(market)
ohlcvs = gateio.fetch_ohlcv('BTC/USDT', timeframe='1d', limit=100)
if ohlcvs:
for ohlcv in ohlcvs:
t = time.localtime(ohlcv[0]/1000)
print(t, ohlcv)
tickers = gateio.fetch_ticker('BTC/USDT')
print(tickers)
'''
symbol = 'BTC/USDT'
print(symbol, gateio.fetch_ohlcv(symbol, '1d')) # one day
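# Note (added for clarity): each candle returned by ccxt's fetch_ohlcv is a list
# of the form [timestamp_ms, open, high, low, close, volume], which is why the
# commented-out loop above converts ohlcv[0] from milliseconds with /1000.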
| 2.09375 | 2 |
models/user.py | linkian209/PyBitWarden | 0 | 12795901 | <reponame>linkian209/PyBitWarden<gh_stars>0
"""models.user
This Module contains the User Model
"""
import pyotp
from app import db
from models import funcs
from lib.bitwarden import Bitwarden
from sqlalchemy import sql
class User(db.Model):
"""
This model is used to store users.
Attributes:
id (int): User ID
name (str): User's Name
email (str): User's Email
email_verified (bool): User's Email is verified
premium (bool): User's Premium Status
master_password_hint (str): Master Password Hint
culture (str): Language/Country string
totp_secret (str): Two Factor Authentication secret key
two_factor_enabled (bool): User has Two Factor Authentication Enabled
key (str): User's encryption key
security_stamp (str): Security Stamp
folders (relationship): Folders owned by user
        ciphers (relationship): Ciphers owned by user
devices (relationship): Devices owned by user
create_date (DateTime): Time when this user was created
update_date (DateTime): The timestamp of the last update
Args:
:param db.Model: The Model Base Class
"""
# Member Variables
id = db.Column(
db.String(64), name='id', primary_key=True,
default=funcs.generateSecureUUID
)
name = db.Column(db.String(128), nullable=False)
email = db.Column(db.String(128), nullable=False)
password_hash = db.Column(db.String(128), nullable=False)
email_verified = db.Column(
db.Boolean, nullable=False, default=False
)
premium = db.Column(
db.Boolean, nullable=False, default=False
)
master_password_hint = db.Column(db.Text, nullable=True)
culture = db.Column(
db.String(64), nullable=False, default='en-US'
)
totp_secret = db.Column(db.String(256), nullable=True)
two_factor_enabled = db.Column(
db.Boolean, nullable=False, default=False
)
key = db.Column(db.String(256), nullable=False)
security_stamp = db.Column(
db.String(64), nullable=False,
default=funcs.generateSecureUUID
)
folders = db.relationship(
'Folder', backref='user', lazy=True, passive_deletes=True
)
ciphers = db.relationship(
'Cipher', backref='user', lazy=True, passive_deletes=True
)
devices = db.relationship(
'Device', backref='user', lazy=True, passive_deletes=True
)
create_date = db.Column(db.DateTime(), server_default=sql.func.now())
update_date = db.Column(
db.DateTime, default=sql.func.now(),
onupdate=sql.func.now()
)
# Functions
def __repr__(self):
"""
Representation of this object as a string
Args:
:param self: This object
Returns:
str: String representation of object
"""
return '<User {}>'.format(self.name)
def toHash(self):
"""
Returns this object as a dict
Args:
:param self: This object
Returns:
dict: This object as a dict
"""
return {
'Id': self.id,
'Name': self.name,
'Email': self.email,
'EmailVerified': self.email_verified,
'Premium': self.premium,
'MasterPasswordHint': self.master_password_hint,
'Culture': self.culture,
'TwoFactorEnabled': self.two_factor_enabled,
'Key': self.key,
'PrivateKey': None,
'SecurityStamp': self.security_stamp,
'Organizations': [],
'Object': 'profile'
}
def verifyOTP(self, code):
"""
Verify the passed in code against the user's current OTP.
Args:
:param1 self: This object
:param2 code: The passed in OTP
Returns:
bool: True if the codes match, false otherwise.
"""
if(pyotp.TOTP(self.totp_secret).now() == code):
return True
return False
def decryptDataUsingMasterKey(self, data, master_key):
"""
The user model contains an encrypted version of its encryption key.
First, decrypt the master key then decrypt the data.
Args:
:param self: This user
:param data: The cipher string that needs decrypted
:param master_key: The master password used to decrypt the
encryption key
Returns:
bytes: The decrypted plain text as a byte string
"""
enc_key = Bitwarden.decrypt(
self.key.encode(), master_key[:32], mac_key=master_key[32:64]
)
return Bitwarden.decrypt(
data, enc_key[:32], mac_key=enc_key[32:64]
)
def encryptDataUsingMasterKey(self, data, master_key):
"""
The user model contains an encrypted version of the encryption key.
First decrypt that key then encrypt the data
Args:
:param self: This user
:param data: The plain text to be encrypted
:param master_key: The master key
Returns:
str: The encrypted cipher string
"""
enc_key = Bitwarden.decrypt(
self.key.encode(), master_key[:32], mac_key=master_key[32:64]
)
return Bitwarden.encrypt(
data, enc_key[:32], mac_key=enc_key[32:64]
)
def comparePasswordHash(self, in_hash):
"""
Compares if the user's password hash matches the inputed one
Args:
:param self: The user
:param in_hash: The hash to compare against
Returns:
bool: True if the hashes are the same, false otherwise.
"""
return funcs.constantTimeCompare(self.password_hash, in_hash)
def updateMasterKey(self, old_password, new_password):
"""
This function updates the master key for the random encryption key. We
want to preserve this random encryption key. So we will decrypt with
the old key, then recrypt with the new key.
Args:
:param self: This user
            :param old_password: The old master password
            :param new_password: The new master password
"""
enc_key = Bitwarden.decrypt(
self.key, Bitwarden.makeKey(old_password, self.email), None
)
self.key = Bitwarden.encrypt(
enc_key, Bitwarden.makeKey(new_password, self.email)
)
        self.password_hash = Bitwarden.hashPassword(new_password, self.email)
self.security_stamp = funcs.generateSecureUUID()
| 2.765625 | 3 |
deprecated/prototypes/acceptor/feedstock_acceptor.py | materials-data-facility/connect | 1 | 12795902 | import os
import json
from flask import Flask
from tqdm import tqdm
from ..validator.schema_validator import Validator
from ..utils.paths import get_path
app = Flask(__name__)
# Function to accept user-generated feedstock
# Args:
# path: Path to the feedstock
# remove_old: Should fully accepted feedstock be removed? Default False.
# verbose: Should status messages be printed? Default False.
def accept_feedstock(path, remove_old=False, verbose=False):
removed = False
with open(path) as feedstock:
val = Validator(json.loads(feedstock.readline()))
for line in tqdm(feedstock, desc="Accepting " + os.path.basename(path), disable= not verbose):
res = val.write_record(json.loads(line))
if not res["success"]:
if not val.cancel_validation()["success"]:
print("ERROR: Validation not cancelled. Feedstock may not have been removed.")
                raise ValueError(res["message"] + "\n" + str(res.get("details", "")))
if remove_old:
os.remove(path)
removed = True
return {
"success": True,
"source_deleted": removed
}
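# Note on the expected feedstock layout (inferred from accept_feedstock above):
# the first line of the file is a JSON header consumed by Validator(), and each
# following line is one standalone JSON record passed to val.write_record(), e.g.
#   { ...dataset-level metadata... }
#   { ...record 1... }
#   { ...record 2... }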
# Function to accept all feedstock in a directory
# Args:
# path: Path to the feedstock directory
# remove_old: Should fully accepted feedstock be removed? Default False.
# verbose: Should status messages be printed? Default False.
def accept_all(path=None, remove_old=False, verbose=False):
if not path:
path = get_path(__file__, "submissions")
if verbose:
print("Accepting all feedstock from '", path, "'", sep="")
removed = []
count = 0
for feedstock in tqdm(os.listdir(path), desc="Accepting feedstock", disable= not verbose):
# Must be actual feedstock
if feedstock.endswith("_all.json"):
result = accept_feedstock(os.path.join(path, feedstock), remove_old=remove_old, verbose=verbose)
count += 1
if result["source_deleted"]:
removed.append(feedstock)
if verbose:
print("Accepted", count, "total feedstock files")
return {
"success": True,
"removed": removed,
"total": count
}
| 2.640625 | 3 |
config.py | ada-shen/Interpret_quality | 1 | 12795903 | <filename>config.py
CONFIG = {
"shapley_batch_size": {
"pointnet2": 5,
"pointnet": 50,
"dgcnn": 5,
"gcnn": 10,
"pointconv": 20
},
"interaction_batch_size": {
"pointnet2": 25,
"pointnet": 100,
"dgcnn": 25,
"gcnn": 50,
"pointconv": 100
}
} | 1.15625 | 1 |
dataProcessor.py | Synapt1x/PythonDataProcessor | 0 | 12795904 | """
Data Processor
============================
Created by: <NAME>
For: Dr. <NAME> lab
----------------------------
This program was developed to automatically
format input Excel data for statistical
analysis in the lab's analysis software.
"""
from Tkinter import *
from ttk import Frame, Style
from os import chdir, path, sep
import pandas as pd # import pandas data structures (DataFrame) and read_excel
# Import module with class/functions handling pigeon procesing
from pigeon import Pigeon
# Import tool tip function developed by <NAME> at
# http://tkinter.unpythonic.net/wiki/ToolTip, Licensed under
# GNU General Public License, Ver 2
from ToolTip import ToolTip
# Import for directory dialog
import tkFileDialog, tkMessageBox, tkFont, glob, time
# =============================================================================#
root = Tk() # create GUI root
root.wm_title("Data Processor") # create title label
root.geometry("840x520+300+300") # set the size of the window
# Initialize variables
toolTipDelay = 700 # ms
defaultThreshold = 50
outputFilename = ""
pigeonName = ""
allPigeons = {}
allData = {}
groupsForOutput = []
trialButtons = []
trialButtonTooltips = []
animalButtons = []
# locate the current directory and file location
dirname, mainFile = path.split(path.abspath(__file__))
dirname = dirname.replace('/', sep)
# Ask user to identify the data directory
numErrors = 0
while True:
if (numErrors > 4):
        result = tkMessageBox.askyesno(title="Quit?", message="No directory \
selected over multiple attempts. Do you want to quit instead?")
if (result == True):
print "Exiting program..."
exit()
else:
            numErrors = 0
break
try:
dataDirname = tkFileDialog.askdirectory(parent=root,
initialdir=sep, title="Please select the data directory.")
if not dataDirname:
raise ValueError("empty string")
break
except ValueError:
numErrors += 1
tkMessageBox.showinfo("Invalid directory - Failed \
attempt %0.0f/5" % numErrors, "Please select a valid directory...")
dataDirname = dataDirname.replace('/', sep)
# cd to data directory
chdir(dataDirname)
# list all files of type .xls
allFiles = glob.glob("*.xls")
try:
numFiles = len(allFiles)
except:
tkMessageBox.showinfo("No excel spreadsheets found. Please restart the program.")
# First read-in the data
for file in allFiles:
datafile = pd.ExcelFile(file)
index = allFiles.index(file)
# now read excel file data into a DataFrame
pigeonData = pd.read_excel(datafile)
# extract pigeon name
pigeonNametemp = pigeonData["Trial Information"][0].split('_')[0] # take first
# term from trial information in first entry
# convert unicode to utf8
pigeonName = pigeonNametemp.encode('utf8')
# create pigeon
allPigeons[pigeonName] = Pigeon(pigeonData)
def printInfo(processingTime, outputFilename):
print "Processing the selected data files took %1.2f seconds." % processingTime
print "\nFormatted output of all selected data files located in " + outputFilename + '.'
def analyzePigeons(calcForThreshold, path):
print "\nProcessing %1.0f data files with a threshold of %0.0f units, \
please wait..." % (numFiles, calcForThreshold)
startTime = time.time() # start timer
progressTime = startTime
# define the output spreadsheet
outputFilename = path.join(dirname, "output-threshold-%0.0f.xls" % calcForThreshold)
allWriter = pd.ExcelWriter(outputFilename)
currFile = 0
progressTime = 0
# loop through all of the pigeons loaded into the dictionary allPigeons
for pigeonName, pigeon in allPigeons.iteritems():
currFile += 1
if ((time.time() - progressTime) > 5): # display progress
progressTime = time.time() # update progress time
# find the indices of the goal locations in (x,y)
pigeon.calcDist(calcForThreshold)
# use the excel writer to save this pigeon to a data sheet in output.xls
pigeon.dataframe.to_excel(allWriter, sheet_name=pigeonName)
print "Progress: %0.0f/%0.0f..." % (currFile, numFiles)
# also save each pigeon data to a dictionary for GUI processing
allData[pigeonName] = pigeon.dataframe
# also calculate how long formatting takes
processingTime = time.time() - startTime
try:
allWriter.save()
printInfo(processingTime, outputFilename)
except:
print "Processing the selected data files took %1.2f seconds." % processingTime
tkMessageBox.showinfo("Initial processing output cancelled", "Although \
processing of the selected data files occurred as usual, there was an issue \
writing to the designated excel file. Check to make sure it is not currently \
in use. Since processing will not likely change for the same threshold values, \
this may not be an issue. Saving the output of initial data processing was \
cancelled.")
# =============================================================================#
# ==========main function for handling processing and GUI functions============#
# =============================================================================#
class App(Frame):
# Constructor
def __init__(self, parent):
Frame.__init__(self, parent)
self.pack(fill=BOTH, expand=True)
# run the initial formatting on the data folder
analyzePigeons(defaultThreshold, path)
print "\nTips for using the GUI of this program can be found in the supplied \
README file. Tooltips are also available upon hovering over any \
element within the GUI.\n\n"
self.createComponents()
# function for creating the select all and de-select button frames
def createButtons(self, frame, vals, text):
# create canvas for select all and deselect all buttons
canv = Canvas(frame, width=220, height=10)
canv.create_line(20, 10, 220, 10, dash=(2, 4))
canv.pack(fill=X)
# create each button separately
selectAll = Button(frame, text="Select All",
command=lambda:
self.allButtons(vals, "Select"))
selectAll.pack()
selectTrialToolTip = ToolTip(selectAll, delay=toolTipDelay,
text="Select all " + text + " for analysis.")
deselectAll = Button(frame, text="De-Select All",
command=lambda:
self.allButtons(vals, "De-Select"))
deselectAll.pack()
deselectTrialToolTip = ToolTip(deselectAll, delay=toolTipDelay,
text="Deselect all " + text + " marked for analysis.")
return (selectAll, deselectAll)
# Callback for select all and de-select all buttons
def allButtons(self, buttonGroup, event):
for buttonNum in range(len(buttonGroup)):
if event == "Select":
buttonGroup[buttonNum].set(1)
else:
buttonGroup[buttonNum].set(0)
# Output the desired analyses
def run(self):
trialsForOutput = self.getGroups(self.trialVals, "trials")
animalsForOutput = self.getGroups(self.animalVals, "animals")
if ((trialsForOutput != []) and (animalsForOutput != [])):
outputFrames = self.analyzeGroups(trialsForOutput, animalsForOutput)
# get the output name for saving the excel file
todaysDate = time.strftime("%Y-%m-%d")
initialFileName = todaysDate + '-' + '-'.join(trialsForOutput) + ".xls"
chosenName = tkFileDialog.asksaveasfilename(initialdir=dirname,
initialfile=initialFileName)
chosenName = chosenName.replace('/', sep);
if (chosenName != dirname + sep + initialFileName) and (".xls" not in chosenName):
chosenName = chosenName + ".xls"
try:
# create excelwriter object for outputting to excel
writer = pd.ExcelWriter(chosenName)
# create the excel writer object
for frameIndex in outputFrames:
outputFrames[frameIndex].to_excel(writer, sheet_name=frameIndex)
except:
tkMessageBox.showinfo("Saving cancelled", "No output file name \
was selected. Saving operation cancelled.")
try:
writer.save()
print "Saving output of chosen groups and pigeons to ", chosenName
except:
tkMessageBox.showinfo("Saving cancelled", "Sorry there as an \
issue writing to the designated excel file. Check to make sure it is not \
currently in use. Saving operation cancelled.")
elif (trialsForOutput == [] and animalsForOutput == []):
tkMessageBox.showinfo("Nothing selected",
"Please select something to analyze.")
elif (trialsForOutput == []):
tkMessageBox.showinfo("No groups selected",
"Please select at least one grouping to analyze.")
elif (animalsForOutput == []):
tkMessageBox.showinfo("No birds selected",
"Please select at least one bird to analyze.")
def checkReformat(self, thresholdBox, reset): # re-run if threshold has been changed
        try:
            value = float(thresholdBox.get())
if (value == defaultThreshold):
print "Threshold has not changed from default"
return
if (reset == True):
thresholdBox.delete(0, END)
thresholdBox.insert(0, defaultThreshold)
value = defaultThreshold
analyzePigeons(value, path)
except:
tkMessageBox.showinfo("Not a number", "Please enter a valid number.")
thresholdBox.delete(0, END)
thresholdBox.insert(0, defaultThreshold)
def scrollFunc(self, event):
self.animalCanvas.configure(scrollregion=self.animalCanvas.bbox("all"))
# Create all of the buttons and components of the GUI
def createComponents(self):
# Create text fonts for components
self.titleFont = tkFont.Font(family="Arial", size=18)
self.componentFont = tkFont.Font(family="Helvetica", size=16)
# Create a frame for the title section
# ======================================================================
titleFrame = Frame(self)
titleFrame.pack(fill=X)
# Create the title label
title_label = Label(titleFrame, text="Data Processor For Pigeon Experiment",
font=self.titleFont)
title_label.pack(fill=X, expand=True)
title_labelTooltip = ToolTip(title_label, delay=toolTipDelay + 500,
text="This program was created by <NAME> for use \
in the laboratory of Dr. <NAME>.")
# Create a canvas for drawing a separation line
canv = Canvas(titleFrame, width=840, height=10)
canv.create_line(0, 10, 840, 10)
canv.pack(fill=X, anchor=CENTER, expand=True)
# Create a frame for the bottom section
# ======================================================================
footerFrame = Frame(self)
footerFrame.pack(anchor=S, expand=True, side=BOTTOM)
# Create a run button
runButton = Button(footerFrame, width=200, text="Run Processing", command=self.run)
runButton.pack(fill=Y)
runToolTip = ToolTip(runButton, delay=toolTipDelay,
text="Run analysis based on the groups and animals\
selected above.")
# Create and populate group and trial button frames
# ======================================================================
trialFrame = Frame(self)
trialFrame.pack(expand=True, anchor=W, side=LEFT)
# Create a checkbox for each test group
self.trialLabels = ["Non-reinforced training", "Control 1", "Control 2",
"Feature Only", "Geometry Only", "Affine"]
self.trialKeys = ["Nrtr", "C1", "C2", "FO", "GO", "AF"]
self.trialTooltips = ["Non-reinforced training group.", "Control group 1",
"Control group 2", "Group where an extra wall and a \
feature wall are placed in the environment to create an enclosed square.",
"Group where the feature wall is removed, but the geometry of the environment \
remains the same.", "Group where the feature wall is moved to the end of the \
long wall."]
self.trialVals = []
# create all of the group buttons
for num in range(len(self.trialLabels)):
self.trialVals.append(IntVar())
trialButtons.append(Checkbutton(trialFrame, text=self.trialLabels[num],
variable=self.trialVals[num], font=self.componentFont))
trialButtons[-1].pack(pady=8)
trialButtonTooltips.append(ToolTip(trialButtons[-1],
delay=toolTipDelay, text=self.trialTooltips[num]))
# create select/deselect all buttons
self.createButtons(trialFrame, self.trialVals, "experimental phases")
# Create a frame for handling all of the birds
# ======================================================================
animalsFrame = Frame(self, width=100, height=360)
animalsFrame.pack(expand=True, anchor=CENTER, side=RIGHT)
self.animalCanvas = Canvas(animalsFrame, width=100, height=360, scrollregion=(0, 0, 500, 1000))
self.newFrame = Frame(self.animalCanvas, width=100, height=360)
self.animalScrollbar = Scrollbar(animalsFrame, orient="vertical", command=self.animalCanvas.yview)
self.animalCanvas.configure(yscrollcommand=self.animalScrollbar.set)
self.animalScrollbar.pack(side="right", fill="y")
self.animalCanvas.pack(side="top")
self.animalCanvas.create_window((0, 0), window=self.newFrame, anchor='nw')
self.newFrame.bind("<Configure>", self.scrollFunc)
self.animals = list(allData.keys())
self.animalVals = []
# Create a button for each bird in the data directory
for bird in range(len(self.animals)):
self.animalVals.append(IntVar())
animalButtons.append(Checkbutton(self.newFrame, text=self.animals[bird],
variable=self.animalVals[bird],
font=self.componentFont))
self.animalVals[-1].set(1)
animalButtons[-1].pack(pady=6)
# create select/deselect all buttons
self.createButtons(animalsFrame, self.animalVals, "animals")
# Create a frame for handling all of the additional buttons
# ======================================================================
buttonsFrame = Frame(self)
buttonsFrame.pack(fill=X, expand=True)
# Threshold label
        thresholdLabel = Label(buttonsFrame, text="Change threshold: ")
        thresholdLabel.pack()
# Threshold entry box
thresholdBox = Entry(buttonsFrame, width=10)
thresholdBox.pack()
thresholdBox.insert(0, defaultThreshold)
thresholdBoxTooltip = ToolTip(thresholdBox, delay=toolTipDelay,
text="Change this value to set a new threshold value \
for calculating the max distance away from a goal to be kept for data analysis.")
# Re-analyze with new thresholdBox
reformatButton = Button(buttonsFrame, text="Apply new threshold",
command=lambda: self.checkReformat(thresholdBox, False))
reformatButton.pack()
reformatTooltip = ToolTip(reformatButton, delay=toolTipDelay,
text="Click to apply any changes to threshold box above.")
# Reset threshold to defaultThreshold
resetButton = Button(buttonsFrame, text="Reset threshold and run",
command=lambda: self.checkReformat(thresholdBox, True))
resetButton.pack()
resetButtonTooltip = ToolTip(resetButton, delay=toolTipDelay,
text="Click to reset threshold to default value.")
# Create a sort button
self.sortOutput = IntVar()
sortButton = Checkbutton(buttonsFrame, text="Sort",
variable=self.sortOutput, font=self.componentFont)
sortButton.pack()
sortTooltip = ToolTip(sortButton, delay=toolTipDelay,
text="Select to auto-sort the output excel spreadsheets by \
trial type.")
# Create a quit button
quitButton = Button(buttonsFrame, text="Quit", command=self.quit)
quitButton.pack()
quitToolTip = ToolTip(quitButton, delay=toolTipDelay,
text="Quit the program and close the GUI.")
def create_window(self):
self.counter += 1
t = Toplevel(self)
t.wm_title("Window #%s" % self.counter)
l = Label(t, text="This is window #%s" % self.counter)
l.pack(side="top", fill="both", expand=True, padx=100, pady=100)
# function for determining which groups/animals will be analyzed
def getGroups(self, buttons, groupType):
groupsForOutput = []
if (groupType == "animals"):
keys = self.animals
else:
keys = self.trialKeys
# check which buttons are selected
for buttonNum in buttons:
if buttonNum.get():
indexOfButton = buttons.index(buttonNum)
groupsForOutput.append(keys[indexOfButton])
return groupsForOutput
# function for parsing dataframe based on groups
def analyzeGroups(self, trials, animals):
outputFrames = {}
columns = ["Pigeon Name", "Trial Type", "Removed Pecks", "Average Dist"]
goColumns = list(columns)
goColumns[-1] = "Average Opp Dist"
AFColumns = list(goColumns)
AFColumns.extend(["Average AF Dist"])
'''if X and Y coordinates option selected
columns = columns.append(["X Dist", "Y Dist"])'''
for trial in trials:
trialFrame = pd.DataFrame({}) # storage frame for each trial
gotrialFrame = pd.DataFrame({})
AFtrialFrame = pd.DataFrame({})
# loop over each pigeon and acquire data matching requested trials
for pigeon in animals:
tempFrame = pd.DataFrame({})
pigeonFrame = allData[pigeon]
if (trial == "GO"):
goFrame = self.getFrame(pigeonFrame, goColumns, trial)
gotrialFrame = gotrialFrame.append(goFrame)
elif (trial == "AF"):
goFrame = self.getFrame(pigeonFrame, goColumns, trial)
gotrialFrame = gotrialFrame.append(goFrame)
AFFrame = self.getFrame(pigeonFrame, AFColumns, trial)
AFtrialFrame = AFtrialFrame.append(AFFrame)
tempFrame = self.getFrame(pigeonFrame, columns, trial)
trialFrame = trialFrame.append(tempFrame) # add this pigeon to trial frame
# sort by group and store in list of dataframes if selected to
            if (self.sortOutput.get() == 1):
if (trial == "GO"):
outputFrames["GO-Opp Distance"] = gotrialFrame.sort(["Trial Type", "Pigeon Name"])
elif (trial == "AF"):
outputFrames["AF-Opp Distance"] = gotrialFrame.sort(["Trial Type", "Pigeon Name"])
outputFrames["AF-AF Distance"] = AFtrialFrame.sort(["Trial Type", "Pigeon Name"])
outputFrames[trial] = trialFrame.sort(["Trial Type", "Pigeon Name"])
else:
if (trial == "GO"):
outputFrames["GO-Opp Distance"] = gotrialFrame
elif (trial == "AF"):
outputFrames["AF-Opp Distance"] = gotrialFrame
outputFrames["AF-AF Distance"] = AFtrialFrame
outputFrames[trial] = trialFrame
return outputFrames
# function to also create a processed dataframe for each pigeon/trial
def getFrame(self, pigeonFrame, columns, trial):
tempFrame = pigeonFrame.loc[pigeonFrame["Experiment Phase"] == trial][columns]
tempFrame = tempFrame.dropna()
# tempFrame = tempFrame[~tempFrame[columns[-1]].isin(["No Pecks"])==True]
return tempFrame
# run the GUI
app = App(root)
root.resizable(width=FALSE, height=FALSE)
root.mainloop()
| 3.1875 | 3 |
blueprints/loggers/demos/basic_logger_demo.py | keithpij/python-blueprints | 0 | 12795905 | <filename>blueprints/loggers/demos/basic_logger_demo.py
import argparse
import os
from blueprints.loggers import basic_logger
def log_sample_messages():
'''
Sends a sample message for each logging level.
'''
queue_name = 'orders'
message_data = {'customerId': 42, 'productId': 12345, 'quantity': 5}
attempt = 2
logger = basic_logger.get_logger()
    logger.debug('Message received from queue: %s, message data: %s', queue_name, message_data)
logger.info('Message received from queue: %s', queue_name)
logger.warning('Old message format detected. Queue: %s, Message data: %s', queue_name, message_data)
logger.error('Could not connect to queue. Attempt: %i', attempt, stack_info=True)
logger.critical('Error processing message from queue: %s, message_data: %s.',
queue_name, message_data, stack_info=True)
if __name__ == '__main__':
# Setup all the CLI arguments for this module.
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--logger_name',
help='Specify the name of your logger.')
parser.add_argument('-l', '--logging_level',
help='Specify a logging level (NOTSET, DEBUG, INFO, WARNING, ERROR, or CRITICAL) for the basic logger.')
# Parse what was passed in.
args = parser.parse_args()
# Get the name of the logger.
if args.logger_name:
os.environ['LOGGER_NAME'] = args.logger_name
else:
os.environ['LOGGER_NAME'] = __name__
# Get the level to be used for the logger's handler.
if args.logging_level:
os.environ['LOGGING_LEVEL'] = args.logging_level
else:
os.environ['LOGGING_LEVEL'] = 'INFO'
os.environ['FORMAT_STRING'] = '%(asctime)s — %(name)s — %(levelname)s — %(module)s:%(funcName)s:%(lineno)d — %(message)s'
print('Logger name: ' + os.environ['LOGGER_NAME'])
print('Logging level: ' + os.environ['LOGGING_LEVEL'])
print('Format: ' + os.environ['FORMAT_STRING'])
basic_logger.create_logger()
log_sample_messages()
| 2.8125 | 3 |
monkey-broker.py | cferrisroig/monkey-broker | 0 | 12795906 | <reponame>cferrisroig/monkey-broker<filename>monkey-broker.py
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 10 21:44:03 2019
@author: Christian
"""
##########################################
######## IMPORT LIBRARIES ########
##########################################
import datetime
#import pandas as pd
import pandas_datareader.data as web
import numpy as np
import plotly
import plotly.graph_objects as go
import plotly.figure_factory as ff
##########################################
######## INPUT DATA ########
##########################################
#Define start and end date for the data
start = datetime.datetime(2015, 12, 11)
end = datetime.datetime(2019, 12, 10)
#Declare the tickers in the analysis
ticker_list = ["HPQ"] #Other tickers: ^GSPC, HPQ, AMZN, WM, IBM, CL, PG, TSLA
#Define the initial amount of money available per investor
initial_cash = 10000
##########################################
######## FUNCTIONS DECLARATION ########
##########################################
#||||==-- CREATE A VECTOR WITH THE OPERATIONS OF THE MONKEYS --==||||
def monkey_broker (trade_days, freq_op):
#trade_days is the days of the experiment
#freq_op is the average hold time for a position
next_op = "B" #B for BUY // S for SELL
hold_op = round(np.random.uniform(1, freq_op)) #days between operations
operations = [] #Vector with operations executed
op_day = 0
#Build vector with operations
for i in range(trade_days-1):
if op_day < hold_op:
operations.append(0)
op_day += 1
else:
operations.append(next_op)
hold_op = round(np.random.uniform(1, freq_op))
op_day = 0
if next_op == "B":
next_op = "S"
elif next_op == "S":
next_op = "B"
#Avoid last operation is a BUY by setting a SELL the last day (if needed)
if next_op == "S":
operations.append("S")
else:
operations.append(0)
return operations
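# Illustrative example (added; output is random, so the exact sequence will differ):
#   monkey_broker(8, 3) might return [0, 'B', 0, 'S', 0, 0, 'B', 'S']
# i.e. buys and sells alternate, separated by random hold periods, and the last
# day forces a final 'S' so no position is left open at the end of the period.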
#||||==-- CREATE A DICTIONARY WITH THE OPERATIONS OF MULTIPLE MONKEYS --==||||
def monkey_population (n, freq_op):
monkeys = {}
for i in range(n):
monkeys[i] = monkey_broker (trade_days, freq_op)
return monkeys
#||||==-- CREATE A VECTOR WITH LONG TERM OPERATION --==||||
def lt_broker (trade_days):
operations = [0] * (trade_days)
operations[0] = "B"
operations[-1] = "S"
return operations
#||||==-- GET THE PRICES AND ADD THE DAILY AVERAGE VALUE CALCULATED WITH THE HIGH AND LOW --==||||
def get_prices (ticker_list):
stocks_price = {}
for ticker in ticker_list:
price = web.DataReader(ticker, 'yahoo', start, end)
price["AVG"] = (price["High"] + price["Low"]) / 2
price["Benchmark"] = price["AVG"] * initial_cash / price["AVG"][0]
stocks_price[ticker] = price
return stocks_price
#||||==-- EXECUTE THE ORDERS FOR ONE INVESTOR --==||||
def wallet_evolution (operations, ticker):
free_cash = []
shares_owned = []
wallet_value = []
for op in operations:
if op == 0:
if len(free_cash) == 0: #First day and no buy
free_cash.append(initial_cash)
shares_owned.append(0)
wallet_value.append(0)
elif shares_owned[-1] == 0: #Days without stocks
free_cash.append(free_cash[-1])
shares_owned.append(0)
wallet_value.append(0)
elif shares_owned[-1] > 0: #Days when hold position
free_cash.append(free_cash[-1])
new_value = stocks_price[ticker]["AVG"][len(shares_owned)] * shares_owned[-1]
shares_owned.append(shares_owned[-1])
wallet_value.append(new_value)
elif op == "B": #Days when buy shares
share_price = stocks_price[ticker]["AVG"][len(free_cash)]
if len(free_cash) == 0:
shares_ex = int(initial_cash / share_price)
wallet_value.append(share_price * shares_ex)
free_cash.append(initial_cash - wallet_value[-1])
else:
shares_ex = int(free_cash[-1] / share_price)
wallet_value.append(share_price * shares_ex)
free_cash.append(free_cash[-1] - wallet_value[-1])
shares_owned.append(shares_ex)
elif op == "S": #Days when sell shares
share_price = stocks_price[ticker]["AVG"][len(free_cash)]
shares_ex = shares_owned[-1]
shares_owned.append(0)
wallet_value.append(0)
free_cash.append(free_cash[-1] + share_price * shares_ex)
total_value = [x + y for x, y in zip(free_cash, wallet_value)]
return {"Free cash": free_cash, "Wallet value": wallet_value, "Total": total_value}
#||||==-- Execution of the orders for an investors group --==||||
def wallets_evolution (investors_group, ticker):
wallet = {}
if ticker in ticker_list:
for investor in investors_group:
wallet[investor] = wallet_evolution (investors_group[investor], ticker)
#print ("Monkey number: " + str(investor))
#print ("Ends period with: " + str(wallet[investor]["Total"][-1]) )
return wallet
else:
print ("Ticker not in list, include it and run the program again")
return False
#||||==-- Growth percentage of the investment for an invetsors group --==||||
def benchmark_growth (ticker):
if ticker in ticker_list:
growth = ( stocks_price[ticker]["AVG"][-1] - stocks_price[ticker]["AVG"][0] ) / stocks_price[ticker]["AVG"][0]
return growth
else:
print ("Ticker not in list, include it and run the program again")
return False
#||||==-- Growth percentage of the investment for an invetsors group --==||||
def wallet_growth (wallets):
total_growth = []
for wallet in wallets:
growth = ( wallets[wallet]["Total"][-1] - wallets[wallet]["Total"][0] ) / wallets[wallet]["Total"][0]
wallets[wallet]["Growth"] = growth
total_growth.append(growth)
return total_growth
#||||==-- Plot of wallets for an investors group --==||||
def wallets_plot (investors_wallets, ticker, file):
file_name = file + ".html"
data = []
for wallet in investors_wallets:
investor_data = go.Scatter(
x = stocks_price[ticker].index,
y = investors_wallets[wallet]["Total"],
mode = 'lines',
line = dict(color = 'rgb(130,130,130)', width = 1),
name = "Investor_" + str(wallet))
data = data + [investor_data]
"""
lt_evolution = go.Scatter(
x = stocks_price[ticker].index,
y = wallet_evolution(lt_broker(trade_days),ticker)["Total"],
mode = 'lines',
line = dict(color = 'rgb(30,30,30)', width = 5 ),
name = ticker)
data = data + [lt_evolution]
"""
benchmark = go.Scatter(
x = stocks_price[ticker].index,
y = stocks_price[ticker]["Benchmark"],
mode = 'lines',
line = dict(color = 'rgb(30,30,30)', width = 5 ),
name = ticker)
data = data + [benchmark]
layout = go.Layout(
title = file,
xaxis = dict(title='Time'),
yaxis = dict(title='Monetary Units'))
fig = go.Figure(data=data, layout=layout)
plotly.offline.plot(fig, show_link = False, output_type = 'file',
filename = file_name, auto_open = True)
return True
#||||==-- Plot histogram for growths from two groups --==||||
def growth_plot (growth_1, growth_2, file):
file_name = file + ".html"
# Group data together
hist_data = [growth_1, growth_2]
group_labels = ['Group 1', 'Group 2']
# Create distplot with custom bin_size
fig = ff.create_distplot(hist_data, group_labels, bin_size=.05, curve_type='normal')
fig.show()
plotly.offline.plot(fig, show_link = False, output_type = 'file',
filename = file_name, auto_open = True)
return True
##########################################
######## SIMULATION EXECUTION ########
##########################################
import time
start_time = time.time()
print("--- %s seconds at start ---" % (time.time() - start_time))
#Get the prices and calculate the days of tradding data available
stocks_price = get_prices (ticker_list)
trade_days = stocks_price[ticker_list[0]]["AVG"].count()
print("--- %s seconds to get prices ---" % (time.time() - start_time))
#Generate the dictionaries with the operations for the monkeys
impatient_monkeys = monkey_population (200, 8)
patient_monkeys = monkey_population (200, 65)
print("--- %s seconds to populate all monkeys ---" % (time.time() - start_time))
#Generate the dictionaries with the evolutoin of the wallets
wallets_impatients = wallets_evolution (impatient_monkeys, "HPQ")
wallets_patients = wallets_evolution (patient_monkeys, "HPQ")
print("--- %s seconds to calculate all monkeys wallets ---" % (time.time() - start_time))
#Calculate the growth for the benchmark and the wallets
hpq_growth = benchmark_growth ("HPQ")
print("Benchmark growth is: " + str(hpq_growth) )
impatient_growth = wallet_growth (wallets_impatients)
patient_growth = wallet_growth (wallets_patients)
print("Impatient monkey got an average growth of: " + str(np.average(impatient_growth)) )
print("Patient monkey got an average growth of: " + str(np.average(patient_growth)) )
print("--- %s seconds to calculate all wallets growth ---" % (time.time() - start_time))
growth_plot (impatient_growth, patient_growth, "growth")
#Plot the evolution for every wallet
#wallets_plot (wallets_impatients, "HPQ", "Prueba1")
#print("--- %s seconds to plot impatient monkeys wallets---" % (time.time() - start_time))
#wallets_plot (wallets_patients, "HPQ", "Prueba2")
#print("--- %s seconds to plot patient monkeys wallets---" % (time.time() - start_time)) | 2.078125 | 2 |
Python Tkinter Text Bold and Italics Text/textWidgetBoldItalicsText.py | BrianMarquez3/Python-Course | 20 | 12795907 | <filename>Python Tkinter Text Bold and Italics Text/textWidgetBoldItalicsText.py
# Python Tkinter Text Bold and Italics Text
# Texto de Python Tkinter Texto en negrita y en cursiva
from tkinter import *
from tkinter import filedialog
from tkinter import font
root = Tk()
root.title('Python Tkinter Text Bold and Italics Text')
root.iconbitmap('Python Tkinter Text Bold and Italics Text/icons/panda.ico')
root.geometry("600x600")
# Read Only r
# read and write r+ (beginning of file)
# Write Only w (over-Written)
# Write and Read w+ (over written)
# Append Only a (end of file)
# Append and Read a+ (end of file)
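# e.g. (added for clarity) open("notes.txt", "a+") opens for appending with read
# access, while "w+" truncates the file before reading and writing.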
#----------------------------------------------------------------------------Function--------------------------------------------------------------------------#
def open_txt():
text_file = filedialog.askopenfilename(initialdir="Python Tkinter Text Bold and Italics Text/", title ="Open Text File", filetypes=(("Text Files", "*.txt"),))
text_file = open(text_file, "r")
stuff = text_file.read()
my_text.insert(END, stuff)
text_file.close()
def save_txt():
text_file = filedialog.askopenfilename(initialdir="Python Tkinter Text Bold and Italics Text/", title ="Open Text File", filetypes=(("Text Files", "*.txt"),))
text_file = open(text_file, "w")
text_file.write(my_text.get(1.0, END))
def add_image():
# Add Image
global my_image
my_image = PhotoImage(file="Python Tkinter Text Bold and Italics Text/images/softwares.png")
position = my_text.index(INSERT)
my_text.image_create(position, image=my_image)
my_label.config(text=position)
def select_text():
selected = my_text.selection_get()
my_label.config(text=selected)
def bolder():
bold_font = font.Font(my_text, my_text.cget("font"))
bold_font.configure(weight="bold")
my_text.tag_configure("bold", font=bold_font)
current_tags = my_text.tag_names("sel.first")
if "bold" in current_tags:
my_text.tag_remove("bold", "sel.first", "sel.last")
else:
my_text.tag_add("bold", "sel.first", "sel.last")
def italics_it():
italic_font = font.Font(my_text, my_text.cget("font"))
italic_font.configure(slant="italic")
my_text.tag_configure("italic", font=italic_font)
current_tags = my_text.tag_names("sel.first")
if "italic" in current_tags:
my_text.tag_remove("italic", "sel.first", "sel.last")
else:
my_text.tag_add("italic", "sel.first", "sel.last")
#-------------------------------------------------Frame-------------------------------------------------------#
my_frame = Frame(root)
my_frame.pack(pady=10)
# Create Scrolbar
text_scroll = Scrollbar(my_frame)
text_scroll.pack(side=RIGHT, fill=Y)
my_text = Text(my_frame, width=40, height=10, font=("Helvetica", 16), selectbackground="green", selectforeground="black", yscrollcommand=text_scroll.set)
my_text.pack()
# Configure our scrollbar
text_scroll.config(command=my_text.yview)
open_button = Button(root, text="Open Text File", command=open_txt)
open_button.pack(pady=20)
save_button = Button(root, text="Save File", command=save_txt)
save_button.pack(pady=20)
image_button = Button(root, text="Add Image", command=add_image)
image_button.pack(pady=5)
select_button = Button(root, text="Select Text", command=select_text)
select_button.pack(pady=5)
bold_button = Button(root, text="Bold", command=bolder)
bold_button.pack(pady=5)
italics_button = Button(root, text="italics", command=italics_it)
italics_button.pack(pady=5)
my_label = Label(root, text="")
my_label.pack(pady=5)
root.mainloop()
| 4.09375 | 4 |
examples/verilator/mainsim.py | splhack/mantle | 33 | 12795908 | import os
os.environ['MANTLE'] = 'lattice'
from magma import *
from mantle import And, XOr
from simulator import testvectors
main = DefineCircuit('main', "a", In(Bit), "b", In(Bit), "c", In(Bit), "d", Out(Bit), 'CLK', In(Bit))
t = And(2)(main.a,main.b)
d = XOr(2)(t,main.c)
wire(d,main.d)
EndCircuit()
print(testvectors(main))
| 2.265625 | 2 |
lib/model/dcr/dcr_layer.py | fregulationn/SANM | 0 | 12795909 | from __future__ import absolute_import
# --------------------------------------------------------
# Spatial Attention Network with Feature Mimicking
# Copyright (c) 2018 University of Illinois
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
# --------------------------------------------------------
# Reorganized and modified Modified by <NAME>
# -------------------------------------------------------
import torch
import torch.nn as nn
import numpy as np
import math
import yaml
from model.utils.config import cfg
from model.rpn.generate_anchors import generate_anchors
# from model.rpn.bbox_transform import bbox_transform_inv, clip_boxes, clip_boxes_batch
from .bbox.bbox_transform import bbox_pred, clip_boxes, bbox_overlaps
# from model.nms.nms_wrapper import nms
from model.roi_layers import nms
import pdb
DEBUG = False
class _DCRProposalLayer(nn.Module):
def __init__(self, class_agnostic):
super(_DCRProposalLayer, self).__init__()
self.class_agnostic = class_agnostic
self._top = cfg.DCR.TOP
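    # forward() below (description added for clarity): ranks the incoming RoIs by
    # their highest foreground class probability, keeps the top cfg.DCR.TOP
    # fraction, decodes the bbox deltas in columns 4:8 with bbox_pred, clips the
    # boxes to the image, and returns the kept boxes (prefixed with a batch-index
    # column) together with the indices of the kept RoIs.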
def forward(self, rois, cls_prob, bbox_pred_tensor, im_info):
num_keep_index = int(rois.shape[0] * self._top)
rois = rois[0].cpu().detach().numpy()[:, 1:]
bbox_deltas = bbox_pred_tensor.cpu().detach().numpy()[:, 4:8]
im_info = im_info.cpu().detach().numpy()[0, :]
cls_prob = cls_prob.cpu().detach().numpy()[:, 1:] # ignore bg
# sort scores
max_scores = np.amax(cls_prob, axis=1)
# keep top scores
keep_index = np.argsort(-max_scores)[:num_keep_index]
proposals = bbox_pred(rois, bbox_deltas)
proposals = clip_boxes(proposals, im_info[:2])
batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32)
blob = np.hstack((batch_inds, proposals.astype(np.float32, copy=False)))
return blob[keep_index, :], keep_index
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
def reshape(self, bottom, top):
"""Reshaping happens during the call to forward."""
pass | 1.867188 | 2 |
DTL/gui/widgets/progresswidget.py | rocktavious/DevToolsLib | 1 | 12795910 | <filename>DTL/gui/widgets/progresswidget.py
from DTL.qt import QtGui
from DTL.api import apiUtils
from DTL.gui import Core, Dialog
#------------------------------------------------------------
#------------------------------------------------------------
class ProgressWidget(Dialog):
#------------------------------------------------------------
def onFinalize(self, total=1, current=0, message='Loading...'):
apiUtils.synthesize(self, 'total', total)
apiUtils.synthesize(self, 'current', current)
apiUtils.synthesize(self, 'message', message)
self.ui_ProgressBar.setValue(1)
self.ui_Label.setText(self.message)
self.center()
self.show()
self.update()
#------------------------------------------------------------
def update(self):
self.ui_ProgressBar.setValue(self.value())
self.ui_Label.setText(self.message)
super(ProgressWidget, self).update()
#------------------------------------------------------------
def increment(self):
self.setCurrent(self.current + 1)
self.update()
#------------------------------------------------------------
def percent(self):
if self.total > 0 :
return 1.0 / self.total
else:
return 0
#------------------------------------------------------------
def value(self, recursive=True):
return (100 * self.current * self.percent())
#------------------------------------------------------------
if __name__ == '__main__':
import time
prg = ProgressWidget(total=5, message='Test Loading...')
for i in range(5):
time.sleep(1)
prg.setMessage(str(i))
prg.increment()
prg.close()
| 2.203125 | 2 |
onadata/apps/fsforms/models.py | awemulya/fieldsight-kobocat | 38 | 12795911 | from __future__ import unicode_literals
import datetime
import os
import json
import re
from django.contrib.auth.models import User
from django.contrib.contenttypes.fields import GenericRelation
from django.contrib.postgres.fields import ArrayField
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models import Max
from django.db.models.signals import post_save, pre_delete
from django.utils.translation import ugettext_lazy as _
from django.dispatch import receiver
from jsonfield import JSONField
from pyxform import create_survey_from_xls, SurveyElementBuilder
from pyxform.xform2json import create_survey_element_from_xml
from xml.dom import Node
from onadata.apps.fieldsight.models import Site, Project, Organization
from onadata.apps.fsforms.fieldsight_models import IntegerRangeField
from onadata.apps.fsforms.utils import send_message, send_message_project_form, check_version
from onadata.apps.logger.models import XForm, Instance
from onadata.apps.logger.xform_instance_parser import clean_and_parse_xml
from onadata.apps.viewer.models import ParsedInstance
from onadata.apps.fsforms.fsxform_responses import get_instances_for_field_sight_form
from onadata.settings.local_settings import XML_VERSION_MAX_ITER
#To get domain to give complete url for app devs to make them easier.
from django.contrib.sites.models import Site as DjangoSite
from onadata.libs.utils.model_tools import set_uuid
SHARED_LEVEL = [(0, 'Global'), (1, 'Organization'), (2, 'Project'),]
SCHEDULED_LEVEL = [(0, 'Daily'), (1, 'Weekly'), (2, 'Monthly'),]
FORM_STATUS = [(0, 'Pending'), (1, 'Rejected'), (2, 'Flagged'), (3, 'Approved'), ]
class FormGroup(models.Model):
name = models.CharField(max_length=256, unique=True)
description = models.TextField(blank=True, null=True)
date_created = models.DateTimeField(auto_now_add=True)
date_modified = models.DateTimeField(auto_now=True)
creator = models.ForeignKey(User, related_name="form_group")
is_global = models.BooleanField(default=False)
organization = models.ForeignKey(Organization, null=True, blank=True)
project = models.ForeignKey(Project, null=True, blank=True)
logs = GenericRelation('eventlog.FieldSightLog')
class Meta:
db_table = 'fieldsight_forms_group'
verbose_name = _("FieldSight Form Group")
verbose_name_plural = _("FieldSight Form Groups")
ordering = ("-date_modified",)
def __unicode__(self):
return getattr(self, "name", "")
class Stage(models.Model):
name = models.CharField(max_length=256)
description = models.TextField(blank=True, null=True)
group = models.ForeignKey(FormGroup,related_name="stage", null=True, blank=True)
order = IntegerRangeField(min_value=0, max_value=30,default=0)
stage = models.ForeignKey('self', blank=True, null=True, related_name="parent")
shared_level = models.IntegerField(default=2, choices=SHARED_LEVEL)
date_created = models.DateTimeField(auto_now_add=True)
date_modified = models.DateTimeField(auto_now=True)
site = models.ForeignKey(Site, related_name="stages", null=True, blank=True)
project = models.ForeignKey(Project, related_name="stages", null=True, blank=True)
ready = models.BooleanField(default=False)
project_stage_id = models.IntegerField(default=0)
weight = models.IntegerField(default=0)
tags = ArrayField(models.IntegerField(), default=[])
logs = GenericRelation('eventlog.FieldSightLog')
class Meta:
db_table = 'fieldsight_forms_stage'
verbose_name = _("FieldSight Form Stage")
verbose_name_plural = _("FieldSight Form Stages")
ordering = ("order",)
def save(self, *args, **kwargs):
if self.stage:
self.group = self.stage.group
super(Stage, self).save(*args, **kwargs)
def get_display_name(self):
return "Stage" if not self.stage else "SubStage"
def is_main_stage(self):
return True if not self.stage else False
def sub_stage_count(self):
if not self.stage:
return Stage.objects.filter(stage=self).count()
return 0
def form_exists(self):
return True if FieldSightXF.objects.filter(stage=self).count() > 0 else False
def form_name(self):
if not FieldSightXF.objects.filter(stage=self).count():
return ""
return FieldSightXF.objects.filter(stage=self)[0].xf.title
def form(self):
if not FieldSightXF.objects.filter(stage=self).count():
return None
return FieldSightXF.objects.filter(stage=self)[0]
def active_substages(self):
return self.parent.filter(stage_forms__isnull=False)
def get_sub_stage_list(self):
if not self.stage:
return Stage.objects.filter(stage=self).values('stage_forms__id','name','stage_id')
return []
@property
def xf(self):
return FieldSightXF.objects.filter(stage=self)[0].xf.pk if self.form_exists() else None
@property
def form_status(self):
status = 0
if self.stage_forms.site_form_instances.filter(form_status=3).exists():
status = 1
return status
@property
def form_count(self):
return self.stage_forms.site_form_instances.all().count()
@staticmethod
def site_submission_count(id, site_id):
return Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(site_id=site_id).count()
@staticmethod
def rejected_submission_count(id, site_id):
return Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(form_status=1, site_id=site_id).count()
@staticmethod
def flagged_submission_count(id, site_id):
return Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(form_status=2, site_id=site_id).count()
@classmethod
def get_order(cls, site, project, stage):
if site:
if not Stage.objects.filter(site=site).exists():
return 1
elif stage is not None:
if not Stage.objects.filter(stage=stage).exists():
return 1
else:
mo = Stage.objects.filter(stage=stage).aggregate(Max('order'))
order = mo.get('order__max', 0)
return order + 1
else:
mo = Stage.objects.filter(site=site, stage__isnull=True).aggregate(Max('order'))
order = mo.get('order__max', 0)
return order + 1
else:
if not Stage.objects.filter(project=project).exists():
return 1
elif stage is not None:
if not Stage.objects.filter(stage=stage).exists():
return 1
else:
mo = Stage.objects.filter(stage=stage).aggregate(Max('order'))
order = mo.get('order__max', 0)
return order + 1
else:
mo = Stage.objects.filter(project=project, stage__isnull=True).aggregate(Max('order'))
order = mo.get('order__max', 0)
return order + 1
def __unicode__(self):
return getattr(self, "name", "")
class Days(models.Model):
day = models.CharField(max_length=9)
index = models.IntegerField()
def __unicode__(self):
return getattr(self, "day", "")
class Schedule(models.Model):
name = models.CharField("Schedule Name", max_length=256, blank=True, null=True)
site = models.ForeignKey(Site, related_name="schedules", null=True, blank=True)
project = models.ForeignKey(Project, related_name="schedules", null=True, blank=True)
date_range_start = models.DateField(default=datetime.date.today)
date_range_end = models.DateField(default=datetime.date.today)
selected_days = models.ManyToManyField(Days, related_name='days', blank=True,)
shared_level = models.IntegerField(default=2, choices=SHARED_LEVEL)
schedule_level_id = models.IntegerField(default=0, choices=SCHEDULED_LEVEL)
date_created = models.DateTimeField(auto_now_add=True)
logs = GenericRelation('eventlog.FieldSightLog')
class Meta:
db_table = 'fieldsight_forms_schedule'
verbose_name = _("Form Schedule")
verbose_name_plural = _("Form Schedules")
ordering = ('-date_range_start', 'date_range_end')
def form_exists(self):
return True if FieldSightXF.objects.filter(schedule=self).count() > 0 else False
def form(self):
return FieldSightXF.objects.filter(schedule=self)[0] if self.form_exists() else None
@property
def xf(self):
return FieldSightXF.objects.filter(schedule=self)[0].xf.pk if self.form_exists() else None
def __unicode__(self):
return getattr(self, "name", "")
class DeletedXForm(models.Model):
xf = models.OneToOneField(XForm, related_name="deleted_xform")
date_created = models.DateTimeField(auto_now=True)
class FieldSightXF(models.Model):
xf = models.ForeignKey(XForm, related_name="field_sight_form")
site = models.ForeignKey(Site, related_name="site_forms", null=True, blank=True)
project = models.ForeignKey(Project, related_name="project_forms", null=True, blank=True)
is_staged = models.BooleanField(default=False)
is_scheduled = models.BooleanField(default=False)
date_created = models.DateTimeField(auto_now=True)
date_modified = models.DateTimeField(auto_now=True)
schedule = models.OneToOneField(Schedule, blank=True, null=True, related_name="schedule_forms")
stage = models.OneToOneField(Stage, blank=True, null=True, related_name="stage_forms")
shared_level = models.IntegerField(default=2, choices=SHARED_LEVEL)
form_status = models.IntegerField(default=0, choices=FORM_STATUS)
fsform = models.ForeignKey('self', blank=True, null=True, related_name="parent")
is_deployed = models.BooleanField(default=False)
is_deleted = models.BooleanField(default=False)
is_survey = models.BooleanField(default=False)
from_project = models.BooleanField(default=True)
default_submission_status = models.IntegerField(default=0, choices=FORM_STATUS)
logs = GenericRelation('eventlog.FieldSightLog')
class Meta:
db_table = 'fieldsight_forms_data'
# unique_together = (("xf", "site"), ("xf", "is_staged", "stage"),("xf", "is_scheduled", "schedule"))
verbose_name = _("XForm")
verbose_name_plural = _("XForms")
ordering = ("-date_created",)
def url(self):
return reverse(
"download_fild_sight_form",
kwargs={
"site": self.site.username,
"id_string": self.id_string
}
)
def getname(self):
return '{0} form {1}'.format(self.form_type(),
self.xf.title,)
def getresponces(self):
return get_instances_for_field_sight_form(self.pk)
def getlatestsubmittiondate(self):
if self.site is not None:
return self.site_form_instances.order_by('-pk').values('date')[:1]
else:
return self.project_form_instances.order_by('-pk').values('date')[:1]
def get_absolute_url(self):
if self.project:
# return reverse('forms:project_html_export', kwargs={'fsxf_id': self.pk})
return reverse('forms:setup-forms', kwargs={'is_project':1, 'pk':self.project_id})
else:
# return reverse('forms:formpack_html_export', kwargs={'fsxf_id': self.pk})
return reverse('forms:setup-forms', kwargs={'is_project':0, 'pk':self.site_id})
def form_type(self):
if self.is_scheduled:
return "scheduled"
if self.is_staged:
return "staged"
if self.is_survey:
return "survey"
if not self.is_scheduled and not self.is_staged:
return "general"
def form_type_id(self):
if self.is_scheduled and self.schedule: return self.schedule.id
if self.is_staged and self.stage: return self.stage.id
return None
def stage_name(self):
if self.stage: return self.stage.name
def schedule_name(self):
if self.schedule: return self.schedule.name
def clean(self):
if self.is_staged:
if FieldSightXF.objects.filter(stage=self.stage).exists():
if not FieldSightXF.objects.filter(stage=self.stage).pk == self.pk:
raise ValidationError({
'xf': ValidationError(_('Duplicate Stage Data')),
})
if self.is_scheduled:
if FieldSightXF.objects.filter(schedule=self.schedule).exists():
if not FieldSightXF.objects.filter(schedule=self.schedule)[0].pk == self.pk:
raise ValidationError({
'xf': ValidationError(_('Duplicate Schedule Data')),
})
if not self.is_scheduled and not self.is_staged:
if self.site:
if FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False, is_staged=False,project=self.site.project).exists():
raise ValidationError({
'xf': ValidationError(_('Form Already Used in Project Level')),
})
else:
if FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False, is_staged=False,
site=self.site, project=self.project).exists():
if not FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False, is_staged=False,
site=self.site, project=self.project)[0].pk == self.pk:
raise ValidationError({
'xf': ValidationError(_('Duplicate General Form Data')),
})
@staticmethod
def get_xform_id_list(site_id):
fs_form_list = FieldSightXF.objects.filter(site__id=site_id).order_by('xf__id').distinct('xf__id')
return [fsform.xf.pk for fsform in fs_form_list]
@property
def site_name(self):
if self.site is not None:
            return u'{}'.format(self.site.name)
@property
def site_or_project_display(self):
if self.site is not None:
return u'{}'.format(self.site.name)
return u'{}'.format(self.project.name)
@property
def project_info(self):
if self.fsform:
            return self.fsform.pk
return None
@property
def has_versions(self):
return self.xf.fshistory.exists()
def __unicode__(self):
return u'{}- {}- {}'.format(self.xf, self.site, self.is_staged)
@receiver(post_save, sender=FieldSightXF)
def create_messages(sender, instance, created, **kwargs):
if instance.project is not None and created and not instance.is_staged:
send_message_project_form(instance)
elif created and instance.site is not None and not instance.is_staged:
send_message(instance)
@receiver(pre_delete, sender=FieldSightXF)
def send_delete_message(sender, instance, using, **kwargs):
if instance.project is not None:
pass
elif instance.is_staged:
pass
else:
fxf = instance
send_message(fxf)
post_save.connect(create_messages, sender=FieldSightXF)
class FieldSightParsedInstance(ParsedInstance):
_update_fs_data = None
class Meta:
proxy = True
def save(self, *args, **kwargs):
self._update_fs_data = kwargs.pop('update_fs_data', {})
super(FieldSightParsedInstance, self).save(*args, **kwargs)
def to_dict_for_mongo(self):
mongo_dict = super(FieldSightParsedInstance, self).to_dict_for_mongo()
mongo_dict.update(self._update_fs_data)
return mongo_dict
@staticmethod
def get_or_create(instance, update_data=None):
if update_data is None:
update_data = {}
created = False
try:
fspi = FieldSightParsedInstance.objects.get(instance__pk=instance.pk)
fspi.save(update_fs_data=update_data, async=False)
except FieldSightParsedInstance.DoesNotExist:
created = True
fspi = FieldSightParsedInstance(instance=instance)
fspi.save(update_fs_data=update_data, async=False)
return fspi, created
class FInstanceManager(models.Manager):
def get_queryset(self):
return super(FInstanceManager, self).get_queryset().filter(is_deleted=False)
class FInstanceDeletedManager(models.Manager):
def get_queryset(self):
return super(FInstanceDeletedManager, self).get_queryset().filter(is_deleted=True)
class FInstance(models.Model):
instance = models.OneToOneField(Instance, related_name='fieldsight_instance')
site = models.ForeignKey(Site, null=True, related_name='site_instances')
project = models.ForeignKey(Project, null=True, related_name='project_instances')
site_fxf = models.ForeignKey(FieldSightXF, null=True, related_name='site_form_instances', on_delete=models.SET_NULL)
project_fxf = models.ForeignKey(FieldSightXF, null=True, related_name='project_form_instances')
form_status = models.IntegerField(null=True, blank=True, choices=FORM_STATUS)
date = models.DateTimeField(auto_now=True)
submitted_by = models.ForeignKey(User, related_name="supervisor")
is_deleted = models.BooleanField(default=False)
version = models.CharField(max_length=255, default=u'')
objects = FInstanceManager()
deleted_objects = FInstanceDeletedManager()
logs = GenericRelation('eventlog.FieldSightLog')
@property
def get_version(self):
return self.instance.json['__version__']
def save(self, *args, **kwargs):
self.version = self.get_version
if self.project_fxf is not None and self.project_fxf.is_staged and self.site is not None:
self.site.update_current_progress()
elif self.site is not None:
self.site.update_status()
if self.form_status is None:
if self.site_fxf:
self.form_status = self.site_fxf.default_submission_status
else:
self.form_status = self.project_fxf.default_submission_status
super(FInstance, self).save(*args, **kwargs) # Call the "real" save() method.
@property
def fsxfid(self):
if self.project_fxf:
return self.project_fxf.id
else:
            return self.site_fxf.id
@property
def fsxf(self):
if self.project_fxf:
return self.project_fxf
else:
return self.site_fxf
def get_absolute_url(self):
if self.site_fxf:
fxf_id = self.site_fxf_id
else:
fxf_id = self.project_fxf_id
return "/forms/forms/" + str(fxf_id) + "#/" + str(self.instance.id)
def get_abr_form_status(self):
return dict(FORM_STATUS)[self.form_status]
def getname(self):
if self.site_fxf is None:
return '{0} form {1}'.format(self.project_fxf.form_type(), self.project_fxf.xf.title,)
return '{0} form {1}'.format(self.site_fxf.form_type(),
self.site_fxf.xf.title,)
def __unicode__(self):
if self.site_fxf is None:
return u"%s" % str(self.submitted_by) + "---" + self.project_fxf.xf.title
return u"%s" % str(self.submitted_by) + "---" + self.site_fxf.xf.title
def instance_json(self):
return json.dumps(self.instance.json)
def get_responces(self):
data=[]
json_answer = self.instance.json
json_question = json.loads(self.instance.xform.json)
base_url = DjangoSite.objects.get_current().domain
media_folder = self.instance.xform.user.username
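        # The nested helpers below walk the XForm question tree: parse_repeat
        # handles repeat groups, parse_group handles (possibly nested) groups,
        # and parse_individual_questions dispatches over the top-level children.
        # Each answer found in the submission JSON is appended to `data` as a
        # {'type', 'question', 'answer'} row; media answers are rewritten into
        # absolute attachment URLs.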
def parse_repeat(r_object):
r_question = r_object['name']
data.append(r_question)
if r_question in json_answer:
for gnr_answer in json_answer[r_question]:
for first_children in r_object['children']:
question_type = first_children['type']
question = first_children['name']
group_answer = json_answer[r_question]
answer = ''
if r_question+"/"+question in gnr_answer:
if first_children['type'] == 'note':
answer= ''
elif first_children['type'] == 'photo' or first_children['type'] == 'audio' or first_children['type'] == 'video':
answer = 'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder +'attachments/'+gnr_answer[r_question+"/"+question]
else:
answer = gnr_answer[r_question+"/"+question]
if 'label' in first_children:
question = first_children['label']
row={'type':question_type, 'question':question, 'answer':answer}
data.append(row)
else:
for first_children in r_object['children']:
question_type = first_children['type']
question = first_children['name']
answer = ''
if 'label' in first_children:
question = first_children['label']
row={'type':question_type, 'question':question, 'answer':answer}
data.append(row)
def parse_group(prev_groupname, g_object):
g_question = prev_groupname+g_object['name']
for first_children in g_object['children']:
question = first_children['name']
question_type = first_children['type']
if question_type == 'group':
parse_group(g_question+"/",first_children)
continue
answer = ''
if g_question+"/"+question in json_answer:
if question_type == 'note':
answer= ''
elif question_type == 'photo' or question_type == 'audio' or question_type == 'video':
answer = 'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder +'attachments/'+json_answer[g_question+"/"+question]
else:
answer = json_answer[g_question+"/"+question]
if 'label' in first_children:
question = first_children['label']
row={'type':question_type, 'question':question, 'answer':answer}
data.append(row)
def parse_individual_questions(parent_object):
for first_children in parent_object:
if first_children['type'] == "repeat":
parse_repeat(first_children)
elif first_children['type'] == 'group':
parse_group("",first_children)
else:
question = first_children['name']
question_type = first_children['type']
answer= ''
if question in json_answer:
if first_children['type'] == 'note':
answer= ''
elif first_children['type'] == 'photo' or first_children['type'] == 'audio' or first_children['type'] == 'video':
answer = 'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder +'attachments/'+json_answer[question]
else:
answer = json_answer[question]
if 'label' in first_children:
question = first_children['label']
row={"type":question_type, "question":question, "answer":answer}
data.append(row)
submitted_by={'type':'submitted_by','question':'Submitted by', 'answer':json_answer['_submitted_by']}
submittion_time={'type':'submittion_time','question':'Submittion Time', 'answer':json_answer['_submission_time']}
data.append(submitted_by)
data.append(submittion_time)
parse_individual_questions(json_question['children'])
return data
class InstanceStatusChanged(models.Model):
finstance = models.ForeignKey(FInstance, related_name="comments")
message = models.TextField(null=True, blank=True)
date = models.DateTimeField(auto_now=True)
old_status = models.IntegerField(default=0, choices=FORM_STATUS)
new_status = models.IntegerField(default=0, choices=FORM_STATUS)
user = models.ForeignKey(User, related_name="submission_comments")
logs = GenericRelation('eventlog.FieldSightLog')
class Meta:
ordering = ['-date']
def get_absolute_url(self):
return reverse('forms:alter-status-detail', kwargs={'pk': self.pk})
def getname(self):
return '{0} form {1}'.format(self.finstance.site_fxf.form_type(), self.finstance.site_fxf.xf.title)
class InstanceImages(models.Model):
instance_status = models.ForeignKey(InstanceStatusChanged, related_name="images")
image = models.ImageField(upload_to="submission-feedback-images",
verbose_name='Status Changed Images',)
class FieldSightFormLibrary(models.Model):
xf = models.ForeignKey(XForm)
is_global = models.BooleanField(default=False)
shared_date = models.DateTimeField(auto_now=True)
organization = models.ForeignKey(Organization, null=True, blank=True)
project = models.ForeignKey(Project, null=True, blank=True)
logs = GenericRelation('eventlog.FieldSightLog')
class Meta:
verbose_name = _("Library")
verbose_name_plural = _("Library")
ordering = ("-shared_date",)
class EducationMaterial(models.Model):
is_pdf = models.BooleanField(default=False)
pdf = models.FileField(upload_to="education-material-pdf", null=True, blank=True)
title = models.CharField(max_length=31, blank=True, null=True)
text = models.TextField(blank=True, null=True)
stage = models.OneToOneField(Stage, related_name="em", null=True, blank=True)
fsxf = models.OneToOneField(FieldSightXF, related_name="em", null=True, blank=True)
class EducationalImages(models.Model):
educational_material = models.ForeignKey(EducationMaterial, related_name="em_images")
image = models.ImageField(upload_to="education-material-images",
verbose_name='Education Images',)
# @receiver(post_save, sender=Site)
# def copy_stages_from_project(sender, **kwargs):
# site = kwargs.get('instance')
# created = kwargs.get('created')
# if created:
# project = site.project
# project_main_stages = project.stages.filter(stage__isnull=True)
# for pms in project_main_stages:
# project_sub_stages = Stage.objects.filter(stage__id=pms.pk, stage_forms__is_deleted=False, stage_forms__is_deployed=True)
# if not project_sub_stages:
# continue
# site_main_stage = Stage(name=pms.name, order=pms.order, site=site, description=pms.description,
# project_stage_id=pms.id, weight=pms.weight)
# site_main_stage.save()
# for pss in project_sub_stages:
# if pss.tags and site.type:
# if site.type.id not in pss.tags:
# continue
# site_sub_stage = Stage(name=pss.name, order=pss.order, site=site,
# description=pss.description, stage=site_main_stage, project_stage_id=pss.id, weight=pss.weight)
# site_sub_stage.save()
# if FieldSightXF.objects.filter(stage=pss).exists():
# fsxf = pss.stage_forms
# site_form = FieldSightXF(is_staged=True, default_submission_status=fsxf.default_submission_status, xf=fsxf.xf, site=site,fsform=fsxf, stage=site_sub_stage, is_deployed=True)
# site_form.save()
# general_forms = project.project_forms.filter(is_staged=False, is_scheduled=False, is_deployed=True, is_deleted=False)
# for general_form in general_forms:
# FieldSightXF.objects.create(is_staged=False, default_submission_status=general_form.default_submission_status, is_scheduled=False, is_deployed=True, site=site,
# xf=general_form.xf, fsform=general_form)
#
# schedule_forms = project.project_forms.filter(is_scheduled=True, is_deployed=True, is_deleted=False)
# for schedule_form in schedule_forms:
# schedule = schedule_form.schedule
# selected_days = tuple(schedule.selected_days.all())
# s = Schedule.objects.create(name=schedule.name, site=site, date_range_start=schedule.date_range_start,
# date_range_end=schedule.date_range_end)
# s.selected_days.add(*selected_days)
# s.save()
# FieldSightXF.objects.create(is_scheduled=True, default_submission_status=schedule_form.default_submission_status, xf=schedule_form.xf, site=site, fsform=schedule_form,
# schedule=s, is_deployed=True)
class DeployEvent(models.Model):
form_changed = models.BooleanField(default=True)
data = JSONField(default={})
date = models.DateTimeField(auto_now=True)
site = models.ForeignKey(Site, related_name="deploy_data", null=True)
project = models.ForeignKey(Project, related_name="deploy_data", null=True)
def upload_to(instance, filename):
return os.path.join(
'versions', str(instance.pk),
'xls',
os.path.split(filename)[1])
class XformHistory(models.Model):
class Meta:
unique_together = ('xform', 'version')
def _set_uuid_in_xml(self, file_name=None):
"""
Add bind to automatically set UUID node in XML.
"""
if not file_name:
file_name = self.file_name()
file_name, file_ext = os.path.splitext(file_name)
doc = clean_and_parse_xml(self.xml)
model_nodes = doc.getElementsByTagName("model")
if len(model_nodes) != 1:
raise Exception(u"xml contains multiple model nodes")
model_node = model_nodes[0]
instance_nodes = [node for node in model_node.childNodes if
node.nodeType == Node.ELEMENT_NODE and
node.tagName.lower() == "instance" and
not node.hasAttribute("id")]
if len(instance_nodes) != 1:
raise Exception(u"Multiple instance nodes without the id "
u"attribute, can't tell which is the main one")
instance_node = instance_nodes[0]
# get the first child whose id attribute matches our id_string
survey_nodes = [node for node in instance_node.childNodes
if node.nodeType == Node.ELEMENT_NODE and
(node.tagName == file_name or
node.attributes.get('id'))]
if len(survey_nodes) != 1:
raise Exception(
u"Multiple survey nodes with the id '%s'" % self.id_string)
survey_node = survey_nodes[0]
formhub_nodes = [n for n in survey_node.childNodes
if n.nodeType == Node.ELEMENT_NODE and
n.tagName == "formhub"]
if len(formhub_nodes) > 1:
raise Exception(
u"Multiple formhub nodes within main instance node")
elif len(formhub_nodes) == 1:
formhub_node = formhub_nodes[0]
else:
formhub_node = survey_node.insertBefore(
doc.createElement("formhub"), survey_node.firstChild)
uuid_nodes = [node for node in formhub_node.childNodes if
node.nodeType == Node.ELEMENT_NODE and
node.tagName == "uuid"]
if len(uuid_nodes) == 0:
formhub_node.appendChild(doc.createElement("uuid"))
if len(formhub_nodes) == 0:
# append the calculate bind node
calculate_node = doc.createElement("bind")
calculate_node.setAttribute(
"nodeset", "/%s/formhub/uuid" % file_name)
calculate_node.setAttribute("type", "string")
calculate_node.setAttribute("calculate", "'%s'" % self.uuid)
model_node.appendChild(calculate_node)
self.xml = doc.toprettyxml(indent=" ", encoding='utf-8')
# hack
# http://ronrothman.com/public/leftbraned/xml-dom-minidom-toprettyxml-\
# and-silly-whitespace/
text_re = re.compile('>\n\s+([^<>\s].*?)\n\s+</', re.DOTALL)
output_re = re.compile('\n.*(<output.*>)\n( )*')
prettyXml = text_re.sub('>\g<1></', self.xml.decode('utf-8'))
inlineOutput = output_re.sub('\g<1>', prettyXml)
inlineOutput = re.compile('<label>\s*\n*\s*\n*\s*</label>').sub(
'<label></label>', inlineOutput)
self.xml = inlineOutput
xform = models.ForeignKey(XForm, related_name="fshistory")
date = models.DateTimeField(auto_now=True)
xls = models.FileField(upload_to=upload_to, null=True)
json = models.TextField(default=u'')
description = models.TextField(default=u'', null=True)
xml = models.TextField()
id_string = models.CharField(editable=False, max_length=255)
title = models.CharField(editable=False, max_length=255)
uuid = models.CharField(max_length=32, default=u'')
version = models.CharField(max_length=255, default=u'')
@property
def get_version(self):
import re
n = XML_VERSION_MAX_ITER
xml = self.xml
p = re.compile('version="(.*)">')
m = p.search(xml)
if m:
return m.group(1)
version = check_version(xml)
if version:
return version
else:
p = re.compile("""<bind calculate="\'(.*)\'" nodeset="/(.*)/_version_" """)
m = p.search(xml)
if m:
return m.group(1)
p1 = re.compile("""<bind calculate="(.*)" nodeset="/(.*)/_version_" """)
            m1 = p1.search(xml)
if m1:
return m1.group(1)
p1 = re.compile("""<bind calculate="\'(.*)\'" nodeset="/(.*)/__version__" """)
m1 = p1.search(xml)
if m1:
return m1.group(1)
p1 = re.compile("""<bind calculate="(.*)" nodeset="/(.*)/__version__" """)
m1 = p1.search(xml)
if m1:
return m1.group(1)
return None
def check_version(xml, n):
for i in range(n, 0, -1):
p = re.compile("""<bind calculate="\'(.*)\'" nodeset="/(.*)/_version__00{0}" """.format(i))
m = p.search(xml)
if m:
return m.group(1)
p = re.compile("""<bind calculate="(.*)" nodeset="/(.*)/_version__00{0}" """.format(i))
m1 = p.search(xml)
if m1:
return m1.group(1)
return None
def save(self, *args, **kwargs):
if self.xls and not self.xml:
survey = create_survey_from_xls(self.xls)
self.json = survey.to_json()
self.xml = survey.to_xml()
self._mark_start_time_boolean()
# set_uuid(self)
# self._set_uuid_in_xml()
if not self.version:
self.version = self.get_version
super(XformHistory, self).save(*args, **kwargs)
def file_name(self):
return os.path.split(self.xls.name)[-1]
def _mark_start_time_boolean(self):
starttime_substring = 'jr:preloadParams="start"'
if self.xml.find(starttime_substring) != -1:
self.has_start_time = True
else:
self.has_start_time = False
def get_survey(self):
if not hasattr(self, "_survey"):
try:
builder = SurveyElementBuilder()
self._survey = \
builder.create_survey_element_from_json(self.json)
except ValueError:
xml = bytes(bytearray(self.xml, encoding='utf-8'))
self._survey = create_survey_element_from_xml(xml)
return self._survey
survey = property(get_survey)
class SubmissionOfflineSite(models.Model):
offline_site_id = models.CharField(max_length=20)
temporary_site = models.ForeignKey(Site, related_name="offline_submissions")
instance = models.OneToOneField(FInstance, blank=True, null=True, related_name="offline_submission")
fieldsight_form = models.ForeignKey(FieldSightXF, related_name="offline_submissiob" , null=True, blank=True)
def __unicode__(self):
if self.instance:
return u"%s ---------------%s" % (str(self.instance.id) ,self.offline_site_id)
return u"%s" % str(self.offline_site_id)
| 1.539063 | 2 |
hardhat/recipes/python/pygit2.py | stangelandcl/hardhat | 0 | 12795912 | <gh_stars>0
from .base import PipBaseRecipe
class PyGit2Recipe(PipBaseRecipe):
def __init__(self, *args, **kwargs):
super(PyGit2Recipe, self).__init__(*args, **kwargs)
self.depends = ['libgit2']
self.name = 'pygit2'
self.version = '0.24.0'
| 1.929688 | 2 |
src/message_writers/database_message_writer.py | Kaltsoon/telegram-analytics | 0 | 12795913 | <reponame>Kaltsoon/telegram-analytics
from message_writers.message_writer import MessageWriter
from entities.message import Message
class DatabaseMessageWriter(MessageWriter):
def __init__(self, connection):
self._connection = connection
def write_messages(self, messages):
message_rows = [(message.id, message.text, message.user_id)
for message in messages]
tags = []
for message in messages:
tags = tags + message.tags
tag_rows = [(tag.category, tag.text, tag.message_id)
for tag in tags]
cursor = self._connection.cursor()
cursor.executemany(
'insert into messages values (?, ?, ?)',
message_rows
)
cursor.executemany('insert into tags values (?, ?, ?)', tag_rows)
self._connection.commit()
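# Illustrative only: write_messages() assumes `messages` and `tags` tables with
# three columns each already exist. A minimal schema sketch (column names are
# inferred from the insert order above, not taken from the project's actual
# migrations):
#
#   def create_tables(connection):
#       cursor = connection.cursor()
#       cursor.execute(
#           'create table if not exists messages (id text, text text, user_id text)')
#       cursor.execute(
#           'create table if not exists tags (category text, text text, message_id text)')
#       connection.commit()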
| 2.46875 | 2 |
scripts/wiki_sp_tokenize_json.py | ceshine/modern_chinese_nlp | 42 | 12795914 | """SentencePiece Tokenization for Wiki Dataset
Example:
* python scripts/wiki_sp_tokenize_json.py --word --unigram
"""
import gzip
import json
import subprocess
from pathlib import Path
import sentencepiece as spm
import joblib
import numpy as np
import click
from tqdm import tqdm
from opencc import OpenCC
from wiki_tokenize_json import clean_text, filter_texts, SECTION_BLACKLIST
DATAPATH = "/mnt/Intel/zhwiki.json.gz"
TMPPATH = "/mnt/Intel/tmp_texts.txt"
TMPPATH_WORD = "/mnt/Intel/tmp_words.txt"
MODEL_PREFIX = "data/{algorithm}_{seg_word}_model"
CC = OpenCC('t2s')
VOC_SIZE = 7500
PAD = 1
UNK = 0
def json_to_txt():
with gzip.open(DATAPATH) as f:
with open(TMPPATH, "w") as fw:
for _, line in tqdm(enumerate(f.readlines())):
article = json.loads(line)
if "年表" in article["title"] or "列表" in article["title"]:
continue
for title, section in zip(article["section_titles"], article["section_texts"]):
title = CC.convert(title)
if title in SECTION_BLACKLIST:
continue
for paragraph in [x for x in section.split("\n") if len(x) > 50]:
paragraph = clean_text(paragraph)
if len(paragraph) < 200 or filter_texts(paragraph):
continue
for sentence in [x for x in paragraph.split("。") if len(x) > 10]:
fw.write(sentence + "。\n")
def fit_model(seg_word=True, algorithm="bpe"):
if not Path(TMPPATH).exists():
json_to_txt()
if seg_word:
print("Performing word segmentation...")
res = subprocess.run([
"thulac", "-model_dir", "/mnt/SSD_Data/openai_nlp/THULAC/models/",
"-seg_only", "-input", TMPPATH, "-output", TMPPATH_WORD
], stdout=subprocess.PIPE)
print(res)
# Train Model
print("Training model...")
spm.SentencePieceTrainer.Train(
'--input={} --model_prefix={} --vocab_size={} '
'--input_sentence_size=20000000 '
'--character_coverage=0.995 --model_type={algorithm}'.format(
TMPPATH_WORD if seg_word else TMPPATH,
MODEL_PREFIX.format(algorithm=algorithm, seg_word=seg_word),
            VOC_SIZE, algorithm=algorithm
)
)
def tokenize(seg_word=True, algorithm="bpe"):
print("Tokenizing...")
sp = spm.SentencePieceProcessor()
sp.Load(MODEL_PREFIX.format(
algorithm=algorithm, seg_word=seg_word) + ".model")
tokens = []
with open(TMPPATH_WORD if seg_word else TMPPATH) as f:
for _, sentence in tqdm(enumerate(f.readlines())):
tokens.append(
np.array(sp.EncodeAsIds(sentence))
)
joblib.dump(np.array(tokens), f"data/tokens_{algorithm}_{seg_word}.pkl")
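# A minimal sketch of consuming the dumped token arrays; the file name mirrors
# the f-string used in tokenize() above (adjust algorithm/seg_word to whatever
# options were actually run):
#
#   import joblib
#   tokens = joblib.load("data/tokens_bpe_False.pkl")
#   print(len(tokens), tokens[0][:10])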
@click.command()
@click.option("--word", is_flag=True)
@click.option("--bpe/--unigram", default=True)
def main(word, bpe):
seg_word = True if word else False
algorithm = "bpe" if bpe else "unigram"
# fit_model(seg_word, algorithm)
tokenize(seg_word, algorithm)
if __name__ == "__main__":
# pylint: disable=no-value-for-parameter
main()
| 2.71875 | 3 |
paranuara/citizens/apps.py | SPLAYER-HD/Paranuara | 0 | 12795915 | <filename>paranuara/citizens/apps.py
"""Citizens app"""
# Django
from django.apps import AppConfig
class CitizensAppConfig(AppConfig):
"""Citizens app config"""
name = "paranuara.citizens"
verbose_name = 'Citizens'
| 1.476563 | 1 |
playground/jax_basic/test_pmap.py | yf225/alpa | 114 | 12795916 | <reponame>yf225/alpa
from functools import partial
import jax
from jax import lax
import jax.numpy as jnp
def debug_pmap():
@jax.pmap
def func(x, w):
return x @ w
y = func(jnp.ones((2, 4)), jnp.ones((2, 4)))
print(y, type(y))
def test_nested_pmap():
@partial(jax.pmap, axis_name='a0', in_axes=(0, None), out_axes=0)
def add(a, b):
# a.shape = (32, 64)
# b.shape = (64, 2, 32)
@partial(jax.pmap, axis_name='a1', in_axes=(None, 1), out_axes=1)
def add_inner(x, y):
# x.shape = (32, 64)
# y.shape = (64, 32)
return x @ y
# ret.shape = (32, 2, 32)
ret = add_inner(a, b)
return ret
a = jnp.ones((2, 32, 64))
b = jnp.ones((64, 2, 32))
#jaxpr = jax.make_jaxpr(add)(a, b)
#print(jaxpr)
#print(jaxpr.jaxpr.outvars[0].aval.shape)
c = add(a, b)
print(c)
def test_allreduce_sum():
@partial(jax.pmap, axis_name='i')
def normalize(x):
return x / lax.psum(x, 'i')
print(normalize(jnp.arange(2)))
if __name__ == "__main__":
#debug_pmap()
#test_nested_pmap()
test_allreduce_sum()
| 2.09375 | 2 |
laboratory/strangedemo/criteo_predict.py | acmore/OpenEmbedding | 20 | 12795917 | <filename>laboratory/strangedemo/criteo_predict.py
import os
import json
import pandas
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--data', required=True)
parser.add_argument('--rows', type=int, required=True)
parser.add_argument('--model', required=True)
parser.add_argument('--host', required=True)
args = parser.parse_args()
data = pandas.read_csv(args.data, nrows=args.rows)
inputs = dict()
for name in data.columns:
if name[0] == 'C':
inputs[name] = [[int(value)] for value in data[name]]
elif name[0] == 'I':
inputs[name] = [[float(value)] for value in data[name]]
post = json.dumps({'inputs':inputs})
command = f"curl -d '{post}' {args.host}/v1/models/{args.model}:predict"
print(command)
os.system(command)
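# Equivalent request without shelling out to curl -- a sketch assuming the
# `requests` package is available; it targets the same TensorFlow Serving REST
# endpoint built for the curl command above.
#
#   import requests
#   response = requests.post(
#       f"{args.host}/v1/models/{args.model}:predict", json={"inputs": inputs})
#   print(response.json())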
| 2.953125 | 3 |
spherical_stats/_watson.py | dschmitz89/spherical_stats | 1 | 12795918 | <filename>spherical_stats/_watson.py
import numpy as np
from scipy.special import erfi
from ._utils import rotation_matrix
from ._descriptive_stats import orientation_matrix
from numba import njit
from scipy.optimize import brentq
class Watson:
r"""
Watson distribution
.. note::
The Watson distribution is only implemented for positive concentration parameter :math:`\kappa`.
Args:
mu (optional, ndarray (3, ) ): Mean axis
kappa (optional, float): positive concentration parameter
The Watson distribution is an isotropic distribution for
axial data. Its PDF is defined as
.. math::
p_{Watson}(\pm\mathbf{x}| \boldsymbol{\mu}, \kappa) & = M\left(\frac{1}{2},\frac{3}{2},\kappa\right)\exp(\kappa (\boldsymbol{\mu}^T\mathbf{x})^2) \\
& = \frac{\sqrt{\pi}\mathrm{erfi}(\sqrt{\kappa})}{2\sqrt{\kappa}}\exp(\kappa (\boldsymbol{\mu}^T\mathbf{x})^2)
where :math:`M` denotes `Kummer's confluent hypergeometric function <https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.hyp1f1.html#scipy.special.hyp1f1>`_
and :math:`\mathrm{erfi}` the `imaginary error function <https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.erfi.html>`_ .
References:
Mardia, Jupp. Directional Statistics, 1999.
Chen. Generate Random Samples from von Mises-Fisher and Watson Distributions. 2012
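    Examples:
        A minimal usage sketch (the axis and concentration are arbitrary
        illustration values; the import path assumes ``Watson`` is re-exported
        at package level, otherwise import it from ``spherical_stats._watson``)::
            import numpy as np
            from spherical_stats import Watson
            w = Watson(mu=np.array([0., 0., 1.]), kappa=10.)
            samples = w.rvs(500)        # ndarray of shape (500, 3)
            densities = w.pdf(samples)  # ndarray of shape (500, )
            w_fit = Watson()
            w_fit.fit(samples)          # estimates mu and kappa from the data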
"""
def __init__(self, mu = None, kappa = None):
self.mu = mu
self.kappa = kappa
def rvs(self, size = 1):
'''
Generate samples from the Watson distribution
Arguments
----------
size : int, optional, default 1
Number of samples
Returns
----------
samples : ndarray (size, 3)
samples as ndarray of shape (size, 3)
'''
if self.mu is not None and self.kappa is not None:
sqrt_kappa = np.sqrt(self.kappa)
constant = np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa)
z = np.array([0., 0., 1.])
rot_matrix = rotation_matrix(z, self.mu)
samples = _sample(self.kappa, constant, rot_matrix, size)
return samples
else:
raise ValueError("Watson distribution not parameterized. Fit it to data or set parameters manually.")
def pdf(self, x):
'''
Calculate probability density function of a set of vectors ``x`` given a parameterized
Watson distribution
Arguments
----------
x : ndarray (size, 3)
Vectors to evaluate the PDF at
Returns
----------
pdfvals : ndarray (size,)
PDF values as ndarray of shape (size, )
'''
if self.mu is not None and self.kappa is not None:
sqrt_kappa = np.sqrt(self.kappa)
constant = np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa)
pdf = _pdf_wo_constant(self.mu, self.kappa, x)
pdf = pdf * constant
return pdf
else:
raise ValueError("Watson distribution not parameterized. Fit it to data or set parameters manually.")
def fit(self, data):
'''
Fits the Watson distribution to data
Arguments
----------
data : ndarray (n, 3)
Vector data the distribution is fitted to
'''
T = 1/data.shape[0] * orientation_matrix(data)
evals, evectors = np.linalg.eigh(T)
mu_fitted = evectors[:, 2]
intermed_res = np.sum(mu_fitted * (T@mu_fitted))
def obj(kappa):
sqrt_kappa = np.sqrt(kappa)
nominator = (2*np.exp(kappa)*sqrt_kappa - np.sqrt(np.pi) * erfi(sqrt_kappa))/(4*kappa**1.5)
denominator = np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa)
f = nominator/denominator - intermed_res
return f
kappa_fit, root_res = brentq(obj, 1e-4, 500., full_output=True)
if root_res.converged == True:
self.mu = mu_fitted
self.kappa = kappa_fit
else:
raise ValueError("Concentration parameter could not be estimated.")
@njit(cache = True)
def _pdf_wo_constant(mu, kappa, x):
n_samples, _ = x.shape
unnormalized_pdf = np.zeros((n_samples, ))
for i in range(n_samples):
unnormalized_pdf[i] = np.exp(kappa * ((x[i, :] * mu).sum())**2)
return unnormalized_pdf
@njit(cache = True)
def rejection_sampling_numba(kappa, constant, size):
res_array = np.zeros((size, ))
#maximal density for given kappa
maxy = constant * np.exp(kappa)
number_samples = 0
while number_samples < size:
#draw uniform samples
x_rand = np.random.uniform(-1.0, 1.0)
y_rand = np.random.uniform(0.0, maxy)
#calculate density at position x
f = constant * np.exp(kappa * x_rand * x_rand)
#accept or reject
if y_rand < f:
res_array[number_samples] = x_rand
number_samples +=1
return res_array
@njit(cache = True)
def _sample(kappa, constant, rot_matrix, size):
ones = np.ones((size, ))
z = rejection_sampling_numba(kappa, constant, size)
temp = np.sqrt(ones - np.square(z))
uniformcirle = 2 * np.pi * np.random.random(size)
x = np.cos(uniformcirle)
y = np.sin(uniformcirle)
samples = np.empty((size, 3))
samples[:, 0] = temp * x
samples[:, 1] = temp * y
samples[:, 2] = z
for i in range(size):
vec=samples[i, :]
samples[i, :] = rot_matrix.dot(vec)
    return samples
| 2.984375 | 3
data/studio21_generated/introductory/3257/starter_code.py | vijaykumawat256/Prompt-Summarization | 0 | 12795919 | <gh_stars>0
def slogan_maker(array):
| 0.929688 | 1 |
tests/test_data/test_believers/test_meta_view.py | ffeldmann/edflow | 23 | 12795920 | <reponame>ffeldmann/edflow
import pytest
import numpy as np
import os
from edflow.data.believers.meta_view import MetaViewDataset
from edflow.util import walk, retrieve
def _setup(root, N=100, V=25):
from PIL import Image
super_root = os.path.join(root, "METAVIEW__test_data__METAVIEW")
super_root = os.path.abspath(super_root)
root = os.path.join(super_root, "base")
os.makedirs(os.path.join(root, "images"), exist_ok=True)
os.makedirs(os.path.join(root, "labels"), exist_ok=True)
paths = np.array([os.path.join(root, "images", f"{i:0>3d}.png") for i in range(N)])
mmap_path = os.path.join(root, "labels", f"image:image-*-{N}-*-{paths.dtype}.npy")
mmap = np.memmap(mmap_path, dtype=paths.dtype, mode="w+", shape=(N,))
mmap[:] = paths
data = np.arange(N)
mmap_path = os.path.join(root, "labels", f"attr1-*-{N}-*-{data.dtype}.npy")
mmap = np.memmap(mmap_path, dtype=data.dtype, mode="w+", shape=(N,))
mmap[:] = data
data = np.zeros(shape=(N, 2))
mmap_path = os.path.join(root, "labels", f"attr2-*-{N}x2-*-{data.dtype}.npy")
mmap = np.memmap(mmap_path, dtype=data.dtype, mode="w+", shape=(N, 2))
mmap[:] = data
data = np.ones(shape=(N, 17, 2))
mmap_path = os.path.join(root, "labels", f"keypoints-*-{N}x17x2-*-{data.dtype}.npy")
mmap = np.memmap(mmap_path, dtype=data.dtype, mode="w+", shape=(N, 17, 2))
mmap[:] = data
for p in paths:
image = (255 * np.ones((64, 64, 3))).astype(np.uint8)
im = Image.fromarray(image)
im.save(p)
with open(os.path.join(root, "meta.yaml"), "w+") as mfile:
mfile.write(
"""
description: |
# Test Dataset
This is a dataset which loads images.
All paths to the images are in the label `image`.
## Content
image: images
loader_kwargs:
image:
support: "-1->1"
"""
)
view_root = os.path.join(super_root, "mview")
os.makedirs(os.path.join(view_root, "labels", "views"), exist_ok=True)
# view 1
data = np.arange(V).astype(int)
mmap_path = os.path.join(
view_root, "labels", "views", f"simple-*-{V}-*-{data.dtype}.npy"
)
mmap = np.memmap(mmap_path, dtype=data.dtype, mode="w+", shape=(V,))
mmap[:] = data
# view 2
data = np.zeros(shape=(V, 5, 3)).astype(int)
mmap_path = os.path.join(
view_root, "labels", "views", f"complex-*-{V}x5x3-*-{data.dtype}.npy"
)
mmap = np.memmap(mmap_path, dtype=data.dtype, mode="w+", shape=(V, 5, 3))
mmap[:] = data
# view 3
data = np.arange(V).astype(int)
mmap_path = os.path.join(view_root, "labels", f"simple-*-{V}-*-{data.dtype}.npy")
mmap = np.memmap(mmap_path, dtype=data.dtype, mode="w+", shape=(V,))
mmap[:] = data
with open(os.path.join(view_root, "meta.yaml"), "w+") as mfile:
mfile.write(
"""
description: |
# Test Dataset
This is a view dataset which loads images from a base.
base_dset: edflow.data.believers.meta.MetaDataset
base_kwargs:
root: {}
views:
simple1: simple
simple: views/simple
complex:
- views/complex
- views/simple
""".format(
root
)
)
return super_root, root, view_root
def _teardown(test_data_root):
if test_data_root == ".":
raise ValueError("Are you sure you want to delete this directory?")
os.system(f"rm -rf {test_data_root}")
def test_meta_view_dset():
N = 100
V = 25
try:
super_root, base_root, view_root = _setup(".", N, V)
M = MetaViewDataset(view_root)
M.expand = True
M.append_labels = False
M.show()
assert len(M) == V
for kk in ["simple1", "simple", "complex"]:
assert kk in M.labels
if kk == "complex":
for i in range(2):
for k in ["attr1", "attr2", "image_", "keypoints"]:
assert k in M.labels[kk][i]
assert len(M.labels[kk][i][k]) == V
else:
for k in ["attr1", "attr2", "image_", "keypoints"]:
assert k in M.labels[kk]
assert len(M.labels[kk][k]) == V
d = M[0]
# For ex 0 this is the same for both complex and simple
single_ref = {"image": np.ones(shape=(64, 64, 3)), "index_": 0}
ref_simple = single_ref
ref_complex = [[single_ref] * 3] * 20
ref = {
"simple1": ref_simple,
"simple": ref_simple,
"complex": [ref_complex, ref_simple],
"index_": 0,
}
def tester(key, val):
assert np.all(val == retrieve(ref, key))
walk(d, tester, pass_key=True)
assert hasattr(M, "meta")
finally:
_teardown(super_root)
| 2.125 | 2 |
WiFi/mp3/mp3/urls.py | jpan127/RJD-MP3 | 0 | 12795921 |
from django.conf.urls import url, include
from django.contrib import admin
from django.views.generic import TemplateView
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('server.urls'), name='server'),
]
| 1.65625 | 2 |
Packages/Dead/demo/Script/tutorials/template_file.py | xylar/cdat | 62 | 12795922 | # Adapted for numpy/ma/cdms2 by convertcdms.py
# Import the modules needed for the tutorial
import vcs, cdms2 as cdms, cdutil, time, os, sys
# Open data file:
filepath = os.path.join(vcs.sample_data, 'clt.nc')
cdmsfile = cdms.open( filepath )
# Extract a 3 dimensional data set and get a subset of the time dimension
data = cdmsfile('clt', longitude=(-180, 180), latitude = (-90., 90.))
# Initial VCS:
v = vcs.init()
# Assign the variable "t_asd" to the persistent 'ASD' template.
t_asd = v.gettemplate( 'ASD' )
# Create a new template from the existing 'ASD' template
t2_asd = v.createtemplate( 'new', 'ASD' )
# Plot the data using the above 'ASD' template.
v.plot( data, t_asd )
# Remove picture segments from the page.
t_asd.list( )
t_asd.xlabel2.priority = 0
t_asd.xtic2.priority = 0
t_asd.xtic2.priority = 0
t_asd.legend.priority=0
# save current 'Mean' placement for x and y coordinates
xmean_current = t_asd.mean.x
ymean_current = t_asd.mean.y
# now change the placement
t_asd.mean.x=0.5 # move the "Mean" text to x-axis center
t_asd.mean.y=0.5 # move the "Mean" text to y-axis center
t_asd.data.priority = 0 # remove the data so the "Mean" text is visible.
v.update()
#############################################################################
# Place the colorbar legend vertically and to the right side
#############################################################################
t_asd.data.priority = 1
t_asd.legend.priority = 1
t_asd.legend.list() # list the legend members
v.mode=0 # turn the automatic update off
# move 'Mean' text back where it was
t_asd.mean.x = xmean_current
t_asd.mean.y = ymean_current
# move the right side of a plot to the left to make space for the legend
# first move the inner plot
t_asd.data.x2 = 0.87
# then move the surrounding box - the right y-axis
t_asd.box1.x2 = 0.87
# set the top x-axis (second x axis) to be blank
t_asd.xlabel2.priority = 0
t_asd.xtic2.priority = 0
# set the right y-axis (second y axis) to be blank (priority=0)
t_asd.ylabel2.priority = 0
t_asd.ytic2.priority = 0
# move the colorbar legend position, to be vertical and to the right
t_asd.legend.x1=0.9
t_asd.legend.y1=0.82
t_asd.legend.x2=0.95
t_asd.legend.y2=0.3
# clear the canvas and plot the template again
v.clear()
v.plot( data, t_asd )
| 2.671875 | 3 |
bcbiovm/docker/install.py | kern3020/bcbio-nextgen-vm | 0 | 12795923 | """Install or upgrade a bcbio-nextgen installation.
"""
from __future__ import print_function
import os
import subprocess
import sys
import yaml
from bcbiovm.docker import manage, mounts
DEFAULT_IMAGE = "chapmanb/bcbio-nextgen-devel"
def full(args, dockerconf):
"""Full installaction of docker image and data.
"""
updates = []
args = add_install_defaults(args)
if args.wrapper:
updates.append("wrapper scripts")
upgrade_bcbio_vm()
dmounts = mounts.prepare_system(args.datadir, dockerconf["biodata_dir"])
if args.install_tools:
updates.append("bcbio-nextgen code and third party tools")
pull(args, dockerconf)
_check_docker_image(args)
# Ensure external galaxy configuration in sync when doing tool upgrade
manage.run_bcbio_cmd(args.image, dmounts, ["upgrade"])
if args.install_data:
if len(args.genomes) == 0:
print("Data not installed, no genomes provided with `--genomes` flag")
sys.exit(1)
elif len(args.aligners) == 0:
print("Data not installed, no aligners provided with `--aligners` flag")
sys.exit(1)
else:
updates.append("biological data")
_check_docker_image(args)
manage.run_bcbio_cmd(args.image, dmounts, _get_cl(args))
_save_install_defaults(args)
if updates:
print("\nbcbio-nextgen-vm updated with latest %s" % " and ".join(updates))
else:
print("\nNo update targets specified, need '--wrapper', '--tools' or '--data'\n"
"See 'bcbio_vm.py upgrade -h' for more details.")
def _get_cl(args):
clargs = ["upgrade"]
if args.install_data:
clargs.append("--data")
for g in args.genomes:
clargs.extend(["--genomes", g])
for a in args.aligners:
clargs.extend(["--aligners", a])
return clargs
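# For example (illustrative values only): with install_data=True,
# genomes=["GRCh37"] and aligners=["bwa"], _get_cl returns
# ["upgrade", "--data", "--genomes", "GRCh37", "--aligners", "bwa"],
# which full() passes into the container via manage.run_bcbio_cmd.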
def upgrade_bcbio_vm():
"""Upgrade bcbio-nextgen-vm wrapper code.
"""
conda_bin = os.path.join(os.path.dirname(os.path.realpath(sys.executable)), "conda")
if not os.path.exists(conda_bin):
print("Cannot update bcbio-nextgen-vm; not installed with conda")
else:
subprocess.check_call([conda_bin, "install", "--yes",
"-c", "https://conda.binstar.org/bcbio",
"bcbio-nextgen-vm"])
def pull(args, dockerconf):
"""Pull down latest docker image, using export uploaded to S3 bucket.
Long term plan is to use the docker index server but upload size is
currently smaller with an exported gzipped image.
"""
print("Retrieving bcbio-nextgen docker image with code and tools")
# subprocess.check_call(["docker", "pull", image])
assert args.image, "Unspecified image name for docker import"
subprocess.check_call(["docker", "import", dockerconf["image_url"], args.image])
def _save_install_defaults(args):
"""Save arguments passed to installation to be used on subsequent upgrades.
Avoids needing to re-include genomes and aligners on command line.
"""
install_config = _get_config_file(args)
if install_config is None:
return
if os.path.exists(install_config) and os.path.getsize(install_config) > 0:
with open(install_config) as in_handle:
cur_config = yaml.load(in_handle)
else:
cur_config = {}
for attr in ["genomes", "aligners"]:
if not cur_config.get(attr):
cur_config[attr] = []
for x in getattr(args, attr):
if x not in cur_config[attr]:
cur_config[attr].append(str(x))
if args.image != DEFAULT_IMAGE and args.image:
cur_config["image"] = args.image
with open(install_config, "w") as out_handle:
yaml.dump(cur_config, out_handle, default_flow_style=False, allow_unicode=False)
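# The resulting install-params.yaml is a plain YAML mapping. With one genome
# and one aligner recorded it would look roughly like this (illustrative
# values, not a generated file):
#
#   genomes:
#   - GRCh37
#   aligners:
#   - bwa
#   image: chapmanb/bcbio-nextgen-devel   # only written for non-default images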
def _get_install_defaults(args):
install_config = _get_config_file(args)
if install_config and os.path.exists(install_config) and os.path.getsize(install_config) > 0:
with open(install_config) as in_handle:
return yaml.load(in_handle)
return {}
def _add_docker_defaults(args, default_args):
if not hasattr(args, "image") or not args.image:
if default_args.get("image") and not default_args.get("images") == "None":
args.image = default_args["image"]
else:
args.image = DEFAULT_IMAGE
return args
def add_install_defaults(args):
"""Add previously saved installation defaults to command line arguments.
"""
default_args = _get_install_defaults(args)
for attr in ["genomes", "aligners"]:
for x in default_args.get(attr, []):
new_val = getattr(args, attr)
if x not in getattr(args, attr):
new_val.append(x)
setattr(args, attr, new_val)
args = _add_docker_defaults(args, default_args)
return args
def _check_docker_image(args):
"""Ensure docker image exists.
"""
for image in subprocess.check_output(["docker", "images"]).split("\n"):
parts = image.split()
if len(parts) > 1 and parts[0] == args.image:
return
raise ValueError("Could not find docker image %s in local repository" % args.image)
def docker_image_arg(args):
if not hasattr(args, "image") or not args.image:
default_args = _get_install_defaults(args)
args = _add_docker_defaults(args, default_args)
_check_docker_image(args)
return args
def _get_config_file(args):
config_dir = os.path.join(args.datadir, "config")
if not os.path.exists(config_dir):
os.makedirs(config_dir)
return os.path.join(config_dir, "install-params.yaml")
| 2.1875 | 2 |
SystemCode/neo.py | IRS-PM/IRS-PM-2021-07-05-IS03FT-Group-8-SnapYummy-Cooking-Assistant | 1 | 12795924 | import enum
from pandas.io.pytables import DuplicateWarning
from py2neo import Node, Relationship, Graph, NodeMatcher
import pandas as pd
from operator import itemgetter
from typing import List, Dict
import random
graph = Graph("http://localhost:7474", username="neo4j", password='<PASSWORD>')
main_ingr = set(['apple', 'banana', 'bell pepper', 'broccoli', 'cabbage', 'carrot', 'cheese', 'coconut', 'cucumber', 'egg', 'fish', 'grapes', 'lemon', 'mango', 'milk', 'mushroom', 'oranges', 'peach', 'pear', 'pineapple', 'potatoes', 'pumpkin', 'seafood', 'shrimp', 'strawberry', 'tomatoes', 'watermelon', 'winter melon', 'garlic', 'corn', 'eggplant', 'lettuce', 'onion', 'scallion', 'chicken', 'beef', 'lamb', 'pork', 'sauce', 'duck', 'meatball', 'wine', 'berries', 'crabmeat', 'kiwi', 'bitter melon', 'pepper', 'peas', 'ginger', 'shells', 'chili', 'ham', 'sausage', 'butter', 'bread', 'rice', 'vanilla'])
def getRecipes(
ingr: List[str],
topk: int = 10,
dietaryList: List[str] = None,
cuisine: str = None,
skip: int = 0) -> List[Dict]:
n = len(ingr)
if (n == 0): return [{}]
ingr_type = {}
for it in ingr:
it = it.lower()
if it in main_ingr:
ingr_type[it] = (it.upper(), 'Has_Main_Ingredient', 'main_ingredient')
print(it, ' is main ingredient')
else:
ingr_type[it] = (it.lower(), 'Has_Ingredient', 'ingredient')
cand = {name: 0 for name in ingr}
query_indegree = "WITH "
for i in range(n):
query_indegree += "size((:recipe)-[:{2}]->(:{3}{{Name:'{0}'}})) as a{1},".format(ingr_type[ingr[i]][0], str(i), ingr_type[ingr[i]][1], ingr_type[ingr[i]][2])
query_indegree = query_indegree[:-1] + " RETURN "
for i in range(n):
query_indegree += "a{0},".format(str(i))
query_indegree = query_indegree[:-1]
res = graph.run(query_indegree)
indegrees = pd.DataFrame(res)
for i, name in enumerate(ingr):
cand[name] = indegrees.iloc[[0],[i]].values[0][0]
    sorted_ingr = sorted(cand, key=cand.get)  # order ingredient names by their in-degree counts
query = ''
for i in range(n):
query += "OPTIONAL MATCH ((rep:recipe)-[r{0}:{3}]->(i{1}:{4}{{Name: '{2}'}})) ".format(str(i), str(i), ingr_type[sorted_ingr[i]][0], ingr_type[sorted_ingr[i]][1], ingr_type[sorted_ingr[i]][2])
if dietaryList is not None:
for dietary in dietaryList:
if dietary == 'halal':
query += "MATCH (rep) WHERE rep.halal is null "
elif dietary == 'vegetarian':
vegan = 'vegan'
query += "MATCH (rep)-[rs:Has_Meal_Type]->(:meal_type{{Name: '{0}'}}) WHERE (rep)-[:Has_Meal_Type]->(:meal_type{{Name: '{1}'}}) ".format(vegan, vegan)
elif dietary == 'fruitarian':
query += "MATCH (rep)-[:Has_Inferred_Meal_Type]->(:meal_type{Name:'fruit'}) "
elif dietary == 'eggetarian':
query += "MATCH (rep)-[:Has_Main_Ingredient]->(:main_ingredient{Name:'EGG'}) "
if cuisine is not None:
query += "MATCH (rep)-[:Has_Cuisine_Type]->(:cuisine_type{{Name: '{0}'}}) ".format(cuisine)
query += "WITH rep, "
for i in range(n):
query += "r{0}, i{1}, ".format(str(i), str(i))
query += "(size((rep:recipe)-[:Has_Ingredient]->(:ingredient)) + size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient))) as degree, "
for i in range(n):
query += "size((rep:recipe)-[:{2}]->(:{3}{{Name: '{0}'}})) as minus_degree{1},".format(ingr_type[sorted_ingr[i]][0], str(i), ingr_type[sorted_ingr[i]][1], ingr_type[sorted_ingr[i]][2])
query = query[:-1] + ' '
query += "RETURN rep, "
for i in range(n):
query += "r{0}, i{1}, minus_degree{2},".format(str(i), str(i), str(i))
query += "degree ORDER BY degree"
for i in range(n):
query += "-minus_degree{0} * 2".format(str(i))
query += ","
for i in range(n):
query += "(case when minus_degree{0}>=1 then 1 else 0 end)+".format(str(i))
query = query[:-1] + " desc"
query += ",degree SKIP {0} LIMIT 25;".format(skip * topk)
print(query)
res = graph.run(query)
res = pd.DataFrame(res)
# print(res)
recipes = []
for i in range(min(topk, res.shape[0])):
recipes.append(res.iloc[i,0])
return recipes
########################################
# Unit Test 1
########################################
# res = getRecipes(['apple','banana', 'strawberry'], dietaryList=['vegetarian'], cuisine='chinese')
# print(type(res[0]))
# Sample query
# query =
# '''
# OPTIONAL MATCH ((rep:recipe)-[r0:Has_Main_Ingredient]->(i0:main_ingredient{Name: 'BANANA'}))
# OPTIONAL MATCH ((rep:recipe)-[r1:Has_Main_Ingredient]->(i1:main_ingredient{Name: 'APPLE'}))
# OPTIONAL MATCH ((rep:recipe)-[r2:Has_Main_Ingredient]->(i2:main_ingredient{Name: 'STRAWBERRY'}))
# MATCH (rep)-[rs:Has_Meal_Type]->(:meal_type{Name: 'vegan'})
# WHERE (rep)-[:Has_Meal_Type]->(:meal_type{Name: 'vegan'})
# WITH rep, r0, i0, r1, i1, r2, i2, rs,
# (size((rep:recipe)-[:Has_Ingredient]->(:ingredient)) + size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient))) as degree, size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient{Name: 'BANANA'})) as minus_degree0,
# size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient{Name: 'APPLE'})) as minus_degree1,
# size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient{Name: 'STRAWBERRY'})) as minus_degree2
# RETURN rep, r0, i0, minus_degree0,r1, i1, rs, minus_degree1,r2, i2, minus_degree2,degree
# ORDER BY degree-minus_degree0 * 2-minus_degree1 * 2-minus_degree2 * 2,
# (case when minus_degree0>=1 then 1 else 0 end)+(case when minus_degree1>=1 then 1 else 0 end)+(case when minus_degree2>=1 then 1 else 0 end) desc,degree LIMIT 25;
# '''
def getRecipeByName(rep: str) -> Dict:
query = "MATCH (rep:recipe) WHERE rep.Name=~'(?i){0}' RETURN rep".format(rep)
res = graph.run(query)
res = pd.DataFrame(res)
if res.empty:
return None
return res.iloc[0,0]
########################################
# Unit Test 2
########################################
# rep = 'super Fruity Smoothie'
# print(getRecipeByName(rep))
# Sample query
# MATCH (rep:recipe)
# WHERE rep.Name=~'(?i)super Fruity Smoothie'
# RETURN rep
def getIngredient(id: str, rep: str) -> List[str]:
query = "MATCH (rep:recipe)-[:Has_Ingredient]->(a:ingredient) WHERE rep.Name=~'(?i){0}' AND rep.RecipeId='{1}' RETURN a".format(rep, id)
res = graph.run(query)
res = pd.DataFrame(res)
ingrs = []
for i in range(res.shape[0]):
ingrs.append(res.iloc[i,0]['Name'])
return ingrs
########################################
# Unit Test 3
########################################
# rep = 'super Fruity Smoothie'
# print(getIngredient(rep))
# Sample query
# MATCH (rep:recipe)-[:Has_Ingredient]->(a:ingredient)
# WHERE rep.Name=~'(?i)super Fruity Smoothie'
# RETURN a
# def random_init(length = 50):
# query = "MATCH (n:recipe) RETURN n LIMIT {0}".format(str(length))
# res = graph.run(query)
# res = pd.DataFrame(res)
# for i in range(res.shape[0]):
# random_set[i] = res.iloc[i,0]
def browser(topk: int = 10,
dietaryList: List[str] = None,
cuisine: str = None) -> List[Dict]:
query = "MATCH (a:recipe) WITH rand() as r, a "
if dietaryList is not None:
for dietary in dietaryList:
if dietary == 'halal':
query += "MATCH (a) WHERE a.halal is null "
elif dietary == 'vegetarian':
vegan = 'vegan'
query += "MATCH (a)-[rs:Has_Meal_Type]->(:meal_type{{Name: '{0}'}}) WHERE (a)-[:Has_Meal_Type]->(:meal_type{{Name: '{1}'}}) ".format(vegan, vegan)
elif dietary == 'fruitarian':
query += "MATCH (a)-[:Has_Inferred_Meal_Type]->(:meal_type{Name:'fruit'}) "
elif dietary == 'eggetarian':
query += "MATCH (a)-[:Has_Main_Ingredient]->(:main_ingredient{Name:'EGG'}) "
if cuisine is not None:
query += "MATCH (a)-[:Has_Cuisine_Type]->(:cuisine_type{{Name: '{0}'}}) ".format(cuisine)
query += "RETURN a ORDER BY r LIMIT {0};".format(topk)
print(query)
res = graph.run(query)
res = pd.DataFrame(res)
recipes = []
for i in range(res.shape[0]):
recipes.append(res.iloc[i,0])
return recipes
########################################
# Unit Test 4
########################################
# print(browser(dietaryList=['halal','fruitarian'], cuisine='chinese'))
| 2.90625 | 3 |
gui.py | sp6hfe/CwGen | 0 | 12795925 | import cwgen
import os
import sys
import PySimpleGUI as sg
class CwGenUI:
# General
# GUI - window config
WINDOW_DESCRIPTION = 'CW training material generator by SP6HFE'
# GUI - text config
E2CW_VER_LOCAL_KEY = '-E2CW VER LOCAL-'
E2CW_VER_ONLINE_KEY = '-E2CW VER ONLINE-'
# GUI - button config
FILE_BROWSE_KEY = '-ADD FILE-'
FILE_REMOVE_KEY = '-REMOVE FILE-'
E2CW_DOWNLOAD_KEY = '-E2CW DOWNLOAD-'
E2CW_GENERATE_KEY = '-E2CW GENERATE-'
# GUI - input config
FILE_PATH_INPUT_KEY = '-FILE PATH-'
# GUI - table config
FILES_DATA_TABLE_KEY = '-FILES DATA-'
WORDS_FILTERED_TABLE_KEY = '-WORDS FILTERED-'
WORDS_TO_GEN_TABLE_KEY = '-WORDS TO GEN-'
# GUI - sliders config
H_SLIDER_WIDTH = 21
H_SLIDER_HEIGHT = 10
LETTERS_MIN_KEY = '-LETTERS MIN-'
LETTERS_MAX_KEY = '-LETTERS MAX-'
LETTERS_MIN_RANGE_START_KEY = '-LETTERS MIN RANGE START-'
LETTERS_MIN_RANGE_STOP_KEY = '-LETTERS MIN RANGE STOP-'
LETTERS_MAX_RANGE_START_KEY = '-LETTERS MAX RANGE START-'
LETTERS_MAX_RANGE_STOP_KEY = '-LETTERS MAX RANGE STOP-'
WORDS_TO_TRAIN_KEY = '-WORDS TO TRAIN-'
WORDS_TO_TRAIN_RANGE_START_KEY = 'WORDS TO TRAIN RANGE START-'
WORDS_TO_TRAIN_RANGE_STOP_KEY = 'WORDS TO TRAIN RANGE STOP-'
E2CW_WPM_KEY = '-E2CW WPM-'
E2CW_WPM_RANGE_START_KEY = '-E2CW WPM RANGE START-'
E2CW_WPM_RANGE_STOP_KEY = '-E2CW WPM RANGE STOP-'
E2CW_FARNS_KEY = '-E2CW FARNS-'
E2CW_FARNS_RANGE_START_KEY = '-E2CW FARNS RANGE START-'
E2CW_FARNS_RANGE_STOP_KEY = '-E2CW FARNS RANGE STOP-'
E2CW_PITCH_KEY = '-E2CW PITCH-'
E2CW_PITCH_RANGE_START_KEY = '-E2CW PITCH RANGE START-'
E2CW_PITCH_RANGE_STOP_KEY = '-E2CW PITCH RANGE STOP-'
# GUI - combo config
COMBO_LETTERS_SET_KEY = '-LETTERS SET-'
COMBO_MATERIAL_GENERATION_KEY = '-MATERIAL GENERATION-'
def __init__(self):
"""Class initialization"""
# Members
self.files_table_idx = -1
self.cw_gen = cwgen.CwGen()
self.letters_sets = self.cw_gen.get_letters_sets()
self.training_generator_schemes = self.cw_gen.get_training_generator_schemes()
ebook2cw_version_local = self.cw_gen.get_ebook2cw_version_local()
ebook2cw_version_online = self.cw_gen.get_ebook2cw_version_online()
# GUI - header columns -> name, column size, visible?
files_data_header = [
("UUID", 0, False),
("File name", 14, True),
("Words", 6, True),
("Min len", 7, True),
("Max len", 7, True)
]
words_filtered_header = [
("Word length", 15, True),
("Count", 15, True)
]
words_to_gen_header = [
("Word length", 15, True),
("Count", 15, True)
]
# GUI - tables
files_data_table = [sg.Table(values=[],
headings=[name for name, _size,
_visible in files_data_header],
col_widths=[size for _name, size,
_visible in files_data_header],
visible_column_map=[
visible for _name, _size, visible in files_data_header],
num_rows=5,
justification='left',
auto_size_columns=False,
enable_events=True,
key=self.FILES_DATA_TABLE_KEY
)]
words_filtered_table = [sg.Table(values=[],
headings=[
name for name, _size, _visible in words_filtered_header],
col_widths=[
size for _name, size, _visible in words_filtered_header],
num_rows=5,
justification='left',
auto_size_columns=False,
key=self.WORDS_FILTERED_TABLE_KEY)]
words_to_gen_table = [sg.Table(values=[],
headings=[
name for name, _size, _visible in words_to_gen_header],
col_widths=[
size for _name, size, _visible in words_to_gen_header],
num_rows=5,
justification='left',
auto_size_columns=False,
key=self.WORDS_TO_GEN_TABLE_KEY)]
# GUI - rows
files_operation = [sg.Input(enable_events=True, visible=False, key=self.FILE_PATH_INPUT_KEY),
sg.FileBrowse(button_text="Add", file_types=(
("ALL Files", "*.*"), ("CWOPS sessions", "*.cwo")), target=self.FILE_PATH_INPUT_KEY, key=self.FILE_BROWSE_KEY),
sg.Button(button_text="Remove selected", key=self.FILE_REMOVE_KEY)]
letters_min = [sg.Text("MIN:", size=(4, 1)),
sg.Text("0", size=(2, 1),
key=self.LETTERS_MIN_RANGE_START_KEY),
sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT),
orientation='h', enable_events=True, key=self.LETTERS_MIN_KEY),
sg.Text("0", size=(2, 1), key=self.LETTERS_MIN_RANGE_STOP_KEY)]
letters_max = [sg.Text("MAX:", size=(4, 1)),
sg.Text("0", size=(2, 1),
key=self.LETTERS_MAX_RANGE_START_KEY),
sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT),
orientation='h', enable_events=True, key=self.LETTERS_MAX_KEY),
sg.Text("0", size=(2, 1), key=self.LETTERS_MAX_RANGE_STOP_KEY)]
letters_set = [sg.Text('From set:'),
sg.Combo(values=([data['description'] for _id, data in self.letters_sets.items()]),
default_value=list(
self.letters_sets.items())[0][1]['description'],
size=(max(len(data['description'])
for _id, data in self.letters_sets.items()), 1),
readonly=True,
enable_events=True,
key=self.COMBO_LETTERS_SET_KEY)]
generator_scheme = [sg.Text('Using scheme:'),
sg.Combo(values=([name for _id, name in self.training_generator_schemes.items()]),
default_value=list(
self.training_generator_schemes.items())[0][1],
size=(
max(len(name) for _id, name in self.training_generator_schemes.items()), 1),
readonly=True,
enable_events=True,
key=self.COMBO_MATERIAL_GENERATION_KEY)]
words_to_train = [sg.Text("SIZE:", size=(6, 1)),
sg.Text("0", size=(2, 1),
key=self.WORDS_TO_TRAIN_KEY),
sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT),
orientation='h', enable_events=True, key=self.WORDS_TO_TRAIN_RANGE_START_KEY),
sg.Text("0", size=(2, 1), key=self.WORDS_TO_TRAIN_RANGE_STOP_KEY)]
e2cw_version = [sg.Text('Local version:', size=(15, 1)), sg.Text(ebook2cw_version_local, key=self.E2CW_VER_LOCAL_KEY),
sg.Text('Online version:', size=(15, 1)), sg.Text(ebook2cw_version_online, key=self.E2CW_VER_ONLINE_KEY)]
e2cw_buttons = [sg.Button('Download / Update Ebook2CW', key=self.E2CW_DOWNLOAD_KEY),
sg.Button('Generate training files', key=self.E2CW_GENERATE_KEY)]
e2cw_wpm = [sg.Text("WPM:", size=(6, 1)),
sg.Text("0", size=(2, 1),
key=self.E2CW_WPM_RANGE_START_KEY),
sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT),
orientation='h', enable_events=True, key=self.E2CW_WPM_KEY),
sg.Text("0", size=(2, 1), key=self.E2CW_WPM_RANGE_STOP_KEY)]
e2cw_farns = [sg.Text("FARNS:", size=(6, 1)),
sg.Text("0", size=(2, 1),
key=self.E2CW_FARNS_RANGE_START_KEY),
sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT),
orientation='h', enable_events=True, key=self.E2CW_FARNS_KEY),
sg.Text("0", size=(2, 1), key=self.E2CW_FARNS_RANGE_STOP_KEY)]
e2cw_pitch = [sg.Text("PITCH:", size=(6, 1)),
sg.Text("0", size=(2, 1),
key=self.E2CW_PITCH_RANGE_START_KEY),
sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT),
orientation='h', enable_events=True, key=self.E2CW_PITCH_KEY),
sg.Text("0", size=(2, 1), key=self.E2CW_PITCH_RANGE_STOP_KEY)]
# GUI - columns
left_col = [
[sg.Frame('Dictionaries', [files_operation, files_data_table])],
[sg.Frame('Letters selection', [letters_set])],
[sg.Frame('Words length', [letters_min, letters_max])],
[sg.Frame('Training input', [words_filtered_table])]]
right_col = [
[sg.Frame('Training generator', [generator_scheme])],
[sg.Frame('Training set size', [words_to_train])],
[sg.Frame('Training output', [words_to_gen_table])],
[sg.Frame('Audible parameters', [e2cw_wpm, e2cw_farns, e2cw_pitch])],
[sg.Frame('Ebook2CW', [e2cw_version, e2cw_buttons])]]
# App layout
layout = [[sg.Column(left_col), sg.VSeparator(), sg.Column(right_col)]]
# Configure and create the window
self.window = sg.Window(self.WINDOW_DESCRIPTION, layout)
def _get_dictionary_key_by_value(self, dictionary, lookup_value, nested_key=None):
        '''Retrieves a key based on the provided string value,
        keeping insertion order, meaning that if the dictionary
        contains a number of keys with the exact same value,
        the first key (in insertion order) will be returned.
Args:
dictionary (dict): dictionary to search for a key
lookup_value (str): value for which key should be found
nested_key (str): key in nested dictionary where lookup_value is
Returns:
result (str): key or None if lookup_value not found
'''
result = None
for key, value in dictionary.items():
if nested_key is not None:
data = value[nested_key]
else:
data = value
if data == lookup_value:
result = key
break
return result
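    # Hedged example for _get_dictionary_key_by_value above (illustrative,
    # not part of the original code): with
    #   {'a': {'description': 'x'}, 'b': {'description': 'y'}}
    # a lookup of 'y' with nested_key='description' returns 'b'.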
def _update_ui_on_dictionary_set_change(self, values):
"""Updates relevant UI elements according to change
in dictionary set.
Args:
values (dict): Dictionary containing GUI elements values
Returns:
None
"""
table_data = []
sliders_range = (0, 0)
# get information related to already loaded data
dictionaries_info = self.cw_gen.get_dictionaries_info()
words_info = self.cw_gen.get_words_stat()
# generate updated data for UI elements
if len(dictionaries_info) > 0:
for dictionary_data in dictionaries_info:
row = [dictionary_data['uuid'],
dictionary_data['name'],
dictionary_data['stat']['words_count'],
dictionary_data['stat']['min_length'],
dictionary_data['stat']['max_length']]
table_data.append(row)
if len(words_info) > 0:
sliders_range = (words_info['min_length'],
words_info['max_length'])
# update UI
self.window[self.FILES_DATA_TABLE_KEY].update(
values=table_data)
words_min_length, words_max_length = self.update_words_length_sliders_config(
values, (sliders_range))
self._update_ui_on_words_filtering_change(
values, words_min_length, words_max_length)
def _update_ui_on_words_filtering_change(self, values, min_length=None, max_length=None):
        '''Updates the words stat with the filtered result,
        which allows the user to see the data out of which
        training material could be generated.
Args:
            values (dict): Dictionary containing GUI elements values
            min_length (int): Minimal words length,
                passed in when the value read from self.window is not yet updated
                (window handling did not advance to the next loop yet)
            max_length (int): Maximal words length,
                passed in when the value read from self.window is not yet updated
                (window handling did not advance to the next loop yet)
Returns:
None
'''
words_min_length = int(values[self.LETTERS_MIN_KEY])
words_max_length = int(values[self.LETTERS_MAX_KEY])
letters_set = self.window[self.COMBO_LETTERS_SET_KEY].get()
generator_scheme = self.window[self.COMBO_MATERIAL_GENERATION_KEY].get(
)
if min_length is not None:
words_min_length = min_length
if max_length is not None:
words_max_length = max_length
# get filtered words stat
words_stat_filtered = self.cw_gen.get_words_stat_filtered(
words_min_length, words_max_length,
self._get_dictionary_key_by_value(
self.letters_sets, letters_set, 'description'),
self._get_dictionary_key_by_value(self.training_generator_schemes, generator_scheme))
# assemble words stat table (sorted by word length)
stat = []
if words_stat_filtered:
for word_length in sorted(words_stat_filtered['words_stat'].keys()):
stat.append(
[word_length, words_stat_filtered['words_stat'][word_length]])
# update UI
self.window[self.WORDS_FILTERED_TABLE_KEY].update(values=stat)
def handle_dictionary_add(self, values):
"""Handle new dictionary addition
by passing file path to cwgen. UI gets updated.
Args:
values (dict): Dictionary containing GUI elements values
Returns:
None
"""
# on file selection cancel values[FILE_PATH_INPUT_KEY] is empty
if len(values[self.FILE_PATH_INPUT_KEY]) > 0:
file_path = os.path.normpath(values[self.FILE_PATH_INPUT_KEY])
if os.path.isfile(file_path):
if self.cw_gen.add_dictionary(file_path):
self._update_ui_on_dictionary_set_change(values)
# clear file path storage to properly handle CANCEL situation
self.window[self.FILE_PATH_INPUT_KEY].update(value="")
def handle_dictionary_delete(self, values):
"""Handle dictionary deletion
by passing its generated UUID to cwgen. UI gets updated.
Args:
values (dict): Dictionary containing GUI elements values
Returns:
None
"""
# self.files_table_idx == -1 when no dictionary in the table is selected
if self.files_table_idx >= 0:
table_data = self.window[self.FILES_DATA_TABLE_KEY].get()
selected_dictionary_uuid = table_data[self.files_table_idx][0]
if self.cw_gen.remove_dictionary(selected_dictionary_uuid):
self._update_ui_on_dictionary_set_change(values)
# set table index to negative to properly handle dictionary remove button click
self.files_table_idx = -1
def handle_words_length_sliders(self, event, values):
"""Handle words length sliders movement
to not let their values become ridiculous.
Args:
event (str): GUI event name
values (dict): Dictionary containing GUI elements values
Returns:
None
"""
# get current positions
slider_min_val = int(values[self.LETTERS_MIN_KEY])
slider_max_val = int(values[self.LETTERS_MAX_KEY])
# update them if needed
if event == self.LETTERS_MIN_KEY:
if slider_min_val > slider_max_val:
slider_max_val = slider_min_val
self.window[self.LETTERS_MAX_KEY].update(
value=slider_max_val)
if event == self.LETTERS_MAX_KEY:
if slider_max_val < slider_min_val:
slider_min_val = slider_max_val
self.window[self.LETTERS_MIN_KEY].update(
value=slider_min_val)
return (slider_min_val, slider_max_val)
def update_words_length_sliders_config(self, values, new_range):
"""Updates UI part related to words length sliders change their range
assuring that sliders values gets updated when needed
Args:
values (dict): Dictionary containing GUI elements values
new_range (tuple): New value range
Returns:
new_min_val, new_max_val (tuple): Updated words length sliders values
"""
current_range_min, current_range_max = self.window[self.LETTERS_MIN_KEY].Range
current_min_val = int(values[self.LETTERS_MIN_KEY])
current_max_val = int(values[self.LETTERS_MAX_KEY])
new_range_min, new_range_max = new_range
new_min_val = current_min_val
new_max_val = current_max_val
# range min value may affect sliders position
if new_range_min > current_range_min:
if new_range_min > current_min_val:
new_min_val = new_range_min
if new_min_val > current_max_val:
new_max_val = new_min_val
# range max value may affect sliders position
if new_range_max < current_range_max:
if new_range_max < current_max_val:
new_max_val = new_range_max
if new_max_val < current_min_val:
new_min_val = new_max_val
self.window[self.LETTERS_MIN_KEY].update(
range=new_range, value=new_min_val)
self.window[self.LETTERS_MAX_KEY].update(
range=new_range, value=new_max_val)
self.window[self.LETTERS_MIN_RANGE_START_KEY].update(
value=new_range_min)
self.window[self.LETTERS_MIN_RANGE_STOP_KEY].update(
value=new_range_max)
self.window[self.LETTERS_MAX_RANGE_START_KEY].update(
value=new_range_min)
self.window[self.LETTERS_MAX_RANGE_STOP_KEY].update(
value=new_range_max)
return (new_min_val, new_max_val)
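    # Hedged worked example for update_words_length_sliders_config above
    # (illustrative): with a current range of (1, 9), slider values of (2, 8)
    # and a new range of (3, 7), both sliders get clamped and the method
    # returns (3, 7).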
def handleGui(self):
"""GUI main loop
        where all events get dispatched for handling
Args:
None
Returns:
None
"""
event, values = self.window.read()
# See if user wants to quit or window was closed
if event == sg.WINDOW_CLOSED:
self.window.close()
return False
# Remember index of selected table row
if event == self.FILES_DATA_TABLE_KEY:
self.files_table_idx = values[self.FILES_DATA_TABLE_KEY][0]
# Add a dictionary to the list
if event == self.FILE_PATH_INPUT_KEY:
self.handle_dictionary_add(values)
# remove dictionary from the list
if event == self.FILE_REMOVE_KEY:
self.handle_dictionary_delete(values)
# handle words length change
if (event == self.LETTERS_MIN_KEY) or (event == self.LETTERS_MAX_KEY):
words_min_length, words_max_length = self.handle_words_length_sliders(
event, values)
self._update_ui_on_words_filtering_change(
values, words_min_length, words_max_length)
# handle letters set and generator scheme change
if (event == self.COMBO_LETTERS_SET_KEY) or (event == self.COMBO_MATERIAL_GENERATION_KEY):
self._update_ui_on_words_filtering_change(values)
return True
# UI theming
sg.theme('Default1')
# Start the GUI
ui = CwGenUI()
# Display and interact with the GUI using an Event Loop
while ui.handleGui():
pass
# Game over
del ui
| 2.328125 | 2 |
build/lib/dupecheck/chunks.py | spacerockzero/dupecheck-py | 0 | 12795926 | # TODO: use sliding windows instead of chunking.
# the chunking method fails when part of a dupe string
# crosses the border between chunks
import colorama
from colorama import Fore
from tqdm import trange, tqdm
import os
colorama.init(autoreset=True)
DEFAULT_MIN = 5
DEFAULT_MAX = 10
def striplist(l):
# clean out some unneeded chars
return [x.strip(" \t\n\r") for x in l]
def compare(input):
# main comparison function
test = input[0]
data = input[1]
chunk_length = input[2]
# print("data", data)
found = data.find(test)
if found != -1:
words = test.split()
# don't return matched chunks shorter than the current chunk
# length, even if they are rounding remainder orphans from the
# chunking process
if len(words) >= chunk_length:
return test
def make_chunks(lst, n):
# Yield successive n-sized chunks from lst.
for i in range(0, len(lst), n):
yield lst[i : i + n]
def chunkit(str, chunk_length):
# make chunks of strings the way we like
chunks = []
list = str.split()
list = striplist(list)
wordLists = make_chunks(list, chunk_length)
for chunk in wordLists:
if chunk != "":
chunk = " ".join(chunk)
chunks.append(chunk)
return chunks
def run(chunk_length, text, dataset):
dataset = dataset.replace(os.linesep, " ")
testChunks = chunkit(text, chunk_length)
# remove empty lines
testChunks = list(filter(None, testChunks))
results = []
for testLine in testChunks:
found = compare([testLine, dataset, chunk_length])
        if found is not None:
print("found", found)
results.append(found)
return results
def dupecheck(min=DEFAULT_MIN, max=DEFAULT_MAX, text=None, dataset=None, verbose=False):
    assert text is not None
    assert dataset is not None
text = text.replace(os.linesep, "")
matches = []
for i in trange(max, min, -1):
# print('text, i',text, i)
res = run(i, text, dataset)
if len(res) > 0:
for r in res:
if r not in matches:
matches.append(r)
return matches
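# ---------------------------------------------------------------------------
# Hedged sketch (added for illustration, not part of the original module): one
# possible sliding-window variant addressing the TODO at the top of this file.
# Overlapping windows of chunk_length words (step 1) cannot miss a duplicate
# that straddles a chunk border. The function name and its use are assumptions.
def windowit(text, chunk_length):
    # build every overlapping run of chunk_length words
    words = striplist(text.split())
    if len(words) <= chunk_length:
        return [" ".join(words)] if words else []
    return [" ".join(words[i:i + chunk_length])
            for i in range(len(words) - chunk_length + 1)]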
| 3 | 3 |
script/run_ga.py | Tricker-z/CSE5001-GA-mTSP | 3 | 12795927 | <gh_stars>1-10
import os
import sys
import subprocess
from argparse import ArgumentParser
from pathlib import Path
from shlex import split
ga2path = {
'baseline' : 'GA/baseline/main.py',
'vns-ga' : 'GA/vns-ga/main.py',
'ipga' : 'GA/IPGA/main.py'
}
def parse_args():
    parser = ArgumentParser(description='Running script for GA')
parser.add_argument('-n', '--number', type=int, default=30,
help='Number of repeat runs')
parser.add_argument('-a', '--algorithm', type=str, required=True,
help='GA from baseline, vns-ga, ipga')
parser.add_argument('-i', '--input', type=Path, required=True,
help='Path of the input for mTSP')
parser.add_argument('-o', '--output', type=Path, required=True,
help='Path of the output log file')
return parser.parse_args()
def run(cmd, logfile):
p = subprocess.Popen(cmd, stdout=logfile)
return p
def main():
args = parse_args()
if args.algorithm not in ga2path.keys():
        raise Exception('Algorithm should be selected from [baseline, vns-ga, ipga]')
ga_path = ga2path[args.algorithm]
log_path = args.output
if not log_path.exists():
log_path.parent.mkdir(parents=True, exist_ok=True)
log = open(log_path, 'a+')
cmd = f'python {ga_path} -i {args.input} -t 300'
for idx in range(args.number):
run(split(cmd), log)
log.close()
return 0
if __name__ == '__main__':
sys.exit(main()) | 2.578125 | 3 |
counselor/filter.py | joergeschmann/counselor | 0 | 12795928 | class Operators:
"""Operator constants"""
OPERATOR_EQUALITY = "=="
OPERATOR_INEQUALITY = "!="
OPERATOR_EMPTY = "empty"
OPERATOR_NOT_EMPTY = "not empty"
OPERATOR_IN = "in"
OPERATOR_NOT_IN = "not in"
OPERATOR_CONTAINS = "contains"
OPERATOR_NOT_CONTAINS = "not contains"
class Fields:
"""Field name constants"""
FIELD_TAGS = "Tags"
FIELD_META = "Meta"
class KeyValuePair:
"""Simple representation of a key value pair.
"""
def __init__(self, key: str, value: str):
self.key = key
self.value = value
class Filter:
"""Filter to provide simple search functionality in Consul.
"""
def __init__(self, selector: str, operator: str, value: str):
self.selector = selector
self.operator = operator
self.value = value
@staticmethod
def new_tag_filter(operator: str, value: str):
return Filter(selector=Fields.FIELD_TAGS, operator=operator, value=value)
@staticmethod
def new_meta_filter(key: str, operator: str, value: str):
return Filter(selector=Fields.FIELD_META + "." + key, operator=operator, value=value)
def as_expression(self) -> str:
"""
// Equality & Inequality checks
<Selector> == <Value>
<Selector> != <Value>
// Emptiness checks
<Selector> is empty
<Selector> is not empty
// Contains checks or Substring Matching
<Value> in <Selector>
<Value> not in <Selector>
<Selector> contains <Value>
<Selector> not contains <Value>
"""
if self.operator == Operators.OPERATOR_EQUALITY or self.operator == Operators.OPERATOR_INEQUALITY:
return "{} {} {}".format(self.selector, self.operator, self.value)
elif self.operator == Operators.OPERATOR_EMPTY or self.operator == Operators.OPERATOR_NOT_EMPTY:
return "{} {} {}".format(self.selector, self.operator, self.value)
elif self.operator == Operators.OPERATOR_IN or self.operator == Operators.OPERATOR_NOT_IN:
return "{} {} {}".format(self.value, self.operator, self.selector)
elif self.operator == Operators.OPERATOR_CONTAINS or self.operator == Operators.OPERATOR_NOT_CONTAINS:
return "{} {} {}".format(self.selector, self.operator, self.value)
| 2.921875 | 3 |
tests/test_download_remote_sitemap.py | OneHappyForever/wayback-machine-archiver | 44 | 12795929 | <gh_stars>10-100
import pytest
from wayback_machine_archiver.archiver import download_remote_sitemap
from requests.adapters import HTTPAdapter
import requests
SITEMAP = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9 http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd" xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url>
<loc>https://alexgude.com/blog/double-checking-538/</loc>
<lastmod>2016-04-28T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://alexgude.com/files/undergrad_thesis.pdf</loc>
<lastmod>2019-05-09T16:19:45+00:00</lastmod>
</url>
</urlset>
"""
@pytest.fixture
def session():
session = requests.Session()
session.mount("https://", HTTPAdapter())
session.mount("http://", HTTPAdapter())
return session
def test_download_remote_sitemap(requests_mock, session):
url = 'https://www.radiokeysmusic.com/sitemap.xml'
requests_mock.get(url, text=SITEMAP)
returned_contents = download_remote_sitemap(url, session)
assert returned_contents == SITEMAP.encode("UTF-8")
def test_download_remote_sitemap_with_status_error(requests_mock, session):
url = 'https://www.radiokeysmusic.com/sitemap.xml'
requests_mock.get(url, text=SITEMAP, status_code=404)
with pytest.raises(requests.exceptions.HTTPError):
download_remote_sitemap(url, session)
| 2.25 | 2 |
agent/indy_catalyst_agent/messaging/connections/handlers/connection_response_handler.py | blhagadorn/indy-catalyst | 0 | 12795930 | """Connection response handler."""
from ...base_handler import BaseHandler, BaseResponder, RequestContext
from ..messages.connection_response import ConnectionResponse
from ..manager import ConnectionManager
from ...trustping.messages.ping import Ping
class ConnectionResponseHandler(BaseHandler):
"""Handler class for connection responses."""
async def handle(self, context: RequestContext, responder: BaseResponder):
"""
Handle connection response.
Args:
context: Request context
responder: Responder callback
"""
self._logger.debug(f"ConnectionResponseHandler called with context {context}")
assert isinstance(context.message, ConnectionResponse)
mgr = ConnectionManager(context)
connection = await mgr.accept_response(context.message)
target = await mgr.get_connection_target(connection)
# send trust ping in response
await responder.send_outbound(Ping(), target)
| 2.4375 | 2 |
ascended/imath.py | xRiis/ascended | 2 | 12795931 | <filename>ascended/imath.py
# It's easy (and fun) to do math with custom objects as opposed to arrays and tuples, so each necessary point will
# be assigned to a Cartesian coordinate that fits somewhere on the pixel array
class CartesianVals:
def __init__(self, x, y):
self.x = x
self.y = y
# At some point, we subtract two Cartesian coordinates against each other
def __sub__(self, other):
x = self.x - other.x
y = self.y - other.y
return CartesianVals(x, y)
# Calculate the average between the midpoints of two lines
# Basically a centroid, but without doing a bunch of weird linear algebra stuff with numpy
def seg_avg(line1_start, line1_end, line2_start, line2_end):
# Calculate midpoints in each dimension for each line
line1_x_mp = (line1_start[0] + line1_end[0]) / 2
line1_y_mp = (line1_start[1] + line1_end[1]) / 2
line2_x_mp = (line2_start[0] + line2_end[0]) / 2
line2_y_mp = (line2_start[1] + line2_end[1]) / 2
# Calculate the average between each midpoint
x_avg = (line1_x_mp + line2_x_mp) / 2
y_avg = (line1_y_mp + line2_y_mp) / 2
return CartesianVals(x_avg, y_avg)
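# Hedged worked example (added for illustration, not part of the original
# module): for the segments (0,0)->(2,0) and (0,2)->(2,2) the midpoints are
# (1, 0) and (1, 2), so seg_avg((0, 0), (2, 0), (0, 2), (2, 2)) returns
# CartesianVals(1.0, 1.0).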
# Load two empty Cartesian coordinates for now, to be used in main.py
l_flare_loc = CartesianVals(0, 0)
r_flare_loc = CartesianVals(0, 0) | 3.078125 | 3 |
self_supervised/vision/dino.py | jwuphysics/self_supervised | 243 | 12795932 | <gh_stars>100-1000
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/15 - dino.ipynb (unless otherwise specified).
__all__ = ['DINOHead', 'get_dino_aug_pipelines', 'DINOModel', 'DINO']
# Cell
from fastai.vision.all import *
from ..augmentations import *
from ..layers import *
from ..models.vision_transformer import *
# Cell
class DINOHead(nn.Module):
'''
copy.deepcopy:
RuntimeError: Only Tensors created explicitly by the user (graph leaves)
support the deepcopy protocol at the moment
https://pytorch.org/docs/stable/generated/torch.nn.utils.weight_norm.html
https://pytorch.org/docs/stable/generated/torch.nn.GELU.html
'''
def __init__(self, in_dim, out_dim, use_bn=False, norm_last_layer=True, nlayers=3, hidden_dim=2048, bottleneck_dim=256):
super().__init__()
nlayers = max(nlayers, 1)
if nlayers == 1:
self.mlp = nn.Linear(in_dim, bottleneck_dim)
else:
layers = [nn.Linear(in_dim, hidden_dim)]
if use_bn:
layers.append(nn.BatchNorm1d(hidden_dim))
layers.append(nn.GELU())
for _ in range(nlayers - 2):
layers.append(nn.Linear(hidden_dim, hidden_dim))
if use_bn:
layers.append(nn.BatchNorm1d(hidden_dim))
layers.append(nn.GELU())
layers.append(nn.Linear(hidden_dim, bottleneck_dim))
self.mlp = nn.Sequential(*layers)
self.apply(self._init_weights)
self.last_layer = nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False))
self.last_layer.weight_g.data.fill_(1)
if norm_last_layer:
self.last_layer.weight_g.requires_grad = False
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.mlp(x)
x = nn.functional.normalize(x, dim=-1, p=2)
x = self.last_layer(x)
return x
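# Hedged shape check (added for illustration, not part of the original
# notebook): with the defaults above, a DINOHead(in_dim=384, out_dim=1024)
# maps a (batch, 384) feature tensor through the 3-layer MLP and the
# weight-normalised last layer to a (batch, 1024) output, e.g.
#   head = DINOHead(384, 1024)
#   head(torch.randn(4, 384)).shape  # torch.Size([4, 1024])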
# Cell
@delegates(get_multi_aug_pipelines, but=['n', 'size', 'resize_scale'])
def get_dino_aug_pipelines(num_crops=(2,4), crop_sizes=(224,96), min_scales=(0.4,0.05), max_scales=(1.,0.4), **kwargs):
aug_pipelines = []
for nc, size, mins, maxs in zip(num_crops, crop_sizes, min_scales, max_scales):
aug_pipelines += get_multi_aug_pipelines(n=nc, size=size, resize_scale=(mins,maxs), **kwargs)
return aug_pipelines
# Cell
class DINOModel(Module):
def __init__(self, student, teacher):
"A module for loading and saving all training params together"
self.student,self.teacher = student,teacher
self.teacher.load_state_dict(student.state_dict())
for p in self.teacher.parameters(): p.requires_grad = False
self.register_buffer('C', torch.zeros(1,num_features_model(teacher)))
def forward(self,x): return self.student(x)
# Cell
class DINO(Callback):
order,run_valid = 9,True
def __init__(self, aug_pipelines, large_crop_ids=[0,1],
cmom=0.9,
tmom_start=0.996, tmom_end=1., tmom_sched=SchedCos,
tpt_start=0.04, tpt_end=0.04, tpt_warmup_pct=0., tpt_sched=SchedLin,
tps=0.1,
freeze_last_layer=1,
print_augs=False):
"""
DINO teacher student training with distillation.
Refer to original repo:
https://github.com/facebookresearch/dino/blob/0be6e112dd579203caaa1d0f066e29ca536f76dd/main_dino.py#L41
cmom: Center update momentum.
        tmom_start, tmom_end: Teacher update momentum schedule. Set larger, e.g. 0.9995, for small batches or 0.996 for large batches (256+).
        tpt_start: Warm up starting temperature
        tpt_warmup_pct: Percentage of training for warmup
        tpt_sched: Warm up scheduler, e.g. SchedLin, SchedCos, SchedExp
        tpt_end: Teacher temperature after warm up. Decrease if training loss does not decrease.
Smaller temperature means more sharpening.
tps: Student temperature.
freeze_last_layer: How many epochs to freeze the last layer
"""
store_attr('large_crop_ids,cmom,freeze_last_layer,tps')
self.augs = aug_pipelines
self.tpt_scheduler = combine_scheds([tpt_warmup_pct,1-tpt_warmup_pct],
[tpt_sched(tpt_start,tpt_end),SchedNo(tpt_end,tpt_end)])
self.tmom_scheduler = tmom_sched(tmom_start, tmom_end)
if print_augs:
for aug in self.augs: print(aug)
def before_fit(self):
"Create teacher model as a copy of student"
self.learn.loss_func = self.lf
self.tpt = self.tpt_scheduler(0.)
self.tmom = self.tmom_scheduler(0.)
self.model.teacher.eval()
for n,p in self.learn.model.student[1].last_layer.named_parameters():
if n == 'weight_v' : p.requires_grad = False
def before_batch(self):
"Augment multi crop views"
self.bs = self.x.size(0)
self.learn.xb = ([aug(self.x) for aug in self.augs],)
x_large = [self.learn.xb[0][i] for i in self.large_crop_ids]
        # TODO: do we need to put the teacher in eval() here? It is not done in the original repo.
with torch.no_grad():
targs = self.model.teacher(x_large)
self.learn.yb = (targs,)
self.cb = targs.mean(0, keepdim=True)
def _momentum_update_teacher(self):
for param_s, param_t in zip(self.learn.model.student.parameters(), self.model.teacher.parameters()):
param_t.data = param_t.data * self.tmom + param_s.data * (1. - self.tmom)
def _momentum_update_center(self):
self.model.C = self.model.C*self.cmom + self.cb*(1-self.cmom)
def after_step(self):
"Center and teacher updates"
self._momentum_update_teacher(); self._momentum_update_center()
def after_epoch(self):
"Update tpt at the end of each epoch"
self.tpt = self.tpt_scheduler(self.pct_train)
self.tmom = self.tmom_scheduler(self.pct_train)
if self.epoch == self.freeze_last_layer:
print("Setting last layer to trainable")
for n,p in self.learn.model.student[1].last_layer.named_parameters():
if n == 'weight_v' : p.requires_grad = True
def lf(self, pred, *yb):
"Multi crop cross entropy loss: -qlog(p)"
yb = yb[0]
pred = F.log_softmax(pred / self.tps, dim=-1)
yb = F.softmax((yb - self.model.C) / self.tpt, dim=-1)
n_targs, n_preds = yb.size(0)//self.bs, pred.size(0)//self.bs
yb, pred = yb.chunk(n_targs), pred.chunk(n_preds)
loss, npairs = 0, n_targs*(n_preds-1)
for ti in range(n_targs):
for pi in range(n_preds):
if ti != pi:
loss += (-yb[ti]*pred[pi]).sum(-1).mean() / npairs
return loss
@torch.no_grad()
def show(self, n=1):
xbs = self.learn.xb[0]
idxs = np.random.choice(range(self.bs), n, False)
images = [aug.decode(xb.to('cpu').clone()).clamp(0, 1)[i]
for i in idxs
for xb, aug in zip(xbs, self.augs)]
return show_batch(images[0], None, images, max_n=len(images), nrows=n) | 2.0625 | 2 |
iou.py | trojerz/keras-eardetection | 0 | 12795933 | def iou(rect1,rect2):
'''
    Calculate the intersection over union (IoU) of two rectangles.
    :param rect1: the first rectangle, denoted by x, y, w, h, where x, y are the coordinates of the rectangle's top-left corner
    :param rect2: the second rectangle, in the same format.
    :return: the intersection over union, i.e. intersection area / union area
'''
x1,y1,w1,h1=rect1
x2,y2,w2,h2=rect2
inter_w=(w1+w2)-(max(x1+w1,x2+w2)-min(x1,x2))
inter_h=(h1+h2)-(max(y1+h1,y2+h2)-min(y1,y2))
if inter_h<=0 or inter_w <= 0:
return 0
inter=inter_w * inter_h
union=w1*h1+w2*h2-inter
return inter/union
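# Hedged usage sketch (added for illustration, not part of the original
# module): two unit squares offset by half a side overlap in a 0.5 x 1 strip,
# so the expected IoU is 0.5 / (1 + 1 - 0.5) = 1/3.
if __name__ == '__main__':
    print(iou((0, 0, 1, 1), (0.5, 0, 1, 1)))  # ~0.333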
| 3.859375 | 4 |
face/common/constants.py | kkltcjk/face | 0 | 12795934 | <reponame>kkltcjk/face
import os
CONFIG_FILE = '/etc/face/face.conf'
LOG_DIR = '/var/log/face'
LOG_FILE = os.path.join(LOG_DIR, 'face.log')
| 1.703125 | 2 |
run.py | nbc-pet-task/functional.py | 2 | 12795935 | """test run
"""
def fun(*args):
print(args)
fun(1,2,3,4,5)
def add(a,b,c):
return a+b+c
p = (1,2,3)
q = (5,6,7)
print(p+q)
#print(type(*p))
print(add(*p))
print(add.__code__.co_argcount)
def curry(func):
len_func = func.__code__.co_argcount
def func_a(*a):
len_a = len(a)
def func_b(*b):
return func(*(a+b))
return func_b
return func_a
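# Hedged extra example (added for illustration): the split between the two
# calls can be anything, including all three arguments in the second call.
print(curry(add)()(1, 2, 3))  # 6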
a1 = curry(add)(1,2)
print(a1(4))
a2 = curry(add)(2)
print(a2(4,6)) | 3.484375 | 3 |
seahub/help/urls.py | MJochim/seahub | 0 | 12795936 | <filename>seahub/help/urls.py
# Copyright (c) 2012-2016 Seafile Ltd.
from django.conf.urls import url, include
from django.views.generic import TemplateView
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name="help/install.html") ),
url(r'^install/$', TemplateView.as_view(template_name="help/install.html") ),
url(r'^sync_existing/$', TemplateView.as_view(template_name="help/sync_existing.html") ),
url(r'^selective_sync/$', TemplateView.as_view(template_name="help/selective_sync.html") ),
url(r'^unsync_resync/$', TemplateView.as_view(template_name="help/unsync_resync.html") ),
url(r'^sync_interval/$', TemplateView.as_view(template_name="help/sync_interval.html") ),
url(r'^desktop_proxy/$', TemplateView.as_view(template_name="help/desktop_proxy.html") ),
url(r'^conflicts/$', TemplateView.as_view(template_name="help/conflicts.html") ),
url(r'^ignore/$', TemplateView.as_view(template_name="help/ignore.html") ),
url(r'^encrypted_libraries/$', TemplateView.as_view(template_name="help/encrypted_libraries.html") ),
]
| 2.109375 | 2 |
aws-inventory/lambda/inventory-buckets.py | falequin/antiope | 0 | 12795937 |
import boto3
from botocore.exceptions import ClientError
import json
import os
import time
import datetime
from dateutil import tz
from lib.account import *
from lib.common import *
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('boto3').setLevel(logging.WARNING)
RESOURCE_PATH = "s3/bucket"
def lambda_handler(event, context):
logger.debug("Received event: " + json.dumps(event, sort_keys=True))
message = json.loads(event['Records'][0]['Sns']['Message'])
logger.info("Received message: " + json.dumps(message, sort_keys=True))
try:
target_account = AWSAccount(message['account_id'])
discover_buckets(target_account)
except AssumeRoleError as e:
logger.error("Unable to assume role into account {}({})".format(target_account.account_name, target_account.account_id))
return()
except ClientError as e:
logger.error("AWS Error getting info for {}: {}".format(target_account.account_name, e))
return()
except Exception as e:
logger.error("{}\nMessage: {}\nContext: {}".format(e, message, vars(context)))
raise
def discover_buckets(account):
'''
Gathers all the S3 Buckets and various details about them
'''
bucket_list = []
    # s3 list_buckets() returns every bucket in the account; details for each
    # bucket are gathered via the per-bucket API calls below and saved to S3.
s3_client = account.get_client('s3')
    response = s3_client.list_buckets() # This API call doesn't paginate. Go fig...
bucket_list += response['Buckets']
for b in bucket_list:
bucket_name = b['Name']
# Decorate with the account info
b['account_id'] = account.account_id
b['account_name'] = account.account_name
b['resource_type'] = "s3-bucket"
b['last_seen'] = str(datetime.datetime.now(tz.gettz('US/Eastern')))
b['errors'] = {}
# Go through a bunch of API calls to get details on this bucket
try:
response = s3_client.get_bucket_encryption(Bucket=bucket_name)
if 'ServerSideEncryptionConfiguration' in response:
b['ServerSideEncryptionConfiguration'] = response['ServerSideEncryptionConfiguration']
except ClientError as e:
if e.response['Error']['Code'] != 'ServerSideEncryptionConfigurationNotFoundError':
b['errors']['ServerSideEncryptionConfiguration'] = e
try:
response = s3_client.get_bucket_acl(Bucket=bucket_name)
if 'Grants' in response:
b['Grants'] = response['Grants']
except ClientError as e:
b['errors']['Grants'] = e
try:
response = s3_client.get_bucket_location(Bucket=bucket_name)
if 'LocationConstraint' in response:
if response['LocationConstraint'] is None:
b['Location'] = "us-east-1"
else:
b['Location'] = response['LocationConstraint']
except ClientError as e:
b['errors']['Location'] = e
try:
response = s3_client.get_bucket_policy(Bucket=bucket_name)
if 'Policy' in response:
b['BucketPolicy'] = json.loads(response['Policy'])
except ClientError as e:
if e.response['Error']['Code'] != 'NoSuchBucketPolicy':
b['errors']['BucketPolicy'] = e
try:
response = s3_client.get_bucket_tagging(Bucket=bucket_name)
if 'TagSet' in response:
b['TagSet'] = response['TagSet']
except ClientError as e:
if e.response['Error']['Code'] != 'NoSuchTagSet':
b['errors']['TagSet'] = e
try:
response = s3_client.get_bucket_versioning(Bucket=bucket_name)
del response['ResponseMetadata']
b['Versioning'] = response
except ClientError as e:
b['errors']['Versioning'] = e
try:
response = s3_client.get_bucket_request_payment(Bucket=bucket_name)
del response['ResponseMetadata']
b['RequestPayer'] = response
except ClientError as e:
b['errors']['RequestPayer'] = e
try:
response = s3_client.get_bucket_website(Bucket=bucket_name)
del response['ResponseMetadata']
b['Website'] = response
except ClientError as e:
if e.response['Error']['Code'] != 'NoSuchWebsiteConfiguration':
b['errors']['Website'] = e
try:
response = s3_client.get_bucket_logging(Bucket=bucket_name)
if 'LoggingEnabled' in response:
b['Logging'] = response['LoggingEnabled']
except ClientError as e:
b['errors']['Logging'] = e
try:
response = s3_client.get_bucket_cors(Bucket=bucket_name)
if 'CORSRules' in response:
b['CORSRules'] = response['CORSRules']
except ClientError as e:
if e.response['Error']['Code'] != 'NoSuchCORSConfiguration':
b['errors']['CORSRules'] = e
save_resource_to_s3(RESOURCE_PATH, bucket_name, b)
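# Hedged usage note (added for illustration): json_serial below is intended to
# be passed to json.dumps as the fallback serializer for payloads that contain
# datetime objects, e.g. json.dumps(bucket_dict, sort_keys=True, default=json_serial).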
def json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
    if isinstance(obj, (datetime.datetime, datetime.date)):
return obj.isoformat()
raise TypeError ("Type %s not serializable" % type(obj)) | 2.015625 | 2 |
main.py | suanyouyou/Hello-world | 0 | 12795938 | <filename>main.py<gh_stars>0
print("your name")
| 1.320313 | 1 |
portfolio/gui/ui_components/charts/balance_distribution_pie.py | timeerr/portfolio | 0 | 12795939 | #!/usr/bin/python3
from PyQt5.QtGui import QBrush, QColor, QPainter
from PyQt5.QtChart import QChartView, QChart, QPieSeries
from portfolio.utils import confighandler
from portfolio.db.fdbhandler import balances
from portfolio.db.cdbhandler import cbalances
from portfolio.gui.ui_components.fonts import ChartTitleFont
class DistributionPieChart(QChartView):
"""
Pie chart that shows the distribution of capital according
to several criteria
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Chart
self.chart = QChart()
self.chart.setTheme(QChart.ChartThemeDark)
self.chart.legend().hide()
self.chart.createDefaultAxes()
self.chart.setAnimationOptions(QChart.SeriesAnimations)
self.chart.setBackgroundVisible(False)
self.chart.setTitle(" ")
self.chart.setTitleBrush(QBrush(QColor('white')))
self.setChart(self.chart)
self.setRenderHint(QPainter.Antialiasing)
self.setStyleSheet("border: 0px; background-color: rgba(0,0,0,0)")
self.setupSeries() # Initialize to all mode
def setupSeries(self, mode="all"):
"""
Chart gets updated displaying the new data.
Modes:
- all : distribution between all accounts
- accs : distribution between portfolio accounts
- cryptoaccs : distribution between crypto accounts
- strategies : distribution between strategies
"""
# Series
self.chart.removeAllSeries() # Remove any previous series
self.series = QPieSeries()
# Get data
if mode == "all":
data = balances.get_all_accounts(
) + cbalances.get_all_accounts_with_amount_fiat()
elif mode == "accounts":
data = balances.get_all_accounts()
elif mode == "crypto":
data = cbalances.get_all_accounts_with_amount_fiat()
elif mode == "currency":
data = [(confighandler.get_fiat_currency().upper(), balances.get_total_balance_all_accounts(
)), ("BTC", cbalances.get_total_balance_all_accounts_fiat())]
data.sort(key=lambda x: x[1]) # Sort
# Set Chart Title
self.total = sum([i[1] for i in data])
self.setDefaultTitle()
# Add to series
for d in data:
self.series.append(d[0], d[1])
# Hide little slices' labels
self.series.setLabelsVisible(True)
for slc in self.series.slices():
if slc.angleSpan() < 5:
slc.setLabelVisible(False)
slc.setLabelArmLengthFactor(0.05)
self.chart.addSeries(self.series)
# Signals and functionality
self.series.hovered.connect(self.selectSlice)
def selectSlice(self, _slice, state):
""" Highlight selected slice """
font = ChartTitleFont()
if state:
font.setPointSize(20)
_slice.setLabelVisible(True)
self.chart.setTitle(
f"{int(_slice.value())} {confighandler.get_fiat_currency().upper()} {round(_slice.percentage()*100,1)}%")
else:
font.setBold(False)
if _slice.angleSpan() < 5:
_slice.setLabelVisible(False)
_slice.setExploded(False)
self.setDefaultTitle()
_slice.setLabelFont(font)
def setDefaultTitle(self):
""" Sets title as total balance from all pie slices """
self.chart.setTitle(
f"{int(self.total)} {confighandler.get_fiat_currency().upper()}")
font = ChartTitleFont(fontsize=20)
self.chart.setTitleFont(font)
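# Hedged usage sketch (added for illustration, not part of the original
# module): embedding the chart requires a running Qt application, roughly:
#   app = QApplication([])
#   chart = DistributionPieChart()
#   chart.setupSeries(mode="crypto")   # or "all", "accounts", "currency"
#   chart.show()
#   app.exec_()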
| 2.46875 | 2 |
bot_v0.0.1.py | jeeinn/wechat-web-robot | 1 | 12795940 | #coding=utf8
import re
import itchat
from itchat.content import *
'''
Version 0.0.1
Features:
1. Match the group-chat keyword "说" (say), then reply with the received message
'''
# Group chat monitoring
@itchat.msg_register(TEXT, isGroupChat = True)
def groupchat_reply(msg):
room_name = itchat.search_chatrooms(userName=msg[u'FromUserName'])
print(u"来自-%s-群聊消息|%s:%s"%(room_name[ u'NickName'],msg['ActualNickName'],msg['Text']))
    # Match the "说" (say) keyword
if(re.match(u'^说', msg['Text'])):
itchat.send_msg(msg['Text'].replace(u'说', ''),msg[u'FromUserName'])
if(re.match(u'^搜', msg['Text'])):
itchat.send_msg(u'电影名xxx',msg[u'FromUserName'])
itchat.auto_login(hotReload=True,enableCmdQR=True)
itchat.run(debug=True)
| 2.75 | 3 |
archived/analysis/analyses/postgres-stats/process.py | HazyResearch/dd-genomics | 13 | 12795941 | #!/usr/bin/env python
# A script for seeing basic statistics about the number and type of gene mentions extracted
# Author: <NAME> <<EMAIL>>
# Created: 2015-01-25
import sys
from dd_analysis_utils import process_pg_statistics
if __name__ == '__main__':
if len(sys.argv) < 3:
print "Process.py: Insufficient arguments"
else:
process_pg_statistics(sys.argv[1], sys.argv[2])
| 2.0625 | 2 |
convert.py | JackKenney/readings-ocr | 0 | 12795942 | <gh_stars>0
"""Converter from unsearchable PDFs to searchable PDFs."""
# Author: <NAME>
# Date: 2018/03/05
# This program requires the installation of 'pdfocr' by <NAME>
# Which can be found at https://launchpad.net/~gezakovacs/+archive/ubuntu/pdfocr/+build/7671902
# Place files to be scanned in the 'readings' directory. They must be PDFs.
import os
from subprocess import call
# Determine which files to convert and move
names = []
for root, dirs, files in os.walk("./readings"):
for file in files:
if file.endswith(".pdf"):
names.append(file)
# Make sure the OCR directory has been created.
if not os.path.isdir("searchable/pdf"):
call(["mkdir", "-p", "searchable/pdf"])
if not os.path.isdir("searchable/txt"):
call(["mkdir", "-p", "searchable/txt"])
if not os.path.isdir("read"):
call(["mkdir", "-p", "read"])
# Begin
for name in names:
name = name[0 : len(name) - 4]
read = "./readings/" + name + ".pdf"
out = "./searchable/pdf/" + name + "-OCR.pdf"
# Convert the files
call(["pdfocr", "-i", read, "-o", out])
# Move files to read
call(["mv", "readings/" + name + ".pdf", "read/" + name + ".pdf"])
call(["sh", "text.sh"]) # make txt files
# Sources for python scripting:
# https://docs.python.org/3/library/os.html#os.fwalk
# https://stackoverflow.com/questions/1274506/how-can-i-create-a-list-of-files-in-the-current-directory-and-its-subdirectories
# https://stackoverflow.com/questions/89228/calling-an-external-command-in-python
# https://stackoverflow.com/questions/8933237/how-to-find-if-directory-exists-in-python
| 3.109375 | 3 |
modules/finance_range.py | redahe/opportuner | 2 | 12795943 | <reponame>redahe/opportuner
#!/usr/bin/python
import sys
import json
for line in sys.stdin.readlines():
data = json.loads(line)
symbol = data.pop('SYMBOL')
try:
pe = float(data['PE'] or 1000)
except:
pe = 1000
try:
dy = float(data['DY'] or 0)
except:
dy = 0
score = 1.0/(pe)*10 + dy
data['PE'] = pe
data['DY'] = dy
print score, symbol, json.dumps(data)
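# Hedged worked example (added for illustration): a line with PE=20 and DY=3
# scores 1.0/20*10 + 3 = 3.5, so a lower P/E and a higher dividend yield both
# raise the score.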
| 2.765625 | 3 |
tailfile.py | finalstate/tailfile | 2 | 12795944 | #! /usr/bin/python3
# -*- coding: utf-8 -*-
####################################################################################################
import io
####################################################################################################
def TailFile(p_FileName, p_BufferSize=io.DEFAULT_BUFFER_SIZE, p_Encoding='utf8', p_Separator = '\n', p_KeepSeparator=True):
'''
Iterator used to read a file starting with the end, and proceeding backwards.
p_FileName : the full path to the file to be read backwards
p_BufferSize : the size of the file chunk to read into memory for processing
p_Encoding : the encoding of the file, default is utf-8
p_Separator : the character(s) used to separate the stream. Usually either newline or space.
    p_KeepSeparator : keep the separator at the end of each chunk (to be compatible with readline() )
'''
l_Separator = bytes(p_Separator, p_Encoding)
l_KeepSeparator = l_Separator if p_KeepSeparator else b''
l_Fragment = bytearray()
with open(p_FileName, 'rb') as l_File:
l_File.seek(0, io.SEEK_END)
l_Blocks = l_File.tell() // p_BufferSize
while l_Blocks >= 0:
l_File.seek(l_Blocks * p_BufferSize, io.SEEK_SET)
l_BufferContent = l_File.read(p_BufferSize) # might overshoot at first read
l_Blocks -= 1
if not l_Separator in l_BufferContent:
l_Fragment = l_BufferContent + l_Fragment
else:
l_BufferFragments = l_BufferContent.split(l_Separator)
yield str(l_BufferFragments[-1] + l_Fragment + l_KeepSeparator, p_Encoding)
for l_BufferFragment in reversed(l_BufferFragments[1:-1]):
yield str(l_BufferFragment + l_KeepSeparator, p_Encoding)
l_Fragment = bytearray(l_BufferFragments[0])
yield str(l_Fragment, p_Encoding)
####################################################################################################
if __name__ == '__main__':
import os
import sys
import time
C_TestFileName = 'tmp.txt'
C_TestBufferSize = 9182
if len(sys.argv) != 2:
print ('Usage: python3 tailfile.py <testfile>')
sys.exit(0)
if True: # benchmark
l_Moment1 = time.time()
l_Count1 = 0
with open(sys.argv[1], 'r') as l_File:
for l_Line in l_File:
l_Count1 += 1
l_Moment2 = time.time()
l_Count2 = 0
for l_Line in TailFile(sys.argv[1], p_BufferSize=C_TestBufferSize):
l_Count2 += 1
l_Moment3 = time.time()
print ('{}: {} {}'.format(l_Count1, (l_Moment2 - l_Moment1), (l_Moment3 - l_Moment2)))
else: # test algorithm
# write reversed content to tmp file
with open(C_TestFileName, 'w') as l_TempFile:
for l_Line in TailFile(sys.argv[1], C_TestBufferSize, p_Separator='\n'):
l_TempFile.write(l_Line)
# print (l_Line, end='')
# read and compare original file to reversed tmp file, should be identical
for l_Line, l_Copy in zip(open(sys.argv[1], 'r'), TailFile(C_TestFileName, C_TestBufferSize)):
if l_Line != l_Copy:
print ('|'+l_Line+'|\n---\n|'+l_Copy+'|')
break
os.remove(C_TestFileName) | 3.671875 | 4 |
libcst/codemod/commands/add_trailing_commas.py | jschavesr/LibCST | 0 | 12795945 | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import textwrap
from typing import Dict, Optional
import libcst as cst
from libcst.codemod import CodemodContext, VisitorBasedCodemodCommand
presets_per_formatter: Dict[str, Dict[str, int]] = {
"black": {
"parameter_count": 1,
"argument_count": 2,
},
"yapf": {
"parameter_count": 2,
"argument_count": 2,
},
}
class AddTrailingCommas(VisitorBasedCodemodCommand):
DESCRIPTION: str = textwrap.dedent(
"""
Codemod that adds trailing commas to arguments in function
headers and function calls.
The idea is that both the black and yapf autoformatters will
tend to split headers and function calls so that there
is one parameter / argument per line if there is a trailing
comma:
- Black will always separate them by line
- Yapf appears to do so whenever there are at least two arguments
Applying this codemod (and then an autoformatter) may make
it easier to read function definitions and calls
"""
)
def __init__(
self,
context: CodemodContext,
formatter: str = "black",
parameter_count: Optional[int] = None,
argument_count: Optional[int] = None,
) -> None:
super().__init__(context)
presets = presets_per_formatter.get(formatter)
if presets is None:
raise ValueError(
f"Unknown formatter {formatter!r}. Presets exist for "
+ ", ".join(presets_per_formatter.keys())
)
self.parameter_count: int = parameter_count or presets["parameter_count"]
self.argument_count: int = argument_count or presets["argument_count"]
@staticmethod
def add_args(arg_parser: argparse.ArgumentParser) -> None:
arg_parser.add_argument(
"--formatter",
dest="formatter",
metavar="FORMATTER",
help="Formatter to target (e.g. yapf or black)",
type=str,
default="black",
)
arg_parser.add_argument(
"--paramter-count",
dest="parameter_count",
metavar="PARAMETER_COUNT",
help="Minimal number of parameters for us to add trailing comma",
type=int,
default=None,
)
arg_parser.add_argument(
"--argument-count",
dest="argument_count",
metavar="ARGUMENT_COUNT",
help="Minimal number of arguments for us to add trailing comma",
type=int,
default=None,
)
def leave_Parameters(
self,
original_node: cst.Parameters,
updated_node: cst.Parameters,
) -> cst.Parameters:
skip = (
#
self.parameter_count is None
or len(updated_node.params) < self.parameter_count
or (
len(updated_node.params) == 1
and updated_node.params[0].name.value in {"self", "cls"}
)
)
if skip:
return updated_node
else:
last_param = updated_node.params[-1]
return updated_node.with_changes(
params=(
*updated_node.params[:-1],
last_param.with_changes(comma=cst.Comma()),
),
)
def leave_Call(
self,
original_node: cst.Call,
updated_node: cst.Call,
) -> cst.Call:
if len(updated_node.args) < self.argument_count:
return updated_node
else:
last_arg = updated_node.args[-1]
return updated_node.with_changes(
args=(
*updated_node.args[:-1],
last_arg.with_changes(comma=cst.Comma()),
),
)
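# Hedged usage sketch (added for illustration, not part of the original
# module), assuming libcst's standard Codemod.transform_module API:
if __name__ == "__main__":
    source = cst.parse_module("foo(bar, baz)\n")
    command = AddTrailingCommas(CodemodContext())
    print(command.transform_module(source).code)  # foo(bar, baz,)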
| 3 | 3 |
scripts/temporal_coding/izhikevich_step.py | embeddedlabsiu/snn_exploration_spinnaker | 1 | 12795946 | """
Tests with the Izhikevich neuron model.
"""
import numpy as np
import matplotlib.pyplot as plt
import pyNN.nest as sim
from pyNN.utility.plotting import Figure, Panel
# === Configure the simulator ================================================
duration = 100
dt = 0.01
sim.setup(timestep=dt, min_delay=0.1)
# === Build and instrument the network =======================================
phasic_spiking = {'a': 0.02, 'b': 0.25, 'c': -65, 'd': 6}
class_2 = {'a': 0.2, 'b': 0.26, 'c': -65, 'd': 0}
params = class_2
n = 100
v_init = -64
input_currents = 0.0005 * np.logspace(-4, 6, n, base=np.e)
neurons = sim.Population(n, sim.Izhikevich(i_offset=input_currents, **params))
neurons.record(['v', 'u', 'spikes'])
neurons.initialize(v=v_init, u=-params['b']*v_init)
# === Run the simulation =====================================================
sim.run(duration)
# === Save the results, optionally plot a figure =============================
data = neurons.get_data().segments[0]
first_spiketimes = []
rates = []
for spiketrain in data.spiketrains:
if len(spiketrain) == 0:
first_spiketimes.append(np.infty)
else:
first_spiketimes.append(spiketrain[0])
rates.append(np.count_nonzero(spiketrain) / duration)
plt.scatter(input_currents, 1 / np.array(first_spiketimes),
label='inverse ttfs')
plt.scatter(input_currents, rates, label='avg spikerate')
plt.legend()
plt.savefig('FI')
v = data.filter(name="v")[0]
u = data.filter(name="u")[0]
Figure(Panel(v, ylabel="Membrane potential (mV)", xticks=True,
xlabel="Time (ms)", yticks=True),
Panel(u, ylabel="u variable (units?)")).save('mem')
# === Clean up and quit ========================================================
sim.end()
| 2.8125 | 3 |
day_3_1.py | Nishant-Mishra/Advent_of_Code_2019 | 1 | 12795947 | #!/usr/bin/python3 -u
import sys
def puzzle(filename):
with open(filename, "r") as f:
path1 = f.readline()
path2 = f.readline()
path1_list_str = path1.strip("\n").split(",")
path2_list_str = path2.strip("\n").split(",")
# print(path1_list)
# print(path2_list)
# Get relative coords of path
path1_list = []
path2_list = []
for i in range(0, len(path1_list_str)):
path1_list.append(get_coord(path1_list_str[i]))
for i in range(0, len(path2_list_str)):
path2_list.append(get_coord(path2_list_str[i]))
# print(path1_list)
# print(path2_list)
# Get absolute coords of line segments
path1 = {"complete": [(0, 0)]}
for i in range(0, len(path1_list)):
if i:
path1["complete"].insert(i + 1, (path1["complete"][i][0] + path1_list[i][0], path1["complete"][i][1] + path1_list[i][1]))
else:
path1["complete"].insert(1, path1_list[0])
path2 = {"complete": [(0, 0)]}
for i in range(0, len(path2_list)):
if i:
path2["complete"].insert(i + 1, (path2["complete"][i][0] + path2_list[i][0], path2["complete"][i][1] + path2_list[i][1]))
else:
path2["complete"].insert(1, path2_list[0])
# Segregate vertical and horizontal lines
path1["vertical"] = []
path1["horizontal"] = []
for i in range(1, len(path1["complete"])):
# 'x' coord is same
if path1["complete"][i - 1][0] == path1["complete"][i][0]:
path1["vertical"].append((path1["complete"][i - 1], path1["complete"][i]))
elif path1["complete"][i - 1][1] == path1["complete"][i][1]:
path1["horizontal"].append((path1["complete"][i - 1], path1["complete"][i]))
path2["vertical"] = []
path2["horizontal"] = []
for i in range(1, len(path2["complete"])):
# 'x' coord is same
if path2["complete"][i - 1][0] == path2["complete"][i][0]:
path2["vertical"].append((path2["complete"][i - 1], path2["complete"][i]))
elif path2["complete"][i - 1][1] == path2["complete"][i][1]:
path2["horizontal"].append((path2["complete"][i - 1], path2["complete"][i]))
# print("%s\n" % path1["horizontal"])
# print("%s\n" % path1["vertical"])
# print("%s\n" % path2["horizontal"])
# print("%s\n" % path2["vertical"])
intersection_points_list = []
# Check if horizontal line of one path intersects with vertical line of other abd vice-versa
for h_seg in path1["horizontal"]:
for v_seg in path2["vertical"]:
intersection_point = check_intersection(h_seg, v_seg)
if intersection_point:
intersection_points_list.append(intersection_point)
for h_seg in path2["horizontal"]:
for v_seg in path1["vertical"]:
intersection_point = check_intersection(h_seg, v_seg)
if intersection_point:
intersection_points_list.append(intersection_point)
print(intersection_points_list)
dist = abs(intersection_points_list[0][0]) + abs(intersection_points_list[0][1])
for point in intersection_points_list[1:]:
if dist > (abs(point[0]) + abs(point[1])):
dist = abs(point[0]) + abs(point[1])
print("Shortest Dist: %d" % dist)
def get_coord(cmd):
if cmd[0] == "R":
return (int(cmd[1:]), 0)
elif cmd[0] == "L":
return (-int(cmd[1:]), 0)
elif cmd[0] == "U":
return (0, int(cmd[1:]))
elif cmd[0] == "D":
return (0, -int(cmd[1:]))
def check_intersection(horiz, vert):
x = vert[0][0]
y1 = vert[0][1]
y2 = vert[1][1]
w = horiz[0][1]
z1 = horiz[0][0]
z2 = horiz[1][0]
to_return = None
if (z1 < z2 and y1 < y2 and z1 <= x <= z2 and y1 <= w <= y2) or\
(z1 > z2 and y1 < y2 and z1 >= x >= z2 and y1 <= w <= y2) or\
(z1 < z2 and y1 > y2 and z1 <= x <= z2 and y1 >= w >= y2) or\
(z1 > z2 and y1 > y2 and z1 >= x >= z2 and y1 >= w >= y2) :
to_return = (x, w)
# if to_return:
print("<< %s :: %s >> == %s" % (horiz, vert, (x,w)))
return to_return
def get_all_points_on_path(c1, c2):
coord_list = []
if c1[0] == c2[0]:
if c1[1] < c2[1]:
for i in range(c1[1], c2[1] + 1):
coord_list.append((c1[0], i))
else:
for i in range(c2[1], c1[1] + 1):
coord_list.append((c1[0], i))
# coord_list.reverse()
elif c1[1] == c2[1]:
if c1[0] < c2[0]:
for i in range(c1[0], c2[0] + 1):
coord_list.append((i, c1[1]))
else:
for i in range(c2[0], c1[0] + 1):
coord_list.append((i, c1[1]))
# coord_list.reverse()
return coord_list
def main():
puzzle("input_day_3_1.txt")
# test()
def test():
p = get_all_points_on_path((123, 67), (123, 15))
p = get_all_points_on_path((-123, 67), (123, 67))
print(p)
if __name__ == "__main__":
main()
| 3.4375 | 3 |
scripts/retrieveSummary.py | ChaoXianSen/Duplex-Seq-Pipeline | 1 | 12795948 | import sys
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('--indexes', dest='inFile', required=True,
help='Path to indexes file, one per line.')
parser.add_argument('--config', dest='config', required=True)
o=parser.parse_args()
outFile = open(f"{o.config}.summary.csv",'w')
indexes = []
indexFile = open(o.inFile, 'r')
for line in indexFile:
indexes.append(line.strip())
indexFile.close()
outFile.write("RunID,Index,Raw reads,SSCS reads,Mapped SSCS,DCS reads,Mapped DCS,% mapped SSCS,% mapped DCS,Raw/SSCS,SSCS/DCS,Peak Family Size,Max Family Size,Mean Insert Size,SSCS On Target,DCS On Target,DCS Mean Depth,DCS Max Depth,DCS Uncovered Target,Nucleotides Sequenced,A's Sequenced,T's Sequenced,C's Sequenced,G's Sequenced,Mutations,Mutation Frequency,A>T,A>C,A>G,T>A,T>C,T>G,C>A,C>T,C>G,G>A,G>T,G>C,ins,dels\n")
for index in indexes:
print(f"Index {index}")
print("Reading config")
# Get the run ID from the config file
runID=""
c=""
C=""
d=""
configFile = open(f"{index}/{index}_config.sh", 'r')
for line in configFile:
if "RUN_ID=" in line:
runID = line.strip().split('=')[1].strip('"')
elif "minClonal=" in line:
c=line.strip().split('=')[1].split()[0]
elif "maxClonal=" in line:
C=line.strip().split('=')[1].split()[0]
elif "minDepth=" in line:
d=line.strip().split('=')[1].split()[0]
configFile.close()
print("Getting read counts")
# get read counts
# Read tagstats files:
rawFlagstats = open(f"{index}/Stats/data/{runID}.temp.sort.flagstats.txt", 'r').readlines()
sscsFlagstats = open(f"{index}/Stats/data/{runID}_mem.sscs.sort.flagstats.txt", 'r').readlines()
dcsFlagstats = open(f"{index}/Stats/data/{runID}_mem.dcs.sort.flagstats.txt", 'r').readlines()
rawReads = float(rawFlagstats[0].split()[0])
#rawReads = float(pysam.flagstat(f"{index}/{runID}.temp.sort.bam").split('\n')[0].split()[0])
#sscsFlagstat=pysam.flagstat(f"{index}/{runID}_mem.sscs.sort.bam").split('\n')
sscsReads=float(sscsFlagstats[0].split()[0])
mappedSscs=float(sscsFlagstats[4].split()[0])
# ~ dcsFlagstat=pysam.flagstat(f"{index}/{runID}_mem.dcs.sort.bam").split('\n')
dcsReads=float(dcsFlagstats[0].split()[0])
mappedDcs=float(dcsFlagstats[4].split()[0])
print("Processing Tagstats")
# get tagstats numbers
tagstatsFile = open(f"{index}/Stats/data/{runID}.tagstats.txt", 'r')
lastProportion=1
peakProportion = 0
peakSize = 1
maxSize=0
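    # Each tagstats line appears to list a family size, a read count, and the
    # proportion of reads in families of that size.  Follow the initial decline in
    # proportion, then record the family size with the largest later proportion as
    # the peak; the last line read supplies the maximum family size.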
for line in tagstatsFile:
if float(line.split()[2]) <= lastProportion:
lastProportion = float(line.split()[2])
elif float(line.split()[2]) >= peakProportion:
lastProportion = 0
peakSize = line.split()[0]
peakProportion = float(line.split()[2])
maxSize = line.split()[0]
tagstatsFile.close()
sscsOnTarget="NA"
# read depth file:
print("Processing Depth")
depthFile = open(f"{index}/Stats/data/{runID}.dcs.region.mutpos.vcf_depth.txt", 'r')
totDepth = 0
numLocs = 0
dcsMaxDepth = 0
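    # Non-header depth lines appear to carry the per-position depth in the fourth
    # tab-separated column; accumulate it for the mean and track the maximum.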
for line in depthFile:
if "#" not in line:
totDepth += int(line.split('\t')[3])
numLocs += 1
dcsMaxDepth = max(dcsMaxDepth, int(line.split('\t')[3]))
dcsOnTarget="NA"
if numLocs != 0:
dcsMeanDepth=totDepth / numLocs
else:
dcsMeanDepth=0
dcsUncovered="NA"
depthFile.close()
# insert size file
print("Processing Insert Size")
insertSizeFile = open(f"{index}/Stats/data/{runID}.dcs.iSize_Metrics.txt", 'r')
totInsertSize = 0
numInsertReads = 0
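    # The iSize metrics file appears to follow the Picard format: skip ahead to the
    # "## HISTOGRAM" section, skip its column-header line, then accumulate
    # insert_size * count so the mean insert size can be computed below.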
line = next(insertSizeFile)
while "## HISTOGRAM" not in line:
line = next(insertSizeFile)
contIter = True
line = next(insertSizeFile)
while contIter:
try:
line = next(insertSizeFile)
if line.strip() != "":
linebins = [int(x) for x in line.strip().split('\t')]
totInsertSize += linebins[0] * linebins[1]
numInsertReads += linebins[1]
except StopIteration:
contIter = False
if numInsertReads == 0:
meanInsertSize = "N/A"
else:
meanInsertSize = totInsertSize / numInsertReads
print("Processing countmuts")
# get countmuts data
sys.stderr.write(f"{index}/{runID}.dcs.filt.no_overlap.region.c{c}-{C}.d{d}.unique.countmuts.txt\n")
cmFile = open(f"{index}/Final/dcs/{runID}.dcs.countmuts.csv", 'r')
AsSeq=""
AtoT=""
AtoC=""
AtoG=""
TsSeq=""
TtoA=""
TtoC=""
TtoG=""
CsSeq=""
CtoA=""
CtoT=""
CtoG=""
GsSeq=""
GtoA=""
GtoT=""
GtoC=""
totalNt=""
totalMuts=""
ins=""
dels=""
for line in cmFile:
if "##" not in line and "OVERALL" in line:
linebins = line.strip().split(',')
if "A>T" in line:
AtoT=linebins[4]
AsSeq=linebins[5]
elif "A>C" in line:
AtoC=linebins[4]
elif "A>G" in line:
AtoG=linebins[4]
elif "T>A" in line:
TtoA=linebins[4]
TsSeq=linebins[5]
elif "T>C" in line:
TtoC=linebins[4]
elif "T>G" in line:
TtoG=linebins[4]
elif "C>A" in line:
CtoA=linebins[4]
CsSeq=linebins[5]
elif "C>T" in line:
CtoT=linebins[4]
elif "C>G" in line:
CtoG=linebins[4]
elif "G>A" in line:
GtoA=linebins[4]
GsSeq=linebins[5]
elif "G>T" in line:
GtoT=linebins[4]
elif "G>C" in line:
GtoC=linebins[4]
elif "Total" in line and "SNV" in line:
totalNt = float(linebins[5])
totalMuts = float(linebins[4])
elif "Total" in line and "INS" in line:
ins=linebins[4]
elif "Total" in line and "DEL" in line:
dels=linebins[4]
cmFile.close()
if sscsReads > 0:
percentMappedSSCS = mappedSscs/sscsReads
rawPerSSCS = rawReads/sscsReads
else:
percentMappedSSCS = 0
rawPerSSCS = 0
if dcsReads > 0:
percentMappedDCS = mappedDcs/dcsReads
sscsPerDCS = sscsReads/dcsReads
else:
percentMappedDCS = 0
sscsPerDCS = 0
if totalNt > 0:
mutFreq = totalMuts/totalNt
else:
mutFreq = 0
outFile.write(
f"{runID},"
f"{index},{rawReads},{sscsReads},{mappedSscs},{dcsReads},{mappedDcs},"
f"{percentMappedSSCS},{percentMappedDCS},{rawPerSSCS},{sscsPerDCS},"
f"{peakSize},{maxSize},{meanInsertSize},{sscsOnTarget},{dcsOnTarget},{dcsMeanDepth},"
f"{dcsMaxDepth},{dcsUncovered},{totalNt},{AsSeq},{TsSeq},{CsSeq},{GsSeq},{totalMuts},{mutFreq},"
f"{AtoT},{AtoC},{AtoG},{TtoA},{TtoC},{TtoG},{CtoA},{CtoT},{CtoG},{GtoA},"
f"{GtoT},{GtoC},{ins},{dels}\n"
)
outFile.close()
| 2.78125 | 3 |
algorithm/COMP90038/Graph/TopologicalSort.py | martindavid/code-sandbox | 0 | 12795949 | from __future__ import print_function
from graph_list import Graph
def dfs(G, current_vert, visited, sequence):
    visited[current_vert] = True  # mark the visited node
    # print("traversal: " + current_vert.get_vertex_ID())
    for nbr in current_vert.get_connections():  # take a neighbouring node
        if nbr not in visited:  # only descend into neighbours that have not been visited yet
            dfs(G, nbr, visited, sequence)  # recursively traverse the neighbouring node
    # Append the vertex only after all of its outgoing neighbours are finished
    # (post-order); the reversed sequence is then a topological order for a DAG.
    sequence.append(current_vert.get_vertex_ID())
def DFSTraversal(G):
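    # Run dfs() from every vertex that has not been reached yet; for a DAG, printing
    # the recorded finish sequence in reverse below yields a topological order.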
sequence = []
visited = {} # Dictionary to mark the visited nodes
for current_vert in G: # G contains vertex objects
        # Start traversing from the root node only if it is not visited
if current_vert not in visited:
# For a connected graph this is called only once
dfs(G, current_vert, visited, sequence)
print(sequence)
length = len(sequence)
for i in range(length, 0, -1):
print(sequence[i - 1])
if __name__ == '__main__':
G = Graph()
G.add_vertex('a')
G.add_vertex('b')
G.add_vertex('c')
G.add_vertex('d')
G.add_vertex('e')
G.add_vertex('f')
G.add_edge('a', 'b', 1)
G.add_edge('a', 'c', 1)
G.add_edge('b', 'd', 1)
G.add_edge('b', 'e', 1)
G.add_edge('c', 'd', 1)
G.add_edge('c', 'e', 1)
G.add_edge('d', 'e', 1)
G.add_edge('e', 'a', 1)
print('Graph data:')
#print(G.get_edges())
DFSTraversal(G)
| 3.484375 | 3 |
setup.py | darkslab/Crawler | 0 | 12795950 | from pathlib import Path
from setuptools import find_packages, setup
long_description: str = (Path(__file__).parent.resolve() / "README.md").read_text(
encoding="utf-8"
)
setup(
name="crawler",
version="0.0.0",
description="A Web Crawler",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/darkslab/Crawler",
author="<NAME>",
author_email="<EMAIL>",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Topic :: Software Development :: Build Tools",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.8",
],
keywords="crawler",
package_dir={"": "src"},
packages=find_packages(where="src"),
python_requires=">=3.8, <4",
install_requires=[
"click==7.1.2",
"aiohttp==3.8.1",
"yarl==1.7.2",
],
entry_points={
"console_scripts": [
"crawler=crawler:cli",
],
},
)
| 1.507813 | 2 |