repo_name
stringlengths 5
100
| path
stringlengths 4
254
| copies
stringlengths 1
5
| size
stringlengths 4
7
| content
stringlengths 681
1M
| license
stringclasses 15
values | hash
int64 -9,223,351,895,964,839,000
9,223,298,349B
| line_mean
float64 3.5
100
| line_max
int64 15
1k
| alpha_frac
float64 0.25
0.97
| autogenerated
bool 1
class | ratio
float64 1.5
8.15
| config_test
bool 2
classes | has_no_keywords
bool 2
classes | few_assignments
bool 1
class |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Yellowen/Barmaan | setup.py | 1 | 1952 | #!/usr/bin/env python
# -----------------------------------------------------------------------------
# Barmaan - is a very simple, easy to use yet powerful monitoring tool.
# Copyright (C) 2013-2014 Yellowen
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# -----------------------------------------------------------------------------
from setuptools import setup, find_packages
setup(name='Barmaan',
version='2.67.0',
description='Barmaan is a very simple, easy to use yet powerful monitoring tool.',
author='Sameer Rahmani, Shervin Ara,',
author_email='[email protected], [email protected]',
url='http://barmaan.yellowen.com/',
download_url="http://barmaan.yellowen.com/downloads/",
keywords="Monitoring",
license='GPL v2',
packages=find_packages(),
install_requires=['Twisted',],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
]
)
| gpl-2.0 | 3,128,222,375,649,994,000 | 41.434783 | 88 | 0.625512 | false | 4.436364 | false | false | false |
radamizell/WallApp | location/models.py | 1 | 1645 | from __future__ import unicode_literals
from django.db import models
from django.contrib.gis.db import models
from django.core.urlresolvers import reverse
from django.contrib.gis.geos import Point
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.contrib.contenttypes.fields import GenericRelation
from star_ratings.models import Rating
from constrainedfilefield.fields import ConstrainedFileField
import magic
from .validators import MimetypeValidator
class Places(models.Model):
title= models.CharField(max_length=100)
latitude= models.FloatField(null= True, blank=True,)
longitude= models.FloatField(null= True, blank=True,)
location = models.PointField(null= True, srid=4326,default= Point(27,-38))
objects = models.GeoManager()
sound= ConstrainedFileField(max_upload_size= 4194304,)
prefered_radius = models.IntegerField(default=5, help_text="in kilometers")
rating= GenericRelation(Rating, related_query_name='foos')
usersave= models.CharField(max_length=100)
def __str__(self):
return self.title
def save(self, *args, **kwargs):
if self.latitude and self.longitude:
self.location = Point(self.longitude, self.latitude)
super(Places,self).save(*args,**kwargs)
def get_absolute_url(self):
return reverse('posts:detail', kwargs={'id': self.id})
# def clean_file(self):
# file = self.cleaned_data.get("sound", False)
# filetype = magic.from_buffer(file.read())
# if not "audio/mpeg" in filetype:
# raise ValidationError("File is not XML.")
# return file
| mit | -2,185,394,025,348,146,700 | 16.315789 | 76 | 0.740426 | false | 3.427083 | false | false | false |
TalLinzen/russian-preps | code/python/constants.py | 1 | 1701 | # -*- coding: utf-8 -*-
# Author: Tal Linzen <[email protected]>
# License: BSD (3-clause)
# Linzen, Kasyanenko, & Gouskova (2013). (Lexical and phonological
# variation in Russian prepositions, Phonology 30(3).)
import os
project_dir = os.environ['RUSS_PREPS_ROOT']
paradigm_file = os.path.join(project_dir, 'resources', 'paradigms.txt')
yandex_shelf_file = os.path.join(project_dir, 'results', 'yandex.shelf')
rnc_shelf_file = os.path.join(project_dir, 'results', 'rnc.shelf')
default_csv_dir = os.path.join(project_dir, 'csv')
automatically_stressed_vowels = u'ё'
vowels = automatically_stressed_vowels + u'яюаеиоыуэ'
consontants = u'шртпщсдфгчклжхцвбнм'
znaks = [u'ь', u'ъ']
unvoiced_stops = u'птк'
voiced_stops = u'бдг'
unvoiced_fricatives = u'сфшщцчх'
voiced_fricatives = u'звж'
nasals = u'мн'
liquids = u'лp'
# Selkirk,
Elizabeth
(1984).
On
the
major
class
features
and
syllable
theory.
# Should we have the same sonority for palatalized consonants?
selkirk_sonority_scale = [unvoiced_stops, voiced_stops, unvoiced_fricatives,
voiced_fricatives, nasals, liquids]
s = {'cases': ['inst', 'gen'],
'variants': [u'с', u'со'],
'transcribed_variants': ['s', 'so']}
v = {'cases': ['acc', 'prep'],
'variants': [u'в', u'во'],
'transcribed_variants': ['v', 'vo']}
k = {'cases': ['dat'],
'variants': [u'к', u'ко'],
'transcribed_variants': ['k', 'ko']}
def build_sonority_dict(self):
self.sonority = {}
for group_index, group in enumerate(self.selkirk_sonority_scale):
for consonant in group:
self.sonority[consonant] = group_index
| bsd-3-clause | 4,551,813,099,564,061,700 | 26 | 77 | 0.657407 | false | 2.378855 | false | false | false |
berkmancenter/mediacloud | apps/common/src/python/mediawords/test/text.py | 1 | 2616 | import colorama
import difflib
import re
from mediawords.util.perl import decode_object_from_bytes_if_needed
colorama.init()
class TestCaseTextUtilities(object):
@staticmethod
def __normalize_text(text: str) -> str:
"""Normalize text by stripping whitespace and such."""
text = text.replace("\r\n", "\n")
text = re.sub(r'\s+', ' ', text)
text = text.strip()
return text
@staticmethod
def __colorize_difflib_ndiff_line_output(diff_line: str) -> str:
"""Colorize a single line of difflib.ndiff() output by adding some ANSI colors."""
if diff_line.startswith('+'):
diff_line = colorama.Fore.GREEN + diff_line + colorama.Fore.RESET
elif diff_line.startswith('-'):
diff_line = colorama.Fore.RED + diff_line + colorama.Fore.RESET
elif diff_line.startswith('^'):
diff_line = colorama.Fore.BLUE + diff_line + colorama.Fore.RESET
return diff_line
# noinspection PyPep8Naming
def assertTextEqual(self, got_text: str, expected_text: str, msg: str = None) -> None:
"""An equality assertion for two texts.
For the purposes of this function, a valid ordered sequence type is one
which can be indexed, has a length, and has an equality operator.
Args:
got_text: First text to be compared (e.g. received from a tested function).
expected_text: Second text (e.g. the one that is expected from a tested function).
msg: Optional message to use on failure instead of a list of differences.
"""
got_text = decode_object_from_bytes_if_needed(got_text)
expected_text = decode_object_from_bytes_if_needed(expected_text)
msg = decode_object_from_bytes_if_needed(msg)
if got_text is None:
raise TypeError("Got text is None.")
if expected_text is None:
raise TypeError("Expected text is None.")
got_text = self.__normalize_text(got_text)
expected_text = self.__normalize_text(expected_text)
if got_text == expected_text:
return
got_words = got_text.split()
expected_words = expected_text.split()
if got_words == expected_words:
return
if msg is None:
differences = []
for diff_line in difflib.ndiff(expected_words, got_words):
diff_line = self.__colorize_difflib_ndiff_line_output(diff_line=diff_line)
differences.append(diff_line)
msg = " ".join(differences)
raise AssertionError(msg)
| agpl-3.0 | -5,912,170,401,044,431,000 | 33.421053 | 94 | 0.618119 | false | 4.030817 | false | false | false |
juanlealz/mm2md | mm2md.py | 1 | 3338 | #!/usr/bin/env python
# -*- coding: UTF8 -*-
from xml.etree import ElementTree
from sys import argv
# Recursive
def print_node (e, header_depth=0, bullet=None, bullet_depth=0, multinode_paragraph=False):
#parse icons
icons=[]
for icon in e.findall("icon"):
icons.append(icon.attrib.get("BUILTIN"))
icons=set(icons)
#multi-node paragraph and bullets
if "bullets" in icons:
next_bullet="-"
elif "numbers" in icons:
next_bullet="1."
else:
next_bullet=None
if "multi-node_paragraph" in icons:
next_multinode_paragraph=True
else:
next_multinode_paragraph=False
#document title
if header_depth==0:
print "---"
print "title: ",
print e.attrib.get("TEXT").encode('UTF8')
print "...\n"
for node in e.findall("node"):
print_node(node, header_depth+1, multinode_paragraph=next_multinode_paragraph)
#comments
elif "comments" in icons:
pass
elif "comment" in icons:
for node in e.findall("node"):
print_node(node, header_depth, bullet, bullet_depth, multinode_paragraph=next_multinode_paragraph)
if "multi-node_paragraph" in icons and not multinode_paragraph:
print "\n\n",
#heading
elif "heading" in icons:
print "#"*header_depth,
print e.attrib.get("TEXT").encode('UTF8'),
print "\n\n",
for node in e.findall("node"):
print_node(node, header_depth+1, bullet=next_bullet, bullet_depth=bullet_depth, multinode_paragraph=next_multinode_paragraph)
#bullet-list start
elif bullet is None and ("bullets" in icons or "numbers" in icons):
print e.attrib.get("TEXT").encode('UTF8'),
print "\n\n",
for node in e.findall("node"):
print_node(node, header_depth, bullet=next_bullet, bullet_depth=bullet_depth, multinode_paragraph=next_multinode_paragraph)
print "\n",
#bullet-list item
elif bullet is not None:
print " "*bullet_depth+bullet,
if e.attrib.get("TEXT") is None:
print ""
else:
print e.attrib.get("TEXT").encode('UTF8'),
if not "multi-node_paragraph" in icons:
print "\n",
if next_bullet is None and not "multi-node_paragraph" in icons:
next_bullet="-"
for node in e.findall("node"):
print_node(node, header_depth, bullet=next_bullet, bullet_depth=bullet_depth+1, multinode_paragraph=next_multinode_paragraph)
if "multi-node_paragraph" in icons:
print "\n",
#multi-node paragraph header
elif "multi-node_paragraph" in icons:
print e.attrib.get("TEXT").encode('UTF8'),
print " ",
for node in e.findall("node"):
print_node(node, header_depth, bullet=next_bullet, bullet_depth=bullet_depth, multinode_paragraph=next_multinode_paragraph)
if not multinode_paragraph:
print "\n\n",
#multi-node paragraph item
elif multinode_paragraph:
print e.attrib.get("TEXT").encode('UTF8'),
for node in e.findall("node"):
print_node(node, header_depth, bullet=None, bullet_depth=bullet_depth, multinode_paragraph=True)
#implicit bullet-list start
elif e.find("node") is not None:
next_bullet="-"
print e.attrib.get("TEXT").encode('UTF8'),
print "\n\n",
for node in e.findall("node"):
print_node(node, header_depth, bullet=next_bullet, bullet_depth=bullet_depth, multinode_paragraph=next_multinode_paragraph)
print "\n",
return
#one-node paragraph
else:
print e.attrib.get("TEXT").encode('UTF8'),
print "\n\n",
#Start
et = ElementTree.parse(argv[1])
print_node(et.find("node")) | gpl-2.0 | 935,514,337,068,483,200 | 29.633028 | 128 | 0.696525 | false | 2.890043 | false | false | false |
Talos4757/NVIDIABot | driverStation.py | 2 | 3817 | '''
Copyright (c) 2014, Rishi Desai
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import Tkinter
import tkMessageBox
import socket
import pickle
import pygame
top = Tkinter.Tk()
joyFrame = Tkinter.Frame(top)
noJoyFrame = Tkinter.Frame(top)
port = 8081
host = "10.99.99.2"
#host = "192.168.1.83"
pygame.init()
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#j =0;
s.bind(("", 0))
started = False
def startSession():
global started
started= True
s.sendto(pickle.dumps(started), (host, port))
# change wait to 2 after done testing
top.after(200, sendJoystickVal)
def endSession():
global started
started= False
#s.bind(("", 0))
s.sendto(pickle.dumps(started), (host, port))
#top.destroy()
def closeProgram():
s.close()
top.destroy()
sessionStart = Tkinter.Button(top, text ="Start Session", command = startSession)
sessionEnd = Tkinter.Button(top, text="End Session", command=endSession)
programClose= Tkinter.Button(top, text="Close Program", command=closeProgram)
def isJoystick():
return pygame.joystick.get_count()>0
def whileJoyCon():
if(isJoystick()):
sessionStart.config(state="normal")
sessionStart.pack()
sessionEnd.config(state="normal")
sessionEnd.pack()
programClose.config(state="normal")
programClose.pack()
howTo = Tkinter.Text(top)
howTo.insert(Tkinter.INSERT, "Press Start on the Joystick or end session to stop the program")
howTo.pack()
else:
print isJoystick()
sessionStart.config(state="disable")
sessionStart.pack()
sessionEnd.config(state="disable")
sessionEnd.pack()
programClose.config(state="normal")
programClose.pack()
noJoy = Tkinter.Text(top)
noJoy.insert(Tkinter.INSERT, "No Joystick Connected. Please connect a Joystick and Restart the program")
noJoy.pack()
def sendJoystickVal():
#print isJoy
#if(isJoystick):
pygame.event.pump()
j = pygame.joystick.Joystick(0)
j.init()
xAxis = j.get_axis(1)
yAxis=j.get_axis(3)
i=1
button =-1;
for i in range(j.get_numbuttons()):
if(j.get_button(i)==True):
button = i
break
data = [started, xAxis, -yAxis, button]
s.sendto(pickle.dumps(data), (host, port))
print data
#change wait to 2 after done testing
top.after(200, sendJoystickVal)
whileJoyCon()
#rint started
#f(started):
#top.after(2000, sendJoystickVal)
top.mainloop()
| bsd-2-clause | -3,083,335,546,384,579,600 | 33.387387 | 112 | 0.691381 | false | 3.824649 | true | false | false |
mitocw/xsiftx | xsiftx/lti/util.py | 1 | 1626 | """
Utility support functions
"""
from xsiftx.util import get_sifters
class LTIException(Exception):
"""
Custom LTI exception for proper handling
of LTI specific errors
"""
pass
class LTIRoleException(Exception):
"""
Exception class for when LTI user doesn't have the
right role.
"""
pass
class InvalidAPIUsage(Exception):
"""
API Error handler to return helpful json when problems occur.
Stolen right from the flask docs
"""
status_code = 400
def __init__(self, message, status_code=None, payload=None):
"""
Setup class with optional arguments for returning later
"""
Exception.__init__(self)
self.message = message
if status_code is not None:
self.status_code = status_code
self.payload = payload
def to_dict(self):
"""
Aggregate properties into dictionary for use
in returning jsonified errors.
"""
exception_dict = dict(self.payload or ())
exception_dict['message'] = self.message
return exception_dict
def get_allowed_sifters(consumer, as_dict=False):
"""
Returns a list of sifter names allowed by the client
"""
all_sifters = get_sifters()
sifters = {}
allowed_sifters = consumer.get('allowed_sifters', None)
if allowed_sifters:
for sifter in all_sifters.keys():
if sifter in allowed_sifters:
sifters[sifter] = all_sifters[sifter]
else:
sifters = all_sifters
if not as_dict:
return sifters.keys()
else:
return sifters
| gpl-3.0 | -5,572,836,397,753,395,000 | 23.636364 | 65 | 0.615006 | false | 3.985294 | false | false | false |
ctogle/dilapidator | src/dilap/BROKEN/infrastructure/infragraph.py | 1 | 26037 | import dilap.core.base as db
import dilap.core.vector as dpv
import dilap.core.tools as dpr
import dilap.core.lsystem as dls
import dilap.mesh.tools as dtl
import dilap.mesh.pointset as dps
import dilap.infrastructure.graphnode as gnd
import dilap.infrastructure.graphedge as geg
import dilap.infrastructure.graphregion as grg
import dilap.infrastructure.infralsystem as ifl
import matplotlib.pyplot as plt
import random as rm
import pdb
class graph(db.base):
def plot_regions(self,ax = None):
if ax is None:ax = dtl.plot_axes()
pdb.set_trace()
def plot(self,ax = None):
if ax is None:ax = dtl.plot_axes()
for n in self.nodes:
if not n is None:
n.plot(ax)
for eg in self.edges:
if not eg is None:
eg.plot(ax)
ax.set_xlim([-100,100])
ax.set_ylim([-100,100])
ax.set_zlim([-100,100])
return ax
def plot_xy(self,ax = None):
if ax is None:ax = dtl.plot_axes_xy()
for n in self.nodes:
if not n is None:
n.plot_xy(ax)
for eg in self.edges:
if not eg is None:
eg.plot_xy(ax)
ax.set_aspect('equal')
return ax
def __str__(self):
st = '\tinfragraph with:\n\t'
st += str(self._count_nodes())+'\tnodes\n\t'
st += str(self._count_edges())+'\tedges\n\t'
return st
def _count_nodes(self):
ncnt = 0
for x in range(self.nodecount):
if self.nodes[x]:ncnt += 1
return ncnt
def _count_edges(self):
ecnt = 0
for x in range(self.edgecount):
eg = self.edges[x]
if self.edges[x]:ecnt += 1
return ecnt
# verify graph is correct where possible
def _update(self):
for nd in self.nodes:
if not nd is None:
nd._spikes(self)
for eg in self.edges:
if not eg is None:
eg._place_road(self)
self._regions()
def __init__(self,**kwargs):
self.nodes = []
self.nodes_lookup = {}
self.edges = []
self.edges_lookup = {}
self.nodecount = 0
self.edgecount = 0
# given an edge e, direction 0 or 1, and cw or ccw
# return the forward path of e
def _loopwalk(self,ie,d,w):
def complete(inp):
i1,i2 = inp[0],inp[1]
cnt = len(inp)
for x in range(1,cnt-1):
if inp[x] == i1:
if inp[x+1] == i2:
return inp[:x+1]
if d:inpath = [ie.one.key(),ie.two.key()]
else:inpath = [ie.two.key(),ie.one.key()]
while True:
ekey = (inpath[-2],inpath[-1])
e = self.edges[self.edges_lookup[ekey]]
nx = e._walk(inpath[-1],w)
if nx is None:
inpath.append(inpath[-2])
nx = e._walk(inpath[-3],w)
nxndkey = self.nodes[nx].key()
#if ie.two.key() == (0.0,100.0,0.0):
# print('going',nxndkey,inpath)
# pdb.set_trace()
res = complete(inpath)
if not res is None:return res
#if inpath[-1] == inpath[0] and nxndkey == inpath[1]:return inpath
#if inpath.count(ie.one.key()) > 1 and nxndkey == inpath[1]:
#if inpath.count(ie.one.key()) > 1 and inpath.count(ie.two.key()) > 1:
# return inpath
else:inpath.append(nxndkey)
# return a collection of points outlining all edge loops in the graph
def _edge_loops(self):
edgelloops = []
edgerloops = []
edgestodo = self.edges[:]
while edgestodo:
e = edgestodo.pop(0)
ewalkrcw = self._loopwalk(e,1,1)
ewalkrccw = self._loopwalk(e,0,0)
ewalklccw = self._loopwalk(e,0,1)
ewalklcw = self._loopwalk(e,1,0)
if set(ewalkrcw) == set(ewalkrccw):
#print('closed loop!',len(edgestodo))
rloop = tuple(ewalkrcw)
else:
print('unclosed loop!',len(edgestodo))
pdb.set_trace()
rloop = tuple(ewalkrccw[::-1][:-1]+ewalkrcw[1:])
if set(ewalklccw) == set(ewalklcw):
#print('closed loop!',len(edgestodo))
lloop = tuple(ewalklccw)
else:
print('unclosed loop!',len(edgestodo))
pdb.set_trace()
lloop = tuple(ewalklccw[::-1][:-1]+ewalklcw[1:])
rlloop = lloop[::-1]
if not dpr.cyclic_permutation(rlloop,rloop):
edgelloops.append(lloop)
edgerloops.append(rloop)
#pdb.set_trace()
return edgelloops,edgerloops
# eloop is a list of node keys which are connected in a loop by edges
# side is either 0 (right) or 1 (left) relative to the first edge
# in the loop - other edges must be handled carefully
def _edge_loop_points(self,eloop,side):
elcnt = len(eloop)
looppts = []
ne = self.edges[self.edges_lookup[eloop[0],eloop[1]]]
if side == 0:
looppts.extend(ne.rbpts)
lnkey = ne.two.key()
elif side == 1:
looppts.extend(ne.lbpts)
lnkey = ne.one.key()
le = ne
for elx in range(2,elcnt+1):
elx1,elx2 = elx-1,elx if elx < elcnt else 0
nekey = (eloop[elx1],eloop[elx2])
if nekey[0] == nekey[1]:return looppts
ne = self.edges[self.edges_lookup[nekey]]
nelooppts = self._find_road_points(looppts[-1],le,ne)
looppts.extend(nelooppts)
le = ne
return looppts
# given the last and next edge, and the last point in a loop
# properly return the set of road points which connects
def _find_road_points(self,tip,le,ne):
# create the shortest line segment from el1 to el2
def tiptail(el1,el2):
d1 = dpv.distance(el1[ 0],el2[ 0])
d2 = dpv.distance(el1[ 0],el2[-1])
d3 = dpv.distance(el1[-1],el2[ 0])
d4 = dpv.distance(el1[-1],el2[-1])
md = min(d1,d2,d3,d4)
if md == d1:return el1[ 0],el2[ 0]
elif md == d2:return el1[ 0],el2[-1]
elif md == d3:return el1[-1],el2[ 0]
elif md == d4:return el1[-1],el2[-1]
def closer(p,r,l):
if dpv.distance(p,r) < dpv.distance(p,l):return r
else:return l
'''#
ax = dtl.plot_axes_xy()
ax = dtl.plot_point_xy(tip,ax)
ax = dtl.plot_edges_xy(le.rpts,ax)
ax = dtl.plot_edges_xy(ne.rpts,ax)
s1,s2 = tiptail(le.rpts,ne.rpts)
ax = dtl.plot_edges_xy([s1,s2],ax,lw = 5.0)
ax = dtl.plot_edges_xy([tip,closer(tip,ne.rbpts[0],ne.rbpts[-1])],ax)
ax = dtl.plot_edges_xy([tip,closer(tip,ne.lbpts[0],ne.lbpts[-1])],ax)
plt.show()
'''#
'''#
this function is verrrrry sloppy.... rewrite it....
'''#
def same_side(lp):
lp0d = dpv.distance(lp[ 0],tip)
lp1d = dpv.distance(lp[-1],tip)
lpt = lp[0] if lp0d < lp1d else lp[-1]
s1,s2 = tiptail(le.rpts,ne.rpts)
segsect = dpr.segments_intersect_noncolinear(s1,s2,lpt,tip)
if not segsect:return lpt
def connect_end(lp,lpt):
d1,d2 = dpv.distance(lp[0],lpt),dpv.distance(lp[-1],lpt)
if d1 < d2:return lp[:]
else:return lp[::-1]
if le is ne:
if tip in le.rbpts:return connect_end(ne.lbpts,tip)
else:return connect_end(ne.rbpts,tip)
else:
lrpt = same_side(ne.rbpts)
llpt = same_side(ne.lbpts)
if lrpt is None and llpt is None:
lsd = dpv.distance(tip,ne.lbpts[ 0])
led = dpv.distance(tip,ne.lbpts[-1])
rsd = dpv.distance(tip,ne.rbpts[ 0])
red = dpv.distance(tip,ne.rbpts[-1])
sxs = dpr.order_ascending([lsd,led,rsd,red])
nelooppts = None
for sx in sxs:
if sx == 0 and not tip in ne.lbpts:nelooppts = ne.lbpts[:]
elif sx == 1 and not tip in ne.lbpts:nelooppts = ne.lbpts[:-1]
elif sx == 2 and not tip in ne.rbpts:nelooppts = ne.rbpts[:]
elif sx == 3 and not tip in ne.rbpts:nelooppts = ne.rbpts[:-1]
if not nelooppts is None:break
return nelooppts
if not lrpt is None:return connect_end(ne.rbpts,lrpt)
else:return connect_end(ne.lbpts,llpt)
# return a collection of points outlining all nodes/edges in the graph
def _edge_loop_boundaries(self):
def uniq_loop(eloops,elp):
uniq = True
for elps in eloops:
for x in range(len(elps)):
p = elps[x]
for y in range(len(elp)):
q = elp[y]
if p.near(q):return False
return True
edgelloops,edgerloops = self._edge_loops()
rperms = {}
lperms = {}
for ex in range(len(edgelloops)):
lloop,rloop = edgelloops[ex],edgerloops[ex]
rkey = rloop[:-1]
isperm = False
for rps in rperms:
if dpr.cyclic_permutation(rkey,rps):isperm = True;break
if not isperm:rperms[rkey] = self._edge_loop_points(rloop,0)
lkey = lloop[:-1]
isperm = False
for lps in lperms:
if dpr.cyclic_permutation(lkey,lps):isperm = True;break
if not isperm:lperms[lkey] = self._edge_loop_points(lloop,1)
eloops = []
for el in lperms:
elp = [v for v in lperms[el]]
if uniq_loop(eloops,elp):eloops.append(elp)
for el in rperms:
elp = [v for v in rperms[el]]
if uniq_loop(eloops,elp):eloops.append(elp)
return self._rank_edge_loops(eloops)
# determine how the loops are arranged based on containment
# so that they can be properly triangulated
def _rank_edge_loops(self,eloops):
bedgeloops = {}
#ax = dtl.plot_axes_xy()
#ax = self.plot_xy(ax)
#for bedge in eloops:ax = dtl.plot_edges_xy(bedge,ax)
#plt.show()
containments = [[] for el in eloops]
for elx in range(len(eloops)):
elp = tuple(eloops[elx])
for elxo in range(len(eloops)):
if elxo == elx:continue
elpo = tuple(eloops[elxo])
isect = dpr.concaves_intersect(elp,elpo)
elins = dpr.inconcave_xy(elpo[0],elp)
if isect:raise ValueError
elif elins:containments[elx].append(elxo)
looplook = {'king':[],'interiors':[]}
for elx in range(len(eloops)):
cont = containments[elx]
if cont:looplook['king'].append(eloops[elx])
else:looplook['interiors'].append(eloops[elx])
return looplook
# provide a polygon for the terrain
#
# provide a polygon for the road
#
# the terrain runs from convex bound to the loop that contains
# all other loops
# the terrain also contains the interiors of all loops of road
#
# the road extends from the loop that contains all others to the
# collection of all other loops of road
#
# assume the graph is connected? fix if not?
# calculate polygons representing regions to place terrain
def _regions(self):
rpts = []
for eg in self.edges:rpts.extend([x.copy() for x in eg.rpts])
convexbnd = dpr.pts_to_convex_xy(rpts)
convexbnd = dpr.inflate(convexbnd,50)
eloops = self._edge_loop_boundaries()
self.tpolygons = [(tuple(convexbnd),(tuple(eloops['king'][0]),))]+\
[(tuple(i),()) for i in eloops['interiors']]
self.rpolygons = [(eloops['king'][0],tuple(eloops['interiors']))]
# add a new node to the graph or existing node index
# ndkey is a tuple(x,y,layer)
def _add_node(self,ndkey):
if ndkey in self.nodes_lookup:
nd = self.nodes[self.nodes_lookup[ndkey]]
if not nd is None:return nd.index
nx,ny,nl = ndkey
newnode = gnd.node(dpv.vector(nx,ny,20*nl),layer = nl)
newnode.index = self.nodecount
self.nodes.append(newnode)
self.nodes_lookup[ndkey] = newnode.index
self.nodecount += 1
return newnode.index
# delete an existing node from the graph
def _del_node(self,ndkey):
if ndkey in self.nodes_lookup:
nd = self.nodes[self.nodes_lookup[ndkey]]
if nd is None:return
for ekey in self.edges_lookup:
if nd.index in ekey:
self._del_edge(*ekey)
self.nodes[nd.index] = None
del self.nodes_lookup[nd.key()]
# add a new edge to the graph, or return existing index
def _add_edge(self,ndkey1,ndkey2,**kwargs):
if ndkey1 in self.nodes_lookup:
nd1 = self.nodes[self.nodes_lookup[ndkey1]]
else:nd1 = self.nodes[self._add_node(ndkey1)]
if ndkey2 in self.nodes_lookup:
nd2 = self.nodes[self.nodes_lookup[ndkey2]]
else:nd2 = self.nodes[self._add_node(ndkey2)]
newedge = geg.edge(nd1,nd2,**kwargs)
newedge.index = self.edgecount
ndir1,ndir2 = newedge._directions()
newedge.one.connect(ndir1,newedge.two)
newedge.two.connect(ndir2,newedge.one)
self.edges_lookup[(ndkey1,ndkey2)] = newedge.index
self.edges_lookup[(ndkey2,ndkey1)] = newedge.index
self.edges.append(newedge)
self.edgecount += 1
return newedge.index
# add a new edges to the graph, return indicies
def _add_edges(self,ndkeys,**kwargs):
edgexs = []
for kdx in range(1,len(ndkeys)):
ndkey1,ndkey2 = ndkeys[kdx-1],ndkeys[kdx]
edgexs.append(self._add_edge(ndkey1,ndkey2,**kwargs))
return edgexs
# delete an existing edge from the graph
def _del_edge(self,ndkey1,ndkey2):
ekey = (ndkey1,ndkey2)
if not ekey in self.edges_lookup:return
edge = self.edges[self.edges_lookup[ekey]]
edge.one.disconnect(edge.two)
edge.two.disconnect(edge.one)
del self.edges_lookup[ekey]
del self.edges_lookup[ekey[::-1]]
self.edges[edge.index] = None
# delete existing nodes from the graph and replace all
# connectivity with new edges to a new node
# ndxs is a list of existing nodes indices being merged
# nndx is the index of the new node which replaces them
def _merge_nodes(self,ndxs,nndx,**kwargs):
mnds = [self.nodes[x] for x in ndxs if not x == nndx]
for ndx in ndxs:
if ndx == nndx:continue
for ndrk in list(self.nodes[ndx].ring.keys()):
ekey = (ndrk,ndx)
if ekey in list(self.edges_lookup.keys()):
eg = self.edges_lookup[ekey]
if eg is None:continue
iterp = self.edges[eg].interpolated
self._del_edge(*ekey)
if not ndrk in ndxs:
nd1,nd2 = self.nodes[nndx],self.nodes[ndrk]
newedge = edge(nd1,nd2,interpolated = iterp)
#newedge = edge(nd1,nd2,**kwargs)
self._add_edge(newedge)
self._del_node(ndx)
# return index of closest node to p within e, or None
def _find_node(self,p,e):
nps = [nd.p for nd in self.nodes]
ndx = dpv.find_closest(p,nps,self.nodecount,1.0)
if self.nodes[ndx].p.neighborhood(p,e):return ndx
# return indices of all nodes within e of p
def _find_nodes(self,p,e):
within = []
for ndx in range(self.nodecount):
nd = self.nodes[ndx]
if nd is None:continue
if nd.p.neighborhood(p,e):
within.append(ndx)
return within
# return index of closest node within a cone,
# cone is pointed from o towards p,
# has halfangle e, and ends at p,
# or return None if no node exists
def _find_node_cone(self,o,p,e):
ca = dpr.deg(dpv.angle_from_xaxis_xy(dpv.v1_v2(o,p).normalize()))
#best,margin = None,dpv.distance(o,p)
best,margin = None,100000000000000000
for ndx in range(self.nodecount):
nd = self.nodes[ndx]
tn = dpv.v1_v2(o,nd.p).normalize()
npa = dpr.deg(dpv.angle_from_xaxis_xy(tn))
if adist(ca,npa) < e:
ndd = dpv.distance(o,nd.p)
if ndd < margin:
best = ndx
margin = ndd
return best
# add a new edge to the graph, or return existing index
# this function should do this safely, so the resulting graph
# does not carry improper intersections!
# return None if the desired edge could not be created properly
def _edge(self,nkey1,nkey2,**kwargs):
n1 = self.nodes_lookup[nkey1]
n2 = self.nodes_lookup[nkey2]
existing = self._find_edge(n1,n2)
if not existing is None:return existing
else:
nd1,nd2 = self.nodes[n1],self.nodes[n2]
newedge = edge(nd1,nd2,**kwargs)
ipts = []
ilys = []
for edx in range(self.edgecount):
eg = self.edges[edx]
if eg is None:continue
ipt = eg._tangents_intersect(newedge)
if not ipt is None:
ily = eg._layers_intersect(newedge)
ilycnt = len(ily)
if ilycnt > 1:raise ValueError
if type(ipt) is type(tuple()):
if ilycnt == 0:
continue
print('overlapping intersection!')
return None
#pdb.set_trace()
if ily:
l = ily[0]
ilys.append(l)
elif eg.one.layer == eg.two.layer:
l = eg.one.layer
if newedge.one.layer == newedge.two.layer:
iptndxxs = self._node(node(ipt,layer = newedge.one.layer))
print('shit: just like you said')
ilys.append(newedge.one.layer)
else:
print('shit: layer ambiguity')
pdb.set_trace()
else:
print('shit: layer ambiguity')
pdb.set_trace()
iptndxs = self._node(node(ipt,layer = l))
iptndx = iptndxs[l]
self._split_edge(eg.one.index,eg.two.index,iptndx)
ipts.append(ipt)
if not ipts:return self._add_edge(newedge,**kwargs)
newedgexs = []
ipts.insert(0,nd1.p)
ilys.insert(0,nd1.layer)
ipts.append(nd2.p)
ilys.append(nd2.layer)
siptxs = dpv.proximity_order_xy(nd1.p,ipts)
for iptx in range(1,len(ipts)):
ipt1,ipt2 = ipts[siptxs[iptx-1]],ipts[siptxs[iptx]]
ily1,ily2 = ilys[siptxs[iptx-1]],ilys[siptxs[iptx]]
n1 = self.nodes_lookup[(ipt1.x,ipt1.y,ily1)]
n2 = self.nodes_lookup[(ipt2.x,ipt2.y,ily2)]
nd1,nd2 = self.nodes[n1],self.nodes[n2]
print('illlys',ilys,ily1,ily2)
#pdb.set_trace()
newedge = edge(nd1,nd2,**kwargs)
newedgexs.append(self._add_edge(newedge,**kwargs))
return newedgexs
# return index of edge within connecting ndx1,ndx2, or None
def _find_edge(self,ndx1,ndx2):
if (ndx1,ndx2) in self.edges_lookup:
return self.edges_lookup[(ndx1,ndx2)]
# remove existing edge from ndx1 to ndx2
# add two new edges, connecting n to ndx1 and to ndx2
def _split_edge(self,ndx1,ndx2,newndx,**kwargs):
sekey = self.edges_lookup[(ndx1,ndx2)]
if not sekey is None:
sedge = self.edges[sekey]
kwargs['interpolated'] = sedge.interpolated
else:
print('IM BULLSHITTING OVER HERE')
return
self._del_edge(ndx1,ndx2)
nd1,nd2 = self.nodes[ndx1],self.nodes[newndx]
if nd1.p.near(nd2.p):pdb.set_trace()
newedge = edge(nd1,nd2,**kwargs)
self._add_edge(newedge)
nd1,nd2 = self.nodes[ndx2],self.nodes[newndx]
if nd1.p.near(nd2.p):pdb.set_trace()
newedge = edge(nd1,nd2,**kwargs)
self._add_edge(newedge)
def smatter():
g = graph()
g._node(node(dpv.vector(0,0,0)))
for x in range(100):
ndx = rm.choice(range(g.nodecount))
rcnt = len(g.nodes[ndx].ring)
if rcnt == 0:
ndd = dpv.xhat.copy()
ndd.rotate_z(dpr.rad(rm.choice(range(360))))
ndd.scale_u(rm.choice([100,200,300]))
elif rcnt == 1:
ndir = next(iter(g.nodes[ndx].ring.values()))
nda = ndir+180 if ndir < 180 else ndir - 180
ndd = dpv.xhat.copy().rotate_z(dpr.rad(nda))
ndd.scale_u(rm.choice([100,200,300]))
elif rcnt == 2:
r1,r2 = tuple(g.nodes[ndx].ring.values())
mpt = (r1+r2)/2.0
nda = mpt+180 if mpt < 180 else mpt - 180
ndd = dpv.xhat.copy().rotate_z(dpr.rad(nda))
ndd.scale_u(rm.choice([100,200,300]))
elif rcnt == 3:
t1,t2,t3 = tuple(g.nodes[ndx].ring.values())
d1,d2,d3 = adist(t1,t2),adist(t2,t3),adist(t3,t1)
if d1 > d2 and d1 > d3:nda = (t1+t2)/2.0
elif d2 > d1 and d2 > d3:nda = (t2+t3)/2.0
elif d3 > d1 and d3 > d2:nda = (t3+t1)/2.0
ndd = dpv.xhat.copy().rotate_z(dpr.rad(nda))
ndd.scale_u(rm.choice([100,200,300]))
elif rcnt == 4:
print('this node cannot be more connected!',ndx)
#g._extrude_safe(ndx,ndd)
g._extrude(ndx,ndd)
#g._update()
#ax = g.plot_xy()
#ax.set_aspect('equal')
#plt.show()
return g
def ramp():
g = graph()
g._add_edge((-100,-100,0),(0,-100,0),interpolated = False)
g._add_edge((0,-100,0),(100,0,0))
g._add_edge((100,0,0),(0,100,0))
g._add_edge((0,100,0),(-100,0,1))
g._add_edge((-100,0,1),(0,-100,1))
g._add_edge((0,-100,1),(100,-100,1),interpolated = False)
g._add_edge((100,-100,1),(200,-100,1))
g._add_edge((200,-100,1),(300,0,0))
g._add_edge((300,0,0),(300,100,0))
return g
def hairpin():
g = graph()
g._add_edge((0,0,0),(100,50,0))
g._add_edge((100,50,0),(0,100,0))
g._add_edge((0,100,0),(100,150,0))
#g._add_edge((0,0,0),(-50,100,1))
#g._add_edge((-50,100,2),(100,150,3))
return g
def circle():
g = graph()
g._add_edge((0,0,0),(50,50,0),interpolated = False)
#g._add_edge((0,0,0),(50,50,0))
g._add_edge((50,50,0),(0,100,0),interpolated = False)
#g._add_edge((50,50,0),(0,100,0))
g._add_edge((0,100,0),(-50,50,0),interpolated = True)
#g._add_edge((0,100,0),(-50,50,0))
g._add_edge((-50,50,0),(0,0,0),interpolated = True)
#g._add_edge((-50,50,0),(0,0,0))
return g
def newcastle():
g = graph()
l,w = 200,200
g._add_edge((0,0,0),(l*0.5,w*0.5,0),interpolated = True)
g._add_edge((l*0.5,w*0.5,0),(0,w,0),interpolated = True)
g._add_edge((0,w,0),(-l*0.5,l*0.5,0),interpolated = True)
g._add_edge((-l*0.5,w*0.5,0),(0,0,0),interpolated = True)
g._add_edge((0,0,0),(0,w*0.5,0),interpolated = True)
return g
def eight():
g = graph()
r = 100
g._add_edge((0,0,0),(r,0,0),interpolated = True)
g._add_edge((r,0,0),(2*r,0,0),interpolated = True)
g._add_edge((2*r,0,0),(2*r,r,0),interpolated = True)
g._add_edge((2*r,r,0),(r,r,0),interpolated = True)
g._add_edge((r,r,0),(0,r,0),interpolated = True)
g._add_edge((0,r,0),(0,0,0),interpolated = True)
g._add_edge((r,r,0),(r,0,0),interpolated = True)
g._update()
#ax = dtl.plot_axes()
#ax = g.plot(ax)
#ax.set_zlim([0,40])
#plt.show()
return g
def clover():
g = graph()
r = 100
g._add_edge((0,0,0),( r,0,0),interpolated = True)
g._add_edge((0,0,0),(-r,0,0),interpolated = True)
g._add_edge((0,0,0),(0, r,0),interpolated = True)
g._add_edge((0,0,0),(0,-r,0),interpolated = True)
g._add_edge(( r,0,0),(2*r,-r,0),interpolated = True)
g._add_edge((2*r,-r,0),(3*r,0,0),interpolated = True)
g._add_edge((3*r,0,0),(2*r,r,0),interpolated = True)
g._add_edge((2*r,r,0),(r,0,0),interpolated = True)
g._update()
#ax = dtl.plot_axes()
#ax = g.plot(ax)
#ax.set_zlim([0,40])
#plt.show()
return g
def opass():
g = graph()
bnd = dpr.square(100,100)
oprgn1 = grg.overpass(bnd)
oprgn1._graph(g)
bnd = dpr.square(100,100,dpv.vector(500,0,0),dpr.rad(75))
oprgn2 = grg.overpass(bnd)
oprgn2._graph(g)
bnd = dpr.square(100,100,dpv.vector(250,200,0),dpr.rad(35))
oprgn3 = grg.overpass(bnd)
oprgn3._graph(g)
oprgn1._connect(oprgn2,g)
oprgn2._connect(oprgn3,g)
oprgn3._connect(oprgn1,g)
g._update()
ax = dtl.plot_axes()
ax = oprgn1.plot(ax)
ax = oprgn2.plot(ax)
ax = oprgn3.plot(ax)
ax = g.plot(ax)
ax.set_zlim([0,40])
plt.show()
return g
def generate_graph():
#g = graph()
#g = smatter()
#g = lsystem_graph()
g = ramp()
#g = opass()
#g = hairpin()
g._update()
ax = g.plot()
plt.show()
return g
| mit | -7,481,076,039,353,380,000 | 33.39498 | 86 | 0.528095 | false | 3.113729 | false | false | false |
googleapis/python-bigquery-reservation | google/cloud/bigquery_reservation_v1/services/reservation_service/pagers.py | 1 | 21681 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
Any,
AsyncIterable,
Awaitable,
Callable,
Iterable,
Sequence,
Tuple,
Optional,
)
from google.cloud.bigquery_reservation_v1.types import reservation
class ListReservationsPager:
"""A pager for iterating through ``list_reservations`` requests.
This class thinly wraps an initial
:class:`google.cloud.bigquery_reservation_v1.types.ListReservationsResponse` object, and
provides an ``__iter__`` method to iterate through its
``reservations`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListReservations`` requests and continue to iterate
through the ``reservations`` field on the
corresponding responses.
All the usual :class:`google.cloud.bigquery_reservation_v1.types.ListReservationsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., reservation.ListReservationsResponse],
request: reservation.ListReservationsRequest,
response: reservation.ListReservationsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.bigquery_reservation_v1.types.ListReservationsRequest):
The initial request object.
response (google.cloud.bigquery_reservation_v1.types.ListReservationsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = reservation.ListReservationsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[reservation.ListReservationsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[reservation.Reservation]:
for page in self.pages:
yield from page.reservations
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListReservationsAsyncPager:
"""A pager for iterating through ``list_reservations`` requests.
This class thinly wraps an initial
:class:`google.cloud.bigquery_reservation_v1.types.ListReservationsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``reservations`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListReservations`` requests and continue to iterate
through the ``reservations`` field on the
corresponding responses.
All the usual :class:`google.cloud.bigquery_reservation_v1.types.ListReservationsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., Awaitable[reservation.ListReservationsResponse]],
request: reservation.ListReservationsRequest,
response: reservation.ListReservationsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.bigquery_reservation_v1.types.ListReservationsRequest):
The initial request object.
response (google.cloud.bigquery_reservation_v1.types.ListReservationsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = reservation.ListReservationsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[reservation.ListReservationsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[reservation.Reservation]:
async def async_generator():
async for page in self.pages:
for response in page.reservations:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListCapacityCommitmentsPager:
"""A pager for iterating through ``list_capacity_commitments`` requests.
This class thinly wraps an initial
:class:`google.cloud.bigquery_reservation_v1.types.ListCapacityCommitmentsResponse` object, and
provides an ``__iter__`` method to iterate through its
``capacity_commitments`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListCapacityCommitments`` requests and continue to iterate
through the ``capacity_commitments`` field on the
corresponding responses.
All the usual :class:`google.cloud.bigquery_reservation_v1.types.ListCapacityCommitmentsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., reservation.ListCapacityCommitmentsResponse],
request: reservation.ListCapacityCommitmentsRequest,
response: reservation.ListCapacityCommitmentsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.bigquery_reservation_v1.types.ListCapacityCommitmentsRequest):
The initial request object.
response (google.cloud.bigquery_reservation_v1.types.ListCapacityCommitmentsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = reservation.ListCapacityCommitmentsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[reservation.ListCapacityCommitmentsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[reservation.CapacityCommitment]:
for page in self.pages:
yield from page.capacity_commitments
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListCapacityCommitmentsAsyncPager:
"""A pager for iterating through ``list_capacity_commitments`` requests.
This class thinly wraps an initial
:class:`google.cloud.bigquery_reservation_v1.types.ListCapacityCommitmentsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``capacity_commitments`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListCapacityCommitments`` requests and continue to iterate
through the ``capacity_commitments`` field on the
corresponding responses.
All the usual :class:`google.cloud.bigquery_reservation_v1.types.ListCapacityCommitmentsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., Awaitable[reservation.ListCapacityCommitmentsResponse]],
request: reservation.ListCapacityCommitmentsRequest,
response: reservation.ListCapacityCommitmentsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.bigquery_reservation_v1.types.ListCapacityCommitmentsRequest):
The initial request object.
response (google.cloud.bigquery_reservation_v1.types.ListCapacityCommitmentsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = reservation.ListCapacityCommitmentsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[reservation.ListCapacityCommitmentsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[reservation.CapacityCommitment]:
async def async_generator():
async for page in self.pages:
for response in page.capacity_commitments:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListAssignmentsPager:
"""A pager for iterating through ``list_assignments`` requests.
This class thinly wraps an initial
:class:`google.cloud.bigquery_reservation_v1.types.ListAssignmentsResponse` object, and
provides an ``__iter__`` method to iterate through its
``assignments`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListAssignments`` requests and continue to iterate
through the ``assignments`` field on the
corresponding responses.
All the usual :class:`google.cloud.bigquery_reservation_v1.types.ListAssignmentsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., reservation.ListAssignmentsResponse],
request: reservation.ListAssignmentsRequest,
response: reservation.ListAssignmentsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.bigquery_reservation_v1.types.ListAssignmentsRequest):
The initial request object.
response (google.cloud.bigquery_reservation_v1.types.ListAssignmentsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = reservation.ListAssignmentsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[reservation.ListAssignmentsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[reservation.Assignment]:
for page in self.pages:
yield from page.assignments
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListAssignmentsAsyncPager:
"""A pager for iterating through ``list_assignments`` requests.
This class thinly wraps an initial
:class:`google.cloud.bigquery_reservation_v1.types.ListAssignmentsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``assignments`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListAssignments`` requests and continue to iterate
through the ``assignments`` field on the
corresponding responses.
All the usual :class:`google.cloud.bigquery_reservation_v1.types.ListAssignmentsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., Awaitable[reservation.ListAssignmentsResponse]],
request: reservation.ListAssignmentsRequest,
response: reservation.ListAssignmentsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.bigquery_reservation_v1.types.ListAssignmentsRequest):
The initial request object.
response (google.cloud.bigquery_reservation_v1.types.ListAssignmentsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = reservation.ListAssignmentsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[reservation.ListAssignmentsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[reservation.Assignment]:
async def async_generator():
async for page in self.pages:
for response in page.assignments:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class SearchAssignmentsPager:
"""A pager for iterating through ``search_assignments`` requests.
This class thinly wraps an initial
:class:`google.cloud.bigquery_reservation_v1.types.SearchAssignmentsResponse` object, and
provides an ``__iter__`` method to iterate through its
``assignments`` field.
If there are more pages, the ``__iter__`` method will make additional
``SearchAssignments`` requests and continue to iterate
through the ``assignments`` field on the
corresponding responses.
All the usual :class:`google.cloud.bigquery_reservation_v1.types.SearchAssignmentsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., reservation.SearchAssignmentsResponse],
request: reservation.SearchAssignmentsRequest,
response: reservation.SearchAssignmentsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.bigquery_reservation_v1.types.SearchAssignmentsRequest):
The initial request object.
response (google.cloud.bigquery_reservation_v1.types.SearchAssignmentsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = reservation.SearchAssignmentsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[reservation.SearchAssignmentsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[reservation.Assignment]:
for page in self.pages:
yield from page.assignments
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class SearchAssignmentsAsyncPager:
"""A pager for iterating through ``search_assignments`` requests.
This class thinly wraps an initial
:class:`google.cloud.bigquery_reservation_v1.types.SearchAssignmentsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``assignments`` field.
If there are more pages, the ``__aiter__`` method will make additional
``SearchAssignments`` requests and continue to iterate
through the ``assignments`` field on the
corresponding responses.
All the usual :class:`google.cloud.bigquery_reservation_v1.types.SearchAssignmentsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., Awaitable[reservation.SearchAssignmentsResponse]],
request: reservation.SearchAssignmentsRequest,
response: reservation.SearchAssignmentsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.bigquery_reservation_v1.types.SearchAssignmentsRequest):
The initial request object.
response (google.cloud.bigquery_reservation_v1.types.SearchAssignmentsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = reservation.SearchAssignmentsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[reservation.SearchAssignmentsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[reservation.Assignment]:
async def async_generator():
async for page in self.pages:
for response in page.assignments:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
| apache-2.0 | -4,435,182,498,768,271,000 | 39.22449 | 101 | 0.659379 | false | 4.66961 | false | false | false |
carrdelling/topcoder | dp/prime_soccer.py | 1 | 2017 | """
Problem name: PrimeSoccer
Class: SRM 422, Division I Level One
Description: https://community.topcoder.com/stat?c=problem_statement&pm=10240
"""
import math
import numpy as np
def is_prime(a):
""" Check (in O(N)) whether a is prime or not """
if a < 2:
return False
for i in range(2, int(math.sqrt(a)) +1):
if a % i == 0:
return False
return True
def get_distribution(skill, rounds, dist):
""" Computes the full distribution of possible scores given a number of
rounds left and a skill value for the team
"""
if rounds == 0:
return dist
dist[19-rounds] = dist[18-rounds] * skill
for score in sorted(dist, reverse=True)[1:-1]:
prob = (dist[score] * (1.0 - skill)) + (dist[score-1] * skill)
dist[score] = prob
dist[0] *= (1.0 - skill)
return get_distribution(skill, rounds - 1, dist)
def prime_score(skill):
""" Compute the probability that a team reaches a prime result given its
skill score
"""
dist = {0: 1.0 - skill, 1: skill}
dist = get_distribution(skill, 17, dist)
prime = 0.0
composite = 0.0
for score in dist:
if is_prime(score):
prime += dist[score]
else:
composite += dist[score]
return prime / (prime + composite)
def solve(args):
""" Compute the prime probability for each team skill, and aggregate them
"""
team_a, team_b = args
prime_a = prime_score(team_a / 100)
prime_b = prime_score(team_b / 100)
return prime_a + ((1.0 - prime_a) * prime_b)
if __name__ == "__main__":
test_cases = [((50, 50), 0.5265618908306351),
((100, 100), 0.0),
((12, 89), 0.6772047168840167)
]
for index, case in enumerate(test_cases):
output = solve(case[0])
assert np.isclose(output, case[1]), 'Case {} failed: {} != {}'.format(
index, output, case[1])
else:
print('All tests OK')
| apache-2.0 | 3,778,689,198,419,414,500 | 21.411111 | 78 | 0.5647 | false | 3.418644 | false | false | false |
hookehu/utility | editors/studio/main.py | 1 | 3082 | #-*- coding:utf-8 -*-
import wx
import os
import setting
class MyFrame(wx.Frame):
"""We simple derive a new class of Frame"""
def __init__(self, parent, title):
wx.Frame.__init__(self, parent, title=title,size=(600,600))
self.cur_frame = None
self.init_panels()
self.init_menu()
self.init_statusbar()
self.Show(True)
self.Bind(wx.EVT_SIZE, self.on_size)
def on_size(self, evt):
if self.cur_frame:
self.cur_frame.SetSize(self.Size)
def init_panels(self):
#self.tree_panel = TreePanel(self)
pass
def gen_on_menu(self, container, k):
def func(self):
container.on_menu(k)
return func
def init_menu(self):
filemenu = wx.Menu()
for k, v in setting.APPS.items():
menu = filemenu.Append(wx.ID_ANY, k, " ")
print menu
self.Bind(wx.EVT_MENU, self.gen_on_menu(self, k), menu)
menu_exit = filemenu.Append(wx.ID_ANY, "Exit", "Termanate the program")
filemenu.AppendSeparator()
menu_about = filemenu.Append(wx.ID_ANY, "About", "Information about this program")#设置菜单的内容
menuBar = wx.MenuBar()
menuBar.Append(filemenu, u"编辑器")
self.SetMenuBar(menuBar)#创建菜单条
self.Bind(wx.EVT_MENU, self.on_exit, menu_exit)#把出现的事件,同需要处理的函数连接起来
def init_statusbar(self):
self.CreateStatusBar()#创建窗口底部的状态栏
def on_about(self,e):#about按钮的处理函数
dlg = wx.MessageDialog(self,"A samll text editor", "About sample Editor",wx.OK)#创建一个对话框,有一个ok的按钮
dlg.ShowModal()#显示对话框
dlg.Destroy()#完成后,销毁它。
def on_exit(self,e):
self.Close(True)
def on_menu(self, key):
pkg = setting.APPS.get(key, None)
print key, pkg
if pkg:
p = __import__(pkg)
if self.cur_frame:
self.cur_frame.Close()
self.cur_frame = None
self.cur_frame = p.init(self)
def on_open(self,e):
"""open a file"""
self.dirname = ''
dlg = wx.FileDialog(self, "Choose a file", self.dirname, "", "*.*", wx.FD_OPEN)#调用一个函数打开对话框
if dlg.ShowModal() == wx.ID_OK:
self.filename = dlg.GetFilename()
self.dirname = dlg.GetDirectory()
self.address = os.path.join(self.dirname,self.filename)
f = open(self.address,"r")
file = (f.read()).decode(encoding='utf-8')#解码,使文件可以读取中文
f.close()
self.control.Clear()
self.control.AppendText(file)#把打开的文件内容显示在多行文本框内
dlg.Destroy()
def on_save(self, e):
date = (self.control.GetValue()).encode(encoding="utf-8")#编码,使中文可以正确存储
f = open(self.address, 'w')
f.write(date)
f.close()#把文本框内的数据写入并关闭文件
dlg = wx.MessageDialog(self, u"文件已经成功保存", u"消息提示", wx.OK)
dlg.ShowModal()
dlg.Destroy()
if __name__ == "__main__":
app = wx.App(False)
frame = MyFrame(None, '编辑器')
app.MainLoop() | gpl-2.0 | 8,455,450,927,402,620,000 | 28.393617 | 100 | 0.622013 | false | 2.378984 | false | false | false |
jreback/pandas | pandas/tests/dtypes/cast/test_find_common_type.py | 1 | 5038 | import numpy as np
import pytest
from pandas.core.dtypes.cast import find_common_type
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
from pandas import Categorical, Index
@pytest.mark.parametrize(
"source_dtypes,expected_common_dtype",
[
((np.int64,), np.int64),
((np.uint64,), np.uint64),
((np.float32,), np.float32),
((object,), object),
# Into ints.
((np.int16, np.int64), np.int64),
((np.int32, np.uint32), np.int64),
((np.uint16, np.uint64), np.uint64),
# Into floats.
((np.float16, np.float32), np.float32),
((np.float16, np.int16), np.float32),
((np.float32, np.int16), np.float32),
((np.uint64, np.int64), np.float64),
((np.int16, np.float64), np.float64),
((np.float16, np.int64), np.float64),
# Into others.
((np.complex128, np.int32), np.complex128),
((object, np.float32), object),
((object, np.int16), object),
# Bool with int.
((np.dtype("bool"), np.int64), object),
((np.dtype("bool"), np.int32), object),
((np.dtype("bool"), np.int16), object),
((np.dtype("bool"), np.int8), object),
((np.dtype("bool"), np.uint64), object),
((np.dtype("bool"), np.uint32), object),
((np.dtype("bool"), np.uint16), object),
((np.dtype("bool"), np.uint8), object),
# Bool with float.
((np.dtype("bool"), np.float64), object),
((np.dtype("bool"), np.float32), object),
(
(np.dtype("datetime64[ns]"), np.dtype("datetime64[ns]")),
np.dtype("datetime64[ns]"),
),
(
(np.dtype("timedelta64[ns]"), np.dtype("timedelta64[ns]")),
np.dtype("timedelta64[ns]"),
),
(
(np.dtype("datetime64[ns]"), np.dtype("datetime64[ms]")),
np.dtype("datetime64[ns]"),
),
(
(np.dtype("timedelta64[ms]"), np.dtype("timedelta64[ns]")),
np.dtype("timedelta64[ns]"),
),
((np.dtype("datetime64[ns]"), np.dtype("timedelta64[ns]")), object),
((np.dtype("datetime64[ns]"), np.int64), object),
],
)
def test_numpy_dtypes(source_dtypes, expected_common_dtype):
assert find_common_type(source_dtypes) == expected_common_dtype
def test_raises_empty_input():
with pytest.raises(ValueError, match="no types given"):
find_common_type([])
@pytest.mark.parametrize(
"dtypes,exp_type",
[
([CategoricalDtype()], "category"),
([object, CategoricalDtype()], object),
([CategoricalDtype(), CategoricalDtype()], "category"),
],
)
def test_categorical_dtype(dtypes, exp_type):
assert find_common_type(dtypes) == exp_type
def test_datetimetz_dtype_match():
dtype = DatetimeTZDtype(unit="ns", tz="US/Eastern")
assert find_common_type([dtype, dtype]) == "datetime64[ns, US/Eastern]"
@pytest.mark.parametrize(
"dtype2",
[
DatetimeTZDtype(unit="ns", tz="Asia/Tokyo"),
np.dtype("datetime64[ns]"),
object,
np.int64,
],
)
def test_datetimetz_dtype_mismatch(dtype2):
dtype = DatetimeTZDtype(unit="ns", tz="US/Eastern")
assert find_common_type([dtype, dtype2]) == object
assert find_common_type([dtype2, dtype]) == object
def test_period_dtype_match():
dtype = PeriodDtype(freq="D")
assert find_common_type([dtype, dtype]) == "period[D]"
@pytest.mark.parametrize(
"dtype2",
[
DatetimeTZDtype(unit="ns", tz="Asia/Tokyo"),
PeriodDtype(freq="2D"),
PeriodDtype(freq="H"),
np.dtype("datetime64[ns]"),
object,
np.int64,
],
)
def test_period_dtype_mismatch(dtype2):
dtype = PeriodDtype(freq="D")
assert find_common_type([dtype, dtype2]) == object
assert find_common_type([dtype2, dtype]) == object
interval_dtypes = [
IntervalDtype(np.int64),
IntervalDtype(np.float64),
IntervalDtype(np.uint64),
IntervalDtype(DatetimeTZDtype(unit="ns", tz="US/Eastern")),
IntervalDtype("M8[ns]"),
IntervalDtype("m8[ns]"),
]
@pytest.mark.parametrize("left", interval_dtypes)
@pytest.mark.parametrize("right", interval_dtypes)
def test_interval_dtype(left, right):
result = find_common_type([left, right])
if left is right:
assert result is left
elif left.subtype.kind in ["i", "u", "f"]:
# i.e. numeric
if right.subtype.kind in ["i", "u", "f"]:
# both numeric -> common numeric subtype
expected = IntervalDtype(np.float64)
assert result == expected
else:
assert result == object
else:
assert result == object
@pytest.mark.parametrize("dtype", interval_dtypes)
def test_interval_dtype_with_categorical(dtype):
obj = Index([], dtype=dtype)
cat = Categorical([], categories=obj)
result = find_common_type([dtype, cat.dtype])
assert result == dtype
| bsd-3-clause | -495,415,800,500,659,260 | 28.635294 | 76 | 0.583565 | false | 3.422554 | true | false | false |
alirizakeles/tendenci | tendenci/apps/base/management/commands/downgrade_user.py | 1 | 1864 | import sys
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import Group, Permission, User
from django.core.exceptions import ObjectDoesNotExist
class Command(BaseCommand):
"""
Downgrades a user to just a regular user
This command does the following to the user account:
* Removes them from all groups
* Removes all user level permissions
* Sets is_staff to 0
* Sets is_superuser to 0
* Sets is_active to 1
* Removes them from all tendenci user_groups
"""
def add_arguments(self, parser):
parser.add_argument('--username',
dest='username',
required=True,
help='Username of the user account being downgraded')
def handle(self, *args, **options):
from tendenci.apps.user_groups.models import GroupMembership
verbosity = options['verbosity']
username = options['username']
if not username:
raise CommandError('downgrade_user: --username parameter is required')
# get the user
try:
u = User.objects.get(username=username)
except ObjectDoesNotExist:
print 'User with username (%s) could not be found' % username
return
# Remove the user from all groups
u.groups.clear()
# Remove all user-level permissions
u.user_permissions.clear()
# Reset permission bits
u.is_staff = False
u.is_superuser = False
u.is_active = True
u.save()
# Remove the tendenci group permissions
group_memberships = GroupMembership.objects.filter(member=u)
for m in group_memberships:
m.delete()
if verbosity >= 2:
print 'Done downgrading user (%s).' % u
| gpl-3.0 | -4,922,462,935,471,569,000 | 29.557377 | 82 | 0.626073 | false | 4.66 | false | false | false |
amluto/time_domain_tools | src/td_analysis.py | 1 | 6199 | # Time domain tools for CASPER
# Copyright (C) 2011 Massachusetts Institute of Technology
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.
"""Time-domain analysis functions"""
from __future__ import division
import scipy.signal
import math
import numpy
import weakref
def _asfloat(x):
"""Internal helper to coerce an array to floating-point."""
if isinstance(x, numpy.ndarray) and x.dtype == numpy.float64:
return x
else:
return numpy.asarray(x, numpy.float64)
def _as_float_or_complex(x):
"""Internal helper to coerce an array to floating-point or complex."""
if (isinstance(x, numpy.ndarray)
and x.dtype in (numpy.float64, numpy.complex128)):
return x
else:
return numpy.asarray(x, numpy.complex128)
_time_coord_cache = weakref.WeakValueDictionary()
def _time_coord(fs, num_samples, offset = 0, dec = 1):
spec = (num_samples, offset, dec)
try:
return _time_coord_cache[spec]
except KeyError:
ret = numpy.arange(offset, num_samples + offset, dec) / fs
ret.setflags(write = False)
ret = ret[:] # Make sure it never becomes writeable.
_time_coord_cache[spec] = ret
return ret
def mean_power(signal):
signal = _as_float_or_complex(signal)
if signal.ndim != 1:
raise TypeError, 'signal must be one-dimensional'
# This is the least inefficient way I can think of.
return numpy.linalg.norm(signal, ord = 2)**2 / len(signal)
class ToneEstimate(object):
def __init__(self, estimator, f, data, tone):
self.__est = estimator
self.__fs = estimator._fs
self.__f = f
self.__datalen = len(data)
self.__t_data = None
self.__t_tone = None
self.tone = tone
self.total_power = mean_power(data)
self.inband_power = mean_power(self.tone) / 2
self.inband_noise = ((self.total_power - self.inband_power)
/ (1 - estimator.fractional_band)
* estimator.fractional_band)
self.est_tone_power = self.inband_power - self.inband_noise
# We compute t_data and t_tone as lazily as possible.
@property
def t_data(self):
if self.__t_data is None:
self.__t_data = _time_coord(self.__fs, self.__datalen)
return self.__t_data
@property
def t_tone(self):
if self.__t_tone is None:
self.__t_tone = _time_coord(self.__fs,
len(self.tone) * self.__est._dec,
self.__est.offset, self.__est._dec)
return self.__t_tone
class ToneEstimator(object):
def __init__(self, fs, bw):
self._fs = fs
self._bw = bw
self._dec = 1 # Decimation factor
# Generate a symmetric FIR filter.
# Some scipy versions give a bogus warning. Ignore it.
self._nyquist = fs / 2
cutoff = bw / self._nyquist
firlen = 10.0 / cutoff
# Round to a power of 2 (so fftconvolve can be super fast)
firlen = 2**int(numpy.ceil(numpy.log2(firlen)))
old_err = numpy.seterr(invalid='ignore')
self._filter = scipy.signal.firwin(
firlen,
cutoff = cutoff)
numpy.seterr(**old_err)
self.offset = (len(self._filter) - 1) / 2
self.fractional_band = bw / self._nyquist
def estimate_tone(self, f, data):
"""Returns a ToneEstimate for the cosine wave at frequency f.
Note that the mean square of the tone is *twice* the mean square of
the original cosine wave."""
f = float(f)
data = _asfloat(data)
if data.ndim != 1:
raise TypeError, 'data must be one-dimensional'
baseband = 2 * data * numpy.exp(-2j * math.pi * f / self._fs
* numpy.arange(0, len(data)))
if len(data) < len(self._filter):
raise (ValueError,
'You need at least %d samples for specified bandwidth'
% len(self._filter))
tone = scipy.signal.fftconvolve(baseband, self._filter, mode='valid')
if self._dec != 1:
tone = tone[::self._dec]
return ToneEstimate(self, f, data, tone)
class ToneDecimatingEstimator(ToneEstimator):
def __init__(self, fs, bw):
super(ToneDecimatingEstimator, self).__init__(fs, bw)
cutoff = self._bw / self._nyquist
self._dec = int(2.0 / cutoff) # Oversample by 2 to minimize aliasing.
def estimate_tone(self, f, data):
"""Returns a ToneEstimate for the cosine wave at frequency f.
Note that the mean square of the tone is *twice* the mean square of
the original cosine wave."""
f = float(f)
data = _asfloat(data)
if data.ndim != 1:
raise TypeError, 'data must be one-dimensional'
baseband = 2 * data * numpy.exp(-2j * math.pi * f / self._fs
* numpy.arange(0, len(data)))
if len(data) < len(self._filter):
raise (ValueError,
'You need at least %d samples for specified bandwidth'
% len(self._filter))
valid_len = (len(data) - len(self._filter) + self._dec) // self._dec
tone = numpy.zeros(valid_len, dtype = baseband.dtype)
for i in xrange(valid_len):
pos = self._dec * i
tone[i] = numpy.dot(self._filter,
baseband[pos:pos+len(self._filter)])
return ToneEstimate(self, f, data, tone)
| gpl-2.0 | 2,539,962,466,386,287,600 | 34.626437 | 77 | 0.589127 | false | 3.75697 | false | false | false |
Pharylon/PiClock | clock.py | 1 | 1209 | import datetime
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
#GPIO.cleanup()
ypins = [17, 18, 27, 22, 23, 24, 25]
xpins = [5, 6, 12]
def setArray(myInt, array):
asBinary = "{0:b}".format(myInt).zfill(7)
for i in range(0, 7):
if (asBinary[i] == "0"):
array[i] = False
else:
array[i] = True
for i in xpins:
GPIO.setup(i, GPIO.IN)
#GPIO.output(i, False)
for i in ypins:
GPIO.setup(i, GPIO.IN)
#GPIO.output(i, False)
grid = [[0 for x in range(7)] for x in range(3)]
'''
GPIO.setup(17, GPIO.OUT)
GPIO.setup(5, GPIO.OUT)
GPIO.output(17, False)
GPIO.output(5, True)
time.sleep(1)
'''
while True:
now = datetime.datetime.now()
setArray(now.hour, grid[0])
setArray(now.minute, grid[1])
setArray(now.second, grid[2])
for i in range(0, 7):
for j in range(0, 3):
if (grid[j][i]):
GPIO.setup(xpins[j], GPIO.OUT)
GPIO.setup(ypins[i], GPIO.OUT)
GPIO.output(xpins[j], True)
GPIO.output(ypins[i], False)
GPIO.setup(xpins[j], GPIO.IN)
GPIO.setup(ypins[i], GPIO.IN)
GPIO.cleanup()
| mit | -8,457,110,341,301,701,000 | 19.981818 | 49 | 0.543424 | false | 2.722973 | false | false | false |
hongta/practice-python | data_structures/tree/binary_search_tree.py | 1 | 5713 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from tree_node import TreeNode
class BinarySearchTree(object):
def __init__(self):
self._root = None;
##################
## Iterator method
def __iter__(self):
current = self._find_minmum(self._root)
# and then, until we have reached the end:
while current is not None:
yield current
# in order to get from one Node to the next one:
current = self.successor(current)
def _replace_with(self, old_node, new_node):
if not old_node:
return False
if old_node.parent:
if old_node.parent.left == old_node:
old_node.parent.set_children(left=new_node)
else:
old_node.parent.set_children(right=new_node)
else:
if new_node:
new_node.parent = None
self._root = new_node
return True
def insert(self, k, payload=None):
# tree is empty construct the tree
if not self._root:
self._root= TreeNode(k,payload)
else:
self._insert(self._root, k, payload)
def _insert(self, tree_node, k, payload=None):
if not tree_node:
return TreeNode(k, payload)
if k < tree_node.key:
tree_node.set_children(left=self._insert(tree_node.left, k, payload))
elif k > tree_node.key:
tree_node.set_children(right=self._insert(tree_node.right, k, payload))
else:
tree_node.payload = payload
return tree_node
def remove_node(self, node):
if None:
return
node.key = node.payload = node.left = node.right = node.parent = None
del node
def delete(self, k):
node = self.search(k)
if not node:
return
p = node.parent
if node.left and node.right:
# if the node has two children, we replace the node's key and payload
# with minnum of the right substree
min_on_right = self._find_minmum(node.right)
min_parent = min_on_right.parent
node.key = min_on_right.key
node.payload = min_on_right.payload
if min_on_right != node.right:
#update min right child, make it become min's parent's left child
min_parent.set_children(left=min_on_right.right)
else:
node.set_children(right=min_on_right.right)
self.remove_node(min_on_right)
else:
# if the node has 0-1 child, we delete this node
old_node = node
if not node.left and not node.right:
# no child
node = None
elif node.left:
# has one left child
node.left.parent = p
node = node.left
elif node.right:
# has one right child
node.right.parent = p
node = node.right
if not p:
#trying to delete root node
self._root = node
else:
if p.left == old_node:
p.left = node
else:
p.right = node
self.remove_node(old_node)
def find_minnum(self):
return self._find_minmum(self._root)
def _find_minmum(self, node):
if not node:
return None
while node.left:
node = node.left
return node
def find_maxmum(self):
return self._find_maxmum(self._root)
def _find_maxmum(self, node):
if not node:
return None
while node.right:
node = node.right
return node
def traverse(self):
return self._traverse(self._root)
# Python 2 version
def _traverse(self, node):
if node:
if node.left:
for n in self._traverse(node.left):
yield n
yield node
if node.right:
for n in self._traverse(node.right):
yield n
# Python 3 version
# def _traverse(self, node):
# if node:
# yield from self._traverse(node.left)
# yield node
# yield from self._traverse(node.right)
def successor(self, node):
if not node:
return None
if node.right:
return self._find_minmum(node.right)
p = node.parent
while p and p.right == node:
node = p
p = p.parent
return p
def predecessor(self, node):
if not node:
return None
if node.left:
return self._find_maxmum(node.left)
p = node.parent
while p and p.left == node:
node = p
p = p.parent
return p
def height(self):
pass
def search(self, k):
return self._search(self._root, k)
def _search(self, node, k):
if not node:
return None
if k == node.key:
return node
if k < node.key:
return self._search(node.left, k)
else:
return self._search(node.right, k)
def count():
pass
if __name__ == "__main__":
t = BinarySearchTree()
# t.insert(3)
# t.insert(8)
# t.insert(12)
# t.insert(1)
# t.insert(15)
# t.insert(7)
data = [30, 25, 49, 35, 68, 33, 34, 38, 40, 37, 36]
for i in data:
t.insert(i)
for v in t.traverse():
print v.key
d = t._find_maxmum(t._root)
while d:
print d.key
d = t.successor(d)
| mit | 3,723,569,289,260,304,000 | 25.086758 | 83 | 0.501663 | false | 3.959113 | false | false | false |
evite/nudge | nudge/automagic/scribes/python_stubs.py | 1 | 3903 | #
# Copyright (C) 2011 Evite LLC
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from nudge.automagic.scribes.default import DefaultGenerator, get_template
from nudge.utils import Dict, breakup_path
class PythonStubGenerator(DefaultGenerator):
extension = 'py'
template = get_template('python.txt')
def _prepare_data(self, project):
def arg_string(endpoint):
args = []
args.extend([arg_repr(arg) for arg in endpoint.sequential])
args.extend([arg_repr(arg, True) for arg in endpoint.named])
return ', '.join(args)
def arg_repr(arg, named=False):
if named:
return '='.join([str(arg.name), str(None)])
return arg.name
modules = {}
for section in project.sections:
for ep in section.endpoints:
# -- module_name and class_name can both be ''. They'll be put
# in the default module as simple functions
# -- module_name..function_name means a module level function
# in the given module
# -- something.otherthing = default module, something =
# class_name, otherthing = function_name
# -- something = default module, module level function called
# something
module, class_name, function = breakup_path(ep.function_name)
current = (modules.setdefault(module,{})
.setdefault(class_name, {})
.setdefault(function,
Dict({'sequential':[],
'named':{}}))
)
# Preserve order...it's super important
if len(ep.sequential) > len(current.sequential):
current.sequential = ep.sequential
func_desc = dict([(arg.name, arg) for arg in current.named])
current.named.update(func_desc)
del project['sections']
module_list = []
for module, classes in modules.iteritems():
module_dict = Dict({
'module_name':module,
'classes':[],
'project':project
})
for class_name, endpoints in classes.iteritems():
data = [{'function_name':name, 'args':arg_string(args)}
for name, args in endpoints.iteritems()]
class_name = class_name or False
class_desc = [{"name":class_name, "endpoints":data}]
module_dict.classes.extend(class_desc)
module_list.append(Dict(module_dict))
return module_list
def generate(self, project):
module_list = self._prepare_data(Dict(project))
for module in module_list:
# functions without modules go into the default file
output_file = self.output_file
# otherwise they go into their specified module file
if module.module_name:
output_file = self._filepath(module.module_name)
self._render_and_write(self.template, module, output_file)
| lgpl-2.1 | 3,267,725,642,107,230,700 | 44.383721 | 79 | 0.578786 | false | 4.708082 | false | false | false |
joelverhagen/PingdomBackup | pingdombackup/tool.py | 1 | 3294 | import argparse
import sys
import logging
import pkg_resources
from . import __version__
from .PingdomBackup import PingdomBackup
def tool_main():
# this is done without ArgumentParser so required args are not enforced
if '-v' in sys.argv or '--version' in sys.argv:
print(__version__)
sys.exit(0)
# initialize the parser
parser = argparse.ArgumentParser(
prog='pingdombackup',
description='Backup Pingdom result logs to a SQLite database.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
argument_default=argparse.SUPPRESS)
# meta arguments
parser.add_argument('-v', '--version',
dest='version', action='store_true', default=False,
help='show the version and exit')
# required arguments
parser.add_argument('-e', '--email',
dest='email', required=True,
help='your Pingdom email address (used for logging in)')
parser.add_argument('-p', '--password',
dest='password', required=True,
help='your Pingdom password (used for logging in)')
parser.add_argument('-a', '--app-key',
dest='app_key', required=True,
help='a valid Pingdom API application key, see: https://my.pingdom.com/account/appkeys')
parser.add_argument('-d', '--db-path',
dest='db_path', default='pingdom.db',
help='a path to the SQLite database used for storage')
# conditionally required arguments
parser.add_argument('-n', '--check-name',
dest='check_name', default=None,
help='the name of the check to update')
# optional arguments
parser.add_argument('--offine-check',
dest='offline_check', action='store_true', default=False,
help='get the check ID by name from the database, instead of the Pingdom API')
parser.add_argument('--no-update-results',
dest='no_update_results', action='store_true', default=False,
help='do not update the results for the specified check')
parser.add_argument('--update-probes',
dest='update_probes', action='store_true', default=False,
help='update the probes')
parser.add_argument('--update-checks',
dest='update_checks', action='store_true', default=False,
help='update the checks for your account')
parser.add_argument('--verbose',
dest='verbose', action='store_true', default=False,
help='trace progress')
# parse
args = parser.parse_args()
if not args.no_update_results and args.check_name is None:
parser.error('-n/--check-name is required when updating results')
if args.verbose:
logger = logging.getLogger('PingdomBackup')
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.INFO)
pb = PingdomBackup(args.email, args.password, args.app_key, args.db_path)
if args.update_probes:
pb.update_probes()
if args.update_checks or (not args.no_update_results and not args.offline_check):
pb.update_checks()
if not args.no_update_results:
check = pb.get_check_by_name(args.check_name)
if check is None:
parser.error('no check with name "{0}" was found'.format(args.check_name))
pb.update_results(check)
if __name__ == '__main__':
tool_main()
| mit | -6,555,140,197,749,762,000 | 36.431818 | 96 | 0.649059 | false | 3.954382 | false | false | false |
z0rr0/eshop | shop/sales/admin.py | 1 | 1097 | from django.contrib import admin
from .models import Category, Product, Order
class ProductAdmin(admin.ModelAdmin):
"""docstring for ProductAdmin"""
list_display = ('name', 'category', 'price')
search_fields = ('name', 'desc')
list_filter = ('category', 'modified')
class OrderAdmin(admin.ModelAdmin):
"""docstring for OrderAdmin"""
def order_products(order):
names = []
for ps in order.productset_set.all():
names.append("{0} [{1}]".format(ps.product.name, ps.number))
return '; '.join(names)
def total(order):
return order.total()
def make_sent(self, request, queryset):
queryset.update(status=1)
def make_received(self, request, queryset):
queryset.update(status=2)
list_display = ('id', 'status', 'customer', order_products, total, 'modified')
search_fields = ('desc',)
list_filter = ('status', 'modified', 'created')
actions = ('make_sent', 'make_received')
admin.site.register(Category)
admin.site.register(Product, ProductAdmin)
admin.site.register(Order, OrderAdmin)
| mit | -7,115,111,287,846,792,000 | 28.648649 | 82 | 0.64722 | false | 3.782759 | false | false | false |
domthu/gasistafelice | gasistafelice/base/backends.py | 1 | 5773 | from django.db.models import Model
from django.contrib.contenttypes.models import ContentType
import permissions.utils
from permissions.models import ObjectPermission
from gasistafelice.auth.models import GlobalPermission
class DummyBackend(object):
"""A dummy authorization backend intended only for development purposes.
Using this backend, permission checks always succeed ! ;-)
"""
supports_object_permissions = True
supports_anonymous_user = True
supports_inactive_user = True
def authenticate(self, username, password):
return None
def has_perm(self, user_obj, perm, obj=None):
return True
class ObjectPermissionsBackend(object):
"""An authorization backend for Django for role-based permission checking.
Support global (per-model) and local (per-instance) Permissions.
Use it together with the default ModelBackend like this:
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'gasistafelice.base.backends.ObjectPermissionsBackend',
)
Then you can use it like:
user.has_perm("view", your_object)
where `your_object` can be a ContentType instance (if you want to check global permissions)
or a model instance (if you want to check local permissions).
"""
supports_object_permissions = True
supports_anonymous_user = True
supports_inactive_user = True
def authenticate(self, username, password):
return None
def get_group_permissions(self, user_obj, obj=None):
"""
Returns the set of Permissions (locals and globals) this User has been granted
through his/her Groups (via the Roles assigned to them).
If the `obj` argument is a model (actually, a ContentType instance), all (global) Permissions for that model are returned.
If the `obj` argument is a model instance all (local) Permissions for that instance are returned.
"""
# iterate on each Group the User belongs to
roles = []
groups = user_obj.groups.all()
for group in groups:
roles.extend(permissions.utils.get_roles(group))
if isinstance(obj, ContentType): # `obj` is a model class, so check for global Permissions for this model
perms = GlobalPermission.objects.filter(content_type=obj, role__in=roles)
elif isinstance(obj, Model) : # `obj` is a model instance, so check for local Permissions for this instance
ct = ContentType.objects.get_for_model(obj)
perms = ObjectPermission.objects.filter(content_type=ct, content_id=obj.id, role__in=roles)
else: # `obj` is neither a model class nor a model instance (e.g. obj == None), so listing Permissions is meaningless
raise TypeError, "Can't get permissions for the provided object."
return perms
def get_all_permissions(self, user_obj, obj=None):
"""
Returns the set of all Permissions (locals or globals) this User has been granted
(directly, via Roles assigned to him/her, or indirectly via those assigned to the Groups he/she belongs to).
If the `obj` argument is a model (actually, a ContentType instance), all (global) Permissions for that model are returned.
If the `obj` argument is a model instance all (local) Permissions for that instance are returned.
"""
# retrieve all the Roles assigned to the User (directly or indirectly)
roles = permissions.utils.get_roles(user_obj)
if isinstance(obj, ContentType): # `obj` is a model class, so check for global Permissions for this model
perms = GlobalPermission.objects.filter(content_type=obj, role__in=roles)
elif isinstance(obj, Model) : # `obj` is a model instance, so check for local Permissions for this instance
ct = ContentType.objects.get_for_model(obj)
perms = ObjectPermission.objects.filter(content_type=ct, content_id=obj.id, role__in=roles)
else: # `obj` is neither a model class nor a model instance (e.g. obj == None), so listing Permissions is meaningless
raise TypeError, "Can't get permissions for the provided object."
return perms
def has_perm(self, user_obj, perm, obj=None):
"""Checks whether a User has a global (local) Permission on a model (model instance).
This should be the primary method to check wether a User has a certain Permission.
Parameters
==========
perm
The codename of the Permission which should be checked.
user_obj
The User for which the Permission should be checked.
obj
The Object (either a model or model instance) for which the Permission should be checked.
"""
# if User is not authenticated or inactive, he has no Permissions
if user_obj.is_anonymous() or not user_obj.is_active():
return False
if isinstance(obj, ContentType): # `obj` is a model class, so check for global Permissions for this model
return perm in self.get_all_permissions(user_obj, obj)
elif isinstance(obj, Model) : # `obj` is a model instance, so check for local Permissions for this instance
return permissions.utils.has_permission(obj, user_obj, perm)
else: # `obj` is neither a model class nor a model instance (e.g. obj == None), so Permissions check is meaningless
raise TypeError, "Can't check permissions for the provided object." | agpl-3.0 | 7,847,195,928,386,982,000 | 44.464567 | 138 | 0.652347 | false | 4.6184 | false | false | false |
SymbiFlow/prjxray | utils/mergedb.py | 1 | 3632 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2020 The Project X-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
import os, sys, re
from prjxray import util
TAG_PART_RE = re.compile(r"^[a-zA-Z][0-9a-zA-Z_]*(\[[0-9]+\])?$")
def check_tag_name(tag):
'''
Checks if the tag name given by the used conforms to the valid fasm
name rules.
>>> check_tag_name("CELL.feature19.ENABLED")
True
>>> check_tag_name("FEATURE")
True
>>> check_tag_name("TAG.")
False
>>> check_tag_name(".TAG")
False
>>> check_tag_name("CELL..FEATURE")
False
>>> check_tag_name("CELL.3ENABLE")
False
>>> check_tag_name("FEATURE.12.ON")
False
'''
for part in tag.split("."):
if not len(part) or TAG_PART_RE.match(part) is None:
return False
return True
def run(fn_ins, fn_out, strict=False, track_origin=False, verbose=False):
# tag to bits
entries = {}
# tag to (bits, line)
tags = dict()
# bits to (tag, line)
bitss = dict()
for fn_in in fn_ins:
for line, (tag, bits, mode, origin) in util.parse_db_lines(fn_in):
line = line.strip()
assert mode is not None or mode != "always", "strict: got ill defined line: %s" % (
line, )
if not check_tag_name(tag):
assert not strict, "strict: Invalid tag name '{}'".format(tag)
if tag in tags:
orig_bits, orig_line, orig_origin = tags[tag]
if orig_bits != bits:
print(
"WARNING: got duplicate tag %s" % (tag, ),
file=sys.stderr)
print(" Orig line: %s" % orig_line, file=sys.stderr)
print(" New line : %s" % line, file=sys.stderr)
assert not strict, "strict: got duplicate tag"
origin = os.path.basename(os.getcwd())
if track_origin and orig_origin != origin:
origin = orig_origin + "," + origin
if bits in bitss:
orig_tag, orig_line = bitss[bits]
if orig_tag != tag:
print(
"WARNING: got duplicate bits %s" % (bits, ),
file=sys.stderr)
print(" Orig line: %s" % orig_line, file=sys.stderr)
print(" New line : %s" % line, file=sys.stderr)
assert not strict, "strict: got duplicate bits"
if track_origin and origin is None:
origin = os.path.basename(os.getcwd())
entries[tag] = (bits, origin)
tags[tag] = (bits, line, origin)
if bits != None:
bitss[bits] = (tag, line)
util.write_db_lines(fn_out, entries, track_origin)
def main():
import argparse
parser = argparse.ArgumentParser(description="Combine multiple .db files")
util.db_root_arg(parser)
parser.add_argument('--verbose', action='store_true', help='')
parser.add_argument('--track_origin', action='store_true', help='')
parser.add_argument('--out', help='')
parser.add_argument('ins', nargs='+', help='Last takes precedence')
args = parser.parse_args()
run(
args.ins,
args.out,
strict=int(os.getenv("MERGEDB_STRICT", "1")),
track_origin=args.track_origin,
verbose=args.verbose)
if __name__ == '__main__':
main()
| isc | 1,326,039,727,474,506,500 | 30.310345 | 95 | 0.535793 | false | 3.668687 | false | false | false |
aerialhedgehog/VyPy | trunk/VyPy/data/scaling/Linear.py | 1 | 1454 |
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
from ScalingFunction import ScalingFunction
# ----------------------------------------------------------------------
# Linear Scaling Function
# ----------------------------------------------------------------------
class Linear(ScalingFunction):
def __init__(self,scale,center=0.0):
""" o / scl ==> (o-center)/scale
o * scl ==> (o*scale)+center
"""
self.scale = scale
self.center = center
def set_scaling(self,other):
return (other-self.center)/self.scale
def unset_scaling(self,other):
return other*self.scale + self.center
def set_scaling_gradient(self,other):
return other/self.scale
def unset_scaling_gradient(self,other):
return other*self.scale
# ----------------------------------------------------------------------
# Module Tests
# ----------------------------------------------------------------------
if __name__ == '__main__':
import numpy as np
s = Linear(10.0,0.0)
a = 10.0
b = np.array([1,20,3.])
print a
print b
a = a / s
b = b / s
print a
print b
a = a * s
b = b * s
print a
print b | bsd-3-clause | 8,101,156,460,664,302,000 | 23.103448 | 72 | 0.343879 | false | 5.031142 | false | false | false |
johnboxall/django_kiss | kiss/storage/base.py | 1 | 1786 | from django.utils.encoding import force_unicode, StrAndUnicode
from django.utils import simplejson
class Event(StrAndUnicode):
def __init__(self, event):
self.event = event
def _prepare(self):
self.event = simplejson.dumps(self.event)
def __unicode__(self):
return self.event
class BaseStorage(object):
def __init__(self, request, *args, **kwargs):
self.request = request
self._queued_events = []
self.used = False
self.added_new = False
super(BaseStorage, self).__init__(*args, **kwargs)
def __len__(self):
return len(self._loaded_events) + len(self._queued_events)
def __iter__(self):
self.used = True
if self._queued_events:
self._loaded_events.extend(self._queued_events)
self._queued_events = []
return iter(self._loaded_events)
@property
def _loaded_events(self):
if not hasattr(self, '_loaded_data'):
events, all_retrieved = self._get()
self._loaded_data = events or []
return self._loaded_data
def _prepare_events(self, events):
for event in events:
event._prepare()
def update(self, response):
self._prepare_events(self._queued_events)
if self.used:
return self._store(self._queued_events, response)
elif self.added_new:
events = self._loaded_events + self._queued_events
return self._store(events, response)
def add(self, event):
self.added_new = True
self._queued_events.append(Event(event))
def _get(self, *args, **kwargs):
raise NotImplementedError()
def _store(self, events, response, *args, **kwargs):
raise NotImplementedError() | mit | 4,487,836,395,715,369,000 | 28.295082 | 66 | 0.595745 | false | 4.077626 | false | false | false |
SamProtas/PALiquor | scraper2.py | 1 | 4999 | from bs4 import BeautifulSoup
import sqlite3
import os
PROJECT_ROOT = os.path.dirname(os.path.realpath(__file__))
DATABASE1 = os.path.join(PROJECT_ROOT, 'dbs', 'licensees.db')
conn = sqlite3.connect(DATABASE1)
c = conn.cursor()
try:
c.execute('DROP TABLE licensees')
c.execute('DROP TABLE cases')
finally:
c.execute('''CREATE TABLE licensees (lid INT PRIMARY KEY NOT NULL UNIQUE,
name TEXT,
address TEXT,
zipcode INT,
trade_name TEXT,
license_no TEXT,
license_type TEXT,
license_type_title TEXT,
license_type_code TEXT,
status TEXT,
tavern_gaming_status TEXT,
original_owner TEXT,
current_owner TEXT,
latitude REAL,
longitude REAL)''')
c.execute('PRAGMA foreign_keys = ON')
c.execute('''CREATE TABLE cases (case_id INTEGER PRIMARY KEY,
lid INT NOT NULL,
penalty TEXT,
penalty_text TEXT,
fine INT,
initiation_date TEXT,
FOREIGN KEY(lid) REFERENCES licensees(lid))''')
fileroot = 'saved_html/licensee_source_'
filetail = '.html'
for x in range(114):
filenum = str(x+1).zfill(4)
filename = fileroot + filenum + filetail
print filename
page = open(filename)
soup = BeautifulSoup(page,'html.parser')
page.close()
main_content = soup.find(id="MainContent_UpdatePanel1").find("tbody")
rows = main_content.find_all('tr')
print 'Number of rows:'
print len(rows)
headercount = 0
locator = None
rowcount = 0
for row in rows:
rowcount+=1
attributes = row.attrs
if 'style' in attributes: #Identify headers of licensee
locatorrow = rowcount
if attributes['style'] == 'background-color:#800000': #Identify licensee
general_info={}
cases = []
casenum = 0
headercount+=1
locator = row.find('font').text
else: #Identifies sub-header of licensee
locator = row.find(class_='heading').text
if (locator == 'GENERAL LICENSEE INFORMATION' or locator == 'OWNER ISSUE DATES') and rowcount != locatorrow:
cells = row.find_all('td')
for cell in cells:
heading_title = cell.find(class_="fieldHeading")
the_data = cell.find(class_="data")
if heading_title and the_data:
if heading_title.text[:-1] == 'Address':
contents = the_data.contents
contents = [x for x in contents if x.string != None]
general_info[heading_title.text[:-1]] = " ".join(contents)
general_info['Zipcode'] = int(contents[-1][0:5])
elif heading_title.text[:-1] == 'License Type':
contents = the_data.text.split('-')
license_type_title = "-".join(contents[0:-1]).strip()
license_type_code = contents[-1].strip()[1:-1].strip()
general_info['License Type Title'] = license_type_title
general_info['License Type Code'] = license_type_code
general_info[heading_title.text[:-1]] = the_data.text
else:
general_info[heading_title.text[:-1]] = the_data.text
if locator == 'CITATION CASE INFORMATION(Click on the Case Number(s) for Citation detail)' and rowcount != locatorrow:
cells = row.find_all('td')
for cell in cells:
heading_title = cell.find(class_="fieldHeading").text[:-1]
if heading_title == 'Penalty':
penalty = cell.find(class_="data").text
penalty_split = penalty.split('-')
penalty_text = " ".join(penalty_split[0:-1]).strip()
if len(penalty_split) > 1:
fine = int(penalty_split[-1].strip()[2:-4])
else:
fine = None
if heading_title == 'Initiation Date':
initiation_date = cell.find(class_="data").text
cases.append({'penalty':penalty, 'penalty_text':penalty_text, 'fine':fine, 'initiation_date':initiation_date})
penalty = None
initiation_date = None
if locator == 'APPLICATION CASE INFORMATION' and rowcount == locatorrow:
c.execute('''INSERT INTO licensees (lid, name, address, zipcode, trade_name, license_no,
license_type, license_type_title, license_type_code, status, tavern_gaming_status,
original_owner, current_owner)
VALUES
(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''',
[general_info['LID'], general_info['Name'],
general_info['Address'], general_info['Zipcode'],
general_info['Trade Name'], general_info['License No'],
general_info['License Type'],
general_info['License Type Title'],
general_info['License Type Code'],
general_info['Status'],
general_info['Tavern Gaming Status'],
general_info['Original Owner'],
general_info['Current Owner']])
if cases:
for case in cases:
c.execute('''INSERT INTO cases (lid, penalty, penalty_text, fine, initiation_date) VALUES (?, ?, ?, ?, ?)''',
[general_info['LID'], case['penalty'], case['penalty_text'], case['fine'],case['initiation_date']])
print 'HeaderCount'
print headercount
conn.commit()
c.close()
| gpl-2.0 | -5,075,766,108,287,510,000 | 32.10596 | 120 | 0.618324 | false | 3.304032 | false | false | false |
google/flax | examples/linen_design_test/linear_regression.py | 1 | 1341 | # Copyright 2021 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jax
from jax import numpy as jnp, random, lax, jit
from flax import linen as nn
from dense import Dense
X = jnp.ones((1, 10))
Y = jnp.ones((5,))
model = Dense(features=5)
@jit
def predict(params):
return model.apply({'params': params}, X)
@jit
def loss_fn(params):
return jnp.mean(jnp.abs(Y - predict(params)))
@jit
def init_params(rng):
mlp_variables = model.init({'params': rng}, X)
return mlp_variables['params']
# Get initial parameters
params = init_params(jax.random.PRNGKey(42))
print("initial params", params)
# Run SGD.
for i in range(50):
loss, grad = jax.value_and_grad(loss_fn)(params)
print(i, "loss = ", loss, "Yhat = ", predict(params))
lr = 0.03
params = jax.tree_multimap(lambda x, d: x - lr * d, params, grad)
| apache-2.0 | 2,824,153,034,652,094,000 | 26.9375 | 74 | 0.710664 | false | 3.223558 | false | false | false |
maxdl/Synapse.py | synapse/point.py | 1 | 13079 | import sys
from . import geometry
from .err_warn import ProfileError, profile_warning, profile_message
def lazy_property(fn):
"""Decorator that makes a property lazily evaluated.
From https://stevenloria.com/lazy-properties/.
"""
attr_name = '_lazy_' + fn.__name__
@property
def _lazy_property(self):
if not hasattr(self, attr_name):
setattr(self, attr_name, fn(self))
return getattr(self, attr_name)
return _lazy_property
class PointList(list):
def __init__(self, pointli, ptype, profile):
super().__init__()
try:
self.extend([Point(p.x, p.y, ptype, profile) for p in pointli])
except (AttributeError, IndexError):
raise TypeError('not a list of Point elements')
class Point(geometry.Point):
def __init__(self, x=None, y=None, ptype="", profile=None):
if isinstance(x, geometry.Point):
geometry.Point.__init__(self, x.x, x.y)
else:
geometry.Point.__init__(self, x, y)
self.profile = profile
if self.profile is not None:
self.opt = self.profile.opt
else:
self.opt = None
self.discard = False
self.ptype = ptype
self.cluster = None
self.is_within_psd = None
self.lateral_dist_psd = None
self.norm_lateral_dist_psd = None
self.nearest_psd = None
self.is_associated_with_psd = None
self.associated_psd = None
self.nearest_neighbour_dist = None
self.nearest_neighbour_point = geometry.Point()
self.nearest_lateral_neighbour_dist = None
self.nearest_lateral_neighbour_point = geometry.Point()
self.nearest_neighbour = geometry.Point()
def determine_stuff(self):
def mark_to_discard(msg):
if self.ptype == 'particle': # don't warn if random points
profile_message("Discarding particle at %s: %s" % (self, msg))
self.discard = True
self.profile.n_discarded[self.ptype] += 1
return
if self.dist_to_posel is None:
mark_to_discard("Could not project on postsynaptic element")
return
if self.dist_to_prsel is None:
mark_to_discard("Could not project on presynaptic element")
return
if not self.is_within_shell:
mark_to_discard("Located outside the shell")
return
if self.is_within_hole:
mark_to_discard("Located within a profile hole")
return
__ = self.lateral_location
__ = self.strict_lateral_location
__ = self.axodendritic_location
__ = self.is_within_postsynaptic_membrane_shell
__ = self.is_postsynaptic_membrane_associated
__ = self.is_presynaptic_membrane_associated
self.get_lateral_dist_psd()
self.get_psd_association()
@lazy_property
def dist_to_posel(self):
"""Return perpendicular distance to postsynaptic element membrane"""
return self.perpend_dist(self.profile.posel, posloc=self.profile.posloc)
@lazy_property
def dist_to_prsel(self):
"""Return perpendicular distance to postsynaptic element membrane"""
if len(self.profile.prsel) > 0:
return self.perpend_dist(self.profile.prsel, posloc=self.profile.posloc)
else:
return None
@lazy_property
def is_within_hole(self):
""" Determine whether self is inside a profile hole
"""
for h in self.profile.holeli:
if self.is_within_polygon(h):
return True
return False
@lazy_property
def is_within_shell(self):
"""Determine whether self is within shell"""
return (self.dist_to_posel is not None and
abs(self.dist_to_posel) <= geometry.to_pixel_units(self.opt.shell_width,
self.profile.pixelwidth))
@lazy_property
def is_within_postsynaptic_membrane_shell(self):
return (self.dist_to_posel is not None
and abs(self.dist_to_posel) <=
geometry.to_pixel_units(self.opt.shell_width,
self.profile.pixelwidth))
@lazy_property
def lateral_location(self):
""" Determines the lateral location, defined as the location
along the mediolateral synaptic axis of the projection of
the point on the postsynaptic membrane.
Assumes that PSDs and the postsynaptic membrane have been determined.
"""
# A point is classified as synaptic if its projection on the
# postsynaptic membrane is within the spatial resolution limit
# of the postsynaptic membrane
for psd in self.profile.psdli:
if (self.lateral_dist_syn(self.profile.posel, psd.posm) -
geometry.to_pixel_units(self.opt.spatial_resolution,
self.profile.pixelwidth) <= psd.posm.length() / 2):
return "synaptic"
# If not synaptic but still within the extent of the synaptic
# membrane
if (self.lateral_dist_syn(self.profile.posel, self.profile.total_posm) <
self.profile.total_posm.length() / 2):
return "within perforation"
# Otherwise it may be perisynaptic, defined here as being
# outside the synapse but within half a diameter of the nearest
# PSD... or perhaps it should be a fixed distance, e g 200 nm?
# I don't want to determine which PSD is closest and which edge
# of that PSD faces the extrasynapse, so I just determine the
# distance to the nearest edge; if that is an internal edge,
# the point will have been identified as within perforation
# above.
for psd in self.profile.psdli:
if (self.lateral_dist_syn(self.profile.posel, psd.posm) -
geometry.to_pixel_units(self.opt.spatial_resolution,
self.profile.pixelwidth) <= psd.posm.length()):
return "perisynaptic"
# If none of the above
return "extrasynaptic"
@lazy_property
def strict_lateral_location(self):
""" Determines the lateral location, defined as the location
along the mediolateral synaptic axis of the projection of
the particle on the postsynaptic membrane. Does not take
spatial resolution into account, such that a particle is
considered synaptic only when strictly projectable on the
postsynaptic membrane.
"""
# A point is classified as synaptic if its projection on the
# postsynaptic membrane is within the postsynaptic membrane
for psd in self.profile.psdli:
if self.lateral_dist_syn(self.profile.posel, psd.posm) <= psd.posm.length() / 2.0:
return "synaptic"
# If not synaptic but still within the extent of the synaptic
# membrane
if (self.lateral_dist_syn(self.profile.posel, self.profile.total_posm) <
self.profile.total_posm.length() / 2.0):
return "within perforation"
# Otherwise it may be perisynaptic, defined here as being
# outside the synapse but within half a diameter of the nearest
# PSD... or perhaps it should be a fixed distance, e g 200 nm?
# I don't want to determine which PSD is closest and which edge
# of that PSD faces the extrasynapse, so I just determine the
# distance to the nearest edge; if that is an internal edge,
# the point will have been identified as within perforation
# above.
for psd in self.profile.psdli:
if self.lateral_dist_syn(self.profile.posel, psd.posm) <= psd.posm.length():
return "perisynaptic"
# If none of the above
return "extrasynaptic"
@lazy_property
def axodendritic_location(self):
""" Determines the particle's location along the axodendritic axis.
"""
if self.dist_to_posel >= 0:
return "postsynaptic"
elif self.dist_to_prsel is None: # if there is no presynaptic membrane,
return "not postsynaptic" # that's about all we can say
elif self.dist_to_prsel <= 0:
return "presynaptic"
else:
return "neither pre- or postsynaptic"
def get_lateral_dist_psd(self):
mindist = sys.maxsize
nearest_psd = None
for psd in self.profile.psdli:
d = self.lateral_dist_syn(self.profile.posel, psd.posm) - psd.posm.length() / 2
if d < mindist:
mindist = d
nearest_psd = psd
if not nearest_psd:
raise ProfileError(self.profile,
"could not determine lateral distance to a PSD of particle at %s"
% self)
mindist = self.lateral_dist_syn(self.profile.posel, nearest_psd.posm)
normdist = mindist / (nearest_psd.posm.length() / 2)
self.lateral_dist_psd = mindist
self.norm_lateral_dist_psd = normdist
self.nearest_psd = nearest_psd
def get_psd_association(self):
if self.is_within_psd is not None:
return
is_within_psd = False
is_associated_with_psd = False
associated_psd = None
mindist = sys.maxsize
for psd in self.profile.psdli:
if self.is_within_polygon(psd.psdposm):
is_within_psd = True
is_associated_with_psd = True
associated_psd = psd
break
dist = self.perpend_dist_closed_path(psd.psdposm, dont_care_if_on_or_off_seg=True)
if dist <= geometry.to_pixel_units(self.opt.spatial_resolution,
self.profile.pixelwidth):
is_associated_with_psd = True
if dist < mindist:
associated_psd = psd
mindist = dist
self.is_within_psd = is_within_psd
self.is_associated_with_psd = is_associated_with_psd
self.associated_psd = associated_psd
@lazy_property
def is_postsynaptic_membrane_associated(self):
if (self.dist_to_posel is not None and
abs(self.dist_to_posel) <= geometry.to_pixel_units(self.opt.spatial_resolution,
self.profile.pixelwidth)):
return True
else:
return False
@lazy_property
def is_presynaptic_membrane_associated(self):
if (self.dist_to_prsel is not None and
abs(self.dist_to_prsel) <= geometry.to_pixel_units(self.opt.spatial_resolution,
self.profile.pixelwidth)):
return True
else:
return False
def get_nearest_neighbour(self, pointli):
# Assumes that only valid (projectable, within shell etc) points
# are in pointli
mindist = float(sys.maxsize)
for p in pointli:
if p is not self:
d = self.dist(p)
if d < mindist:
mindist = d
if not mindist < float(sys.maxsize):
return None
else:
nearest_neighbour_dist = mindist
return nearest_neighbour_dist
def get_nearest_lateral_neighbour(self, pointli):
# Assumes that only valid (projectable, within shell etc) points
# are in pointli
mindist = float(sys.maxsize)
for p in pointli:
if p is not self:
d = self.lateral_dist_to_point(p, self.profile.posel)
if d < mindist:
mindist = d
if not mindist < float(sys.maxsize):
return None
else:
nearest_lateral_neighbour_dist = mindist
return nearest_lateral_neighbour_dist
def lateral_dist_to_point(self, p2, sel):
""" Determine lateral distance to a point p2 along sel.
Overrides function in geometry.Point, which only works
with a closed path.
"""
path = geometry.SegmentedPath()
p2_project, p2_seg_project = p2.project_on_path_or_endnode(sel)
project, seg_project = self.project_on_path_or_endnode(sel)
path.extend([project, p2_project])
if p2_seg_project < seg_project:
path.reverse()
for n in range(min(p2_seg_project, seg_project) + 1,
max(p2_seg_project, seg_project)):
path.insert(len(path) - 1, sel[n])
return path.length()
def lateral_dist_syn(self, sel, sm):
""" Determine lateral distance to center of (post- or pre-)
synaptic membrane.
"""
return self.lateral_dist_to_point(sm.center_point(), sel)
| mit | 5,774,043,725,307,891,000 | 40.520635 | 96 | 0.58399 | false | 3.927628 | false | false | false |
702nADOS/sumo | tools/build/status.py | 1 | 2776 | #!/usr/bin/env python
"""
@file status.py
@author Michael Behrisch
@author Laura Bieker
@date 2007-03-13
@version $Id: status.py 22608 2017-01-17 06:28:54Z behrisch $
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2008-2017 DLR (http://www.dlr.de/) and contributors
This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import smtplib
import re
from os.path import basename, join, commonprefix
from datetime import datetime
def findErrors(line, warnings, errors, failed):
if re.search("[Ww]arn[ui]ng[: ]", line) or "[WARNING]" in line:
warnings += 1
if re.search("[Ee]rror[: ]", line) or re.search("[Ff]ehler:", line) or "[ERROR]" in line:
errors += 1
failed += line
return warnings, errors, failed
def printStatus(makeLog, makeAllLog, smtpServer="localhost", out=sys.stdout, toAddr="[email protected]"):
failed = ""
build = commonprefix([basename(makeLog), basename(makeAllLog)])
print(build, end=' ', file=out)
print(datetime.now().ctime(), file=out)
print("--", file=out)
print(basename(makeLog), file=out)
warnings = 0
errors = 0
svnLocked = False
for l in open(makeLog):
if ("svn: Working copy" in l and "locked" in l) or "svn: Failed" in l:
svnLocked = True
failed += l
warnings, errors, failed = findErrors(l, warnings, errors, failed)
if svnLocked:
failed += "svn up failed\n\n"
print(warnings, "warnings", file=out)
if errors:
print(errors, "errors", file=out)
failed += "make failed\n\n"
print("--\nbatchreport\n--", file=out)
print(basename(makeAllLog), file=out)
warnings = 0
errors = 0
for l in open(makeAllLog):
warnings, errors, failed = findErrors(l, warnings, errors, failed)
print(warnings, "warnings", file=out)
if errors:
print(errors, "errors", file=out)
failed += "make debug failed\n\n"
print("--", file=out)
if failed:
fromAddr = "[email protected]"
message = """From: "%s" <%s>
To: %s
Subject: Error occurred while building
%s""" % (build, fromAddr, toAddr, failed)
try:
server = smtplib.SMTP(smtpServer)
server.sendmail(fromAddr, toAddr, message)
server.quit()
except:
print("Could not send mail.")
if __name__ == "__main__":
printStatus(sys.argv[1], sys.argv[2], sys.argv[3], sys.stdout, sys.argv[4])
| gpl-3.0 | -8,624,987,886,662,435,000 | 31.27907 | 105 | 0.636167 | false | 3.397797 | false | false | false |
jaffe59/vp-cnn | cnn_classifier/chatscript_file_generator.py | 1 | 3404 | import math
import torch
import scipy.stats as stats
import ast
# this script is used to generate results used to combine the classifier with chatscript
def calc_indices(args):
#calc fold indices
indices = []
numfolds = args.xfolds
len_dataset = 4330
fold_size = math.ceil(len_dataset/numfolds)
for fold in range(numfolds):
startidx = fold*fold_size
endidx = startidx+fold_size if startidx+fold_size < len_dataset else len_dataset
indices.append((startidx, endidx))
return indices
def read_in_labels(labels_file):
labels = []
with open(labels_file) as l:
for line in l:
line = line.strip().split("\t")
labels.append('_'.join(line[1].split(' ')))
return labels
def read_in_dialogues(dialogue_file):
dialogue_indices = []
dialogue_index = -1
turn_index = -1
if dialogue_file.endswith('indices'):
with open(dialogue_file) as l:
for line in l:
dialogue_indices.append(ast.literal_eval(line.strip()))
else:
with open(dialogue_file) as l:
for line in l:
if line.startswith('#S'):
dialogue_index += 1
turn_index = 0
else:
dialogue_indices.append((dialogue_index, turn_index))
turn_index += 1
return dialogue_indices
def read_in_chat(chat_file, dialogues):
chats = {}
with open(chat_file) as c:
for line in c:
if line.startswith('dia'):
continue
else:
line = line.strip().split(',')
this_index = (int(line[0]), int(line[1]))
# print(dialogues)
chats[this_index] = (line[-2], line[-1])
return chats
def print_test_features(tensor, confidence, ave_probs, ave_logprobs, target, dialogue_indices, labels, indices, fold_id, chats, feature_file):
# dial_id, turn_id, predicted_label, correct_bool, prob, entropy, confidence, chat_prob, chat_rank
tensor = torch.exp(tensor)
probs, predicted = torch.max(tensor, 1)
predicted = predicted.view(target.size()).data
probs = probs.view(target.size()).data
corrects = predicted == target.data
confidence = confidence.squeeze().data.cpu().numpy() / 2
ave_logprobs = ave_logprobs.squeeze().data.cpu().numpy() / 2
ave_probs = ave_probs.squeeze().data.cpu().numpy() / 2
tensor = tensor.squeeze().data.cpu().numpy()
start_id, end_id = indices[fold_id]
for ind, val in enumerate(corrects):
item = []
item_id = start_id+ind
dialogue_index, turn_index = dialogue_indices[item_id]
item.append(dialogue_index)
item.append(turn_index)
item.append(labels[predicted[ind]])
item.append(str(bool(val)))
item.append(probs[ind])
if probs[ind] < 0.0:
print(tensor[ind])
print(probs[ind], predicted[ind])
raise Exception
item.append(stats.entropy(tensor[ind]))
item.append(confidence[ind, predicted[ind]])
item.append(ave_probs[ind, predicted[ind]])
item.append(ave_logprobs[ind, predicted[ind]])
item.append(chats[(dialogue_index, turn_index)][0])
item.append(chats[(dialogue_index, turn_index)][1])
print(','.join([str(x) for x in item]), file=feature_file)
| apache-2.0 | 8,866,380,884,439,430,000 | 34.458333 | 142 | 0.595476 | false | 3.628998 | false | false | false |
mpiannucci/PiMonitor | Reporter/Templates/__init__.py | 1 | 4984 | from web.template import CompiledTemplate, ForLoop, TemplateResult
# coding: utf-8
def base (page):
__lineoffset__ = -4
loop = ForLoop()
self = TemplateResult(); extend_ = self.extend
extend_([u'\n'])
extend_([u'<html>\n'])
extend_([u'<head>\n'])
extend_([u' <meta name="viewport" content="width=device-width, initial-scale=1">\n'])
extend_([u' <title>PiMonitor</title>\n'])
extend_([u' <link rel="shortcut icon" type="image/x-icon" href="/static/favicon.ico" />\n'])
extend_([u' <link rel="stylesheet" href="http://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap.min.css">\n'])
extend_([u' <link rel="stylesheet" type="text/css" href="/static/pimonitor.css" />\n'])
extend_([u' <script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js"></script>\n'])
extend_([u' <script src="http://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/js/bootstrap.min.js"></script>\n'])
extend_([u' <script src="/static/Scripts/pimonitor.js" type="text/javascript"></script>\n'])
extend_([u'</head>\n'])
extend_([u'\n'])
extend_([u'<body>\n'])
extend_([u' <!-- Navigation Bar -->\n'])
extend_([u' <nav class="navbar navbar-inverse navbar-fixed-top" role="navigation">\n'])
extend_([u' <div class="container">\n'])
extend_([u' <!-- Brand and toggle get grouped for better mobile display -->\n'])
extend_([u' <div class="navbar-header">\n'])
extend_([u' <button type="button" class="navbar-toggle" data-toggle="collapse" data-target="#bs-example-navbar-collapse-1">\n'])
extend_([u' <span class="sr-only">Toggle navigation</span>\n'])
extend_([u' <span class="icon-bar"></span>\n'])
extend_([u' <span class="icon-bar"></span>\n'])
extend_([u' <span class="icon-bar"></span>\n'])
extend_([u' </button>\n'])
extend_([u' <a class="navbar-brand" href="http://blog.mpiannucci.com/">Matthew Iannucci</a>\n'])
extend_([u' </div>\n'])
extend_([u' <!-- Collect the nav links, forms, and other content for toggling -->\n'])
extend_([u' <div class="collapse navbar-collapse" id="bs-example-navbar-collapse-1">\n'])
extend_([u' <ul class="nav navbar-nav">\n'])
extend_([u' <li>\n'])
extend_([u' <a href="http://blog.mpiannucci.com/blog">Blog</a>\n'])
extend_([u' </li>\n'])
extend_([u' <li>\n'])
extend_([u' <a href="http://blog.mpiannucci.com/apps">Projects</a>\n'])
extend_([u' </li>\n'])
extend_([u' <li>\n'])
extend_([u' <a href="http://blog.mpiannucci.com/bio">About</a>\n'])
extend_([u' </li>\n'])
extend_([u' </ul>\n'])
extend_([u' </div>\n'])
extend_([u' <!-- /.navbar-collapse -->\n'])
extend_([u' </div>\n'])
extend_([u' <!-- /.container -->\n'])
extend_([u' </nav>\n'])
extend_([u'<!-- <header class="jumbotron map_jumbotron" id="mainheader">\n'])
extend_([u' <div class="container">\n'])
extend_([u' <h1>MapGetter</h1>\n'])
extend_([u' <p>Get static images of a central area with coordinates in meters</p>\n'])
extend_([u' <em>Images courtesy of Google Maps</em>\n'])
extend_([u' </div>\n'])
extend_([u' </header> -->\n'])
extend_([u' <div class="container">\n'])
extend_([u' <div class="row">\n'])
extend_([u' <div class="col-sm-12 text-center" id="mapImage">\n'])
extend_([u' ', escape_(page, False), u'\n'])
extend_([u' </div>\n'])
extend_([u' </div>\n'])
extend_([u' <div class="row">\n'])
extend_([u' <div class="col-sm-12 text-center" id="mainfooter">\n'])
extend_([u' <p>Copyright 2015, Matthew Iannucci</p>\n'])
extend_([u' </div>\n'])
extend_([u' </div>\n'])
extend_([u' </div>\n'])
extend_([u'</body>\n'])
extend_([u'</html>\n'])
return self
base = CompiledTemplate(base, 'templates/base.html')
join_ = base._join; escape_ = base._escape
# coding: utf-8
def index():
__lineoffset__ = -5
loop = ForLoop()
self = TemplateResult(); extend_ = self.extend
posts = getAllReports()
extend_([u'\n'])
for post in loop.setup(posts):
extend_([escape_(post.date, True), u'\n'])
extend_([escape_(post.temperature, True), u'\n'])
extend_([escape_(post.humidity, True), u'\n'])
extend_([u'<hr>\n'])
return self
index = CompiledTemplate(index, 'templates/index.html')
join_ = index._join; escape_ = index._escape
| mit | -4,137,701,518,279,961,600 | 49.857143 | 147 | 0.507424 | false | 3.234263 | false | false | false |
google-research/google-research | ravens/ravens/cameras.py | 1 | 2469 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Camera configs."""
import numpy as np
import pybullet as p
class RealSenseD415():
"""Default configuration with 3 RealSense RGB-D cameras."""
# Mimic RealSense D415 RGB-D camera parameters.
image_size = (480, 640)
intrinsics = (450., 0, 320., 0, 450., 240., 0, 0, 1)
# Set default camera poses.
front_position = (1., 0, 0.75)
front_rotation = (np.pi / 4, np.pi, -np.pi / 2)
front_rotation = p.getQuaternionFromEuler(front_rotation)
left_position = (0, 0.5, 0.75)
left_rotation = (np.pi / 4.5, np.pi, np.pi / 4)
left_rotation = p.getQuaternionFromEuler(left_rotation)
right_position = (0, -0.5, 0.75)
right_rotation = (np.pi / 4.5, np.pi, 3 * np.pi / 4)
right_rotation = p.getQuaternionFromEuler(right_rotation)
# Default camera configs.
CONFIG = [{
'image_size': image_size,
'intrinsics': intrinsics,
'position': front_position,
'rotation': front_rotation,
'zrange': (0.01, 10.),
'noise': False
}, {
'image_size': image_size,
'intrinsics': intrinsics,
'position': left_position,
'rotation': left_rotation,
'zrange': (0.01, 10.),
'noise': False
}, {
'image_size': image_size,
'intrinsics': intrinsics,
'position': right_position,
'rotation': right_rotation,
'zrange': (0.01, 10.),
'noise': False
}]
class Oracle():
"""Top-down noiseless image used only by the oracle demonstrator."""
# Near-orthographic projection.
image_size = (480, 640)
intrinsics = (63e4, 0, 320., 0, 63e4, 240., 0, 0, 1)
position = (0.5, 0, 1000.)
rotation = p.getQuaternionFromEuler((0, np.pi, -np.pi / 2))
# Camera config.
CONFIG = [{
'image_size': image_size,
'intrinsics': intrinsics,
'position': position,
'rotation': rotation,
'zrange': (999.7, 1001.),
'noise': False
}]
| apache-2.0 | -4,647,917,578,801,625,000 | 29.109756 | 74 | 0.64115 | false | 3.202335 | true | false | false |
evantygf/BlockFun | id_list.py | 1 | 2710 | # Copyright 2016 Evan Dunning
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# image_path, name, id, type, state, breakable, drops, illuminant
sky_id = ("images/tiles/sky.png", "sky", 0, "block", 0, 0, 0, 1)
invisible_id = ("images/tiles/invisible.png", "invisible", 1, "block", 1, 0, 1, 0)
bedrock_id = ("images/tiles/bedrock.png", "bedrock", 2, "block", 1, 0, 2, 0)
grass_id = ("images/tiles/grass.png", "grass", 3, "block", 1, 1, 4, 0)
dirt_id = ("images/tiles/dirt.png", "dirt", 4, "block", 1, 1, 4, 0)
stone_id = ("images/tiles/stone.png", "stone", 5, "block", 1, 1, 5, 0)
sand_id = ("images/tiles/sand.png", "sand", 6, "block", 1, 1, 6, 0)
wood_id = ("images/tiles/wood.png", "wood", 7, "block", 0, 1, 7, 0)
leaf_id = ("images/tiles/leaf.png", "leaf", 8, "block", 0, 1, 8, 0)
chest_id = ("images/tiles/chest.png", "chest", 9, "block", 1, 1, 9, 0)
diamond_id = ("images/tiles/diamond ore.png", "diamond ore", 10, "block", 1, 1, 10, 0)
torch_id = ("images/tiles/torch.png", "torch", 11, "block", 0, 1, 11, 1)
pistol_id = ("images/items/pistol.png", "pistol", 100, "item")
all_ids = [sky_id,
invisible_id,
bedrock_id,
grass_id,
dirt_id,
stone_id,
sand_id,
wood_id,
leaf_id,
chest_id,
diamond_id,
torch_id,
pistol_id]
empty_list = [None for i in range(256)]
for i in all_ids:
empty_list[i[2]] = i
id_list = empty_list | gpl-3.0 | -2,952,824,591,461,782,000 | 52.156863 | 124 | 0.469742 | false | 3.321078 | false | false | false |
xaviergmail/clargs | clargs/parser.py | 1 | 3276 | import re
import sys
full_re = r'^--(no)?([\w|-]*)(=(.*))?$'
short_re = r'^-(\w)(\w*)?$'
upper_re = r'^[a-z]*$'
def parse(argformat, argv):
# Initialize rogue list and named dict
rogue = []
named = {}
argc = len(argv)
i = 0
while i < argc:
# Current argument value in the loop
arg = argv[i]
# Search for the abbreviated options first
short = re.match(short_re, arg, re.I)
full = None
# Search for the full option if shorthand wasn't found
if not short:
# Search for the full argument
full = re.match(full_re, arg, re.I)
# Still haven't found a match. Add to rogue list and continue
if not full:
rogue.append(arg)
i += 1
continue
# Loop through argument data to find desired type. Default to str
for arg, argd in argformat.items():
argType = argd[2] if len(argd) > 2 else str
# Shorthand match!
if short and short.group(1).lower() == argd[0]:
# Boolean requested! True if lowercase, False if UPPERCASE
if argType is bool:
named[arg] = re.search(upper_re, short.group(1))
# 'Compressed' argument, Ex: -oSomething
# Take the value from the second capture group
elif short.group(2):
named[arg] = short.group(2)
# Our value is stored in the next index.
# Error out with missing argument if we go out of range
else:
if i + 2 > argc:
sys.stderr.write(
"Error: Missing value for argument %s\n" %
short.group(1))
sys.exit(1)
i += 1
# Store the value in the index
named[arg] = argv[i]
# Successfully matched a shorthand argument! Break out of loop.
break
# Full name match!
elif full and full.group(2).lower() == arg:
# Boolean requested. Assign the inverse of capture group 1 (no)
if argType is bool:
named[arg] = not bool(full.group(1))
# Equal sign found, assign value found after it
elif full.group(4):
named[arg] = full.group(4)
break # Success, exit this inner loop
else: # Did not break out of the loop, error out.
sys.stderr.write("Error: Unknown argument %s\n" %
("-" + short.group(1) if short else
"--" + full.group(1)))
sys.exit(1)
i += 1
for arg, argd in argformat.items():
# Default argument, if specified
if not arg in named and len(argd) > 1:
named[arg] = argd[1]
# Convert to the requested type, if specified. This will also convert
# the previously assigned regex/group matches to booleans.
elif len(argd) > 2:
named[arg] = argd[2](named[arg])
ret = {}
ret["named"] = named
ret["rogue"] = rogue
return ret
| gpl-2.0 | -3,713,753,290,371,247,600 | 31.117647 | 79 | 0.492979 | false | 4.249027 | false | false | false |
jfindleyderegt/au_trap | test_canny.py | 1 | 1891 | import cv2
import numpy as np
def CannyThreshold(lowThreshold):
detected_edges = cv2.GaussianBlur(gray,(3,3),0)
detected_edges = cv2.Canny(detected_edges,lowThreshold,lowThreshold*ratio,apertureSize = kernel_size)
dst = cv2.bitwise_and(img,img,mask = detected_edges) # just add some colours to edges from original image.
cv2.imshow('canny demo',dst)
lowThreshold = 0
max_lowThreshold = 400
ratio = 7
kernel_size = 5
im_simple = '/home/faedrus/Documents/au_trap/20141031/1414763725.88.png'
im_complex = '/home/faedrus/Documents/au_trap/20141031/1414769658.04.png'
img = cv2.imread(im_complex)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
cv2.namedWindow('canny demo')
cv2.createTrackbar('Min threshold','canny demo',lowThreshold, max_lowThreshold, CannyThreshold)
CannyThreshold(0) # initialization
if cv2.waitKey(0) == 27:
cv2.destroyAllWindows()
#(height, width, depth) = img.shape
#
#r_count=np.sum(img[:,width/4:3*width/4,2])
#r_background=np.sum(img[:,:width/4,2])+np.sum(img[:,-width/4:,2])
#
#avg_count = r_count - r_background
#return avg_count
#path = '/home/faedrus/Documents/au_trap/20141013/'
#listing = os.listdir(path)
#listing = sorted (listing)
#print listing
#data=np.array([file_mean(path+im_file) for im_file in listing]).reshape((47,10))
#mean=np.mean(data,axis=1)
#std=np.std(data,axis=1)
#pylab.figure(1)
#pylab.hist(mean, 40)
#pylab.xlabel('intensity (a.u.)')
#pylab.ylabel('number of particles')
#pylab.figure(2)
#pylab.errorbar(range(len(mean)),mean,yerr=std)
#pylab.axis([0,50,0,6e5])
#pylab.xlabel('particle number')
#pylab.ylabel('intensity (a.u.)')
#fig1.show()
#pylab.show()
#print "Avg: " + str(r_count)
#r_min = np.array([0, 0, 0], np.uint8)
#r_max = np.array([100, 100, 255], np.uint8)
#dst = cv2.inRange(img, r_min, r_max)
#no_blue = cv2.countNonZero(dst)
#print('The number of blue pixels is: ' + str(no_blue))
| cc0-1.0 | 4,776,103,049,954,169,000 | 26.808824 | 111 | 0.703332 | false | 2.514628 | false | false | false |
udapi/udapi-python | udapi/block/util/eval.py | 1 | 3902 | """Eval is a special block for evaluating code given by parameters."""
import collections
import pprint
import re
from udapi.core.block import Block
pp = pprint.pprint # pylint: disable=invalid-name
# We need exec in this block and the variables this etc. are not unused but provided for the exec
# pylint: disable=exec-used,unused-variable
class Eval(Block):
r"""Special block for evaluating code given by parameters.
Tricks:
`pp` is a shortcut for `pprint.pprint`.
`$.` is a shortcut for `this.` which is a shortcut for `node.`, `tree.` etc.
depending on context.
`count_X` is a shortcut for `self.count[X]` where X is any string (\S+)
and `self.count` is a `collections.Counter()` instance.
Thus you can use code like
`util.Eval node='count_$.upos +=1; count_"TOTAL" +=1' end="pp(self.count)"`
"""
# So many arguments is the design of this block (consistent with Perl Udapi).
# pylint: disable=too-many-arguments,too-many-instance-attributes
def __init__(self, doc=None, bundle=None, tree=None, node=None, start=None, end=None,
before_doc=None, after_doc=None, before_bundle=None, after_bundle=None,
expand_code=True, **kwargs):
super().__init__(**kwargs)
self.doc = doc
self.bundle = bundle
self.tree = tree
self.node = node
self.start = start
self.end = end
self.before_doc = before_doc
self.after_doc = after_doc
self.before_bundle = before_bundle
self.after_bundle = after_bundle
self.expand_code = expand_code
self.count = collections.Counter()
def expand_eval_code(self, to_eval):
"""Expand '$.' to 'this.', useful for oneliners."""
if not self.expand_code:
return to_eval
to_eval = re.sub(r'count_(\S+)', r'self.count[\1]', to_eval)
return to_eval.replace('$.', 'this.')
def before_process_document(self, document):
if self.before_doc:
this = doc = document
exec(self.expand_eval_code(self.before_doc))
def after_process_document(self, document):
if self.after_doc:
this = doc = document
exec(self.expand_eval_code(self.after_doc))
def process_document(self, document):
this = doc = document
if self.doc:
exec(self.expand_eval_code(self.doc))
if self.bundle or self.before_bundle or self.after_bundle or self.tree or self.node:
for bundle in doc.bundles:
# TODO if self._should_process_bundle(bundle):
self.process_bundle(bundle)
def process_bundle(self, bundle):
# Extract variables, so they can be used in eval code
document = doc = bundle.document
this = bundle
if self.before_bundle:
exec(self.expand_eval_code(self.before_bundle))
if self.bundle:
exec(self.expand_eval_code(self.bundle))
if self.tree or self.node:
trees = bundle.trees
for tree in trees:
if self._should_process_tree(tree):
self.process_tree(tree)
if self.after_bundle:
exec(self.expand_eval_code(self.after_bundle))
def process_tree(self, tree):
# Extract variables so they can be used in eval code
bundle = tree.bundle
doc = document = bundle.document
this = tree
root = tree
if self.tree:
exec(self.expand_eval_code(self.tree))
if self.node:
for node in tree.descendants():
this = node
exec(self.expand_eval_code(self.node))
def process_start(self):
if self.start:
exec(self.expand_eval_code(self.start))
def process_end(self):
if self.end:
exec(self.expand_eval_code(self.end))
| gpl-3.0 | 2,267,388,180,198,973,700 | 32.930435 | 97 | 0.60123 | false | 3.848126 | false | false | false |
cellnopt/cellnopt | cno/io/cna.py | 1 | 7855 | # -*- python -*-
#
# This file is part of the cinapps.tcell package
#
# Copyright (c) 2012-2013 - EMBL-EBI
#
# File author(s): Thomas Cokelaer ([email protected])
#
# Distributed under the GLPv3 License.
# See accompanying file LICENSE.txt or copy at
# http://www.gnu.org/licenses/gpl-3.0.html
#
# website: www.cellnopt.org
#
##############################################################################
""":Topic: **Module dedicated to the CNA reactions data structure**
:Status: for production but not all features implemented.
"""
from __future__ import print_function
from cno.io.reactions import Reactions
from cno.io.sif import SIF
from cno.misc import CNOError
__all__ = ["CNA"]
class CNA(Reactions):
"""Reads a reaction file (CNA format)
This class has the :class:`Interaction` class as a Base class.
It is used to read **reactions** files from the CNA format, which
is a CSV-like format where each line looks like::
mek=erk 1 mek = 1 erk | # 0 1 0 436 825 1 1 0.01
The pipe decompose the strings into a LHS and RHS.
The LHS is made of a unique identifier without blanks (mek=erk). The remaining part is
the reaction equation. The equal sign "=" denotes the reaction arrow. Identifiers,
coefficients and equal sign must be separated by at least one blank.
The ! sign to indicate not. The + sign indicates an OR relation.
.. warning:: The + sign indicates an OR as it should be. However, keep in
mind that in CellNOptR code, the + sign
indicates an AND gate. In this package we always use **+** for an OR and
**^** or **&** for an AND gate.
.. warning:: in the CNA case, some reactions have no LHS or RHS. Such
reactions are valid in CNA but may cause issue if converted to SIF
.. note:: there don't seem to be any AND in CNA reactions.
The RHS is made of
* a default value: # or a value.
* a set of 3 flags representing the time scale
* flag 1: whether this interaction is to be excluded in logical computations
* flag 2: whether the logical interaction is treated with incomplete truth table
* flag 3: whether the interaction is monotone
* reacBoxes (columns 5,6,7,8)
* monotony (col 9)
In this class, only the LHS are used for now, however, the RHS values are
stored in different attributes.
::
>>> from cno.io import Reactions
>>> from cno import getdata
>>> a = Reactions(getdata('test_reactions'))
>>> reacs = a.reactions
.. seealso:: CNA class inherits from :class:`cno.io.reaction.Reaction`
"""
def __init__(self, filename=None, type=2, verbose=False):
""".. rubric:: Constructor
:param str filename: an optional filename containing reactions in CNA
format. If not provided, the CNA object is empty but you can
add reactions using :meth:`~cno.io.cna.CNA.add_reaction`.
However, attributes such as :attr:`~cno.io.cna.CNA.reacBoxes`
will not be populated.
:param integer type: only type 2 for now.
:param bool verbose: False by default
.. todo:: type1 will be implemented on request.
"""
super(CNA, self).__init__()
self.strict_rules = False
#self.metabolites = metabolites # must be a class LoadMetabolites
self.filename = filename
self.verbose = verbose
self.type = type
if type != 2:
raise NotImplementedError("only type 2 implemented")
# Attributes populated while reading the data.
#: populated when reading CNA reactions file
self.reacBoxes = []
#: populated when reading CNA reactions file
self.incTruthTable = []
#: populated when reading CNA reactions file
self.timeScale = []
#: populated when reading CNA reactions file
self.excludeInLogical = []
#: populated when reading CNA reactions file
self.reacText = []
#: populated when reading CNA reactions file
self.monotony = [] #flag 3
self.reacDefault = []
if filename:
self._read_reactions()
self._get_species()
def _read_reactions(self):
"""Read a reactions file and populate readID"""
f = open(self.filename, "r")
data = [] # the data structure to populate
for line in f.readlines(): # for each line
# convert tab.to white space, remove trailing and \n character
line = line.replace('\t',' ').replace('\n','').strip()
# do not consider commented or empty lines
if line.startswith("%") or line.startswith('#'):
pass
if len(line) == 0:
print("Found an empty line. Skipped")
else:
data.append(line)
f.close()
# scan all the data
for i, x in enumerate(data):
try:
beforePipe, afterPipe = x.split('|') # there should be only one pipe per
# line, so if it fails, this is a format error
except ValueError as err:
raise ValueError("Error msg to do")
reacID = beforePipe.split()[0].strip()
if reacID.count('=') != 1:
raise ValueError("Error line %s: wrong format expected one " %(i+1)
+ "only one = sign, found %s" % reacID.count('='))
else:
self.add_reaction(reacID)
reacText = beforePipe.replace(reacID, "").strip()
self.reacText.append(reacText)
parameters = afterPipe.split()
if len(parameters) != 9:
raise ValueError("Error line %s: did no find expected numbers of parameters" % i+1)
if self.type == 1:
# not finished
reacDefault, reacMin, reacMax, objFunc, d, d, d, d, reacVariance = parameters
mue = []
stoichMat = []
elif self.type == 2:
# First, the reac default value.
if parameters[0].isalnum():
self.reacDefault.append(float(parameters[0]))
elif parameters[0].strip()=='#':
self.reacDefault.append(float('NaN'))
else:
raise ValueError("""Error line %s: unexpected value in the
first column after pipe character (%s)""" % (str(i+1), parameters[0]))
self.incTruthTable.append(float(parameters[1]))
self.timeScale.append(float(parameters[2]))
self.excludeInLogical.append(float(parameters[3]))
self.monotony.append(float(parameters[8]))
self.reacBoxes.append([i+1, float(parameters[4]),
float(parameters[5]), 0, float(parameters[6]),
float(parameters[7])])
# clean up the reacDefault: could be # or number
if self.verbose == True:
print(self)
def to_sif(self, filename=None):
"""Export the reactions to SIF format
::
from cno.io import CNA
r = CNA()
r.add_reaction("a=b")
r.add_reaction("a+c=e")
r.to_sif("test.sif")
Again, be aware that "+" sign in Reaction means "OR".
Looking into the save file, we have the a+c=e reactions (a=e OR c=e)
expanded into 2 reactions (a 1 e) and (c 1 e) as expected::
a 1 b
a 1 e
c 1 e
"""
s = SIF()
for reac in self.reactions:
try:
s.add_reaction(reac)
except CNOError:
print("Skipped {} reaction".format(reac))
s.save(filename)
| bsd-2-clause | 8,088,367,737,963,195,000 | 35.534884 | 99 | 0.569064 | false | 4.011747 | false | false | false |
cntnboys/cmput410-project | main/basicHttpAuth.py | 1 | 5273 | import base64
import json
from django.http import HttpResponse
from django.contrib.auth import authenticate, login
from django.template import RequestContext, loader
from django.core.exceptions import ObjectDoesNotExist
from main.models import Nodes
from django.db.models import Q
# This code snippet is taken from django snippets:
# https://djangosnippets.org/snippets/243/
# and Written By Scanner. The code snippet is used to allow for basic auth
#############################################################################
def view_or_basicauth(view, request, test_func, realm = "", *args, **kwargs):
"""
This is a helper function used by both 'logged_in_or_basicauth' and
'has_perm_or_basicauth' that does the nitty of determining if they
are already logged in or if they have provided proper http-authorization
and returning the view if all goes well, otherwise responding with a 401.
"""
if test_func(request.user):
# Already logged in, just return the view.
return view(request, *args, **kwargs)
# They are not logged in. See if they provided login credentials
#
if 'HTTP_AUTHORIZATION' in request.META:
auth = request.META['HTTP_AUTHORIZATION'].split()
if len(auth) == 2:
# NOTE: We are only support basic authentication for now.
if auth[0].lower() == "basic":
# Require Username:Host:Passwd
try:
uname, host, passwd = base64.b64decode(auth[1]).decode('ascii').split(':')
except:
response = HttpResponse(content="{message: not authenticated}",content_type="text/HTML; charset=utf-8")
response.status_code = 401
response['message'] = 'not authenticated'
return response
# Node Checking
try:
node = Nodes.objects.get(node_name=host, node_status=True)
except ObjectDoesNotExist:
response = HttpResponse(content="{message: node approved, contact admin}",
content_type="text/HTML; charset=utf-8")
response.status_code = 401
response['message'] = 'node not approved, contact admin'
return response
user = authenticate(username=uname, password=passwd)
if user is not None:
if user.is_active:
login(request, user)
request.user = user
return view(request, *args, **kwargs)
# Either they did not provide an authorization header or
# something in the authorization attempt failed. Send a 401
# back to them to ask them to authenticate.
response = HttpResponse(content="{message: not authenticated}",content_type="text/HTML; charset=utf-8")
response.status_code = 401
response['message'] = 'not authenticated'
return response
#############################################################################
def logged_in_or_basicauth(realm = ""):
"""
A simple decorator that requires a user to be logged in. If they are not
logged in the request is examined for a 'authorization' header.
If the header is present it is tested for basic authentication and
the user is logged in with the provided credentials.
If the header is not present a http 401 is sent back to the
requestor to provide credentials.
The purpose of this is that in several django projects I have needed
several specific views that need to support basic authentication, yet the
web site as a whole used django's provided authentication.
The uses for this are for urls that are access programmatically such as
by rss feed readers, yet the view requires a user to be logged in. Many rss
readers support supplying the authentication credentials via http basic
auth (and they do NOT support a redirect to a form where they post a
username/password.)
Use is simple:
@logged_in_or_basicauth
def your_view:
...
You can provide the name of the realm to ask for authentication within.
"""
def view_decorator(func):
def wrapper(request, *args, **kwargs):
return view_or_basicauth(func, request,
lambda u: u.is_authenticated(),
realm, *args, **kwargs)
return wrapper
return view_decorator
#############################################################################
def has_perm_or_basicauth(perm, realm = ""):
"""
This is similar to the above decorator 'logged_in_or_basicauth'
except that it requires the logged in user to have a specific
permission.
Use:
@logged_in_or_basicauth('asforums.view_forumcollection')
def your_view:
...
"""
def view_decorator(func):
def wrapper(request, *args, **kwargs):
return view_or_basicauth(func, request,
lambda u: u.has_perm(perm),
realm, *args, **kwargs)
return wrapper
return view_decorator
| apache-2.0 | -4,862,675,662,238,732,000 | 40.849206 | 123 | 0.591883 | false | 4.877891 | false | false | false |
ereOn/azmq | azmq/engines/base.py | 1 | 1843 | """
Base engine class.
"""
import asyncio
from pyslot import Signal
from ..common import (
CompositeClosableAsyncObject,
cancel_on_closing,
)
from ..errors import ProtocolError
from ..log import logger
class BaseEngine(CompositeClosableAsyncObject):
def __init__(
self,
*,
socket_type,
identity,
mechanism,
zap_client,
**kwargs
):
super().__init__(**kwargs)
self.socket_type = socket_type
self.identity = identity
self.mechanism = mechanism
self.zap_client = zap_client
self.on_connection_ready = Signal()
self.on_connection_lost = Signal()
self.on_connection_failure = Signal()
self.max_backoff_duration = 300 # 5 minutes.
self.min_backoff_duration = 0.001
self.current_backoff_duration = self.min_backoff_duration
def on_open(self, **kwargs):
super().on_open(**kwargs)
self.run_task = asyncio.ensure_future(self.run(), loop=self.loop)
@cancel_on_closing
async def run(self):
while not self.closing:
try:
result = await self.open_connection()
if isinstance(result, ProtocolError) and result.fatal:
logger.debug("Fatal error: %s. Not restarting.", result)
break
except asyncio.CancelledError:
break
except Exception as ex:
logger.debug("Connection error: %r.", ex)
else:
self.current_backoff_duration = self.min_backoff_duration
await asyncio.sleep(self.current_backoff_duration, loop=self.loop)
self.current_backoff_duration = min(
self.max_backoff_duration,
self.current_backoff_duration * 2,
)
| gpl-3.0 | -7,066,353,448,453,532,000 | 26.507463 | 78 | 0.578947 | false | 4.306075 | false | false | false |
Maasouza/MinVertexCover | src/heuristic.py | 1 | 1221 | import networkx as nx
from util import *
def heuristic_cover(graph , preprocess = False):
"""
heuristica
se preprocess entao
realiza o preprocessamento para remover vertices com apenas um vizinho
retornando os vertices ja visitados
enquanto existir vertices no grafo
v = vertice de maior grau de G
marcado[v]=1
adiciona v a cobertura
para cada u vizinho de v
marcado[u] = 1
remove u do grafo
remove g do grafo
retorna cobertura
"""
start = time.time()
g = nx.Graph()
g.add_edges_from(graph.edges())
if(preprocess):
cover,marked,visited = pre_process(g)
else:
cover = [False for x in range(len(g.nodes()))]
marked = [False for x in range(len(g.nodes()))]
visited = 0
while(visited!=len(graph.nodes())):
v = max_degree_vertex(g)
cover[v]=True
visited+=1
for u in g.neighbors(v):
visited+=1
g.remove_node(u)
g.remove_node(v)
end = time.time()
print("--- Heuristica")
print("\tExec time: "+str((end-start))+" sec")
return cover
| mit | 4,187,248,736,841,971,000 | 26.75 | 82 | 0.550369 | false | 3.688822 | false | false | false |
codeforamerica/rva-screening | tests/unit/screener/test_screener.py | 1 | 20571 | import datetime
from StringIO import StringIO
from werkzeug.datastructures import FileStorage
from tests.unit.test_base import BaseTestCase
from tests.unit.screener.utils import (
get_user,
get_service,
get_patient
)
from app.models import AppUser, Service, ServiceTranslation, Patient
from app.prescreening import calculate_fpl, calculate_pre_screen_results
from app.utils import translate_object
import add_data.add_service_data as add_service_data
class TestScreener(BaseTestCase):
def setUp(self):
super(TestScreener, self).setUp()
self.test_user = get_user()
def test_login_logout(self):
"""Test logging in and out."""
app_user = get_user()
self.login('[email protected]', 'password')
self.assert_template_used('index.html')
self.logout()
self.assertEquals(app_user.authenticated, False)
self.login('[email protected]', 'badpassword')
self.assert_template_used('security/login_user.html')
self.assertEquals(app_user.authenticated, False)
def test_index(self):
"""Test that the index page works as expected."""
response = self.login()
response = self.client.get('/index')
self.assert200(response)
self.assert_template_used('index.html')
def test_add_patient(self):
"""Test that adding a patient works as expected."""
# Check that the new patient page loads as expected.
self.login()
response = self.client.get('/new_patient')
self.assert200(response)
self.assert_template_used('patient_details.html')
# Check that you can't save a new patient without a name
response = self.client.post('/new_patient', data=dict(
gender='',
has_prescription_coverage_yn='N',
eligible_for_vets_benefits_yn='N'
))
self.assert200(response)
self.assertEquals(len(Patient.query.all()), 0)
# Check that a new patient saves
response = self.client.post('/new_patient', data=dict(
first_name='John',
last_name='Richmond',
dob='1950-01-01',
ssn='111-11-1111',
gender='',
has_prescription_coverage_yn='N',
eligible_for_vets_benefits_yn='N'
), follow_redirects=True)
saved_patient = Patient.query.first()
self.assertEquals(
saved_patient.first_name,
'John'
)
self.assertEquals(
saved_patient.last_name,
'Richmond'
)
self.assertEquals(
saved_patient.dob,
datetime.date(1950, 1, 1)
)
self.assertEquals(
saved_patient.ssn,
'111-11-1111'
)
# Check that user stays on patient details page after saving
self.assert_template_used('patient_details.html')
def test_update_patient(self):
"""Test that updating an existing patient works as expected."""
self.login()
patient = get_patient()
# Check that the patient details page loads for an existing patient
response = self.client.get('/patient_details/{}'.format(patient.id))
self.assert200(response)
self.assert_template_used('patient_details.html')
# Check that updates to the patient save, including many-to-one fields
post_data = dict(
first_name='James',
last_name='Richmond',
dob='1950-12-12',
ssn='222-22-2222',
medical_home="CAHN",
email='[email protected]',
has_transport_yn='N',
gender='M',
transgender='No',
race='AA',
ethnicity='NHL',
languages=['EN', 'ES'],
has_interpreter_yn='N',
education_level='High school',
marital_status='MAR',
veteran_yn='N',
housing_status='REN',
# years_living_in_area='5',
# months_living_in_area='1',
time_in_area='LESS THAN 6',
city_or_county_of_residence='Richmond',
temp_visa_yn='N',
student_status='Not a student',
employment_status='FT',
spouse_employment_status='PT',
years_unemployed='0',
months_unemployed='6',
spouse_years_unemployed='1',
spouse_months_unemployed='11',
years_at_current_employer='LESS',
spouse_years_at_current_employer='LESS',
last_healthcare='Last year at VCU ED',
insurance_status='N',
coverage_type='VCC',
has_prescription_coverage_yn='N',
has_vcc='Y',
eligible_insurance_types='NE',
applied_for_vets_benefits_yn='N',
eligible_for_vets_benefits_yn='N',
applied_for_medicaid_yn='N',
medicaid_date_effective='2015-01-01',
applied_for_ssd_yn='N',
ssd_date_effective='1999-12-12',
care_due_to_accident_yn='N',
accident_work_related_yn='N',
filed_taxes_yn='N',
claimed_as_dependent_yn='N',
how_food_and_shelter='Stay with sister',
how_other_expenses='Gets money from father'
)
post_data['phone_numbers-0-phone_number'] = '(111) 111-1111'
post_data['phone_numbers-0-number_description'] = 'CELL'
post_data['phone_numbers-1-phone_number'] = '(222) 222-2222'
post_data['phone_numbers-1-number_description'] = 'HOME'
post_data['addresses-0-address1'] = '1 Main St.'
post_data['addresses-0-address2'] = 'Apt. 1'
post_data['addresses-0-city'] = 'Richmond'
post_data['addresses-0-state'] = 'VA'
post_data['addresses-0-zip'] = '11111'
post_data['addresses-0-address_description'] = 'OWN'
post_data['addresses-1-address1'] = '1 Maple St.'
post_data['addresses-1-address2'] = ''
post_data['addresses-1-city'] = 'Richmond'
post_data['addresses-1-state'] = 'VA'
post_data['addresses-1-zip'] = '11111'
post_data['addresses-1-address_description'] = 'RELATIVE'
post_data['emergency_contacts-0-full_name'] = 'Jane Johnson'
post_data['emergency_contacts-0-relationship'] = 'mother'
post_data['emergency_contacts-0-phone_number'] = '(111) 111-1111'
post_data['emergency_contacts-1-full_name'] = 'Mary Richmond'
post_data['emergency_contacts-1-relationship'] = 'sister'
post_data['emergency_contacts-1-phone_number'] = '(222) 222-2222'
post_data['household_members-0-full_name'] = 'Michael Richmond'
post_data['household_members-0-dob'] = '2000-12-12'
post_data['household_members-0-ssn'] = '999-99-9999'
post_data['household_members-0-relationship'] = 'son'
post_data['household_members-1-full_name'] = '11111'
post_data['household_members-1-dob'] = '2006-02-28'
post_data['household_members-1-ssn'] = '888-88-8888'
post_data['household_members-1-relationship'] = 'Emily Richmond'
post_data['income_sources-0-source'] = 'job'
post_data['income_sources-0-monthly_amount'] = '1000'
post_data['income_sources-1-source'] = 'food stamps'
post_data['income_sources-1-monthly_amount'] = '200'
post_data['employers-0-employer_name'] = 'Target'
post_data['employers-0-phone_number'] = '(111) 111-1111'
post_data['employers-0-employee'] = 'Patient'
post_data['employers-0-start_date'] = '2014-01-01'
post_data['employers-1-employer_name'] = 'Walmart'
post_data['employers-1-phone_number'] = '(222) 222-2222'
post_data['employers-1-employee'] = 'Spouse'
post_data['employers-1-start_date'] = '1999-12-12'
response = self.client.post(
'/patient_details/{}'.format(patient.id),
data=post_data,
follow_redirects=True
)
saved_patient = Patient.query.first()
self.assertEquals(
saved_patient.first_name,
'James'
)
self.assertEquals(
saved_patient.last_name,
'Richmond'
)
self.assertEquals(
saved_patient.dob,
datetime.date(1950, 12, 12)
)
self.assertEquals(
saved_patient.ssn,
'222-22-2222'
)
self.assertEquals(saved_patient.phone_numbers.count(), 2)
self.assertEquals(saved_patient.addresses.count(), 2)
self.assertEquals(saved_patient.emergency_contacts.count(), 2)
self.assertEquals(saved_patient.household_members.count(), 2)
self.assertEquals(saved_patient.income_sources.count(), 2)
self.assertEquals(saved_patient.employers.count(), 2)
# Check that the user stays on patient details page after saving
self.assert_template_used('patient_details.html')
# Check that updated many-to-one fields save correctly
post_data['phone_numbers-0-phone_number'] = '(333) 333-3333'
post_data['phone_numbers-0-number_description'] = 'WORK'
response = self.client.post(
'/patient_details/{}'.format(patient.id),
data=post_data,
follow_redirects=True
)
self.assert200(response)
saved_patient = Patient.query.first()
self.assertEquals(saved_patient.phone_numbers[0].phone_number, '(333) 333-3333')
self.assertEquals(saved_patient.phone_numbers[0].number_description, 'WORK')
self.assert_template_used('patient_details.html')
# Check that deleting many-to-one fields works as expected
post_data['phone_numbers-0-phone_number'] = ''
post_data['phone_numbers-0-number_description'] = ''
response = self.client.post(
'/patient_details/{}'.format(patient.id),
data=post_data,
follow_redirects=True
)
self.assert200(response)
self.assertEquals(saved_patient.phone_numbers.count(), 1)
self.assertEquals(saved_patient.phone_numbers[0].phone_number, '(222) 222-2222')
self.assertEquals(saved_patient.phone_numbers[0].number_description, 'HOME')
self.assert_template_used('patient_details.html')
# def test_document_image(self):
# """Test that uploading document images works as expected."""
# self.login()
# patient = get_patient()
# # Check that multiple document image uploads save correctly
# with open('tests/unit/screener/test_image.jpg', 'rb') as test_image:
# img_string_io = StringIO(test_image.read())
# post_data = dict(
# first_name='James',
# last_name='Richmond',
# dob='1950-12-12',
# gender='',
# transgender='',
# race='',
# ethnicity='',
# coverage_type='',
# student_status='',
# employment_status='',
# marital_status='',
# housing_status='',
# veteran_yn='',
# insurance_status='',
# spouse_employment_status='',
# has_prescription_coverage_yn='N',
# eligible_for_vets_benefits_yn='N',
# eligible_insurance_types='',
# applied_for_ssd_yn='',
# accident_work_related_yn='',
# has_vcc='',
# filed_taxes_yn='',
# applied_for_medicaid_yn='',
# has_interpreter_yn='',
# applied_for_vets_benefits_yn='',
# has_transport_yn='',
# claimed_as_dependent_yn='',
# temp_visa_yn='',
# care_due_to_accident_yn=''
# )
# post_data['document_images-0-file_name'] = FileStorage(img_string_io, filename='test_image.jpg')
# post_data['document_images-0-file_description'] = 'Test'
# post_data['document_images-1-file_name'] = FileStorage(img_string_io, filename='test_image_2.jpg')
# post_data['document_images-1-file_description'] = 'Test 2'
# response = self.client.post(
# '/patient_details/{}'.format(patient.id),
# data=post_data,
# follow_redirects=True
# )
# self.assert200(response)
# saved_patient = Patient.query.first()
# self.assertEquals(saved_patient.document_images.count(), 2)
# # Check that the page that displays the images loads correctly
# for image in saved_patient.document_images:
# response = self.client.get(
# '/document_image/{}'.format(image.id)
# )
# self.assert200(response)
# self.assert_template_used('documentimage.html')
def test_delete_patient(self):
"""Test that hard-deleting a patient works as expected."""
user = get_user()
self.login()
patient = get_patient(user)
response = self.client.get('/delete/{}'.format(patient.id), follow_redirects=True)
self.assert200(response)
# Check that patient was deleted
self.assertTrue(Patient.query.get(patient.id).deleted)
# Check that user is redirected to index page
self.assert_template_used('index.html')
def test_new_prescreening(self):
"""Test that the new prescreening page works as expected."""
response = self.client.get('/new_prescreening')
self.assert200(response)
self.assert_template_used('new_prescreening.html')
def test_patient_history(self):
"""Test that the edit history page works as expected."""
self.login()
patient = get_patient()
response = self.client.get('/patient_history/{}'.format(patient.id))
self.assert200(response)
self.assert_template_used('patient_history.html')
def test_patient_share(self):
"""Test that the share patient page works as expected."""
self.login()
patient = get_patient()
response = self.client.get('/patient_share/{}'.format(patient.id))
self.assert200(response)
self.assert_template_used('patient_share.html')
def test_add_referral(self):
"""Test that adding a referral works as expected."""
self.login()
user = AppUser.query.first()
patient = get_patient()
response = self.client.post('/add_referral', data=dict(
patient_id=patient.id,
app_user_id=user.id,
service_id='1',
notes='this is a note'
), follow_redirects=True)
self.assert200(response)
referral = Patient.query.first().referrals[0]
self.assertEquals(referral.from_app_user_id, user.id)
self.assertEquals(referral.to_service_id, 1)
def test_user(self):
"""Test that the user profile page works as expected."""
user = get_user()
response = self.client.get('/user/{}'.format(user.id))
self.assert200(response)
self.assert_template_used('user_profile.html')
def test_service(self):
"""Test that the service profile page works as expected."""
service = get_service()
response = self.client.get('/service/{}'.format(service.id))
self.assert200(response)
self.assert_template_used('service_profile.html')
def test_fpl_calculation(self):
"""Test that calculating a patient's Federal Poverty Level percentage
works as expected.
"""
self.assertEquals(calculate_fpl(8, 40890), 100)
self.assertEquals(calculate_fpl(1, 0), 0)
def test_prescreening_basic(self):
"""Test that the prescreening input page works as expected."""
# Make sure the prescreening input page loads
response = self.client.get('/prescreening_basic')
self.assert200(response)
self.assert_template_used('prescreening_basic.html')
# Make sure submitting the form works
response = self.client.post('/prescreening_basic', data=dict(
household_size='5',
household_income='1000',
has_health_insurance='N',
is_eligible_for_medicaid='N'
))
self.assertRedirects(response, '/prescreening_results')
def test_calculate_pre_screen_results(self):
"""Test that calculating prescreening results works as expected."""
add_service_data.main(self.app)
daily_planet = Service.query.filter(Service.name == 'Daily Planet').first()
result = calculate_pre_screen_results(
fpl=0,
has_health_insurance='no',
is_eligible_for_medicaid='no',
service_ids=[daily_planet.id]
)[0]
self.assertEquals(result['name'], daily_planet.name)
self.assertEquals(result['eligible'], True)
self.assertEquals(result['fpl_cutoff'], daily_planet.fpl_cutoff)
self.assertEquals(result['fpl_eligible'], True)
self.assertEquals(result['uninsured_only_yn'], daily_planet.uninsured_only_yn)
self.assertEquals(
result['medicaid_ineligible_only_yn'],
daily_planet.medicaid_ineligible_only_yn
)
self.assertEquals(
result['residence_requirement_yn'],
daily_planet.residence_requirement_yn
)
self.assertEquals(
result['time_in_area_requirement_yn'],
daily_planet.time_in_area_requirement_yn
)
self.assertEquals(result['sliding_scale'], 'Nominal')
self.assertEquals(result['sliding_scale_range'], 'between 0% and 100%')
self.assertEquals(result['id'], daily_planet.id)
def test_patient_screening_history(self):
"""Test that the patient referral/screening history page works as expected."""
add_service_data.main(self.app)
user = get_user()
user.service = Service.query.filter(Service.name == 'Daily Planet').first()
self.login()
patient = get_patient()
# Make sure the page loads as expected
response = self.client.get('/patient_screening_history/{}'.format(patient.id))
self.assert200(response)
self.assert_template_used('patient_screening_history.html')
def test_patient_overview(self):
"""Test that the patient overview and screening result page works as expected."""
add_service_data.main(self.app)
user = get_user()
user.service = Service.query.filter(Service.name == 'Daily Planet').first()
self.login()
patient = get_patient(user)
# Make sure the page loads as expected
response = self.client.get('/patient_overview/{}'.format(patient.id))
self.assert200(response)
self.assert_template_used('patient_overview.html')
# Make sure you can save a new screening result
response = self.client.post(
'/patient_overview/{}'.format(patient.id),
data=dict(
eligible_yn='Y',
sliding_scale_id=user.service.sliding_scales[0].id,
notes='Test'
),
follow_redirects=True
)
self.assert200(response)
# User should stay on the same page after saving
self.assert_template_used('patient_overview.html')
screening_result = Patient.query.first().screening_results[0]
self.assertEquals(screening_result.service_id, user.service_id)
self.assertEquals(screening_result.eligible_yn, 'Y')
self.assertEquals(screening_result.sliding_scale_id, user.service.sliding_scales[0].id)
self.assertEquals(screening_result.notes, 'Test')
def test_translate_object(self):
"""Test that translating text from the database works as expected."""
# Test that the object stays the same if no translation exists
service = Service(
name='Richmond Clinic',
description='English description'
)
translated_service = translate_object(service, 'es_US')
self.assertEquals(translated_service.description, 'English description')
# Test that the object is translated when a translation exists
service.translations.append(
ServiceTranslation(
language_code='es_US',
description='Spanish description'
)
)
translated_service = translate_object(service, 'es_US')
self.assertEquals(translated_service.description, 'Spanish description')
| bsd-3-clause | -2,623,195,424,517,089,000 | 40.142 | 108 | 0.596374 | false | 3.75726 | true | false | false |
fyookball/electrum | lib/blockchain.py | 1 | 25888 | # Electrum - lightweight Bitcoin client
# Copyright (C) 2012 [email protected]
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
import threading
from typing import Optional
from . import asert_daa
from . import networks
from . import util
from .bitcoin import *
class VerifyError(Exception):
'''Exception used for blockchain verification errors.'''
CHUNK_FORKS = -3
CHUNK_BAD = -2
CHUNK_LACKED_PROOF = -1
CHUNK_ACCEPTED = 0
def bits_to_work(bits):
return (1 << 256) // (bits_to_target(bits) + 1)
def bits_to_target(bits):
if bits == 0:
return 0
size = bits >> 24
assert size <= 0x1d
word = bits & 0x00ffffff
assert 0x8000 <= word <= 0x7fffff
if size <= 3:
return word >> (8 * (3 - size))
else:
return word << (8 * (size - 3))
def target_to_bits(target):
if target == 0:
return 0
target = min(target, MAX_TARGET)
size = (target.bit_length() + 7) // 8
mask64 = 0xffffffffffffffff
if size <= 3:
compact = (target & mask64) << (8 * (3 - size))
else:
compact = (target >> (8 * (size - 3))) & mask64
if compact & 0x00800000:
compact >>= 8
size += 1
assert compact == (compact & 0x007fffff)
assert size < 256
return compact | size << 24
HEADER_SIZE = 80 # bytes
MAX_BITS = 0x1d00ffff
MAX_TARGET = bits_to_target(MAX_BITS)
# indicates no header in data file
NULL_HEADER = bytes([0]) * HEADER_SIZE
NULL_HASH_BYTES = bytes([0]) * 32
NULL_HASH_HEX = NULL_HASH_BYTES.hex()
def serialize_header(res):
s = int_to_hex(res.get('version'), 4) \
+ rev_hex(res.get('prev_block_hash')) \
+ rev_hex(res.get('merkle_root')) \
+ int_to_hex(int(res.get('timestamp')), 4) \
+ int_to_hex(int(res.get('bits')), 4) \
+ int_to_hex(int(res.get('nonce')), 4)
return s
def deserialize_header(s, height):
h = {}
h['version'] = int.from_bytes(s[0:4], 'little')
h['prev_block_hash'] = hash_encode(s[4:36])
h['merkle_root'] = hash_encode(s[36:68])
h['timestamp'] = int.from_bytes(s[68:72], 'little')
h['bits'] = int.from_bytes(s[72:76], 'little')
h['nonce'] = int.from_bytes(s[76:80], 'little')
h['block_height'] = height
return h
def hash_header_hex(header_hex):
return hash_encode(Hash(bfh(header_hex)))
def hash_header(header):
if header is None:
return NULL_HASH_HEX
if header.get('prev_block_hash') is None:
header['prev_block_hash'] = '00'*32
return hash_header_hex(serialize_header(header))
blockchains = {}
def read_blockchains(config):
blockchains[0] = Blockchain(config, 0, None)
fdir = os.path.join(util.get_headers_dir(config), 'forks')
if not os.path.exists(fdir):
os.mkdir(fdir)
l = filter(lambda x: x.startswith('fork_'), os.listdir(fdir))
l = sorted(l, key = lambda x: int(x.split('_')[1]))
for filename in l:
parent_base_height = int(filename.split('_')[1])
base_height = int(filename.split('_')[2])
b = Blockchain(config, base_height, parent_base_height)
blockchains[b.base_height] = b
return blockchains
def check_header(header):
if type(header) is not dict:
return False
for b in blockchains.values():
if b.check_header(header):
return b
return False
def can_connect(header):
for b in blockchains.values():
if b.can_connect(header):
return b
return False
def verify_proven_chunk(chunk_base_height, chunk_data):
chunk = HeaderChunk(chunk_base_height, chunk_data)
header_count = len(chunk_data) // HEADER_SIZE
prev_header = None
prev_header_hash = None
for i in range(header_count):
header = chunk.get_header_at_index(i)
# Check the chain of hashes for all headers preceding the proven one.
this_header_hash = hash_header(header)
if i > 0:
if prev_header_hash != header.get('prev_block_hash'):
raise VerifyError("prev hash mismatch: %s vs %s" % (prev_header_hash, header.get('prev_block_hash')))
prev_header_hash = this_header_hash
# Copied from electrumx
def root_from_proof(hash, branch, index):
hash_func = Hash
for elt in branch:
if index & 1:
hash = hash_func(elt + hash)
else:
hash = hash_func(hash + elt)
index >>= 1
if index:
raise ValueError('index out of range for branch')
return hash
class HeaderChunk:
def __init__(self, base_height, data):
self.base_height = base_height
self.header_count = len(data) // HEADER_SIZE
self.headers = [deserialize_header(data[i * HEADER_SIZE : (i + 1) * HEADER_SIZE],
base_height + i)
for i in range(self.header_count)]
def __repr__(self):
return "HeaderChunk(base_height={}, header_count={})".format(self.base_height, self.header_count)
def get_count(self):
return self.header_count
def contains_height(self, height):
return height >= self.base_height and height < self.base_height + self.header_count
def get_header_at_height(self, height):
assert self.contains_height(height)
return self.get_header_at_index(height - self.base_height)
def get_header_at_index(self, index):
return self.headers[index]
class Blockchain(util.PrintError):
"""
Manages blockchain headers and their verification
"""
def __init__(self, config, base_height, parent_base_height):
self.config = config
self.catch_up = None # interface catching up
self.base_height = base_height
self.parent_base_height = parent_base_height
self.lock = threading.Lock()
with self.lock:
self.update_size()
def __repr__(self):
return "<{}.{} {}>".format(__name__, type(self).__name__, self.format_base())
def format_base(self):
return "{}@{}".format(self.get_name(), self.get_base_height())
def parent(self):
return blockchains[self.parent_base_height]
def get_max_child(self):
children = list(filter(lambda y: y.parent_base_height==self.base_height, blockchains.values()))
return max([x.base_height for x in children]) if children else None
def get_base_height(self):
mc = self.get_max_child()
return mc if mc is not None else self.base_height
def get_branch_size(self):
return self.height() - self.get_base_height() + 1
def get_name(self):
return self.get_hash(self.get_base_height()).lstrip('00')[0:10]
def check_header(self, header):
header_hash = hash_header(header)
height = header.get('block_height')
return header_hash == self.get_hash(height)
def fork(parent, header):
base_height = header.get('block_height')
self = Blockchain(parent.config, base_height, parent.base_height)
open(self.path(), 'w+').close()
self.save_header(header)
return self
def height(self):
return self.base_height + self.size() - 1
def size(self):
with self.lock:
return self._size
def update_size(self):
p = self.path()
self._size = os.path.getsize(p)//HEADER_SIZE if os.path.exists(p) else 0
def verify_header(self, header, prev_header, bits=None):
prev_header_hash = hash_header(prev_header)
this_header_hash = hash_header(header)
if prev_header_hash != header.get('prev_block_hash'):
raise VerifyError("prev hash mismatch: %s vs %s" % (prev_header_hash, header.get('prev_block_hash')))
# We do not need to check the block difficulty if the chain of linked header hashes was proven correct against our checkpoint.
if bits is not None:
# checkpoint BitcoinCash fork block
if (header.get('block_height') == networks.net.BITCOIN_CASH_FORK_BLOCK_HEIGHT and hash_header(header) != networks.net.BITCOIN_CASH_FORK_BLOCK_HASH):
err_str = "block at height %i is not cash chain fork block. hash %s" % (header.get('block_height'), hash_header(header))
raise VerifyError(err_str)
if bits != header.get('bits'):
raise VerifyError("bits mismatch: %s vs %s" % (bits, header.get('bits')))
target = bits_to_target(bits)
if int('0x' + this_header_hash, 16) > target:
raise VerifyError("insufficient proof of work: %s vs target %s" % (int('0x' + this_header_hash, 16), target))
def verify_chunk(self, chunk_base_height, chunk_data):
chunk = HeaderChunk(chunk_base_height, chunk_data)
prev_header = None
if chunk_base_height != 0:
prev_header = self.read_header(chunk_base_height - 1)
header_count = len(chunk_data) // HEADER_SIZE
for i in range(header_count):
header = chunk.get_header_at_index(i)
# Check the chain of hashes and the difficulty.
bits = self.get_bits(header, chunk)
self.verify_header(header, prev_header, bits)
prev_header = header
def path(self):
d = util.get_headers_dir(self.config)
filename = 'blockchain_headers' if self.parent_base_height is None else os.path.join('forks', 'fork_%d_%d'%(self.parent_base_height, self.base_height))
return os.path.join(d, filename)
def save_chunk(self, base_height, chunk_data):
chunk_offset = (base_height - self.base_height) * HEADER_SIZE
if chunk_offset < 0:
chunk_data = chunk_data[-chunk_offset:]
chunk_offset = 0
# Headers at and before the verification checkpoint are sparsely filled.
# Those should be overwritten and should not truncate the chain.
top_height = base_height + (len(chunk_data) // HEADER_SIZE) - 1
truncate = top_height > networks.net.VERIFICATION_BLOCK_HEIGHT
self.write(chunk_data, chunk_offset, truncate)
self.swap_with_parent()
def swap_with_parent(self):
if self.parent_base_height is None:
return
parent_branch_size = self.parent().height() - self.base_height + 1
if parent_branch_size >= self.size():
return
self.print_error("swap", self.base_height, self.parent_base_height)
parent_base_height = self.parent_base_height
base_height = self.base_height
parent = self.parent()
with open(self.path(), 'rb') as f:
my_data = f.read()
with open(parent.path(), 'rb') as f:
f.seek((base_height - parent.base_height)*HEADER_SIZE)
parent_data = f.read(parent_branch_size*HEADER_SIZE)
self.write(parent_data, 0)
parent.write(my_data, (base_height - parent.base_height)*HEADER_SIZE)
# store file path
for b in blockchains.values():
b.old_path = b.path()
# swap parameters
self.parent_base_height = parent.parent_base_height; parent.parent_base_height = parent_base_height
self.base_height = parent.base_height; parent.base_height = base_height
self._size = parent._size; parent._size = parent_branch_size
# move files
for b in blockchains.values():
if b in [self, parent]: continue
if b.old_path != b.path():
self.print_error("renaming", b.old_path, b.path())
os.rename(b.old_path, b.path())
# update pointers
blockchains[self.base_height] = self
blockchains[parent.base_height] = parent
def write(self, data, offset, truncate=True):
filename = self.path()
with self.lock:
with open(filename, 'rb+') as f:
if truncate and offset != self._size*HEADER_SIZE:
f.seek(offset)
f.truncate()
f.seek(offset)
f.write(data)
f.flush()
os.fsync(f.fileno())
self.update_size()
def save_header(self, header):
delta = header.get('block_height') - self.base_height
data = bfh(serialize_header(header))
assert delta == self.size()
assert len(data) == HEADER_SIZE
self.write(data, delta*HEADER_SIZE)
self.swap_with_parent()
def read_header(self, height, chunk=None):
# If the read is done within an outer call with local unstored header data, we first look in the chunk data currently being processed.
if chunk is not None and chunk.contains_height(height):
return chunk.get_header_at_height(height)
assert self.parent_base_height != self.base_height
if height < 0:
return
if height < self.base_height:
return self.parent().read_header(height)
if height > self.height():
return
delta = height - self.base_height
name = self.path()
if os.path.exists(name):
with open(name, 'rb') as f:
f.seek(delta * HEADER_SIZE)
h = f.read(HEADER_SIZE)
# Is it a pre-checkpoint header that has never been requested?
if h == NULL_HEADER:
return None
return deserialize_header(h, height)
def get_hash(self, height):
if height == -1:
return NULL_HASH_HEX
elif height == 0:
return networks.net.GENESIS
return hash_header(self.read_header(height))
# Not used.
def BIP9(self, height, flag):
v = self.read_header(height)['version']
return ((v & 0xE0000000) == 0x20000000) and ((v & flag) == flag)
def get_median_time_past(self, height, chunk=None):
if height < 0:
return 0
times = [
self.read_header(h, chunk)['timestamp']
for h in range(max(0, height - 10), height + 1)
]
return sorted(times)[len(times) // 2]
def get_suitable_block_height(self, suitableheight, chunk=None):
#In order to avoid a block in a very skewed timestamp to have too much
#influence, we select the median of the 3 top most block as a start point
#Reference: github.com/Bitcoin-ABC/bitcoin-abc/master/src/pow.cpp#L201
blocks2 = self.read_header(suitableheight, chunk)
blocks1 = self.read_header(suitableheight-1, chunk)
blocks = self.read_header(suitableheight-2, chunk)
if (blocks['timestamp'] > blocks2['timestamp'] ):
blocks,blocks2 = blocks2,blocks
if (blocks['timestamp'] > blocks1['timestamp'] ):
blocks,blocks1 = blocks1,blocks
if (blocks1['timestamp'] > blocks2['timestamp'] ):
blocks1,blocks2 = blocks2,blocks1
return blocks1['block_height']
_cached_asert_anchor: Optional[asert_daa.Anchor] = None # cached Anchor, per-Blockchain instance
def get_asert_anchor(self, prevheader, mtp, chunk=None):
if networks.net.asert_daa.anchor is not None:
# Checkpointed (hard-coded) value exists, just use that
return networks.net.asert_daa.anchor
if (self._cached_asert_anchor is not None
and self._cached_asert_anchor.height <= prevheader['block_height']):
return self._cached_asert_anchor
# ****
# This may be slow -- we really should be leveraging the hard-coded
# checkpointed value. TODO: add hard-coded value to networks.py after
# Nov. 15th 2020 HF to ASERT DAA
# ****
anchor = prevheader
activation_mtp = networks.net.asert_daa.MTP_ACTIVATION_TIME
while mtp >= activation_mtp:
ht = anchor['block_height']
prev = self.read_header(ht - 1, chunk)
if prev is None:
self.print_error("get_asert_anchor missing header {}".format(ht - 1))
return None
prev_mtp = self.get_median_time_past(ht - 1, chunk)
if prev_mtp < activation_mtp:
# Ok, use this as anchor -- since it is the first in the chain
# after activation.
bits = anchor['bits']
self._cached_asert_anchor = asert_daa.Anchor(ht, bits, prev['timestamp'])
return self._cached_asert_anchor
mtp = prev_mtp
anchor = prev
def get_bits(self, header, chunk=None):
'''Return bits for the given height.'''
# Difficulty adjustment interval?
height = header['block_height']
# Genesis
if height == 0:
return MAX_BITS
prior = self.read_header(height - 1, chunk)
if prior is None:
raise Exception("get_bits missing header {} with chunk {!r}".format(height - 1, chunk))
bits = prior['bits']
# NOV 13 HF DAA and/or ASERT DAA
prevheight = height - 1
daa_mtp = self.get_median_time_past(prevheight, chunk)
# ASERTi3-2d DAA activated on Nov. 15th 2020 HF
if daa_mtp >= networks.net.asert_daa.MTP_ACTIVATION_TIME:
header_ts = header['timestamp']
prev_ts = prior['timestamp']
if networks.net.TESTNET:
# testnet 20 minute rule
if header_ts - prev_ts > 20*60:
return MAX_BITS
anchor = self.get_asert_anchor(prior, daa_mtp, chunk)
assert anchor is not None, "Failed to find ASERT anchor block for chain {!r}".format(self)
return networks.net.asert_daa.next_bits_aserti3_2d(anchor.bits,
prev_ts - anchor.prev_time,
prevheight - anchor.height)
# Mon Nov 13 19:06:40 2017 DAA HF
if prevheight >= networks.net.CW144_HEIGHT:
if networks.net.TESTNET:
# testnet 20 minute rule
if header['timestamp'] - prior['timestamp'] > 20*60:
return MAX_BITS
# determine block range
daa_starting_height = self.get_suitable_block_height(prevheight-144, chunk)
daa_ending_height = self.get_suitable_block_height(prevheight, chunk)
# calculate cumulative work (EXcluding work from block daa_starting_height, INcluding work from block daa_ending_height)
daa_cumulative_work = 0
for daa_i in range (daa_starting_height+1, daa_ending_height+1):
daa_prior = self.read_header(daa_i, chunk)
daa_bits_for_a_block = daa_prior['bits']
daa_work_for_a_block = bits_to_work(daa_bits_for_a_block)
daa_cumulative_work += daa_work_for_a_block
# calculate and sanitize elapsed time
daa_starting_timestamp = self.read_header(daa_starting_height, chunk)['timestamp']
daa_ending_timestamp = self.read_header(daa_ending_height, chunk)['timestamp']
daa_elapsed_time = daa_ending_timestamp - daa_starting_timestamp
if (daa_elapsed_time>172800):
daa_elapsed_time=172800
if (daa_elapsed_time<43200):
daa_elapsed_time=43200
# calculate and return new target
daa_Wn = (daa_cumulative_work*600) // daa_elapsed_time
daa_target = (1 << 256) // daa_Wn - 1
daa_retval = target_to_bits(daa_target)
daa_retval = int(daa_retval)
return daa_retval
#END OF NOV-2017 DAA
N_BLOCKS = networks.net.LEGACY_POW_RETARGET_BLOCKS # Normally 2016
if height % N_BLOCKS == 0:
return self.get_new_bits(height, chunk)
if networks.net.TESTNET:
# testnet 20 minute rule
if header['timestamp'] - prior['timestamp'] > 20*60:
return MAX_BITS
# special case for a newly started testnet (such as testnet4)
if height < N_BLOCKS:
return MAX_BITS
return self.read_header(height // N_BLOCKS * N_BLOCKS, chunk)['bits']
# bitcoin cash EDA
# Can't go below minimum, so early bail
if bits == MAX_BITS:
return bits
mtp_6blocks = self.get_median_time_past(height - 1, chunk) - self.get_median_time_past(height - 7, chunk)
if mtp_6blocks < 12 * 3600:
return bits
# If it took over 12hrs to produce the last 6 blocks, increase the
# target by 25% (reducing difficulty by 20%).
target = bits_to_target(bits)
target += target >> 2
return target_to_bits(target)
def get_new_bits(self, height, chunk=None):
N_BLOCKS = networks.net.LEGACY_POW_RETARGET_BLOCKS
assert height % N_BLOCKS == 0
# Genesis
if height == 0:
return MAX_BITS
first = self.read_header(height - N_BLOCKS, chunk)
prior = self.read_header(height - 1, chunk)
prior_target = bits_to_target(prior['bits'])
target_span = networks.net.LEGACY_POW_TARGET_TIMESPAN # usually: 14 * 24 * 60 * 60 = 2 weeks
span = prior['timestamp'] - first['timestamp']
span = min(max(span, target_span // 4), target_span * 4)
new_target = (prior_target * span) // target_span
return target_to_bits(new_target)
def can_connect(self, header, check_height=True):
height = header['block_height']
if check_height and self.height() != height - 1:
return False
if height == 0:
return hash_header(header) == networks.net.GENESIS
previous_header = self.read_header(height -1)
if not previous_header:
return False
prev_hash = hash_header(previous_header)
if prev_hash != header.get('prev_block_hash'):
return False
bits = self.get_bits(header)
try:
self.verify_header(header, previous_header, bits)
except VerifyError as e:
self.print_error('verify header {} failed at height {:d}: {}'
.format(hash_header(header), height, e))
return False
return True
def connect_chunk(self, base_height, hexdata, proof_was_provided=False):
chunk = HeaderChunk(base_height, hexdata)
header_count = len(hexdata) // HEADER_SIZE
top_height = base_height + header_count - 1
# We know that chunks before the checkpoint height, end at the checkpoint height, and
# will be guaranteed to be covered by the checkpointing. If no proof is provided then
# this is wrong.
if top_height <= networks.net.VERIFICATION_BLOCK_HEIGHT:
if not proof_was_provided:
return CHUNK_LACKED_PROOF
# We do not truncate when writing chunks before the checkpoint, and there's no
# way at this time to know if we have this chunk, or even a consecutive subset.
# So just overwrite it.
elif base_height < networks.net.VERIFICATION_BLOCK_HEIGHT and proof_was_provided:
# This was the initial verification request which gets us enough leading headers
# that we can calculate difficulty and verify the headers that we add to this
# chain above the verification block height.
if top_height <= self.height():
return CHUNK_ACCEPTED
elif base_height != self.height() + 1:
# This chunk covers a segment of this blockchain which we already have headers
# for. We need to verify that there isn't a split within the chunk, and if
# there is, indicate the need for the server to fork.
intersection_height = min(top_height, self.height())
chunk_header = chunk.get_header_at_height(intersection_height)
our_header = self.read_header(intersection_height)
if hash_header(chunk_header) != hash_header(our_header):
return CHUNK_FORKS
if intersection_height <= self.height():
return CHUNK_ACCEPTED
else:
# This base of this chunk joins to the top of the blockchain in theory.
# We need to rule out the case where the chunk is actually a fork at the
# connecting height.
our_header = self.read_header(self.height())
chunk_header = chunk.get_header_at_height(base_height)
if hash_header(our_header) != chunk_header['prev_block_hash']:
return CHUNK_FORKS
try:
if not proof_was_provided:
self.verify_chunk(base_height, hexdata)
self.save_chunk(base_height, hexdata)
return CHUNK_ACCEPTED
except VerifyError as e:
self.print_error('verify_chunk failed: {}'.format(e))
return CHUNK_BAD
| mit | -3,008,171,902,361,770,000 | 39.323988 | 160 | 0.600587 | false | 3.760604 | false | false | false |
gattazr/cohorte-herald | python/herald/transports/xmpp/utils.py | 1 | 11998 | #!/usr/bin/python
# -- Content-Encoding: UTF-8 --
"""
Utilities to debug sleekXMPP objects
:author: Thomas Calmant
:copyright: Copyright 2014, isandlaTech
:license: Apache License 2.0
:version: 0.0.5
:status: Alpha
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Module version
__version_info__ = (0, 0, 5)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
# SleekXMPP
import sleekxmpp
# Standard library
import logging
import threading
try:
# Python 2
# pylint: disable=F0401
from StringIO import StringIO
except ImportError:
# Python 3
from io import StringIO
# ------------------------------------------------------------------------------
def dump_element(element):
"""
Dumps the content of the given ElementBase object to a string
:param element: An ElementBase object
:return: A full description of its content
:raise TypeError: Invalid object
"""
# Check type
try:
assert isinstance(element, sleekxmpp.ElementBase)
except AssertionError:
raise TypeError("Not an ElementBase: {0}".format(type(element)))
# Prepare string
output = StringIO()
output.write("ElementBase : {0}\n".format(type(element)))
output.write("- name......: {0}\n".format(element.name))
output.write("- namespace.: {0}\n".format(element.namespace))
output.write("- interfaces:\n")
for itf in sorted(element.interfaces):
output.write("\t- {0}: {1}\n".format(itf, element[itf]))
if element.sub_interfaces:
output.write("- sub-interfaces:\n")
for itf in sorted(element.sub_interfaces):
output.write("\t- {0}: {1}\n".format(itf, element[itf]))
return output.getvalue()
# ------------------------------------------------------------------------------
class RoomData(object):
"""
Stores data about a room to create
"""
def __init__(self, room, nick, configuration, callback, errback):
"""
Stores the description of the room.
:param room: Bare JID of the room
:param nick: Nick of the room creator
:param configuration: Room configuration
:param callback: Method to callback on success
:param errback: Method to callback on error
"""
self.room = room
self.nick = nick
self.configuration = configuration
self.callback = callback
self.errback = errback
class RoomCreator(object):
"""
XMPP Room creation utility.
The associated client must have registered plug-ins XEP-0004 and XEP-0045.
"""
def __init__(self, client, logname=None):
"""
Sets up the room creator
The given client must have registered plug-ins XEP-0004 and XEP-0045.
:param client: A ClientXMPP object
:param logname: Logger name
"""
# XMPP client
self.__xmpp = client
self.__muc = client['xep_0045']
# Logger
self.__logger = logging.getLogger(logname)
# Room name -> RoomData
self.__rooms = {}
# Some thread safety...
self.__lock = threading.Lock()
def create_room(self, room, service, nick, config=None,
callback=None, errback=None, room_jid=None):
"""
Prepares the creation of a room.
The callback is a method with two arguments:
- room: Bare JID of the room
- nick: Nick used to create the room
The errback is a method with 4 arguments:
- room: Bare JID of the room
- nick: Nick used to create the room
- condition: error category (XMPP specification or "not-owner")
- text: description of the error
:param room: Name of the room
:param service: Name of the XMPP MUC service
:param config: Configuration of the room
:param callback: Method called back on success
:param errback: Method called on error
:param room_jid: Forced room JID
"""
self.__logger.debug("Creating room: %s", room)
with self.__lock:
if not room_jid:
# Generate/Format the room JID if not given
room_jid = sleekxmpp.JID(local=room, domain=service).bare
self.__logger.debug("... Room JID: %s", room_jid)
if not self.__rooms:
# First room to create: register to events
self.__xmpp.add_event_handler("presence", self.__on_presence)
# Store information
self.__rooms[room_jid] = RoomData(room_jid, nick, config,
callback, errback)
# Send the presence, i.e. request creation of the room
self.__muc.joinMUC(room_jid, nick)
def __safe_callback(self, room_data):
"""
Safe use of the callback method, to avoid errors propagation
:param room_data: A RoomData object
"""
method = room_data.callback
if method is not None:
try:
method(room_data.room, room_data.nick)
except Exception as ex:
self.__logger.exception("Error calling back room creator: %s",
ex)
def __safe_errback(self, room_data, err_condition, err_text):
"""
Safe use of the callback method, to avoid errors propagation
:param room_data: A RoomData object
:param err_condition: Category of error
:param err_text: Description of the error
"""
method = room_data.errback
if method is not None:
try:
method(room_data.room, room_data.nick, err_condition, err_text)
except Exception as ex:
self.__logger.exception("Error calling back room creator: %s",
ex)
def __on_presence(self, data):
"""
Got a presence stanza
"""
room_jid = data['from'].bare
muc_presence = data['muc']
room = muc_presence['room']
nick = muc_presence['nick']
with self.__lock:
try:
# Get room state machine
room_data = self.__rooms[room]
if room_data.nick != nick:
# Not about the room creator
return
except KeyError:
# Unknown room (or not a room)
return
else:
# Clean up, as we got what we wanted
del self.__rooms[room]
if not self.__rooms:
# No more rooms: no need to listen to presence anymore
self.__xmpp.del_event_handler("presence", self.__on_presence)
if data['type'] == 'error':
# Got an error: update the state machine and clean up
self.__safe_errback(room_data, data['error']['condition'],
data['error']['text'])
elif muc_presence['affiliation'] != 'owner':
# We are not the owner the room: consider it an error
self.__safe_errback(room_data, 'not-owner',
'We are not the owner of the room')
else:
# Success: we own the room
# Setup room configuration
try:
config = self.__muc.getRoomConfig(room_jid)
except ValueError:
# Can't differentiate IQ errors from a "no configuration"
# result: consider it OK
self.__logger.warning("Can't get the configuration form for "
"XMPP room %s", room_jid)
self.__safe_callback(room_data)
else:
# Prepare our configuration
custom_values = room_data.configuration or {}
# Filter options that are not known from the server
known_fields = config['fields']
to_remove = [key for key in custom_values
if key not in known_fields]
for key in to_remove:
del custom_values[key]
# Send configuration (use a new form to avoid OpenFire to have
# an internal error)
form = self.__xmpp['xep_0004'].make_form("submit")
form['values'] = custom_values
self.__muc.setRoomConfig(room_jid, form)
# Call back the creator
self.__safe_callback(room_data)
# ------------------------------------------------------------------------------
class MarksCallback(object):
"""
Calls back a method when a list of elements have been marked
"""
def __init__(self, elements, callback, logname=None):
"""
Sets up the count down.
The callback method must accept two arguments: successful elements and
erroneous ones. The elements must be hashable, as sets are used
internally.
:param elements: A list of elements to wait for
:param callback: Method to call back when all elements have been
marked
:param logname: The name of the logger to use in case of error
"""
self.__logger = logging.getLogger(logname or __name__)
# Use a dictionary to keep a reference to the original element
self.__elements = {element: element for element in elements}
self.__callback = callback
self.__called = False
self.__successes = set()
self.__errors = set()
def __call(self):
"""
Calls the callback method
"""
try:
if self.__callback is not None:
self.__callback(self.__successes, self.__errors)
except Exception as ex:
self.__logger.exception("Error calling back count down "
"handler: %s", ex)
else:
self.__called = True
def __mark(self, element, mark_set):
"""
Marks an element
:param element: The element to mark
:param mark_set: The set corresponding to the mark
:return: True if the element was known
"""
try:
# The given element can be of a different type than the original
# one (JID instead of str, ...), so we retrieve the original one
original = self.__elements.pop(element)
mark_set.add(original)
except KeyError:
return False
else:
if not self.__elements:
# No more elements to wait for
self.__call()
return True
def is_done(self):
"""
Checks if the call back has been called, i.e. if this object can be
deleted
"""
return self.__called
def set(self, element):
"""
Marks an element as successful
:param element: An element
:return: True if the element was known
"""
return self.__mark(element, self.__successes)
def set_error(self, element):
"""
Marks an element as erroneous
:param element: An element
:return: True if the element was known
"""
return self.__mark(element, self.__errors)
| apache-2.0 | 8,786,725,984,340,857,000 | 31.871233 | 80 | 0.550342 | false | 4.560243 | true | false | false |
egbertbouman/tribler-g | Tribler/Core/DecentralizedTracking/pymdht/plugins/routing_bep5.py | 1 | 13339 | # Copyright (C) 2009-2010 Raul Jimenez
# Released under GNU LGPL 2.1
# See LICENSE.txt for more information
"""
This module intends to implement the routing policy specified in BEP5:
-
-
-
-
"""
import random
import core.ptime as time
import heapq
import logging
import core.identifier as identifier
import core.message as message
from core.querier import Query
import core.node as node
from core.node import Node, RoutingNode
from core.routing_table import RoutingTable
logger = logging.getLogger('dht')
NUM_BUCKETS = identifier.ID_SIZE_BITS
"""
We need 160 sbuckets to cover all the cases. See the following table:
Index | Distance | Comment
0 | [2^0,2^1) | All bits equal but the least significant bit
1 | [2^1,2^2) | All bits equal till the second least significant bit
...
158 | [2^159,2^160) | The most significant bit is equal the second is not
159 | [2^159,2^160) | The most significant bit is different
IMPORTANT: Notice there is NO bucket for -1
-1 | 0 | The bit strings are equal
"""
DEFAULT_NUM_NODES = 8
NODES_PER_BUCKET = [] # 16, 32, 64, 128, 256]
NODES_PER_BUCKET[:0] = [DEFAULT_NUM_NODES] \
* (NUM_BUCKETS - len(NODES_PER_BUCKET))
REFRESH_PERIOD = 15 * 60 # 15 minutes
QUARANTINE_PERIOD = 3 * 60 # 3 minutes
MAX_NUM_TIMEOUTS = 2
PING_DELAY_AFTER_TIMEOUT = 30 #seconds
MIN_RNODES_BOOTSTRAP = 10
NUM_NODES_PER_BOOTSTRAP_STEP = 1
BOOTSTRAP_MODE = 'bootstrap_mode'
FIND_NODES_MODE = 'find_nodes_mode'
NORMAL_MODE = 'normal_mode'
_MAINTENANCE_DELAY = {BOOTSTRAP_MODE: .2,
FIND_NODES_MODE: 2,
NORMAL_MODE: 2}
class RoutingManager(object):
def __init__(self, my_node, bootstrap_nodes):
self.my_node = my_node
#Copy the bootstrap list
self.bootstrap_nodes = iter(bootstrap_nodes)
self.table = RoutingTable(my_node, NODES_PER_BUCKET)
self.ping_msg = message.OutgoingPingQuery(my_node.id)
self.find_closest_msg = message.OutgoingFindNodeQuery(
my_node.id,
my_node.id)
# maintenance variables
self._maintenance_mode = BOOTSTRAP_MODE
self._pinged_q_rnodes = {} # questionable nodes which have been
# recently pinged
self._maintenance_tasks = [self._refresh_stale_bucket,
#self._ping_a_staled_rnode,
# self._ping_a_query_received_node,
# self._ping_a_found_node,
]
def do_maintenance(self):
queries_to_send = []
maintenance_lookup_target = None
if self._maintenance_mode == BOOTSTRAP_MODE:
try:
node_ = self.bootstrap_nodes.next()
queries_to_send = [self._get_maintenance_query(node_)]
except (StopIteration):
maintenance_lookup_target = self.my_node.id
self._maintenance_mode = FIND_NODES_MODE
return (10, [], maintenance_lookup_target)
else:
maintenance_lookup_target = self._refresh_stale_bucket()
return (_MAINTENANCE_DELAY[self._maintenance_mode],
queries_to_send, maintenance_lookup_target)
def _refresh_stale_bucket(self):
maintenance_lookup_target = None
current_time = time.time()
for i in xrange(self.table.lowest_index, NUM_BUCKETS):
sbucket = self.table.get_sbucket(i)
m_bucket = sbucket.main
if not m_bucket:
continue
inactivity_time = current_time - m_bucket.last_changed_ts
if inactivity_time > REFRESH_PERIOD:
# print time.time(), '>>>>>>>>>>>>>>> refreshing bucket %d after %f secs' % (
# i, inactivity_time)
maintenance_lookup_target = self.my_node.id.generate_close_id(
i)
m_bucket.last_changed_ts = current_time
return maintenance_lookup_target
self._maintenance_mode = NORMAL_MODE
return None
def _get_maintenance_query(self, node_):
return Query(self.ping_msg, node_)
def on_query_received(self, node_):
'''
Return None when nothing to do
Return a list of queries when queries need to be sent (the queries
will be sent out by the caller)
'''
if self._maintenance_mode != NORMAL_MODE:
return
log_distance = self.my_node.log_distance(node_)
try:
sbucket = self.table.get_sbucket(log_distance)
except(IndexError):
return # Got a query from myself. Just ignore it.
m_bucket = sbucket.main
rnode = m_bucket.get_rnode(node_)
if rnode:
# node in routing table: inform rnode
self._update_rnode_on_query_received(rnode)
return
# node is not in the routing table
if m_bucket.there_is_room():
# There is room in the bucket. Just add the new node.
rnode = node_.get_rnode(log_distance)
m_bucket.add(rnode)
self.table.update_lowest_index(log_distance)
self.table.num_rnodes += 1
self._update_rnode_on_query_received(rnode)
return
# No room in the main routing table
# Check whether there is a bad node to be replaced.
bad_rnode = self._pop_bad_rnode(m_bucket)
if bad_rnode:
# We have a bad node in the bucket. Replace it with the new node.
rnode = node_.get_rnode(log_distance)
m_bucket.add(rnode)
self._update_rnode_on_query_received(rnode)
self.table.update_lowest_index(log_distance)
self.table.num_rnodes += 0
return
# No bad nodes. Check for questionable nodes
q_rnodes = self._get_questionable_rnodes(m_bucket)
queries_to_send = []
# if q_rnodes:
# print time.time(), '-----pinging questionable nodes in',
# print log_distance
# print q_rnodes
for q_rnode in q_rnodes:
# Ping questinable nodes to check whether they are still alive.
# (0 timeouts so far, candidate node)
c_rnode = node_.get_rnode(log_distance)
self._update_rnode_on_query_received(c_rnode)
self._pinged_q_rnodes[q_rnode] = [0, c_rnode]
queries_to_send.append(Query(self.ping_msg, q_rnode))
return queries_to_send
def on_response_received(self, node_, rtt, nodes):
log_distance = self.my_node.log_distance(node_)
try:
sbucket = self.table.get_sbucket(log_distance)
except(IndexError):
return # Got a response from myself. Just ignore it.
m_bucket = sbucket.main
rnode = m_bucket.get_rnode(node_)
if rnode:
# node in routing table: update
self._update_rnode_on_response_received(rnode, rtt)
if self._maintenance_mode == NORMAL_MODE:
m_bucket.last_changed_ts = time.time()
if node_ in self._pinged_q_rnodes:
# This node is questionable. This response proves that it is
# alive. Remove it from the questionable dict.
del self._pinged_q_rnodes[node_]
return
# The node is not in main
if m_bucket.there_is_room():
rnode = node_.get_rnode(log_distance)
m_bucket.add(rnode)
self.table.update_lowest_index(log_distance)
self.table.num_rnodes += 1
self._update_rnode_on_response_received(rnode, rtt)
if self._maintenance_mode == NORMAL_MODE:
m_bucket.last_changed_ts = time.time()
return
# The main bucket is full
# if there is a bad node inside the bucket,
# replace it with the sending node_
bad_rnode = self._pop_bad_rnode(m_bucket)
if bad_rnode:
rnode = node_.get_rnode(log_distance)
m_bucket.add(rnode)
self._update_rnode_on_response_received(rnode, rtt)
if self._maintenance_mode == NORMAL_MODE:
m_bucket.last_changed_ts = time.time()
self.table.update_lowest_index(log_distance)
self.table.num_rnodes += 0
return
# There are no bad nodes. Ping questionable nodes (if any)
q_rnodes = self._get_questionable_rnodes(m_bucket)
queries_to_send = []
for q_rnode in q_rnodes:
# (0 timeouts so far, candidate node)
c_rnode = node_.get_rnode(log_distance)
self._update_rnode_on_response_received(c_rnode, rtt)
self._pinged_q_rnodes[q_rnode] = [0, c_rnode]
queries_to_send.append(Query(self.ping_msg, q_rnode))
return queries_to_send
def _pop_bad_rnode(self, mbucket):
for rnode in mbucket.rnodes:
if rnode.timeouts_in_a_row() >= 2:
mbucket.remove(rnode)
return rnode
def _get_questionable_rnodes(self, m_bucket):
q_rnodes = []
for rnode in m_bucket.rnodes:
inactivity_time = time.time() - rnode.last_seen
if inactivity_time > REFRESH_PERIOD:
q_rnodes.append(rnode)
if rnode.num_responses == 0:
q_rnodes.append(rnode)
return q_rnodes
def on_error_received(self, node_):
pass
def on_timeout(self, node_):
if not node_.id:
return # This is a bootstrap node (just addr, no id)
log_distance = self.my_node.log_distance(node_)
try:
sbucket = self.table.get_sbucket(log_distance)
except (IndexError):
return # Got a timeout from myself, WTF? Just ignore.
m_bucket = sbucket.main
rnode = m_bucket.get_rnode(node_)
if not rnode:
# This node is not in the table. Nothing to do here
return
# The node is in the table. Update it
self._update_rnode_on_timeout(rnode)
t_strikes, c_rnode = self._pinged_q_rnodes.get(node_, (None, None))
if t_strikes is None:
# The node is not being checked by a "questinable ping".
return
elif t_strikes == 0:
# This is the first timeout
self._pinged_q_rnodes[node_] = (1, c_rnode)
# Let's give it another chance
return [Query(self.ping_msg, rnode)]
elif t_strikes == 1:
# Second timeout. You're a bad node, replace if possible
# check if the candidate node is in the routing table
log_distance = self.my_node.log_distance(c_rnode)
m_bucket = self.table.get_sbucket(log_distance).main
c_rnode_in_table = m_bucket.get_rnode(c_rnode)
if c_rnode_in_table:
print 'questionable node replaced'
# replace
m_bucket.remove(rnode)
m_bucket.add(c_rnode)
self.table.update_lowest_index(log_distance)
self.table.num_rnodes += 0
def get_closest_rnodes(self, log_distance, num_nodes, exclude_myself):
if not num_nodes:
num_nodes = NODES_PER_BUCKET[log_distance]
return self.table.get_closest_rnodes(log_distance, num_nodes,
exclude_myself)
def get_main_rnodes(self):
return self.table.get_main_rnodes()
def print_stats(self):
self.table.print_stats()
def _update_rnode_on_query_received(self, rnode):
"""Register a query from node.
You should call this method when receiving a query from this node.
"""
current_time = time.time()
rnode.last_action_ts = time.time()
rnode.msgs_since_timeout += 1
rnode.num_queries += 1
rnode.add_event(current_time, node.QUERY)
rnode.last_seen = current_time
def _update_rnode_on_response_received(self, rnode, rtt):
"""Register a reply from rnode.
You should call this method when receiving a response from this rnode.
"""
rnode.rtt = rtt
current_time = time.time()
#rnode._reset_refresh_task()
if rnode.in_quarantine:
rnode.in_quarantine = rnode.last_action_ts < (
current_time - QUARANTINE_PERIOD)
rnode.last_action_ts = current_time
rnode.num_responses += 1
rnode.add_event(time.time(), node.RESPONSE)
rnode.last_seen = current_time
def _update_rnode_on_timeout(self, rnode):
"""Register a timeout for this rnode.
You should call this method when getting a timeout for this node.
"""
rnode.last_action_ts = time.time()
rnode.msgs_since_timeout = 0
rnode.num_timeouts += 1
rnode.add_event(time.time(), node.TIMEOUT)
def _worst_rnode(self, rnodes):
max_num_timeouts = -1
worst_rnode_so_far = None
for rnode in rnodes:
num_timeouots = rnode.timeouts_in_a_row()
if num_timeouots >= max_num_timeouts:
max_num_timeouts = num_timeouots
worst_rnode_so_far = rnode
return worst_rnode_so_far
| lgpl-2.1 | 1,855,917,418,661,080,000 | 35.848066 | 92 | 0.580628 | false | 3.64354 | false | false | false |
JunctionAt/JunctionWWW | blueprints/forum/views/topic_view.py | 1 | 3817 | from flask import render_template, redirect, abort, url_for
import math
from .. import blueprint
from models.forum_model import Topic, Post
from post_reply import TopicReplyForm
from blueprints.auth import current_user
from topic_edit import PostEditForm
from ..forum_util import forum_template_data
POSTS_PER_PAGE = 20
PAGINATION_VALUE_RANGE = 3
@blueprint.route('/forum/t/<int:topic_id>/<string:topic_name>/', defaults={'page': 1})
@blueprint.route('/forum/t/<int:topic_id>/<string:topic_name>/page/<int:page>/')
def view_topic(topic_id, topic_name, page):
if page == 0:
abort(404)
topic_reply_form = TopicReplyForm()
topic = Topic.objects(topic_url_id=topic_id).exclude().first()
if topic is None:
abort(404)
if not topic_name == topic.get_url_name():
return redirect(topic.get_url())
if current_user.is_authenticated():
topic.update(add_to_set__users_read_topic=current_user.to_dbref())
board = topic.board
forum = board.forum
# Get our sorted posts and the number of posts.
posts = Post.objects(topic=topic).order_by('+date')
num_posts = len(posts)
# Calculate the total number of pages and make sure the request is a valid page.
num_pages = int(math.ceil(num_posts / float(POSTS_PER_PAGE)))
if num_pages < page:
if page == 1:
return render_template('forum_topic_view.html', topic=topic, board=board, forum=forum,
posts=posts, topic_reply_form=topic_reply_form,
total_pages=num_pages, current_page=page, next=None, prev=None, links=[])
abort(404)
# Compile the list of topics we want displayed.
display_posts = posts.skip((page - 1) * POSTS_PER_PAGE).limit(POSTS_PER_PAGE)
# Find the links we want for the next/prev buttons if applicable.
next_page = url_for('forum.view_topic', page=page + 1, **topic.get_url_info()) if page < num_pages else None
prev_page = url_for('forum.view_topic', page=page - 1, **topic.get_url_info()) if page > 1 and not num_pages == 1 else None
# Mash together a list of what pages we want linked to in the pagination bar.
links = []
for page_mod in range(-min(PAGINATION_VALUE_RANGE, page - 1), min(PAGINATION_VALUE_RANGE, num_pages-page) + 1):
num = page + page_mod
links.append({'num': num, 'url': url_for('forum.view_topic', page=num, **topic.get_url_info()),
'active': (num == page)})
return render_template('forum_topic_view.html', topic=topic, board=board, forum=forum,
posts=display_posts, topic_reply_form=topic_reply_form,
total_pages=num_pages, current_page=page,
next=next_page, prev=prev_page, links=links, markdown_escape=markdown_escape,
post_edit=PostEditForm(), forum_menu_current=board.id, **forum_template_data(forum))
from markupsafe import Markup, text_type
def markdown_escape(s):
"""Convert the characters &, <, >, ' and " in string s to HTML-safe
sequences. Use this if you need to display text that might contain
such characters in HTML. Marks return value as markup string.
"""
if hasattr(s, '__html__'):
return s.__html__().replace('>', '>')
return Markup(text_type(s)
.replace('&', '&')
.replace('>', '>')
.replace('<', '<')
.replace("'", ''')
.replace('"', '"')
)
#@blueprint.route('/f/ulastpost')
#def ulastpost():
# topics = Topic.objects()
# for topic in topics:
# post = Post.objects(topic=topic).order_by('-date').first()
# topic.last_post_date = post.date
# topic.save()
# return 'ye' | agpl-3.0 | 8,476,154,457,414,899,000 | 39.617021 | 127 | 0.614357 | false | 3.590781 | false | false | false |
Kinovarobotics/kinova-ros | kinova_demo/nodes/kinova_demo/gravity_compensated_mode.py | 1 | 1165 | #! /usr/bin/env python
"""A test program to test action servers for the JACO and MICO arms."""
import roslib; roslib.load_manifest('kinova_demo')
import actionlib
import kinova_msgs.msg
import geometry_msgs.msg
import tf
import std_msgs.msg
import math
import thread
from kinova_msgs.srv import *
import argparse
from robot_control_modules import *
prefix = 'j2s7s300_'
nbJoints = 7
interactive = True
duration_sec = 100
if __name__ == '__main__':
try:
prefix, nbJoints = argumentParser(None)
rospy.init_node('torque_compensated_mode')
if (interactive == True):
nb = raw_input("Moving robot to candle like position, and setting zero torques, press return to start, n to skip")
if (nb != "n" and nb != "N"):
result = joint_position_client([180]*7, prefix)
if (interactive == True):
nb = raw_input('Setting torques to zero, press return')
ZeroTorque(prefix)
if (interactive == True):
nb = raw_input('Starting gravity compensation mode')
publishTorqueCmd([0,0,0,0,0,0,0], duration_sec, prefix)
print("Done!")
except rospy.ROSInterruptException:
print "program interrupted before completion"
| bsd-3-clause | 3,772,988,054,336,513,000 | 24.888889 | 117 | 0.692704 | false | 3.165761 | false | false | false |
lordgittgenstein/eGov | corect/utility.py | 1 | 4923 | from datetime import datetime
import random
import string
from corect.models import Officer, Location, Office, Complaint, History
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
def get_group(user_id):
return Officer.objects.get(user=user_id).type
def get_higher_office(office):
officer = Officer.objects.get(id=office.head.id)
boss = Officer.objects.get(id=officer.boss.id)
return Office.objects.get(head=boss.id)
def get_offices_in(location):
queryset = Location.objects.none()
if location.type == 'state' or location.type == 'ut':
queryset = Location.objects.filter(state=location.state)
if location.type == 'division':
queryset = Location.objects.filter(division=location.division)
if location.type == 'district':
queryset = Location.objects.filter(district=location.district)
if location.type == 'subdistrict':
queryset = Location.objects.filter(subdistrict=location.subdistrict)
if location.type == 'locality' or location.type == 'village':
queryset = Location.objects.filter(locality=location.locality)
office_list = Office.objects.none()
for q in queryset:
office_list = office_list | Office.objects.filter(location=q.id)
return office_list
def get_offices_over(location):
queryset = Location.objects.none()
aqueryset = Location.objects.none()
flag = 'dont'
if location.type == 'state' or location.type == 'ut':
queryset = Location.objects.filter(state=location.state)
if location.type == 'division':
aqueryset = Location.objects.filter(state=location.state)
queryset = Location.objects.filter(division=location.division)
if location.type == 'district':
aqueryset = Location.objects.filter(division=location.division)
queryset = Location.objects.filter(district=location.district)
flag = 'do'
if location.type == 'subdistrict':
aqueryset = Location.objects.filter(district=location.district)
queryset = Location.objects.filter(subdistrict=location.subdistrict)
flag = 'do'
if location.type == 'locality' or location.type == 'village':
aqueryset = Location.objects.filter(subdistrict=location.subdistrict)
queryset = Location.objects.filter(locality=location.locality)
flag = 'do'
office_list = Office.objects.none()
if flag == 'do':
for q in aqueryset:
office_list = office_list | Office.objects.filter(location=q.id)
for q in queryset:
office_list = office_list | Office.objects.filter(location=q.id)
return office_list
def get_complaints_under(officer):
officers_under = Officer.objects.filter(boss=officer.id)
offices = Office.objects.none()
for o in officers_under:
offices = offices | Office.objects.filter(head=o.id)
complaints_under = Complaint.objects.none()
for oo in offices:
complaints_under = complaints_under | Complaint.objects.filter(office=oo.id)
complaints = Complaint.objects.filter(office=Office.objects.get(head=officer.id).id)
return complaints | complaints_under
def n_deadlines(officer):
officers_under = Officer.objects.filter(boss=officer.id)
offices = Office.objects.none()
for o in officers_under:
offices = offices | Office.objects.filter(head=o.id)
n_complaints_under = 0
for oo in offices:
n_complaints_under = n_complaints_under + Complaint.objects.filter(
office=oo.id,
wake_up__lte=datetime.now().date(),
resolved=False).count()
n_complaints = Complaint.objects.filter(office=Office.objects.get(
head=officer.id).id,
wake_up__lte=datetime.now().date(),
resolved=False).count()
return n_complaints + n_complaints_under
def n_recent(officer, last_login):
complaints, n_events = get_complaints_under(officer), 0
for c in complaints:
if c.office.id == Office.objects.get(head=officer.id).id:
n_events = n_events + History.objects.filter(complaint=c.id,
datetime__gte=datetime.combine(last_login, datetime.min.time()),
is_read_officer=False).exclude(user=officer.user).count()
else:
n_events = n_events + History.objects.filter(
complaint=c.id,
datetime__gte=datetime.combine(last_login, datetime.min.time()),
is_read_boss=False).exclude(user=officer.user).count()
return n_events | gpl-3.0 | -8,112,677,190,083,402,000 | 46.805825 | 100 | 0.628682 | false | 3.676624 | false | false | false |
YuepengGuo/backtrader | docs/datafeed-develop-general/vchart-test.py | 1 | 1988 | #!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import datetime
import backtrader as bt
from vchart import VChartData
if __name__ == '__main__':
# Create a cerebro entity
cerebro = bt.Cerebro(stdstats=False)
# Add a strategy
cerebro.addstrategy(bt.Strategy)
###########################################################################
# Note:
# The goog.fd file belongs to VisualChart and cannot be distributed with
# backtrader
#
# VisualChart can be downloaded from www.visualchart.com
###########################################################################
# Create a Data Feed
datapath = '../../datas/goog.fd'
data = VChartData(
dataname=datapath,
fromdate=datetime.datetime(2006, 1, 1),
todate=datetime.datetime(2006, 12, 31),
timeframe=bt.TimeFrame.Days
)
# Add the Data Feed to Cerebro
cerebro.adddata(data)
# Run over everything
cerebro.run()
# Plot the result
cerebro.plot(style='bar')
| gpl-3.0 | -2,867,846,791,584,958,000 | 32.133333 | 79 | 0.577465 | false | 4.321739 | false | false | false |
luca76/QGIS | python/plugins/MetaSearch/dialogs/newconnectiondialog.py | 1 | 3231 | # -*- coding: utf-8 -*-
###############################################################################
#
# CSW Client
# ---------------------------------------------------------
# QGIS Catalogue Service client.
#
# Copyright (C) 2010 NextGIS (http://nextgis.org),
# Alexander Bruy ([email protected]),
# Maxim Dubinin ([email protected])
#
# Copyright (C) 2014 Tom Kralidis ([email protected])
#
# This source is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This code is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# A copy of the GNU General Public License is available on the World Wide Web
# at <http://www.gnu.org/copyleft/gpl.html>. You can also obtain it by writing
# to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
# MA 02111-1307, USA.
#
###############################################################################
from PyQt4.QtCore import QSettings
from PyQt4.QtGui import QDialog, QMessageBox
from MetaSearch.ui.newconnectiondialog import Ui_NewConnectionDialog
class NewConnectionDialog(QDialog, Ui_NewConnectionDialog):
"""Dialogue to add a new CSW entry"""
def __init__(self, conn_name=None):
"""init"""
QDialog.__init__(self)
self.setupUi(self)
self.settings = QSettings()
self.conn_name = None
self.conn_name_orig = conn_name
def accept(self):
"""add CSW entry"""
conn_name = self.leName.text().strip()
conn_url = self.leURL.text().strip()
if any([conn_name == '', conn_url == '']):
QMessageBox.warning(self, self.tr('Save connection'),
self.tr('Both Name and URL must be provided'))
return
if conn_name is not None:
key = '/MetaSearch/%s' % conn_name
keyurl = '%s/url' % key
key_orig = '/MetaSearch/%s' % self.conn_name_orig
# warn if entry was renamed to an existing connection
if all([self.conn_name_orig != conn_name,
self.settings.contains(keyurl)]):
res = QMessageBox.warning(self, self.tr('Save connection'),
self.tr('Overwrite %s?' % conn_name),
QMessageBox.Ok | QMessageBox.Cancel)
if res == QMessageBox.Cancel:
return
# on rename delete original entry first
if all([self.conn_name_orig is not None,
self.conn_name_orig != conn_name]):
self.settings.remove(key_orig)
self.settings.setValue(keyurl, conn_url)
self.settings.setValue('/MetaSearch/selected', conn_name)
QDialog.accept(self)
def reject(self):
"""back out of dialogue"""
QDialog.reject(self)
| gpl-2.0 | -4,703,666,397,091,688,000 | 36.569767 | 79 | 0.563603 | false | 4.27381 | false | false | false |
ZeitOnline/zeit.calendar | src/zeit/calendar/calendar.py | 1 | 3226 | import datetime
import BTrees.OOBTree
import zope.component
import zope.interface
import zope.lifecycleevent
import zope.proxy
import zope.app.container.btree
import zeit.calendar.interfaces
class Calendar(zope.app.container.btree.BTreeContainer):
zope.interface.implements(zeit.calendar.interfaces.ICalendar)
def __init__(self):
super(Calendar, self).__init__()
self._date_index = BTrees.OOBTree.OOBTree()
self._key_index = BTrees.OOBTree.OOBTree()
def getEvents(self, date):
"""Return the events occuring on `date`."""
for event_id in self._date_index.get(date, []):
yield self[event_id]
def haveEvents(self, date):
"""Return whether there are events occuring on `date`."""
return bool(self._date_index.get(date))
def __setitem__(self, key, value):
event = zeit.calendar.interfaces.ICalendarEvent(value)
super(Calendar, self).__setitem__(key, value)
self._index(key, event.start, event.end)
def __delitem__(self, key):
super(Calendar, self).__delitem__(key)
self._unindex(key)
def _index(self, key, start, end):
if end is None:
check = (start,)
else:
check = (start, end)
for day in check:
if not isinstance(day, datetime.date):
raise ValueError("Expected date object, got %r instead" % day)
for day in date_range(start, end):
try:
day_idx = self._date_index[day]
except KeyError:
self._date_index[day] = day_idx = BTrees.OOBTree.OOTreeSet()
day_idx.insert(key)
self._key_index[key] = (start, end)
def _unindex(self, key):
start, end = self._key_index[key]
del self._key_index[key]
for day in date_range(start, end):
self._date_index[day].remove(key)
@zope.component.adapter(
zeit.calendar.interfaces.ICalendarEvent,
zope.lifecycleevent.IObjectModifiedEvent)
def updateIndexOnEventChange(calendar_event, event):
calendar = zope.proxy.removeAllProxies(calendar_event.__parent__)
key = calendar_event.__name__
calendar._unindex(key)
calendar._index(key, calendar_event.start, calendar_event.end)
def date_range(start, end):
"""Generate all datetime.date objects from start through end.
If end is None or earlier than start, yield only start. The range is never
empty so every event is always listed for at least one day. Otherwise
faulty dates might render an event unreachable via the index.
>>> day1 = datetime.date(2008, 1, 30)
>>> day2 = datetime.date(2008, 2, 2)
>>> list(date_range(day1, day2))
[datetime.date(2008, 1, 30), datetime.date(2008, 1, 31),
datetime.date(2008, 2, 1), datetime.date(2008, 2, 2)]
>>> list(date_range(day1, None))
[datetime.date(2008, 1, 30)]
>>> list(date_range(day1, day1))
[datetime.date(2008, 1, 30)]
>>> list(date_range(day2, day1))
[datetime.date(2008, 2, 2)]
"""
if end is None or end <= start:
yield start
else:
for i in xrange(start.toordinal(), end.toordinal() + 1):
yield datetime.date.fromordinal(i)
| bsd-3-clause | 5,608,074,890,873,393,000 | 31.26 | 78 | 0.626472 | false | 3.576497 | false | false | false |
MaterialsDiscovery/PyChemia | pychemia/core/element.py | 1 | 6941 | from collections.abc import Mapping
from pychemia.utils.periodic import *
madelung_exceptions = {'Cr': ['Ar', '4s1', '3d5'],
'Cu': ['Ar', '4s1', '3d10'],
'Nb': ['Kr', '5s1', '4d4'],
'Mo': ['Kr', '5s1', '4d5'],
'Ru': ['Kr', '5s1', '4d7'],
'Rh': ['Kr', '5s1', '4d8'],
'Pd': ['Kr', '4d10'],
'Ag': ['Kr', '5s1', '4d10'],
'La': ['Xe', '6s2', '5d1'],
'Ce': ['Xe', '6s2', '4f1', '5d1'],
'Gd': ['Xe', '6s2', '4f7', '5d1'],
'Pt': ['Xe', '6s1', '4f14', '5d9'],
'Au': ['Xe', '6s1', '4f14', '5d10'],
'Ac': ['Rn', '7s2', '6d1'],
'Th': ['Rn', '7s2', '6d2'],
'Pa': ['Rn', '7s2', '5f2', '6d1'],
'U': ['Rn', '7s2', '5f3', '6d1'],
'Np': ['Rn', '7s2', '5f4', '6d1'],
'Cm': ['Rn', '7s2', '5f7', '6d1'],
'Lr': ['Rn', '7s2', '5f14', '7p1']}
class Element:
def __init__(self, value=None):
if value in atomic_symbols:
self.symbol = value
elif value.capitalize() in atomic_symbols:
self.symbol = value.capitalize()
else:
raise ValueError('Symbol %s does not appear on the periodic table' % value)
@property
def name(self):
return atomic_name(self.symbol)
@property
def atomic_number(self):
return atomic_number(self.symbol)
@property
def group(self):
return group(self.symbol)
@property
def period(self):
return period(self.symbol)
@property
def block(self):
return block(self.symbol)
@property
def valence(self):
return valence(self.symbol)
@property
def valence_nominal(self):
return valence_nominal(self.symbol)
@property
def mass(self):
return mass(self.symbol)
@property
def covalent_radius(self):
return covalent_radius(self.symbol)
@property
def electronegativity(self):
return electronegativity(self.symbol)
@property
def crystal_structure(self):
return crystal_structure(self.symbol)
@property
def phase(self):
return phase(self.symbol)
@property
def boiling_point(self):
return boiling_point(self.symbol)
@property
def melting_point(self):
return melting_point(self.symbol)
@property
def oxidation_states(self):
return oxidation_state(self.symbol)
@property
def oxidation_states_common(self):
return oxidation_state(self.symbol, common=True)
def __str__(self):
ret = """
Symbol: %s
Name : %s
Z : %d
Group : %d
Period: %d
Block : %s
Valence : %f
Valence (Nominal): %f
Mass : %f
Covalent Radius : %f
Electronegativity : %f
Crystal Structure : %s
Phase : %s
Boiling Point : %f
Melting Point : %f
""" % (self.symbol, self.name, self.atomic_number, self.group, self.period, self.block,
self.valence, self.valence_nominal,
self.mass, self.covalent_radius, self.electronegativity, self.crystal_structure, self.phase,
self.boiling_point, self.melting_point)
return ret
def previous_inert(self):
inerts = ['He', 'Ne', 'Ar', 'Kr', 'Xe', 'Rn', 'Og']
if self.period == 1:
return None
else:
# In case the element is already a noble gas the previous one look one period above in Periodic Table
return inerts[self.period - 2]
@property
def madelung_filling(self):
order = ['1s', '2s', '2p', '3s', '3p', '4s', '3d', '4p', '5s', '4d',
'5p', '6s', '4f', '5d', '6p', '7s', '5f', '6d', '7p', '8s',
'5g', '6f', '7d', '8p', '9s']
# We start with the total number of electron and get those associated to orbitals following the order
capacities = {}
# l quantum number
for lqn in range(4):
label = Element.orbital_label_from_number(lqn)
nele = Element.max_electrons_subshell(label)
capacities[label] = nele
max_electrons = {}
for ishell in order:
label = ishell[-1]
maxele = Element.max_electrons_subshell(label)
max_electrons[ishell] = maxele
ret = []
if self.previous_inert() is not None:
inert = self.__class__(self.previous_inert())
ret.append(inert.symbol)
inert_remain = inert.atomic_number
# Consume the shells up to the previous inert atom
numele = self.atomic_number - inert.atomic_number
else:
numele = self.atomic_number
inert_remain = 0
for i in order:
if inert_remain >= max_electrons[i]:
inert_remain -= max_electrons[i]
elif inert_remain == 0:
if numele >= max_electrons[i]:
numele -= max_electrons[i]
ret.append(i + str(max_electrons[i]))
elif numele == 0:
break
elif numele < max_electrons[i]:
ret.append(i + str(numele))
break
return ret
@staticmethod
def azimuthal_quantum_number(label):
aqn = {'s': 0, 'p': 1, 'd': 2, 'f': 3, 'g': 4}
if label not in ['s', 'p', 'd', 'f', 'g']:
raise ValueError('Not such label for an orbital: %s' % label)
return aqn[label]
# lqn = angular momemtum
@staticmethod
def orbital_label_from_number(lqn):
orbitals = ['s', 'p', 'd', 'f', 'g']
if lqn not in range(4):
raise ValueError('Not such azimuthal quantum number: %s' % lqn)
return orbitals[lqn]
@staticmethod
def max_electrons_subshell(subshell):
if subshell in ['s', 'p', 'd', 'f', 'g']:
ll = Element.azimuthal_quantum_number(subshell)
elif subshell in [0, 1, 2, 3, 4]:
ll = subshell
else:
raise ValueError('Not a valid subshell: %s' % subshell)
return 2 * (2 * ll + 1)
@property
def electronic_configuration(self):
"""
Return the known electronic configuration including exceptions to Madelung's rule
Based on:
https://en.wikipedia.org/wiki/Electron_configuration#Atoms:_Aufbau_principle_and_Madelung_rule
:return:
"""
if self.symbol in madelung_exceptions:
return madelung_exceptions[self.symbol]
else:
return self.madelung_filling
@property
def is_madelung_exception(self):
return self.symbol in madelung_exceptions
| mit | -169,579,305,929,761,860 | 29.310044 | 113 | 0.511742 | false | 3.392473 | false | false | false |
medunigraz/outpost | src/outpost/django/video/migrations/0001_initial.py | 1 | 5601 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-18 13:23
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
from ...base.utils import Uuid4Upload
class Migration(migrations.Migration):
initial = True
dependencies = [
('geo', '0015_auto_20170809_0948'),
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='Recorder',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128)),
('hostname', models.CharField(max_length=128)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Server',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('hostname', models.CharField(blank=True, max_length=128)),
('port', models.PositiveIntegerField(default=2022)),
('key', models.BinaryField(default=b'')),
('active', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='Epiphan',
fields=[
('recorder_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='video.Recorder')),
('username', models.CharField(max_length=128)),
('password', models.CharField(max_length=128)),
('key', models.BinaryField(null=True)),
('active', models.BooleanField(default=True)),
],
options={
'abstract': False,
},
bases=('video.recorder',),
),
migrations.AlterUniqueTogether(
name='server',
unique_together=set([('hostname', 'port')]),
),
migrations.AddField(
model_name='recorder',
name='polymorphic_ctype',
field=models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='polymorphic_video.recorder_set+', to='contenttypes.ContentType'),
),
migrations.AddField(
model_name='epiphan',
name='server',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='video.Server'),
),
migrations.CreateModel(
name='Recording',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('data', models.FileField(upload_to=Uuid4Upload)),
('recorder', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='video.Recorder')),
('info', django.contrib.postgres.fields.jsonb.JSONField(default={})),
],
options={
'abstract': False,
'ordering': ('-modified', '-created'),
'get_latest_by': 'modified',
},
),
migrations.CreateModel(
name='Export',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'abstract': False,
},
),
migrations.RemoveField(
model_name='epiphan',
name='active',
),
migrations.AddField(
model_name='recorder',
name='active',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='recorder',
name='room',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='geo.Room'),
),
migrations.AlterField(
model_name='epiphan',
name='key',
field=models.BinaryField(default=b''),
preserve_default=False,
),
migrations.CreateModel(
name='SideBySideExport',
fields=[
('export_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='video.Export')),
('data', models.FileField(upload_to=Uuid4Upload)),
],
options={
'abstract': False,
},
bases=('video.export',),
),
migrations.AddField(
model_name='export',
name='polymorphic_ctype',
field=models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='polymorphic_video.export_set+', to='contenttypes.ContentType'),
),
migrations.AddField(
model_name='export',
name='recording',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='video.Recording'),
),
]
| bsd-2-clause | -8,162,181,520,460,886,000 | 39.883212 | 193 | 0.555972 | false | 4.4808 | false | false | false |
PGower/PyCanvas | pycanvas/tests/submissions.py | 1 | 7707 | """Submissions API Tests for Version 1.0.
This is a testing template for the generated SubmissionsAPI Class.
"""
import unittest
import requests
import secrets
from pycanvas.apis.submissions import SubmissionsAPI
from pycanvas.apis.submissions import Submissioncomment
from pycanvas.apis.submissions import Submission
from pycanvas.apis.submissions import Mediacomment
class TestSubmissionsAPI(unittest.TestCase):
"""Tests for the SubmissionsAPI."""
def setUp(self):
self.client = SubmissionsAPI(secrets.instance_address, secrets.access_token)
def test_submit_assignment_courses(self):
"""Integration test for the SubmissionsAPI.submit_assignment_courses method."""
# This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.
pass
def test_submit_assignment_sections(self):
"""Integration test for the SubmissionsAPI.submit_assignment_sections method."""
# This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.
pass
def test_list_assignment_submissions_courses(self):
"""Integration test for the SubmissionsAPI.list_assignment_submissions_courses method."""
course_id = None # Change me!!
assignment_id = None # Change me!!
r = self.client.list_assignment_submissions_courses(course_id, assignment_id, grouped=None, include=None)
def test_list_assignment_submissions_sections(self):
"""Integration test for the SubmissionsAPI.list_assignment_submissions_sections method."""
section_id = None # Change me!!
assignment_id = None # Change me!!
r = self.client.list_assignment_submissions_sections(section_id, assignment_id, grouped=None, include=None)
def test_list_submissions_for_multiple_assignments_courses(self):
"""Integration test for the SubmissionsAPI.list_submissions_for_multiple_assignments_courses method."""
course_id = None # Change me!!
r = self.client.list_submissions_for_multiple_assignments_courses(course_id, assignment_ids=None, grading_period_id=None, grouped=None, include=None, order=None, order_direction=None, student_ids=None)
def test_list_submissions_for_multiple_assignments_sections(self):
"""Integration test for the SubmissionsAPI.list_submissions_for_multiple_assignments_sections method."""
section_id = None # Change me!!
r = self.client.list_submissions_for_multiple_assignments_sections(section_id, assignment_ids=None, grading_period_id=None, grouped=None, include=None, order=None, order_direction=None, student_ids=None)
def test_get_single_submission_courses(self):
"""Integration test for the SubmissionsAPI.get_single_submission_courses method."""
course_id = None # Change me!!
assignment_id = None # Change me!!
user_id = None # Change me!!
r = self.client.get_single_submission_courses(user_id, course_id, assignment_id, include=None)
def test_get_single_submission_sections(self):
"""Integration test for the SubmissionsAPI.get_single_submission_sections method."""
section_id = None # Change me!!
assignment_id = None # Change me!!
user_id = None # Change me!!
r = self.client.get_single_submission_sections(user_id, section_id, assignment_id, include=None)
def test_upload_file_courses(self):
"""Integration test for the SubmissionsAPI.upload_file_courses method."""
# This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.
pass
def test_upload_file_sections(self):
"""Integration test for the SubmissionsAPI.upload_file_sections method."""
# This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.
pass
def test_grade_or_comment_on_submission_courses(self):
"""Integration test for the SubmissionsAPI.grade_or_comment_on_submission_courses method."""
# This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.
pass
def test_grade_or_comment_on_submission_sections(self):
"""Integration test for the SubmissionsAPI.grade_or_comment_on_submission_sections method."""
# This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.
pass
def test_list_gradeable_students(self):
"""Integration test for the SubmissionsAPI.list_gradeable_students method."""
course_id = None # Change me!!
assignment_id = None # Change me!!
r = self.client.list_gradeable_students(course_id, assignment_id)
def test_grade_or_comment_on_multiple_submissions_courses_submissions(self):
"""Integration test for the SubmissionsAPI.grade_or_comment_on_multiple_submissions_courses_submissions method."""
# This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.
pass
def test_grade_or_comment_on_multiple_submissions_courses_assignments(self):
"""Integration test for the SubmissionsAPI.grade_or_comment_on_multiple_submissions_courses_assignments method."""
# This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.
pass
def test_grade_or_comment_on_multiple_submissions_sections_submissions(self):
"""Integration test for the SubmissionsAPI.grade_or_comment_on_multiple_submissions_sections_submissions method."""
# This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.
pass
def test_grade_or_comment_on_multiple_submissions_sections_assignments(self):
"""Integration test for the SubmissionsAPI.grade_or_comment_on_multiple_submissions_sections_assignments method."""
# This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.
pass
def test_mark_submission_as_read_courses(self):
"""Integration test for the SubmissionsAPI.mark_submission_as_read_courses method."""
# This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.
pass
def test_mark_submission_as_read_sections(self):
"""Integration test for the SubmissionsAPI.mark_submission_as_read_sections method."""
# This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.
pass
def test_mark_submission_as_unread_courses(self):
"""Integration test for the SubmissionsAPI.mark_submission_as_unread_courses method."""
course_id = None # Change me!!
assignment_id = None # Change me!!
user_id = None # Change me!!
r = self.client.mark_submission_as_unread_courses(user_id, course_id, assignment_id)
def test_mark_submission_as_unread_sections(self):
"""Integration test for the SubmissionsAPI.mark_submission_as_unread_sections method."""
section_id = None # Change me!!
assignment_id = None # Change me!!
user_id = None # Change me!!
r = self.client.mark_submission_as_unread_sections(user_id, section_id, assignment_id)
| mit | 7,338,021,029,388,652,000 | 51.520833 | 211 | 0.703387 | false | 4.288815 | true | false | false |
ryansb/mediapublic | server/mediapublic/mediapublic/models.py | 1 | 17235 | from sqlalchemy import (
Column,
Index,
ForeignKey,
Integer,
Text,
UnicodeText,
DateTime,
)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import (
scoped_session,
sessionmaker,
relationship,
backref,
)
import transaction
from zope.sqlalchemy import ZopeTransactionExtension
DBSession = scoped_session(sessionmaker(extension=ZopeTransactionExtension(), expire_on_commit=False))
Base = declarative_base()
class CreationMixin():
@classmethod
def add(cls, **kwargs):
with transaction.manager:
thing = cls(**kwargs)
DBSession.add(thing)
transaction.commit()
return thing
@classmethod
def get_all(cls):
with transaction.manager:
things = DBSession.query(
cls,
).all()
#retThings = []
#for t in things:
# retThings.append(t.to_dict())
#return retThings
return things
@classmethod
def get_by_id(cls, id):
with transaction.manager:
thing = DBSession.query(
cls,
).filter(
cls.id == id,
).first()
return thing
@classmethod
def delete_by_id(cls, id):
with transaction.manager:
thing = cls.get_by_id(id)
if not thing is None:
DBSession.delete(thing)
transaction.commit()
return thing
@classmethod
def update_by_id(cls, id, **kwargs):
print '\nupdate_by_id(), args:'
#print args
with transaction.manager:
keys = set(cls.__dict__)
thing = cls.get_by_id(id)
if not thing is None:
for k in kwargs:
if k in keys:
setattr(thing, k, kwargs[k])
DBSession.add(thing)
transaction.commit()
return thing
@classmethod
def reqkeys(cls):
keys = []
for key in cls.__table__.columns:
if '__required__' in type(key).__dict__:
keys.append(str(key).split('.')[1])
return keys
class ReqColumn(Column):
__required__ = True
class UserTypes(Base, CreationMixin):
__tablename__ = 'user_types'
id = Column(Integer, primary_key=True)
name = ReqColumn(UnicodeText)
description = ReqColumn(UnicodeText)
value = ReqColumn(Integer)
creation_datetime = Column(DateTime)
def to_dict(self):
resp = dict(
id = self.id,
name = self.name,
description = self.description,
value = self.value,
)
return resp
class Users(Base, CreationMixin):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
unique = Column(Text)
first = ReqColumn(UnicodeText)
last = ReqColumn(UnicodeText)
email = ReqColumn(UnicodeText)
twitter = ReqColumn(UnicodeText)
creation_datetime = Column(UnicodeText)
last_longin_datetime = Column(UnicodeText)
user_type_id = ReqColumn(ForeignKey('user_types.id'))
organization_id = Column(ForeignKey('organizations.id'), nullable=True)
def to_dict(self):
resp = dict(
id = self.id,
first = self.first,
last = self.last,
email = self.email,
user_type = self.user_type_id,
organization_id = self.organization_id,
)
return resp
class Comments(Base, CreationMixin):
__tablename__ = 'comments'
id = Column(Integer, primary_key=True)
subject = ReqColumn(UnicodeText)
contents = ReqColumn(UnicodeText)
creation_datetime = Column(DateTime)
parent_comment_id = ReqColumn(Integer, ForeignKey('comments.id'))
author_id = ReqColumn(Integer, ForeignKey('users.id'))
organization_id = Column(ForeignKey('organizations.id'), nullable=True)
people_id = Column(ForeignKey('people.id'), nullable=True)
recording_id = Column(ForeignKey('recordings.id'), nullable=True)
howto_id = Column(ForeignKey('howtos.id'), nullable=True)
blog_id = Column(ForeignKey('blogs.id'), nullable=True)
def to_dict(self):
resp = dict(
id = self.id,
subject = self.subject,
contents = self.contents,
creation_datetime = str(self.creation_datetime),
parent_comment_id = self.parent_comment_id,
author_id = self.author_id,
)
return resp
@classmethod
def get_by_organization_id(cls, id):
with transaction.manager:
comments = DBSession.query(
Comments,
).filter(
Comments.organization_id == id,
).all()
return comments
@classmethod
def get_by_people_id(cls, id):
with transaction.manager:
comments = DBSession.query(
Comments,
).filter(
Comments.people_id == id,
).all()
return comments
@classmethod
def get_by_recording_id(cls, id):
with transaction.manager:
comments = DBSession.query(
Comments,
).filter(
Comments.recording_id == id,
).all()
return comments
@classmethod
def get_by_howto_id(cls, id):
with transaction.manager:
comments = DBSession.query(
Comments,
).filter(
Comments.howto_id == id,
).all()
return comments
@classmethod
def get_by_blog_id(cls, id):
with transaction.manager:
comments = DBSession.query(
Comments,
).filter(
Comments.blog_id == id,
).all()
return comments
class Organizations(Base, CreationMixin):
__tablename__ = 'organizations'
id = Column(Integer, primary_key=True)
short_name = ReqColumn(UnicodeText)
long_name = ReqColumn(UnicodeText)
short_description = ReqColumn(UnicodeText)
long_description = ReqColumn(UnicodeText)
address_0 = ReqColumn(UnicodeText)
address_1 = ReqColumn(UnicodeText)
city = ReqColumn(UnicodeText)
state = ReqColumn(UnicodeText)
zipcode = ReqColumn(UnicodeText)
phone = ReqColumn(UnicodeText)
fax = ReqColumn(UnicodeText)
primary_website = ReqColumn(UnicodeText)
secondary_website = ReqColumn(UnicodeText)
creation_datetime = Column(DateTime)
def to_dict(self):
resp = dict(
id = self.id,
short_name = self.short_name,
long_name = self.long_name,
short_description = self.short_description,
long_description = self.long_description,
address_0 = self.address_0,
address_1 = self.address_1,
city = self.city,
state = self.state,
zipcode = self.zipcode,
phone = self.phone,
fax = self.fax,
primary_website = self.primary_website,
secondary_website = self.secondary_website,
creation_datetime = str(self.creation_datetime),
)
return resp
class PlaylistAssignments(Base, CreationMixin):
__tablename__ = 'playlist_assignments'
id = Column(Integer, primary_key=True)
playlist_id = Column(Integer, ForeignKey('playlists.id'))
recording_id = ReqColumn(Integer, ForeignKey('recordings.id'))
creation_datetime = Column(DateTime)
@classmethod
def delete_by_playlist_id_and_recording_id(cls, pid, rid):
success = False
with transaction.manager:
playlist = DBSession.query(
PlaylistAssignments,
).filter(
PlaylistAssignments.playlist_id == pid,
PlaylistAssignment.recording_id == rid,
).first()
if not playlist is None:
DBSession.remove(playlist)
transaction.commit()
success = True
return success
def to_dict(self):
resp = dict(
id = self.id,
playlist_id = self.playlist_id,
recording_id = self.recording_id,
)
return resp
class Playlists(Base, CreationMixin):
__tablename__ = 'playlists'
id = Column(Integer, primary_key=True)
author_id = Column(Integer, ForeignKey('people.id'))
title = ReqColumn(UnicodeText)
description = ReqColumn(UnicodeText)
creation_datetime = Column(DateTime)
recordings = relationship(
"Recordings",
secondary=PlaylistAssignments.__table__,
backref="playlists",
)
@classmethod
def get_by_owner_id(cls, id):
with transaction.manager:
playlists = DBSession.query(
Playlists,
).filter(
Playlists.author_id == id,
).all()
return playlists
@classmethod
def remove_recording_ny_id(cls, pid, rid):
with transaction.manager:
assignment = DBSession.query(
PlaylistAssignments,
).filter(
PlaylistAssignments.playlist_id == pid,
PlaylistAssignments.recording_id == rid,
).first()
DBSession.delete(assignment)
@classmethod
def get_recordings_by_playlist_id(self, id):
with transaction.manager:
recordings = DBSession.query(
Recordings,
).join(
PlaylistAssignments,
).filter(
PlaylistAssignments.playlist_id == id,
).all()
if recordings is None:
recordings = []
if not isinstance(recordings, list):
recordings = [recordings]
return recordings
def to_dict(self):
resp = dict(
id = self.id,
author_id = self.author_id,
title = self.title,
# This should cause a LEFT JOIN against the many-to-many
# recording_assignments table, and get the recordings
# that are associated with the playlist
#recordings = [r.to_dict() for r in self.recordings]
recordings = [r.to_dict() for r in Playlists.get_recordings_by_playlist_id(self.id)],
)
return resp
class People(Base, CreationMixin):
__tablename__= 'people'
id = Column(Integer, primary_key=True)
first = ReqColumn(UnicodeText)
last = ReqColumn(UnicodeText)
address_0 = ReqColumn(UnicodeText)
address_1 = ReqColumn(UnicodeText)
city = ReqColumn(UnicodeText)
state = ReqColumn(UnicodeText)
zipcode = ReqColumn(UnicodeText)
phone = ReqColumn(UnicodeText)
fax = ReqColumn(UnicodeText)
primary_website = ReqColumn(UnicodeText)
secondary_website = ReqColumn(UnicodeText)
creation_datetime = Column(DateTime)
# these should probably be brough out into a seperate table as
# many to one so we don't have to keep adding colyumns ...
twitter = ReqColumn(UnicodeText)
facebook = ReqColumn(UnicodeText)
instagram = ReqColumn(UnicodeText)
periscope = ReqColumn(UnicodeText)
user_id = ReqColumn(ForeignKey('users.id'), nullable=True)
organization_id = Column(ForeignKey('organizations.id'), nullable=True)
def to_dict(self):
resp = dict(
id = self.id,
first = self.first,
address_0 = self.address_0,
address_1 = self.address_1,
city = self.city,
state = self.state,
zipcode = self.zipcode,
phone = self.phone,
fax = self.fax,
primary_website = self.primary_website,
secondary_website = self.secondary_website,
creation_datetime = str(self.creation_datetime),
# see note on definitions
twitter = self.twitter,
facebook = self.facebook,
instagram = self.instagram,
periscope = self.periscope,
user_id = self.user_id,
organization_id = self.organization_id,
)
return resp
@classmethod
def get_by_organization_id(cls, id):
with transaction.manager:
people = DBSession.query(
People,
).filter(
People.organization_id == id,
).all()
return people
class Recordings(Base, CreationMixin):
__tablename__= 'recordings'
id = Column(Integer, primary_key=True)
title = ReqColumn(UnicodeText)
url = ReqColumn(UnicodeText)
recorded_datetime = ReqColumn(DateTime)
creation_datetime = Column(DateTime)
organization_id = Column(Integer, ForeignKey('organizations.id'))
def to_dict(self):
resp = dict(
id = self.id,
title = self.title,
url = self.url,
recorded_datetime = str(self.recorded_datetime),
creation_datetime = str(self.creation_datetime),
organization_id = self.organization_id,
)
return resp
@classmethod
def get_by_organization_id(cls, id):
with transaction.manager:
recordings = DBSession.query(
Recordings,
# RecordingCategories,
).filter(
Recordings.organization_id == id,
#).join(
# RecordingCategoryAssignments,
).all()
return recordings
class RecordingCategories(Base, CreationMixin):
__tablename__ = 'recording_categories'
id = Column(Integer, primary_key=True)
name = ReqColumn(UnicodeText)
short_description = ReqColumn(UnicodeText)
long_description = ReqColumn(UnicodeText)
creation_datetime = Column(DateTime)
def to_dict(self):
resp = dict(
id = self.id,
name = self.name,
short_description = self.short_description,
long_description = self.long_description,
creation_datetime = str(self.creation_datetime),
)
return resp
class RecordingCategoryAssignments(Base, CreationMixin):
__tablename__ = 'recording_category_assignments'
id = Column(Integer, primary_key=True)
recording_category_id = ReqColumn(Integer, ForeignKey('recording_categories.id'))
recording_id = ReqColumn(Integer, ForeignKey('recordings.id'))
creation_datetime = Column(DateTime)
def to_dict(self):
resp = dict(
id = self.id,
recording_category_id = self.recording_category_id,
recording_id = self.recording_id,
creation_datetime = str(self.creation_datetime),
)
return resp
class Howtos(Base, CreationMixin):
__tablename__ = 'howtos'
id = Column(Integer, primary_key=True)
title = ReqColumn(UnicodeText)
contents = ReqColumn(UnicodeText)
creation_datetime = Column(DateTime)
edit_datetime = Column(DateTime)
tags = ReqColumn(UnicodeText)
def to_dict(self):
resp = dict(
id = self.id,
title = self.title,
contents = self.contents,
creation_datetime = str(self.creation_datetime),
edit_datetime = self.edit_datetime,
tags = self.tags,
)
return resp
class HowtoCategories(Base, CreationMixin):
__tablename__ = 'howto_categories'
id = Column(Integer, primary_key=True)
name = ReqColumn(UnicodeText)
short_description = ReqColumn(UnicodeText)
long_description = ReqColumn(UnicodeText)
creation_datetime = Column(DateTime)
def to_dict(self):
resp = dict(
id = self.id,
name = self.name,
short_description = self.short_description,
long_description = self.long_description,
creation_datetime = str(self.creation_datetime),
)
return resp
class HowtoCategoryAssignments(Base, CreationMixin):
__tablename__ = 'howto_category_assignments'
id = Column(Integer, primary_key=True)
howto_category_id = ReqColumn(Integer, ForeignKey('howto_categories.id'))
howto_id = ReqColumn(Integer, ForeignKey('howtos.id'))
creation_datetime = Column(DateTime)
def to_dict(self):
resp = dict(
id = self.id,
howto_category_id = self.howto_category_id,
howto_id = self.howto_id,
creation_datetime = str(self.creation_datetime),
)
return resp
class Blogs(Base, CreationMixin):
__tablename__ = 'blogs'
id = Column(Integer, primary_key=True)
title = ReqColumn(UnicodeText)
contents = ReqColumn(UnicodeText)
creation_datetime = Column(DateTime)
edit_datetime = Column(DateTime)
tags = ReqColumn(UnicodeText)
author_id = Column(ForeignKey('users.id'))
def to_dict(self):
resp = dict(
id = self.id,
title = self.title,
contents = self.contents,
creation_datetime = str(self.creation_datetime),
edit_datetime = self.edit_datetime,
tags = self.tags,
author_id = self.author_id,
)
return resp
| gpl-3.0 | -7,340,058,238,994,979,000 | 28.361158 | 102 | 0.583406 | false | 4.149013 | false | false | false |
IQSS/geoconnect | gc_apps/worldmap_connect/jointarget_formatter.py | 1 | 11627 | """
Helper class to format JSON in the JoinTargetInformation model's "target_info" field
- In terms of UI, this data is used for:
1. Creating a list of Geospatial Identifiers
- e.g. Census Tract, Zip code
2. Creating a list of Names/Years based on the chosen Geospatial Identifiers
- e.g. If Cenus Tract is chosen, list might be:
"US Census 2010", "US Census 2000", "US Census 1990", etc.
3. Based on the chosen JoinTarget, prep data for WorldMap datatables API
- The Upload and Join API
- Parms: name of target layer, name of target layer column
"""
import json
from collections import OrderedDict
from gc_apps.worldmap_connect.single_join_target_info import SingleJoinTargetInfo
class JoinTargetFormatter(object):
"""
Helper class to format JSON in the JoinTargetInformation model's "target_info" field
Sample target info data:
{
"data": [
{
"layer": "geonode:massachusetts_census_nhu",
"geocode_type": "US Census Tract",
"geocode_type_slug": "us-census-tract",
"attribute": {
"attribute": "TRACTCE",
"type": "xsd:string"
},
"year": 2010,
"type": null,
"id": 3
}
],
"success": true
}
"""
def __init__(self, target_info):
"""Initialize using target_info JSON retrieved from WorldMap"""
self.err_found = False
self.err_message = None
self.target_info = target_info
self.initial_check()
def is_valid(self):
return self.err_found
def add_error(self, err_msg):
"""
Error detected, store a messsage in the class
"""
self.err_found = True
self.err_message = err_msg
def initial_check(self):
"""
Make sure that 'target_info' has the expected data
"""
if self.target_info is None:
self.add_error("target_info should not be None")
return False
# Is this a dict? (e.g. not a list or blank, etc)
#print 'target_info', self.target_info
if not hasattr(self.target_info, 'has_key'):
# OK, Maybe it's a JSON string that can be converted to a dict
print 'type self.target_info', type(self.target_info)
try:
self.target_info = json.loads(self.target_info)
except ValueError:
self.add_error("target_info should always be a JSON string or python dict")
return False
# Is there a 'success' attribute?
if not 'success' in self.target_info:
self.add_error("target_info does not have a 'success' attribute")
return False
# Is success True?
if not self.target_info['success'] is True:
self.add_error("target_info does not have a 'success' marked as True")
return False
# Is there a data attribute?
if not 'data' in self.target_info:
self.add_error("target_info does not have a 'data' attribute")
return False
# Does the data attribute contain any elements?
if len(self.target_info['data']) == 0:
self.add_error("There are no JoinTargets available.")
return False
return True
@staticmethod
def get_formatted_name(geocode_type, year=None, title=None):
if geocode_type is None:
return None
if year and title:
return "{0} ({1}) {2}".format(geocode_type, year, title)
if year:
return "{0} ({1})".format(geocode_type, year)
if title:
return "{0} - {1}".format(geocode_type, title)
return "{0}".format(geocode_type)
def get_single_join_target_info(self, target_layer_id):
"""
Given a target_layer_id, send back:
- target layer name
- target layer column
- zero pad length
- zero_pad_length is either an integer or None
return (target layer name, target layer column, zero_pad_length)
"""
if target_layer_id is None:
return (None, None, None)
for info in self.target_info['data']:
if 'id' in info and target_layer_id == info['id']:
return SingleJoinTargetInfo(info)
#return SingleJoinTargetInfo(
# info['layer'],
# info['attribute']['attribute'],
# info['attribute']['type'],
# self.get_formatting_zero_pad_length(target_layer_id)
# )
return None
def get_geocode_types(self):
"""
Create a list tuples for available Geospatial Identifiers
- Tuple Format: (name, slug)
- e.g. [("Census Tract", "census-tract'"), ("Zip code", "zip-code")]
"""
if self.err_found:
return None
gtypes = []
type_dict = {}
for info in self.target_info['data']:
# Have we already added this type to the list?
if not info['geocode_type_slug'] in type_dict:
# Nope, add it
gtypes.append((info['geocode_type'], info['geocode_type_slug']))
type_dict.update({ info['geocode_type_slug']: 1 })
return gtypes
def get_available_layers_list_by_type(self, chosen_geocode_type=None, for_json=False):
"""
Used for populating form dropdown with list of layers
Create a list of items, each item has the following attributes:
[
{
"join_target_id" : 8
"name" : "2014 - Election Precincts, Boston",
"expected_format" : "Boston Election Precinct ID (integer)"
}
]
value - join target id
text - (year) layer title
"""
if self.err_found:
return None
join_targets = []
for info in self.target_info['data']:
gtype_slug = info['geocode_type_slug']
if chosen_geocode_type == gtype_slug or\
chosen_geocode_type is None:
if 'name' not in info:
continue
join_target_id = info['id']
info_line = "{0} - {1}".format(info['year'], info['name'])
description = info.get('expected_format', {}).get('description', '')
if for_json:
info_dict = OrderedDict()
info_dict['join_target_id'] = info['id']
info_dict['name'] = info_line
info_dict['description'] = description
join_targets.append(info_dict)
else:
join_targets.append((join_target_id, info_line))
return join_targets
def get_format_info_for_target_layer(self, target_layer_id):
if target_layer_id is None:
return None
for info in self.target_info['data']:
if 'id' in info and target_layer_id == info['id']:
if 'expected_format' in info:
return info['expected_format']
return None
def get_formatting_zero_pad_length(self, target_layer_id):
"""
Used to format join columns before sending them over to WorldMap.
If this Target layer expects zero padding, return the
length of the expected field.
If no zero padding needed, return None
"""
expected_format = self.get_format_info_for_target_layer(target_layer_id)
if expected_format is None:
return None
if expected_format.get('is_zero_padded') is True\
and expected_format.get('expected_zero_padded_length', -1) > 0:
return expected_format['expected_zero_padded_length']
return None
def get_zero_pad_note(self, info):
"""
If the format type JSON includes zero padding info,
show it
Example JSON:
"expected_format": {
"expected_zero_padded_length": 6,
"is_zero_padded": true,
"description": "Remove non integers. Check for empty string. Pad with zeros until 6 digits.",
"name": "Census Tract (6 digits, no decimal)"
},
"""
if info is None or not hasattr(info, 'get'):
return None
if not 'expected_format' in info:
return None
expected_format = info['expected_format']
if expected_format.get('is_zero_padded') is True\
and expected_format.get('expected_zero_padded_length', -1) > 0:
return 'Zero padded to %s digits' %\
expected_format['expected_zero_padded_length']
return None
def get_format_name(self, info):
"""
If the format type JSON includes zero padding info,
show it
Example JSON:
"expected_format": {
"expected_zero_padded_length": 6,
"is_zero_padded": true,
"description": "Remove non integers. Check for empty string. Pad with zeros until 6 digits.",
"name": "Census Tract (6 digits, no decimal)"
},
"""
if info is None or not hasattr(info, 'get'):
return None
if not 'expected_format' in info:
return None
expected_format = info['expected_format']
return expected_format.get('name', None)
def get_join_targets_by_type(self, chosen_geocode_type=None):
"""
Creating a list of tuples of Names/Years based on the chosen Geospatial Identifier
- Tuple Format:
[(join target name, join_target_id),]
join_target_name = name (year)
join_target_id = JoinTarget id on the WorldMap system
- Used in the Geoconnect form
- e.g. If Cenus Tract is chosen, list might be:
[("US Census 2010", 7), ("US Census 2000", 3), etc.]
Note: if chosen_geocode_type is None, all identifiers will be retrieved
"""
join_targets = []
for info in self.target_info['data']:
gtype_slug = info['geocode_type_slug']
if chosen_geocode_type == gtype_slug or\
chosen_geocode_type is None:
info_line = JoinTargetFormatter.get_formatted_name(
info['geocode_type'])
#info['year'])
gtype_tuple = (info['geocode_type_slug'], info_line)
if not gtype_tuple in join_targets:
join_targets.append(gtype_tuple)
# Sort list by geocode_type name
join_targets.sort(key=lambda tup: tup[1]) # sorts in place
return join_targets
"""
python manage.py shell
from gc_apps.worldmap_connect.utils import get_latest_jointarget_information
from gc_apps.worldmap_connect.jointarget_formatter import JoinTargetFormatter
jt = get_latest_jointarget_information()
formatter = JoinTargetFormatter(jt.target_info)
gtypes = formatter.get_geocode_types()
print gtypes
print '-- targets for each type --'
cnt = 0
for g in gtypes:
cnt +=1
print '({0}) {1}'.format(cnt, formatter.get_join_targets_by_type(g))
cnt = 0
print '\n-- all targets --'
for item in formatter.get_join_targets_by_type(g):
cnt +=1
print '({0}) {1}'.format(cnt, item)
"""
| apache-2.0 | 7,038,965,172,254,172,000 | 32.604046 | 105 | 0.551991 | false | 4.145098 | false | false | false |
SergeySatskiy/codimension | codimension/debugger/excpt.py | 1 | 2978 | # -*- coding: utf-8 -*-
#
# codimension - graphics python two-way code editor and analyzer
# Copyright (C) 2010-2012 Sergey Satskiy <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Debugger exceptions viewer"""
from ui.qt import Qt, pyqtSignal, QVBoxLayout, QWidget, QSplitter
from .clientexcptviewer import ClientExceptionsViewer
from .ignoredexcptviewer import IgnoredExceptionsViewer
class DebuggerExceptions(QWidget):
"""Implements the debugger context viewer"""
sigClientExceptionsCleared = pyqtSignal()
def __init__(self, parent=None):
QWidget.__init__(self, parent)
self.__createLayout()
self.clientExcptViewer.sigClientExceptionsCleared.connect(
self.__onClientExceptionsCleared)
def __createLayout(self):
"""Creates the widget layout"""
verticalLayout = QVBoxLayout(self)
verticalLayout.setContentsMargins(1, 1, 1, 1)
self.splitter = QSplitter(Qt.Vertical)
self.ignoredExcptViewer = IgnoredExceptionsViewer(self.splitter)
self.clientExcptViewer = ClientExceptionsViewer(
self.splitter, self.ignoredExcptViewer)
self.splitter.addWidget(self.clientExcptViewer)
self.splitter.addWidget(self.ignoredExcptViewer)
self.splitter.setCollapsible(0, False)
self.splitter.setCollapsible(1, False)
verticalLayout.addWidget(self.splitter)
def clear(self):
"""Clears everything"""
self.clientExcptViewer.clear()
def addException(self, exceptionType, exceptionMessage, stackTrace):
"""Adds the exception to the view"""
self.clientExcptViewer.addException(exceptionType, exceptionMessage,
stackTrace)
def isIgnored(self, exceptionType):
"""Returns True if this exception type should be ignored"""
return self.ignoredExcptViewer.isIgnored(exceptionType)
def setFocus(self):
"""Sets the focus to the client exception window"""
self.clientExcptViewer.setFocus()
def getTotalClientExceptionCount(self):
"""Provides the total number of the client exceptions"""
return self.clientExcptViewer.getTotalCount()
def __onClientExceptionsCleared(self):
"""Triggered when the user cleared exceptions"""
self.sigClientExceptionsCleared.emit()
| gpl-3.0 | -3,956,236,440,457,816,000 | 35.317073 | 76 | 0.708193 | false | 4.21813 | false | false | false |
BartGo/bottle-cuturl | app/settings.py | 1 | 1381 | # -*- coding: utf-8 -*-
import os
APP_NAME = 'bottle-cuturl'
# disabled but also removed crashreporter==1.11 from setup.py, somehow does not like setuptools==21.0.0
CRASH_REPORT = 0
# Paths
PROJECT_PATH = os.path.join(os.path.abspath(os.path.dirname(__file__)))
TEMPLATE_PATH = os.path.join(PROJECT_PATH, 'views')
STATIC_PATH = os.path.join(PROJECT_PATH, 'assets')
# SQL Alchemy
# *** PostgreSQL
SQL_SQLITE_ONLY = 1
SQL_PG_USE_LOCAL = 0
SQL_PG_DBENGINE_LOCAL = "postgresql+psycopg2://cuturl:cuturl@localhost:5432/bottle-cuturl"
if (not SQL_SQLITE_ONLY):
try:
import psycopg2
# # for windows, add to PATH: C:\Program Files\PostgreSQL\9.4\bin
# DATABASE_URL is an environment variable used by Heroku
if SQL_PG_USE_LOCAL == 1:
SQA_DBENGINE = SQL_PG_DBENGINE_LOCAL
else:
SQA_DBENGINE = os.environ["DATABASE_URL"]
except (OSError, ImportError, KeyError):
# *** SQLite
SQL_SQLITE_ONLY = 1
if SQL_SQLITE_ONLY:
SQA_DBENGINE = 'sqlite:///data//sqlite.db'
SQA_ECHO = True
SQA_KEYWORD = 'db'
SQA_CREATE = True
SQA_COMMIT = True
SQA_USE_KWARGS = False
# Crashreporter
if CRASH_REPORT == 1:
from crashreporter import CrashReporter
cr = CrashReporter(report_dir='crashreporter', check_interval=10, config='.crashreporter.cfg')
cr.application_name = APP_NAME
cr.application_version = '0.0.22' # bumpversion updates that
| mit | 5,902,666,382,758,505,000 | 26.078431 | 103 | 0.694424 | false | 3.008715 | false | false | false |
nagyistoce/geokey | geokey/contributions/tests/observations/test_views.py | 1 | 38917 | import json
from django.test import TestCase
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.contrib.auth.models import AnonymousUser
from nose.tools import raises
from rest_framework.test import APIRequestFactory, force_authenticate
from geokey.projects.tests.model_factories import UserF, ProjectF
from geokey.projects.models import Project
from geokey.categories.tests.model_factories import (
CategoryFactory, TextFieldFactory, NumericFieldFactory
)
from geokey.users.tests.model_factories import UserGroupF
from geokey.subsets.tests.model_factories import SubsetFactory
from ..model_factories import (
ObservationFactory, CommentFactory, LocationFactory
)
from geokey.contributions.views.observations import (
SingleAllContributionAPIView, SingleContributionAPIView,
ProjectObservations
)
from geokey.contributions.models import Observation
class SingleContributionAPIViewTest(TestCase):
def setUp(self):
self.factory = APIRequestFactory()
self.admin = UserF.create()
self.creator = UserF.create()
self.moderator = UserF.create()
self.viewer = UserF.create()
self.project = ProjectF(
add_admins=[self.admin],
add_contributors=[self.creator],
add_viewer=[self.viewer]
)
self.moderators = UserGroupF(add_users=[self.moderator], **{
'project': self.project,
'can_moderate': True
})
self.observation = ObservationFactory.create(**{
'project': self.project,
'creator': self.creator,
'status': 'active'
})
def test_approve_pending_with_admin(self):
self.observation.status = 'pending'
self.observation.save()
url = reverse('api:project_single_observation', kwargs={
'project_id': self.project.id,
'observation_id': self.observation.id
})
request = self.factory.patch(url)
request.DATA = {'meta': {'status': "active"}}
request.user = self.admin
view = SingleContributionAPIView()
view.update_and_respond(request, self.observation)
self.assertEqual(
Observation.objects.get(pk=self.observation.id).status,
'active'
)
def test_approve_pending_with_admin_empty_properties(self):
self.observation.properties = None
self.observation.status = 'pending'
self.observation.save()
url = reverse('api:project_single_observation', kwargs={
'project_id': self.project.id,
'observation_id': self.observation.id
})
request = self.factory.patch(url)
request.DATA = {'meta': {'status': "active"}}
request.user = self.admin
view = SingleContributionAPIView()
view.update_and_respond(request, self.observation)
self.assertEqual(
Observation.objects.get(pk=self.observation.id).status,
'active'
)
def test_suspend_pending_with_admin(self):
self.observation.status = 'active'
self.observation.save()
url = reverse('api:project_single_observation', kwargs={
'project_id': self.project.id,
'observation_id': self.observation.id
})
request = self.factory.patch(url)
request.DATA = {'meta': {'status': "pending"}}
request.user = self.admin
view = SingleContributionAPIView()
view.update_and_respond(request, self.observation)
self.assertEqual(
Observation.objects.get(pk=self.observation.id).status,
'pending'
)
def test_approve_pending_with_moderator(self):
self.observation.status = 'pending'
self.observation.save()
url = reverse('api:project_single_observation', kwargs={
'project_id': self.project.id,
'observation_id': self.observation.id
})
request = self.factory.patch(url)
request.DATA = {'meta': {'status': "active"}}
request.user = self.moderator
view = SingleContributionAPIView()
view.update_and_respond(request, self.observation)
self.assertEqual(
Observation.objects.get(pk=self.observation.id).status,
'active'
)
@raises(PermissionDenied)
def test_approve_pending_with_contributor(self):
self.observation.status = 'pending'
self.observation.save()
url = reverse('api:project_single_observation', kwargs={
'project_id': self.project.id,
'observation_id': self.observation.id
})
request = self.factory.patch(url)
request.DATA = {'meta': {'status': "active"}}
request.user = self.creator
view = SingleContributionAPIView()
view.update_and_respond(request, self.observation)
self.assertEqual(
Observation.objects.get(pk=self.observation.id).status,
'pending'
)
def test_approve_pending_with_contributor_who_is_moderator(self):
self.moderators.users.add(self.creator)
self.observation.status = 'pending'
self.observation.save()
url = reverse('api:project_single_observation', kwargs={
'project_id': self.project.id,
'observation_id': self.observation.id
})
request = self.factory.patch(url)
request.DATA = {'meta': {'status': "active"}}
request.user = self.creator
view = SingleContributionAPIView()
view.update_and_respond(request, self.observation)
self.assertEqual(
Observation.objects.get(pk=self.observation.id).status,
'active'
)
def test_flag_with_admin(self):
url = reverse('api:project_single_observation', kwargs={
'project_id': self.project.id,
'observation_id': self.observation.id
})
request = self.factory.patch(url)
request.DATA = {'meta': {'status': "pending"}}
request.user = self.admin
view = SingleContributionAPIView()
view.update_and_respond(request, self.observation)
self.assertEqual(
Observation.objects.get(pk=self.observation.id).status,
'pending'
)
def test_flag_with_moderator(self):
url = reverse('api:project_single_observation', kwargs={
'project_id': self.project.id,
'observation_id': self.observation.id
})
request = self.factory.patch(url)
request.DATA = {'meta': {'status': "pending"}}
request.user = self.moderator
view = SingleContributionAPIView()
view.update_and_respond(request, self.observation)
ref = Observation.objects.get(pk=self.observation.id)
self.assertEqual(ref.status, 'pending')
def test_flag_with_moderator_and_edit(self):
url = reverse('api:project_single_observation', kwargs={
'project_id': self.project.id,
'observation_id': self.observation.id
})
request = self.factory.patch(url)
request.DATA = {
'properties': {
'key': 'updated'
},
'meta': {
'status': 'pending',
}
}
request.user = self.moderator
view = SingleContributionAPIView()
view.update_and_respond(request, self.observation)
ref = Observation.objects.get(pk=self.observation.id)
self.assertEqual(ref.status, 'pending')
self.assertEqual(ref.properties.get('key'), 'updated')
def test_flag_with_contributor(self):
url = reverse('api:project_single_observation', kwargs={
'project_id': self.project.id,
'observation_id': self.observation.id
})
request = self.factory.patch(url)
request.DATA = {'meta': {'status': "pending"}}
request.user = self.creator
view = SingleContributionAPIView()
view.update_and_respond(request, self.observation)
self.assertEqual(
Observation.objects.get(pk=self.observation.id).status,
'pending'
)
@raises(PermissionDenied)
def test_flag_with_anonymous(self):
url = reverse('api:project_single_observation', kwargs={
'project_id': self.project.id,
'observation_id': self.observation.id
})
request = self.factory.patch(url)
request.DATA = {'meta': {'status': "pending"}}
request.user = AnonymousUser()
view = SingleContributionAPIView()
view.update_and_respond(request, self.observation)
self.assertEqual(
Observation.objects.get(pk=self.observation.id).status,
'active'
)
@raises(PermissionDenied)
def test_update_user(self):
url = reverse('api:project_single_observation', kwargs={
'project_id': self.project.id,
'observation_id': self.observation.id
})
request = self.factory.patch(url)
request.DATA = {'properies': {'text': 'blah'}}
request.user = self.viewer
view = SingleContributionAPIView()
view.update_and_respond(request, self.observation)
def test_update_under_review(self):
CommentFactory.create(**{
'commentto': self.observation,
'review_status': 'open'
})
url = reverse('api:project_single_observation', kwargs={
'project_id': self.project.id,
'observation_id': self.observation.id
})
request = self.factory.patch(url)
request.DATA = {'meta': {'status': 'active'}}
request.user = self.admin
view = SingleContributionAPIView()
view.update_and_respond(request, self.observation)
ref = Observation.objects.get(pk=self.observation.id)
self.assertEqual(ref.status, 'review')
@raises(PermissionDenied)
def test_commit_from_draft_admin(self):
self.observation.status = 'draft'
self.observation.save()
url = reverse('api:project_single_observation', kwargs={
'project_id': self.project.id,
'observation_id': self.observation.id
})
request = self.factory.patch(url)
request.DATA = {'meta': {'status': "active"}}
request.user = self.admin
view = SingleContributionAPIView()
view.update_and_respond(request, self.observation)
self.assertEqual(
Observation.objects.get(pk=self.observation.id).status,
'pending'
)
@raises(PermissionDenied)
def test_commit_from_draft_with_moderator(self):
self.observation.status = 'draft'
self.observation.save()
url = reverse('api:project_single_observation', kwargs={
'project_id': self.project.id,
'observation_id': self.observation.id
})
request = self.factory.patch(url)
request.DATA = {'meta': {'status': "active"}}
request.user = self.moderator
view = SingleContributionAPIView()
view.update_and_respond(request, self.observation)
self.assertEqual(
Observation.objects.get(pk=self.observation.id).status,
'pending'
)
def test_commit_from_draft_with_contributor(self):
self.moderators.users.add(self.creator)
self.observation.status = 'draft'
self.observation.save()
url = reverse('api:project_single_observation', kwargs={
'project_id': self.project.id,
'observation_id': self.observation.id
})
request = self.factory.patch(url)
request.DATA = {'meta': {'status': "active"}}
request.user = self.creator
view = SingleContributionAPIView()
view.update_and_respond(request, self.observation)
self.assertEqual(
Observation.objects.get(pk=self.observation.id).status,
'active'
)
def test_commit_from_draft_with_contributor_who_is_moderator(self):
self.observation.status = 'draft'
self.observation.save()
url = reverse('api:project_single_observation', kwargs={
'project_id': self.project.id,
'observation_id': self.observation.id
})
request = self.factory.patch(url)
request.DATA = {'meta': {'status': "active"}}
request.user = self.creator
view = SingleContributionAPIView()
view.update_and_respond(request, self.observation)
self.assertEqual(
Observation.objects.get(pk=self.observation.id).status,
'pending'
)
def test_commit_from_draft_with_contributor_with_data(self):
self.observation.status = 'draft'
self.observation.save()
url = reverse('api:project_single_observation', kwargs={
'project_id': self.project.id,
'observation_id': self.observation.id
})
request = self.factory.patch(url)
request.DATA = {
'properties': {
'key': 'updated'
},
'meta': {
'status': "active",
}
}
request.user = self.creator
view = SingleContributionAPIView()
view.update_and_respond(request, self.observation)
ref = Observation.objects.get(pk=self.observation.id)
self.assertEqual(ref.status, 'pending')
self.assertEqual(ref.properties.get('key'), 'updated')
class SingleAllContributionAPIViewTest(TestCase):
def setUp(self):
self.admin = UserF.create()
self.creator = UserF.create()
self.project = ProjectF(
add_admins=[self.admin],
add_contributors=[self.creator]
)
self.observation = ObservationFactory.create(**{
'project': self.project,
'creator': self.creator,
'status': 'active'
})
def test_get_object_with_creator(self):
view = SingleAllContributionAPIView()
view.get_object(
self.creator, self.observation.project.id, self.observation.id)
def test_get_object_with_admin(self):
view = SingleAllContributionAPIView()
observation = view.get_object(
self.admin, self.observation.project.id, self.observation.id)
self.assertEqual(observation, self.observation)
@raises(Project.DoesNotExist)
def test_get_object_with_some_dude(self):
some_dude = UserF.create()
view = SingleAllContributionAPIView()
view.get_object(
some_dude, self.observation.project.id, self.observation.id)
@raises(Observation.DoesNotExist)
def test_get_draft_object_with_admin(self):
self.observation.status = 'draft'
self.observation.save()
view = SingleAllContributionAPIView()
view.get_object(
self.admin, self.observation.project.id, self.observation.id)
def test_api_with_admin(self):
CommentFactory.create_batch(5, **{'commentto': self.observation})
factory = APIRequestFactory()
url = reverse('api:project_single_observation', kwargs={
'project_id': self.project.id,
'observation_id': self.observation.id
})
request = factory.get(url)
force_authenticate(request, user=self.admin)
theview = SingleAllContributionAPIView.as_view()
response = theview(
request,
project_id=self.project.id,
observation_id=self.observation.id).render()
self.assertEqual(response.status_code, 200)
class ProjectPublicApiTest(TestCase):
def setUp(self):
self.factory = APIRequestFactory()
self.admin = UserF.create()
self.contributor = UserF.create()
self.non_member = UserF.create()
self.project = ProjectF(
add_admins=[self.admin],
add_contributors=[self.contributor]
)
self.category = CategoryFactory(**{
'status': 'active',
'project': self.project
})
TextFieldFactory.create(**{
'key': 'key_1',
'category': self.category,
'required': True,
'order': 1
})
NumericFieldFactory.create(**{
'key': 'key_2',
'category': self.category,
'minval': 0,
'maxval': 1000,
'order': 2
})
self.data = {
"type": "Feature",
"geometry": {
"type": "Point",
"coordinates": [
-0.13404607772827148,
51.52439200896907
]
},
"properties": {
"key_1": "value 1",
"key_2": 12
},
"meta": {
"category": self.category.id,
},
"location": {
"name": "UCL",
"description": "UCL's main quad",
"private": True
}
}
def _post(self, data, user):
url = reverse(
'api:project_observations',
kwargs={
'project_id': self.project.id
}
)
request = self.factory.post(
url, json.dumps(data), content_type='application/json')
force_authenticate(request, user=user)
view = ProjectObservations.as_view()
return view(request, project_id=self.project.id).render()
def test_contribute_with_wrong_category(self):
self.data['meta']['category'] = 3864
response = self._post(self.data, self.admin)
self.assertEqual(response.status_code, 400)
def test_contribute_with_invalid(self):
data = {
"type": "Feature",
"geometry": {
"type": "Point",
"coordinates": [
-0.13404607772827148,
51.52439200896907
]
},
"properties": {
"key_1": 12,
"key_2": "jsdbdjhsb"
},
"meta": {
"category": self.category.id,
},
"location": {
"name": "UCL",
"description": "UCL's main quad",
"private": True
}
}
response = self._post(data, self.admin)
self.assertEqual(response.status_code, 400)
def test_contribute_with_invalid_number(self):
data = {
"type": "Feature",
"geometry": {
"type": "Point",
"coordinates": [
-0.13404607772827148,
51.52439200896907
]
},
"properties": {
"key_1": 12,
"key_2": 2000
},
"meta": {
"category": self.category.id,
},
"location": {
"name": "UCL",
"description": "UCL's main quad",
"private": True
}
}
response = self._post(data, self.admin)
self.assertEqual(response.status_code, 400)
def test_contribute_with_existing_location(self):
location = LocationFactory()
data = {
"type": "Feature",
"geometry": location.geometry.geojson,
"location": {
"id": location.id,
"name": location.name,
"description": location.description,
"private": location.private
},
"properties": {
"key_1": "value 1",
"key_2": 12
},
"meta": {
"category": self.category.id,
}
}
response = self._post(data, self.admin)
self.assertEqual(response.status_code, 201)
def test_contribute_with_private_for_project_location(self):
location = LocationFactory(**{
'private': True,
'private_for_project': self.project
})
data = {
"type": "Feature",
"geometry": location.geometry.geojson,
"location": {
"id": location.id,
"name": location.name,
"description": location.description,
"private": location.private
},
"properties": {
"key_1": "value 1",
"key_2": 12
},
"meta": {
"category": self.category.id,
}
}
response = self._post(data, self.admin)
self.assertEqual(response.status_code, 201)
def test_contribute_with_wrong_project_location(self):
project = ProjectF()
location = LocationFactory(**{
'private': True,
'private_for_project': project
})
data = {
"type": "Feature",
"geometry": location.geometry.geojson,
"location": {
"id": location.id,
"name": location.name,
"description": location.description,
"private": location.private
},
"properties": {
"key_1": "value 1",
"key_2": 12
},
"meta": {
"category": self.category.id,
}
}
response = self._post(data, self.admin)
self.assertEqual(response.status_code, 400)
def test_contribute_with_private_location(self):
location = LocationFactory(**{
'private': True
})
data = {
"type": "Feature",
"geometry": location.geometry.geojson,
"location": {
"id": location.id,
"name": location.name,
"description": location.description,
"private": location.private
},
"properties": {
"key_1": "value 1",
"key_2": 12
},
"meta": {
"category": self.category.id,
}
}
response = self._post(data, self.admin)
self.assertEqual(response.status_code, 400)
def test_contribute_valid_draft(self):
self.data = {
"type": "Feature",
"geometry": {
"type": "Point",
"coordinates": [
-0.13404607772827148,
51.52439200896907
]
},
"location": {
"name": "UCL",
"description": "UCL's main quad",
"private": True
},
"properties": {
"key_1": "value 1",
"key_2": 12
},
"meta": {
"category": self.category.id,
"status": "draft"
}
}
response = self._post(self.data, self.admin)
self.assertEqual(response.status_code, 201)
self.assertIn('"status":"draft"', response.content)
def test_contribute_valid_draft_with_empty_required(self):
self.data = {
"type": "Feature",
"geometry": {
"type": "Point",
"coordinates": [
-0.13404607772827148,
51.52439200896907
]
},
"properties": {
"key_1": None,
"key_2": 12
},
"meta": {
"category": self.category.id,
"status": "draft"
},
"location": {
"name": "UCL",
"description": "UCL's main quad",
"private": True
}
}
response = self._post(self.data, self.admin)
self.assertEqual(response.status_code, 201)
self.assertIn('"status":"draft"', response.content)
def test_contribute_invalid_draft(self):
self.data = {
"type": "Feature",
"geometry": {
"type": "Point",
"coordinates": [
-0.13404607772827148,
51.52439200896907
]
},
"properties": {
"key_1": "value 1",
"key_2": 'Blah'
},
"meta": {
"category": self.category.id,
"status": "draft"
},
"location": {
"name": "UCL",
"description": "UCL's main quad",
"private": True
},
}
response = self._post(self.data, self.admin)
self.assertEqual(response.status_code, 400)
def test_contribute_to_public_everyone_with_Anonymous(self):
self.project.everyone_contributes = 'true'
self.project.isprivate = False
self.project.save()
response = self._post(self.data, AnonymousUser())
self.assertEqual(response.status_code, 201)
def test_contribute_to_public_with_admin(self):
self.project.isprivate = False
self.project.save()
response = self._post(self.data, self.admin)
self.assertEqual(response.status_code, 201)
self.assertIn('"status":"active"', response.content)
def test_contribute_to_public_with_contributor(self):
self.project.isprivate = False
self.project.save()
response = self._post(self.data, self.contributor)
self.assertEqual(response.status_code, 201)
self.assertIn('"status":"pending"', response.content)
def test_contribute_to_public_with_non_member(self):
self.project.isprivate = False
self.project.save()
response = self._post(self.data, self.non_member)
self.assertEqual(response.status_code, 403)
def test_contribute_to_public_with_anonymous(self):
self.project.isprivate = False
self.project.save()
response = self._post(self.data, AnonymousUser())
self.assertEqual(response.status_code, 403)
def test_contribute_to_private_with_admin(self):
response = self._post(self.data, self.admin)
self.assertEqual(response.status_code, 201)
self.assertEqual(len(self.project.observations.all()), 1)
def test_contribute_to_private_with_contributor(self):
response = self._post(self.data, self.contributor)
self.assertEqual(response.status_code, 201)
self.assertEqual(len(self.project.observations.all()), 1)
def test_contribute_to_private_with_non_member(self):
response = self._post(self.data, self.non_member)
self.assertEqual(response.status_code, 404)
self.assertEqual(len(self.project.observations.all()), 0)
def test_contribute_to_private_with_anonymous(self):
response = self._post(self.data, AnonymousUser())
self.assertEqual(response.status_code, 404)
self.assertEqual(len(self.project.observations.all()), 0)
def test_contribute_to_inactive_with_admin(self):
self.project.status = 'inactive'
self.project.save()
response = self._post(self.data, self.admin)
self.assertEqual(response.status_code, 403)
self.assertEqual(len(self.project.observations.all()), 0)
def test_contribute_to_inactive_with_contributor(self):
self.project.status = 'inactive'
self.project.save()
response = self._post(self.data, self.contributor)
self.assertEqual(response.status_code, 404)
self.assertEqual(len(self.project.observations.all()), 0)
def test_contribute_to_inactive_with_non_member(self):
self.project.status = 'inactive'
self.project.save()
response = self._post(self.data, self.non_member)
self.assertEqual(response.status_code, 404)
self.assertEqual(len(self.project.observations.all()), 0)
def test_contribute_to_inactive_with_Anonymous(self):
self.project.status = 'inactive'
self.project.save()
response = self._post(self.data, AnonymousUser())
self.assertEqual(response.status_code, 404)
self.assertEqual(len(self.project.observations.all()), 0)
def test_contribute_to_deleted_with_admin(self):
self.project.status = 'deleted'
self.project.save()
response = self._post(self.data, self.admin)
self.assertEqual(response.status_code, 404)
def test_contribute_to_deleted_with_contributor(self):
self.project.status = 'deleted'
self.project.save()
response = self._post(self.data, self.contributor)
self.assertEqual(response.status_code, 404)
def test_contribute_to_deleted_with_non_member(self):
self.project.status = 'deleted'
self.project.save()
response = self._post(self.data, self.non_member)
self.assertEqual(response.status_code, 404)
def test_contribute_to_deleted_with_anonymous(self):
self.project.status = 'deleted'
self.project.save()
response = self._post(self.data, AnonymousUser())
self.assertEqual(response.status_code, 404)
class GetSingleObservationInProject(TestCase):
def setUp(self):
self.factory = APIRequestFactory()
self.admin = UserF.create()
self.contributor = UserF.create()
self.project = ProjectF(
add_admins=[self.admin],
add_contributors=[self.contributor]
)
self.observation = ObservationFactory(
**{'project': self.project, 'creator': self.contributor})
def _get(self, user):
url = reverse(
'api:project_single_observation',
kwargs={
'project_id': self.project.id,
'observation_id': self.observation.id
}
)
request = self.factory.get(url)
force_authenticate(request, user=user)
view = SingleAllContributionAPIView.as_view()
return view(
request, project_id=self.project.id,
observation_id=self.observation.id).render()
def test_get_with_admin(self):
response = self._get(self.admin)
self.assertEqual(response.status_code, 200)
def test_get_with_contributor(self):
response = self._get(self.contributor)
self.assertEqual(response.status_code, 200)
def test_get_with_non_member(self):
user = UserF.create()
response = self._get(user)
self.assertEqual(response.status_code, 404)
class UpdateObservationInProject(TestCase):
def setUp(self):
self.factory = APIRequestFactory()
self.admin = UserF.create()
self.contributor = UserF.create()
self.non_member = UserF.create()
self.project = ProjectF(
add_admins=[self.admin],
add_contributors=[self.contributor]
)
self.category = CategoryFactory(**{
'status': 'active',
'project': self.project
})
TextFieldFactory.create(**{
'key': 'key_1',
'category': self.category,
'order': 0
})
NumericFieldFactory.create(**{
'key': 'key_2',
'category': self.category,
'order': 1
})
location = LocationFactory()
self.observation = ObservationFactory.create(**{
'properties': {
"key_1": "value 1",
"key_2": 12,
},
'category': self.category,
'project': self.project,
'location': location,
'creator': self.admin,
'status': 'active'
})
self.update_data = {
"properties": {
"version": 1,
"key_2": 15
}
}
def _patch(self, data, user):
url = reverse(
'api:project_single_observation',
kwargs={
'project_id': self.project.id,
'observation_id': self.observation.id
}
)
request = self.factory.patch(
url, json.dumps(data), content_type='application/json')
force_authenticate(request, user=user)
view = SingleAllContributionAPIView.as_view()
return view(
request, project_id=self.project.id,
observation_id=self.observation.id).render()
def _delete(self, user):
url = reverse(
'api:project_single_observation',
kwargs={
'project_id': self.project.id,
'observation_id': self.observation.id
}
)
request = self.factory.delete(url, content_type='application/json')
force_authenticate(request, user=user)
view = SingleAllContributionAPIView.as_view()
return view(
request, project_id=self.project.id,
observation_id=self.observation.id).render()
def test_update_conflict(self):
response = self._patch(
self.update_data,
self.admin
)
self.assertEqual(response.status_code, 200)
data = {"properties": {"attributes": {"version": 1, "key_2": 2}}}
response = self._patch(
data,
self.admin
)
self.assertEqual(response.status_code, 200)
def test_update_location_with_admin(self):
self.update_data['geometry'] = {
'type': 'Point',
'coordinates': [
-0.1444154977798462,
51.54671869005856
]
}
self.update_data['properties']['location'] = {
'name': 'New name'
}
response = self._patch(
self.update_data,
self.admin
)
self.assertEqual(response.status_code, 200)
observation = Observation.objects.get(pk=self.observation.id)
self.assertEqual(
observation.properties.get('key_2'), 15)
self.assertContains(response, 'New name')
self.assertContains(response, '-0.144415')
def test_update_with_admin(self):
response = self._patch(
self.update_data,
self.admin
)
self.assertEqual(response.status_code, 200)
observation = Observation.objects.get(pk=self.observation.id)
self.assertEqual(
observation.properties.get('key_2'), 15)
@raises(Observation.DoesNotExist)
def test_delete_with_admin(self):
response = self._delete(
self.admin
)
self.assertEqual(response.status_code, 204)
Observation.objects.get(pk=self.observation.id)
def test_update_with_contributor(self):
response = self._patch(
self.update_data,
self.contributor
)
self.assertEqual(response.status_code, 403)
observation = Observation.objects.get(pk=self.observation.id)
self.assertEqual(
observation.properties.get('key_2'), 12)
def test_delete_with_contributor(self):
response = self._delete(
self.contributor
)
self.assertEqual(response.status_code, 403)
def test_update_with_non_member(self):
response = self._patch(
self.update_data,
self.non_member
)
self.assertEqual(response.status_code, 404)
self.assertEqual(
self.observation.properties.get('key_2'), 12)
def test_delete_with_non_member(self):
response = self._delete(
self.non_member
)
self.assertEqual(response.status_code, 404)
self.assertNotEqual(
Observation.objects.get(pk=self.observation.id).status,
'deleted'
)
class TestProjectPublicApi(TestCase):
def setUp(self):
self.factory = APIRequestFactory()
self.admin = UserF.create()
self.contributor = UserF.create()
self.project = ProjectF.create(
add_admins=[self.admin],
add_contributors=[self.contributor]
)
def get(self, user, search=None, subset=None):
url = reverse('api:project_observations', kwargs={
'project_id': self.project.id
})
if search:
url += '?search=blah'
if subset:
url += '?subset=' + str(subset)
request = self.factory.get(url)
force_authenticate(request, user=user)
theview = ProjectObservations.as_view()
return theview(
request,
project_id=self.project.id).render()
def test_get_with_subset(self):
category_1 = CategoryFactory(**{'project': self.project})
category_2 = CategoryFactory(**{'project': self.project})
subset = SubsetFactory.create(**{
'project': self.project,
'filters': {category_1.id: {}}
})
for x in range(0, 2):
ObservationFactory.create(**{
'project': self.project,
'category': category_1}
)
ObservationFactory.create(**{
'project': self.project,
'category': category_2}
)
response = self.get(self.admin, subset=subset.id)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(json.loads(response.content).get('features')), 2)
def test_get_with_search(self):
category = CategoryFactory(**{'project': self.project})
TextFieldFactory.create(**{'key': 'text', 'category': category})
for x in range(0, 2):
ObservationFactory.create(**{
'project': self.project,
'category': category,
'properties': {'text': 'blah'}}
)
ObservationFactory.create(**{
'project': self.project,
'category': category,
'properties': {'text': 'blub'}}
)
response = self.get(self.admin, search='blah')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(json.loads(response.content).get('features')), 2)
def test_get_with_admin(self):
response = self.get(self.admin)
self.assertEqual(response.status_code, 200)
def test_get_with_contributor(self):
response = self.get(self.contributor)
self.assertEqual(response.status_code, 200)
def test_get_with_some_dude(self):
some_dude = UserF.create()
response = self.get(some_dude)
self.assertEqual(response.status_code, 404)
def test_get_with_anonymous(self):
response = self.get(AnonymousUser())
self.assertEqual(response.status_code, 404)
| apache-2.0 | 5,508,809,007,634,089,000 | 31.84135 | 78 | 0.558368 | false | 4.208608 | true | false | false |
lizardsystem/lizard-waterbalance | lizard_wbcomputation/level_control_assignment.py | 1 | 2649 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#******************************************************************************
#
# This file is part of the lizard_waterbalance Django app.
#
# The lizard_waterbalance app is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# the lizard_waterbalance app. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2011 Nelen & Schuurmans
#
#******************************************************************************
#
# Initial programmer: Pieter Swinkels
# Initial date: 2011-01-26
#
#******************************************************************************
from timeseries.timeseriesstub import multiply_timeseries
class LevelControlAssignment:
def compute(self, level_control, pumping_stations):
"""Computes and returns the computed level control time series.
Parameters:
* level_control -- pair of (total incoming, total outgoing) time series
* pumping_stations -- list of PumpingStation(s) to handle the water flow
The total incoming and total outgoing level control volumes have to be
assigned to the intakes and pumps that can be used for level control. This
method computes that assignment and returns it as a dictionary of
PumpingStation to SparseTimeseriesStub.
The keys of the returned dictionary are the intakes and pumps that can
be used for level control. The associated value is the level control
time series.
"""
assignment = {}
(incoming_timeseries, outgoing_timeseries) = level_control
for pumping_station in pumping_stations:
timeseries = None
fraction = pumping_station.percentage / 100.0
if pumping_station.is_computed:
if pumping_station.into:
timeseries = multiply_timeseries(incoming_timeseries, fraction)
else:
timeseries = multiply_timeseries(outgoing_timeseries, fraction)
if timeseries is None:
continue
assignment[pumping_station] = timeseries
return assignment
| gpl-3.0 | 8,728,992,048,921,368,000 | 37.391304 | 83 | 0.626274 | false | 4.705151 | false | false | false |
jake-delorme/skypi | Daemons/skypi/GPS.py | 1 | 3873 | """Manages retrieving data from our GPSD daemon"""
import threading
import logging
import Queue
import time
import calendar
import re
from gps import *
from skypi.Manager import Event
class GPS(object):
"""Runs a thread for retrieving GPS status and a queue for giving location"""
def __init__(self, pimanager):
# create the object yo
logging.debug("Create the GPS object")
self.name = "GPS"
self.pimanager = pimanager
# Create the local queue
self.queue = Queue.PriorityQueue()
# GPS object
self.gpsd = gps(mode=WATCH_ENABLE)
self.gpslocation = Gpslocation()
# Register for messages
self.pimanager.register(self, "SystemTest")
self.pimanager.register(self, "GetGPS")
# Create and start the threads
self.listenerthread = threading.Thread(target=self.__listener, name=self.name+"-listener")
self.listenerthread.daemon = True
self.listenerthread.start()
self.consumerthread = threading.Thread(target=self.__queueconsumer, name=self.name+"-consumer")
self.consumerthread.daemon = True
self.consumerthread.start()
def __listener(self):
"""Continuously read the GPS data and update the gpslocation object"""
name = threading.current_thread().getName()
logging.debug("Running the "+name+" thread")
while True:
self.gpsd.next()
# match only if we got a valid date (partial fix)
if re.match(r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.000Z', self.gpsd.utc):
# convert utc to epoch
parsedtime = time.strptime(self.gpsd.utc, "%Y-%m-%dT%H:%M:%S.000Z")
parsedepoch = calendar.timegm(parsedtime)
# 2 = 2D_FIX 3 = 3D_FIX
if self.gpsd.fix.mode > 1:
self.gpslocation.lattitude = self.gpsd.fix.latitude
self.gpslocation.longtitude = self.gpsd.fix.longitude
self.gpslocation.lastfix = parsedepoch
if self.gpsd.fix.mode == 3:
self.gpslocation.altitude = self.gpsd.fix.altitude*3.28084
self.gpslocation.lastaltitudefix = parsedepoch
# logging.debug('GPS fix mode %s', self.gpsd.fix.mode)
# logging.debug( 'latitude %s' , self.gpsd.fix.latitude )
# logging.debug( 'longitude %s' , self.gpsd.fix.longitude )
# logging.debug( 'time utc %s + %s' , self.gpsd.utc , self.gpsd.fix.time )
# logging.debug( 'altitude (f) %s' , self.gpsd.fix.altitude*3.28084 )
# logging.debug( 'eps ' , self.gpsd.fix.eps)
# logging.debug( 'epx ' , self.gpsd.fix.epx)
# logging.debug( 'epv ' , self.gpsd.fix.epv)
# logging.debug( 'ept ' , self.gpsd.fix.ept)
# logging.debug( 'speed (f/s) ' , self.gpsd.fix.speed*3.28084)
# logging.debug( 'climb ' , self.gpsd.fix.climb)
# logging.debug( 'track ' , self.gpsd.fix.track)
# logging.debug( 'mode ' , self.gpsd.fix.mode)
# logging.debug('')
# logging.debug( 'sats ' , self.gpsd.satellites)
def addToQueue(self, event, priority=99):
"""Adds an item to the GPS queue to be processed"""
self.queue.put((priority, event))
def __queueconsumer(self):
name = threading.current_thread().getName()
logging.debug("Running the %s thread", name)
# process queue objects as the come in run the thread forever
while 1:
item = self.queue.get(True)
task = item[1].getTask()
logging.debug("Process Queue task %s", task)
if task == "GetGPS" or task == "SystemTest":
event = Event("GPSLocation", self.gpslocation)
event.setadditionalarg("callingevent", item[1])
self.pimanager.addToQueue(event)
else:
logging.error('Recieved message %s but i dont use this message', task)
self.queue.task_done()
class Gpslocation(object):
"""Holds the current GPSStatus including location"""
def __init__(self):
self.lattitude = 'Nan'
self.longtitude = 'Nan'
self.altitude = 'Nan'
self.lastfix = 0
self.lastaltitudefix = 0
def getgoogleurl(self):
return 'https://www.google.com/maps/place/%s,%s' % (self.lattitude, self.longtitude)
| mit | -7,834,503,266,576,936,000 | 35.537736 | 97 | 0.678286 | false | 2.885991 | false | false | false |
jsymolon/ARMSim | TestADD.py | 1 | 6295 | import unittest
import armv6instrdecode
import globals
import utils
import logging
import ARMCPU
import pdb
# if ConditionPassed(cond) then
# Rd = Rn + shifter_operand
# if S == 1 and Rd == R15 then
# if CurrentModeHasSPSR() then
# CPSR = SPSR
# else UNPREDICTABLE
# else if S == 1 then
# N Flag = Rd[31]
# Z Flag = if Rd == 0 then 1 else 0
# C Flag = CarryFrom(Rn + shifter_operand)
# V Flag = OverflowFrom(Rn + shifter_operand)
logfile = "TestADD.log"
with open(logfile, 'w'):
pass
logging.basicConfig(filename=logfile,level=logging.DEBUG)
class TestADD(unittest.TestCase):
"""Instructions"""
# preparing to test
def setUp(self):
""" Setting up for the test """
self.addr = 0
# ending the test
def tearDown(self):
"""Cleaning up after the test"""
# E2810A01 010A81E2 ADDAL R0, R1, #4096
# E289A00F 0FA089E2 ADDAL R10, R9, #15 @ dp imm
# E0856004 046085E0 ADDAL R6, R5, R4 @ dp imm shift
# E0866415 156486E0 ADDAL R6, R5, LSL R4 @ dp imm reg shift
def testADD_Imm1(self):
logging.debug("------------------------------------------")
logging.debug("TestDecode:testADD_Imm1")
code = 0xE2810A01 # ADDAL R0, R1, #4096
instrStr = armv6instrdecode.getInstructionFromCode(self, code, 0)
logging.debug("1:" + instrStr)
self.assertEqual(instrStr, " E2810A01 ADD AL R00, R01 #01", instrStr)
logging.debug("2:" + instrStr)
globals.regs[1] = 3 # 3 shift <--- 2 = 12
globals.regs[0] = 1
reg = utils.buildRegValString(self, 1)
self.assertEqual(reg, "R01:00000003", reg)
instrStr = armv6instrdecode.getInstructionFromCode(self, code, 1)
reg = utils.buildRegValString(self, 0)
self.assertEqual(reg, "R00:00001003", reg)
def testADD_Imm2(self):
logging.debug("------------------------------------------")
logging.debug("TestDecode:testADD_Imm2")
# 33222222222211111111110000000000
# 10987654321098765432109876543210
code = 0xE289A00F #2000101
instrStr = armv6instrdecode.getInstructionFromCode(self, code, 0)
logging.debug("1:" + instrStr)
# RN:3, RD:1 r1 = r3 +
# Rd = Rn + shifter_operand + C Flag
self.assertEqual(instrStr, " E289A00F ADD AL R10, R09 #0F", instrStr)
logging.debug("2:" + instrStr)
globals.regs[9] = 3 # 3 shift <--- 2 = 12
globals.regs[10] = 1
reg = utils.buildRegValString(self, 9)
self.assertEqual(reg, "R09:00000003", reg)
instrStr = armv6instrdecode.getInstructionFromCode(self, code, 1)
reg = utils.buildRegValString(self, 10)
self.assertEqual(reg, "R10:00000012", reg)
def testADD_ImmShft(self):
logging.debug("------------------------------------------")
logging.debug("TestDecode:testADD_ImmShft")
code = 0xE0856004
instrStr = armv6instrdecode.getInstructionFromCode(self, code, 0)
logging.debug("1:" + instrStr)
self.assertEqual(instrStr, " E0856004 ADD AL R06, R05 R04", instrStr)
globals.regs[4] = 3 # 3 shift <--- 2 = 12
globals.regs[5] = 1
globals.regs[globals.CPSR] = globals.regs[globals.CPSR] | ARMCPU.CARRYBIT
instrStr = armv6instrdecode.getInstructionFromCode(self, code, 1)
reg = utils.buildRegValString(self, 6)
self.assertEqual(reg, "R06:00000009", reg)
def testADD_RegShft(self):
logging.debug("------------------------------------------")
logging.debug("TestDecode:testADD_RegShft")
code = 0xE0866415
instrStr = armv6instrdecode.getInstructionFromCode(self, code, 0)
logging.debug("1:" + instrStr)
self.assertEqual(instrStr, " E0866415 ADD AL R06, R05 LSL R04", instrStr)
globals.regs[4] = 1
globals.regs[5] = 0x40000000
globals.regs[6] = 1
globals.regs[globals.CPSR] = globals.regs[globals.CPSR] | ARMCPU.CARRYBIT
instrStr = armv6instrdecode.getInstructionFromCode(self, code, 1)
reg = utils.buildRegValString(self, 6)
self.assertEqual(reg, "R06:80000001", reg)
def testADD_setflag_c(self):
logging.debug("------------------------------------------")
logging.debug("TestDecode:testADD_setflag_c - should produce an carry")
code = 0xE2910001 # ADDALS R0, R1, #1
globals.regs[1] = 0xFFFFFFFF
globals.regs[0] = 0
globals.regs[globals.CPSR] = 0
instrStr = armv6instrdecode.getInstructionFromCode(self, code, 1)
reg = utils.buildRegValString(self, 0)
self.assertEqual(reg, "R00:00000000", reg)
# N Flag = Rd[31]
self.assertEqual(1, globals.regs[globals.CPSR] & ARMCPU.NEGATIVEBIT == 0, 1)
# Z Flag = if Rd == 0 then 1 else 0
self.assertEqual(1, globals.regs[globals.CPSR] & ARMCPU.ZEROBIT > 0, 1)
# C Flag = CarryFrom(Rn + shifter_operand)
# V Flag = OverflowFrom(Rn + shifter_operand)
#logging.debug(hex(globals.regs[globals.CPSR] & ARMCPU.OVERBIT))
self.assertEqual(1, globals.regs[globals.CPSR] & ARMCPU.CARRYBIT > 0, 1)
def testADD_setflag_o(self):
logging.debug("------------------------------------------")
logging.debug("TestDecode:testADD_setflag_o - should produce an overflow")
code = 0xE2910001 # ADDALS R0, R1, #1
globals.regs[1] = 0x7FFFFFFF
globals.regs[0] = 0
globals.regs[globals.CPSR] = 0
instrStr = armv6instrdecode.getInstructionFromCode(self, code, 1)
reg = utils.buildRegValString(self, 0)
self.assertEqual(reg, "R00:80000000", reg)
# N Flag = Rd[31]
self.assertEqual(1, globals.regs[globals.CPSR] & ARMCPU.NEGATIVEBIT > 0, 1)
# Z Flag = if Rd == 0 then 1 else 0
self.assertEqual(1, globals.regs[globals.CPSR] & ARMCPU.ZEROBIT == 0, 1)
# C Flag = CarryFrom(Rn + shifter_operand)
# V Flag = OverflowFrom(Rn + shifter_operand)
#logging.debug(hex(globals.regs[globals.CPSR] & ARMCPU.OVERBIT))
self.assertEqual(1, globals.regs[globals.CPSR] & ARMCPU.OVERBIT > 0, 1)
if __name__ == "__main__":
unittest.main() | gpl-2.0 | 5,673,797,915,211,875,000 | 41.255034 | 84 | 0.596823 | false | 3.228205 | true | false | false |
Naereen/mazhe | phystricksCommuns.py | 1 | 1054 | # -*- coding: utf8 -*-
# Note : si tu modifie ce fichier, tu dois le copier à la main vers le répertoire de test.
# ~/script/modules/phystricks/tests
from phystricks import *
def CorrectionParametrique(curve,LLms,name,dilatation=1):
fig = GenericFigure("SubfiguresCDU"+name,script_filename="Communs")
ssfig1 = fig.new_subfigure(u"Quelque points de repères","SS1"+name)
pspict1 = ssfig1.new_pspicture(name+"psp1")
ssfig2 = fig.new_subfigure(u"La courbe","SS2"+name)
pspict2 = ssfig2.new_pspicture(name+"psp2")
for llam in LLms :
P=curve(llam)
tangent=curve.get_tangent_segment(llam)
second=curve.get_second_derivative_vector(llam)
normal=curve.get_normal_vector(llam)
normal.parameters.color="green"
tangent.parameters.color="brown"
pspict1.DrawGraphs(P,second,tangent,normal)
pspict2.DrawGraphs(P,tangent)
curve.parameters.style="dashed"
pspict2.DrawGraphs(curve)
pspict1.DrawDefaultAxes()
pspict1.dilatation(dilatation)
pspict2.DrawDefaultAxes()
pspict2.dilatation(dilatation)
fig.conclude()
fig.write_the_file()
| gpl-3.0 | 3,700,023,193,729,939,500 | 29.911765 | 90 | 0.756422 | false | 2.467136 | false | false | false |
ljean/coop_cms | coop_cms/forms/base.py | 1 | 1030 | # -*- coding: utf-8 -*-
"""forms"""
import floppyforms.__future__ as floppyforms
from coop_html_editor.widgets import get_inline_html_widget
class InlineHtmlEditableModelForm(floppyforms.ModelForm):
"""Base class for form with inline-HTML editor fields"""
is_inline_editable = True # The cms_edition templatetag checks this for switching to edit mode
def __init__(self, *args, **kwargs):
super(InlineHtmlEditableModelForm, self).__init__(*args, **kwargs) # pylint: disable=E1002
for field_name in self.Meta.fields:
no_inline_html_widgets = getattr(self.Meta, 'no_inline_editable_widgets', ())
if field_name not in no_inline_html_widgets:
self.fields[field_name].widget = get_inline_html_widget()
class Media:
css = {
'all': ('css/colorbox.css', ),
}
js = (
'js/jquery.form.js',
'js/jquery.pageslide.js',
'js/jquery.colorbox-min.js',
'js/colorbox.coop.js',
)
| bsd-3-clause | 9,131,538,540,414,729,000 | 34.517241 | 99 | 0.607767 | false | 3.614035 | false | false | false |
4a616d6573205265696c6c79/tartiflette | streaming/stream.py | 1 | 15191 | __doc__ = """
tartiflette
Program to analyse real-time traceroute inormation for routing changes.
Usage:
tartiflette --num_procs=<NUM> --v4_nets=<V4_FILE> --v6_nets=<V6_FILE>[--time=<SECONDS>] [-b=<bucket>]
Options:
--num_procs=<NUM> Number of worker processes to spin up to handle
load. Uses one asyncio event loop per process.
--time=<SECONDS> Number of seconds to run the analysis for. If
ommitted, run forever.
--v4_nets=<V4_FILE> File with a list of v4 networks
--v6_nets=<V6_FILE> File with a list of v6 networks
-b=<bucket_name> Compute stats for this time bucket
"""
import asyncio
import docopt
import ipaddress
import json
import pprint
import multiprocessing
import redis
import time
import numpy as np
from datetime import datetime
from collections import defaultdict
from ripe.atlas.cousteau import AtlasStream
WORK_QUEUE = multiprocessing.Queue()
RESULT_QUEUE = multiprocessing.Queue()
OTHER_QUEUE = multiprocessing.Queue()
pp = pprint.PrettyPrinter(indent=4)
RD = redis.StrictRedis(host='localhost', port=6379, db=0)
ONE_HOUR = 60*60
PARAMS = {
"timeWindow": 60 * 60, # in seconds
"alpha": 0.01, # parameter for exponential smoothing
"minCorr": -0.25,
"minSeen": 3,
"af": "6",
}
def dd():
return defaultdict(int)
def all_routes():
return defaultdict(dd)
class Measure(multiprocessing.Process):
def __init__(self, work_queue, result_queue):
self.WORK_QUEUE = work_queue
self.RESULT_QUEUE = result_queue
policy = asyncio.get_event_loop_policy()
policy.set_event_loop(policy.new_event_loop())
self.LOOP = asyncio.get_event_loop()
super().__init__()
@asyncio.coroutine
def main(self):
"""Loop forever looking for work from the queue"""
while True:
if not self.WORK_QUEUE.empty():
traceroute = self.WORK_QUEUE.get()
yield from self.process(traceroute)
def run(self):
self.LOOP.run_until_complete(self.main())
@asyncio.coroutine
def process(self, traceroute):
next_hops = defaultdict(dd)
res = yield from self.isValidMeasurement(traceroute)
if not res:
return
dstIp = traceroute["dst_addr"]
srcIp = traceroute["from"]
ts = int(traceroute["timestamp"])
bucket = yield from self.make_time_bucket(ts)
prevIps = [srcIp] * 3
currIps = []
yield from self.print_measurement(traceroute, bucket)
for hop in traceroute["result"]:
if not self.isValidHop(hop):
continue
for hopid, res in enumerate(hop["result"]):
ip = res.get("from", "x")
is_private = yield from self.isPrivate(ip)
if is_private:
continue
for prevIp in prevIps:
next_hops[prevIp][ip] += 1
count = next_hops[prevIp][ip]
yield from self.save_hop(dstIp, prevIp, ip, count, bucket, 6 * ONE_HOUR)
currIps.append(ip)
prevIps = currIps
currIps = []
# Measure.print_routes(next_hops)
# self.RESULT_QUEUE.put((dstIp, next_hops))
@asyncio.coroutine
def isPrivate(self, ip):
if ip == "x":
return False
ipaddr = ipaddress.ip_address(ip)
return ipaddr.is_private
@asyncio.coroutine
def make_time_bucket(self, ts, minutes=60):
return 'time_bucket/{}'.format(ts // (60 * minutes))
@asyncio.coroutine
def isValidMeasurement(self, msm):
return msm and "result" in msm and "dst_addr" in msm
@asyncio.coroutine
def isValidTraceResult(self, result):
return result and not "error" in result["result"][0]
@asyncio.coroutine
def isValidHop(self, hop):
return hop and "result" in hop and not "err" in hop["result"][0]
@staticmethod
def print_routes(routes):
data_as_dict = json.loads(json.dumps(routes))
pp.pprint(data_as_dict)
@asyncio.coroutine
def print_measurement(self, msm, bucket):
srcIp = msm["from"]
print("TS: {}, SRC: {}, DST: {} ({}) - Bucket: {}, Seen: {}".format(
msm['timestamp'],
msm['src_addr'],
msm['dst_addr'],
msm['dst_name'],
bucket,
self.has_target(srcIp, bucket)))
def get_time_bucket(self, bucket):
routes = defaultdict(all_routes)
targets = self.get_targets(bucket)
for target in targets:
links = self.get_target_links(bucket, target)
for (ip0, ip1) in links:
route_count_key = "route_{}_{}_{}_{}".format(bucket, target, ip0, ip1)
count = RD.get(route_count_key)
# print("route: {} -> {} => {}".format(ip0, ip1, int(count)))
routes[target][ip0][ip1] = count
return routes
def get_target_routes(self, routes, target):
return routes[target]
def get_targets(self, bucket):
"""Returns all destination ips in a time bucket"""
targets_key = "targets_{}".format(bucket)
targets = RD.smembers(targets_key)
return [t.decode() for t in targets]
def get_target_links(self, bucket, target):
"""Returns a list of ip0-ip1 tuples for a particular target in a bucket"""
target_to_routes_key = "routes_{}_{}".format(bucket, target)
target_to_routes = RD.smembers(target_to_routes_key)
links = []
for route in target_to_routes:
_route = route.decode()
# todo: use a regexp for this instead of a split
# since the bucket contains an underscore
_, _, ip0, ip1 = route.decode().split("_")
links.append((ip0, ip1))
return links
def compare_buckets(self, reference, bucket, target):
"""from routeChangeDetection function"""
bucket_ts = int(bucket.split("/")[1]) # time_bucket/406642
# ts = datetime.utcfromtimestamp(bucket_ts * 3600) # todo: use a param
ts = bucket_ts * 3600 # todo: use a param
bucket_links = self.get_time_bucket(bucket)
reference_links = self.get_time_bucket(reference)
routes = self.get_target_routes(bucket_links, target)
routes_ref = self.get_target_routes(reference_links, target)
alarms = []
alpha = PARAMS["alpha"]
for ip0, nextHops in routes.items():
nextHopsRef = routes_ref[ip0]
allHops = set(["0"])
for key in set(nextHops.keys()).union(
[k for k, v in nextHopsRef.items() if
isinstance(v, float)]):
if nextHops[key] or nextHopsRef[key]:
allHops.add(key)
reported = False
nbSamples = np.sum(nextHops.values())
nbSamplesRef = np.sum([x for x in nextHopsRef.values() if isinstance(x, int)])
if len(allHops) > 2 and "stats" in nextHopsRef and nextHopsRef["stats"]["nbSeen"] >= PARAMS["minSeen"]:
count = []
countRef = []
for ip1 in allHops:
count.append(nextHops[ip1])
countRef.append(nextHopsRef[ip1])
if len(count) > 1:
if np.std(count) == 0 or np.std(countRef) == 0:
print("{}, {}, {}, {}".format(allHops, countRef, count, nextHopsRef))
corr = np.corrcoef(count, countRef)[0][1]
if corr < PARAMS["minCorr"]:
reported = True
alarm = {"ip": ip0, "corr": corr,
"dst_ip": target,
"refNextHops": list(nextHopsRef.items()),
"obsNextHops": list(nextHops.items()),
"nbSamples": nbSamples,
"nbPeers": len(count),
"nbSeen": nextHopsRef["stats"]["nbSeen"]}
print("Alarm: {}".format(alarm))
alarms.append(alarm)
# Update the reference
if not "stats" in nextHopsRef:
nextHopsRef["stats"] = {"nbSeen": 0, "firstSeen": ts, "lastSeen": ts, "nbReported": 0}
if reported:
nextHopsRef["stats"]["nbReported"] += 1
nextHopsRef["stats"]["nbSeen"] += 1
nextHopsRef["stats"]["lastSeen"] = ts
for ip1 in allHops:
newCount = int(nextHops[ip1])
# print("newCount: {}".format(newCount))
nextHopsRef[ip1] = int((1.0 - alpha) * nextHopsRef[ip1] + alpha * int(newCount))
return routes_ref
@asyncio.coroutine
def save_links(self, target, links, bucket="ref", ttl=30*24*60*60):
for ip0, nextHops in links.iteritems():
for ip1, count in nextHops.iteritems():
yield from self.save_hop(target, ip0, ip1, count, bucket, ttl)
@asyncio.coroutine
def save_hop(self, target, ip0, ip1, count, bucket="ref", ttl=12*3600):
expires = int(time.time()) + ttl
p = RD.pipeline()
# a list of time bucket names
p.sadd("time_buckets", bucket)
# a set of all dst addr
target_key = "targets_{}".format(bucket)
p.sadd(target_key, target)
# a set of hops for each target dst addr
target_to_hops = "hops_{}_{}".format(bucket, target)
# a set of ip0_ip1 pairs for each target
target_to_routes = "routes_{}_{}".format(bucket, target)
# holds the total counters
route_count_key = "route_{}_{}_{}_{}".format(bucket, target, ip0, ip1)
route_key = "{}_{}_{}".format(bucket, ip0, ip1)
p.sadd(target_to_hops, ip0)
p.sadd(target_to_routes, route_key)
p.incrby(route_count_key, count)
# Set the expiration for all keys
p.expireat(bucket, expires)
p.expireat(target_key, expires)
p.expireat(target_to_hops, expires)
p.expireat(target_to_routes, expires)
p.expireat(route_count_key, expires)
p.execute()
@asyncio.coroutine
def get_route(self, target, ip0, ip1, bucket="ref"):
route_count_key = "route_{}_{}_{}_{}".format(bucket, target, ip0, ip1)
return RD.get(route_count_key)
def has_target(self, target, bucket="ref"):
return RD.sismember("targets_{}".format(bucket), target)
class IPMatcher(multiprocessing.Process):
def __init__(self, work_queue, result_queue, v4_nets, v6_nets):
self.WORK_QUEUE = work_queue
self.RESULT_QUEUE = result_queue
policy = asyncio.get_event_loop_policy()
policy.set_event_loop(policy.new_event_loop())
self.LOOP = asyncio.get_event_loop()
self.NETWORKS = {
4: [
ipaddress.ip_network(u'{}'.format(net.strip()), strict=False) for
net in open(v4_nets).readlines()
],
6: [
ipaddress.ip_network(u'{}'.format(net.strip()), strict=False) for
net in open(v6_nets).readlines()
],
}
super().__init__()
@asyncio.coroutine
def main(self):
"""Loop forever looking for work from the queue"""
while True:
if not self.WORK_QUEUE.empty():
traceroute = self.WORK_QUEUE.get()
yield from self.filter_hop_rtt(traceroute)
def run(self):
self.LOOP.run_until_complete(self.main())
@asyncio.coroutine
def filter_hop_rtt(self, traceroute):
"""Given a traceroute result, filter out the unnecessary data and
hand off for analysis"""
m_result = traceroute
if 'result' in m_result.keys() and m_result['result']:
for hop in m_result['result']:
if not 'result' in hop.keys():
continue
for address in hop['result']:
if 'from' in address.keys():
res = yield from self.in_monitored_network(
address['from']
)
if res:
self.RESULT_QUEUE.put(m_result)
return None
# The lovely folks at ripe added in some server side filtering for
# prefixes, to this code isn't really needed now. Leaving it in just
# in case anyone wants to do further filtering of the data
# UPDATE: server side is a WIP, we still need this
@asyncio.coroutine
def in_monitored_network(self, ip_address):
"""Returns true if this is in one of our monitored networks"""
address = ipaddress.ip_address(ip_address)
for network in self.NETWORKS[address.version]:
if address in network:
return True
return False
def on_result_recieved(*args):
"""Add the trqceroute result to a queue to be processed"""
WORK_QUEUE.put(args[0])
def stream_results(v4_nets, v6_nets, seconds=None, filters={}):
"""Set up the atlas stream for all traceroute results"""
atlas_stream = AtlasStream()
atlas_stream.connect()
atlas_stream.bind_channel('result', on_result_recieved)
prefixes = []
prefixes.extend([net.strip() for net in open(v4_nets).readlines()])
prefixes.extend([net.strip() for net in open(v6_nets).readlines()])
# for prefix in prefixes:
# stream_parameters = {"type": "traceroute", "passThroughPrefix": prefix}
# stream_parameters.update(filters)
# atlas_stream.start_stream(stream_type="result", **stream_parameters)
stream_parameters = {"type": "traceroute"}
stream_parameters.update(filters)
atlas_stream.start_stream(stream_type="result", **stream_parameters)
print("Before streaming")
atlas_stream.timeout(seconds=seconds)
atlas_stream.disconnect()
if __name__ == '__main__':
"""Start up one worker process to deal with handling checking traceroute
results, and just use the main thread to read from atlas."""
args = docopt.docopt(__doc__)
policy = asyncio.get_event_loop_policy()
policy.set_event_loop(policy.new_event_loop())
v4_nets = args['--v4_nets']
v6_nets = args['--v6_nets']
bucket = args['-b'] # 'time_bucket/406642'
if bucket:
measure = Measure(RESULT_QUEUE, OTHER_QUEUE)
targets = measure.get_targets(bucket)
for target in targets:
ref = measure.compare_buckets('reference', bucket, target)
# Measure.print_routes(ref)
exit()
procs = []
measure = Measure(RESULT_QUEUE, OTHER_QUEUE)
measure.start()
procs.append(measure)
for i in range(int(args['--num_procs'])):
proc = IPMatcher(WORK_QUEUE, RESULT_QUEUE, v4_nets, v6_nets)
procs.append(proc)
proc.start()
if args['--time']:
seconds = int(args['--time'])
else:
seconds = None
stream_results(v4_nets, v6_nets, seconds)
for proc in procs:
proc.terminate()
exit()
| mit | 2,678,837,684,548,656,000 | 35.516827 | 115 | 0.569745 | false | 3.768544 | false | false | false |
SethGreylyn/gwells | gwells/urls.py | 1 | 2002 | """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from . import views
from django.views.generic import TemplateView
urlpatterns = [
# Examples:
# url(r'^$', 'project.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^$', views.HelloWorldView.as_view(), name='home'),
url(r'^search$', views.well_search, name='search'),
#url(r'^(?P<pk>[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})/$', views.DetailView.as_view(), name='detail'),
url(r'^submission/$', views.ActivitySubmissionListView.as_view(), name='activity_submission_list'),
url(r'^submission/create$', views.ActivitySubmissionWizardView.as_view(views.FORMS), name='activity_submission_create'),
url(r'^submission/(?P<pk>[0-9]+)$', views.ActivitySubmissionDetailView.as_view(), name='activity_submission_detail'),
url(r'^well/(?P<pk>[0-9]+)$', views.WellDetailView.as_view(), name='well_detail'),
url(r'^health$', views.health),
url(r'^admin/', admin.site.urls),
url(r'^additional-information', TemplateView.as_view(template_name='gwells/additional_information.html'), name='additional_information'),
url(r'^ajax/map_well_search/$', views.map_well_search, name='map_well_search'),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns | apache-2.0 | 85,206,001,416,177,220 | 45.581395 | 141 | 0.687313 | false | 3.562278 | false | false | false |
Apogaea/voldb | volunteer/apps/shifts/models.py | 1 | 6000 | from __future__ import unicode_literals
import datetime
from django.db import models
from django.core.validators import MaxValueValidator
from django.conf import settings
from django.utils import timezone
from django.utils import timesince
from django.utils.encoding import python_2_unicode_compatible
from volunteer.core.models import Timestamped
from volunteer.apps.shifts.utils import DENVER_TIMEZONE
class ShiftQuerySet(models.QuerySet):
use_for_related_fields = True
def filter_to_active_event(self, active_event=None):
if active_event is None:
from volunteer.apps.events.models import Event
active_event = Event.objects.get_current()
if active_event is None:
return self
else:
return self.filter(event=active_event)
def human_readable_minutes(minutes):
now = timezone.now()
return timesince.timeuntil(now + timezone.timedelta(minutes=minutes), now)
@python_2_unicode_compatible
class Shift(Timestamped):
event = models.ForeignKey(
'events.Event', related_name='shifts', on_delete=models.PROTECT,
)
role = models.ForeignKey(
'departments.Role', related_name='shifts', on_delete=models.PROTECT,
)
is_closed = models.BooleanField(
blank=True, default=False,
help_text=(
"This will restrict anyone from claiming slots on this shift."
),
)
start_time = models.DateTimeField(
'shift begins',
help_text=(
"Format: `YYYY-MM-DD HH:MM` with the hours in 24-hour (military) "
"format. (eg, 2pm is 14:00)."
),
)
SHIFT_MINUTES_CHOICES = tuple((
(i * 5, human_readable_minutes(i * 5)) for i in range(1, 24 * 12 + 1)
))
shift_minutes = models.PositiveSmallIntegerField(
"shift length",
validators=[MaxValueValidator(1440)], choices=SHIFT_MINUTES_CHOICES,
help_text="The length of the shift",
)
num_slots = models.PositiveSmallIntegerField(
default=1,
help_text="How many slots does this shift have",
)
code = models.CharField(
max_length=50, blank=True,
help_text="Leave blank if this shift can be claimed by anyone.",
)
objects = ShiftQuerySet.as_manager()
def __str__(self):
return self.get_start_time_display()
@property
def open_slot_count(self):
return max(0, self.num_slots - self.slots.filter(cancelled_at__isnull=True).count())
@property
def filled_slot_count(self):
return self.slots.filter(cancelled_at__isnull=True).count()
@property
def has_open_slots(self):
return bool(self.open_slot_count)
@property
def claimed_slots(self):
return self.slots.filter(cancelled_at__isnull=True)
def get_start_time_display(self):
return self.start_time.strftime('%H:%M')
@property
def end_time(self):
return self.start_time + datetime.timedelta(minutes=self.shift_minutes)
def overlaps_with(self, other):
if self.end_time <= other.start_time:
return False
elif self.start_time >= other.end_time:
return False
return True
@property
def is_protected(self):
return bool(self.code)
@property
def is_locked(self):
return self.is_closed or not self.event.is_registration_open
@property
def is_midnight_spanning(self):
if self.shift_minutes > 24 * 60:
return True
start_hour = self.start_time.astimezone(DENVER_TIMEZONE).hour
end_hour = self.end_time.astimezone(DENVER_TIMEZONE).hour
return bool(end_hour) and start_hour > end_hour
# Permissions Methods
def is_claimable_by_user(self, user):
"""
Not locked.
Has open slots.
User does not already have a slot.
"""
if self.is_locked:
return False
elif not self.has_open_slots:
return False
elif self.claimed_slots.filter(volunteer=user).exists():
return False
return True
@property
def duration(self):
return timezone.timedelta(minutes=self.shift_minutes)
class ShiftSlotQuerySet(models.QuerySet):
use_for_related_fields = True
def filter_to_active_event(self, active_event=None):
if active_event is None:
from volunteer.apps.events.models import Event
active_event = Event.objects.get_current()
if active_event is None:
return self
else:
return self.filter(shift__event=active_event)
@python_2_unicode_compatible
class ShiftSlot(Timestamped):
shift = models.ForeignKey('Shift', related_name='slots')
volunteer = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='shift_slots')
cancelled_at = models.DateTimeField(null=True)
objects = ShiftSlotQuerySet.as_manager()
def __str__(self):
return "{s.shift_id}:{s.volunteer_id}".format(s=self)
def cancel(self):
self.is_cancelled = True
self.save()
def _is_cancelled_getter(self):
return bool(self.cancelled_at)
def _is_cancelled_setter(self, value):
if bool(value) is bool(self.cancelled_at):
return
elif value:
self.cancelled_at = timezone.now()
else:
self.cancelled_at = None
is_cancelled = property(_is_cancelled_getter, _is_cancelled_setter)
@property
def is_locked(self):
return not self.shift.event.is_registration_open
#
# Permissions Methods
#
def is_cancelable_by_user(self, user):
"""
Not locked.
Not cancelled.
User is the volunteer or is an admin
else, not allowed.
"""
if self.is_cancelled:
return False
elif self.is_locked:
return False
elif user.pk == self.volunteer_id or user.is_admin:
return True
return False
| gpl-3.0 | -1,218,786,950,874,683,600 | 27.708134 | 92 | 0.6305 | false | 3.931848 | false | false | false |
qbeenslee/Nepenthes-Server | utils/codeutil.py | 1 | 3122 | # coding:utf-8
'''
编码处理
Author : qbeenslee
Created : 2015/1/20
'''
import hashlib
import random
import re
import time
import uuid
from config import setting, configuration
SAFEHASH = [x for x in "0123456789-abcdefghijklmnopqrstuvwxyz_ABCDEFGHIJKLMNOPQRSTUVWXYZ"] # 64
SHORTHASH = [x for x in "0123456789ABCDEFGHJKMNPQRSTVWXYZ"] # 32
def shorter_UID():
'''
生成一个较短的代码
:return:
'''
random.seed(int(time.time()))
num = random.randint(configuration.MIN_RAND_EMAIL_CODE, configuration.MAX_RAND_EMAIL_CODE)
return hex(num)[2:]
def short_UID(imei):
'''
产生较短唯一压缩id码
:return:string 10位id编码
'''
if imei is None:
return None
shortID = ''
imei = re.sub(r'\D+', '', imei, 0)
imei_token = imei[8:14] + imei[0:6]
time_token = ('%.3f' % time.time()).split(r'.')[-1]
token = time_token + imei_token
enbin = "%050d" % int(bin(int(token))[2:], 10)
for i in xrange(10):
shortID += SHORTHASH[int(enbin[i * 5:i * 5 + 5], 2)]
return shortID
def compress_UUID():
'''
根据http://www.ietf.org/rfc/rfc1738.txt,由uuid编码扩大字符域生成串
包括: [0-9a-zA-Z\-_] 共64个
长度: (32-2)/3*2 = 20
备注: 可在地球上人人都用,使用1000年不重复(2^120)
:return:String
'''
row = str(uuid.uuid4()).replace('-', '')
safe_code = ''
for i in xrange(10):
enbin = "%012d" % int(bin(int(row[i * 3] + row[i * 3 + 1] + row[i * 3 + 2], 16))[2:], 10)
safe_code += (SAFEHASH[int(enbin[0:6], 2)] + SAFEHASH[int(enbin[6:12], 2)])
return safe_code
def encode_pwd(password, loaded=True, salt_bit=None, iteration_index=None):
'''
明码密码加盐加密
:return:String
格式: 加密方式$迭代次数$盐$结果
:param:pwd 明文密码(或者已经加载的迭代次数)[32]
loaded 是否已经(md5)散列处理
salt 盐[20]
iteration_index 数据库中的迭代次数(对于已经已经经过一次散列处理的字符串)[2]
:do:第一次都是不加盐迭代,剩余次数根据服务器生成盐迭代
'''
if password is None: raise ValueError('ValueEmpty', 'pwd')
if salt_bit is None: salt_bit = compress_UUID()
if iteration_index is None:
iteration_index = random.randint(setting.PWD_ITERATION_INTERVAL['MIN'],
setting.PWD_ITERATION_INTERVAL['MAX'])
if not loaded:
password = hashlib.md5(password).hexdigest()
for i in xrange(iteration_index):
password = hashlib.md5(password + salt_bit).hexdigest()
return "%s$%d$%s$%s" % ('md5', iteration_index, salt_bit, password)
if __name__ == '__main__':
# strTest = u'md5$35$Xm9UuCi4hap6MmNXN2SV$9e77dd1761c233b079d9f2568f905f8'
# #
# method, iteration, salt, pwd = strTest.split(r'$')
# pwd_gen = encode_pwd(password='1234', salt_bit=salt, iteration_index=int(iteration))
# print pwd_gen
shorter_UID()
| gpl-3.0 | 8,844,720,525,724,796,000 | 28.193548 | 97 | 0.587251 | false | 2.557377 | false | false | false |
kalebdavis/calenbro | outlook/outlookservice.py | 1 | 1868 | import requests
import uuid
import json
outlook_api_endpoint = 'https://outlook.office.com/api/v2.0{0}'
def make_api_call(method, url, token, user_email, payload=None, parameters=None):
headers = {
'User-Agent': 'brickhack/1.0',
'Authorization': 'Bearer {0}'.format(token),
'Accept': 'application/json',
'X-AnchorMailbox': 'user_email'
}
request_id = str(uuid.uuid4())
instrumentation = {
'client-request-id': request_id,
'return-client-request-id': 'true'
}
headers.update(instrumentation)
response = None
if(method.upper() == 'GET'):
response = requests.get(url, headers=headers, params=parameters)
elif(method.upper() == 'DELETE'):
response = requests.delete(url, headers=headers, params=parameters)
elif(method.upper() == 'PATCH'):
headers.update({ 'Content-Type': 'application/json' })
response = requests.patch(url, headers=headers, data=json.dumps(payload), params=parameters)
elif(method.upper() == 'POST'):
headers.update({ 'Content-Type': 'application/json' })
response = requests.post(url, headers=headers, data=json.dumps(payload), params=parameters)
return response
def get_my_events(access_token, user_email):
get_events_url = outlook_api_endpoint.format('/Me/Events')
query_parameters = {
'$top': '10',
'$select': 'Subject,Start,End',
'$orderby': 'Start/DateTime ASC'
}
r = make_api_call('GET', get_events_url, access_token, user_email, parameters=query_parameters)
if(r.status_code == requests.codes.ok):
return r.json()
else:
return "{0}: {1}".format(r.status_code, r.text)
| gpl-2.0 | 1,723,155,915,210,495,700 | 37.122449 | 100 | 0.577088 | false | 3.827869 | false | false | false |
Etxea/gestioneide | profesores/urls.py | 1 | 1195 | from django.conf.urls import include, url
from django.views.generic import ListView, DetailView
from django.views.generic.edit import UpdateView
from django.contrib.auth.decorators import login_required, permission_required
from profesores.views import *
urlpatterns = [
url(r'^$', login_required(ProfesorDashboardView.as_view()),name="profesores_dashboard"),
url(r'lista/$', login_required(ProfesorListView.as_view()),name="profesores_lista"),
url(r'nuevo/$',ProfesorCreateView.as_view(), name="profesor_nuevo"),
url(r'editar/(?P<pk>\d+)/$',ProfesorUpdateView.as_view(), name="profesor_editar"),
url(r'borrar/(?P<pk>\d+)/$',ProfesorDeleteView.as_view(), name="profesor_borrar"),
url(r'passwordreset/(?P<pk>\d+)/$',ProfesorPasswordResetView.as_view(), name="profesor_passwordreset"),
url(r'createuser/(?P<pk>\d+)/$',ProfesorCreateUserView.as_view(), name="profesor_createuser"),
url(r'disableuser/(?P<pk>\d+)/$',ProfesorDisableUserView.as_view(), name="profesor_disableuser"),
url(r'enableuser/(?P<pk>\d+)/$',ProfesorEnableUserView.as_view(), name="profesor_enableuser"),
url(r'(?P<pk>\d+)/$',ProfesorDetailView.as_view(), name="profesor_detalle"),
]
| gpl-3.0 | 5,310,999,755,454,793,000 | 55.904762 | 107 | 0.713808 | false | 2.936118 | false | true | false |
lina9527/easybi | migrations/versions/822389978719_.py | 1 | 21400 | """empty message
Revision ID: 822389978719
Revises: None
Create Date: 2017-10-17 15:49:01.970182
"""
# revision identifiers, used by Alembic.
import sqlalchemy_utils
revision = '822389978719'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('keyvalue',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('value', sa.Text(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table('access_request',
sa.Column('created_on', sa.DateTime(), nullable=True),
sa.Column('changed_on', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('datasource_id', sa.Integer(), nullable=True),
sa.Column('datasource_type', sa.String(length=200), nullable=True),
sa.Column('created_by_fk', sa.Integer(), nullable=True),
sa.Column('changed_by_fk', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['changed_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['created_by_fk'], ['ab_user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('clusters',
sa.Column('created_on', sa.DateTime(), nullable=True),
sa.Column('changed_on', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('verbose_name', sa.String(length=250), nullable=True),
sa.Column('cluster_name', sa.String(length=250), nullable=True),
sa.Column('coordinator_host', sa.String(length=255), nullable=True),
sa.Column('coordinator_port', sa.Integer(), nullable=True),
sa.Column('coordinator_endpoint', sa.String(length=255), nullable=True),
sa.Column('broker_host', sa.String(length=255), nullable=True),
sa.Column('broker_port', sa.Integer(), nullable=True),
sa.Column('broker_endpoint', sa.String(length=255), nullable=True),
sa.Column('metadata_last_refreshed', sa.DateTime(), nullable=True),
sa.Column('cache_timeout', sa.Integer(), nullable=True),
sa.Column('created_by_fk', sa.Integer(), nullable=True),
sa.Column('changed_by_fk', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['changed_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['created_by_fk'], ['ab_user.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('cluster_name'),
sa.UniqueConstraint('verbose_name')
)
op.create_table('css_templates',
sa.Column('created_on', sa.DateTime(), nullable=True),
sa.Column('changed_on', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('template_name', sa.String(length=250), nullable=True),
sa.Column('css', sa.Text(), nullable=True),
sa.Column('created_by_fk', sa.Integer(), nullable=True),
sa.Column('changed_by_fk', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['changed_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['created_by_fk'], ['ab_user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('dashboards',
sa.Column('created_on', sa.DateTime(), nullable=True),
sa.Column('changed_on', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('dashboard_title', sa.String(length=500), nullable=True),
sa.Column('position_json', sa.Text(), nullable=True),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('css', sa.Text(), nullable=True),
sa.Column('json_metadata', sa.Text(), nullable=True),
sa.Column('slug', sa.String(length=255), nullable=True),
sa.Column('created_by_fk', sa.Integer(), nullable=True),
sa.Column('changed_by_fk', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['changed_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['created_by_fk'], ['ab_user.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('slug')
)
op.create_table('dbs',
sa.Column('created_on', sa.DateTime(), nullable=True),
sa.Column('changed_on', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('verbose_name', sa.String(length=250), nullable=True),
sa.Column('database_name', sa.String(length=250), nullable=True),
sa.Column('sqlalchemy_uri', sa.String(length=1024), nullable=True),
sa.Column('password', sqlalchemy_utils.types.encrypted.EncryptedType(), nullable=True),
sa.Column('cache_timeout', sa.Integer(), nullable=True),
sa.Column('select_as_create_table_as', sa.Boolean(), nullable=True),
sa.Column('expose_in_sqllab', sa.Boolean(), nullable=True),
sa.Column('allow_run_sync', sa.Boolean(), nullable=True),
sa.Column('allow_run_async', sa.Boolean(), nullable=True),
sa.Column('allow_ctas', sa.Boolean(), nullable=True),
sa.Column('allow_dml', sa.Boolean(), nullable=True),
sa.Column('force_ctas_schema', sa.String(length=250), nullable=True),
sa.Column('extra', sa.Text(), nullable=True),
sa.Column('perm', sa.String(length=1000), nullable=True),
sa.Column('created_by_fk', sa.Integer(), nullable=True),
sa.Column('changed_by_fk', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['changed_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['created_by_fk'], ['ab_user.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('database_name'),
sa.UniqueConstraint('verbose_name')
)
op.create_table('favstar',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('class_name', sa.String(length=50), nullable=True),
sa.Column('obj_id', sa.Integer(), nullable=True),
sa.Column('dttm', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['ab_user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('logs',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('action', sa.String(length=512), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('dashboard_id', sa.Integer(), nullable=True),
sa.Column('slice_id', sa.Integer(), nullable=True),
sa.Column('json', sa.Text(), nullable=True),
sa.Column('dttm', sa.DateTime(), nullable=True),
sa.Column('dt', sa.Date(), nullable=True),
sa.Column('duration_ms', sa.Integer(), nullable=True),
sa.Column('referrer', sa.String(length=1024), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['ab_user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('slices',
sa.Column('created_on', sa.DateTime(), nullable=True),
sa.Column('changed_on', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('slice_name', sa.String(length=250), nullable=True),
sa.Column('datasource_id', sa.Integer(), nullable=True),
sa.Column('datasource_type', sa.String(length=200), nullable=True),
sa.Column('datasource_name', sa.String(length=2000), nullable=True),
sa.Column('viz_type', sa.String(length=250), nullable=True),
sa.Column('params', sa.Text(), nullable=True),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('cache_timeout', sa.Integer(), nullable=True),
sa.Column('perm', sa.String(length=1000), nullable=True),
sa.Column('created_by_fk', sa.Integer(), nullable=True),
sa.Column('changed_by_fk', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['changed_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['created_by_fk'], ['ab_user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('url',
sa.Column('created_on', sa.DateTime(), nullable=True),
sa.Column('changed_on', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('url', sa.Text(), nullable=True),
sa.Column('created_by_fk', sa.Integer(), nullable=True),
sa.Column('changed_by_fk', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['changed_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['created_by_fk'], ['ab_user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('dashboard_slices',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('dashboard_id', sa.Integer(), nullable=True),
sa.Column('slice_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['dashboard_id'], ['dashboards.id'], ),
sa.ForeignKeyConstraint(['slice_id'], ['slices.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('dashboard_user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('dashboard_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['dashboard_id'], ['dashboards.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['ab_user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('datasources',
sa.Column('created_on', sa.DateTime(), nullable=True),
sa.Column('changed_on', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('default_endpoint', sa.Text(), nullable=True),
sa.Column('is_featured', sa.Boolean(), nullable=True),
sa.Column('filter_select_enabled', sa.Boolean(), nullable=True),
sa.Column('offset', sa.Integer(), nullable=True),
sa.Column('cache_timeout', sa.Integer(), nullable=True),
sa.Column('params', sa.String(length=1000), nullable=True),
sa.Column('perm', sa.String(length=1000), nullable=True),
sa.Column('datasource_name', sa.String(length=255), nullable=True),
sa.Column('is_hidden', sa.Boolean(), nullable=True),
sa.Column('fetch_values_from', sa.String(length=100), nullable=True),
sa.Column('cluster_name', sa.String(length=250), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('created_by_fk', sa.Integer(), nullable=True),
sa.Column('changed_by_fk', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['changed_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['cluster_name'], ['clusters.cluster_name'], ),
sa.ForeignKeyConstraint(['created_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['ab_user.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('datasource_name')
)
op.create_table('query',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('client_id', sa.String(length=11), nullable=False),
sa.Column('database_id', sa.Integer(), nullable=False),
sa.Column('tmp_table_name', sa.String(length=256), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('status', sa.String(length=16), nullable=True),
sa.Column('tab_name', sa.String(length=256), nullable=True),
sa.Column('sql_editor_id', sa.String(length=256), nullable=True),
sa.Column('schema', sa.String(length=256), nullable=True),
sa.Column('sql', sa.Text(), nullable=True),
sa.Column('select_sql', sa.Text(), nullable=True),
sa.Column('executed_sql', sa.Text(), nullable=True),
sa.Column('limit', sa.Integer(), nullable=True),
sa.Column('limit_used', sa.Boolean(), nullable=True),
sa.Column('select_as_cta', sa.Boolean(), nullable=True),
sa.Column('select_as_cta_used', sa.Boolean(), nullable=True),
sa.Column('progress', sa.Integer(), nullable=True),
sa.Column('rows', sa.Integer(), nullable=True),
sa.Column('error_message', sa.Text(), nullable=True),
sa.Column('results_key', sa.String(length=64), nullable=True),
sa.Column('start_time', sa.Numeric(precision=20, scale=6), nullable=True),
sa.Column('start_running_time', sa.Numeric(precision=20, scale=6), nullable=True),
sa.Column('end_time', sa.Numeric(precision=20, scale=6), nullable=True),
sa.Column('end_result_backend_time', sa.Numeric(precision=20, scale=6), nullable=True),
sa.Column('tracking_url', sa.Text(), nullable=True),
sa.Column('changed_on', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['database_id'], ['dbs.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['ab_user.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('client_id')
)
op.create_index(op.f('ix_query_results_key'), 'query', ['results_key'], unique=False)
op.create_index('ti_user_id_changed_on', 'query', ['user_id', 'changed_on'], unique=False)
op.create_table('saved_query',
sa.Column('created_on', sa.DateTime(), nullable=True),
sa.Column('changed_on', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('db_id', sa.Integer(), nullable=True),
sa.Column('schema', sa.String(length=128), nullable=True),
sa.Column('label', sa.String(length=256), nullable=True),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('sql', sa.Text(), nullable=True),
sa.Column('created_by_fk', sa.Integer(), nullable=True),
sa.Column('changed_by_fk', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['changed_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['created_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['db_id'], ['dbs.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['ab_user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('slice_user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('slice_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['slice_id'], ['slices.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['ab_user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('tables',
sa.Column('created_on', sa.DateTime(), nullable=True),
sa.Column('changed_on', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('default_endpoint', sa.Text(), nullable=True),
sa.Column('is_featured', sa.Boolean(), nullable=True),
sa.Column('filter_select_enabled', sa.Boolean(), nullable=True),
sa.Column('offset', sa.Integer(), nullable=True),
sa.Column('cache_timeout', sa.Integer(), nullable=True),
sa.Column('params', sa.String(length=1000), nullable=True),
sa.Column('perm', sa.String(length=1000), nullable=True),
sa.Column('table_name', sa.String(length=250), nullable=True),
sa.Column('main_dttm_col', sa.String(length=250), nullable=True),
sa.Column('database_id', sa.Integer(), nullable=False),
sa.Column('fetch_values_predicate', sa.String(length=1000), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('schema', sa.String(length=255), nullable=True),
sa.Column('sql', sa.Text(), nullable=True),
sa.Column('created_by_fk', sa.Integer(), nullable=True),
sa.Column('changed_by_fk', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['changed_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['created_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['database_id'], ['dbs.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['ab_user.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('database_id', 'schema', 'table_name', name='_customer_location_uc')
)
op.create_table('columns',
sa.Column('created_on', sa.DateTime(), nullable=True),
sa.Column('changed_on', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('column_name', sa.String(length=255), nullable=True),
sa.Column('verbose_name', sa.String(length=1024), nullable=True),
sa.Column('is_active', sa.Boolean(), nullable=True),
sa.Column('type', sa.String(length=32), nullable=True),
sa.Column('groupby', sa.Boolean(), nullable=True),
sa.Column('count_distinct', sa.Boolean(), nullable=True),
sa.Column('sum', sa.Boolean(), nullable=True),
sa.Column('avg', sa.Boolean(), nullable=True),
sa.Column('max', sa.Boolean(), nullable=True),
sa.Column('min', sa.Boolean(), nullable=True),
sa.Column('filterable', sa.Boolean(), nullable=True),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('datasource_name', sa.String(length=255), nullable=True),
sa.Column('dimension_spec_json', sa.Text(), nullable=True),
sa.Column('created_by_fk', sa.Integer(), nullable=True),
sa.Column('changed_by_fk', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['changed_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['created_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['datasource_name'], ['datasources.datasource_name'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('metrics',
sa.Column('created_on', sa.DateTime(), nullable=True),
sa.Column('changed_on', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('metric_name', sa.String(length=512), nullable=True),
sa.Column('verbose_name', sa.String(length=1024), nullable=True),
sa.Column('metric_type', sa.String(length=32), nullable=True),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('is_restricted', sa.Boolean(), nullable=True),
sa.Column('d3format', sa.String(length=128), nullable=True),
sa.Column('datasource_name', sa.String(length=255), nullable=True),
sa.Column('json', sa.Text(), nullable=True),
sa.Column('created_by_fk', sa.Integer(), nullable=True),
sa.Column('changed_by_fk', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['changed_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['created_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['datasource_name'], ['datasources.datasource_name'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('sql_metrics',
sa.Column('created_on', sa.DateTime(), nullable=True),
sa.Column('changed_on', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('metric_name', sa.String(length=512), nullable=True),
sa.Column('verbose_name', sa.String(length=1024), nullable=True),
sa.Column('metric_type', sa.String(length=32), nullable=True),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('is_restricted', sa.Boolean(), nullable=True),
sa.Column('d3format', sa.String(length=128), nullable=True),
sa.Column('table_id', sa.Integer(), nullable=True),
sa.Column('expression', sa.Text(), nullable=True),
sa.Column('created_by_fk', sa.Integer(), nullable=True),
sa.Column('changed_by_fk', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['changed_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['created_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['table_id'], ['tables.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('table_columns',
sa.Column('created_on', sa.DateTime(), nullable=True),
sa.Column('changed_on', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('column_name', sa.String(length=255), nullable=True),
sa.Column('verbose_name', sa.String(length=1024), nullable=True),
sa.Column('is_active', sa.Boolean(), nullable=True),
sa.Column('type', sa.String(length=32), nullable=True),
sa.Column('groupby', sa.Boolean(), nullable=True),
sa.Column('count_distinct', sa.Boolean(), nullable=True),
sa.Column('sum', sa.Boolean(), nullable=True),
sa.Column('avg', sa.Boolean(), nullable=True),
sa.Column('max', sa.Boolean(), nullable=True),
sa.Column('min', sa.Boolean(), nullable=True),
sa.Column('filterable', sa.Boolean(), nullable=True),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('table_id', sa.Integer(), nullable=True),
sa.Column('is_dttm', sa.Boolean(), nullable=True),
sa.Column('expression', sa.Text(), nullable=True),
sa.Column('python_date_format', sa.String(length=255), nullable=True),
sa.Column('database_expression', sa.String(length=255), nullable=True),
sa.Column('created_by_fk', sa.Integer(), nullable=True),
sa.Column('changed_by_fk', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['changed_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['created_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['table_id'], ['tables.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('table_columns')
op.drop_table('sql_metrics')
op.drop_table('metrics')
op.drop_table('columns')
op.drop_table('tables')
op.drop_table('slice_user')
op.drop_table('saved_query')
op.drop_index('ti_user_id_changed_on', table_name='query')
op.drop_index(op.f('ix_query_results_key'), table_name='query')
op.drop_table('query')
op.drop_table('datasources')
op.drop_table('dashboard_user')
op.drop_table('dashboard_slices')
op.drop_table('url')
op.drop_table('slices')
op.drop_table('logs')
op.drop_table('favstar')
op.drop_table('dbs')
op.drop_table('dashboards')
op.drop_table('css_templates')
op.drop_table('clusters')
op.drop_table('access_request')
op.drop_table('keyvalue')
# ### end Alembic commands ###
| mit | -4,426,077,723,326,835,000 | 50.318945 | 94 | 0.656402 | false | 3.387148 | false | false | false |
rndusr/stig | stig/main.py | 1 | 3550 | # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details
# http://www.gnu.org/licenses/gpl-3.0.txt
import asyncio
import os
import sys
from . import cliopts, logging, objects, settings
from .objects import cmdmgr, log, srvapi
# Remove python from process name when running inside tmux
if 'TMUX' in os.environ:
try:
from setproctitle import setproctitle
except ImportError:
pass
else:
from . import __appname__
setproctitle(__appname__)
cliargs, clicmds = cliopts.parse()
objects.main_rcfile = cliargs['rcfile'] or settings.defaults.DEFAULT_RCFILE
logging.setup(debugmods=cliargs['debug'], filepath=cliargs['debug_file'])
logging.redirect_level('INFO', sys.stdout)
def run():
cmdmgr.load_cmds_from_module('stig.commands.cli', 'stig.commands.tui')
from .commands.guess_ui import guess_ui, UIGuessError
from .commands import CmdError
from . import hooks # noqa: F401
# Read commands from rc file
rclines = ()
if not cliargs['norcfile']:
from .settings import rcfile
try:
rclines = rcfile.read(objects.main_rcfile)
except rcfile.RcFileError as e:
log.error('Loading rc file failed: {}'.format(e))
sys.exit(1)
# Decide if we run as a TUI or CLI
if cliargs['tui']:
cmdmgr.active_interface = 'tui'
elif cliargs['notui']:
cmdmgr.active_interface = 'cli'
else:
try:
cmdmgr.active_interface = guess_ui(clicmds, cmdmgr)
except UIGuessError:
log.error('Unable to guess user interface')
log.error('Provide one of these options: --tui/-t or --no-tui/-T')
sys.exit(1)
except CmdError as e:
log.error(e)
sys.exit(1)
def run_commands():
for cmdline in rclines:
success = cmdmgr.run_sync(cmdline)
# Ignored commands return None, which we consider a success here
# because TUI commands like 'tab' in the rc file should have no
# effect at all when in CLI mode.
if success is False:
return False
# Exit if CLI commands fail
if clicmds:
success = cmdmgr.run_sync(clicmds)
if not success:
return False
return True
exit_code = 0
# Run commands either in CLI or TUI mode
if cmdmgr.active_interface == 'cli':
# Exit when pipe is closed (e.g. `stig help | head -1`)
import signal
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
try:
if not run_commands():
exit_code = 1
except KeyboardInterrupt:
log.debug('Caught SIGINT')
elif cmdmgr.active_interface == 'tui':
from .tui import main as tui
if not tui.run(run_commands):
exit_code = 1
asyncio.get_event_loop().run_until_complete(srvapi.rpc.disconnect('Quit'))
# We're not closing the AsyncIO event loop here because it sometimes
# complains about unfinished tasks and not calling it seems to work fine.
sys.exit(exit_code)
| gpl-3.0 | 2,774,713,514,736,834,600 | 31.568807 | 78 | 0.63662 | false | 3.884026 | false | false | false |
praekelt/sideloader2 | sideloader.web/sideloader/web/views.py | 1 | 27153 | from datetime import timedelta, datetime
import uuid
import urlparse
import json
import hashlib, hmac, base64
import time
import yaml
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.views.decorators.csrf import csrf_exempt
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.conf import settings
from sideloader import forms, tasks, models
def verifyHMAC(request, data=None):
clientauth = request.META['HTTP_AUTHORIZATION']
sig = request.META['HTTP_SIG']
if clientauth != settings.SPECTER_AUTHCODE:
return False
sign = [settings.SPECTER_AUTHCODE, request.method, request.path]
if data:
sign.append(
hashlib.sha1(data).hexdigest()
)
mysig = hmac.new(
key = settings.SPECTER_SECRET,
msg = '\n'.join(sign),
digestmod = hashlib.sha1
).digest()
return base64.b64encode(mysig) == sig
def getProjects(request):
if request.user.is_superuser:
return models.Project.objects.all().order_by('name')
else:
return request.user.project_set.all().order_by('name')
@login_required
def index(request):
projects = getProjects(request)
if request.user.is_superuser:
builds = models.Build.objects.filter(state=0).order_by('-build_time')
last_builds = models.Build.objects.filter(state__gt=0).order_by('-build_time')[:10]
else:
all_builds = models.Build.objects.filter(state=0).order_by('-build_time')
last_builds = models.Build.objects.filter(state__gt=0, project__in=projects).order_by('-build_time')[:10]
builds = []
for build in all_builds:
if build.project in projects:
builds.append(build)
else:
builds.append({'build_time': build.build_time, 'project': {'name': 'Private'}})
return render(request, "index.html", {
'builds': builds,
'last_builds': last_builds,
'projects': projects
})
@login_required
def accounts_profile(request):
if request.method == "POST":
form = forms.UserForm(request.POST, instance=request.user)
if form.is_valid():
user = form.save(commit=False)
user.set_password(form.cleaned_data['password'])
user.save()
return redirect('home')
else:
form = forms.UserForm(instance=request.user)
return render(request, "accounts_profile.html", {
'form': form,
'projects': getProjects(request)
})
@login_required
def manage_index(request):
if not request.user.is_superuser:
return redirect('home')
users = User.objects.all().order_by('username')
repos = models.PackageRepo.objects.all().order_by('name')
hives = []
for k, v in tasks.getClusterStatus().items():
v['hostname'] = k
hives.append({
'hostname': k,
'lastseen': time.ctime(v['lastseen']),
'status': v['status']
})
return render(request, "manage/index.html", {
'projects': getProjects(request),
'users': users,
'repos': repos,
'hives': hives
})
@login_required
def manage_create_repo(request):
if not request.user.is_superuser:
return redirect('home')
if request.method == "POST":
form = forms.PackageRepoForm(request.POST)
if form.is_valid():
release = form.save(commit=False)
release.save()
return redirect('manage_index')
else:
form = forms.PackageRepoForm()
return render(request, "manage/create_repo.html", {
'form': form,
'projects': getProjects(request),
})
@login_required
def manage_delete_repo(request, id):
repo = models.PackageRepo.objects.get(id=id)
repo.delete()
return redirect('manage_index')
@login_required
def server_index(request):
servers = models.Server.objects.all().order_by('last_checkin')
return render(request, "servers/index.html", {
'servers': servers,
'projects': getProjects(request)
})
@login_required
def server_log(request, id):
# Accepts stream target ID
target = models.Target.objects.get(id=id)
projects = getProjects(request)
d = {
'target': target,
'project': target.release.project,
'projects': projects
}
if (request.user.is_superuser) or (
target.release.project in request.user.project_set.all()):
d['target'] = target
return render(request, "servers/server_log.html", d)
@login_required
def release_index(request):
releases = models.ReleaseStream.objects.all()
return render(request, "releases/index.html", {
'releases': releases,
'projects': getProjects(request)
})
@login_required
def release_create(request):
if not request.user.is_superuser:
return redirect('home')
if request.method == "POST":
form = forms.ReleaseForm(request.POST)
if form.is_valid():
release = form.save(commit=False)
release.save()
return redirect('release_index')
else:
form = forms.ReleaseForm()
return render(request, 'releases/create_edit.html', {
'form': form,
'projects': getProjects(request)
})
@login_required
def release_edit(request, id):
if not request.user.is_superuser:
return redirect('home')
release = models.ReleaseStream.objects.get(id=id)
if request.method == "POST":
form = forms.ReleaseForm(request.POST, instance=release)
if form.is_valid():
release = form.save(commit=False)
release.save()
return redirect('release_index')
else:
form = forms.ReleaseForm(instance=release)
return render(request, 'releases/create_edit.html', {
'form': form,
'release': release,
'projects': getProjects(request)
})
@login_required
def module_index(request):
if not request.user.is_superuser:
return redirect('home')
modules = models.ModuleManifest.objects.all()
return render(request, 'modules/index.html', {
'modules': modules,
'projects': getProjects(request)
})
@login_required
def module_create(request):
if not request.user.is_superuser:
return redirect('home')
if request.method == "POST":
form = forms.ModuleForm(request.POST)
if form.is_valid():
module = form.save(commit=False)
module.save()
return redirect('module_index')
else:
form = forms.ModuleForm()
return render(request, 'modules/create_edit.html', {
'form': form,
'projects': getProjects(request)
})
@login_required
def module_edit(request, id):
if not request.user.is_superuser:
return redirect('home')
module = models.ModuleManifest.objects.get(id=id)
if request.method == "POST":
form = forms.ModuleForm(request.POST, instance=module)
if form.is_valid():
module = form.save(commit=False)
module.save()
return redirect('module_index')
else:
form = forms.ModuleForm(instance=module)
return render(request, 'modules/create_edit.html', {
'form': form,
'projects': getProjects(request)
})
@login_required
def module_scheme(request, id):
module = models.ModuleManifest.objects.get(id=id)
return HttpResponse(module.structure,
content_type='application/json')
@login_required
def manifest_view(request, id):
release = models.ReleaseStream.objects.get(id=id)
project = release.project
if not((request.user.is_superuser) or (
project in request.user.project_set.all())):
return redirect('home')
manifests = release.servermanifest_set.all()
return render(request, 'modules/manifest_view.html', {
'projects': getProjects(request),
'manifests': manifests,
'project': release.project,
'release': release
})
@login_required
def manifest_delete(request, id):
manifest = models.ServerManifest.objects.get(id=id)
release = manifest.release
project = release.project
if not((request.user.is_superuser) or (
project in request.user.project_set.all())):
return redirect('home')
manifest.delete()
return redirect('manifest_view', id=release.id)
@login_required
def manifest_add(request, id):
release = models.ReleaseStream.objects.get(id=id)
project = release.project
if not((request.user.is_superuser) or (
project in request.user.project_set.all())):
return redirect('home')
if request.method == "POST":
form = forms.ManifestForm(request.POST)
if form.is_valid():
manifest = form.save(commit=False)
manifest.release = release
manifest.save()
return redirect('manifest_view', id=release.id)
else:
form = forms.ManifestForm()
return render(request, 'modules/manifest_edit.html', {
'form': form,
'release': release,
'projects': getProjects(request),
'project': release.project
})
@login_required
def manifest_edit(request, id):
manifest = models.ServerManifest.objects.get(id=id)
project = manifest.release.project
if not((request.user.is_superuser) or (
project in request.user.project_set.all())):
return redirect('home')
if request.method == "POST":
form = forms.ManifestForm(request.POST, instance=manifest)
if form.is_valid():
manifest = form.save(commit=False)
manifest.save()
return redirect('manifest_view', id=manifest.release.id)
else:
form = forms.ManifestForm(instance=manifest)
return render(request, 'modules/manifest_edit.html', {
'form': form,
'projects': getProjects(request),
'project': project
})
@login_required
def stream_create(request, project):
p = models.Project.objects.get(id=project)
if request.method == "POST":
form = forms.StreamForm(request.POST)
if form.is_valid():
s = form.save(commit=False)
s.project = p
s.save()
form.save_m2m()
return redirect('projects_view', id=project)
else:
form = forms.StreamForm()
form.fields['targets'].queryset = p.target_set.all().order_by('description')
form.fields['repo'].queryset = p.repo_set.all().order_by('github_url')
return render(request, 'stream/create_edit.html', {
'form': form,
'project': p,
'projects': getProjects(request)
})
@login_required
def stream_edit(request, id):
stream = models.Stream.objects.get(id=id)
if request.method == "POST":
form = forms.StreamForm(request.POST, instance=stream)
if form.is_valid():
stream = form.save(commit=False)
stream.save()
form.save_m2m()
return redirect('projects_view', id=stream.repo.project.id)
else:
form = forms.StreamForm(instance=stream)
form.fields['targets'].queryset = stream.project.target_set.all().order_by('description')
return render(request, 'stream/create_edit.html', {
'form': form,
'stream': stream,
'project': stream.repo.project,
'projects': getProjects(request)
})
@login_required
def stream_delete(request, id):
stream = models.Stream.objects.get(id=id)
project = stream.project
if (request.user.is_superuser) or (
project in request.user.project_set.all()):
stream.delete()
return redirect('projects_view', id=project.id)
@login_required
def stream_push(request, flow, build):
flow = models.ReleaseStream.objects.get(id=flow)
project = flow.project
build = models.Build.objects.get(id=build)
if (request.user.is_superuser) or (
project in request.user.project_set.all()):
tasks.doRelease.delay(build, flow)
return redirect('projects_view', id=project.id)
@login_required
def stream_schedule(request, flow, build):
flow = models.ReleaseStream.objects.get(id=flow)
build = models.Build.objects.get(id=build)
if request.method == "POST":
form = forms.ReleasePushForm(request.POST)
if form.is_valid():
release = form.cleaned_data
schedule = release['scheduled'] + timedelta(hours=int(release['tz']))
tasks.doRelease.delay(build, flow, scheduled=schedule)
return redirect('projects_view', id=flow.project.id)
else:
form = forms.ReleasePushForm()
return render(request, 'stream/schedule.html', {
'projects': getProjects(request),
'project': flow.project,
'form': form,
'flow': flow,
'build': build
})
@login_required
def target_create(request, project):
project = models.Project.objects.get(id=project)
if request.method == "POST":
form = forms.TargetForm(request.POST)
if form.is_valid():
target = form.save(commit=False)
target.project = project
target.save()
return redirect('projects_view', id=project.id)
else:
form = forms.TargetForm()
#form.fields['server'].queryset = m.all().order_by('github_url')
return render(request, 'target/create_edit.html', {
'form': form,
'project': project,
'projects': getProjects(request)
})
@login_required
def target_edit(request, id):
target = models.Target.objects.get(id=id)
if request.method == "POST":
form = forms.TargetForm(request.POST, instance=target)
if form.is_valid():
target = form.save(commit=False)
target.save()
return redirect('projects_view', id=target.project.id)
else:
form = forms.TargetForm(instance=target)
return render(request, 'target/create_edit.html', {
'form': form,
'target': target,
'project': target.project,
'projects': getProjects(request)
})
@login_required
def target_delete(request, id):
target = models.Target.objects.get(id=id)
project = target.project
if (request.user.is_superuser) or (
project in request.user.project_set.all()):
target.delete()
return redirect('projects_view', id=project.id)
@login_required
def release_delete(request, id):
release = models.Release.objects.get(id=id)
project = release.flow.project
if (request.user.is_superuser) or (
project in request.user.project_set.all()):
release.delete()
return redirect('projects_view', id=project.id)
@login_required
def build_view(request, id):
build = models.Build.objects.get(id=id)
d = {
'projects': getProjects(request),
'project': build.project
}
if (request.user.is_superuser) or (
build.project in request.user.project_set.all()):
d['build'] = build
return render(request, 'projects/build_view.html', d)
@login_required
def projects_view(request, id):
project = models.Project.objects.get(id=id)
if (request.user.is_superuser) or (project in request.user.project_set.all()):
repos = project.repo_set.all().order_by('github_url')
builds = []
streams = []
releases = []
for repo in repos:
builds.extend(repo.build_set.all().order_by('-build_time'))
streams.extend(repo.stream_set.all().order_by('name'))
for stream in streams:
releases.extend(stream.release_set.all().order_by(
'-release_date'))
releases.sort(key=lambda r: r.release_date)
builds.sort(key=lambda r: r.build_time)
streams.sort(key=lambda r: r.name)
requests = project.serverrequest_set.filter(approval=0).order_by('request_date')
d = {
'project': project,
'repos': repos,
'targets': project.target_set.all().order_by('description'),
'builds': reversed(builds),
'streams': streams,
'releases': reversed(releases[-5:]),
'projects': getProjects(request),
'requests': requests
}
else:
d = {}
return render(request, 'projects/view.html', d)
@login_required
def project_graph(request, id):
# Server checkin endpoint
project = models.Project.objects.get(id=id)
data = {
'project': project.name,
'repos': [],
'targets': [],
'streams': []
}
for repo in project.repo_set.all():
data['repos'].append({
'name': repo.github_url,
'id': 'R%s' % repo.id
})
for target in project.target_set.all():
data['targets'].append({
'name': target.description,
'id': 'T%s' % target.id
})
for stream in project.stream_set.all():
data['streams'].append({
'id': 'S%s' % stream.id,
'name': stream.name,
'branch': stream.branch,
'repo_link': 'R%s' % stream.repo.id,
'target_link': ['T%s' % t.id for t in stream.targets.all()]
})
return HttpResponse(json.dumps(data),
content_type='application/json')
@login_required
def projects_delete(request, id):
if not request.user.is_superuser:
return redirect('home')
models.Project.objects.get(id=id).delete()
return redirect('home')
@login_required
def projects_create(request):
if not request.user.is_superuser:
return redirect('home')
if request.method == "POST":
form = forms.ProjectForm(request.POST)
if form.is_valid():
project = form.save(commit=False)
project.save()
return redirect('projects_view', id=project.id)
else:
form = forms.ProjectForm()
return render(request, 'projects/create_edit.html', {
'projects': getProjects(request),
'form': form
})
@login_required
def projects_edit(request, id):
if not request.user.is_superuser:
return redirect('home')
project = models.Project.objects.get(id=id)
if request.method == "POST":
form = forms.ProjectForm(request.POST, instance=project)
if form.is_valid():
project = form.save(commit=False)
project.save()
form.save_m2m()
return redirect('projects_view', id=id)
else:
form = forms.ProjectForm(instance=project)
d = {
'form': form,
'project': project,
'projects': getProjects(request)
}
return render(request, 'projects/create_edit.html', d)
@login_required
def server_request(request, project):
project = models.Project.objects.get(id=project)
if request.method == "POST":
form = forms.ServerRequestForm(request.POST)
if form.is_valid():
server = form.save(commit=False)
server.requested_by = request.user
server.project = project
server.save()
return redirect('projects_view', id=project.id)
else:
form = forms.ServerRequestForm()
return render(request, 'projects/server_request.html', {
'form': form,
'project': project,
'projects': getProjects(request),
})
@login_required
def repo_edit(request, id):
repo = models.Repo.objects.get(id=id)
project = repo.project
if request.method == "POST":
form = forms.RepoForm(request.POST, instance=repo)
if form.is_valid():
repo = form.save(commit=False)
repo.project = project
repo.save()
return redirect('projects_view', id=id)
else:
form = forms.RepoForm(instance=repo)
d = {
'projects': getProjects(request),
'repo': repo,
'form': form
}
return render(request, 'repo/create_edit.html', d)
@login_required
def repo_delete(request, id):
repo = models.Repo.objects.get(id=id)
projectid = repo.project.id
repo.delete()
return redirect('projects_view', id=projectid)
@login_required
def repo_create(request, project):
project = models.Project.objects.get(id=project)
if request.method == "POST":
form = forms.RepoForm(request.POST)
if form.is_valid():
repo = form.save(commit=False)
repo.created_by_user = request.user
repo.idhash = uuid.uuid1().get_hex()
repo.project = project
repo.save()
return redirect('projects_view', id=project.id)
else:
form = forms.RepoForm()
return render(request, 'repo/create_edit.html', {
'projects': getProjects(request),
'project': project,
'form': form
})
@login_required
def help_index(request):
return render(request, 'help/index.html')
@login_required
def build_cancel(request, id):
build = models.Build.objects.get(id=id)
if build.project in request.user.project_set.all():
build.state = 3
build.save()
return redirect('home')
@login_required
def projects_build(request, id):
project = models.Project.objects.get(id=id)
if project and (request.user.is_superuser or (
project in request.user.project_set.all())):
current_builds = models.Build.objects.filter(project=project, state=0)
if current_builds:
return redirect('build_view', id=current_builds[0].id)
else:
bcount = project.build_counter + 1
build = models.Build.objects.create(project=project, state=0, build_num=bcount)
task_id = tasks.build(build)
build.task_id = task_id
build.save()
project.build_counter = bcount
project.save()
return redirect('build_view', id=build.id)
return redirect('home')
@login_required
def build_output(request, id):
build = models.Build.objects.get(id=id)
if (request.user.is_superuser) or (
build.project in request.user.project_set.all()):
d = {'state': build.state, 'log': build.log}
else:
d = {}
return HttpResponse(json.dumps(d), content_type='application/json')
@login_required
def get_servers(request):
d = [s.name for s in models.Server.objects.all()]
return HttpResponse(json.dumps(d), content_type='application/json')
@login_required
def get_stream_servers(request, id):
stream = models.ReleaseStream.objects.get(id=id)
d = [s.server.name for s in stream.target_set.all()]
return HttpResponse(json.dumps(d), content_type='application/json')
#############
# API methods
@csrf_exempt
def api_build(request, hash):
project = models.Project.objects.get(idhash=hash)
if project:
if request.method == 'POST':
if request.POST.get('payload'):
r = json.loads(request.POST['payload'])
else:
r = json.loads(request.body)
ref = r.get('ref', '')
branch = ref.split('/',2)[-1]
if branch != project.branch:
return HttpResponse('{"result": "Request ignored"}',
content_type='application/json')
current_builds = models.Build.objects.filter(project=project, state=0)
if not current_builds:
build = models.Build.objects.create(project=project, state=0)
task = tasks.build(build)
build.task_id = task.task_id
build.save()
return HttpResponse('{"result": "Building"}',
content_type='application/json')
return HttpResponse('{"result": "Already building"}',
content_type='application/json')
return redirect('home')
@csrf_exempt
def api_sign(request, hash):
signoff = models.ReleaseSignoff.objects.get(idhash=hash)
signoff.signed = True
signoff.save()
if signoff.release.waiting:
if signoff.release.check_signoff():
tasks.runRelease.delay(signoff.release)
return render(request, "sign.html", {
'signoff': signoff
})
@csrf_exempt
def api_checkin(request):
# Server checkin endpoint
if request.method == 'POST':
if verifyHMAC(request, request.body):
data = json.loads(request.body)
try:
server = models.Server.objects.get(name=data['hostname'])
except models.Server.DoesNotExist:
server = models.Server.objects.create(name=data['hostname'])
server.last_checkin = datetime.now()
server.save()
return HttpResponse(json.dumps({}),
content_type='application/json')
return HttpResponse(
json.dumps({"error": "Not authorized"}),
content_type='application/json'
)
@csrf_exempt
def api_enc(request, server):
# Puppet ENC
if verifyHMAC(request):
# Build our ENC dict
try:
server = models.Server.objects.get(name=server)
except:
server = None
if server:
releases = [target.release for target in server.target_set.all()]
server.last_checkin = datetime.now()
server.last_puppet_run = datetime.now()
server.change = False
server.status = "Success"
cdict = {}
for release in releases:
for manifest in release.servermanifest_set.all():
key = manifest.module.key
try:
value = json.loads(manifest.value)
except Exception, e:
server.status = "Validation error in manifest "
server.status += "%s -> %s -> %s: %s" % (
release.project.name,
release.name,
manifest.module.name,
e
)
continue
if isinstance(value, list):
if key in cdict:
cdict[key].extend(value)
else:
cdict[key] = value
if isinstance(value, dict):
for k, v in value.items():
if key in cdict:
cdict[key][k] = v
else:
cdict[key] = {k: v}
server.save()
node = {
'parameters': cdict
}
else:
node = {}
return HttpResponse(yaml.safe_dump(node),
content_type='application/yaml')
return HttpResponse(
json.dumps({"error": "Not authorized"}),
content_type='application/json'
)
| mit | 105,687,536,872,928,980 | 27.108696 | 113 | 0.593599 | false | 3.928954 | false | false | false |
ThomasZh/legend-league-portal | foo/portal/newsup.py | 1 | 53200 | #!/usr/bin/env python
# _*_ coding: utf-8_*_
#
# Copyright 2016 planc2c.com
# [email protected]
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tornado.web
import logging
import time
import sys
import os
import uuid
import smtplib
import json as JSON # 启用别名,不会跟方法里的局部变量混淆
from bson import json_util
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../"))
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../dao"))
from tornado.escape import json_encode, json_decode
from tornado.httpclient import *
from tornado.httputil import url_concat
from bson import json_util
from comm import *
from global_const import *
class WxMpVerifyHandler(tornado.web.RequestHandler):
def get(self):
self.finish('qdkkOWgyqqLTrijx')
return
class NewsupLoginNextHandler(tornado.web.RequestHandler):
def get(self):
login_next = self.get_secure_cookie("login_next")
logging.info("got login_next %r",login_next)
if login_next:
self.redirect(login_next)
else:
self.redirect("/portal/newsup/index")
class NewsupIndexHandler(BaseHandler):
def get(self):
logging.info(self.request)
# league(联盟信息)
league_info = self.get_league_info()
# franchises(景区)
params = {"filter":"league", "franchise_type":"景区", "page":1, "limit":5}
url = url_concat(API_DOMAIN+"/api/leagues/"+LEAGUE_ID+"/clubs", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
rs = data['rs']
franchises = rs['data']
for franchise in franchises:
franchise['create_time'] = timestamp_friendly_date(franchise['create_time'])
# suppliers(供应商)
params = {"filter":"league", "franchise_type":"供应商", "page":1, "limit":5}
url = url_concat(API_DOMAIN+"/api/leagues/"+LEAGUE_ID+"/clubs", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
rs = data['rs']
suppliers = rs['data']
for supplier in suppliers:
supplier['create_time'] = timestamp_friendly_date(supplier['create_time'])
# sceneries(景点)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "category":"41c057a6f73411e69a3c00163e023e51", "idx":0, "limit":5}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
sceneries = data['rs']
for article in sceneries:
article['publish_time'] = timestamp_friendly_date(article['publish_time'])
# journey(游记)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "category":"01d6120cf73411e69a3c00163e023e51", "idx":0, "limit":12}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
journeies = data['rs']
for article in journeies:
article['publish_time'] = timestamp_friendly_date(article['publish_time'])
# activity(活动)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "category":"0bbf89e2f73411e69a3c00163e023e51", "idx":0, "limit":4}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
activities = data['rs']
# recently articles(最新文章news)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "idx":0, "limit":6}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
news = data['rs']
for article in news:
article['publish_time'] = timestamp_friendly_date(article['publish_time'])
# popular(流行)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "category":"3801d62cf73411e69a3c00163e023e51", "idx":0, "limit":4}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
populars = data['rs']
for article in populars:
article['publish_time'] = timestamp_friendly_date(article['publish_time'])
# hot(热点新闻)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "category":"1b86ad38f73411e69a3c00163e023e51", "idx":0, "limit":12}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
hots = data['rs']
for article in hots:
article['publish_time'] = timestamp_friendly_date(article['publish_time'])
# lastest comments(最新的评论)
params = {"filter":"league", "league_id":LEAGUE_ID, "idx":0, "limit":5}
url = url_concat(API_DOMAIN+"/api/last-comments", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
lastest_comments = data['rs']
for comment in lastest_comments:
comment['create_time'] = timestamp_friendly_date(comment['create_time'])
# multimedia
params = {"filter":"league", "league_id":LEAGUE_ID, "idx":0, "limit":8}
url = url_concat(API_DOMAIN+"/api/multimedias", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
multimedias = data['rs']
for multimedia in multimedias:
multimedia['publish_time'] = timestamp_friendly_date(multimedia['publish_time'])
# notices
params = {"filter":"league", "league_id":LEAGUE_ID, "page":1, "limit":3}
url = url_concat(API_DOMAIN+"/api/notice-board", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
notices = data['rs']
is_login = False
access_token = self.get_secure_cookie("access_token")
logging.info("got access_token>>>>> %r",access_token)
if access_token:
is_login = True
self.render('newsup/index.html',
is_login=is_login,
franchises=franchises,
suppliers=suppliers,
sceneries=sceneries,
journeies=journeies,
news=news,
populars=populars,
hots=hots,
league_info=league_info,
activities=activities,
lastest_comments=lastest_comments,
multimedias=multimedias,
api_domain=API_DOMAIN,
notices=notices['data'])
class NewsupAccountHandler(AuthorizationHandler):
@tornado.web.authenticated # if no session, redirect to login page
def get(self):
logging.info(self.request)
is_login = False
access_token = self.get_secure_cookie("access_token")
if access_token:
is_login = True
# league(联盟信息)
league_info = self.get_league_info()
headers = {"Authorization":"Bearer "+access_token}
url = API_DOMAIN+"/api/myinfo?filter=login"
http_client = HTTPClient()
response = http_client.fetch(url, method="GET", headers=headers)
logging.info("got response %r", response.body)
data = json_decode(response.body)
user = data['rs']
self.render('newsup/account.html',
is_login=is_login,
league_info=league_info,
user = user,
access_token=access_token,
api_domain=API_DOMAIN,
upyun_domain=UPYUN_DOMAIN,
upyun_notify_url=UPYUN_NOTIFY_URL,
upyun_form_api_secret=UPYUN_FORM_API_SECRET,
upyun_bucket=UPYUN_BUCKET)
class NewsupAuthorHandler(BaseHandler):
def get(self):
logging.info(self.request)
is_login = False
access_token = self.get_secure_cookie("access_token")
if access_token:
is_login = True
# league(联盟信息)
league_info = self.get_league_info()
# news(新闻)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "category":"30a56cb8f73411e69a3c00163e023e51", "idx":0, "limit":6}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
news = data['rs']
for article in news:
article['publish_time'] = timestamp_friendly_date(article['publish_time'])
# popular(流行)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "category":"3801d62cf73411e69a3c00163e023e51", "idx":0, "limit":6}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
populars = data['rs']
for article in populars:
article['publish_time'] = timestamp_friendly_date(article['publish_time'])
# activity(活动)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "category":"0bbf89e2f73411e69a3c00163e023e51", "idx":0, "limit":4}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
activities = data['rs']
# lastest comments(最新的评论)
params = {"filter":"league", "league_id":LEAGUE_ID, "idx":0, "limit":5}
url = url_concat(API_DOMAIN+"/api/last-comments", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
lastest_comments = data['rs']
for comment in lastest_comments:
comment['create_time'] = timestamp_friendly_date(comment['create_time'])
self.render('newsup/author.html',
is_login=is_login,
league_info=league_info,
news=news,
populars=populars,
activities=activities,
api_domain=API_DOMAIN,
lastest_comments=lastest_comments)
class NewsupMediaHandler(BaseHandler):
def get(self):
logging.info(self.request)
# league(联盟信息)
league_info = self.get_league_info()
# multimedia
params = {"filter":"league", "league_id":LEAGUE_ID, "idx":0, "limit":14}
url = url_concat(API_DOMAIN+"/api/multimedias", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
multimedias = data['rs']
# news(新闻)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "category":"30a56cb8f73411e69a3c00163e023e51", "idx":0, "limit":6}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
news = data['rs']
for article in news:
article['publish_time'] = timestamp_friendly_date(article['publish_time'])
# hot(热点新闻)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "category":"1b86ad38f73411e69a3c00163e023e51", "idx":0, "limit":12}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
hots = data['rs']
for article in hots:
article['publish_time'] = timestamp_friendly_date(article['publish_time'])
# popular(流行)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "category":"3801d62cf73411e69a3c00163e023e51", "idx":0, "limit":6}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
populars = data['rs']
for article in populars:
article['publish_time'] = timestamp_friendly_date(article['publish_time'])
# activity(活动)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "category":"0bbf89e2f73411e69a3c00163e023e51", "idx":0, "limit":4}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
activities = data['rs']
# lastest comments(最新的评论)
params = {"filter":"league", "league_id":LEAGUE_ID, "idx":0, "limit":5}
url = url_concat(API_DOMAIN+"/api/last-comments", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
lastest_comments = data['rs']
for comment in lastest_comments:
comment['create_time'] = timestamp_friendly_date(comment['create_time'])
is_login = False
access_token = self.get_secure_cookie("access_token")
if access_token:
is_login = True
self.render('newsup/media.html',
is_login=is_login,
league_info=league_info,
news=news,
populars=populars,
activities=activities,
hots=hots,
lastest_comments=lastest_comments,
league_id=LEAGUE_ID,
api_domain=API_DOMAIN,
multimedias=multimedias)
class NewsupShortcodesHandler(BaseHandler):
def get(self):
logging.info(self.request)
# league(联盟信息)
league_info = self.get_league_info()
# news(新闻)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "category":"30a56cb8f73411e69a3c00163e023e51", "idx":0, "limit":6}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
news = data['rs']
for article in news:
article['publish_time'] = timestamp_friendly_date(article['publish_time'])
# popular(流行)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "category":"3801d62cf73411e69a3c00163e023e51", "idx":0, "limit":6}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
populars = data['rs']
for article in populars:
article['publish_time'] = timestamp_friendly_date(article['publish_time'])
# activity(活动)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "category":"0bbf89e2f73411e69a3c00163e023e51", "idx":0, "limit":4}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
activities = data['rs']
is_login = False
access_token = self.get_secure_cookie("access_token")
if access_token:
is_login = True
self.render('newsup/shortcodes.html',
is_login=is_login,
league_info=league_info,
news=news,
activities=activities,
api_domain=API_DOMAIN,
populars=populars)
class NewsupContactHandler(BaseHandler):
def get(self):
logging.info(self.request)
# league(联盟信息)
league_info = self.get_league_info()
# lastest comments(最新的评论)
params = {"filter":"league", "league_id":LEAGUE_ID, "idx":0, "limit":5}
url = url_concat(API_DOMAIN+"/api/last-comments", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
lastest_comments = data['rs']
for comment in lastest_comments:
comment['create_time'] = timestamp_friendly_date(comment['create_time'])
is_login = False
access_token = self.get_secure_cookie("access_token")
if access_token:
is_login = True
self.render('newsup/contact.html',
is_login=is_login,
league_info=league_info,
lastest_comments=lastest_comments,
api_domain=API_DOMAIN,
league_id=LEAGUE_ID)
class NewsupItemDetailHandler(BaseHandler):
def get(self):
logging.info(self.request)
article_id = self.get_argument("id", "")
# league(联盟信息)
league_info = self.get_league_info()
# recently articles(最新文章news)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "idx":0, "limit":6}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
news = data['rs']
for article in news:
article['publish_time'] = timestamp_friendly_date(article['publish_time'])
# popular(流行)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "category":"3801d62cf73411e69a3c00163e023e51", "idx":0, "limit":4}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
populars = data['rs']
for article in populars:
article['publish_time'] = timestamp_friendly_date(article['publish_time'])
# activity(活动)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "category":"0bbf89e2f73411e69a3c00163e023e51", "idx":0, "limit":4}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
activities = data['rs']
# article
url = API_DOMAIN+"/api/articles/"+article_id
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got article response %r", response.body)
data = json_decode(response.body)
article_info = data['rs']
article_info['publish_time'] = timestamp_friendly_date(article_info['publish_time'])
# hot(热点新闻)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "category":"1b86ad38f73411e69a3c00163e023e51", "idx":0, "limit":12}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
hots = data['rs']
for article in hots:
article['publish_time'] = timestamp_friendly_date(article['publish_time'])
# update read_num
read_num = article_info['read_num']
url = API_DOMAIN+"/api/articles/"+article_id+"/read"
http_client = HTTPClient()
_body = {"read_num": read_num+1}
_json = json_encode(_body)
response = http_client.fetch(url, method="POST", body=_json)
logging.info("got update read_num response %r", response.body)
# lastest comments(最新的评论)
params = {"filter":"league", "league_id":LEAGUE_ID, "idx":0, "limit":5}
url = url_concat(API_DOMAIN+"/api/last-comments", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
lastest_comments = data['rs']
for comment in lastest_comments:
comment['create_time'] = timestamp_friendly_date(comment['create_time'])
# multimedia
params = {"filter":"league", "league_id":LEAGUE_ID, "idx":0, "limit":4}
url = url_concat(API_DOMAIN+"/api/multimedias", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
multimedias = data['rs']
is_login = False
access_token = self.get_secure_cookie("access_token")
if access_token:
is_login = True
self.render('newsup/item-detail.html',
is_login=is_login,
access_token=access_token,
league_info=league_info,
article_info=article_info,
news=news,
populars=populars,
hots=hots,
activities=activities,
api_domain=API_DOMAIN,
multimedias=multimedias,
lastest_comments=lastest_comments)
class NewsupNewHandler(BaseHandler):
def get(self):
logging.info(self.request)
# league(联盟信息)
league_info = self.get_league_info()
is_login = False
access_token = self.get_secure_cookie("access_token")
if access_token:
is_login = True
self.render('newsup/new.html',
league_info=league_info,
api_domain=API_DOMAIN,
is_login=is_login)
class NewsupCategoryTileHandler(BaseHandler):
def get(self):
logging.info(self.request)
# league(联盟信息)
league_info = self.get_league_info()
# recently articles(最新文章news)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "idx":0, "limit":6}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
news = data['rs']
for article in news:
article['publish_time'] = timestamp_friendly_date(article['publish_time'])
# popular(流行)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "category":"3801d62cf73411e69a3c00163e023e51", "idx":0, "limit":6}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
populars = data['rs']
for article in populars:
article['publish_time'] = timestamp_friendly_date(article['publish_time'])
# activity(活动)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "category":"0bbf89e2f73411e69a3c00163e023e51", "idx":0, "limit":4}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
activities = data['rs']
# lastest comments(最新的评论)
params = {"filter":"league", "league_id":LEAGUE_ID, "idx":0, "limit":5}
url = url_concat(API_DOMAIN+"/api/last-comments", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
lastest_comments = data['rs']
for comment in lastest_comments:
comment['create_time'] = timestamp_friendly_date(comment['create_time'])
is_login = False
access_token = self.get_secure_cookie("access_token")
if access_token:
is_login = True
self.render('newsup/category-tile.html',
is_login=is_login,
league_info=league_info,
lastest_comments=lastest_comments,
news=news,
activities=activities,
api_domain=API_DOMAIN,
populars=populars)
class NewsupCategoryHandler(BaseHandler):
def get(self):
logging.info(self.request)
category_id = self.get_argument("id", "")
# league(联盟信息)
league_info = self.get_league_info()
# query category_name by category_id
url = API_DOMAIN+"/api/categories/" + category_id
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
category = data['rs']
# query by category_id
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "category":category_id, "idx":0, "limit":6}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
sceneries = data['rs']
for article in sceneries:
article['publish_time'] = timestamp_friendly_date(article['publish_time'])
# multimedia
params = {"filter":"league", "league_id":LEAGUE_ID, "idx":0, "limit":4}
url = url_concat(API_DOMAIN+"/api/multimedias", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
multimedias = data['rs']
# recently articles(最新文章news)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "idx":0, "limit":6}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
news = data['rs']
for article in news:
article['publish_time'] = timestamp_friendly_date(article['publish_time'])
# popular(流行)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "category":"3801d62cf73411e69a3c00163e023e51", "idx":0, "limit":6}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
populars = data['rs']
for article in populars:
article['publish_time'] = timestamp_friendly_date(article['publish_time'])
# activity(活动)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "category":"0bbf89e2f73411e69a3c00163e023e51", "idx":0, "limit":4}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
activities = data['rs']
# hot(热点新闻)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "category":"1b86ad38f73411e69a3c00163e023e51", "idx":0, "limit":12}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
hots = data['rs']
for article in hots:
article['publish_time'] = timestamp_friendly_date(article['publish_time'])
# lastest comments(最新的评论)
params = {"filter":"league", "league_id":LEAGUE_ID, "idx":0, "limit":5}
url = url_concat(API_DOMAIN+"/api/last-comments", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
lastest_comments = data['rs']
for comment in lastest_comments:
comment['create_time'] = timestamp_friendly_date(comment['create_time'])
is_login = False
access_token = self.get_secure_cookie("access_token")
if access_token:
is_login = True
self.render('newsup/category.html',
is_login=is_login,
league_info=league_info,
sceneries=sceneries,
news=news,
hots=hots,
populars=populars,
activities=activities,
lastest_comments=lastest_comments,
multimedias=multimedias,
league_id=LEAGUE_ID,
category_id=category_id,
api_domain=API_DOMAIN,
category=category)
class NewsupCategorySearchHandler(BaseHandler):
def get(self):
logging.info(self.request)
category_id = self.get_argument("id", "")
# league(联盟信息)
league_info = self.get_league_info()
# query category_name by category_id
url = API_DOMAIN+"/api/categories/" + category_id
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
category = data['rs']
# query by category_id
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "category":category_id, "idx":0, "limit":6}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
sceneries = data['rs']
for article in sceneries:
article['publish_time'] = timestamp_friendly_date(article['publish_time'])
# multimedia
params = {"filter":"league", "league_id":LEAGUE_ID, "idx":0, "limit":4}
url = url_concat(API_DOMAIN+"/api/multimedias", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
multimedias = data['rs']
# recently articles(最新文章news)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "idx":0, "limit":6}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
news = data['rs']
for article in news:
article['publish_time'] = timestamp_friendly_date(article['publish_time'])
# popular(流行)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "category":"3801d62cf73411e69a3c00163e023e51", "idx":0, "limit":6}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
populars = data['rs']
for article in populars:
article['publish_time'] = timestamp_friendly_date(article['publish_time'])
# activity(活动)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "category":"0bbf89e2f73411e69a3c00163e023e51", "idx":0, "limit":4}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
activities = data['rs']
# hot(热点新闻)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "category":"1b86ad38f73411e69a3c00163e023e51", "idx":0, "limit":12}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
hots = data['rs']
for article in hots:
article['publish_time'] = timestamp_friendly_date(article['publish_time'])
# lastest comments(最新的评论)
params = {"filter":"league", "league_id":LEAGUE_ID, "idx":0, "limit":5}
url = url_concat(API_DOMAIN+"/api/last-comments", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
lastest_comments = data['rs']
for comment in lastest_comments:
comment['create_time'] = timestamp_friendly_date(comment['create_time'])
is_login = False
access_token = self.get_secure_cookie("access_token")
if access_token:
is_login = True
self.render('newsup/category-search.html',
is_login=is_login,
league_info=league_info,
sceneries=sceneries,
news=news,
hots=hots,
populars=populars,
activities=activities,
lastest_comments=lastest_comments,
multimedias=multimedias,
league_id=LEAGUE_ID,
category_id=category_id,
api_domain=API_DOMAIN,
category=category)
class NewsupFranchisesHandler(BaseHandler):
def get(self):
logging.info(self.request)
franchise_type = self.get_argument("franchise_type", "")
franchise_type = franchise_type.encode('utf-8')
logging.info("got franchise_type %r from argument", franchise_type)
# league(联盟信息)
league_info = self.get_league_info()
# franchises(景区)
params = {"franchise_type":franchise_type, "page":1, "limit":1}
url = url_concat(API_DOMAIN+"/api/leagues/"+LEAGUE_ID+"/clubs", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
franchises = data['rs']['data']
for franchise in franchises:
franchise['create_time'] = timestamp_friendly_date(franchise['create_time'])
# multimedia
params = {"filter":"league", "league_id":LEAGUE_ID, "idx":0, "limit":4}
url = url_concat(API_DOMAIN+"/api/multimedias", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
multimedias = data['rs']
# recently articles(最新文章news)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "idx":0, "limit":6}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
news = data['rs']
for article in news:
article['publish_time'] = timestamp_friendly_date(article['publish_time'])
# popular(流行)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "category":"3801d62cf73411e69a3c00163e023e51", "idx":0, "limit":6}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
populars = data['rs']
for article in populars:
article['publish_time'] = timestamp_friendly_date(article['publish_time'])
# activity(活动)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "category":"0bbf89e2f73411e69a3c00163e023e51", "idx":0, "limit":4}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
activities = data['rs']
# hot(热点新闻)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "category":"1b86ad38f73411e69a3c00163e023e51", "idx":0, "limit":12}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
hots = data['rs']
for article in hots:
article['publish_time'] = timestamp_friendly_date(article['publish_time'])
# lastest comments(最新的评论)
params = {"filter":"league", "league_id":LEAGUE_ID, "idx":0, "limit":5}
url = url_concat(API_DOMAIN+"/api/last-comments", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
lastest_comments = data['rs']
for comment in lastest_comments:
comment['create_time'] = timestamp_friendly_date(comment['create_time'])
is_login = False
access_token = self.get_secure_cookie("access_token")
if access_token:
is_login = True
self.render('newsup/franchises.html',
is_login=is_login,
league_info=league_info,
franchises=franchises,
multimedias=multimedias,
news=news,
hots= hots,
populars=populars,
activities=activities,
lastest_comments=lastest_comments,
league_id=LEAGUE_ID,
api_domain=API_DOMAIN,
franchise_type=franchise_type)
class NewsupFranchiseDetailHandler(BaseHandler):
def get(self):
logging.info(self.request)
franchise_id = self.get_argument("id", "")
access_token = self.get_secure_cookie("access_token")
# league(联盟信息)
league_info = self.get_league_info()
# recently articles(最新文章news)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "idx":0, "limit":4}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
news = data['rs']
for article in news:
article['publish_time'] = timestamp_friendly_date(article['publish_time'])
# popular(流行)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "category":"3801d62cf73411e69a3c00163e023e51", "idx":0, "limit":4}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
populars = data['rs']
for article in populars:
article['publish_time'] = timestamp_friendly_date(article['publish_time'])
# activity(活动)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "category":"0bbf89e2f73411e69a3c00163e023e51", "idx":0, "limit":4}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
activities = data['rs']
# article
url = API_DOMAIN+"/api/clubs/"+franchise_id
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got article response %r", response.body)
data = json_decode(response.body)
franchise = data['rs']
if not franchise.has_key('paragraphs'):
franchise['paragraphs'] = ''
if not franchise.has_key('franchise_type'):
franchise['franchise_type'] = 'franchise'
if franchise.has_key('create_time'):
franchise['create_time'] = timestamp_friendly_date(franchise['create_time'])
else:
franchise['create_time'] = timestamp_friendly_date(0)
# franchise['create_time'] = timestamp_friendly_date(franchise['create_time'])
# hot(热点新闻)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "category":"1b86ad38f73411e69a3c00163e023e51", "idx":0, "limit":12}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
hots = data['rs']
for article in hots:
article['publish_time'] = timestamp_friendly_date(article['publish_time'])
# update read_num
read_num = franchise['read_num']
url = API_DOMAIN+"/api/articles/"+franchise_id+"/read"
http_client = HTTPClient()
_body = {"read_num": read_num+1}
_json = json_encode(_body)
response = http_client.fetch(url, method="POST", body=_json)
logging.info("got update read_num response %r", response.body)
# lastest comments(最新的评论)
params = {"filter":"league", "league_id":LEAGUE_ID, "idx":0, "limit":5}
url = url_concat(API_DOMAIN+"/api/last-comments", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
lastest_comments = data['rs']
for comment in lastest_comments:
comment['create_time'] = timestamp_friendly_date(comment['create_time'])
# multimedia
params = {"filter":"league", "league_id":LEAGUE_ID, "idx":0, "limit":4}
url = url_concat(API_DOMAIN+"/api/multimedias", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
multimedias = data['rs']
is_login = False
access_token = self.get_secure_cookie("access_token")
if access_token:
is_login = True
self.render('newsup/franchise-detail.html',
is_login=is_login,
access_token=access_token,
league_info=league_info,
franchise=franchise,
news=news,
populars=populars,
hots=hots,
activities=activities,
multimedias=multimedias,
api_domain=API_DOMAIN,
lastest_comments=lastest_comments)
class NewsupApplyFranchiseHandler(AuthorizationHandler):
@tornado.web.authenticated # if no session, redirect to login page
def get(self):
logging.info(self.request)
# league(联盟信息)
league_info = self.get_league_info()
is_login = False
access_token = self.get_secure_cookie("access_token")
if access_token:
is_login = True
franchise = None
try:
params = {"filter":"franchise"}
url = url_concat(API_DOMAIN+"/api/myinfo", params)
http_client = HTTPClient()
headers={"Authorization":"Bearer "+access_token}
response = http_client.fetch(url, method="GET", headers=headers)
logging.info("got response %r", response.body)
data = json_decode(response.body)
franchise = data['rs']
if franchise:
if not franchise['club'].has_key("province"):
franchise['club']['province'] = ''
franchise['club']['city'] = ''
if not franchise['club'].has_key("city"):
franchise['club']['city'] = ''
if not franchise['club'].has_key("franchise_type"):
franchise['club']['franchise_type'] = ''
franchise['create_time'] = timestamp_datetime(franchise['create_time'])
except:
logging.info("got franchise=[None]")
# lastest comments(最新的评论)
params = {"filter":"league", "league_id":LEAGUE_ID, "idx":0, "limit":5}
url = url_concat(API_DOMAIN+"/api/last-comments", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
lastest_comments = data['rs']
for comment in lastest_comments:
comment['create_time'] = timestamp_friendly_date(comment['create_time'])
self.render('newsup/apply-franchise.html',
is_login=is_login,
league_info=league_info,
access_token=access_token,
league_id=LEAGUE_ID,
franchise=franchise,
api_domain=API_DOMAIN,
upyun_domain=UPYUN_DOMAIN,
upyun_notify_url=UPYUN_NOTIFY_URL,
upyun_form_api_secret=UPYUN_FORM_API_SECRET,
upyun_bucket=UPYUN_BUCKET,
lastest_comments=lastest_comments)
class NewsupSearchResultHandler(BaseHandler):
def get(self):
logging.info(self.request)
# category_id = self.get_argument("id", "")
# league(联盟信息)
league_info = self.get_league_info()
# query by category_id
# params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "category":category_id, "idx":0, "limit":6}
# url = url_concat(API_DOMAIN+"/api/articles", params)
# http_client = HTTPClient()
# response = http_client.fetch(url, method="GET")
# logging.info("got sceneries response %r", response.body)
# data = json_decode(response.body)
# sceneries = data['rs']
# for article in sceneries:
# article['publish_time'] = timestamp_friendly_date(article['publish_time'])
# multimedia
params = {"filter":"league", "league_id":LEAGUE_ID, "idx":0, "limit":4}
url = url_concat(API_DOMAIN+"/api/multimedias", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
multimedias = data['rs']
# news(新闻)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "category":"0e9a3c68e94511e6b40600163e023e51", "idx":0, "limit":6}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
news = data['rs']
for article in news:
article['publish_time'] = timestamp_friendly_date(article['publish_time'])
# popular(流行)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "category":"3801d62cf73411e69a3c00163e023e51", "idx":0, "limit":6}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
populars = data['rs']
for article in populars:
article['publish_time'] = timestamp_friendly_date(article['publish_time'])
# activity(活动)
params = {"filter":"league", "league_id":LEAGUE_ID, "status":"publish", "category":"0bbf89e2f73411e69a3c00163e023e51", "idx":0, "limit":4}
url = url_concat(API_DOMAIN+"/api/articles", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
activities = data['rs']
# lastest comments(最新的评论)
params = {"filter":"league", "league_id":LEAGUE_ID, "idx":0, "limit":5}
url = url_concat(API_DOMAIN+"/api/last-comments", params)
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
lastest_comments = data['rs']
for comment in lastest_comments:
comment['create_time'] = timestamp_friendly_date(comment['create_time'])
is_login = False
access_token = self.get_secure_cookie("access_token")
if access_token:
is_login = True
self.render('newsup/search-result.html',
is_login=is_login,
league_info=league_info,
news=news,
populars=populars,
activities=activities,
lastest_comments=lastest_comments,
multimedias=multimedias,
league_id=LEAGUE_ID,
api_domain=API_DOMAIN)
| apache-2.0 | -2,263,610,190,328,251,000 | 41.378117 | 147 | 0.594806 | false | 3.506124 | true | false | false |
MarkLark/dstore | tests/__init__.py | 1 | 1435 | from dstore import MemoryStore, Model, var, mod
from unittest import TestCase
__all__ = [ "BaseTest", "Car", "AllVars" ]
class Car( Model ):
_namespace = "cars.make"
_vars = [
var.RowID,
var.String( "manufacturer", 32, mods = [ mod.NotNull() ] ),
var.String( "make", 32, mods = [ mod.NotNull() ] ),
var.Number( "year", mods = [ mod.NotNull(), mod.Min( 1950 ), mod.Max( 2017 ) ] ),
]
class AllVars( Model ):
_namespace = "all.vars"
_vars = [
var.RowID,
var.Number( "number", mods = [ mod.Min( 0 ), mod.Max( 100 ) ] ),
var.Boolean( "boolean" ),
var.String( "string", 32, mods = [ mod.NotNull() ] ),
var.Character( "character", 4 ),
var.Binary( "binary", 25 ),
var.Text( "text" ),
var.Float( "float" ),
var.Enum( "enum", [ "one", "two", "three" ] ),
var.ForeignKey( "cars.make" )
]
class BaseTest( TestCase ):
models = [ Car, AllVars ]
auto_create = True
auto_init = True
def setUp( self ):
if self.auto_init:
self.store = MemoryStore( self.models )
self.store.init_app()
self.store.connect()
if self.auto_create: self.store.create_all()
def tearDown( self ):
if self.auto_create: self.store.destroy_all()
if self.auto_init:
self.store.disconnect()
self.store.destroy_app()
| mit | 3,888,849,410,973,275,600 | 28.285714 | 89 | 0.530314 | false | 3.368545 | false | false | false |
keialk/TRAP | OneTime.py | 1 | 6151 |
"""
TRAP - Time-series RNA-seq Analysis Package
Created by Kyuri Jo on 2014-02-05.
Copyright (c) 2014 Kyuri Jo. All rights reserved.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import math
import random
import copy
import numpy as np
import scipy.stats as stats
import TRAP
colorCode = ["#FFAAAA", "#FF5555", "#FF0000", "#AAAAFF", "#5555FF", "#0000FF"]
def new_hypergeom_sf(k, *args, **kwds):
(M, n, N) = args[0:3]
try:
return stats.hypergeom.sf(k, *args, **kwds)
except Exception as inst:
if k >= n and type(inst) == IndexError:
return 0 ## or conversely 1 - hypergeom.cdf(k, *args, **kwds)
else:
raise inst
def calPF_one(g, wgene, redic, PFdic, recur) :
if (g in redic) :
PFsum = 0
for alist in redic[g] :
if (alist[0] not in PFdic) :
if (alist[0] in recur) :
PFdic[alist[0]]=wgene[alist[0]]
else :
recur.add(g)
calPF_one(alist[0], wgene, redic, PFdic, recur)
PFsum = PFsum + alist[2]*(PFdic[alist[0]]/alist[1])
PFdic[g]=PFsum+wgene[g]
else :
PFdic[g]=wgene[g]
def pickColor(fc, cut) :
if (fc==0) :
return "#FFFFFF"
elif (fc>0) :
index = int(fc/(cut/2))
if (index >2) :
index =2
return colorCode[index]
else :
index = int(abs(fc)/(cut/2))+3
if (index >5) :
index =5
return colorCode[index]
def pathwayAnalysis(outPath, fileN, wgene, wredic, DEG, DEGCut, idDic, pnameDic, ind) :
tA = []
status = []
pORA = []
pOFDR = []
pPERT = []
pG = []
pFDR = []
pMIX = []
for i in range(0, fileN) :
tA.append(0)
status.append("")
pORA.append(0)
pOFDR.append(0)
pPERT.append(0)
pG.append(0)
pFDR.append(0)
pMIX.append(0)
if wredic[i]=={} :
continue
# pPERT
# Calculation of PF
tempPF = {}
recur = set()
PFsum = 0
for gene in wgene[i] :
calPF_one(gene, wgene[i], wredic[i], tempPF, recur)
status[i] = sum(tempPF.values())
currtA = sum(tempPF.values())-sum(wgene[i].values())
tA[i] = currtA
# Calculation of tA from H0
nulltA = []
repeat = 2000
tempFC = copy.copy(wgene[i])
sh = tempFC.values()
recur = set()
for j in range(0, repeat) :
randPF = {}
random.shuffle(sh)
for key, value in tempFC.iteritems() :
tempFC[key]=sh[random.randint(0, len(tempFC)-1)]
for g in tempFC :
calPF_one(g, tempFC, wredic[i], randPF, recur)
nulltA.append(sum(randPF.values())-sum(tempFC.values()))
def above(x):
return round(x, 5)>=round(currtA, 5)
def below(x):
return round(x, 5)<=round(currtA, 5)
avgtA = np.median(nulltA)
if (currtA >=avgtA) :
pPERT[i]=float(len(filter(above, nulltA)))/float(repeat)
else :
pPERT[i]=float(len(filter(below, nulltA)))/float(repeat)
if status[i]>=0 :
status[i]="Activated"
else :
status[i]="Inhibited"
# pORA
genesum = {}
DEGsum = set()
for i in range(0, fileN) :
genesum.update(wgene[i])
DEGsum = DEGsum.union(DEG[i])
totG = len(genesum)
totD = len(DEGsum)
for i in range(0, fileN) :
pORA[i]=new_hypergeom_sf(len(DEG[i]), totG, totD, len(wgene[i]), loc=0)
# pG
for i in range(0, fileN) :
c = pORA[i]*pPERT[i]
if (c==0) :
pG[i]==0
else :
pG[i] = c-c*math.log(c)
pFDR = TRAP.cal_FDR(pG)
pOFDR = TRAP.cal_FDR(pORA)
for i in range(0, fileN) :
if (wredic[i]=={}) :
pMIX[i]=pOFDR[i]
else :
pMIX[i]=pFDR[i]
# Text result
outDEG = open(outPath+"_DEG.txt", "w")
for gene in DEGsum :
if (gene in idDic) :
outDEG.write(idDic[gene][0]+"\n")
else :
outDEG.write(gene+"\n")
outDEG.close()
outColor = open(outPath+"_color.txt", "w")
for g,fc in genesum.iteritems() :
outColor.write(g+"\t"+pickColor(fc, DEGCut)+"\n")
outColor.close()
outPathway = open(outPath+"_pathway.txt", "w")
outPathway.write("PathwayID\tPathwayName \tGeneNum\tDEGNum\tpORA\tpORAfdr\ttA\tpPERT\tpG\tpG_FDR\tStatus\n")
sortedkey = sorted(ind, key = lambda x : pMIX[ind[x]])
for sk in sortedkey :
i = ind[sk]
pathwayName = ""
if (sk in pnameDic) :
pathwayName = pnameDic[sk]
nameLen = len(pathwayName)
if (nameLen<15) :
pathwayName = pathwayName+TRAP.addStr(18-nameLen)
else :
pathwayName = pathwayName[0:15]+"..."
if (wredic[i]=={}) :
outPathway.write(sk+"\t"+pathwayName+"\t"+str(len(wgene[i]))+"\t"+str(len(DEG[i]))+"\t"+str(round(pORA[i],3))+"\t"+str(round(pOFDR[i], 3))+"\t.\t.\t.\t.\t.\n")
else :
outPathway.write(sk+"\t"+pathwayName+"\t"+str(len(wgene[i]))+"\t"+str(len(DEG[i]))+"\t"+str(round(pORA[i],3))+"\t"+str(round(pOFDR[i], 3))+"\t"+str(round(tA[i],3))+"\t"+str(round(pPERT[i],3))+"\t"+str(round(pG[i],3))+"\t"+str(round(pFDR[i],3))+"\t"+status[i]+"\n")
outPathway.close()
| gpl-3.0 | 2,745,135,926,260,084,000 | 30.54359 | 281 | 0.52902 | false | 2.902784 | false | false | false |
PhilippMundhenk/Kindle-Alarm-Clock | mnt/us/alarm/alarmControl.py | 1 | 4394 | from datetime import datetime, timedelta
from threading import Thread, Timer
import time
import os
import pickle
import subprocess
from subprocess import call
from settings import secondsToAutoOff
from alarm import Alarm
from settings import wificontrol
from audioControl import AudioControl
from settings import backupSound
class AlarmControl():
alarms = []
activeAlarm = None
class __AlarmControl:
#def __init__(self):
def __str__(self):
return repr(self)
instance = None
def __init__(self):
if not AlarmControl.instance:
AlarmControl.instance = AlarmControl.__AlarmControl()
def __getattr__(self):
return getattr(self.instance)
def getAlarms(self):
return self.alarms
def deleteAllAlarms(self):
for a in self.alarms:
a.setActive(False)
del self.alarms[:]
self.saveAlarms()
def setAlarms(self, alarmsList):
del self.alarms[:]
self.append(alarmsList)
self.saveAlarms()
def addAlarm(self, alarm):
print "addAlarm(): "+str(alarm.weekdays)+", "+str(alarm.hour)+":"+str(alarm.minute)
self.alarms.append(alarm)
self.saveAlarms()
def stopAlarm(self):
print "stopping alarm..."
for x in self.alarms:
print "id: "+str(id(x))
if id(x)==self.activeAlarm and len(x.weekdays)==0:
print "deleting..."
alarms.remove(x)
saveAlarms()
call(["killall", "mplayer"])
def createAlarm(self, hour, minute, weekdays):
print "createAlarm(): "+str(weekdays)+", "+str(hour)+":"+str(minute)
alarmHour=int(hour)
alarmMinute=int(minute)
format="%H:%M"
alarmString=str(alarmHour)+":"+str(alarmMinute)
now = datetime.now()
diff=datetime.strptime(alarmString, format)-now
seconds=diff.seconds
nextRing=datetime.now()+timedelta(seconds=seconds)
if len(weekdays) == 0:
newAlarm = Alarm([], alarmHour, alarmMinute)
else:
newAlarm = Alarm([int(i) for i in weekdays], alarmHour, alarmMinute)
self.alarms.append(newAlarm)
self.saveAlarms()
t=Thread(target=AlarmControl().ringIn, args=[seconds, newAlarm])
t.start()
return newAlarm
def WifiOn(self):
global wificontrol
if wificontrol:
#Need to turn off WiFi via Kindle Framework first, so that it auto connects when turning on
call(["lipc-set-prop", "com.lab126.cmd", "wirelessEnable", "0"])
time.sleep(30)
call(["lipc-set-prop", "com.lab126.cmd", "wirelessEnable", "1"])
call(["ifup", "wlan0"])
time.sleep(10)
def WifiOff(self):
global wificontrol
if wificontrol:
time.sleep(5)
call(["ifdown", "wlan0"])
#Better do not use propper WiFi off here, will trigger UI elements:
# call(["lipc-set-prop", "com.lab126.cmd", "wirelessEnable", "0"])
def saveAlarms(self):
if os.path.exists('/mnt/us/alarm/alarms.bak'):
os.remove('/mnt/us/alarm/alarms.bak')
afile = open(r'/mnt/us/alarm/alarms.bak', 'wb')
pickle.dump(self.alarms, afile)
afile.close()
def stopRingIn(self, i):
time.sleep(i)
self.stopAlarm()
def ringIn(self, i, alarm):
global stream
global secondsToAutoOff
time.sleep(i-20)
#print "today: "+str(datetime.today().weekday())
#print "days: "+str(alarm.weekdays)
if not alarm.getActive():
print "alarm deactivated, exiting..."
return
if len(alarm.weekdays) > 0:
if not datetime.today().weekday() in alarm.weekdays:
seconds = 24*60*60;
t=Thread(target=AlarmControl().ringIn, args=[seconds, alarm])
t.start()
print "seconds: "+str(seconds)
print "alarm for: days: "+str(alarm.weekdays)+" "+str(alarm.hour)+":"+str(alarm.minute)+" ("+str(seconds)+"seconds)"
return
print "preparing alarm..."
self.activeAlarm=id(alarm)
self.WifiOn()
AudioControl.phaseIn(1)
Thread(target=AlarmControl().stopRingIn, args=[secondsToAutoOff]).start()
print "waiting for check..."
time.sleep(10)
#ToDo: move this to thread? What if mplayer/wget/pipe cache hangs and there is no sound output? How to detect?
if(AudioControl.isMplayerRunning()==""):
command = "/mnt/us/mplayer/mplayer -loop 0 "+backupSound+" &"
os.system(command)
#self.alarms.remove(alarm)
#self.saveAlarms()
if len(alarm.weekdays) > 0:
#check in 24h if ring is required
seconds = 24*60*60;
t=Thread(target=AlarmControl().ringIn, args=[seconds, alarm])
t.start()
print "seconds: "+str(seconds)
print "alarm for: days "+str(alarm.weekdays)+" "+str(alarm.hour)+":"+str(alarm.minute)
else:
self.alarms.remove(alarm) | mit | 715,075,976,977,626,500 | 26.12963 | 120 | 0.684342 | false | 2.94504 | false | false | false |
MishtuBanerjee/xaya | xaya/xayabee.py | 1 | 3044 | #!/usr/bin/env python
"""
xayabee: a little dose of bee genetics ...
BeginDate:2012
CurrentRevisionDate:20150324
Development Version : core 001
Release Version: pre-release
Author(s): Mishtu Banerjee, Robin Owens
Contact: [email protected]
Copyright: 2012-2015, The Authors
License: Distributed under MIT License
[http://opensource.org/licenses/mit-license.html]
Original Environment: Programmed and tested under Python 2.7.X
Dependencies:
Python Interpreter and base libraries.
xaya: xayacore, xaystats
"""
import xayastats
def genHaploid(numberofalleles= 0):
"""
Given a number of sex alleles, randomly generate a haploid genotype.
"""
#Set internal variable from parameters
alleles = numberofalleles
# Randomly generate haploid
haploid = xayastats.diceroll(1, alleles)
return (haploid) # return haploid as a 1 item tuple
def genDiploid(numberofalleles=0):
"""
"""
alleles = numberofalleles
diploid = (xayastats.diceroll(1,alleles), xayastats.diceroll(1,alleles))
return diploid # return dipoid as a two item tuple
def createPop(numberalleles= 0, dippopsize=0, happopsize=0):
"""
Build haploid and diploic population given alleles, number of diploids, number haploids
"""
# Set internal variables from parameters
alleles = numberalleles
diploids = dippopsize
haploids = happopsize
# Build a list of haploids
haploidcounter = range(haploids)
haploidslist = []
for bee in haploidcounter:
haploidslist.append(genHaploid(alleles))
# Build a list of diploids
diploidcounter = range(diploids)
diploidlist = []
for beecouple in diploidcounter:
diploidlist.append(genDiploid(alleles))
return [haploidslist, diploidlist]
# Next we must build up a dictonary where keys are tuples and
# the values are counts.
# Later can make the values more complicated as lists, dicts.
# Give Robin the choice -- and ask him how he likes it.
# if lists -- can have multiple properties
# if values -- can have named properties:"COUNT"; MUTATION RATE
def summarizePop(poplist=[]):
"""
Creates a summary table of the bee population
"""
mypop=poplist
myhaploids=poplist[0]
mydiploids=poplist[1]
myhaptable = xayastats.histograph(myhaploids)
mydiptable=xayastats.histograph(mydiploids)
return [myhaptable, mydiptable]
def findHomozygotes(diptable={}):
"""
Given a summary table of diploids, finds those
which are homozygous
"""
mydiptable=diptable
homozygouslist=[]
mydipkeys=mydiptable.keys()
for key in mydipkeys:
if key[0]==key[1]:
homozygouslist.append(key)
homozygtable = {}
for key in homozygouslist:
homozygtable[key] = mydiptable[key]
return homozygtable
def countPopulation(poptable):
"""
Counts all indivuals in a population; can be applied to
a diploid, haploid, or homzygotes table
"""
mypoptable = poptable
vals = mypoptable.values()
vals2 = []
for item in vals:
vals2.append(item[0])
popsum = sum(vals2)
return popsum
# Create a function checkforHomozygotes
# Get population as a dictionary where keys are alleles and values are counts | mit | 2,082,932,308,635,575,300 | 25.946903 | 88 | 0.752957 | false | 2.749774 | false | false | false |
hunter-cameron/Bioinformatics | python/gbk_get_entry_by_locusid.py | 1 | 2464 |
from Bio import SeqIO
import sys
import re
def get_entries_by_locid(gbk_file, id_list):
print(id_list)
results = dict()
for record in SeqIO.parse(open(gbk_file,"r"), "genbank"): # record == contig
for feature in record.features: # all annotations per contig
if feature.type == "CDS": # get only CDS
#sys.exit()
if "locus_tag" in feature.qualifiers: # check if CDS has a locus tag (it should)
if feature.qualifiers['locus_tag'][0] in id_list: # check if locus tag is on the list
results[feature.qualifiers['locus_tag'][0]] = {
"location": feature.location,
"product": feature.qualifiers['product'][0],
}
#sys.exit()
return results
def read_locid_list(id_file):
""" Returns a list of sorted ids from a file """
with open(id_file, 'r') as IN:
return sorted([line[:-1] for line in IN])
if __name__ == "__main__":
id_file = sys.argv[1]
gbk_file = sys.argv[2]
id_list = []
with open(id_file, 'r') as IN:
for line in IN:
qry = line.split("\t")[0]
loctag = qry.split(" ")[0]
id_list.append(loctag)
id_info = get_entries_by_locid(gbk_file, id_list)
for tag in id_list:
if tag not in id_info:
print("Locus tag '{}' not found.".format(tag))
with open(id_file, 'r') as IN, open("final_matrix.txt", 'w') as OUT:
for line in IN:
if line.startswith("qry"):
OUT.write("\t".join(["locus_tag", "contig", "contig_length", "start", "end", "strand", "product", "closest_match", "perc_id", "aln_length", "query_cov", "closest_match_cov", "bitscore"]) + "\n")
else:
elements = line[:-1].split("\t")
qry_info = elements[0].split(" ")
locid = qry_info[0]
contig = qry_info[2]
m = re.search("len_(?P<length>\d+)_", contig)
contig_length = m.group("length")
OUT.write("\t".join([locid, contig, contig_length, str(id_info[locid]['location'].start), str(id_info[locid]['location'].end), str(id_info[locid]['location'].strand), id_info[locid]['product'], "\t".join(elements[1:])]) + "\n")
| mit | -884,123,956,013,676,300 | 33.222222 | 243 | 0.500812 | false | 3.5 | false | false | false |
jstapleton/Maynard-2015 | mutationCounter.py | 1 | 13150 | #!/usr/bin/env python
#############################################################################
# mutationCounter.py
# 2015 James A. Stapleton, Justin R. Klesmith
#
# This program takes short reads from shotgun sequencing of mutant
# libraries and creates FASTQ files compatible with ENRICH.
#
# The program fills in wild-type sequence around the short reads
# to create full-length sequences, and puts in a made-up
# quality score.
#
# Overlapping read pairs are merged by FLASH.
#
#############################################################################
import argparse
import subprocess
import os
import itertools
from Bio.SeqIO.QualityIO import FastqGeneralIterator
from Bio.Emboss.Applications import WaterCommandline
def main(forward_paired, forward_unpaired, reverse_paired, reverse_unpaired):
fakeFASTQ = ''
notAligned = 0
wrongLength = 0
# open file containing wild-type sequence and pull it out as a string
wt = wtParser()
# take trimmed paired-end FASTQ files as input
# run FLASH to combine overlapping read pairs
# or remove this line and add to shell script
subprocess.call(["flash", "-M", "140", "-t", "1", forward_paired, reverse_paired])
# merged read pairs
notAligned = align_and_index('out.extendedFrags.fastq', notAligned, '')
with open("fakeFASTQ.fastq", "w") as fakeFASTQ:
with open('indexes.txt', 'rU') as indexes:
with open('out.extendedFrags.fastq', 'rU') as merged:
f_iter = FastqGeneralIterator(merged)
for (title, seq, qual), indexline in itertools.izip(f_iter, indexes):
index1, index2, rc_flag = indexline.split()
# print title, seq, qual, index1, index2, rc_flag
if int(index1) and int(index2):
if int(rc_flag):
seq = revcomp(seq)
fakeSeq = buildFakeSeq(seq, 0, wt, index1, index2, 0, 0)
if len(fakeSeq) != len(wt):
wrongLength += 1
# print fakeSeq
# print rc_flag, seq, index1, index2
continue
fakeFASTQwriter(fakeSeq, title, fakeFASTQ)
notAligned = align_and_index(forward_unpaired, notAligned, '')
with open("fakeFASTQ.fastq", "a") as fakeFASTQ:
with open('indexes.txt', 'rU') as indexes:
with open(forward_unpaired, "rU") as merged:
f_iter = FastqGeneralIterator(merged)
for (title, seq, qual), indexline in itertools.izip(f_iter, indexes):
index1, index2, rc_flag = indexline.split()
if int(index1) and int(index2):
if int(rc_flag):
seq = revcomp(seq)
fakeSeq = buildFakeSeq(seq, 0, wt, index1, index2, 0, 0)
if len(fakeSeq) != len(wt):
wrongLength += 1
continue
fakeFASTQwriter(fakeSeq, title, fakeFASTQ)
notAligned = align_and_index(reverse_unpaired, notAligned, '')
with open("fakeFASTQ.fastq", "a") as fakeFASTQ:
with open('indexes.txt', 'rU') as indexes:
with open(reverse_unpaired, "rU") as merged:
f_iter = FastqGeneralIterator(merged)
for (title, seq, qual), indexline in itertools.izip(f_iter, indexes):
index1, index2, rc_flag = indexline.split()
if int(index1) and int(index2):
if int(rc_flag):
seq = revcomp(seq)
fakeSeq = buildFakeSeq(seq, 0, wt, index1, index2, 0, 0)
if len(fakeSeq) != len(wt):
wrongLength += 1
continue
fakeFASTQwriter(fakeSeq, title, fakeFASTQ)
notAligned = align_and_index('out.notCombined_1.fastq', notAligned, '_F')
notAligned = align_and_index('out.notCombined_2.fastq', notAligned, '_R')
# unmerged (non-overlapping) read pairs
with open("fakeFASTQ.fastq", "a") as fakeFASTQ:
with open("indexes_F.txt", 'rU') as indexes_F:
with open("indexes_R.txt", 'rU') as indexes_R:
with open("out.notCombined_1.fastq", 'rU') as unmerged_F:
with open("out.notCombined_2.fastq", 'rU') as unmerged_R:
f_iter = FastqGeneralIterator(unmerged_F)
r_iter = FastqGeneralIterator(unmerged_R)
for (title, seq, qual), (title_R, seq_R, qual_R), indexline_F, indexline_R in itertools.izip(f_iter, r_iter, indexes_F, indexes_R):
index1, index2, rc_flag_F = indexline_F.split()
index3, index4, rc_flag_R = indexline_R.split()
if int(index1) and int(index2) and int(index3) and int(index4):
if int(rc_flag_F):
seq = revcomp(seq)
if int(rc_flag_R):
seq_R = revcomp(seq_R)
fakeSeq = buildFakeSeq(seq, seq_R, wt, index1, index2, index3, index4)
if len(fakeSeq) != len(wt):
wrongLength += 1
# print fakeSeq
# print index1, index2, index3, index4
# print seq, seq_R
continue
fakeFASTQwriter(fakeSeq, title, fakeFASTQ)
print notAligned, wrongLength
return 0
######## Function definitions ##############
def revcomp(seq):
"""Returns the reverse complement of a DNA sequence."""
COMPLEMENT_DICT = {'A': 'T', 'G': 'C', 'T': 'A', 'C': 'G', 'N': 'N'}
rc = ''.join([COMPLEMENT_DICT[base] for base in seq])[::-1]
return rc
def buildFakeSeq(seq_F, seq_R, wt, index1, index2, index3, index4):
"""Builds a fake full-length DNA sequence line consisting of one
merged read or two short reads filled in with wild-type sequence.
"""
index1 = int(index1)
index2 = int(index2)
index3 = int(index3)
index4 = int(index4)
if seq_R:
diff = 0
if index1 < index3:
if index2 > index3 - 1:
diff = index2 - index3 + 1
index2 = index3 - 1
fakeRead = wt[:index1 - 1] + seq_F + wt[index2:index3 - 1] + seq_R[diff:] + wt[index4:]
else:
if index4 > index1 - 1:
diff = index4 - index1 + 1
index4 = index1 -1
fakeRead = wt[:index3 - 1] + seq_R + wt[index4:index1 - 1] + seq_F[diff:] + wt[index2:]
else:
fakeRead = wt[:index1-1] + seq_F + wt[index2:]
return fakeRead.upper()
def index_finder(line):
"""Searches the water output line
for alignment position indexes.
"""
index = 0
if len(line.split()) > 1:
if line.split()[1] == 'al_start:':
index = int(line.split()[2])
elif line.split()[1] == 'al_stop:':
index = int(line.split()[2])
return index
def Ntest(seq):
"""Trims sequences with N's.
Removes N from the first position,
truncates the sequence at subsequent N's.
"""
if seq[0] == 'N':
seq = seq[1:]
Ntest = 0
for i, ch in enumerate(seq):
if ch == 'N':
Ntest = 1
break
if Ntest == 1:
seq = seq[:i-1]
return seq
def runWater(fastq, out):
"""Removes existing water.txt file,
generates a water command line, and runs water.
"""
if os.path.isfile(out):
os.remove(out)
water_cline = WaterCommandline(asequence="wt.fasta", bsequence=fastq, gapopen=10, gapextend=0.5, outfile=out, aformat='markx10')
stdout, stderr = water_cline()
return 0
def wtParser():
"""Takes a wild-type DNA sequence in FASTA format
and reads it into a string.
"""
with open('wt.fasta', 'rU') as wildtype:
wildtype = wildtype.read()
if wildtype[0] == ">":
wildtype = wildtype.split('\n', 1)[1]
wt = ''.join([line.strip() for line in wildtype.split('\n')])
return wt
def identity_finder(line):
identity = 0
if len(line.split()) > 1:
if line.split()[1] == 'Identity:':
identity = line.split()[3]
identity = identity[1:4]
return identity
def align_and_index(fastq, notAligned, suffix):
"""Runs a pipeline to align a sequence (merged or unmerged
sequencing reads) to a wild-type reference with the EMBOSS
water local alignment program, align the reverse complement
if the first alignment was poor, and parse and return the
wt positions where the alignment begins and ends.
"""
# generate water command line and call it
runWater(fastq, 'water_fwd.txt')
# reverse-complement the reads in fastq
with open('fastq_rc.fastq', 'w') as reverse:
with open(fastq, 'rU') as forward:
next_line_is_seq = 0
for line in forward:
if next_line_is_seq:
line_rc = revcomp(line.strip())
reverse.write(line_rc + '\n')
next_line_is_seq = 0
elif line[0] == '@':
next_line_is_seq = 1
reverse.write(line)
else:
reverse.write(line)
# run water on the reverse complements
runWater('fastq_rc.fastq', 'water_rc.txt')
# Write only the index
# and identity lines to new files
with open('water_fwd.txt', 'rU') as forward:
with open('water_fwd_indexes.txt', 'w') as forward_index_lines:
for line in forward:
if identity_finder(line) or index_finder(line):
forward_index_lines.write(line)
with open('water_rc.txt', 'rU') as forward:
with open('water_rc_indexes.txt', 'w') as forward_index_lines:
for line in forward:
if identity_finder(line) or index_finder(line):
forward_index_lines.write(line)
# Check whether the read was in the right orientation:
# Iterate over the water outfiles and pick the best match
# Write the alignment start and stop of the best matches
with open('water_fwd_indexes.txt', 'rU') as forward:
with open('water_rc_indexes.txt', 'rU') as reverse:
with open('indexes' + suffix + '.txt', 'w') as outfile:
find_index_F = 0
find_index_R = 0
index1 = 0
index2 = 0
for line_F, line_R in itertools.izip(forward, reverse):
if not find_index_F and not find_index_R:
identity_F = identity_finder(line_F)
identity_R = identity_finder(line_R)
if float(identity_F) > 90:
find_index_F = 1
rev_flag = 0
elif float(identity_R) > 90:
find_index_R = 1
rev_flag = 1
elif identity_F and identity_R:
outfile.write('0 0 0\n')
notAligned += 1
elif find_index_F:
if not index1 and not index2:
index1 = index_finder(line_F)
elif index1:
index2 = index_finder(line_F)
outfile.write(str(index1) + ' ' + str(index2) + ' ' + str(rev_flag) + '\n')
find_index_F = 0
index1 = 0
index2 = 0
elif find_index_R:
if not index1 and not index2:
index1 = index_finder(line_R)
elif index1:
index2 = index_finder(line_R)
outfile.write(str(index1) + ' ' + str(index2) + ' ' + str(rev_flag) + '\n')
find_index_R = 0
index1 = 0
index2 = 0
return notAligned
def fakeFASTQwriter(fakeSeq, title, handle):
"""Writes the four lines of a fake FASTQ."""
handle.write('@' + title + '\n')
handle.write(fakeSeq + '\n')
handle.write('+\n')
fakeQual = ''.join(['A' for ch in fakeSeq])
handle.write(fakeQual + '\n')
return 0
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('forward_paired')
parser.add_argument('forward_unpaired')
parser.add_argument('reverse_paired')
parser.add_argument('reverse_unpaired')
args = parser.parse_args()
main(args.forward_paired, args.forward_unpaired, args.reverse_paired, args.reverse_unpaired)
| mit | -2,058,295,108,188,137,700 | 40.09375 | 155 | 0.516502 | false | 3.937126 | false | false | false |
raphaeldussin/EZmovie | EZmovie/ezmovie_plots.py | 1 | 3423 | import matplotlib.pylab as plt
import matplotlib.cm as cm
import matplotlib.colors as mc
from mpl_toolkits.basemap import Basemap
import numpy as np
def setup_contour(vmin,vmax,ncontours):
''' set the contours and norm '''
plotcmin = float(vmin)
plotcmax = float(vmax)
stepticks = (plotcmax - plotcmin) / 10.
ticks = np.arange(plotcmin,plotcmax+stepticks,stepticks)
step = (plotcmax - plotcmin) / ncontours
contours = np.arange(plotcmin,plotcmax+step,step)
norm = mc.Normalize(vmin=plotcmin, vmax=plotcmax)
return contours, norm, ticks
def setup_colorbar_fmt(data):
''' setup the format for colorbar '''
if data.max() < 0.1:
cbarfmt = '%.1e'
elif data.max() > 10.:
cbarfmt = '%.0f'
else:
cbarfmt = '%.2f'
return cbarfmt
def plot_map(ax,diag,coord1,coord2,data,current_date=None,current_step=None):
''' single plot '''
contours, norm, ticks = setup_contour(diag['vmin'],diag['vmax'],40)
cbarfmt = setup_colorbar_fmt(data)
# background
if diag['run']['grid'] == 'CCS1':
bmap = Basemap(projection='cyl',llcrnrlat=18,urcrnrlat=51,\
llcrnrlon=219,urcrnrlon=251,resolution='l')
parallels = np.arange(20.,60.,10.)
bmap.drawparallels(parallels,labels=[True,False,False,True])
meridians = np.arange(220.,260.,10.)
bmap.drawmeridians(meridians,labels=[True,False,False,True])
elif diag['run']['grid'] == 'NWA':
bmap = Basemap(projection='cyl',llcrnrlat=5,urcrnrlat=55, llcrnrlon=250, \
urcrnrlon=320,resolution='l')
parallels = np.arange(0.,70.,10.)
bmap.drawparallels(parallels,labels=[1,0,0,0],fontsize=10)
meridians = np.arange(240.,340.,10.)
bmap.drawmeridians(meridians,labels=[0,0,0,1],fontsize=10)
else:
# default : autoguess domain
bmap = Basemap(projection='cyl',llcrnrlat=coord2.min(),urcrnrlat=coord2.max(), \
llcrnrlon=coord1.min(), urcrnrlon=coord1.max(),resolution='c')
bmap.drawcoastlines()
bmap.fillcontinents(color='grey',lake_color='white')
C = ax.contourf(coord1,coord2,data,contours,cmap=diag['pal'],norm=norm,extend='both')
if diag.has_key('cbar_shrink'):
cbar_shrink = diag['cbar_shrink']
else:
cbar_shrink = 1.0
cbar = plt.colorbar(C,format=cbarfmt,shrink=cbar_shrink,ticks=ticks)
fmt = "%Y %m %d"
if current_date is not None:
plt.title(diag['label'] + ' ' + current_date.strftime(fmt))
if current_step is not None:
plt.title(diag['label'] + ' ' + str(current_step))
return ax
def plot_section(ax,diag,coord1,coord2,data,current_date):
''' single plot '''
contours, norm, ticks = setup_contour(diag['vmin'],diag['vmax'],40)
cbarfmt = setup_colorbar_fmt(data)
ax.set_axis_bgcolor('grey')
C = ax.contourf(coord1,coord2,data,contours,cmap=diag['pal'],norm=norm,extend='both')
if diag.has_key('max_depth'):
ax.set_ylim(-diag['max_depth'],coord2[-1].min())
cbar = plt.colorbar(C,format=cbarfmt,shrink=0.8,ticks=ticks)
fmt = "%Y %m %d"
plt.title(diag['label'] + ' ' + current_date.strftime(fmt))
return ax
| gpl-3.0 | 2,947,154,667,678,862,300 | 40.743902 | 96 | 0.595384 | false | 3.094937 | false | false | false |
seanegoodwin/PandaViewer | PandaViewer/utils.py | 1 | 6258 | import os
import re
import sys
import hashlib
from sqlalchemy.engine import ResultProxy
from typing import List, Dict, Any, Tuple, Iterable, Optional
from PandaViewer.logger import Logger
class Utils(Logger):
ensure_trailing_sep = lambda x: x if x[-1] == os.path.sep else x + os.path.sep
@staticmethod
def convert_result(result: ResultProxy) -> List:
return list(map(dict, result))
@classmethod
def convert_from_relative_path(cls, path: str = "") -> str:
folder = os.path.dirname(__file__)
# TODO: Ugly hack please fix. Used to tell if program is running from source or not
if not os.path.exists(Utils.normalize_path(os.path.join(folder, __file__))):
folder = os.path.dirname(folder)
return cls.normalize_path(os.path.join(folder, path))
@classmethod
def convert_from_relative_lsv_path(cls, path: str = "") -> str:
portable_path = cls.convert_from_relative_path(os.path.join(".lsv", path))
if os.path.exists(portable_path):
return portable_path
else:
return cls.normalize_path(os.path.join("~/.lsv", path))
@classmethod
def path_exists_under_directory(cls, main_directory: str, sub_directory: str) -> bool:
main_directory = cls.ensure_trailing_sep(cls.normalize_path(main_directory))
sub_directory = cls.ensure_trailing_sep(cls.normalize_path(sub_directory))
return sub_directory.startswith(main_directory)
@classmethod
def get_parent_folder(cls, candidates: List[str], folder: str) -> str:
candidates = map(cls.normalize_path, candidates)
candidates = [c for c in candidates
if cls.path_exists_under_directory(c, folder)]
return max(candidates, key=len)
@staticmethod
def file_has_allowed_extension(check_file: str, allowed_extensions: List[str]) -> bool:
allowed_extensions = [ext.lower() for ext in allowed_extensions]
ext = os.path.splitext(check_file)[-1].lower()
return ext in allowed_extensions
@staticmethod
def normalize_path(path: str) -> str:
return os.path.normpath(os.path.realpath(os.path.expanduser(path)))
@staticmethod
def convert_to_qml_path(path: str) -> str:
base_string = "file://"
if os.name == "nt":
base_string += "/"
return base_string + path
@classmethod
def reduce_gallery_duplicates(cls, duplicate_map):
cls = cls()
for galleries in duplicate_map.values():
paths = [cls.normalize_path(gallery.location) for gallery in galleries]
assert len(paths) == len(set(paths))
method_names = ["has_ex_metadata", "has_custom_metadata", "is_archive_gallery"]
for method_name in method_names:
if any(getattr(gallery, method_name)() for gallery in galleries):
cls.logger.info("Applying method: %s" % method_name)
cls.logger.debug("Before galleries: %s" % galleries)
filtered_galleries = []
for gallery in galleries:
if not getattr(gallery, method_name)():
gallery.mark_for_deletion()
else:
filtered_galleries.append(gallery)
galleries = filtered_galleries
cls.logger.debug("After galleries: %s" % galleries)
for gallery in galleries[1:]:
gallery.mark_for_deletion()
@classmethod
def generate_hash_from_source(cls, source) -> str:
BUFF_SIZE = 65536
hash_algo = hashlib.sha1()
buff = source.read(BUFF_SIZE)
while len(buff) > 0:
hash_algo.update(buff)
buff = source.read(BUFF_SIZE)
return hash_algo.hexdigest()
@classmethod
def debug_trace(cls):
from PyQt5.QtCore import pyqtRemoveInputHook, pyqtRestoreInputHook
from pdb import set_trace
pyqtRemoveInputHook()
set_trace()
pyqtRestoreInputHook()
@classmethod
def convert_ui_tags(cls, ui_tags: str) -> List[str]:
return list(map(lambda x: x.replace(" ", "_"), cls.convert_csv_to_list(ui_tags)))
@classmethod
def process_ex_url(cls, url: str) -> (str, str):
split_url = url.split("/")
if split_url[-1]:
return int(split_url[-2]), split_url[-1]
else:
return int(split_url[-3]), split_url[-2]
@staticmethod
def convert_list_to_csv(input_list: List) -> str:
return ", ".join(input_list)
@staticmethod
def convert_csv_to_list(csv: str) -> List[str]:
return list(filter(None, map(lambda x: re.sub("^\s+", "", x), csv.split(","))))
@staticmethod
def human_sort_paths(paths: List[str]) -> List[str]:
key = None
if os.name == "nt":
import ctypes
import functools
key = functools.cmp_to_key(ctypes.windll.shlwapi.StrCmpLogicalW)
return sorted(paths, key=key)
@staticmethod
def separate_tag(tag: str) -> (str, Optional[str]):
namespace_regex = re.compile("^(.*):(.*)$")
match = re.search(namespace_regex, tag)
if match:
return match.group(2), match.group(1)
return tag, None
@classmethod
def clean_title(cls, title: str, remove_enclosed: bool = True) -> str:
banned_chars = ("=", "-", ":", "|", "~", "+", "]", "[", ",", ")", "(")
if remove_enclosed:
title = cls.removed_enclosed(title)
title = title.lstrip().lower()
for char in banned_chars:
title = title.replace(char, " ")
return " ".join(title.split())
@staticmethod
def removed_enclosed(input_str: str) -> str:
"""
Removes any values between/including containers (braces, parens, etc)
:param input_str: str to operate on
:return: str with enclosed data removed
"""
pairs = (("{", "}"), ("(", ")"), ("[", "]"))
regex = r"\s*\%s[^%s]*\%s"
for pair in pairs:
input_str = re.sub(regex % (pair[0], pair[0], pair[1]), " ", input_str)
return " ".join(filter(None, input_str.split()))
| gpl-3.0 | 3,753,022,364,863,361,000 | 37.392638 | 91 | 0.583733 | false | 3.776705 | false | false | false |
hb/gnome-bulk-rename | gnome-bulk-rename/preferences.py | 1 | 6523 | # GNOME bulk rename utility
# Copyright (C) 2010-2012 Holger Berndt <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from gi.repository import Gtk
import constants
class Window:
def __init__(self, previews_model, sorting_model, markups_model, markup_changed_cb):
self._window = None
self._previews_model = previews_model
self._sorting_model = sorting_model
self._markups_model = markups_model
self._markup_changed_cb = markup_changed_cb
def show(self):
if self._window is None:
self._setup()
self._window.show_all()
def _setup(self):
self._window = Gtk.Window(type=Gtk.WindowType.TOPLEVEL)
self._window.set_position(Gtk.WindowPosition.MOUSE)
self._window.set_title(_("Bulk Rename Preferences"))
self._window.set_border_width(4)
self._window.set_default_size(450, 400)
vbox = Gtk.VBox.new(False, 0)
self._window.add(vbox)
notebook = Gtk.Notebook()
vbox.pack_start(notebook, True, True, 0)
notebook.append_page(self._setup_extensible_model_tab(self._previews_model), Gtk.Label(label=_("Previewers")))
notebook.append_page(self._setup_extensible_model_tab(self._sorting_model, frozen_entries=["0"]), Gtk.Label(label=_("Sorting")))
notebook.append_page(self._setup_extensible_model_tab(self._markups_model, markup=True), Gtk.Label(label=_("Markup")))
# button box
buttonbox = Gtk.HButtonBox()
buttonbox.set_layout(Gtk.ButtonBoxStyle.END)
buttonbox.set_spacing(12)
vbox.pack_start(buttonbox, False, False, 4)
close_button = Gtk.Button(stock=Gtk.STOCK_CLOSE)
close_button.connect("clicked", lambda button, window : window.hide(), self._window)
buttonbox.add(close_button)
def _setup_extensible_model_tab(self, model, frozen_entries=None, markup=False):
"""If given, frozen_entries is a list of non-modifyable entry paths."""
def toggled_callback(cell, path, model=None, frozen_entries=None):
# ignore if entry is frozen
if frozen_entries and path in frozen_entries:
return
iter = model.get_iter(path)
is_active = not cell.get_active()
if markup:
if not is_active:
return
for row in model:
row[constants.EXTENSIBLE_MODEL_COLUMN_VISIBLE] = False
else:
short_desc = model.get_value(iter, constants.EXTENSIBLE_MODEL_COLUMN_SHORT_DESCRIPTION)
if is_active:
model.set_value(iter, constants.EXTENSIBLE_MODEL_COLUMN_SHORT_DESCRIPTION_MARKUP, short_desc)
else:
model.set_value(iter, constants.EXTENSIBLE_MODEL_COLUMN_SHORT_DESCRIPTION_MARKUP, "".join(['<span color="gray">', short_desc, '</span>']))
model.set_value(iter, constants.EXTENSIBLE_MODEL_COLUMN_VISIBLE, is_active)
if markup:
self._markup_changed_cb(model.get_path(iter))
def on_selection_changed(selection, infobutton):
(model, iter) = selection.get_selected()
if iter:
previewclass = model.get_value(iter, constants.EXTENSIBLE_MODEL_COLUMN_OBJECT)
infobutton.set_sensitive(hasattr(previewclass, "description"))
else:
infobutton.set_sensitive(False)
def on_info_button_clicked(button, treeview):
(model, iter) = treeview.get_selection().get_selected()
previewclass = model.get_value(iter, constants.EXTENSIBLE_MODEL_COLUMN_OBJECT)
dlg = Gtk.MessageDialog(parent=self._window, flags=Gtk.DialogFlags.DESTROY_WITH_PARENT, message_type=Gtk.MessageType.INFO, buttons=Gtk.ButtonsType.CLOSE, message_format=model.get_value(iter, constants.EXTENSIBLE_MODEL_COLUMN_SHORT_DESCRIPTION))
dlg.format_secondary_markup(previewclass.description)
dlg.connect("response", lambda dlg, response_id : dlg.destroy())
dlg.show_all()
tab_vbox = Gtk.VBox.new(False, 0)
tab_vbox.set_border_width(12)
scrolledwin = Gtk.ScrolledWindow()
scrolledwin.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
scrolledwin.set_shadow_type(Gtk.ShadowType.ETCHED_IN)
tab_vbox.pack_start(scrolledwin, True, True, 0)
treeview = Gtk.TreeView(model=model)
treeview.set_headers_visible(False)
scrolledwin.add(treeview)
textrenderer = Gtk.CellRendererText()
togglerenderer = Gtk.CellRendererToggle()
togglerenderer.set_radio(markup)
togglerenderer.set_property("activatable", True)
togglerenderer.connect('toggled', toggled_callback, model, frozen_entries)
# column "active"
column = Gtk.TreeViewColumn(None, togglerenderer, active=constants.EXTENSIBLE_MODEL_COLUMN_VISIBLE)
treeview.append_column(column)
# column "original"
column = Gtk.TreeViewColumn(None, textrenderer, markup=constants.EXTENSIBLE_MODEL_COLUMN_SHORT_DESCRIPTION_MARKUP)
column.set_expand(True)
treeview.append_column(column)
# information button
buttonbox = Gtk.HButtonBox()
buttonbox.set_layout(Gtk.ButtonBoxStyle.END)
buttonbox.set_spacing(12)
tab_vbox.pack_start(buttonbox, False, False, 8)
button = Gtk.Button(stock=Gtk.STOCK_INFO)
button.set_sensitive(False)
button.connect("clicked", on_info_button_clicked, treeview)
buttonbox.add(button)
selection = treeview.get_selection()
selection.connect("changed", on_selection_changed, button)
return tab_vbox
| lgpl-2.1 | 6,412,260,742,002,760,000 | 44.298611 | 256 | 0.650161 | false | 3.901316 | false | false | false |
LukeB42/Emissary | emissary/resources/feedgroups.py | 1 | 9800 | # _*_ coding: utf-8 _*_
# This file provides the HTTP endpoints for operating on groups of feeds.
from emissary import app, db
from flask import request
from flask.ext import restful
from sqlalchemy import and_, desc
from emissary.resources.api_key import auth
from emissary.models import FeedGroup, Feed, Article
from emissary.controllers.cron import CronError, parse_timings
from emissary.controllers.utils import cors, gzipped, make_response
class FeedGroupCollection(restful.Resource):
@cors
@gzipped
def get(self):
"""
Paginate an array of feed groups
associated with the requesting key.
"""
key = auth()
parser = restful.reqparse.RequestParser()
parser.add_argument("page", type=int, default=1)
parser.add_argument("per_page", type=int, default=10)
parser.add_argument("content", type=bool, default=None)
args = parser.parse_args()
query = FeedGroup.query.filter(FeedGroup.key == key)\
.order_by(desc(FeedGroup.created)).paginate(args.page, args.per_page)
return make_response(request.url, query)
@cors
@gzipped
def put(self):
"""
Create a new feed group, providing the name isn't already in use.
"""
key = auth(forbid_reader_keys=True)
parser = restful.reqparse.RequestParser()
parser.add_argument("name", type=str, required=True)
parser.add_argument("active", type=bool, default=True, help="Feed is active", required=False)
args = parser.parse_args()
# Check for this name already existing in the groups on this key
if [fg for fg in key.feedgroups if fg.name == args.name]:
return {"message":"Feed group %s already exists." % args.name}, 304
fg = FeedGroup(name=args.name, active=args.active)
key.feedgroups.append(fg)
db.session.add(fg)
db.session.add(key)
db.session.commit()
return fg.jsonify(), 201
class FeedGroupResource(restful.Resource):
@cors
@gzipped
def get(self, groupname):
"""
Review a specific feed group.
"""
key = auth()
fg = FeedGroup.query.filter(and_(FeedGroup.key == key, FeedGroup.name == groupname)).first()
if not fg:
restful.abort(404)
return fg.jsonify()
@cors
@gzipped
def put(self, groupname):
"""
Create a new feed providing the name and url are unique.
Feeds must be associated with a group.
"""
key = auth(forbid_reader_keys=True)
parser = restful.reqparse.RequestParser()
parser.add_argument("name", type=str, required=True)
parser.add_argument("url", type=str, required=True)
parser.add_argument("schedule", type=str, required=True)
parser.add_argument("active", type=bool, default=True, help="Feed is active", required=False)
args = parser.parse_args()
fg = FeedGroup.query.filter(and_(FeedGroup.key == key, FeedGroup.name == groupname)).first()
if not fg:
return {"message":"Unknown Feed Group %s" % groupname}, 304
# Verify the schedule
try:
parse_timings(args.schedule)
except CronError, err:
return {"message": err.message}, 500
# Check the URL isn't already scheduled on this key
if [feed for feed in key.feeds if feed.url == args.url]:
return {"message": "A feed on this key already exists with this url."}, 500
# Check the name is unique to this feedgroup
if [feed for feed in fg.feeds if feed.name == args.name]:
return {"message": "A feed in this group already exists with this name."}, 500
feed = Feed(name=args.name, url=args.url, schedule=args.schedule, active=args.active)
# We generally don't want to have objects in this system that don't belong to API keys.
fg.feeds.append(feed)
key.feeds.append(feed)
db.session.add(feed)
db.session.add(fg)
db.session.add(key)
db.session.commit()
feed = Feed.query.filter(and_(Feed.key == key, Feed.name == args.name)).first()
if not feed:
return {"message":"Error saving feed."}, 304
# Schedule this feed. 0 here is a response
# queue ID (we're not waiting for a reply)
app.inbox.put([0, "start", [key,feed.name]])
return feed.jsonify(), 201
@cors
@gzipped
def post(self, groupname):
"Rename a feedgroup or toggle active status"
key = auth(forbid_reader_keys=True)
parser = restful.reqparse.RequestParser()
parser.add_argument("name", type=str, help="Rename a feed group",)
parser.add_argument("active", type=bool, default=None)
args = parser.parse_args()
fg = FeedGroup.query.filter(
and_(FeedGroup.key == key, FeedGroup.name == groupname)
).first()
if not fg:
restful.abort(404)
if args.name:
if FeedGroup.query.filter(
and_(FeedGroup.key == key, FeedGroup.name == args.name)
).first():
return {"message":"A feed already exists with this name."}, 304
fg.name = args.name
if args.active or args.active == False:
fg.active = args.active
db.session.add(fg)
db.session.commit()
return fg.jsonify()
@cors
@gzipped
def delete(self, groupname):
key = auth(forbid_reader_keys=True)
fg = FeedGroup.query.filter(and_(FeedGroup.key == key, FeedGroup.name == groupname)).first()
if not fg:
restful.abort(404)
count=0
for feed in fg.feeds:
for article in feed.articles:
count += 1
db.session.delete(article)
db.session.delete(feed)
db.session.delete(fg)
db.session.commit()
count = "{:,}".format(count)
app.log('%s: Deleted feed group "%s". (%s articles)' % (key.name, fg.name, count))
return {}
class FeedGroupArticles(restful.Resource):
@cors
def get(self, groupname):
"""
Retrieve articles by feedgroup.
"""
key = auth()
# Summon the group or 404.
fg = FeedGroup.query.filter(and_(FeedGroup.key == key, FeedGroup.name == groupname)).first()
if not fg: restful.abort(404)
parser = restful.reqparse.RequestParser()
parser.add_argument("page", type=int, default=1)
parser.add_argument("per_page", type=int, default=10)
parser.add_argument("content", type=bool, default=None)
args = parser.parse_args()
if args.content == True:
query = Article.query.filter(
and_(Article.feed.has(group=fg), Article.content != None))\
.order_by(desc(Article.created)).paginate(args.page, args.per_page)
response = make_response(request.url, query)
# for doc in response['data']:
# if not doc['content_available']:
# response['data'].remove(doc)
# return response
if args.content == False:
query = Article.query.filter(
and_(Article.feed.has(group=fg), Article.content == None))\
.order_by(desc(Article.created)).paginate(args.page, args.per_page)
return make_response(request.url, query)
query = Article.query.filter(
Article.feed.has(group=fg))\
.order_by(desc(Article.created)).paginate(args.page, args.per_page)
return make_response(request.url, query)
class FeedGroupStart(restful.Resource):
@cors
def post(self, groupname):
"""
Start all feeds within a group.
"""
key = auth(forbid_reader_keys=True)
fg = FeedGroup.query.filter(and_(FeedGroup.key == key, FeedGroup.name == groupname)).first()
if not fg:
restful.abort(404)
for feed in fg.feeds:
app.inbox.put([0, "start", [key,feed.name]])
return {}
class FeedGroupStop(restful.Resource):
def post(self, groupname):
key = auth(forbid_reader_keys=True)
fg = FeedGroup.query.filter(and_(FeedGroup.key == key, FeedGroup.name == groupname)).first()
if not fg:
restful.abort(404)
for feed in fg.feeds:
app.inbox.put([0, "stop", [key,feed.name]])
return {}
class FeedGroupSearch(restful.Resource):
def get(self, groupname, terms):
"""
Return articles on feeds in this group with our search terms in the title.
"""
key = auth()
parser = restful.reqparse.RequestParser()
parser.add_argument("page", type=int, default=1)
parser.add_argument("per_page", type=int, default=10)
# parser.add_argument("content", type=bool, default=None)
args = parser.parse_args()
fg = FeedGroup.query.filter(and_(FeedGroup.key == key, FeedGroup.name == groupname)).first()
if not fg:
restful.abort(404)
query = Article.query.filter(
and_(Article.feed.has(group=fg), Article.title.like("%" + terms + "%")))\
.order_by(desc(Article.created)).paginate(args.page, args.per_page)
return make_response(request.url, query)
class FeedGroupCount(restful.Resource):
def get(self, groupname):
key = auth()
fg = FeedGroup.query.filter(and_(FeedGroup.key == key, FeedGroup.name == groupname)).first()
if not fg:
restful.abort(404)
return sum(len(f.articles) for f in fg.feeds)
| mit | 5,908,141,031,274,889,000 | 33.027778 | 103 | 0.591327 | false | 3.850688 | false | false | false |
gautamMalu/ThingSpeak_tweet | main.py | 1 | 1953 | import os
import time
import urllib2
import json
from twython import Twython
#proxy settings
os.environ['http_proxy'] = 'proxy.rolling_friction.in:8080'
os.environ['https_proxy'] = 'proxy.rolling_friction.in:8080'
# Consumer key aka API key for twitter app
APP_KEY = 'API Key for twitter app'
#Consumer Secret aka API secret obtained from above given url
APP_SECRET = 'Consumer Secret'
#Getting auth tokens
twitter = Twython(APP_KEY, APP_SECRET)
auth = twitter.get_authentication_tokens()
OAUTH_TOKEN = auth['oauth_token']
OAUTH_TOKEN_SECRET = auth['oauth_token_secret']
url=auth['auth_url']
print 'open this in browser and authorize Kaalu app '+url
oauth_verifier = raw_input("Provide PIN Number: ")
twitter = Twython(APP_KEY, APP_SECRET,OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
final_step = twitter.get_authorized_tokens(oauth_verifier)
OAUTH_TOKEN = final_step['oauth_token']
OAUTH_TOKEN_SECRET = final_step['oauth_token_secret']
twitter = Twython(APP_KEY, APP_SECRET,OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
# Getting channel number
chn = raw_input("Give Channel Number: ")
def doit():
# Data coming from given public channel in json format
# for private channel use your API key with ?key="API KEY"
url='http://api.thingspeak.com/channels/'+str(chn)+'/feed.json'
response = urllib2.urlopen(url)
html = response.read()
json_data = json.loads(html)
# Get the size of the array so that we could select the lastest lastest value
n_f=len(json_data["feeds"])
sensor_value = json_data["feeds"][n_f-1]["field1"] # getting data from field1 only
tweet = 'the current sensor value from channel '+str(chn)+' on thingspeak is '+str(sensor_value)
print tweet
twitter.update_status(status=tweet)
# Time intervals between consecutive tweets because of twitter API limit
# things speak API time limit is 15 seconds
time_interval = 15
if __name__ == "__main__":
while True:
doit()
time.sleep(time_interval*60)
| gpl-2.0 | -2,989,019,905,170,956,300 | 31.55 | 104 | 0.724526 | false | 3.310169 | false | false | false |
ergo/ziggurat_foundations | ziggurat_foundations/models/services/group.py | 1 | 4140 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from paginate_sqlalchemy import SqlalchemyOrmPage
from ziggurat_foundations.models.base import get_db_session
from ziggurat_foundations.models.services import BaseService
from ziggurat_foundations.permissions import (
ANY_PERMISSION,
ALL_PERMISSIONS,
PermissionTuple,
)
__all__ = ["GroupService"]
class GroupService(BaseService):
@classmethod
def get(cls, group_id, db_session=None):
"""
Fetch row using primary key -
will use existing object in session if already present
:param group_id:
:param db_session:
:return:
"""
db_session = get_db_session(db_session)
return db_session.query(cls.model).get(group_id)
@classmethod
def by_group_name(cls, group_name, db_session=None):
"""
fetch group by name
:param group_name:
:param db_session:
:return:
"""
db_session = get_db_session(db_session)
query = db_session.query(cls.model).filter(cls.model.group_name == group_name)
return query.first()
@classmethod
def get_user_paginator(
cls,
instance,
page=1,
item_count=None,
items_per_page=50,
user_ids=None,
GET_params=None,
):
"""
returns paginator over users belonging to the group
:param instance:
:param page:
:param item_count:
:param items_per_page:
:param user_ids:
:param GET_params:
:return:
"""
if not GET_params:
GET_params = {}
GET_params.pop("page", None)
query = instance.users_dynamic
if user_ids:
query = query.filter(cls.models_proxy.UserGroup.user_id.in_(user_ids))
return SqlalchemyOrmPage(
query,
page=page,
item_count=item_count,
items_per_page=items_per_page,
**GET_params
)
@classmethod
def resources_with_possible_perms(
cls,
instance,
perm_names=None,
resource_ids=None,
resource_types=None,
db_session=None,
):
"""
returns list of permissions and resources for this group,
resource_ids restricts the search to specific resources
:param instance:
:param perm_names:
:param resource_ids:
:param resource_types:
:param db_session:
:return:
"""
db_session = get_db_session(db_session, instance)
query = db_session.query(
cls.models_proxy.GroupResourcePermission.perm_name,
cls.models_proxy.Group,
cls.models_proxy.Resource,
)
query = query.filter(
cls.models_proxy.Resource.resource_id
== cls.models_proxy.GroupResourcePermission.resource_id
)
query = query.filter(
cls.models_proxy.Group.id
== cls.models_proxy.GroupResourcePermission.group_id
)
if resource_ids:
query = query.filter(
cls.models_proxy.GroupResourcePermission.resource_id.in_(resource_ids)
)
if resource_types:
query = query.filter(
cls.models_proxy.Resource.resource_type.in_(resource_types)
)
if perm_names not in ([ANY_PERMISSION], ANY_PERMISSION) and perm_names:
query = query.filter(
cls.models_proxy.GroupResourcePermission.perm_name.in_(perm_names)
)
query = query.filter(
cls.models_proxy.GroupResourcePermission.group_id == instance.id
)
perms = [
PermissionTuple(
None, row.perm_name, "group", instance, row.Resource, False, True
)
for row in query
]
for resource in instance.resources:
perms.append(
PermissionTuple(
None, ALL_PERMISSIONS, "group", instance, resource, True, True
)
)
return perms
| bsd-3-clause | -3,172,711,240,208,770,000 | 27.75 | 86 | 0.564976 | false | 4.123506 | false | false | false |
DataDog/integrations-core | datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/jmx_metrics.py | 1 | 4297 | # (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from collections import defaultdict
import click
import yaml
from datadog_checks.dev.utils import file_exists, read_file
from ...testing import process_checks_option
from ...utils import complete_valid_checks, get_default_config_spec, get_jmx_metrics_file, is_jmx_integration
from ..console import CONTEXT_SETTINGS, abort, echo_failure, echo_info, echo_success
@click.command('jmx-metrics', context_settings=CONTEXT_SETTINGS, short_help='Validate JMX metrics files')
@click.argument('check', autocompletion=complete_valid_checks, required=False)
@click.option('--verbose', '-v', is_flag=True, help='Verbose mode')
def jmx_metrics(check, verbose):
"""Validate all default JMX metrics definitions.
If `check` is specified, only the check will be validated, if check value is 'changed' will only apply to changed
checks, an 'all' or empty `check` value will validate all README files.
"""
checks = process_checks_option(check, source='integrations')
integrations = sorted(check for check in checks if is_jmx_integration(check))
echo_info(f"Validating JMX metrics files for {len(integrations)} checks ...")
saved_errors = defaultdict(list)
for check_name in integrations:
validate_jmx_metrics(check_name, saved_errors, verbose)
validate_config_spec(check_name, saved_errors)
for check_name, errors in saved_errors.items():
if not errors:
continue
echo_info(f"{check_name}:")
for err in errors:
echo_failure(f" - {err}")
echo_info(f"{len(integrations)} total JMX integrations")
echo_success(f"{len(integrations) - len(saved_errors)} valid metrics files")
if saved_errors:
echo_failure(f"{len(saved_errors)} invalid metrics files")
abort()
def validate_jmx_metrics(check_name, saved_errors, verbose):
jmx_metrics_file, metrics_file_exists = get_jmx_metrics_file(check_name)
if not metrics_file_exists:
saved_errors[check_name].append(f'{jmx_metrics_file} does not exist')
return
jmx_metrics_data = yaml.safe_load(read_file(jmx_metrics_file)).get('jmx_metrics')
if jmx_metrics_data is None:
saved_errors[check_name].append(f'{jmx_metrics_file} does not have jmx_metrics definition')
return
for rule in jmx_metrics_data:
include = rule.get('include')
include_str = truncate_message(str(include), verbose)
rule_str = truncate_message(str(rule), verbose)
if not include:
saved_errors[check_name].append(f"missing include: {rule_str}")
return
domain = include.get('domain')
beans = include.get('bean')
if (not domain) and (not beans):
# Require `domain` or `bean` to be present,
# that helps JMXFetch to better scope the beans to retrieve
saved_errors[check_name].append(f"domain or bean attribute is missing for rule: {include_str}")
def validate_config_spec(check_name, saved_errors):
spec_file = get_default_config_spec(check_name)
if not file_exists(spec_file):
saved_errors[check_name].append(f"config spec does not exist: {spec_file}")
return
spec_files = yaml.safe_load(read_file(spec_file)).get('files')
init_config_jmx = False
instances_jmx = False
for spec_file in spec_files:
for base_option in spec_file.get('options', []):
base_template = base_option.get('template')
for option in base_option.get("options", []):
template = option.get('template')
if template == 'init_config/jmx' and base_template == 'init_config':
init_config_jmx = True
elif template == 'instances/jmx' and base_template == 'instances':
instances_jmx = True
if not init_config_jmx:
saved_errors[check_name].append("config spec: does not use `init_config/jmx` template")
if not instances_jmx:
saved_errors[check_name].append("config spec: does not use `instances/jmx` template")
def truncate_message(s, verbose):
if not verbose:
s = (s[:100] + '...') if len(s) > 100 else s
return s
| bsd-3-clause | -913,970,256,265,694,000 | 38.422018 | 117 | 0.661857 | false | 3.678938 | true | false | false |
stczhc/neupy | neupy/helpers/table.py | 1 | 7989 | from __future__ import print_function
import time
import textwrap
from operator import attrgetter
from abc import abstractmethod
import numpy as np
from six import with_metaclass
from neupy.core.docs import SharedDocs, SharedDocsABCMeta
__all__ = ("TableBuilder", "Column", "TimeColumn", "NumberColumn",
"TableDrawingError")
class TableDrawingError(AttributeError):
""" Exception specific for ``TableBuilder`` class functionality.
"""
class Column(SharedDocs):
""" Simple column class that helps discribe structure for
``TableBuilder`` class instance.
Parameters
----------
name : str
Column name. Value would be displayed in header. In case when
``width`` parameter equal to ``None``, string width will identify
column width.
dtype : object
Column data format. Defaults to ``str``.
width : int or None
Column width. Defaults to ``None``.
"""
def __init__(self, name, dtype=str, width=None):
if width is None:
width = len(name)
self.name = name
self.dtype = dtype
self.width = width
def format_value(self, value):
""" Convert input value to specified type
Parameters
----------
value : object
Returns
-------
object
Function return converted input value to specified
data type.
"""
return self.dtype(value)
def format_time(value):
""" Convert seconds to the value format that easy
to understand.
Parameters
----------
value : float
Time interval in seconds.
Returns
-------
str
Examples
--------
>>> col = TimeColumn("Time")
>>> col.format_value(0.001)
'1 ms'
>>> col.format_value(0.5)
'0.5 sec'
>>> col.format_value(1.5)
'1.5 sec'
>>> col.format_value(15)
'00:00:15'
>>> col.format_value(15045)
'04:10:45'
"""
if value < 0.05:
return "{} ms".format(round(value * 10 ** 3))
elif value < 10:
return "{} sec".format(round(value, 1))
return time.strftime("%H:%M:%S", time.gmtime(value))
class TimeColumn(Column):
""" Columns useful for time formating from seconds to more
informative and readable format.
Parameters
----------
{Column.name}
{Column.dtype}
{Column.width}
"""
def format_value(self, value):
return format_time(value)
class NumberColumn(Column):
""" Class describe float column type.
Parameters
----------
places : int
Float number rounding precision. Defaults to ``6``.
{Column.name}
{Column.dtype}
{Column.width}
"""
def __init__(self, places=6, *args, **kwargs):
super(NumberColumn, self).__init__(*args, **kwargs)
self.places = places
def format_value(self, value):
""" Round a number to a given precision in decimal digits
Parameters
----------
value : float
Returns
-------
float
Rounded input value.
"""
if not isinstance(value, (int, float, np.floating, np.integer)):
return value
if value > 100:
return "~{:.0f}".format(value)
return "{value:.{places}f}".format(value=value,
places=self.places)
class BaseState(with_metaclass(SharedDocsABCMeta)):
""" Base abstract class that identify all important methods for
``TableBuilder`` class states.
Parameters
----------
table : TableBuilder instance
Accept summary table instance. State is able to control
properties from main ``TableBuilder`` class instantance
"""
def __init__(self, table):
self.table = table
def line(self):
""" Draw ASCII line. Line width depence on the table
column sizes.
"""
self.table.stdout('\r' + '-' * self.table.total_width)
def message(self, text):
""" Write additional message in table. All seperators
between columns will be ignored.
"""
self.line()
# Excluding from the total width 2 symbols related to
# the separators near the table edges and 2 symbols
# related to the spaces near these edges
max_line_width = self.table.total_width - 4
for text_row in textwrap.wrap(text, max_line_width):
formated_text = text_row.ljust(max_line_width)
self.table.stdout("\r| " + formated_text + " |")
self.line()
@abstractmethod
def start(self):
pass
@abstractmethod
def finish(self):
pass
@abstractmethod
def header(self):
pass
@abstractmethod
def row(self, data):
pass
class DrawingState(BaseState):
""" Identify active state for ``TableBuilder`` class instance.
In this state summary table instance is able to show information
in terminal.
Parameters
----------
{BaseState.table}
"""
def start(self):
raise TableDrawingError("Table drawing already started")
def finish(self):
self.line()
self.table.state = IdleState(self.table)
def header(self):
raise TableDrawingError("Header already drawn")
def row(self, data):
formated_data = []
for val, column in zip(data, self.table.columns):
val = column.format_value(val)
cell_value = str(val).ljust(column.width)
formated_data.append(cell_value)
self.table.stdout("\r| " + " | ".join(formated_data) + " |")
class IdleState(BaseState):
""" Identify idle state for ``TableBuilder`` class instance.
In this state summary table instance isn't able to show information
in terminal.
Parameters
----------
{BaseState.table}
"""
def start(self):
self.header()
self.table.state = DrawingState(self.table)
def finish(self):
raise TableDrawingError("Table drawing already finished or "
"didn't started")
def header(self):
self.line()
headers = []
for column in self.table.columns:
header_name = str(column.name).ljust(column.width)
headers.append(header_name)
self.table.stdout("\r| " + " | ".join(headers) + " |")
self.line()
def row(self, data):
raise TableDrawingError("Table drawing already finished or "
"didn't started")
class TableBuilder(SharedDocs):
""" Build ASCII tables using simple structure.
Parameters
----------
*columns
Table structure. Accept ``Column`` instance classes.
stdout : func
Function through which the message will be transmitted.
"""
def __init__(self, *columns, **kwargs):
valid_kwargs = ['stdout']
# In Python 2 doesn't work syntax like
# def __init__(self, *columns, stdout=print):
# Code below implements the same.
stdout = kwargs.get('stdout', print)
if any(kwarg not in valid_kwargs for kwarg in kwargs):
raise ValueError("Invalid keyword arguments. Available "
"only: {}".format(valid_kwargs))
for column in columns:
if not isinstance(column, Column):
raise TypeError("Column should be ``Column`` class "
"instance.")
self.columns = columns
self.stdout = stdout
self.state = IdleState(self)
text_width = sum(map(attrgetter('width'), columns))
n_columns = len(columns)
n_separators = n_columns + 1
n_margins = 2 * n_columns
self.total_width = text_width + n_separators + n_margins
def __getattr__(self, attr):
if attr not in self.__dict__:
return getattr(self.state, attr)
return super(TableBuilder, self).__getattr__(attr)
| mit | -877,040,094,586,411,600 | 24.938312 | 73 | 0.577294 | false | 4.341848 | false | false | false |
myshkov/bnn-analysis | experiments/experiment.py | 1 | 9636 | import logging
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import utils
from sampler import Sampler
from models.mcmc_sampler import MCMC_sampler
from models.dropout_sampler import DropoutSampler
from models.bbb_sampler import BBBSampler
from models.pbp_sampler import PBPSampler
import evaluation.visualisation as vis
class Experiment:
"""
Configures, tests and evaluates models (Sampler) for a particular environment (Env).
Contains default setups for common types of samplers for easier configuration.
"""
def __init__(self):
pass
def _setup_sampler_defaults(self, sampler_params):
pass
def setup_env_defaults(self, env):
env.create_training_test_sets()
def configure_env_mcmc(self, env, sampler_class=None, sampler_params=None, loss='mse'):
env.model_parameters_size = Sampler.get_model_parameters_size(env.layers_description)
loss = MCMC_sampler.get_mse_loss if loss is 'mse' else MCMC_sampler.get_ce_loss
def sampler_factory():
params = env.get_default_sampler_params()
self._setup_sampler_defaults(params)
params['loss_fn'] = loss(env.chains_num, env.layers_description)
params['initial_position'] = MCMC_sampler.create_random_position(env.chains_num,
env.layers_description)
params['burn_in'] = int(.45 * env.n_chunks * env.samples_per_chunk())
if sampler_params is not None:
params.update(sampler_params)
sampler = sampler_class(**params)
pos_size = env.model_parameters_size
model_parameters = tf.placeholder(dtype=tf.float32, shape=[1, pos_size])
model = MCMC_sampler.model_from_position(env.layers_description, model_parameters, sampler.test_x)
sampler.test_model = [model, model_parameters]
sampler.construct()
sampler.fit()
return sampler
env.sampler_factory = sampler_factory
def configure_env_dropout(self, env, sampler_params=None, dropout=0.01, tau=0.15, length_scale=1e-2):
def sampler_factory():
params = env.get_default_sampler_params()
params['n_epochs'] = 50
wreg = length_scale ** 2 * (1 - dropout) / (2. * env.get_train_x().shape[0] * tau)
model = DropoutSampler.model_from_description(env.layers_description, wreg, dropout)
logging.info(f'Reg: {wreg}')
if sampler_params is not None:
params.update(sampler_params)
sampler = DropoutSampler(model=model, **params)
sampler.construct()
return sampler
env.sampler_factory = sampler_factory
def configure_env_bbb(self, env, sampler_params=None, noise_std=0.01, weigts_std=1., n_epochs=5):
def sampler_factory():
params = env.get_default_sampler_params()
params['step_size'] = .1
if sampler_params is not None:
params.update(sampler_params)
params['n_epochs'] = n_epochs
model = BBBSampler.model_from_description(env.layers_description, noise_std, weigts_std, env.batch_size,
env.get_train_x().shape[0])
sampler = BBBSampler(model=model, **params)
sampler.construct()
return sampler
env.sampler_factory = sampler_factory
def configure_env_pbp(self, env, sampler_params=None, n_epochs=50):
def sampler_factory():
params = env.get_default_sampler_params()
params['model_desc'] = env.layers_description
params['n_epochs'] = n_epochs
if sampler_params is not None:
params.update(sampler_params)
sampler = PBPSampler(**params)
sampler.construct()
return sampler
env.sampler_factory = sampler_factory
def run_queue(self, queue, skip_completed=True, cpu=False):
if cpu:
with tf.device('/cpu:0'):
self._run_queue(queue, skip_completed=skip_completed)
else:
self._run_queue(queue, skip_completed=skip_completed)
def is_complete(self, name):
return utils.get_latest_data_subdir(self.__to_pattern(name)) is not None
def plot_predictive_baseline(self, name=None, split=0, discard=.5):
env, samples = self.__load_env_baseline(name, split, discard_left=discard)
vis.plot_predictive_baseline(env, samples, title_name=name)
def plot_predictive_comparison(self, baseline, target, split=0, discard_left=0., discard_right=0.,
target_metrics=None):
# baseline
env, baseline_samples = self.__load_env_baseline(baseline, split=split, discard_left=0.5)
# target
target_samples, target_times = self.__load_target(env, target, split, discard_left=discard_left,
discard_right=discard_right)
vis.plot_predictive_comparison(env, baseline_samples, target_samples, target_metrics=target_metrics,
title_name=target)
def plot_predictive_point(self, baseline, target, split=0, discard_left=0., discard_right=0., point_index=0):
# baseline
env, baseline_samples = self.__load_env_baseline(baseline, split=split, discard_left=0.5)
# target
target_samples, target_times = self.__load_target(env, target, split, discard_left=discard_left,
discard_right=discard_right)
true_x = env.get_test_x()[point_index][0]
true_y = env.get_test_y()[point_index][0]
vis.plot_hist(baseline_samples[:, point_index], target_samples[:, point_index], true_x, true_y)
def compute_metrics(self, baseline, target, split=0, discard_left=0., discard_right=0., metric_names=None):
# baseline
env, baseline_samples = self.__load_env_baseline(baseline, split=split, discard_left=0.5)
# target
target_samples, target_times = self.__load_target(env, target, split, discard_left=discard_left,
discard_right=discard_right)
return env.compute_metrics(baseline_samples, target_samples, metric_names=metric_names)
def plot_metrics(self, baseline, target, metric_names, split=0):
# baseline
env, baseline_samples = self.__load_env_baseline(baseline, split=split, discard_left=.5)
# target
target_samples, target_times = self.__load_target(env, target, split)
samples_dict = OrderedDict()
samples_dict[target] = target_samples
times_dict = OrderedDict()
times_dict[target] = target_times
vis.plot_metrics(baseline_samples, samples_dict, times_dict, metric_names)
def plot_multiple_metrics(self, baseline, targets, metric_names, split=0, max_time=60, title_name=None):
# baseline
env, baseline_samples = self.__load_env_baseline(baseline, split=split, discard_left=.5)
# targets
samples_dict = OrderedDict()
times_dict = OrderedDict()
for t in targets:
samples_dict[t], times_dict[t] = self.__load_target(env, name=t, split=split)
vis.plot_metrics(baseline_samples, samples_dict, times_dict, metric_names, max_time=max_time,
title_name=title_name)
def report_metrics_table(self, queue, discard_left=.75):
for target in queue.keys():
metrics = []
for split in range(4):
target_metrics = self.compute_metrics('HMC', target, discard_left=discard_left, discard_right=.0,
metric_names=['RMSE', 'KS', 'KL', 'Precision', 'Recall', 'F1'])
metrics.append([v for v in target_metrics.values()])
print(self.__report_avg_metrics(target, metrics))
def __report_metrics(self, target, scores):
str = target
for name, score in scores.items():
str += f' & {score:.2f}'
str += ' \\\\'
return str
def __report_avg_metrics(self, target, scores):
scores = np.asarray(scores)
mean = scores.mean(axis=0)
std = scores.std(axis=0)
str = target
for m, s in zip(mean, std):
str += f' & {m:.2f} $\\pm$ {s:.3f}'
str += ' \\\\'
return str
def _run_queue(self, queue, skip_completed):
for name, run_fn in queue.items():
if not skip_completed or not self.is_complete(name):
run_fn()
def __to_pattern(self, name):
return '-' + name.lower() + '-'
def __load_env_baseline(self, name=None, split=0, discard_left=.5, discard_right=0.):
utils.set_latest_data_subdir(pattern=self.__to_pattern(name))
env = utils.deserialize('env')
env.current_split = split
samples = env.load_samples(split=split, discard_left=discard_left, discard_right=discard_right)
return env, samples
def __load_target(self, env, name=None, split=0, discard_left=0., discard_right=0.):
utils.set_latest_data_subdir(pattern=self.__to_pattern(name))
samples = env.load_samples(split=split, discard_left=discard_left, discard_right=discard_right)
times = env.load_times(split=split, discard_left=discard_left, discard_right=discard_right)
return samples, times
| mit | 2,032,147,816,376,529,400 | 38.012146 | 117 | 0.603051 | false | 3.915482 | true | false | false |
rjw57/rbc | rbc/compiler.py | 1 | 7098 | """
High-level interface to the B compiler.
"""
import os
import subprocess
import llvmlite.binding as llvm
import pkg_resources
import whichcraft
import rbc.codegen as codegen
from rbc.parser import BParser
from rbc.semantics import BSemantics
from rbc._backport import TemporaryDirectory
# pylint: disable=assignment-from-no-return
_LIBB_C_SOURCE_FILE = pkg_resources.resource_filename(__name__, 'libb.c')
_LIBB_B_SOURCE_FILE = pkg_resources.resource_filename(__name__, 'libb.b')
def _ensure_llvm():
"""Ensure that LLVM has been initialised."""
if _ensure_llvm.was_initialized:
return
llvm.initialize()
llvm.initialize_native_target()
llvm.initialize_native_asmprinter()
_ensure_llvm.was_initialized = True
_ensure_llvm.was_initialized = False
class CompilerOptions(object):
"""There are many options which affect the behaviour of the compiler. They
are collected into this class for easy transport.
The target and machine attributes are given default values based on the host
machine running the compiler. The default optimisation level is 1.
IMPORTANT: Make sure that LLVM and in particular the native target has been
initialised via the llvmlite.binding.initialize...() functions before
constructing an instance of this object.
Attributes:
target: The llvm.Target which is the target of compilation.
machine: The llvm.TargetMachine which is the target of compilation.
opt_level: The optimisation level from 0 (no optimisation) to 3 (full
optimisation.)
"""
def __init__(self):
_ensure_llvm()
self.target = llvm.Target.from_default_triple()
self.machine = self.target.create_target_machine(codemodel='default')
self.opt_level = 1
def compile_b_source(source, options):
"""The B front end converts B source code into a LLVM module. No significant
optimisation is performed.
Args:
source (str): B source code as a string
options (CompilerOptions): compiler options
Returns:
A string with the LLVM assembly code for an unoptimised module
corresponding to the input source.
"""
# Set parser semantics and go forth and parse.
program = BParser().parse(source, 'program',
semantics=BSemantics(codegen.make_node))
# Emit LLVM assembly for the correct target.
module_str = program.emit(options.target, options.machine)
# Return the string representation of the module.
return module_str
def optimize_module(module_assembly, options):
"""Verify and optimise the passed LLVM module assembly.
Args:
module_assembly (str): LLVM module assembly
options (CompilerOptions): options for the compiler
Returns:
A llvmlite.binding.ModuleRef for the verified and optimised module.
"""
_ensure_llvm()
# Parse LLVM module assembly
module = llvm.parse_assembly(module_assembly)
module.verify()
# Create optimiser pass manager
pass_manager = llvm.ModulePassManager()
# Populate with target passes
options.machine.target_data.add_pass(pass_manager)
# Populate with optimisation passes
pass_manager_builder = llvm.PassManagerBuilder()
pass_manager_builder.opt_level = options.opt_level
pass_manager_builder.populate(pass_manager)
# Run optimiser
pass_manager.run(module)
return module
class CompilationEnvironment(object):
"""
Detect compiler tools available in the environment.
Some parts of ``rbc`` call out to external compiler tools. This class
centralises the automatic discovery of these tools. Custom environments may
be created by creating an instance of this class and setting attributes
manually.
Attributes:
gcc: path to the GCC compiler binary or None if no GCC present
cppflags: list of C pre-processor flags
cflags: list of C compiler flags
ldflags: list of linker flags
"""
def __init__(self):
self.gcc = whichcraft.which('gcc')
self.cflags = ['-std=gnu99']
self.cppflags = []
self.ldflags = []
def compile_c_source(self, obj_filename, c_filename):
subprocess.check_call(
[self.gcc] + self.cppflags + self.cflags +
['-c', '-o', obj_filename, c_filename])
def link_objects(self, output_filename, obj_filenames):
subprocess.check_call(
[self.gcc] + self.ldflags +
['-o', output_filename] + obj_filenames)
_DEFAULT_ENVIRONMENT = CompilationEnvironment()
def compile_b_to_native_object(obj_filename, b_filename, options):
"""Convenience function to compile an on-disk B file to a native object.
Args:
obj_filename (str): file to write object code to
b_filename (str): file containing B source
options (CompilerOptions): compiler options to use
"""
with open(b_filename) as fobj:
source = fobj.read()
module_asm = compile_b_source(source, options)
module = optimize_module(module_asm, options)
module.name = os.path.basename(b_filename)
with open(obj_filename, 'wb') as fobj:
fobj.write(options.machine.emit_object(module))
def compile_and_link(output, source_files, options=None,
env=_DEFAULT_ENVIRONMENT):
"""Compile and link source files into an output file. Uses GCC for the heavy
lifting. This will implicitly link in the B standard library.
Input files may be anything GCC accepts along with B source files.
If no compiler options are used, a new CompilerOptions object is
constructed.
Note: the passed compiler options *only* affect the B compiler. Use the
'cflags', 'ldflags' and 'cppflags' attributes in the compilation
environment.
Args:
output (str): path to output file
source_files (sequence): paths of input files
options (CompilerOptions): compiler options
env (CompilationEnvironment): specify custom compiler environment
"""
options = options if options is not None else CompilerOptions()
with TemporaryDirectory() as tmp_dir:
libb1_obj = os.path.join(tmp_dir, 'libb1.o')
env.compile_c_source(libb1_obj, _LIBB_C_SOURCE_FILE)
libb2_obj = os.path.join(tmp_dir, 'libb2.o')
compile_b_to_native_object(libb2_obj, _LIBB_B_SOURCE_FILE, options)
compiled_source_files = [libb1_obj, libb2_obj]
for file_idx, source_file in enumerate(source_files):
out_file = os.path.join(tmp_dir, 'tmp{}.o'.format(file_idx))
_, ext = os.path.splitext(source_file)
if ext == '.b':
compile_b_to_native_object(out_file, source_file, options)
compiled_source_files.append(out_file)
elif ext == '.c':
env.compile_c_source(out_file, source_file)
compiled_source_files.append(out_file)
else:
compiled_source_files.append(source_file)
env.link_objects(output, compiled_source_files)
| mit | -6,122,891,864,101,704,000 | 33.125 | 80 | 0.672866 | false | 3.9787 | false | false | false |
BD2KGenomics/dcc-storage-schemas | metadata_indexer.py | 1 | 53653 | # Authors: Jean Rodriguez & Chris Wong
# Date: July 2016
#
# Description:This script merges metadata json files into one jsonl file. Each json object is grouped by donor and then each individual
# donor object is merged into one jsonl file.
#
# Usage: python metadata_indexer.py --only_Program TEST --only_Project TEST --awsAccessToken `cat ucsc-storage-client/accessToken` --clientPath ucsc-storage-client/ --metadataSchema metadata_schema.json
import semver
import logging
import os
import os.path
import platform
import argparse
import json
import jsonschema
import datetime
import re
import dateutil
import ssl
import dateutil.parser
import ast
from urllib import urlopen
from subprocess import Popen, PIPE
first_write = dict()
index_index = 0
#Dictionary to hold the File UUIDs to later get the right file size
bundle_uuid_filename_to_file_uuid = {}
#Call the storage endpoint and get the list of the
def get_size_list(token, redwood_host):
"""
This function assigns file_uuid_and_size with all the ids and file size,
so they can be used later to fill the missing file_size entries
"""
print "Downloading the listing"
#Attempt to download
try:
command = ["curl"]
command.append("-k")
command.append("-H")
command.append("Authorization: Bearer "+token)
command.append("https://aws:"+token+"@"+redwood_host+":5431/listing")
c_data=Popen(command, stdout=PIPE, stderr=PIPE)
size_list, stderr = c_data.communicate()
file_uuid_and_size = ast.literal_eval(size_list)
print "Done downloading the file size listing"
except Exception:
logging.error('Error while getting the list of file sizes')
print 'Error while while getting the list of file sizes'
#Return the list of file sizes.
return file_uuid_and_size
#Fills in the contents of bundle_uuid_filename_to_file_uuid
def requires(redwood_host):
"""
Fills the dictionary for the files and their UUIDs.
"""
print "** COORDINATOR **"
print "**ACQUIRING FILE UUIDS**"
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
# now query the metadata service so I have the mapping of bundle_uuid & file names -> file_uuid
print str("https://"+redwood_host+":8444/entities?page=0")
json_str = urlopen(str("https://"+redwood_host+":8444/entities?page=0"), context=ctx).read()
metadata_struct = json.loads(json_str)
print "** METADATA TOTAL PAGES: "+str(metadata_struct["totalPages"])
for i in range(0, metadata_struct["totalPages"]):
print "** CURRENT METADATA TOTAL PAGES: "+str(i)
json_str = urlopen(str("https://"+redwood_host+":8444/entities?page="+str(i)), context=ctx).read()
metadata_struct = json.loads(json_str)
for file_hash in metadata_struct["content"]:
bundle_uuid_filename_to_file_uuid[file_hash["gnosId"]+"_"+file_hash["fileName"]] = file_hash["id"]
# HACK!!! Please remove once the behavior has been fixed in the workflow!!
if file_hash["fileName"].endswith(".sortedByCoord.md.bam"):
bundle_uuid_filename_to_file_uuid[file_hash["gnosId"] + "_sortedByCoord.md.bam"] = file_hash["id"]
if file_hash["fileName"].endswith(".tar.gz"):
bundle_uuid_filename_to_file_uuid[file_hash["gnosId"] + "_tar.gz"] = file_hash["id"]
if file_hash["fileName"].endswith(".wiggle.bg"):
bundle_uuid_filename_to_file_uuid[file_hash["gnosId"] + "_wiggle.bg"] = file_hash["id"]
def insert_size(file_name, file_uuid_and_size):
"""
Opens the file and inserts any missing file_size
"""
#Open the file and do the size insertion
with open(file_name, 'r') as f:
data = json.load(f)
#Special flat-ish kind of format.
if 'workflow_outputs' in data:
bundle_uuid = data['bundle_uuid']
for file_ in data['workflow_outputs']:
file_name_uploaded = file_['file_path']
if 'file_size' not in file_:
try:
file_uuid = bundle_uuid_filename_to_file_uuid[bundle_uuid+'_'+file_name_uploaded]
file_entry = filter(lambda x:x['id'] == file_uuid, file_uuid_and_size)
file_['file_size'] = file_entry[0]['size']
except Exception as e:
logging.error('Error while assigning missing size. Associated file may not exist. File Id: %s' % file_uuid)
print 'Error while assigning missing size. Associated file may not exist. File Id: %s' % file_uuid
#The more generic format
else:
for specimen in data['specimen']:
for sample in specimen['samples']:
for analysis in sample['analysis']:
bundle_uuid = analysis['bundle_uuid']
for file_ in analysis['workflow_outputs']:
file_name_uploaded = file_['file_path']
if 'file_size' not in file_:
try:
#Get the size for the file uuid
file_uuid = bundle_uuid_filename_to_file_uuid[bundle_uuid+'_'+file_name_uploaded]
file_entry = filter(lambda x: x['id'] == file_uuid, file_uuid_and_size)
file_['file_size'] = file_entry[0]['size']
except Exception as e:
logging.error('Error while assigning missing size. Associated file may not exist. File Id: %s' % file_uuid)
print 'Error while assigning missing size. Associated file may not exist. File Id: %s' % file_uuid
#Remove and replace the old file with the new one.
os.remove(file_name)
with open(file_name, 'w') as f:
json.dump(data, f, indent=4)
def input_Options():
"""
Creates the parse options
"""
parser = argparse.ArgumentParser(description='Directory that contains Json files.')
parser.add_argument('-d', '--test-directory', help='Directory that contains the json metadata files')
parser.add_argument('-u', '--skip-uuid-directory', help='Directory that contains files with file uuids (bundle uuids, one per line, file ending with .redacted) that represent databundles that should be skipped, useful for redacting content (but not deleting it)')
parser.add_argument('-m', '--metadata-schema', help='File that contains the metadata schema')
parser.add_argument('-s', '--skip-program', help='Lets user skip certain json files that contain a specific program test')
parser.add_argument('-o', '--only-program', help='Lets user include certain json files that contain a specific program test')
parser.add_argument('-r', '--skip-project', help='Lets user skip certain json files that contain a specific program test')
parser.add_argument('-t', '--only-project', help='Lets user include certain json files that contain a specific program test')
parser.add_argument('-a', '--storage-access-token', default="NA", help='Storage access token to download the metadata.json files')
parser.add_argument('-c', '--client-path', default="ucsc-storage-client/", help='Path to access the ucsc-storage-client tool')
parser.add_argument('-n', '--server-host', default="storage.ucsc-cgl.org", help='hostname for the storage service')
parser.add_argument('-p', '--max-pages', default=None, type=int, help='Specify maximum number of pages to download')
parser.add_argument('-preserve-version',action='store_true', default=False, help='Keep all copies of analysis events')
args = parser.parse_args()
return args
def make_output_dir():
"""
Creates directory named "endpoint_metadata" to store all the metadata that is downloaded
"""
directory= "endpoint_metadata"
mkdir_Command=["mkdir"]
mkdir_Command.append(directory)
c_data=Popen(mkdir_Command, stdout=PIPE, stderr=PIPE)
stdout, stderr = c_data.communicate()
logging.info("created directory: %s/" % (directory))
print "created directory: %s/" % (directory)
return directory
def endpint_mapping(data_array):
"""
data_array: array of json objects
create a maping: gnos-id -> id
"""
numberOfElements=0
page=0
my_dictionary= dict()
for j_obj in data_array:
numberOfElements += j_obj["numberOfElements"]
page= j_obj["number"]
for content in j_obj["content"]:
content_id= content["id"]
my_dictionary[content_id]={"content": content, "page": page}
page += 1
logging.info("Total pages downloaded: %s" % page)
logging.info("Total number of elements: %s" % numberOfElements)
print "Total pages downloaded: ",page
print "Total number of elements: ", numberOfElements
return my_dictionary
def create_merge_input_folder(id_to_content,directory,accessToken,client_Path, size_list):
"""
id_to_content: dictionary that maps content id to content object.
directory: name of directory where the json files will be stored.
Uses the ucsc-download.sh script to download the json files
and store them in the "directory".
"""
"""
java
-Djavax.net.ssl.trustStore=/ucsc-storage-client/ssl/cacerts
-Djavax.net.ssl.trustStorePassword=changeit
-Dmetadata.url=https://storage.ucsc-cgl.org:8444
-Dmetadata.ssl.enabled=true -Dclient.ssl.custom=false
-Dstorage.url=https://storage.ucsc-cgl.org:5431
-DaccessToken=${accessToken}
-jar
/ucsc-storage-client/icgc-storage-client-1.0.14-SNAPSHOT/lib/icgc-storage-client.jar
download
--output-dir ${download}
--object-id ${object}
--output-layout bundle
"""
args = input_Options()
metadataClientJar = os.path.join(client_Path,"icgc-storage-client-1.0.14-SNAPSHOT/lib/icgc-storage-client.jar")
metadataUrl= "https://"+args.server_host+":8444"
storageUrl= "https://"+args.server_host+":5431"
trustStore = os.path.join(client_Path,"ssl/cacerts")
trustStorePw = "changeit"
# If the path is not correct then the download and merge will not be performed.
if not os.path.isfile(metadataClientJar):
logging.critical("File not found: %s. Path may not be correct: %s" % (metadataClientJar,client_Path))
print "File not found: %s" % metadataClientJar
print "Path may not be correct: %s" % client_Path
print "Exiting program."
exit(1)
logging.info('Begin Download.')
print "downloading metadata..."
for content_id in id_to_content:
file_create_time_server = id_to_content[content_id]["content"]["createdTime"]
if os.path.isfile(directory+"/"+id_to_content[content_id]["content"]["gnosId"]+"/metadata.json") and \
creation_date(directory+"/"+id_to_content[content_id]["content"]["gnosId"]+"/metadata.json") == file_create_time_server/1000:
#Assign any missing file size
insert_size(directory+"/"+id_to_content[content_id]["content"]["gnosId"]+"/metadata.json", size_list)
#Set the time created to be the one supplied by redwood (since insert_size() modifies the file)
os.utime(directory + "/" + id_to_content[content_id]["content"]["gnosId"] + "/metadata.json",
(file_create_time_server/1000, file_create_time_server/1000))
#Open the file and add the file size if missing.
print " + using cached file "+directory+"/"+id_to_content[content_id]["content"]["gnosId"]+"/metadata.json created on "+str(file_create_time_server)
#os.utime(directory + "/" + id_to_content[content_id]["content"]["gnosId"] + "/metadata.json", (file_create_time_server/1000, file_create_time_server/1000))
else:
print " + downloading "+content_id
# build command string
command = ["java"]
command.append("-Djavax.net.ssl.trustStore=" + trustStore)
command.append("-Djavax.net.ssl.trustStorePassword=" + trustStorePw)
command.append("-Dmetadata.url=" + str(metadataUrl))
command.append("-Dmetadata.ssl.enabled=true")
command.append("-Dclient.ssl.custom=false")
command.append("-Dstorage.url=" + str(storageUrl))
command.append("-DaccessToken=" + str(accessToken))
command.append("-jar")
command.append(metadataClientJar)
command.append("download")
command.append("--output-dir")
command.append(str(directory))
command.append("--object-id")
command.append(str(content_id))
command.append("--output-layout")
command.append("bundle")
#print " ".join(command)
try:
c_data=Popen(command, stdout=PIPE, stderr=PIPE)
stdout, stderr = c_data.communicate()
# now set the create timestamp
insert_size(directory+"/"+id_to_content[content_id]["content"]["gnosId"]+"/metadata.json", size_list)
os.utime(directory + "/" + id_to_content[content_id]["content"]["gnosId"] + "/metadata.json",
(file_create_time_server/1000, file_create_time_server/1000))
except Exception:
logging.error('Error while downloading file with content ID: %s' % content_id)
print 'Error while downloading file with content ID: %s' % content_id
logging.info('End Download.')
def creation_date(path_to_file):
"""
Try to get the date that a file was created, falling back to when it was
last modified if that isn't possible.
See http://stackoverflow.com/a/39501288/1709587 for explanation.
"""
if platform.system() == 'Windows':
return os.path.getctime(path_to_file)
else:
stat = os.stat(path_to_file)
try:
return stat.st_birthtime
except AttributeError:
# We're probably on Linux. No easy way to get creation dates here,
# so we'll settle for when its content was last modified.
return stat.st_mtime
def load_json_obj(json_path):
"""
:param json_path: Name or path of the json metadata file.
:return: A json object.
"""
json_file = open(json_path, 'r')
print "JSON FILE: "+json_path
json_obj = json.load(json_file)
json_file.close()
return json_obj
def load_json_arr(input_dir, data_arr, redacted):
"""
:param input_dir: Directory that contains the json files.
:param data_arr: Empty array.
Gets all of the json files, converts them into objects and stores
them in an array.
"""
for folder in os.listdir(input_dir):
current_folder = os.path.join(input_dir, folder)
if os.path.isdir(current_folder):
for file in os.listdir(current_folder):
if file.endswith(".json") and folder not in redacted:
current_file = os.path.join(current_folder, file)
try:
json_obj = load_json_obj(current_file)
data_arr.append(json_obj)
except ValueError:
print "ERROR PARSING JSON: will skip this record."
def skip_option(donorLevelObjs, option_skip, key):
for json_obj in donorLevelObjs:
keys = json_obj[key]
if keys == option_skip:
donorLevelObjs.remove(json_obj)
def only_option(donorLevelObjs,option_only, key):
for json_obj in donorLevelObjs:
keys = json_obj[key]
if keys != option_only:
donorLevelObjs.remove(json_obj)
def validate_json(json_obj,schema):
"""
:return: Returns true if the json is in the correct schema.
"""
try:
jsonschema.validate(json_obj, schema)
except Exception as exc:
logging.error("jsonschema.validate FAILED in validate_json: %s" % (str(exc)))
return False
return True
def insert_detached_metadata(detachedObjs, uuid_mapping, preserve_version=False):
"""
Inserts a Analysis object, that contains a parent ID, to its respective donor object.
"""
de_timestamp = dateutil.parser.parse(detachedObjs["timestamp"])
for parent_uuid in detachedObjs["parent_uuids"]:
for key in uuid_mapping:
donor_obj= uuid_mapping[key]
donor_timestamp= dateutil.parser.parse(donor_obj["timestamp"])
donor_uuid = donor_obj["donor_uuid"]
# Check if it needs to be inserted in the donor section
if parent_uuid== donor_uuid:
if "analysis" in donor_obj:
donor_obj["analysis"].append(detachedObjs)
else:
donor_obj["analysis"]= [detachedObjs]
# Check if it needs to be inserted in the specimen section
for specimen in donor_obj["specimen"]:
specimen_uuid =specimen["specimen_uuid"]
if specimen_uuid == parent_uuid:
if "analysis" in specimen:
specimen["analysis"].append(detachedObjs)
else:
specimen["analysis"]= [detachedObjs]
# Check if it needs to be inserted in the sample section
for sample in specimen["samples"]:
sample_uuid= sample["sample_uuid"]
if sample_uuid == parent_uuid:
analysis_type = detachedObjs["analysis_type"]
savedAnalysisTypes = set()
for donor_analysis in sample["analysis"]:
savedAnalysisType = donor_analysis["analysis_type"]
savedAnalysisTypes.add(savedAnalysisType)
if analysis_type == savedAnalysisType:
analysisObj = donor_analysis
if not analysis_type in savedAnalysisTypes:
sample["analysis"].append(detachedObjs)
continue
else:
# compare 2 analysis to keep only most relevant one
# saved is analysisObj
# currently being considered is new_analysis
if preserve_version:
sample["analysis"].append(detachedObjs)
else:
new_workflow_version = detachedObjs["workflow_version"]
saved_version = analysisObj["workflow_version"]
# current is older than new
if saved_version == new_workflow_version:
# use the timestamp
if "timestamp" in detachedObjs and "timestamp" in analysisObj:
saved_timestamp = dateutil.parser.parse(analysisObj["timestamp"])
new_timestamp = dateutil.parser.parse(detachedObjs["timestamp"])
timestamp_diff = saved_timestamp - new_timestamp
if timestamp_diff.total_seconds() < 0:
sample["analysis"].remove(analysisObj)
sample["analysis"].append(detachedObjs)
elif semver.compare(saved_version, new_workflow_version) == -1:
sample["analysis"].remove(analysisObj)
sample["analysis"].append(detachedObjs)
#if semver.compare(saved_version, new_workflow_version) == 0:
timestamp_diff = donor_timestamp - de_timestamp
if timestamp_diff.total_seconds() < 0:
donor_obj["timestamp"] = detachedObjs["timestamp"]
def mergeDonors(metadataObjs, preserve_version):
'''
Merge data bundle metadata.json objects into correct donor objects.
'''
donorMapping = {}
uuid_to_timestamp={}
for metaObj in metadataObjs:
# check if donor exists
donor_uuid = metaObj["donor_uuid"]
if not donor_uuid in donorMapping:
donorMapping[donor_uuid] = metaObj
uuid_to_timestamp[donor_uuid]= [metaObj["timestamp"]]
continue
# check if specimen exists
donorObj = donorMapping[donor_uuid]
for specimen in metaObj["specimen"]:
specimen_uuid = specimen["specimen_uuid"]
savedSpecUuids = set()
for savedSpecObj in donorObj["specimen"]:
savedSpecUuid = savedSpecObj["specimen_uuid"]
savedSpecUuids.add(savedSpecUuid)
if specimen_uuid == savedSpecUuid:
specObj = savedSpecObj
if not specimen_uuid in savedSpecUuids:
donorObj["specimen"].append(specimen)
continue
# check if sample exists
for sample in specimen["samples"]:
sample_uuid = sample["sample_uuid"]
savedSampleUuids = set()
for savedSampleObj in specObj["samples"]:
savedSampleUuid = savedSampleObj["sample_uuid"]
savedSampleUuids.add(savedSampleUuid)
if sample_uuid == savedSampleUuid:
sampleObj = savedSampleObj
if not sample_uuid in savedSampleUuids:
specObj["samples"].append(sample)
continue
# check if analysis exists
# need to compare analysis for uniqueness by looking at analysis_type... bundle_uuid is not the right one here.
for bundle in sample["analysis"]:
bundle_uuid = bundle["bundle_uuid"]
analysis_type = bundle["analysis_type"]
savedAnalysisTypes = set()
for savedBundle in sampleObj["analysis"]:
savedAnalysisType = savedBundle["analysis_type"]
savedAnalysisTypes.add(savedAnalysisType)
if analysis_type == savedAnalysisType:
analysisObj = savedBundle
if not analysis_type in savedAnalysisTypes or preserve_version:
sampleObj["analysis"].append(bundle)
# timestamp mapping
if "timestamp" in bundle:
uuid_to_timestamp[donor_uuid].append(bundle["timestamp"])
continue
else:
# compare 2 analysis to keep only most relevant one
# saved is analysisObj
# currently being considered is bundle
new_workflow_version= bundle["workflow_version"]
saved_version= analysisObj["workflow_version"]
# current is older than new
if semver.compare(saved_version, new_workflow_version) == -1:
sampleObj["analysis"].remove(analysisObj)
sampleObj["analysis"].append(bundle)
# timestamp mapping
if "timestamp" in bundle:
uuid_to_timestamp[donor_uuid].append(bundle["timestamp"])
if semver.compare(saved_version, new_workflow_version) == 0:
# use the timestamp to determine which analysis to choose
if "timestamp" in bundle and "timestamp" in analysisObj :
saved_timestamp = dateutil.parser.parse(analysisObj["timestamp"])
new_timestamp= dateutil.parser.parse(bundle["timestamp"])
timestamp_diff = saved_timestamp - new_timestamp
if timestamp_diff.total_seconds() < 0:
sampleObj["analysis"].remove(analysisObj)
sampleObj["analysis"].append(bundle)
# timestamp mapping
if "timestamp" in bundle:
uuid_to_timestamp[donor_uuid].append(bundle["timestamp"])
# Get the most recent timstamp from uuid_to_timestamp(for each donor) and use donorMapping to substitute it
for uuid in uuid_to_timestamp:
timestamp_list= uuid_to_timestamp[uuid]
donorMapping[uuid]["timestamp"] = max(timestamp_list)
return donorMapping
def validate_Donor(uuid_mapping, schema):
"""
Validates each donor object with the schema provided.
"""
valid = []
invalid = []
for uuid in uuid_mapping:
donor_Obj = uuid_mapping[uuid]
if validate_json(donor_Obj, schema):
valid.append(donor_Obj)
else:
invalid.append(donor_Obj)
return valid, invalid
def allHaveItems(lenght):
"""
Returns the value of each flag, based on the lenght of the array in 'missing_items'.
"""
#print ("ALLHAVEITEMS: %s" % lenght)
result= False
if lenght == 0:
result =True
#print "RESULT: %s" % result
return result
def arrayMissingItems(itemsName, regex, items,submitter_specimen_types):
"""
Returns a list of 'sample_uuid' for the analysis that were missing.
"""
return arrayItems(itemsName, regex, items,submitter_specimen_types, True)
def arrayContainingItems(itemsName, regex, items,submitter_specimen_types):
"""
Returns a list of 'sample_uuid' for the analysis that were present.
"""
return arrayItems(itemsName, regex, items,submitter_specimen_types, False)
def arrayItems(itemsName, regex, items,submitter_specimen_types, missing):
"""
Returns a list of 'sample_uuid' for the analysis that were missing.
"""
analysis_type = False
results = []
for specimen in items['specimen']:
if re.search(regex, specimen['submitter_specimen_type']):
submitter_specimen_types.append(specimen['submitter_specimen_type'])
for sample in specimen['samples']:
for analysis in sample['analysis']:
if analysis["analysis_type"] == itemsName:
analysis_type = True
break
if (missing and not analysis_type) or (not missing and analysis_type):
results.append(sample['sample_uuid'])
analysis_type = False
return results
def arrayMissingItemsWorkflow(workflow_name, workflow_version_regex, regex, items,submitter_specimen_types):
"""
Returns a list of 'sample_uuid' for the analysis that were missing.
"""
return arrayItemsWorkflow(workflow_name, workflow_version_regex, regex, items,submitter_specimen_types, True)
def arrayContainingItemsWorkflow(workflow_name, workflow_version_regex, regex, items,submitter_specimen_types):
"""
Returns a list of 'sample_uuid' for the analysis that were present.
"""
return arrayItemsWorkflow(workflow_name, workflow_version_regex, regex, items,submitter_specimen_types, False)
def arrayItemsWorkflow(workflow_name, workflow_version_regex, regex, items,submitter_specimen_types, missing):
"""
Returns a list of 'sample_uuid' for the analysis that were missing.
"""
analysis_type = False
results = []
for specimen in items['specimen']:
if re.search(regex, specimen['submitter_specimen_type']):
submitter_specimen_types.append(specimen['submitter_specimen_type'])
for sample in specimen['samples']:
for analysis in sample['analysis']:
if analysis["workflow_name"] == workflow_name and re.search(workflow_version_regex, analysis["workflow_version"]):
analysis_type = True
break
if (missing and not analysis_type) or (not missing and analysis_type):
results.append(sample['sample_uuid'])
analysis_type = False
return results
def createFlags(uuid_to_donor):
"""
uuid_to_donor: dictionary that maps uuid with its json object.
Creates and adds "flags" and "missing_items" to each donor object.
"""
for uuid in uuid_to_donor:
json_object = uuid_to_donor[uuid]
submitter_specimen_types=[]
flagsWithArrs = {'normal_sequence': arrayMissingItems('sequence_upload', "^Normal - ", json_object,submitter_specimen_types),
'tumor_sequence': arrayMissingItems('sequence_upload',
"^Primary tumour - |^Recurrent tumour - |^Metastatic tumour - |^Xenograft - |^Cell line - ",
json_object,submitter_specimen_types),
'normal_sequence_qc_report': arrayMissingItems('sequence_upload_qc_report', "^Normal - ", json_object,submitter_specimen_types),
'tumor_sequence_qc_report': arrayMissingItems('sequence_upload_qc_report',
"^Primary tumour - |^Recurrent tumour - |^Metastatic tumour - |^Xenograft - |^Cell line -",
json_object,submitter_specimen_types),
'normal_alignment': arrayMissingItems('alignment', "^Normal - ", json_object,submitter_specimen_types),
'tumor_alignment': arrayMissingItems('alignment',
"^Primary tumour - |^Recurrent tumour - |^Metastatic tumour - |^Xenograft - |^Cell line -",
json_object,submitter_specimen_types),
'normal_alignment_qc_report': arrayMissingItems('alignment_qc_report', "^Normal - ", json_object,submitter_specimen_types),
'tumor_alignment_qc_report': arrayMissingItems('alignment_qc_report',
"^Primary tumour - |^Recurrent tumour - |^Metastatic tumour - |^Xenograft - |^Cell line -",
json_object,submitter_specimen_types),
'normal_rna_seq_quantification': arrayMissingItems('rna_seq_quantification', "^Normal - ", json_object,submitter_specimen_types),
'tumor_rna_seq_quantification': arrayMissingItems('rna_seq_quantification',
"^Primary tumour - |^Recurrent tumour - |^Metastatic tumour - |^Xenograft - |^Cell line -",
json_object,submitter_specimen_types),
'normal_rna_seq_cgl_workflow_3_0_x': arrayMissingItemsWorkflow('quay.io/ucsc_cgl/rnaseq-cgl-pipeline', '3\.0\.', "^Normal - ", json_object,submitter_specimen_types),
'tumor_rna_seq_cgl_workflow_3_0_x': arrayMissingItemsWorkflow('quay.io/ucsc_cgl/rnaseq-cgl-pipeline', '3\.0\.',
"^Primary tumour - |^Recurrent tumour - |^Metastatic tumour - |^Xenograft - |^Cell line -",
json_object,submitter_specimen_types),
'normal_rna_seq_cgl_workflow_3_1_x': arrayMissingItemsWorkflow('quay.io/ucsc_cgl/rnaseq-cgl-pipeline', '3\.1\.', "^Normal - ", json_object,submitter_specimen_types),
'tumor_rna_seq_cgl_workflow_3_1_x': arrayMissingItemsWorkflow('quay.io/ucsc_cgl/rnaseq-cgl-pipeline', '3\.1\.',
"^Primary tumour - |^Recurrent tumour - |^Metastatic tumour - |^Xenograft - |^Cell line -",
json_object,submitter_specimen_types),
'normal_rna_seq_cgl_workflow_3_2_x': arrayMissingItemsWorkflow('quay.io/ucsc_cgl/rnaseq-cgl-pipeline', '3\.2\.', "^Normal - ", json_object,submitter_specimen_types),
'tumor_rna_seq_cgl_workflow_3_2_x': arrayMissingItemsWorkflow('quay.io/ucsc_cgl/rnaseq-cgl-pipeline', '3\.2\.',
"^Primary tumour - |^Recurrent tumour - |^Metastatic tumour - |^Xenograft - |^Cell line -",
json_object,submitter_specimen_types),
'normal_protect_cgl_workflow_2_3_x': arrayMissingItemsWorkflow('quay.io/ucsc_cgl/protect', '2\.3\.', "^Normal - ", json_object,submitter_specimen_types),
'tumor_protect_cgl_workflow_2_3_x': arrayMissingItemsWorkflow('quay.io/ucsc_cgl/protect', '2\.3\.',
"^Primary tumour - |^Recurrent tumour - |^Metastatic tumour - |^Xenograft - |^Cell line -",
json_object,submitter_specimen_types),
'normal_cnv_workflow': arrayContainingItemsWorkflow('https://github.com/BD2KGenomics/dockstore_workflow_cnv', '1\.0\.', "^Normal - ", json_object,submitter_specimen_types),
'tumor_cnv_workflow': arrayContainingItemsWorkflow('https://github.com/BD2KGenomics/dockstore_workflow_cnv', '1\.0\.',
"^Primary tumour - |^Recurrent tumour - |^Metastatic tumour - |^Xenograft - |^Cell line -",
json_object,submitter_specimen_types),
'normal_germline_variants': arrayMissingItems('germline_variant_calling', "^Normal - ", json_object,submitter_specimen_types),
'tumor_somatic_variants': arrayMissingItems('somatic_variant_calling',
"^Primary tumour - |^Recurrent tumour - |^Metastatic tumour - |^Xenograft - |^Cell line -",
json_object,submitter_specimen_types)}
flagsPresentWithArrs = {'normal_sequence': arrayContainingItems('sequence_upload', "^Normal - ", json_object,submitter_specimen_types),
'tumor_sequence': arrayContainingItems('sequence_upload',
"^Primary tumour - |^Recurrent tumour - |^Metastatic tumour - |^Xenograft - |^Cell line -",
json_object,submitter_specimen_types),
'normal_sequence_qc_report': arrayContainingItems('sequence_upload_qc_report', "^Normal - ", json_object,submitter_specimen_types),
'tumor_sequence_qc_report': arrayContainingItems('sequence_upload_qc_report',
"^Primary tumour - |^Recurrent tumour - |^Metastatic tumour - |^Xenograft - |^Cell line -",
json_object,submitter_specimen_types),
'normal_alignment': arrayContainingItems('alignment', "^Normal - ", json_object,submitter_specimen_types),
'tumor_alignment': arrayContainingItems('alignment',
"^Primary tumour - |^Recurrent tumour - |^Metastatic tumour - |^Xenograft - |^Cell line -",
json_object,submitter_specimen_types),
'normal_alignment_qc_report': arrayContainingItems('alignment_qc_report', "^Normal - ", json_object,submitter_specimen_types),
'tumor_alignment_qc_report': arrayContainingItems('alignment_qc_report',
"^Primary tumour - |^Recurrent tumour - |^Metastatic tumour - |^Xenograft - |^Cell line -",
json_object,submitter_specimen_types),
'normal_rna_seq_quantification': arrayContainingItems('rna_seq_quantification', "^Normal - ", json_object,submitter_specimen_types),
'tumor_rna_seq_quantification': arrayContainingItems('rna_seq_quantification',
"^Primary tumour - |^Recurrent tumour - |^Metastatic tumour - |^Xenograft - |^Cell line -",
json_object,submitter_specimen_types),
'normal_rna_seq_cgl_workflow_3_0_x': arrayContainingItemsWorkflow('quay.io/ucsc_cgl/rnaseq-cgl-pipeline', '3\.0\.', "^Normal - ", json_object,submitter_specimen_types),
'tumor_rna_seq_cgl_workflow_3_0_x': arrayContainingItemsWorkflow('quay.io/ucsc_cgl/rnaseq-cgl-pipeline', '3\.0\.',
"^Primary tumour - |^Recurrent tumour - |^Metastatic tumour - |^Xenograft - |^Cell line -",
json_object,submitter_specimen_types),
'normal_rna_seq_cgl_workflow_3_1_x': arrayContainingItemsWorkflow('quay.io/ucsc_cgl/rnaseq-cgl-pipeline', '3\.1\.', "^Normal - ", json_object,submitter_specimen_types),
'tumor_rna_seq_cgl_workflow_3_1_x': arrayContainingItemsWorkflow('quay.io/ucsc_cgl/rnaseq-cgl-pipeline', '3\.1\.',
"^Primary tumour - |^Recurrent tumour - |^Metastatic tumour - |^Xenograft - |^Cell line -",
json_object,submitter_specimen_types),
'normal_rna_seq_cgl_workflow_3_2_x': arrayContainingItemsWorkflow('quay.io/ucsc_cgl/rnaseq-cgl-pipeline', '3\.2\.', "^Normal - ", json_object,submitter_specimen_types),
'tumor_rna_seq_cgl_workflow_3_2_x': arrayContainingItemsWorkflow('quay.io/ucsc_cgl/rnaseq-cgl-pipeline', '3\.2\.',
"^Primary tumour - |^Recurrent tumour - |^Metastatic tumour - |^Xenograft - |^Cell line -",
json_object,submitter_specimen_types),
'normal_protect_cgl_workflow_2_3_x': arrayContainingItemsWorkflow('quay.io/ucsc_cgl/protect', '2\.3\.', "^Normal - ", json_object,submitter_specimen_types),
'tumor_protect_cgl_workflow_2_3_x': arrayContainingItemsWorkflow('quay.io/ucsc_cgl/protect', '2\.3\.',
"^Primary tumour - |^Recurrent tumour - |^Metastatic tumour - |^Xenograft - |^Cell line -",
json_object,submitter_specimen_types),
'normal_cnv_workflow': arrayContainingItemsWorkflow('https://github.com/BD2KGenomics/dockstore_workflow_cnv', '1\.0\.', "^Normal - ", json_object,submitter_specimen_types),
'tumor_cnv_workflow': arrayContainingItemsWorkflow('https://github.com/BD2KGenomics/dockstore_workflow_cnv', '1\.0\.',
"^Primary tumour - |^Recurrent tumour - |^Metastatic tumour - |^Xenograft - |^Cell line -",
json_object,submitter_specimen_types),
'normal_germline_variants': arrayContainingItems('germline_variant_calling', "^Normal - ", json_object,submitter_specimen_types),
'tumor_somatic_variants': arrayContainingItems('somatic_variant_calling',
"^Primary tumour - |^Recurrent tumour - |^Metastatic tumour - |^Xenograft - |^Cell line -",
json_object,submitter_specimen_types)}
flagsWithStr = {'normal_sequence' : len(flagsWithArrs["normal_sequence"]) == 0 and len(flagsPresentWithArrs["normal_sequence"]) > 0,
'normal_sequence_qc_report' : len(flagsWithArrs["normal_sequence_qc_report"]) == 0 and len(flagsPresentWithArrs["normal_sequence_qc_report"]) > 0,
'tumor_sequence': len(flagsWithArrs["tumor_sequence"]) == 0 and len(flagsPresentWithArrs["tumor_sequence"]) > 0,
'tumor_sequence_qc_report' :len(flagsWithArrs["tumor_sequence_qc_report"]) == 0 and len(flagsPresentWithArrs["tumor_sequence_qc_report"]) > 0,
'normal_alignment': len(flagsWithArrs["normal_alignment"]) == 0 and len(flagsPresentWithArrs["normal_alignment"]) > 0,
'normal_alignment_qc_report': len(flagsWithArrs["normal_alignment_qc_report"]) == 0 and len(flagsPresentWithArrs["normal_alignment_qc_report"]) > 0,
'tumor_alignment': len(flagsWithArrs["tumor_alignment"]) == 0 and len(flagsPresentWithArrs["tumor_alignment"]) > 0,
'tumor_alignment_qc_report': len(flagsWithArrs["tumor_alignment_qc_report"]) == 0 and len(flagsPresentWithArrs["tumor_alignment_qc_report"]) > 0,
'normal_rna_seq_quantification': len(flagsWithArrs["normal_rna_seq_quantification"]) == 0 and len(flagsPresentWithArrs["normal_rna_seq_quantification"]) > 0,
'tumor_rna_seq_quantification': len(flagsWithArrs["tumor_rna_seq_quantification"]) == 0 and len(flagsPresentWithArrs["tumor_rna_seq_quantification"]) > 0,
'normal_rna_seq_cgl_workflow_3_0_x': len(flagsWithArrs["normal_rna_seq_cgl_workflow_3_0_x"]) == 0 and len(flagsPresentWithArrs["normal_rna_seq_cgl_workflow_3_0_x"]) > 0,
'tumor_rna_seq_cgl_workflow_3_0_x': len(flagsWithArrs["tumor_rna_seq_cgl_workflow_3_0_x"]) == 0 and len(flagsPresentWithArrs["tumor_rna_seq_cgl_workflow_3_0_x"]) > 0,
'normal_rna_seq_cgl_workflow_3_1_x': len(flagsWithArrs["normal_rna_seq_cgl_workflow_3_1_x"]) == 0 and len(flagsPresentWithArrs["normal_rna_seq_cgl_workflow_3_1_x"]) > 0,
'tumor_rna_seq_cgl_workflow_3_1_x': len(flagsWithArrs["tumor_rna_seq_cgl_workflow_3_1_x"]) == 0 and len(flagsPresentWithArrs["tumor_rna_seq_cgl_workflow_3_1_x"]) > 0,
'normal_rna_seq_cgl_workflow_3_2_x': len(flagsWithArrs["normal_rna_seq_cgl_workflow_3_2_x"]) == 0 and len(flagsPresentWithArrs["normal_rna_seq_cgl_workflow_3_2_x"]) > 0,
'tumor_rna_seq_cgl_workflow_3_2_x': len(flagsWithArrs["tumor_rna_seq_cgl_workflow_3_2_x"]) == 0 and len(flagsPresentWithArrs["tumor_rna_seq_cgl_workflow_3_2_x"]) > 0,
'normal_protect_cgl_workflow_2_3_x': len(flagsWithArrs["normal_protect_cgl_workflow_2_3_x"]) == 0 and len(flagsPresentWithArrs["normal_protect_cgl_workflow_2_3_x"]) > 0,
'tumor_protect_cgl_workflow_2_3_x': len(flagsWithArrs["tumor_protect_cgl_workflow_2_3_x"]) == 0 and len(flagsPresentWithArrs["tumor_protect_cgl_workflow_2_3_x"]) > 0,
'normal_cnv_workflow': len(flagsWithArrs["normal_cnv_workflow"]) == 0 and len(flagsPresentWithArrs["normal_cnv_workflow"]) > 0,
'tumor_cnv_workflow': len(flagsWithArrs["tumor_cnv_workflow"]) == 0 and len(flagsPresentWithArrs["tumor_cnv_workflow"]) > 0,
'normal_germline_variants': len(flagsWithArrs["normal_germline_variants"]) == 0 and len(flagsPresentWithArrs["normal_germline_variants"]) > 0,
'tumor_somatic_variants': len(flagsWithArrs["tumor_somatic_variants"]) == 0 and len(flagsPresentWithArrs["tumor_somatic_variants"]) > 0}
json_object['flags'] = flagsWithStr
json_object['missing_items'] = flagsWithArrs
json_object['present_items'] = flagsPresentWithArrs
def dumpResult(result, filename, ES_file_name="elasticsearch.jsonl"):
"""
Creates the .jsonl files.
"""
global index_index
for donor in result:
if filename not in first_write:
with open(filename, 'w') as outfile:
if filename == ES_file_name:
outfile.write('{"index":{"_id":"' + str(index_index) + '","_type":"meta"}}\n')
index_index += 1
json.dump(donor, outfile)
outfile.write('\n')
first_write[filename] = "true"
else:
with open(filename, 'a') as outfile:
if filename == ES_file_name:
outfile.write('{"index":{"_id":"' + str(index_index) + '","_type":"meta"}}\n')
index_index += 1
json.dump(donor, outfile)
outfile.write('\n')
def findRedactedUuids(skip_uuid_directory):
"""
Creates a dict of file UUIDs that need to be skipped
"""
result = {}
if skip_uuid_directory is not None:
for file in os.listdir(skip_uuid_directory):
if file.endswith(".redacted"):
current_file = os.path.join(skip_uuid_directory, file)
f = open(current_file, "r")
for line in f.readlines():
result[line.rstrip()] = True
f.close()
print result
return result
def main():
args = input_Options()
directory_meta = args.test_directory
# redacted metadata.json file UUIDs
skip_uuid_directory = args.skip_uuid_directory
skip_uuids = findRedactedUuids(skip_uuid_directory)
preserve_version = args.preserve_version
logfileName = os.path.basename(__file__).replace(".py", ".log")
logging_format= '%(asctime)s - %(levelname)s: %(message)s'
logging.basicConfig(filename=logfileName, level=logging.DEBUG, format=logging_format, datefmt='%m/%d/%Y %I:%M:%S %p')
if not directory_meta:
#Getting the File UUIDs
requires(args.server_host)
#Get the size listing
file_uuid_and_size = get_size_list(args.storage_access_token, args.server_host)
#Trying to download the data.
last= False
page=0
obj_arr=[]
# figure out the pages
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
json_str = urlopen(str("https://"+args.server_host+":8444/entities?fileName=metadata.json&page=0"), context=ctx).read()
metadata_struct = json.loads(json_str)
# Download all of the data that is stored.
if args.max_pages is not None:
metadata_struct["totalPages"] = int(args.max_pages)
for page in range(0, metadata_struct["totalPages"]):
print "DOWNLOADING PAGE "+str(page)
meta_cmd= ["curl", "-k"]
url= 'https://'+args.server_host+':8444/entities?fileName=metadata.json&page='
new_url= url + str(page)
meta_cmd.append(new_url)
c_data=Popen(meta_cmd, stdout=PIPE, stderr=PIPE)
stdout, stderr = c_data.communicate()
json_obj= json.loads(stdout)
last = json_obj["last"]
obj_arr.append(json_obj)
# Create a mapping of all the data provided from the endpoint.
id_to_content= endpint_mapping(obj_arr)
# Download the metadata.json files using the id stored in id_to_content dictionary
directory_meta= make_output_dir()
access_Token=args.storage_access_token
client_Path= args.client_path
create_merge_input_folder(id_to_content, directory_meta,access_Token,client_Path, file_uuid_and_size)
# END DOWNLOAD
# BEGIN json Merge
logging.info("Begin Merging.")
print "Begin Merging."
schema = load_json_obj(args.metadata_schema)
#if there is no schema the program cannot continue.
if schema == None:
logging.critical("No metadata schema was recognized. Exiting program.")
exit(1)
schema_version= schema["definitions"]["schema_version"]["pattern"]
#sche_version= schema_version.replace("^","")
#schema_version= sche_version.replace("$","")
logging.info("Schema Version: %s" % schema_version)
print "Schema Version: ",schema_version
data_arr = []
# Loads the json files and stores them into an array.
load_json_arr(directory_meta, data_arr, skip_uuids)
donorLevelObjs = []
detachedObjs = []
# Separates the detached anlaysis obj from the donor obj.
for metaobj in data_arr:
if "donor_uuid" in metaobj:
donorLevelObjs.append(metaobj)
elif "parent_uuids" in metaobj:
detachedObjs.append(metaobj)
# Skip Program Test Option.
skip_prog_option= args.skip_program
if skip_prog_option:
logging.info("Skip Programs with values: %s" % (skip_prog_option))
print "Skip Programs with values: %s" % (skip_prog_option)
skip_option(donorLevelObjs, skip_prog_option,'program')
# Use Only Program Test Option.
only_program_option= args.only_program
if only_program_option:
logging.info("Only use Programs with values: %s" % (only_program_option))
print "Only use Programs with values: %s" % (only_program_option)
only_option(donorLevelObjs,only_program_option,'program')
# Skip Program Test Option.
skip_project_option= args.skip_project
if skip_project_option:
logging.info("Skip Projects with values: %s" % (skip_project_option))
print "Skip Projects with values: %s" % (skip_project_option)
skip_option(donorLevelObjs, skip_project_option,"project")
# Use Only Program Test Option.
only_project_option= args.only_project
if only_project_option:
logging.info("Only use Projects with values: %s" % (only_project_option))
print "Only use Projects with values: %s" % (only_project_option)
only_option(donorLevelObjs,only_project_option,"project")
# Merge only those that are of the same schema_version as the Schema.
invalid_version_arr= []
valid_version_arr= []
for donor_object in donorLevelObjs:
obj_schema_version= donor_object["schema_version"]
p = re.compile(schema_version)
if not p.match(obj_schema_version):
invalid_version_arr.append(donor_object)
else:
valid_version_arr.append(donor_object)
logging.info("%s valid donor objects with correct schema version." % str(len(valid_version_arr)))
print len(valid_version_arr), " valid donor objects with correct schema version."
# Inserts the detached analysis to the merged donor obj.
uuid_mapping = mergeDonors(valid_version_arr, preserve_version)
for de_obj in detachedObjs:
insert_detached_metadata(de_obj, uuid_mapping, preserve_version)
# Creates and adds the flags and missingItems to each donor obj.
createFlags(uuid_mapping)
# Validates each donor obj
(validated, invalid) = validate_Donor(uuid_mapping,schema)
# Check if there are invalid json objects.
invalid_num= len(invalid)
if invalid_num:
logging.info("%s merged donor objects invalid." % (invalid_num))
print "%s merged donor objects invalid." % (invalid_num)
dumpResult(invalid, "invalid.jsonl")
logging.info("Invalid merged objects in invalid.jsonl.")
print "Invalid merged objects in invalid.jsonl. "
# Creates the jsonl files .
validated_num= len(validated)
if validated_num:
logging.info("%s merged json objects were valid." % (validated_num))
print "%s merged json objects were valid." % (validated_num)
if preserve_version:
dumpResult(validated, "duped_validated.jsonl")
dumpResult(validated, 'duped_elasticsearch.jsonl', ES_file_name="duped_elasticsearch.jsonl")
logging.info("All done, find index in duped_elasticsearch.jsonl")
print "All done, find index in duped_elasticsearch.jsonl"
else:
dumpResult(validated, "validated.jsonl")
dumpResult(validated, 'elasticsearch.jsonl')
logging.info("All done, find index in elasticsearch.jsonl")
print "All done, find index in elasticsearch.jsonl"
if not validated:
logging.info("No objects were merged.")
print "No objects were merged."
if __name__ == "__main__":
main()
| apache-2.0 | 6,270,652,349,861,014,000 | 53.304656 | 267 | 0.575774 | false | 4.047756 | false | false | false |
marmarek/qubes-core-mgmt-client | qubesadmin/tools/qvm_features.py | 1 | 3066 | # coding=utf-8
#
# The Qubes OS Project, https://www.qubes-os.org/
#
# Copyright (C) 2010-2016 Joanna Rutkowska <[email protected]>
# Copyright (C) 2016 Wojtek Porczyk <[email protected]>
# Copyright (C) 2017 Marek Marczykowski-Górecki
# <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
'''qvm-features - Manage domain's features'''
from __future__ import print_function
import sys
import qubesadmin
import qubesadmin.tools
parser = qubesadmin.tools.QubesArgumentParser(
vmname_nargs=1,
description='manage domain\'s features')
parser.add_argument('feature', metavar='FEATURE',
action='store', nargs='?',
help='name of the feature')
parser.add_argument('value', metavar='VALUE',
action='store', nargs='?',
help='new value of the feature')
parser.add_argument('--unset', '--default', '--delete', '-D',
dest='delete',
action='store_true',
help='unset the feature')
def main(args=None, app=None):
'''Main routine of :program:`qvm-features`.
:param list args: Optional arguments to override those delivered from \
command line.
'''
args = parser.parse_args(args, app=app)
vm = args.domains[0]
if args.feature is None:
if args.delete:
parser.error('--unset requires a feature')
try:
features = [(feat, vm.features[feat]) for feat in vm.features]
qubesadmin.tools.print_table(features)
except qubesadmin.exc.QubesException as e:
parser.error_runtime(e)
elif args.delete:
if args.value is not None:
parser.error('cannot both set and unset a value')
try:
del vm.features[args.feature]
except KeyError:
pass
except qubesadmin.exc.QubesException as e:
parser.error_runtime(e)
elif args.value is None:
try:
print(vm.features[args.feature])
return 0
except KeyError:
return 1
except qubesadmin.exc.QubesException as e:
parser.error_runtime(e)
else:
try:
vm.features[args.feature] = args.value
except qubesadmin.exc.QubesException as e:
parser.error_runtime(e)
return 0
if __name__ == '__main__':
sys.exit(main())
| lgpl-2.1 | 2,822,929,185,765,776,400 | 29.65 | 80 | 0.649918 | false | 3.769988 | false | false | false |
matt-hayden/dedup | futil.py | 1 | 4783 | #! /usr/bin/env python3
"""
"""
from datetime import datetime
import os, os.path
import tarfile
import zipfile
#from . import *
from __init__ import *
import characterize
def comm(lhs, rhs):
"""Returns (left-only, common, right-only)
"""
com = lhs & rhs
return (lhs-com), com, (rhs-com)
def cmp_stat(lhs, rhs):
if lhs.st_size == rhs.st_size:
if lhs.st_dev == rhs.st_dev:
if lhs.st_ino == rhs.st_ino:
assert lhs.st_mtime == rhs.st_mtime
return 0
if lhs.st_mtime < rhs.st_mtime:
return 1
if lhs.st_size < rhs.st_size:
return 1
return -1
def get_match_code(lhs, rhs):
_, com, _ = comm(lhs, rhs)
return pack_match_code(com)
class Comparable:
"""stat, sums
"""
def __eq__(self, other):
if hasattr(self, 'stat') and hasattr(other, 'stat'):
if (cmp_stat(self.stat, other.stat) == 0):
return True
if self.matches(other):
return True
return False
def matches(self, other):
return 1 <= self.get_match_value(other)
def get_match_value(self, other, divisor=float(THRESHOLD_FOR_EQUALITY)):
if isinstance(other, Comparable):
mc = get_match_code(self.sums, other.sums)
else:
mc = get_match_code(self.sums, other)
return mc/divisor
def __and__(self, other):
if isinstance(other, Comparable):
return self.matches(other)
else:
return self.sums & set(other)
def __ior__(self, other):
# TODO: conservative
assert self.stat == other.stat
self.sums |= other.sums
return self
class FileObj(Comparable):
def __init__(self, my_stat):
self.members = []
self.is_dup = None
if my_stat:
self.datetime = datetime.fromtimestamp(my_stat.st_mtime)
self.size = my_stat.st_size
self.stat = my_stat
else:
self.datetime = ()
self.size = None
self.stat = ()
def get_flags(self):
if hasattr(self, 'size'):
if self.size in (0, None):
yield '0'
if hasattr(self, 'sums'):
for tup in self.sums:
label = tup[0]
if 'TOTAL' in label:
try:
s = len(tup[-1])
if 10 < s:
yield 'H{}'.format(s)
except TypeError:
pass
continue
yield ' '
for tup in self.sums:
label = tup[0]
if 'FINGERPRINT' in label:
yield 'f'
elif 'BW' in label:
yield 't'
elif 'COLOR' in label:
yield 't'
if hasattr(self, 'members'):
if self.members:
yield 'a'
if hasattr(self, 'is_dup'):
if self.is_dup:
yield 'D'
def describe(self):
return [ self.datetime or '',
self.size,
''.join(self.get_flags()) ]
def __repr__(self):
return "<File {1:,} b modified {0:%c} flags '{2}'>".format(*self.describe())
def __str__(self):
blank = ' '
parts = zip(('{:%c}', '{:12d}', '{:>10}'),
self.describe(),
(24, 12, 10))
return blank.join( (fs.format(s) if s else blank*fl) for fs, s, fl in parts)
def get_file_info(arg, sums=None, method=characterize.fast, method_for_archives=characterize.exhaustive):
row = FileObj(STAT(arg))
row.filename = arg
if sums:
row.sums = sums
else:
c = method(arg, size_hint=row.size)
row.sums = set(c)
if tarfile.is_tarfile(arg):
row.members = dict(expand_tarfile(arg, method=method_for_archives))
elif zipfile.is_zipfile(arg):
row.members = dict(expand_zipinfo(arg, method=method_for_archives))
return row
class ZipFileObj(FileObj):
def __init__(self, zi):
self.members = None
# zi is a ZipInfo object
dt = datetime(*zi.date_time)
self.datetime = dt if (datetime(1980, 1, 1) < dt) else None
self.filename = zi.filename
self.size = zi.file_size
self.volume = zi.volume
def expand_zipinfo(arg, method=characterize.fast):
with zipfile.ZipFile(arg) as zf:
for internal_f in zf.infolist():
if internal_f.filename.endswith('/'): # dirs end in / across platforms?
continue
row = ZipFileObj(internal_f)
if row.size == 0:
continue
row.sums = set( method(zf.open(internal_f), size_hint=row.size) )
row.sums.update( [ (('TOTAL', 'CRC'), hex(internal_f.CRC)) ] )
yield os.path.join(arg, row.filename), row
class TarFileObj(FileObj):
def __init__(self, ti):
self.members = None
self.datetime = datetime.fromtimestamp(ti.mtime)
self.filename = ti.name
self.size = ti.size
def expand_tarfile(arg, method=characterize.fast, ignore_symlinks=True):
"""
st_mode, st_ino, st_dev, st_nlink, st_uid, st_gid, st_size, st_atime, st_mtime, st_ctime
"""
with tarfile.open(arg) as tf:
for internal_f in tf.getmembers():
if not internal_f.isfile():
continue
# internal_f also has islnk() and issym()
if ignore_symlinks and internal_f.issym():
continue
row = TarFileObj(internal_f)
if not row.size:
continue
row.sums = set( method(internal_f.tobuf(), size_hint=row.size) )
yield os.path.join(arg, row.filename), row
# vim: tabstop=4 shiftwidth=4 softtabstop=4 number :
| unlicense | -1,474,127,793,389,779,500 | 24.441489 | 105 | 0.639557 | false | 2.690101 | false | false | false |
kburts/drf-music | Backend/api/tasks.py | 1 | 2798 | import re
import requests
from music.celery import app
from .models import Playlist, Song, User
@app.task
def task():
print 'Hello, world!'
return 'Returned hello!'
@app.task
def create_playlist_from_yt(url, user):
"""
Generate a playlist and populate it from a url to a youtube playlist
args:
url: youtube playlist url, examples:
https://www.youtube.com/watch?v=PpJVIhidBXM&index=15&list=PLXNnxXrfrLitw1tTuUFigZhY4C2FZhvLe
https://www.youtube.com/watch?v=k7Z7USWo2Lk&list=PLXNnxXrfrLitw1tTuUFigZhY4C2FZhvLe&index=18
user: username (required)
title: title of the playlist (default title from youtube)
description: description of playlist (default auto-generated playlist from a youtube playlist url.)
"""
playlist_id = re.search('list=[\w_-]+', url) # \w, _ or -
playlist_id = playlist_id.group()[5:]
if playlist_id is None:
print 'No youtube playlist ID found in URL (should contain list=\\w+)'
return 1
# Make youtube api request
api_key = "AIzaSyBvdmvgZzy3N59lM4pp_0L2h8u5cPD17ro"
data = get_videos_from_playlist(playlist_id, api_key)
songs_to_add = []
playlist_title = requests.get((
"https://www.googleapis.com/youtube/v3/playlists?part=snippet"
"&id={0}"
"&key={1}"
).format(playlist_id, api_key)).json()['items'][0]['snippet']['title']
user = User.objects.get(username=user)
playlist = Playlist(
title = playlist_title,
description = "auto-generated playlist from a youtube playlist url.",
user = user)
playlist.save()
for item in data:
s = Song(
name = item[0],
url = "https://www.youtube.com/watch?v=%s" %item[1],
added_by = user
)
s.save()
songs_to_add.append(s)
playlist.songs.add(*songs_to_add)
return playlist.id
def get_videos_from_playlist(playlist_id, api_key):
"""
Returns a list of tuples: [(title: video_id)...] of youtube videos and their youtube id's
args:
playlist_id -- Id of youtube playlist (eg. PLXNnxXrfrLitw1tTuUFigZhY4C2FZhvLe)
api_key -- youtube api key
"""
page_token = ""
videos = []
while True:
# Request
url = (
"https://www.googleapis.com/youtube/v3/playlistItems?"
"part=snippet"
"&playlistId={0}"
"&pageToken={1}"
"&key={2}"
).format(playlist_id, page_token, api_key)
data = requests.get(url).json()
for item in data['items']:
videos.append((item['snippet']['title'], item['snippet']['resourceId']['videoId']))
if not 'nextPageToken' in data.keys():
break
else:
page_token = data['nextPageToken']
return videos | mit | -825,444,804,723,014,100 | 31.172414 | 103 | 0.613295 | false | 3.399757 | false | false | false |
anchore/anchore | anchore/anchore-modules/examples/gates/gate-template.py | 1 | 1045 | #!/usr/bin/env python
import sys
import os
import json
import re
import anchore
from anchore import anchore_utils
gate_name = "GATENAMEHERE"
triggers = {
'TRIGGER1':
{
'description':'triggers if this happens',
'params':'TRIGGER1_PARAMS'
},
'TRIGGER2':
{
'description':'triggers if that happens',
'params':'None'
},
}
try:
config = anchore.anchore_utils.init_gate_cmdline(sys.argv, gate_name, gate_help=triggers)
except Exception as err:
print str(err)
sys.exit(1)
if not config:
print "ERROR: could not set up environment for gate"
sys.exit(1)
imgid = config['imgid']
try:
params = config['params']
except:
params = None
outlist = list()
# do somthing
try:
image = anchore.anchore_image.AnchoreImage(imgid, allimages={})
#outlist.append("TRIGGER1 Some text")
except Exception as err:
#print "ERROR: could not do something" + str(err)
exit(1)
# write output
anchore.anchore_utils.save_gate_output(imgid, gate_name, outlist)
sys.exit(0)
| apache-2.0 | -2,019,843,133,459,232,000 | 18.716981 | 93 | 0.661244 | false | 3.10089 | false | false | false |
bilke/OpenSG-1.8 | SConsLocal/scons-local-0.96.1/SCons/Environment.py | 2 | 48546 | """SCons.Environment
Base class for construction Environments. These are
the primary objects used to communicate dependency and
construction information to the build engine.
Keyword arguments supplied when the construction Environment
is created are construction variables used to initialize the
Environment
"""
#
# Copyright (c) 2001, 2002, 2003, 2004 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "/home/scons/scons/branch.0/baseline/src/engine/SCons/Environment.py 0.96.1.D001 2004/08/23 09:55:29 knight"
import copy
import os
import os.path
import string
from UserDict import UserDict
import SCons.Action
import SCons.Builder
from SCons.Debug import logInstanceCreation
import SCons.Defaults
import SCons.Errors
import SCons.Node
import SCons.Node.Alias
import SCons.Node.FS
import SCons.Node.Python
import SCons.Platform
import SCons.SConsign
import SCons.Sig
import SCons.Sig.MD5
import SCons.Sig.TimeStamp
import SCons.Tool
import SCons.Util
import SCons.Warnings
class _Null:
pass
_null = _Null
CleanTargets = {}
CalculatorArgs = {}
# Pull UserError into the global name space for the benefit of
# Environment().SourceSignatures(), which has some import statements
# which seem to mess up its ability to reference SCons directly.
UserError = SCons.Errors.UserError
def installFunc(target, source, env):
"""Install a source file into a target using the function specified
as the INSTALL construction variable."""
try:
install = env['INSTALL']
except KeyError:
raise SCons.Errors.UserError('Missing INSTALL construction variable.')
return install(target[0].path, source[0].path, env)
def installString(target, source, env):
return 'Install file: "%s" as "%s"' % (source[0], target[0])
installAction = SCons.Action.Action(installFunc, installString)
InstallBuilder = SCons.Builder.Builder(action=installAction)
def alias_builder(env, target, source):
pass
AliasBuilder = SCons.Builder.Builder(action = alias_builder,
target_factory = SCons.Node.Alias.default_ans.Alias,
source_factory = SCons.Node.FS.default_fs.Entry,
multi = 1)
def our_deepcopy(x):
"""deepcopy lists and dictionaries, and just copy the reference
for everything else."""
if SCons.Util.is_Dict(x):
copy = {}
for key in x.keys():
copy[key] = our_deepcopy(x[key])
elif SCons.Util.is_List(x):
copy = map(our_deepcopy, x)
try:
copy = x.__class__(copy)
except AttributeError:
pass
else:
copy = x
return copy
def apply_tools(env, tools, toolpath):
if tools:
# Filter out null tools from the list.
tools = filter(None, tools)
for tool in tools:
if SCons.Util.is_String(tool):
env.Tool(tool, toolpath)
else:
tool(env)
# These names are controlled by SCons; users should never set or override
# them. This warning can optionally be turned off, but scons will still
# ignore the illegal variable names even if it's off.
reserved_construction_var_names = \
['TARGET', 'TARGETS', 'SOURCE', 'SOURCES']
def copy_non_reserved_keywords(dict):
result = our_deepcopy(dict)
for k in result.keys():
if k in reserved_construction_var_names:
SCons.Warnings.warn(SCons.Warnings.ReservedVariableWarning,
"Ignoring attempt to set reserved variable `%s'" % k)
del result[k]
return result
class BuilderWrapper:
"""Wrapper class that associates an environment with a Builder at
instantiation."""
def __init__(self, env, builder):
self.env = env
self.builder = builder
def __call__(self, *args, **kw):
return apply(self.builder, (self.env,) + args, kw)
# This allows a Builder to be executed directly
# through the Environment to which it's attached.
# In practice, we shouldn't need this, because
# builders actually get executed through a Node.
# But we do have a unit test for this, and can't
# yet rule out that it would be useful in the
# future, so leave it for now.
def execute(self, **kw):
kw['env'] = self.env
apply(self.builder.execute, (), kw)
class BuilderDict(UserDict):
"""This is a dictionary-like class used by an Environment to hold
the Builders. We need to do this because every time someone changes
the Builders in the Environment's BUILDERS dictionary, we must
update the Environment's attributes."""
def __init__(self, dict, env):
# Set self.env before calling the superclass initialization,
# because it will end up calling our other methods, which will
# need to point the values in this dictionary to self.env.
self.env = env
UserDict.__init__(self, dict)
def __setitem__(self, item, val):
UserDict.__setitem__(self, item, val)
try:
self.setenvattr(item, val)
except AttributeError:
# Have to catch this because sometimes __setitem__ gets
# called out of __init__, when we don't have an env
# attribute yet, nor do we want one!
pass
def setenvattr(self, item, val):
"""Set the corresponding environment attribute for this Builder.
If the value is already a BuilderWrapper, we pull the builder
out of it and make another one, so that making a copy of an
existing BuilderDict is guaranteed separate wrappers for each
Builder + Environment pair."""
try:
builder = val.builder
except AttributeError:
builder = val
setattr(self.env, item, BuilderWrapper(self.env, builder))
def __delitem__(self, item):
UserDict.__delitem__(self, item)
delattr(self.env, item)
def update(self, dict):
for i, v in dict.items():
self.__setitem__(i, v)
class Base:
"""Base class for construction Environments. These are
the primary objects used to communicate dependency and
construction information to the build engine.
Keyword arguments supplied when the construction Environment
is created are construction variables used to initialize the
Environment.
"""
#######################################################################
# This is THE class for interacting with the SCons build engine,
# and it contains a lot of stuff, so we're going to try to keep this
# a little organized by grouping the methods.
#######################################################################
#######################################################################
# Methods that make an Environment act like a dictionary. These have
# the expected standard names for Python mapping objects. Note that
# we don't actually make an Environment a subclass of UserDict for
# performance reasons. Note also that we only supply methods for
# dictionary functionality that we actually need and use.
#######################################################################
def __init__(self,
platform=None,
tools=None,
toolpath=[],
options=None,
**kw):
if __debug__: logInstanceCreation(self)
self.fs = SCons.Node.FS.default_fs
self.ans = SCons.Node.Alias.default_ans
self.lookup_list = SCons.Node.arg2nodes_lookups
self._dict = our_deepcopy(SCons.Defaults.ConstructionEnvironment)
self._dict['__env__'] = self
self._dict['BUILDERS'] = BuilderDict(self._dict['BUILDERS'], self)
if platform is None:
platform = self._dict.get('PLATFORM', None)
if platform is None:
platform = SCons.Platform.Platform()
if SCons.Util.is_String(platform):
platform = SCons.Platform.Platform(platform)
self._dict['PLATFORM'] = str(platform)
platform(self)
# Apply the passed-in variables before calling the tools,
# because they may use some of them:
apply(self.Replace, (), kw)
# Update the environment with the customizable options
# before calling the tools, since they may use some of the options:
if options:
options.Update(self)
if tools is None:
tools = self._dict.get('TOOLS', None)
if tools is None:
tools = ['default']
apply_tools(self, tools, toolpath)
# Reapply the passed in variables after calling the tools,
# since they should overide anything set by the tools:
apply(self.Replace, (), kw)
# Update the environment with the customizable options
# after calling the tools, since they should override anything
# set by the tools:
if options:
options.Update(self)
def __cmp__(self, other):
# Since an Environment now has an '__env__' construction variable
# that refers to itself, delete that variable to avoid infinite
# loops when comparing the underlying dictionaries in some Python
# versions (*cough* 1.5.2 *cough*)...
sdict = self._dict.copy()
del sdict['__env__']
odict = other._dict.copy()
del odict['__env__']
return cmp(sdict, odict)
def __getitem__(self, key):
return self._dict[key]
def __setitem__(self, key, value):
if key in reserved_construction_var_names:
SCons.Warnings.warn(SCons.Warnings.ReservedVariableWarning,
"Ignoring attempt to set reserved variable `%s'" % key)
elif key == 'BUILDERS':
try:
bd = self._dict[key]
for k in bd.keys():
del bd[k]
except KeyError:
self._dict[key] = BuilderDict(kwbd, self)
self._dict[key].update(value)
elif key == 'SCANNERS':
self._dict[key] = value
self.scanner_map_delete()
else:
if not SCons.Util.is_valid_construction_var(key):
raise SCons.Errors.UserError, "Illegal construction variable `%s'" % key
self._dict[key] = value
def __delitem__(self, key):
del self._dict[key]
def items(self):
"Emulates the items() method of dictionaries."""
return self._dict.items()
def has_key(self, key):
return self._dict.has_key(key)
def get(self, key, default=None):
"Emulates the get() method of dictionaries."""
return self._dict.get(key, default)
#######################################################################
# Utility methods that are primarily for internal use by SCons.
# These begin with lower-case letters. Note that the subst() method
# is actually already out of the closet and used by people.
#######################################################################
def arg2nodes(self, args, node_factory=_null, lookup_list=_null):
if node_factory is _null:
node_factory = self.fs.File
if lookup_list is _null:
lookup_list = self.lookup_list
if not args:
return []
if SCons.Util.is_List(args):
args = SCons.Util.flatten(args)
else:
args = [args]
nodes = []
for v in args:
if SCons.Util.is_String(v):
n = None
for l in lookup_list:
n = l(v)
if not n is None:
break
if not n is None:
if SCons.Util.is_String(n):
n = self.subst(n, raw=1)
if node_factory:
n = node_factory(n)
if SCons.Util.is_List(n):
nodes.extend(n)
else:
nodes.append(n)
elif node_factory:
v = node_factory(self.subst(v, raw=1))
if SCons.Util.is_List(v):
nodes.extend(v)
else:
nodes.append(v)
else:
nodes.append(v)
return nodes
def get_calculator(self):
try:
return self._calculator
except AttributeError:
try:
module = self._calc_module
c = apply(SCons.Sig.Calculator, (module,), CalculatorArgs)
except AttributeError:
# Note that we're calling get_calculator() here, so the
# DefaultEnvironment() must have a _calc_module attribute
# to avoid infinite recursion.
c = SCons.Defaults.DefaultEnvironment().get_calculator()
self._calculator = c
return c
def get_builder(self, name):
"""Fetch the builder with the specified name from the environment.
"""
try:
return self._dict['BUILDERS'][name]
except KeyError:
return None
def get_scanner(self, skey):
"""Find the appropriate scanner given a key (usually a file suffix).
"""
try:
sm = self.scanner_map
except AttributeError:
try:
scanners = self._dict['SCANNERS']
except KeyError:
self.scanner_map = {}
return None
else:
self.scanner_map = sm = {}
# Reverse the scanner list so that, if multiple scanners
# claim they can scan the same suffix, earlier scanners
# in the list will overwrite later scanners, so that
# the result looks like a "first match" to the user.
if not SCons.Util.is_List(scanners):
scanners = [scanners]
scanners.reverse()
for scanner in scanners:
for k in scanner.get_skeys(self):
sm[k] = scanner
try:
return sm[skey]
except KeyError:
return None
def scanner_map_delete(self, kw=None):
"""Delete the cached scanner map (if we need to).
"""
if not kw is None and not kw.has_key('SCANNERS'):
return
try:
del self.scanner_map
except AttributeError:
pass
def subst(self, string, raw=0, target=None, source=None, dict=None, conv=None):
"""Recursively interpolates construction variables from the
Environment into the specified string, returning the expanded
result. Construction variables are specified by a $ prefix
in the string and begin with an initial underscore or
alphabetic character followed by any number of underscores
or alphanumeric characters. The construction variable names
may be surrounded by curly braces to separate the name from
trailing characters.
"""
return SCons.Util.scons_subst(string, self, raw, target, source, dict, conv)
def subst_kw(self, kw, raw=0, target=None, source=None, dict=None):
nkw = {}
for k, v in kw.items():
k = self.subst(k, raw, target, source, dict)
if SCons.Util.is_String(v):
v = self.subst(v, raw, target, source, dict)
nkw[k] = v
return nkw
def subst_list(self, string, raw=0, target=None, source=None, dict=None, conv=None):
"""Calls through to SCons.Util.scons_subst_list(). See
the documentation for that function."""
return SCons.Util.scons_subst_list(string, self, raw, target, source, dict, conv)
def subst_path(self, path):
"""Substitute a path list, turning EntryProxies into Nodes
and leaving Nodes (and other objects) as-is."""
if not SCons.Util.is_List(path):
path = [path]
def s(obj):
"""This is the "string conversion" routine that we have our
substitutions use to return Nodes, not strings. This relies
on the fact that an EntryProxy object has a get() method that
returns the underlying Node that it wraps, which is a bit of
architectural dependence that we might need to break or modify
in the future in response to additional requirements."""
try:
get = obj.get
except AttributeError:
pass
else:
obj = get()
return obj
r = []
for p in path:
if SCons.Util.is_String(p):
p = self.subst(p, conv=s)
if SCons.Util.is_List(p):
if len(p) == 1:
p = p[0]
else:
# We have an object plus a string, or multiple
# objects that we need to smush together. No choice
# but to make them into a string.
p = string.join(map(SCons.Util.to_String, p), '')
else:
p = s(p)
r.append(p)
return r
subst_target_source = subst
def _update(self, dict):
"""Update an environment's values directly, bypassing the normal
checks that occur when users try to set items.
"""
self._dict.update(dict)
def use_build_signature(self):
try:
return self._build_signature
except AttributeError:
b = SCons.Defaults.DefaultEnvironment()._build_signature
self._build_signature = b
return b
#######################################################################
# Public methods for manipulating an Environment. These begin with
# upper-case letters. The essential characteristic of methods in
# this section is that they do *not* have corresponding same-named
# global functions. For example, a stand-alone Append() function
# makes no sense, because Append() is all about appending values to
# an Environment's construction variables.
#######################################################################
def Append(self, **kw):
"""Append values to existing construction variables
in an Environment.
"""
kw = copy_non_reserved_keywords(kw)
for key, val in kw.items():
# It would be easier on the eyes to write this using
# "continue" statements whenever we finish processing an item,
# but Python 1.5.2 apparently doesn't let you use "continue"
# within try:-except: blocks, so we have to nest our code.
try:
orig = self._dict[key]
except KeyError:
# No existing variable in the environment, so just set
# it to the new value.
self._dict[key] = val
else:
try:
# Most straightforward: just try to add them
# together. This will work in most cases, when the
# original and new values are of compatible types.
self._dict[key] = orig + val
except TypeError:
try:
# Try to update a dictionary value with another.
# If orig isn't a dictionary, it won't have an
# update() method; if val isn't a dictionary,
# it won't have a keys() method. Either way,
# it's an AttributeError.
orig.update(val)
except AttributeError:
try:
# Check if the original is a list.
add_to_orig = orig.append
except AttributeError:
# The original isn't a list, but the new
# value is (by process of elimination),
# so insert the original in the new value
# (if there's one to insert) and replace
# the variable with it.
if orig:
val.insert(0, orig)
self._dict[key] = val
else:
# The original is a list, so append the new
# value to it (if there's a value to append).
if val:
add_to_orig(val)
self.scanner_map_delete(kw)
def AppendENVPath(self, name, newpath, envname = 'ENV', sep = os.pathsep):
"""Append path elements to the path 'name' in the 'ENV'
dictionary for this environment. Will only add any particular
path once, and will normpath and normcase all paths to help
assure this. This can also handle the case where the env
variable is a list instead of a string.
"""
orig = ''
if self._dict.has_key(envname) and self._dict[envname].has_key(name):
orig = self._dict[envname][name]
nv = SCons.Util.AppendPath(orig, newpath, sep)
if not self._dict.has_key(envname):
self._dict[envname] = {}
self._dict[envname][name] = nv
def AppendUnique(self, **kw):
"""Append values to existing construction variables
in an Environment, if they're not already there.
"""
kw = copy_non_reserved_keywords(kw)
for key, val in kw.items():
if not self._dict.has_key(key):
self._dict[key] = val
elif SCons.Util.is_Dict(self._dict[key]) and \
SCons.Util.is_Dict(val):
self._dict[key].update(val)
elif SCons.Util.is_List(val):
dk = self._dict[key]
if not SCons.Util.is_List(dk):
dk = [dk]
val = filter(lambda x, dk=dk: x not in dk, val)
self._dict[key] = dk + val
else:
dk = self._dict[key]
if SCons.Util.is_List(dk):
if not val in dk:
self._dict[key] = dk + val
else:
self._dict[key] = self._dict[key] + val
self.scanner_map_delete(kw)
def Copy(self, tools=None, toolpath=[], **kw):
"""Return a copy of a construction Environment. The
copy is like a Python "deep copy"--that is, independent
copies are made recursively of each objects--except that
a reference is copied when an object is not deep-copyable
(like a function). There are no references to any mutable
objects in the original Environment.
"""
clone = copy.copy(self)
clone._dict = our_deepcopy(self._dict)
clone['__env__'] = clone
try:
cbd = clone._dict['BUILDERS']
clone._dict['BUILDERS'] = BuilderDict(cbd, clone)
except KeyError:
pass
apply_tools(clone, tools, toolpath)
# Apply passed-in variables after the new tools.
kw = copy_non_reserved_keywords(kw)
new = {}
for key, value in kw.items():
new[key] = SCons.Util.scons_subst_once(value, self, key)
apply(clone.Replace, (), new)
return clone
def Detect(self, progs):
"""Return the first available program in progs.
"""
if not SCons.Util.is_List(progs):
progs = [ progs ]
for prog in progs:
path = self.WhereIs(prog)
if path: return prog
return None
def Dictionary(self, *args):
if not args:
return self._dict
dlist = map(lambda x, s=self: s._dict[x], args)
if len(dlist) == 1:
dlist = dlist[0]
return dlist
def FindIxes(self, paths, prefix, suffix):
"""
Search a list of paths for something that matches the prefix and suffix.
paths - the list of paths or nodes.
prefix - construction variable for the prefix.
suffix - construction variable for the suffix.
"""
suffix = self.subst('$'+suffix)
prefix = self.subst('$'+prefix)
for path in paths:
dir,name = os.path.split(str(path))
if name[:len(prefix)] == prefix and name[-len(suffix):] == suffix:
return path
def Override(self, overrides):
"""
Produce a modified environment whose variables
are overriden by the overrides dictionaries.
overrides - a dictionary that will override
the variables of this environment.
This function is much more efficient than Copy()
or creating a new Environment because it doesn't do
a deep copy of the dictionary, and doesn't do a copy
at all if there are no overrides.
"""
if overrides:
env = copy.copy(self)
env._dict = copy.copy(self._dict)
env['__env__'] = env
overrides = copy_non_reserved_keywords(overrides)
new = {}
for key, value in overrides.items():
new[key] = SCons.Util.scons_subst_once(value, self, key)
env._dict.update(new)
return env
else:
return self
def ParseConfig(self, command, function=None):
"""
Use the specified function to parse the output of the command
in order to modify the current environment. The 'command' can
be a string or a list of strings representing a command and
it's arguments. 'Function' is an optional argument that takes
the environment and the output of the command. If no function is
specified, the output will be treated as the output of a typical
'X-config' command (i.e. gtk-config) and used to append to the
ASFLAGS, CCFLAGS, CPPFLAGS, CPPPATH, LIBPATH, LIBS, LINKFLAGS
and CCFLAGS variables.
"""
# the default parse function
def parse_conf(env, output):
dict = {
'ASFLAGS' : [],
'CCFLAGS' : [],
'CPPFLAGS' : [],
'CPPPATH' : [],
'LIBPATH' : [],
'LIBS' : [],
'LINKFLAGS' : [],
}
static_libs = []
params = string.split(output)
for arg in params:
if arg[0] != '-':
static_libs.append(arg)
elif arg[:2] == '-L':
dict['LIBPATH'].append(arg[2:])
elif arg[:2] == '-l':
dict['LIBS'].append(arg[2:])
elif arg[:2] == '-I':
dict['CPPPATH'].append(arg[2:])
elif arg[:4] == '-Wa,':
dict['ASFLAGS'].append(arg)
elif arg[:4] == '-Wl,':
dict['LINKFLAGS'].append(arg)
elif arg[:4] == '-Wp,':
dict['CPPFLAGS'].append(arg)
elif arg == '-pthread':
dict['CCFLAGS'].append(arg)
dict['LINKFLAGS'].append(arg)
else:
dict['CCFLAGS'].append(arg)
apply(env.Append, (), dict)
return static_libs
if function is None:
function = parse_conf
if type(command) is type([]):
command = string.join(command)
command = self.subst(command)
return function(self, os.popen(command).read())
def Platform(self, platform):
platform = self.subst(platform)
return SCons.Platform.Platform(platform)(self)
def Prepend(self, **kw):
"""Prepend values to existing construction variables
in an Environment.
"""
kw = copy_non_reserved_keywords(kw)
for key, val in kw.items():
# It would be easier on the eyes to write this using
# "continue" statements whenever we finish processing an item,
# but Python 1.5.2 apparently doesn't let you use "continue"
# within try:-except: blocks, so we have to nest our code.
try:
orig = self._dict[key]
except KeyError:
# No existing variable in the environment, so just set
# it to the new value.
self._dict[key] = val
else:
try:
# Most straightforward: just try to add them
# together. This will work in most cases, when the
# original and new values are of compatible types.
self._dict[key] = val + orig
except TypeError:
try:
# Try to update a dictionary value with another.
# If orig isn't a dictionary, it won't have an
# update() method; if val isn't a dictionary,
# it won't have a keys() method. Either way,
# it's an AttributeError.
orig.update(val)
except AttributeError:
try:
# Check if the added value is a list.
add_to_val = val.append
except AttributeError:
# The added value isn't a list, but the
# original is (by process of elimination),
# so insert the the new value in the original
# (if there's one to insert).
if val:
orig.insert(0, val)
else:
# The added value is a list, so append
# the original to it (if there's a value
# to append).
if orig:
add_to_val(orig)
self._dict[key] = val
self.scanner_map_delete(kw)
def PrependENVPath(self, name, newpath, envname = 'ENV', sep = os.pathsep):
"""Prepend path elements to the path 'name' in the 'ENV'
dictionary for this environment. Will only add any particular
path once, and will normpath and normcase all paths to help
assure this. This can also handle the case where the env
variable is a list instead of a string.
"""
orig = ''
if self._dict.has_key(envname) and self._dict[envname].has_key(name):
orig = self._dict[envname][name]
nv = SCons.Util.PrependPath(orig, newpath, sep)
if not self._dict.has_key(envname):
self._dict[envname] = {}
self._dict[envname][name] = nv
def PrependUnique(self, **kw):
"""Append values to existing construction variables
in an Environment, if they're not already there.
"""
kw = copy_non_reserved_keywords(kw)
for key, val in kw.items():
if not self._dict.has_key(key):
self._dict[key] = val
elif SCons.Util.is_Dict(self._dict[key]) and \
SCons.Util.is_Dict(val):
self._dict[key].update(val)
elif SCons.Util.is_List(val):
dk = self._dict[key]
if not SCons.Util.is_List(dk):
dk = [dk]
val = filter(lambda x, dk=dk: x not in dk, val)
self._dict[key] = val + dk
else:
dk = self._dict[key]
if SCons.Util.is_List(dk):
if not val in dk:
self._dict[key] = val + dk
else:
self._dict[key] = val + dk
self.scanner_map_delete(kw)
def Replace(self, **kw):
"""Replace existing construction variables in an Environment
with new construction variables and/or values.
"""
try:
kwbd = our_deepcopy(kw['BUILDERS'])
del kw['BUILDERS']
self.__setitem__('BUILDERS', kwbd)
except KeyError:
pass
kw = copy_non_reserved_keywords(kw)
self._dict.update(our_deepcopy(kw))
self.scanner_map_delete(kw)
def ReplaceIxes(self, path, old_prefix, old_suffix, new_prefix, new_suffix):
"""
Replace old_prefix with new_prefix and old_suffix with new_suffix.
env - Environment used to interpolate variables.
path - the path that will be modified.
old_prefix - construction variable for the old prefix.
old_suffix - construction variable for the old suffix.
new_prefix - construction variable for the new prefix.
new_suffix - construction variable for the new suffix.
"""
old_prefix = self.subst('$'+old_prefix)
old_suffix = self.subst('$'+old_suffix)
new_prefix = self.subst('$'+new_prefix)
new_suffix = self.subst('$'+new_suffix)
dir,name = os.path.split(str(path))
if name[:len(old_prefix)] == old_prefix:
name = name[len(old_prefix):]
if name[-len(old_suffix):] == old_suffix:
name = name[:-len(old_suffix)]
return os.path.join(dir, new_prefix+name+new_suffix)
def Tool(self, tool, toolpath=[]):
tool = self.subst(tool)
return SCons.Tool.Tool(tool, map(self.subst, toolpath))(self)
def WhereIs(self, prog, path=None, pathext=None, reject=[]):
"""Find prog in the path.
"""
if path is None:
try:
path = self['ENV']['PATH']
except KeyError:
pass
elif SCons.Util.is_String(path):
path = self.subst(path)
if pathext is None:
try:
pathext = self['ENV']['PATHEXT']
except KeyError:
pass
elif SCons.Util.is_String(pathext):
pathext = self.subst(pathext)
path = SCons.Util.WhereIs(prog, path, pathext, reject)
if path: return path
return None
#######################################################################
# Public methods for doing real "SCons stuff" (manipulating
# dependencies, setting attributes on targets, etc.). These begin
# with upper-case letters. The essential characteristic of methods
# in this section is that they all *should* have corresponding
# same-named global functions.
#######################################################################
def Action(self, *args, **kw):
nargs = self.subst(args)
nkw = self.subst_kw(kw)
return apply(SCons.Action.Action, nargs, nkw)
def AddPreAction(self, files, action):
nodes = self.arg2nodes(files, self.fs.Entry)
action = SCons.Action.Action(action)
for n in nodes:
n.add_pre_action(action)
return nodes
def AddPostAction(self, files, action):
nodes = self.arg2nodes(files, self.fs.Entry)
action = SCons.Action.Action(action)
for n in nodes:
n.add_post_action(action)
return nodes
def Alias(self, target, *source, **kw):
if not SCons.Util.is_List(target):
target = [target]
tlist = []
for t in target:
if not isinstance(t, SCons.Node.Alias.Alias):
t = self.arg2nodes(self.subst(t), self.ans.Alias)[0]
tlist.append(t)
try:
s = kw['source']
except KeyError:
try:
s = source[0]
except IndexError:
s = None
if s:
if not SCons.Util.is_List(s):
s = [s]
s = filter(None, s)
s = self.arg2nodes(s, self.fs.Entry)
for t in tlist:
AliasBuilder(self, t, s)
return tlist
def AlwaysBuild(self, *targets):
tlist = []
for t in targets:
tlist.extend(self.arg2nodes(t, self.fs.File))
for t in tlist:
t.set_always_build()
return tlist
def BuildDir(self, build_dir, src_dir, duplicate=1):
build_dir = self.arg2nodes(build_dir, self.fs.Dir)[0]
src_dir = self.arg2nodes(src_dir, self.fs.Dir)[0]
self.fs.BuildDir(build_dir, src_dir, duplicate)
def Builder(self, **kw):
nkw = self.subst_kw(kw)
return apply(SCons.Builder.Builder, [], nkw)
def CacheDir(self, path):
self.fs.CacheDir(self.subst(path))
def Clean(self, targets, files):
global CleanTargets
tlist = self.arg2nodes(targets, self.fs.Entry)
flist = self.arg2nodes(files, self.fs.Entry)
for t in tlist:
try:
CleanTargets[t].extend(flist)
except KeyError:
CleanTargets[t] = flist
def Configure(self, *args, **kw):
nargs = [self]
if args:
nargs = nargs + self.subst_list(args)[0]
nkw = self.subst_kw(kw)
try:
nkw['custom_tests'] = self.subst_kw(nkw['custom_tests'])
except KeyError:
pass
return apply(SCons.SConf.SConf, nargs, nkw)
def Command(self, target, source, action, **kw):
"""Builds the supplied target files from the supplied
source files using the supplied action. Action may
be any type that the Builder constructor will accept
for an action."""
nkw = self.subst_kw(kw)
nkw['action'] = action
nkw['source_factory'] = self.fs.Entry
bld = apply(SCons.Builder.Builder, (), nkw)
return bld(self, target, source)
def Depends(self, target, dependency):
"""Explicity specify that 'target's depend on 'dependency'."""
tlist = self.arg2nodes(target, self.fs.Entry)
dlist = self.arg2nodes(dependency, self.fs.Entry)
for t in tlist:
t.add_dependency(dlist)
return tlist
def Dir(self, name, *args, **kw):
"""
"""
return apply(self.fs.Dir, (self.subst(name),) + args, kw)
def Environment(self, **kw):
return apply(SCons.Environment.Environment, [], self.subst_kw(kw))
def Execute(self, action, *args, **kw):
"""Directly execute an action through an Environment
"""
action = apply(self.Action, (action,) + args, kw)
return action([], [], self)
def File(self, name, *args, **kw):
"""
"""
return apply(self.fs.File, (self.subst(name),) + args, kw)
def FindFile(self, file, dirs):
file = self.subst(file)
nodes = self.arg2nodes(dirs, self.fs.Dir)
return SCons.Node.FS.find_file(file, nodes, self.fs.File)
def Flatten(self, sequence):
return SCons.Util.flatten(sequence)
def GetBuildPath(self, files):
result = map(str, self.arg2nodes(files, self.fs.Entry))
if SCons.Util.is_List(files):
return result
else:
return result[0]
def Ignore(self, target, dependency):
"""Ignore a dependency."""
tlist = self.arg2nodes(target, self.fs.Entry)
dlist = self.arg2nodes(dependency, self.fs.Entry)
for t in tlist:
t.add_ignore(dlist)
return tlist
def Install(self, dir, source):
"""Install specified files in the given directory."""
try:
dnodes = self.arg2nodes(dir, self.fs.Dir)
except TypeError:
raise SCons.Errors.UserError, "Target `%s' of Install() is a file, but should be a directory. Perhaps you have the Install() arguments backwards?" % str(dir)
try:
sources = self.arg2nodes(source, self.fs.File)
except TypeError:
if SCons.Util.is_List(source):
raise SCons.Errors.UserError, "Source `%s' of Install() contains one or more non-files. Install() source must be one or more files." % repr(map(str, source))
else:
raise SCons.Errors.UserError, "Source `%s' of Install() is not a file. Install() source must be one or more files." % str(source)
tgt = []
for dnode in dnodes:
for src in sources:
target = self.fs.File(src.name, dnode)
tgt.extend(InstallBuilder(self, target, src))
return tgt
def InstallAs(self, target, source):
"""Install sources as targets."""
sources = self.arg2nodes(source, self.fs.File)
targets = self.arg2nodes(target, self.fs.File)
result = []
for src, tgt in map(lambda x, y: (x, y), sources, targets):
result.extend(InstallBuilder(self, tgt, src))
return result
def Literal(self, string):
return SCons.Util.Literal(string)
def Local(self, *targets):
ret = []
for targ in targets:
if isinstance(targ, SCons.Node.Node):
targ.set_local()
ret.append(targ)
else:
for t in self.arg2nodes(targ, self.fs.Entry):
t.set_local()
ret.append(t)
return ret
def Precious(self, *targets):
tlist = []
for t in targets:
tlist.extend(self.arg2nodes(t, self.fs.Entry))
for t in tlist:
t.set_precious()
return tlist
def Repository(self, *dirs, **kw):
dirs = self.arg2nodes(list(dirs), self.fs.Dir)
apply(self.fs.Repository, dirs, kw)
def Scanner(self, *args, **kw):
nargs = []
for arg in args:
if SCons.Util.is_String(arg):
arg = self.subst(arg)
nargs.append(arg)
nkw = self.subst_kw(kw)
return apply(SCons.Scanner.Scanner, nargs, nkw)
def SConsignFile(self, name=".sconsign", dbm_module=None):
name = self.subst(name)
if not os.path.isabs(name):
name = os.path.join(str(self.fs.SConstruct_dir), name)
SCons.SConsign.File(name, dbm_module)
def SideEffect(self, side_effect, target):
"""Tell scons that side_effects are built as side
effects of building targets."""
side_effects = self.arg2nodes(side_effect, self.fs.Entry)
targets = self.arg2nodes(target, self.fs.Entry)
for side_effect in side_effects:
if side_effect.multiple_side_effect_has_builder():
raise SCons.Errors.UserError, "Multiple ways to build the same target were specified for: %s" % str(side_effect)
side_effect.add_source(targets)
side_effect.side_effect = 1
self.Precious(side_effect)
for target in targets:
target.side_effects.append(side_effect)
return side_effects
def SourceCode(self, entry, builder):
"""Arrange for a source code builder for (part of) a tree."""
entries = self.arg2nodes(entry, self.fs.Entry)
for entry in entries:
entry.set_src_builder(builder)
return entries
def SourceSignatures(self, type):
type = self.subst(type)
if type == 'MD5':
import SCons.Sig.MD5
self._calc_module = SCons.Sig.MD5
elif type == 'timestamp':
import SCons.Sig.TimeStamp
self._calc_module = SCons.Sig.TimeStamp
else:
raise UserError, "Unknown source signature type '%s'"%type
def Split(self, arg):
"""This function converts a string or list into a list of strings
or Nodes. This makes things easier for users by allowing files to
be specified as a white-space separated list to be split.
The input rules are:
- A single string containing names separated by spaces. These will be
split apart at the spaces.
- A single Node instance
- A list containing either strings or Node instances. Any strings
in the list are not split at spaces.
In all cases, the function returns a list of Nodes and strings."""
if SCons.Util.is_List(arg):
return map(self.subst, arg)
elif SCons.Util.is_String(arg):
return string.split(self.subst(arg))
else:
return [self.subst(arg)]
def TargetSignatures(self, type):
type = self.subst(type)
if type == 'build':
self._build_signature = 1
elif type == 'content':
self._build_signature = 0
else:
raise SCons.Errors.UserError, "Unknown target signature type '%s'"%type
def Value(self, value):
"""
"""
return SCons.Node.Python.Value(value)
# The entry point that will be used by the external world
# to refer to a construction environment. This allows the wrapper
# interface to extend a construction environment for its own purposes
# by subclassing SCons.Environment.Base and then assigning the
# class to SCons.Environment.Environment.
Environment = Base
# An entry point for returning a proxy subclass instance that overrides
# the subst*() methods so they don't actually perform construction
# variable substitution. This is specifically intended to be the shim
# layer in between global function calls (which don't want construction
# variable substitution) and the DefaultEnvironment() (which would
# substitute variables if left to its own devices)."""
#
# We have to wrap this in a function that allows us to delay definition of
# the class until it's necessary, so that when it subclasses Environment
# it will pick up whatever Environment subclass the wrapper interface
# might have assigned to SCons.Environment.Environment.
def NoSubstitutionProxy(subject):
class _NoSubstitutionProxy(Environment):
def __init__(self, subject):
self.__dict__['__subject'] = subject
def __getattr__(self, name):
return getattr(self.__dict__['__subject'], name)
def __setattr__(self, name, value):
return setattr(self.__dict__['__subject'], name, value)
def raw_to_mode(self, dict):
try:
raw = dict['raw']
except KeyError:
pass
else:
del dict['raw']
dict['mode'] = raw
def subst(self, string, *args, **kwargs):
return string
def subst_kw(self, kw, *args, **kwargs):
return kw
def subst_list(self, string, *args, **kwargs):
nargs = (string, self,) + args
nkw = kwargs.copy()
nkw['gvars'] = {}
self.raw_to_mode(nkw)
return apply(SCons.Util.scons_subst_list, nargs, nkw)
def subst_target_source(self, string, *args, **kwargs):
nargs = (string, self,) + args
nkw = kwargs.copy()
nkw['gvars'] = {}
self.raw_to_mode(nkw)
return apply(SCons.Util.scons_subst, nargs, nkw)
return _NoSubstitutionProxy(subject)
| lgpl-2.1 | 1,026,291,506,311,848,700 | 37.195122 | 174 | 0.554649 | false | 4.388537 | false | false | false |
angus-ai/angus-doc | restful/building-blocks/services/sceneanalysis/sceneanalysis_fromwebcam.py | 1 | 2774 | # -*- coding: utf-8 -*-
import StringIO
import angus.client
import cv2
import numpy as np
import datetime
import pytz
def main(stream_index):
camera = cv2.VideoCapture(stream_index)
camera.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 640)
camera.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 480)
camera.set(cv2.cv.CV_CAP_PROP_FPS, 10)
if not camera.isOpened():
print("Cannot open stream of index {}".format(stream_index))
exit(1)
print("Input stream is of resolution: {} x {}".format(camera.get(3), camera.get(4)))
conn = angus.client.connect()
service = conn.services.get_service("scene_analysis", version=1)
service.enable_session()
while camera.isOpened():
ret, frame = camera.read()
if not ret:
break
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
ret, buff = cv2.imencode(".jpg", gray, [cv2.IMWRITE_JPEG_QUALITY, 80])
buff = StringIO.StringIO(np.array(buff).tostring())
t = datetime.datetime.now(pytz.utc)
job = service.process({"image": buff,
"timestamp" : t.isoformat(),
"camera_position": "facing",
"sensitivity": {
"appearance": 0.7,
"disappearance": 0.7,
"age_estimated": 0.4,
"gender_estimated": 0.5,
"focus_locked": 0.9,
"emotion_detected": 0.4,
"direction_estimated": 0.8
},
})
res = job.result
if "error" in res:
print(res["error"])
else:
# This parses the events
if "events" in res:
for event in res["events"]:
value = res["entities"][event["entity_id"]][event["key"]]
print("{}| {}, {}".format(event["type"],
event["key"],
value))
# This parses the entities data
for key, val in res["entities"].iteritems():
x, y, dx, dy = map(int, val["face_roi"])
cv2.rectangle(frame, (x, y), (x+dx, y+dy), (0, 255, 0), 2)
cv2.imshow("original", frame)
if cv2.waitKey(1) & 0xFF == 27:
break
service.disable_session()
camera.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
### Web cam index might be different from 0 on your setup.
### To grab a given video file instead of the host computer cam, try:
### main("/path/to/myvideo.avi")
main(0)
| apache-2.0 | 6,750,106,073,965,175,000 | 33.675 | 88 | 0.483778 | false | 4.031977 | false | false | false |
lsaffre/atelier | atelier/sphinxconf/interproject.py | 1 | 4949 | # -*- coding: utf-8 -*-
# Copyright 2011-2020 Rumma & Ko Ltd
# License: BSD, see LICENSE for more details.
"""
Defines the :func:`atelier.sphinxconf.interproject.configure` function.
"""
import os
from pathlib import Path
# from importlib import import_module
from sphinx.util import logging ; logger = logging.getLogger(__name__)
# from invoke import Context
# import atelier
from atelier.projects import load_projects, get_project_info_from_mod
from atelier.projects import get_project_from_nickname
USE_LOCAL_BUILDS = os.environ.get("ATELIER_IGNORE_LOCAL_BUILDS", "") != "yes"
# Whether to use objects.inv files from other local doctrees if they exist.
# E.g. on Travis no other projects are installed from source, so there we
# cannot use it.
def configure(globals_dict, prjspec=None, **nicknames):
"""
Install doctrees of all (or some) atelier projects into the
:envvar:`intersphinx_mapping` of your :xfile:`conf.py`.
See :doc:`/sphinxext/interproject`.
"""
intersphinx_mapping = dict()
# extlinks = dict()
# this = atelier.current_project
# if this is None:
# raise Exception("current_project in {} is None!".format(globals_dict['__file__']))
this_conf_file = Path(globals_dict['__file__']).resolve()
if prjspec:
if isinstance(prjspec, str):
prjspec = prjspec.split()
prjlist = [get_project_info_from_mod(n) for n in prjspec]
else:
prjlist = []
# for p in load_projects():
for p in reversed(list(load_projects())):
if str(this_conf_file).startswith(str(p.root_dir)):
# print("20190122 {} startswith {}".format(this_conf_file, p.root_dir))
continue
prjlist.append(p)
for k, v in nicknames.items():
p = get_project_from_nickname(k)
if p:
prjlist.append(p)
else:
intersphinx_mapping[k] = v
# logger.info("20180907 prjlist {}".format(prjlist))
for prj in prjlist:
# This will load the `tasks.py` of other
# projects. Possible side effects.
# print("20180428 {} {}".format(prj.name, prj.config['doc_trees']))
# config = prj.inv_namespace.configuration()
# print("20180428 {} {}".format(prj.name, config['doc_trees']))
# ctx = Context(config)
# for doc_tree in prj.config['doc_trees']:
count = 0
for doc_tree in prj.get_doc_trees():
if not doc_tree.has_intersphinx:
logger.info("%s has no intersphinx", p)
continue
count += 1
urls = prj.get_xconfig('intersphinx_urls') or {}
url = urls.get(doc_tree.rel_path)
if not url:
if prjspec:
logger.warning(
"No intersphinx mapping for {} of {} ({})".format(
doc_tree.rel_path, prj.nickname, urls))
continue
# if prj.nickname == "getlino":
# raise Exception("20191003 {}".format(doc_tree.src_path))
p = None
src_path = doc_tree.src_path
if src_path is not None:
if this_conf_file == src_path / 'conf.py':
# don't add myself to intersphinx.
continue
if USE_LOCAL_BUILDS:
# print("20190306a", doc_tree, src_path)
# p = prj.root_dir / (doc_tree + '/.build/objects.inv')
p = src_path / '.build/objects.inv'
if p.exists():
logger.info("Found local {}".format(p))
else:
logger.info("File %s does not exist", p)
p = None
# The unique identifier can be used to prefix cross-reference targets
# http://www.sphinx-doc.org/en/master/ext/intersphinx.html#confval-intersphinx_mapping
k = prj.nickname + doc_tree.rel_path
k = k.replace('_', '')
k = str(k)
if k in intersphinx_mapping:
raise Exception("Duplicate intersphinx key {} used for {} "
"(you ask to redefine it to {})".format(
k, intersphinx_mapping[k], p))
if p is not None:
p = str(p)
intersphinx_mapping[k] = (url, p)
if count == 0 and prjspec:
logger.warning("No doctree for {}".format(prj))
# if prj.srcref_url:
# k = '%s_srcref' % prj.nickname
# extlinks[str(k)] = (prj.srcref_url, '')
# atelier.current_project = this
globals_dict.update(intersphinx_mapping=intersphinx_mapping)
# logger.info("20190306 prjlist is {}, intersphinx_mapping is {}".format(
# prjlist, intersphinx_mapping))
# if False: # no longer used
# globals_dict.update(extlinks=extlinks)
| bsd-2-clause | -5,703,355,728,723,782,000 | 34.099291 | 98 | 0.553849 | false | 3.752085 | true | false | false |
tiagoantao/mega-analysis | haploStats/doHaploStats.py | 1 | 4169 | #!/usr/bin/env python3
import os
import sys
import MEGA
from MEGA import karyo
if len(sys.argv) not in [3, 4, 5, 6, 7, 8]:
print("python3 %s COMMAND study [file] [source] [refPop] [chro]" % (sys.argv[0]))
print("""COMMAND is
prepareData
iHS
statIHS
XPEHH
statXPEHH
if command is prepareData, source has the phasedset and
refPop the reference population
""")
sys.exit(-1)
command = sys.argv[1]
myKaryo = sys.argv[2]
karyo.loadBuiltIn(myKaryo)
lexec = MEGA.executor
maxChro = 23 # Needs refactor... No hard max
def removeFixed(tmp, final):
fhap = open("%s.hap" % tmp)
haplo = fhap.readline().rstrip().split(' ')
alleles = [set([a]) for a in haplo]
for l in fhap:
haplo = l.rstrip().split(' ')
for i in range(len(haplo)):
alleles[i].add(haplo[i])
fhap.close()
fmap = open("%s.map" % tmp)
wmap = open("%s.map" % final, 'w')
i = 0
for l in fmap:
if len(alleles[i]) > 1:
wmap.write(l)
i += 1
fmap.close()
wmap.close()
fhap = open("%s.hap" % tmp)
whap = open("%s.hap" % final, 'w')
for l in fhap:
haplo = l.rstrip().split(' ')
cleanHaplo = []
for i in range(len(alleles)):
if len(alleles[i]) > 1:
cleanHaplo.append(haplo[i])
whap.write(' '.join(cleanHaplo))
whap.write('\n')
fhap.close()
whap.close()
def prepPop2(allInds, database, refPop, isXPEHH):
f = open(allInds)
w = open(allInds + "_", "w")
for l in f:
toks = l.rstrip().replace(" ", "\t").split("\t")
w.write(toks[1] + "\n")
w.close()
for chro in range(1, 23 + 1): # Human dependent
if not karyo.accept(chro, 1):
continue
if refPop != "shapeIt":
lexec.submit("python3",
"%s/beagle2ihs.py %s/%s/%s-%d.gz %s tmp-%d %s/37-%d.map %d"
% (MEGA.phasingScripts, MEGA.phaseDB, database,
refPop, chro, allInds, chro, MEGA.geneticMapDB, chro, chro))
else:
lexec.submit("python3",
"%s/beagle2ihs.py %s/%s/%d.gz %s tmp-%d %s/37-%d.map %d"
% (MEGA.phasingScripts, MEGA.phaseDB, database,
chro, allInds, chro, MEGA.geneticMapDB, chro, chro))
lexec.wait(True)
for chro in range(1, 23 + 1): # Human dependent
if not karyo.accept(chro, 1):
continue
if isXPEHH:
os.rename('tmp-%d.hap' % chro, '%d.hap' % chro)
os.rename('tmp-%d.map' % chro, '%d.map' % chro)
else:
removeFixed('tmp-%d' % chro, '%d' % chro)
if command == 'prepareData':
studyName = sys.argv[3]
allInds = sys.argv[4]
source = sys.argv[5]
refPop = sys.argv[6]
if len(sys.argv) > 7:
isXPEHH = True
else:
isXPEHH = False
prepPop2(allInds, source, refPop, isXPEHH)
elif command == 'iHS':
for k in range(1, maxChro + 1):
if not karyo.accept(k, 1): # needs whole chromosome accepted
continue
input = "%d.hap" % (k,)
inmap = "%d.map" % (k,)
out = "%d.uiHS" % (k,)
lexec.out = out
lexec.submit("ihs", "%s %s" % (inmap, input))
lexec.wait(True)
elif command == 'statIHS':
maxBin = sys.argv[3]
winSize = sys.argv[4]
minSNPs = sys.argv[5]
os.system("python3 %s/statIHS.py %s %s %s" % (
MEGA.haploScripts, maxBin, winSize, minSNPs))
elif command in ['XPEHH', 'XPEHH2']:
for k in range(1, maxChro + 1):
if not karyo.accept(k, 1): # needs whole chromosome accepted
continue
input = "%d.hap" % k
sinput = "s%d.hap" % k
inmap = "%d.map" % k
out = "%d.xpEHH" % k
lexec.out = out
print("-m %s -h %s %s" % (inmap, input, sinput))
lexec.submit("xpehh", "-m %s -h %s %s" % (inmap, input, sinput))
lexec.wait(True)
elif command == 'statXPEHH':
winSize = sys.argv[3]
os.system("python3 %s/statXPEHH.py 4 %s" % (MEGA.haploScripts, winSize))
else:
print("ERROR: Command not known!")
| agpl-3.0 | -8,782,770,987,302,517,000 | 28.359155 | 88 | 0.528184 | false | 2.820704 | false | false | false |
johnarban/arban | schmidt_funcs.py | 1 | 26232 | import numpy as np
from PIL import Image, ImageDraw
from scipy import interpolate, ndimage, stats, signal, integrate, misc
from astropy.io import ascii, fits
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord
import astropy.units as u
import astropy.constants as c
import corner as triangle # formerly dfm/triangle
# from astropy.modeling import models, fitting
from astropy.modeling.models import custom_model
from astropy.modeling.fitting import LevMarLSQFitter # , SimplexLSQFitter
import matplotlib.pyplot as plt
import matplotlib as mpl
import emcee
#import ipdb;
import pdb
# # # # # # # # # # # # # # # # # # # # # #
# make iPython print immediately
import sys
oldsysstdout = sys.stdout
class flushfile():
def __init__(self, f):
self.f = f
def __getattr__(self, name):
return object.__getattribute__(self.f, name)
def write(self, x):
self.f.write(x)
self.f.flush()
def flush(self):
self.f.flush()
# sys.stdout = flushfile(sys.stdout)
# sys.stdout = oldsysstdout
def rot_matrix(theta):
'''
rot_matrix(theta)
2D rotation matrix for theta in radians
returns numpy matrix
'''
c, s = np.cos(theta), np.sin(theta)
return np.matrix([[c, -s], [s, c]])
def rectangle(c, w, h, angle=0, center=True):
'''
create rotated rectangle
for input into PIL ImageDraw.polygon
to make a rectangle polygon mask
Rectagle is created and rotated with center
at zero, and then translated to center position
accepters centers
Default : center
tl, tr, bl, br
'''
cx, cy = c
# define initial polygon irrespective of center
x = -w / 2., +w / 2., +w / 2., -w / 2.
y = +h / 2., +h / 2., -h / 2., -h / 2.
# correct center if starting from corner
if center is not True:
if center[0] == 'b':
# y = tuple([i + h/2. for i in y])
cy = cy + h / 2.
else:
# y = tuple([i - h/2. for i in y])
cy = cy - h / 2.
if center[1] == 'l':
# x = tuple([i + w/2 for i in x])
cx = cx + w / 2.
else:
# x = tuple([i - w/2 for i in x])
cx = cx - w / 2.
R = rot_matrix(angle * np.pi / 180.)
c = []
for i in range(4):
xr, yr = np.dot(R, np.asarray([x[i], y[i]])).A.ravel()
# coord switch to match ordering of FITs dimensions
c.append((cx + xr, cy + yr))
# print (cx,cy)
return c
def comp(arr):
'''
returns the compressed version
of the input array if it is a
numpy MaskedArray
'''
try:
return arr.compressed()
except:
return arr
def mavg(arr, n=2, mode='valid'):
'''
returns the moving average of an array.
returned array is shorter by (n-1)
'''
if len(arr) > 400:
return signal.fftconvolve(arr, [1. / float(n)] * n, mode=mode)
else:
return signal.convolve(arr, [1. / float(n)] * n, mode=mode)
def mgeo(arr, n=2):
'''
Returns array of lenth len(arr) - (n-1)
# # written by me
# # slower for short loops
# # faster for n ~ len(arr) and large arr
a = []
for i in xrange(len(arr)-(n-1)):
a.append(stats.gmean(arr[i:n+i]))
# # Original method# #
# # written by me ... ~10x faster for short arrays
b = np.array([np.roll(np.pad(arr,(0,n),mode='constant',constant_values=1),i)
for i in xrange(n)])
return np.product(b,axis=0)[n-1:-n]**(1./float(n))
'''
a = []
for i in range(len(arr) - (n - 1)):
a.append(stats.gmean(arr[i:n + i]))
return np.asarray(a)
def avg(arr, n=2):
'''
NOT a general averaging function
return bin centers (lin and log)
'''
diff = np.diff(arr)
# 2nd derivative of linear bin is 0
if np.allclose(diff, diff[::-1]):
return mavg(arr, n=n)
else:
return np.power(10., mavg(np.log10(arr), n=n))
# return mgeo(arr, n=n) # equivalent methods, only easier
def shift_bins(arr,phase=0,nonneg=False):
# assume original bins are nonneg
if phase != 0:
diff = np.diff(arr)
if np.allclose(diff,diff[::-1]):
diff = diff[0]
arr = arr + phase*diff
#pre = arr[0] + phase*diff
return arr
else:
arr = np.log10(arr)
diff = np.diff(arr)[0]
arr = arr + phase * diff
return np.power(10.,arr)
else:
return arr
def llspace(xmin, xmax, n=None, log=False, dx=None, dex=None):
'''
llspace(xmin, xmax, n = None, log = False, dx = None, dex = None)
get values evenly spaced in linear or log spaced
n [10] -- Optional -- number of steps
log [false] : switch for log spacing
dx : spacing for linear bins
dex : spacing for log bins (in base 10)
dx and dex override n
'''
xmin, xmax = float(xmin), float(xmax)
nisNone = n is None
dxisNone = dx is None
dexisNone = dex is None
if nisNone & dxisNone & dexisNone:
print('Error: Defaulting to 10 linears steps')
n = 10.
nisNone = False
# either user specifies log or gives dex and not dx
log = log or (dxisNone and (not dexisNone))
if log:
if xmin == 0:
print("log(0) is -inf. xmin must be > 0 for log spacing")
xmin, xmax = np.log10(xmin), np.log10(xmax)
# print nisNone, dxisNone, dexisNone, log # for debugging logic
if not nisNone: # this will make dex or dx if they are not specified
if log and dexisNone: # if want log but dex not given
dex = (xmax - xmin) / n
# print dex
elif (not log) and dxisNone: # else if want lin but dx not given
dx = (xmax - xmin) / n # takes floor
#print dx
if log:
#return np.power(10, np.linspace(xmin, xmax , (xmax - xmin)/dex + 1))
return np.power(10, np.arange(xmin, xmax + dex, dex))
else:
#return np.linspace(xmin, xmax, (xmax-xmin)/dx + 1)
return np.arange(xmin, xmax + dx, dx)
def nametoradec(name):
'''
Get names formatted as
hhmmss.ss+ddmmss to Decimal Degree
only works for dec > 0 (splits on +, not -)
Will fix this eventually...
'''
if 'string' not in str(type(name)):
rightascen = []
declinatio = []
for n in name:
ra, de = n.split('+')
ra = ra[0:2] + ':' + ra[2:4] + ':' + ra[4:6] + '.' + ra[6:8]
de = de[0:2] + ':' + de[2:4] + ':' + de[4:6]
coord = SkyCoord(ra, de, frame='icrs',
unit=('hourangle', 'degree'))
rightascen.append(coord.ra.value)
declinatio.append(coord.dec.value)
return np.array(rightascen), np.array(declinatio)
else:
ra, de = name.split('+')
ra = ra[0:2] + ':' + ra[2:4] + ':' + ra[4:6] + '.' + ra[6:8]
de = de[0:2] + ':' + de[2:4] + ':' + de[4:6]
coord = SkyCoord(ra, de, frame='icrs', unit=('hourangle', 'degree'))
return np.array(coord.ra.value), np.array(coord.dec.value)
def get_ext(extmap, errmap, extwcs, ra, de):
'''
Get the extinction (errors) for a particular position or
list of positions
More generally get the value (error) for a particular
position given a wcs and world coordinates
'''
try:
xp, yp = extwcs.all_world2pix(
np.array([ra]).flatten(), np.array([de]).flatten(), 0)
except:
xp, yp = WCS(extwcs).all_world2pix(
np.array([ra]).flatten(), np.array([de]).flatten(), 0)
ext = []
err = []
for i in range(len(np.array(xp))):
try:
ext.append(extmap[yp[int(round(i))], xp[int(round(i))]])
if errmap is not None:
err.append(errmap[yp[int(round(i))], xp[int(round(i))]])
except IndexError:
ext.append(np.nan)
if errmap is not None:
err.append(np.nan)
if errmap is not None:
return np.array(ext), np.array(err)
else:
return np.array(ext), None
def pdf(values, bins):
'''
** Normalized differential area function. **
(statistical) probability denisty function
normalized so that the integral is 1
and. The integral over a range is the
probability of the value is within
that range.
Returns array of size len(bins)-1
Plot versus bins[:-1]
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, x = np.histogram(values, bins=bins, range=range, density=False)
# From the definition of Pr(x) = dF(x)/dx this
# is the correct form. It returns the correct
# probabilities when tested
pdf = h / (np.sum(h, dtype=float) * np.diff(x))
return pdf, avg(x)
def pdf2(values, bins):
'''
The ~ PDF normalized so that
the integral is equal to the
total amount of a quantity.
The integral over a range is the
total amount within that range.
Returns array of size len(bins)-1
Plot versus bins[:-1]
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
pdf, x = np.histogram(values, bins=bins, range=range, density=False)
pdf = pdf.astype(float) / np.diff(x)
return pdf, avg(x)
def edf(data, pdf=False):
y = np.arange(len(data), dtype=float)
x = np.sort(data).astype(float)
return y, x
def cdf(values, bins):
'''
(statistical) cumulative distribution function
Integral on [-inf, b] is the fraction below b.
CDF is invariant to binning.
This assumes you are using the entire range in the binning.
Returns array of size len(bins)
Plot versus bins[:-1]
'''
if hasattr(bins,'__getitem__'):
range = (np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False) # returns int
c = np.cumsum(h / np.sum(h, dtype=float)) # cumulative fraction below bin_k
# append 0 to beginning because P( X < min(x)) = 0
return np.append(0, c), bins
def cdf2(values, bins):
'''
# # Exclusively for area_function which needs to be unnormalized
(statistical) cumulative distribution function
Value at b is total amount below b.
CDF is invariante to binning
Plot versus bins[:-1]
Not normalized to 1
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False)
c = np.cumsum(h).astype(float)
return np.append(0., c), bins
def area_function(extmap, bins):
'''
Complimentary CDF for cdf2 (not normalized to 1)
Value at b is total amount above b.
'''
c, bins = cdf2(extmap, bins)
return c.max() - c, bins
def diff_area_function(extmap, bins,scale=1):
'''
See pdf2
'''
s, bins = area_function(extmap, bins)
dsdx = -np.diff(s) / np.diff(bins)
return dsdx*scale, avg(bins)
def log_diff_area_function(extmap, bins):
'''
See pdf2
'''
s, bins = diff_area_function(extmap, bins)
g=s>0
dlnsdlnx = np.diff(np.log(s[g])) / np.diff(np.log(bins[g]))
return dlnsdlnx, avg(bins[g])
def mass_function(values, bins, scale=1, aktomassd=183):
'''
M(>Ak), mass weighted complimentary cdf
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False, weights=values*aktomassd*scale)
c = np.cumsum(h).astype(float)
return c.max() - c, bins
def hist(values, bins, err=False, density=False, **kwargs):
'''
really just a wrapper for numpy.histogram
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
hist, x = np.histogram(values, bins=bins, range=range, density=density, **kwargs)
if (err is None) or (err is False):
return hist.astype(np.float), avg(x)
else:
return hist.astype(np.float), avg(x), np.sqrt(hist)
def bootstrap(X, X_err=None, n=None, smooth=False):
'''
(smooth) bootstrap
bootstrap(X,Xerr,n,smooth=True)
X : array to be resampled
X_err [optional]: errors to perturb data for smooth bootstrap
only provide is doing smooth bootstrapping
n : number of samples. Default - len(X)
smooth: optionally use smooth bootstrapping.
will be set to False if no X_err is provided
'''
if X_err is None:
smooth = False
if n is None: # default n
n = len(X)
resample_i = np.random.randint(0,len(X),size=(n,))
X_resample = np.asarray(X)[resample_i]
if smooth:
X_resample = np.random.normal(X_resample, \
np.asarray(X_err)[resample_i])
return X_resample
def num_above(values, level):
return np.sum((values >= level) & np.isfinite(values), dtype=np.float)
def num_below(values, level):
return np.sum((values < level) & np.isfinite(values), dtype=np.float)
def alpha_ML(data, xmin,xmax):
'''
uses maximum likelihood to estimation
to determine power-law and error
From Clauset et al. 2010
'''
data = data[np.isfinite(data)]
data = data[(data >= xmin) & (data <= xmax)]
alpha = 1 + len(data) * (np.sum(np.log(data / xmin))**(-1))
error = (alpha -1 )/np.sqrt(len(data))
#loglike = np.sum((-1+alpha)*np.log(xmin)-alpha*np.log(data)+np.log(-1+alpha))
N = len(data)
loglike = N*np.log(alpha-1) - N*np.log(xmin) - alpha * np.sum(np.log(data/xmin))
return alpha , error, loglike, xmin, xmax
def sigconf1d(n):
cdf = (1/2.)*(1+special.erf(n/np.sqrt(2)))
return (1-cdf)*100,100* cdf,100*special.erf(n/np.sqrt(2))
def surfd(X, Xmap, bins, Xerr = None, Xmaperr = None, boot=False, scale=1., return_err=False, smooth=False):
'''
call: surfd(X, map, bins,
xerr = None, merr = None, scale = 1.)
calculates H(X)/H(M) = Nx pdf(x) dx / Nm pdf(m) dm ; dm = dx
so it is independent of whether dx or dlog(x)
'''
# get dn/dx
if boot:
n = np.histogram(bootstrap(X,Xerr,smooth=True), bins = bins, range=(bins.min(),bins.max()))[0]
s = np.histogram(bootstrap(Xmap,Xmaperr,smooth=True), bins = bins, range=(bins.min(),bins.max()))[0] * scale
else:
n = np.histogram(X, bins = bins, range=(bins.min(),bins.max()))[0]
s = np.histogram(Xmap, bins = bins, range=(bins.min(),bins.max()))[0] * scale
if not return_err:
return n / s
else:
return n / s, n / s * np.sqrt(1. / n - scale / s)
def alpha(y, x, err=None, return_kappa=False, cov=False):
'''
this returns -1*alpha, and optionally kappa and errors
'''
a1 = set(np.nonzero(np.multiply(x, y))[0])
a2 = set(np.where(np.isfinite(np.add(x, y, err)))[0])
a = np.asarray(list(a1 & a2))
y = np.log(y[a])
x = np.log(x[a])
if err is None:
p, covar = np.polyfit(x, y, 1, cov=True)
m, b = p
me, be = np.sqrt(np.sum(covar * [[1, 0], [0, 1]], axis=1))
me, be
else:
err = err[a]
err = err / y
p, covar = np.polyfit(x, y, 1, w=1. / err**2, cov=True)
m, b = p
me, be = np.sqrt(np.sum(covar * [[1, 0], [0, 1]], axis=1))
me, be
if return_kappa:
if cov:
return m, np.exp(b), me, be
else:
return m, np.exp(b)
else:
if cov:
return m, me
else:
return m
def Heaviside(x):
return 0.5 * (np.sign(x) + 1.)
def schmidt_law(Ak, theta):
'''
schmidt_law(Ak,(beta,kappa))
beta is the power law index (same as alpha)
'''
if len(theta) == 2:
beta, kappa = theta
return kappa * (Ak ** beta)
elif len(theta) == 3:
beta, kappa, Ak0 = theta
sfr = Heaviside(Ak - Ak0) * kappa * (Ak ** beta)
sfr[Ak < Ak0] = 0#np.nan # kappa * (Ak0 ** beta)
return sfr
def lmfit_powerlaw(x, y, yerr=None, xmin=-np.inf, xmax=np.inf, init=None, maxiter=1000000):
@custom_model
def model(x, beta=init[0], kappa=init[1]):
return np.log(kappa * (np.exp(x) ** beta))
keep = np.isfinite(1. / y) & (x >= xmin) & (x <= xmax)
if yerr is not None:
keep = keep & np.isfinite(1. / yerr)
m_init = model()
fit = LevMarLSQFitter()
#weights = (yerr / y)[keep]**(-2.)
m = fit(m_init, np.log(x[keep]), np.log(y[keep]), maxiter=maxiter)
return m, fit
def fit_lmfit_schmidt(x, y, yerr, init=None):
m, _ = lmfit_powerlaw(x,y,yerr,init=init)
return m.parameters
def emcee_schmidt(x, y, yerr, pos=None, pose=None,
nwalkers=None, nsteps=None, burnin=200,verbose=True):
'''
emcee_schmidt provides a convenient wrapper for fitting the schimdt law
to binned x,log(y) data. Generally, it fits a normalization and a slope
'''
def model(x, theta):
'''
theta = (beta, kappa)
'''
return np.log(schmidt_law(x, theta))
def lnlike(theta, x, y, yerr):
mod = model(x, theta)
inv_sigma2 = 1 / yerr**2
# Poisson statistics -- not using this
#mu = (yerr)**2 # often called lambda = poisson variance for bin x_i
#resid = np.abs(y - mod) # where w calculate the poisson probability
#return np.sum(resid * np.log(mu) - mu) - np.sum(np.log(misc.factorial(resid)))
#######################################################
########## CHI^2 log-likelihood #######################
return -0.5 * (np.sum((y - mod)**2 * inv_sigma2))# - 0.5 * 3 * np.log(np.sum(k))
def lnprior(theta):
# different priors for different version of
# the schmidt law
if len(theta) == 3:
beta, kappa, Ak0 = theta
c3 = 0. < Ak0 <= 5.
c4 = True
else:
beta, kappa = theta
c3 = True
c4 = True
c1 = 0 <= beta <= 6# Never run's into this region
c2 = 0 <= kappa # Never run's into this region
if c1 and c2 and c3 and c4:
return 0.0
return -np.inf
def lnprob(theta, x, y, yerr):
## update likelihood
lp = lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta, x, y, yerr)
ndim, nwalkers = len(pos), nwalkers
pos = [np.array(pos) + np.array(pose) * 0.5 *
(0.5 - np.random.rand(ndim)) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(
nwalkers, ndim, lnprob, args=(x, y, yerr))
sampler.run_mcmc(pos, nsteps)
# Get input values
# x, y, yerr = sampler.args
samples = sampler.chain[:, burnin:, :].reshape((-1, sampler.ndim))
# # Print out final values # #
theta_mcmc = np.percentile(samples, [16, 50, 84], axis=0).T
if verbose: print(sampler.acor)
if verbose:
for i, item in enumerate(theta_mcmc):
j = ['beta', 'kappa', 'A_{K,0}', 'A_{K,f}']
inserts = (j[i], item[1], item[2] - item[1], item[1] - item[0])
print('%s = %0.2f (+%0.2f,-%0.2f)' % inserts)
return sampler, np.median(samples, axis=0), np.std(samples, axis=0)
def fit(bins, samp, samperr, maps, mapserr, scale=1., sampler=None, log=False,
pos=None, pose=None, nwalkers=100, nsteps=1e4, boot=1000, burnin=200,
threshold=False, threshold2=False,verbose=True):
'''
# # # A Schmidt Law fitting Function using EMCEE by D.F.M.
fit(bins, samp, samperr, maps, mapserr, scale=1.,
pos=None, pose=None, nwalkers=100, nsteps=1e4)
bins: bin edges for binning data (I know it's bad to bin)
samp : values for your sample
samperr : errors on values for you sample
maps: map of values from which you drew your sample
mapserr: error on maps...
pos : initial location of ball of walkers
pose : initial spread of walkers
'''
#print 'Hi!. It\'s hammer time...'
# x values are bin midpoints
x = avg(bins) # assume if log=True, then bins are already log
# x = bins[:-1]
# y = np.asarray([surfd(samp,maps,bins,boot=True,scale=scale) for i in xrange(boot)])
# yerr = np.nanstd(y,axis=0)
#if log:
# samp = np.log10(samp)
# maps = np.log10(maps)
# bins = np.log10(bins) # because bins doesn't get used again after surfd
y, yerr = surfd(samp, maps, bins, scale=scale, return_err=True)
###########################################+
###### ADDED FOR SHIFTING EXPERIMENT ######+
###########################################+
bins2 = shift_bins(bins,0.5)
bin
x2 = avg(bins2)
y2, yerr2 = surfd(samp, maps, bins2, scale=scale, return_err=True)
concatx = np.concatenate((x,x2))
concaty = np.concatenate((y,y2))
concatyerr = np.concatenate((yerr,yerr2))
srt = np.argsort(concatx)
x = concatx[srt]
y = concaty[srt]
yerr = concatyerr[srt]
nonzero = np.isfinite(1. / y) & np.isfinite(yerr) & np.isfinite(1./yerr)
y = y[nonzero]
yerr = yerr[nonzero]
x = x[nonzero]
# initialize walker positions and walker bundle size
init = alpha(y, x, return_kappa=True, cov=True)
if pos is None:
pos = init[:2]
if pose is None:
if np.isnan(init[2] + init[3]):
pose = (1, 1)
else:
pose = (init[2], init[3])
if threshold | threshold2:
pos = pos + (0.4,)
pose = pose + (0.2,)
if threshold2:
pos = pos + (8.,)
pose = pose + (.5,)
#print pos
#print pose
pos = np.asarray(pos)
pose = .1*pos#np.asarray(pose)
# This function only fits sources, it doesn't plot, so don't pass
# and emcee sampler type. it will spit it back out
# # # # # # # RUN EMCEE # # # # # # #
# pdb.set_trace()
if sampler is None:
if verbose: print('Sampler autocorrelation times . . .')
sampler, theta, theta_std = emcee_schmidt(x, np.log(y), yerr/y,
pos=pos, pose=pose,
nwalkers=nwalkers,
nsteps=nsteps, burnin=burnin,verbose=verbose)
else:
print('Next time don\'t give me a ' + str(type(sampler)) + '.')
#
try:
return sampler, x, y, yerr, theta, theta_std
except:
return sampler, x, y, yerr
def schmidt_results_plots(sampler, model, x, y, yerr, burnin=200, akmap=None,
bins=None, scale=None, triangle_plot=True):
'''
model: should pass schmidt_law()
'''
try:
mpl.style.use('john')
except:
None
# Get input values
# x, y, yerr = sampler.args
if hasattr(sampler,'__getitem__'):
chain = sampler
dim = chain.shape[-1]
else:
chain = sampler.chain
dim = sampler.dim
samples = chain[:, burnin:, :].reshape((-1, dim))
# # Print out final values # #
theta_mcmc = np.percentile(samples, [16, 50, 84], axis=0).T # Get percentiles for each parameter
n_params = len(theta_mcmc[:,1])
#print n_params
for i, item in enumerate(theta_mcmc):
j = ['beta', 'kappa', 'A_{K,0}','A_{K,f}']
inserts = (j[i], item[1], item[2] - item[1], item[1] - item[0])
print('%s = %0.2f (+%0.2f,-%0.2f)' % inserts)
# Plot corner plot
if triangle_plot:
if n_params == 3:
labels = ['beta', 'kappa', 'A_{K,0}']
elif n_params == 4:
labels = ['beta', 'kappa', 'A_{K,0}', 'A_{K,f}']
else:
labels = ['beta', 'kappa']
#print labels
_ = triangle.corner(samples, labels=labels,
truths=theta_mcmc[:, 1], quantiles=[.16, .84],
verbose=False)
# generate schmidt laws from parameter samples
xln = np.logspace(np.log10(x.min()*.5),np.log10(x.max()*2.),100)
smlaw_samps = np.asarray([schmidt_law(xln, samp) for samp in samples])
# get percentile bands
percent = lambda x: np.nanpercentile(smlaw_samps, x, interpolation='linear', axis=0)
# Plot fits
fig = plt.figure()
# Plot data with errorbars
plt.plot(xln, percent(50), 'k') # 3 sigma band
# yperr = np.abs(np.exp(np.log(y)+yerr/y) - y)
# ynerr = np.abs(np.exp(np.log(y)-yerr/y) - y)
plt.errorbar(x, y, yerr, fmt='rs', alpha=0.7, mec='none')
plt.legend(['Median', 'Data'],
loc='upper left', fontsize=12)
# draw 1,2,3 sigma bands
plt.fill_between(xln, percent(1), percent(99), color='0.9') # 1 sigma band
plt.fill_between(xln, percent(2), percent(98), color='0.75') # 2 sigma band
plt.fill_between(xln, percent(16), percent(84), color='0.5') # 3 sigma band
plt.loglog(nonposy='clip')
return plt.gca()
def flatchain(chain):
return chain.reshape((-1,chain.shape[-1]))
def norm_chain(chain, axis=0):
std = np.std(flatchain(chain), axis=axis)
med = np.median(flatchain(chain), axis=axis)
return (chain-med)/std
def plot_walkers(sampler,limits = None, bad = None):
'''
sampler : emcee Sampler class
'''
if hasattr(sampler,'__getitem__'):
chain = sampler
ndim = chain.shape[-1]
else:
chain = sampler.chain
ndim = sampler.ndim
fig = plt.figure(figsize=(8 * ndim, 4 * ndim))
if hasattr(limits,'__getitem__'):
limits += [None] * (3-len(limits))
slices = slice(limits[0],limits[1],limits[2])
else:
slices = slice(None,limits,None)
for w,walk in enumerate(chain[:,slices,:]):
if bad is None:
color = 'k'
elif bad[w]:
color = 'r'
else:
color = 'k'
for p, param in enumerate(walk.T):
ax = plt.subplot(ndim, 1, p + 1)
ax.plot(param, color, alpha=.75, lw=0.75)
# ax.set_ylim(param.min()*0.5,param.max()*1.5)
# ax.semilogy()
plt.tight_layout()
return fig
def tester():
print('hi ya\'ll')
| mit | -1,129,073,763,768,099,700 | 30.007092 | 116 | 0.559126 | false | 3.166586 | false | false | false |
sangwook236/SWDT | sw_dev/python/ext/test/gui/wxpython/wx_basic.py | 1 | 8474 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import wx
class SimpleFrame(wx.Frame):
def __init__(self, *args, **kwargs):
# Ensure the parent's __init__() is called.
super(SimpleFrame, self).__init__(*args, **kwargs)
# Create a panel in the frame.
panel = wx.Panel(self)
# Put some text with a larger bold font on it.
st = wx.StaticText(panel, label="Hello World!")
font = st.GetFont()
font.PointSize += 10
font = font.Bold()
st.SetFont(font)
# Create a sizer to manage the layout of child widgets.
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(st, wx.SizerFlags().Border(wx.TOP | wx.LEFT, 25))
panel.SetSizer(sizer)
# Initialize UI.
self.InitUI()
#--------------------
self.SetSize((450, 350))
self.SetTitle("Simple Example")
self.Centre()
def InitUI(self):
"""
A menu bar is composed of menus, which are composed of menu items.
This method builds a set of menus and binds handlers to be called
when the menu item is selected.
"""
# Create a menu bar.
fileMenu = wx.Menu()
newItem = fileMenu.Append(wx.ID_NEW, "&New")
openItem = fileMenu.Append(wx.ID_OPEN, "&Open...")
saveAsItem = fileMenu.Append(wx.ID_SAVE, "&Save As...")
fileMenu.AppendSeparator()
importMenu = wx.Menu()
importMenu.Append(wx.ID_ANY, "Import Newsfeed List...")
importMenu.Append(wx.ID_ANY, "mport Bookmarks...")
importMenu.Append(wx.ID_ANY, "Import Mail...")
fileMenu.AppendMenu(wx.ID_ANY, "I&mport", importMenu)
fileMenu.AppendSeparator()
if True:
# When using a stock ID we don't need to specify the menu item's label.
exitItem = fileMenu.Append(wx.ID_EXIT)
else:
exitItem = wx.MenuItem(fileMenu, 1, "&Quit\tCtrl+Q")
exitItem.SetBitmap(wx.Bitmap("./exit.png"))
fileMenu.Append(exitItem)
viewMenu = wx.Menu()
self.showStatusbarItem = viewMenu.Append(wx.ID_ANY, "Show Statusbar", "Show Statusbar", kind=wx.ITEM_CHECK)
self.showToolbarItem = viewMenu.Append(wx.ID_ANY, "Show Toolbar", "Show Toolbar", kind=wx.ITEM_CHECK)
viewMenu.Check(self.showStatusbarItem.GetId(), True)
viewMenu.Check(self.showToolbarItem.GetId(), True)
messageMenu = wx.Menu()
# The "\t..." syntax defines an accelerator key that also triggers the same event.
helloItem = messageMenu.Append(wx.ID_ANY, "&Hello...\tCtrl-H", "Help string shown in status bar for this menu item")
messageMenu.AppendSeparator()
messageItem = messageMenu.Append(wx.ID_ANY, "&Message...\tCtrl-M", "Message")
errorItem = messageMenu.Append(wx.ID_ANY, "&Error...\tCtrl-E", "Error")
questionItem = messageMenu.Append(wx.ID_ANY, "&Question...\tCtrl-U", "Question")
exclamationItem = messageMenu.Append(wx.ID_ANY, "&Exclamation...\tCtrl-C", "Exclamation")
# Now a help menu for the about item.
helpMenu = wx.Menu()
aboutItem = helpMenu.Append(wx.ID_ABOUT)
# Make the menu bar and add the two menus to it. The '&' defines
# that the next letter is the "mnemonic" for the menu item. On the
# platforms that support it those letters are underlined and can be
# triggered from the keyboard.
menuBar = wx.MenuBar()
menuBar.Append(fileMenu, "&File")
menuBar.Append(viewMenu, "&View")
menuBar.Append(messageMenu, "&Message")
menuBar.Append(helpMenu, "&Help")
# Give the menu bar to the frame.
self.SetMenuBar(menuBar)
#--------------------
# Create a status bar.
self.statusbar = self.CreateStatusBar()
self.SetStatusText("Welcome to wxPython!")
#self.statusbar.SetStatusText("Welcome to wxPython!")
#--------------------
# Create a toolbar.
self.toolbar = self.CreateToolBar()
self.toolbar.AddTool(1, "Tool 1", wx.Bitmap("./right.png"), wx.Bitmap("./wrong.png"), kind=wx.ITEM_RADIO, shortHelp="Simple Tool 1")
#self.toolbar.AddStretchableSpace()
self.toolbar.AddTool(1, "Tool 2", wx.Bitmap("./right.png"), wx.Bitmap("./wrong.png"), kind=wx.ITEM_CHECK, shortHelp="Simple Tool 2")
#self.toolbar.AddStretchableSpace()
self.toolbar.AddTool(1, "Tool 3", wx.Bitmap("./right.png"), wx.Bitmap("./wrong.png"), kind=wx.ITEM_NORMAL, shortHelp="Simple Tool 3")
self.toolbar.Realize()
#--------------------
# Finally, associate a handler function with the EVT_MENU event for each of the menu items.
# That means that when that menu item is activated then the associated handler function will be called.
self.Bind(wx.EVT_MENU, self.OnNew, newItem)
self.Bind(wx.EVT_MENU, self.OnOpen, openItem)
self.Bind(wx.EVT_MENU, self.OnSaveAs, saveAsItem)
self.Bind(wx.EVT_MENU, self.OnExit, exitItem)
self.Bind(wx.EVT_MENU, self.OnToggleStatusBar, self.showStatusbarItem)
self.Bind(wx.EVT_MENU, self.OnToggleToolBar, self.showToolbarItem)
self.Bind(wx.EVT_MENU, self.OnHello, helloItem)
self.Bind(wx.EVT_MENU, self.OnMessage, messageItem)
self.Bind(wx.EVT_MENU, self.OnError, errorItem)
self.Bind(wx.EVT_MENU, self.OnQuestion, questionItem)
self.Bind(wx.EVT_MENU, self.OnExclamation, exclamationItem)
self.Bind(wx.EVT_MENU, self.OnAbout, aboutItem)
self.Bind(wx.EVT_PAINT, self.OnPaint)
def OnNew(self, event):
wx.MessageBox("New MenuItem Clicked")
def OnOpen(self, event):
# REF [site] >> https://docs.wxpython.org/wx.FileDialog.html
with wx.FileDialog(self, "Open File", wildcard="PNG files (*.png)|*.png|JPG files (*.jpg)|*.jpg|BMP and GIF files (*.bmp;*.gif)|*.bmp;*.gif|All files (*.*)|*.*", style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) as dlg:
if dlg.ShowModal() == wx.ID_CANCEL:
return
filepath = dlg.GetPath()
try:
with open(filepath, "r") as fd:
wx.MessageBox("{} opened".format(filepath))
except IOError as ex:
wx.LogError("Cannot open {}: {}.".filepath(filepath, ex))
def OnSaveAs(self, event):
# REF [site] >> https://docs.wxpython.org/wx.FileDialog.html
with wx.FileDialog(self, "Save File", wildcard="PNG files (*.png)|*.png|JPG files (*.jpg)|*.jpg|BMP and GIF files (*.bmp;*.gif)|*.bmp;*.gif|All files (*.*)|*.*", style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT) as dlg:
if dlg.ShowModal() == wx.ID_CANCEL:
return
filepath = dlg.GetPath()
try:
with open(filepath, "w") as fd:
wx.MessageBox("{} saved".format(filepath))
except IOError as ex:
wx.LogError("Cannot save to {}: {}.".format(filepath, ex))
def OnExit(self, event):
self.Close(True)
def OnToggleStatusBar(self, event):
if self.showStatusbarItem.IsChecked():
self.statusbar.Show()
else:
self.statusbar.Hide()
def OnToggleToolBar(self, event):
if self.showToolbarItem.IsChecked():
self.toolbar.Show()
else:
self.toolbar.Hide()
def OnHello(self, event):
wx.MessageBox("Hello again from wxPython")
def OnMessage(self, event):
dial = wx.MessageDialog(None, "Download completed", "Info", wx.OK)
dial.ShowModal()
def OnError(self, event):
dlg = wx.MessageDialog(None, "Error loading file", "Error", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
def OnQuestion(self, event):
dlg = wx.MessageDialog(None, "Are you sure to quit?", "Question", wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)
dlg.ShowModal()
def OnExclamation(self, event):
dlg = wx.MessageDialog(None, "Unallowed operation", "Exclamation", wx.OK | wx.ICON_EXCLAMATION)
dlg.ShowModal()
def OnAbout(self, event):
wx.MessageBox("This is a simple wxPython sample",
"About Simple Example",
wx.OK | wx.ICON_INFORMATION)
def OnPaint(self, event):
dc = wx.PaintDC(self)
dc.SetPen(wx.Pen("#d4d4d4"))
dc.SetBrush(wx.Brush("#c56c00"))
dc.DrawRectangle(10, 15, 90, 60)
dc.SetBrush(wx.Brush("#1ac500"))
dc.DrawRectangle(130, 15, 90, 60)
dc.SetBrush(wx.Brush("#539e47"))
dc.DrawRectangle(250, 15, 90, 60)
dc.SetBrush(wx.Brush("#004fc5"))
dc.DrawRectangle(10, 105, 90, 60)
dc.SetBrush(wx.Brush("#c50024"))
dc.DrawRectangle(130, 105, 90, 60)
dc.SetBrush(wx.Brush("#9e4757"))
dc.DrawRectangle(250, 105, 90, 60)
dc.SetBrush(wx.Brush("#5f3b00"))
dc.DrawRectangle(10, 195, 90, 60)
dc.SetBrush(wx.Brush("#4c4c4c"))
dc.DrawRectangle(130, 195, 90, 60)
dc.SetBrush(wx.Brush("#785f36"))
dc.DrawRectangle(250, 195, 90, 60)
# REF [site] >>
# https://www.wxpython.org/pages/overview/
# https://zetcode.com/wxpython/
def simple_example():
# When this module is run (not imported) then create the app, the frame, show it, and start the event loop.
app = wx.App()
frame = SimpleFrame(None, title="Simple Example !!!")
frame.Show()
app.MainLoop()
def main():
simple_example()
#--------------------------------------------------------------------
if '__main__' == __name__:
main()
| gpl-3.0 | -2,242,071,532,905,609,000 | 33.587755 | 214 | 0.675006 | false | 2.927116 | false | false | false |
afolmert/mentor | src/utils_qt.py | 1 | 15420 | #!/usr/bin/env python
# -*- coding: iso-8859-2 -*-
#
# Copyright (C) 2007 Adam Folmert <[email protected]>
#
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
#
#
"""Here are misc python utilities for PyQt development.
They are collected from different sources and some are written from scratch by
me.
"""
# TODO change import only necessary widgets
import release
import sys
from PyQt4.QtGui import *
from PyQt4.QtCore import *
__version__ = release.version
#
#----------------------------------------------------------
# Misc routines
def tr(text):
return qApp.tr(text)
#
#----------------------------------------------------------
# dialog boxes
# shortcut for displaying message box
def msgbox(aMesg, parent = None):
QMessageBox.information( parent
, "Info"
, aMesg )
def show_info(message, parent=None):
class InfoWidget(QDialog):
def __init__(self, parent=None):
QDialog.__init__(self, parent)
self.setWindowTitle('Information')
self.setGeometry(400, 300, 200, 200)
self.lbl = QLabel()
self.btn = QPushButton('OK')
self.btn.setStyle(Styles.windowsStyle())
layout = QVBoxLayout()
layout.addWidget(self.lbl)
layout.addWidget(self.btn)
self.setLayout(layout)
self.connect(self.btn, SIGNAL("clicked()"), SLOT("accept()"))
propagate_fonts(self, QFont("Fixed", 8))
widget = InfoWidget(parent)
widget.lbl.setText(message)
widget.exec_()
#
#----------------------------------------------------------
# styles classes and routines
class Styles(object):
"""Singleton object for retrieving styles."""
_windowsStyle = None
_cdeStyle = None
_motifStyle = None
_plastiqueStyle = None
@staticmethod
def windowsStyle():
if Styles._windowsStyle is None:
Styles._windowsStyle = QStyleFactory.create('Windows')
return Styles._windowsStyle
@staticmethod
def cdeStyle():
if Styles._cdeStyle is None:
Styles._cdeStyle = QStyleFactory.create('Cde')
return Styles._cdeStyle
@staticmethod
def motifStyle():
if Styles._motifStyle is None:
Styles._motifStyle = QStyleFactory.create('Motif')
return Styles._motifStyle
@staticmethod
def plastiqueStyle():
if Styles._plastiqueStyle is None:
Styles._plastiqueStyle = QStyleFactory.create('Plastique')
return Styles._plastiqueStyle
#
#----------------------------------------------------------
# border layout
class ItemWrapper(object):
def __init__(self, i, p):
self.item = i
self.position = p
class BorderLayout(QLayout):
West, North, South, East, Center = range(5)
MinimumSize, SizeHint = range(2)
def __init__(self, parent=None, margin=0, spacing=-1):
QLayout.__init__(self, parent)
self.setMargin(margin)
self.setSpacing(spacing)
self.list = []
def __del__(self):
l = self.takeAt(0)
while l:
l = self.takeAt(0)
def addItem(self, item):
self.add(item, BorderLayout.West)
def addWidget(self, widget, position):
self.add(QWidgetItem(widget), position)
def expandingDirections(self):
return Qt.Horizontal | Qt.Vertical
def hasHeightForWidth(self):
return False
def count(self):
return len(self.list)
def itemAt(self, index):
if index < len(self.list):
return self.list[index].item
return None
def minimumSize(self):
return self.calculateSize(BorderLayout.MinimumSize)
def setGeometry(self, rect):
center = 0
eastWidth = 0
westWidth = 0
northHeight = 0
southHeight = 0
centerHeight = 0
QLayout.setGeometry(self, rect)
for wrapper in self.list:
item = wrapper.item
position = wrapper.position
if position == BorderLayout.North:
item.setGeometry(QRect(rect.x(), northHeight, rect.width(), item.sizeHint().height()))
northHeight += item.geometry().height() + self.spacing()
elif position == BorderLayout.South:
item.setGeometry(QRect(item.geometry().x(), item.geometry().y(), rect.width(), item.sizeHint().height()))
southHeight += item.geometry().height() + self.spacing()
item.setGeometry(QRect(rect.x(), rect.y() + rect.height() - southHeight + self.spacing(), item.geometry().width(), item.geometry().height()))
elif position == BorderLayout.Center:
center = wrapper
centerHeight = rect.height() - northHeight - southHeight
for wrapper in self.list:
item = wrapper.item
position = wrapper.position
if position == BorderLayout.West:
item.setGeometry(QRect(rect.x() + westWidth, northHeight, item.sizeHint().width(), centerHeight))
westWidth += item.geometry().width() + self.spacing()
elif position == BorderLayout.East:
item.setGeometry(QRect(item.geometry().x(), item.geometry().y(), item.sizeHint().width(), centerHeight))
eastWidth += item.geometry().width() + self.spacing()
item.setGeometry(QRect(rect.x() + rect.width() - eastWidth + self.spacing(), northHeight, item.geometry().width(), item.geometry().height()))
if center:
center.item.setGeometry(QRect(westWidth, northHeight, rect.width() - eastWidth - westWidth, centerHeight))
def sizeHint(self):
return self.calculateSize(BorderLayout.SizeHint)
def takeAt(self, index):
if index >= 0 and index < len(self.list):
layoutStruct = self.list.pop(index)
return layoutStruct.item
return None
def add(self, item, position):
self.list.append(ItemWrapper(item, position))
def calculateSize(self, sizeType):
totalSize = QSize()
for wrapper in self.list:
position = wrapper.position
itemSize = QSize()
if sizeType == BorderLayout.MinimumSize:
itemSize = wrapper.item.minimumSize()
else: # sizeType == BorderLayout.SizeHint
itemSize = wrapper.item.sizeHint()
if position == BorderLayout.North or position == BorderLayout.South or position == BorderLayout.Center:
totalSize.setHeight(totalSize.height() + itemSize.height())
if position == BorderLayout.West or position == BorderLayout.East or position == BorderLayout.Center:
totalSize.setWidth(totalSize.width() + itemSize.width())
return totalSize
def demoBorderLayout():
class Window(QWidget):
def __init__(self, parent=None):
QWidget.__init__(self, parent)
centralWidget = QTextBrowser()
centralWidget.setPlainText(self.tr("Central widget"))
layout = BorderLayout()
layout.addWidget(centralWidget, BorderLayout.Center)
# Qt takes ownership of the widgets in the layout when setLayout() is
# called. Therefore we keep a local reference to each label to prevent
# it being garbage collected until the call to setLayout().
label_n = self.createLabel("North")
layout.addWidget(label_n, BorderLayout.North)
label_w = self.createLabel("West")
layout.addWidget(label_w, BorderLayout.West)
label_e1 = self.createLabel("East 1")
layout.addWidget(label_e1, BorderLayout.East)
label_e2 = self.createLabel("East 2")
layout.addWidget(label_e2, BorderLayout.East)
label_s = self.createLabel("South")
layout.addWidget(label_s, BorderLayout.South)
self.setLayout(layout)
self.setWindowTitle(self.tr("Border Layout"))
def createLabel(self, text):
label = QLabel(text)
label.setFrameStyle(QFrame.Box | QFrame.Raised)
return label
app = QApplication(sys.argv)
window = Window()
window.show()
sys.exit(app.exec_())
#
#----------------------------------------------------------
# flow layout
class FlowLayout(QLayout):
def __init__(self, parent=None, margin=0, spacing=-1):
QLayout.__init__(self, parent)
if parent is not None:
self.setMargin(margin)
self.setSpacing(spacing)
self.itemList = []
def addItem(self, item):
self.itemList.append(item)
def count(self):
return len(self.itemList)
def itemAt(self, index):
if index >= 0 and index < len(self.itemList):
return self.itemList[index]
def takeAt(self, index):
if index >= 0 and index < len(self.itemList):
return self.itemList.pop(index)
def expandingDirections(self):
return Qt.Orientations(Qt.Orientation(0))
def hasHeightForWidth(self):
return True
def heightForWidth(self, width):
height = self.doLayout(QRect(0, 0, width, 0), True)
return height
def setGeometry(self, rect):
QLayout.setGeometry(self, rect)
self.doLayout(rect, False)
def sizeHint(self):
return self.minimumSize()
def minimumSize(self):
size = QSize()
for item in self.itemList:
size = size.expandedTo(item.minimumSize())
size += QSize(2 * self.margin(), 2 * self.margin())
return size
def doLayout(self, rect, testOnly):
x = rect.x()
y = rect.y()
lineHeight = 0
for item in self.itemList:
nextX = x + item.sizeHint().width() + self.spacing()
if nextX - self.spacing() > rect.right() and lineHeight > 0:
x = rect.x()
y = y + lineHeight + self.spacing()
nextX = x + item.sizeHint().width() + self.spacing()
lineHeight = 0
if not testOnly:
item.setGeometry(QRect(QPoint(x, y), item.sizeHint()))
x = nextX
lineHeight = max(lineHeight, item.sizeHint().height())
return y + lineHeight - rect.y()
def demoFlowLayout():
class Window(QWidget):
def __init__(self, parent=None):
QWidget.__init__(self, parent)
flowLayout = FlowLayout()
flowLayout.addWidget(QPushButton(self.tr("Short")))
flowLayout.addWidget(QPushButton(self.tr("Longer")))
flowLayout.addWidget(QPushButton(self.tr("Different text")))
flowLayout.addWidget(QPushButton(self.tr("More text")))
flowLayout.addWidget(QPushButton(self.tr("Even longer button text")))
self.setLayout(flowLayout)
self.setWindowTitle(self.tr("Flow Layout"))
app = QApplication(sys.argv)
mainWin = Window()
mainWin.show()
sys.exit(app.exec_())
#---------------------------------------------------
# This is hackish workaround for smoother displaying dialog boxes and windows in qt
# Basically it delays showing of a window until it is fully drawn.
class MyDesktopFragment(QWidget):
"""This is widget which displays fragment of desktop screen.
It can grab the screen contents and then display it on itself. It may be
useful if we want to simulate buffered dialogs which are initially hidden.
"""
def __init__(self, parent=None):
QWidget.__init__(self, parent)
self._label = QLabel(self)
self._borderWidth = 0
self._initialPalette = self.palette()
self._borderPalette = QPalette(QColor(255, 0, 0))
self.setWindowFlags(Qt.FramelessWindowHint | Qt.WindowStaysOnTopHint)
def resizeEvent(self, event):
b = self._borderWidth
self._label.setGeometry(b, b, self.width() - b * 2, self.height() - b * 2)
def setBorderEnabled(self, enabled=True):
"""This enabled or disables widget border for debugging purposes."""
if enabled:
self.setAutoFillBackground(True)
self.setPalette(self._borderPalette)
self._borderWidth = 1
else:
self.setAutoFillBackground(False)
self.setPalette(self._initialPalette)
self._borderWidth = 0
def grabDesktop(self, rect):
"""Grabs desktop fragment which should be displayed."""
p = QPixmap.grabWindow(QApplication.desktop().winId(), rect.x(), rect.y(), rect.width(), rect.height())
self._label.setPixmap(p)
class LazyWidget(object):
"""Widget proxy which delays window showing until it is fully initialized."""
DelayTime = 100
def __init__(self):
self._widget = None
self._savedPos = QPoint(0, 0)
self._desktopFragment = MyDesktopFragment()
def setWidget(self, widget):
self._widget = widget
def _checkWidget(self):
assert isinstance(self._widget, QWidget), "Invalid widget set!"
def show(self):
self._checkWidget()
self._desktopFragment.grabDesktop(QRect(1000, 700, 1010, 710))
self._desktopFragment.setGeometry(QRect(1000, 700, 1010, 710))
self._desktopFragment.show()
self._moveOffScreen()
self._widget.show()
QTimer.singleShot(LazyWidget.DelayTime, self._moveOnScreen)
def _moveOffScreen(self):
"""Moves widget off screen, so it can initialize without flicker."""
self._checkWidget()
self._savedPos = QPoint(self._widget.x(), self._widget.y())
self._widget.move(1019, 716)
def _moveOnScreen(self):
"""Moves widget on screen, after it has initialized."""
self._checkWidget()
self._widget.move(self._savedPos.x(), self._savedPos.y())
self._desktopFragment.hide()
_lazyWidget = None
def lazyshow(widget):
"""Convenience function for showing windows fully initialized."""
# must initialize here, because QApplication must be constructed first
# this works only for not maximized windows
if widget.isMaximized():
widget.show()
else:
global _lazyWidget
if _lazyWidget is None:
_lazyWidget = LazyWidget()
_lazyWidget.setWidget(widget)
_lazyWidget.show()
# FIXME there must be a way to configure another way!
def propagate_fonts(widget, font):
for c in widget.children():
if isinstance(c, QWidget):
c.setFont(font)
propagate_fonts(c, font)
| gpl-2.0 | -325,066,226,027,989,600 | 28.76834 | 157 | 0.596239 | false | 4.148507 | false | false | false |
UCSD-CCAL/ccal | ccal/plot_context.py | 1 | 6286 | from numpy import absolute
from pandas import Series
from .compute_context import compute_context
from .plot_and_save import plot_and_save
def plot_context(
_1d_array_or_series,
text=None,
n_data=None,
location=None,
scale=None,
degree_of_freedom=None,
shape=None,
fit_fixed_location=None,
fit_fixed_scale=None,
fit_initial_location=None,
fit_initial_scale=None,
n_grid=1e3,
degree_of_freedom_for_tail_reduction=1e8,
minimum_kl=1e-2,
scale_with_kl=True,
multiply_distance_from_reference_argmax=False,
global_location=None,
global_scale=None,
global_degree_of_freedom=None,
global_shape=None,
y_max_is_pdf_max=False,
plot_rug=True,
layout_width=None,
layout_height=None,
title=None,
xaxis_title=None,
html_file_path=None,
plotly_html_file_path=None,
):
if isinstance(_1d_array_or_series, Series):
if title is None:
title = _1d_array_or_series.name
if xaxis_title is None:
xaxis_title = "Value"
if text is None:
text = _1d_array_or_series.index
_1d_array = _1d_array_or_series.values
else:
_1d_array = _1d_array_or_series
context_dict = compute_context(
_1d_array,
n_data=n_data,
location=location,
scale=scale,
degree_of_freedom=degree_of_freedom,
shape=shape,
fit_fixed_location=fit_fixed_location,
fit_fixed_scale=fit_fixed_scale,
fit_initial_location=fit_initial_location,
fit_initial_scale=fit_initial_scale,
n_grid=n_grid,
degree_of_freedom_for_tail_reduction=degree_of_freedom_for_tail_reduction,
minimum_kl=minimum_kl,
scale_with_kl=scale_with_kl,
multiply_distance_from_reference_argmax=multiply_distance_from_reference_argmax,
global_location=global_location,
global_scale=global_scale,
global_degree_of_freedom=global_degree_of_freedom,
global_shape=global_shape,
)
pdf_max = context_dict["pdf"].max()
context_indices = context_dict["context_indices"]
absolute_context_indices = absolute(context_indices)
absolute_context_indices_max = absolute_context_indices.max()
if y_max_is_pdf_max:
y_max = pdf_max
if y_max < absolute_context_indices_max:
absolute_context_indices = (
absolute_context_indices / absolute_context_indices_max * y_max
)
else:
y_max = max(pdf_max, absolute_context_indices_max)
if plot_rug:
yaxis_max = 0.16
yaxis2_min = yaxis_max + 0.08
else:
yaxis_max = 0
yaxis2_min = 0
layout = dict(
width=layout_width,
height=layout_height,
title=title,
xaxis=dict(anchor="y", title=xaxis_title),
yaxis=dict(
domain=(0, yaxis_max), dtick=1, zeroline=False, showticklabels=False
),
yaxis2=dict(domain=(yaxis2_min, 1)),
legend=dict(orientation="h", xanchor="center", x=0.5, y=-0.2),
)
annotations = []
for i, (template, fit_parameter) in enumerate(
zip(
(
"N = {:.0f}",
"Location = {:.2f}",
"Scale = {:.2f}",
"DF = {:.2f}",
"Shape = {:.2f}",
),
context_dict["fit"],
)
):
annotations.append(
dict(
xref="paper",
yref="paper",
x=(i + 1) / (5 + 1),
y=1.064,
xanchor="center",
text=template.format(fit_parameter),
showarrow=False,
)
)
layout.update(annotations=annotations)
data = []
data.append(
dict(
yaxis="y2",
type="histogram",
name="Data",
legendgroup="Data",
x=_1d_array,
marker=dict(color="#20d9ba"),
histnorm="probability density",
hoverinfo="x+y",
)
)
if plot_rug:
data.append(
dict(
type="scatter",
legendgroup="Data",
showlegend=False,
x=_1d_array,
y=(0,) * _1d_array.size,
text=text,
mode="markers",
marker=dict(symbol="line-ns-open", color="#20d9ba"),
hoverinfo="x+text",
)
)
grid = context_dict["grid"]
line_width = 3.2
pdf = context_dict["pdf"]
data.append(
dict(
yaxis="y2",
type="scatter",
name="PDF",
x=grid,
y=pdf,
line=dict(width=line_width, color="#24e7c0"),
)
)
shape_pdf_reference = context_dict["shape_pdf_reference"]
shape_pdf_reference[pdf <= shape_pdf_reference] = None
data.append(
dict(
yaxis="y2",
type="scatter",
name="Shape Reference",
x=grid,
y=shape_pdf_reference,
line=dict(width=line_width, color="#9017e6"),
)
)
location_pdf_reference = context_dict["location_pdf_reference"]
if location_pdf_reference is not None:
location_pdf_reference[pdf <= location_pdf_reference] = None
data.append(
dict(
yaxis="y2",
type="scatter",
name="Location Reference",
x=grid,
y=location_pdf_reference,
line=dict(width=line_width, color="#4e40d8"),
)
)
is_negative = context_dict["context_indices"] < 0
for name, indices, color in (
("- Context", is_negative, "#0088ff"),
("+ Context", ~is_negative, "#ff1968"),
):
data.append(
dict(
yaxis="y2",
type="scatter",
name=name,
x=grid[indices],
y=absolute_context_indices[indices],
line=dict(width=line_width, color=color),
fill="tozeroy",
)
)
plot_and_save(dict(layout=layout, data=data), html_file_path, plotly_html_file_path)
| mit | -5,359,194,133,037,782,000 | 23.65098 | 88 | 0.522431 | false | 3.656777 | false | false | false |
Subsets and Splits