repo_name (string, 5-92 chars) | path (string, 4-221 chars) | copies (string, 19 classes) | size (string, 4-6 chars) | content (string, 766-896k chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 32-997) | alpha_frac (float64, 0.25-0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5-13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fos/fos-legacy | fos/actor/odfslicer.py | 1 | 2054 | import numpy as np
from pyglet.gl import GL_TRIANGLES  # the batch/indexed vertex-list API used below is pyglet's
class ODF_Slice(object):
def __init__(self,odfs,vertices,faces,noiso,batch,group=None):
J=0
# number of ODF glyphs in the grid, used by delete() below
self.odfs_no=odfs.shape[0]*odfs.shape[1]
self.vertex_list=(odfs.shape[0]*odfs.shape[1])*[None]
for index in np.ndindex(odfs.shape[:2]):
values=odfs[index]
if noiso:
values=np.interp(values,[values.min(),values.max()],[0,.5])
inds=faces.ravel().tolist()
shift=index+(0,)
print J,odfs.shape[0]*odfs.shape[1]
# scale the sphere vertices by the ODF amplitudes, then shift to this grid cell
points=np.dot(np.diag(values),vertices)
points=points+np.array(shift)
verx=points.ravel().tolist()
normals=np.zeros((len(vertices),3))
ones_=np.ones(len(values))
colors=np.vstack((values,ones_,ones_)).T
colors=colors.ravel().tolist()
p=vertices
l=faces
# per-face normals from two triangle edge vectors; accumulated per vertex and normalized below
trinormals=np.cross(p[l[:,0]]-p[l[:,1]],\
p[l[:,1]]-p[l[:,2]],\
axisa=1,axisb=1)
for (i,lp) in enumerate(faces):
normals[lp]+=trinormals[i]
div=np.sqrt(np.sum(normals**2,axis=1))
div=div.reshape(len(div),1)
normals=(normals/div)
norms=np.array(normals).ravel().tolist()
self.vertex_list[J] = batch.add_indexed(len(vertices),\
GL_TRIANGLES,\
group,\
inds,\
('v3d/static',verx),\
('n3d/static',norms),\
('c3d/static',colors))
J+=1
def update(self):
pass
def delete(self):
for i in range(self.odfs_no):
self.vertex_list[i].delete()
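# Illustrative usage sketch (not part of the original module). It assumes a
# pyglet window/Batch supplies the GL context and that `vertices`/`faces`
# describe a sphere tessellation (e.g. from dipy's get_sphere()); the grid
# shape below is hypothetical:
#
#   from pyglet.graphics import Batch
#   batch = Batch()
#   odfs = np.random.rand(8, 8, len(vertices))   # one ODF per grid cell
#   slicer = ODF_Slice(odfs, vertices, faces, noiso=True, batch=batch)
#   # ... draw the batch inside the window's on_draw handler, then
#   slicer.delete()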
| bsd-3-clause | -3,123,293,143,917,437,000 | 32.672131 | 75 | 0.399708 | false | 4.183299 | false | false | false |
kkoksvik/FreeCAD | src/Mod/Start/StartPage/StartPage.py | 2 | 26929 | #***************************************************************************
#* *
#* Copyright (c) 2012 *
#* Yorik van Havre <[email protected]> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
# This is the start page template
import os,FreeCAD,FreeCADGui,tempfile,time,zipfile,urllib,re,cStringIO
from PySide import QtGui
from xml.etree.ElementTree import parse
FreeCADGui.addLanguagePath(":/translations")
FreeCADGui.updateLocale()
def translate(context,text):
"convenience function for the Qt translator"
# return str(QtGui.QApplication.translate(context, text, None, QtGui.QApplication.UnicodeUTF8).toUtf8())
u = QtGui.QApplication.translate(context, text, None,
QtGui.QApplication.UnicodeUTF8).encode("utf8")
s = cStringIO.StringIO()
for i in u:
if ord(i) == 39:
s.write("\\'")
else:
s.write(i)
t = s.getvalue()
s.close()
return t
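# For illustration (with no translation catalog loaded): translate("StartPage",
# "Don't show this") returns the UTF-8 string "Don\'t show this"; the escaped
# apostrophe keeps the text safe when it is later embedded inside the
# single-quoted javascript strings built into the HTML page below.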
# texts to be translated
text01 = translate("StartPage","FreeCAD Start Center")
text02 = translate("StartPage","Start a new project")
text03 = translate("StartPage","Recent Files")
text04 = translate("StartPage","Latest videos")
text05 = translate("StartPage","Latest commits")
text06 = translate("StartPage","On the web")
text07 = translate("StartPage","This is the FreeCAD Homepage. Here you will be able to find a lot of information about FreeCAD, including tutorials, examples and user documentation.")
text08 = translate("StartPage","FreeCAD Homepage")
text09 = translate("StartPage","Example projects")
text10 = translate("StartPage","Schenkel STEP file")
text11 = translate("StartPage","Load a PartDesign example")
text12 = translate("StartPage","Load a Drawing extraction")
text13 = translate("StartPage","Load a Robot simulation example")
text14 = translate("StartPage","Projects from the Web")
text15 = translate("StartPage","Schenkel STEP")
text16 = translate("StartPage","Complex Part")
text17 = translate("StartPage","Close this window after opening or creating a file")
text18 = translate("StartPage","Don't show me this window again next time")
text19 = translate("StartPage","Designing parts")
text20 = translate("StartPage","The <b>Part Design</b> workbench is designed to create complex pieces based on constrained 2D sketches. Use it to draw 2D shapes, constrain some of their elements and extrude them to form 3D pieces.")
text21 = translate("StartPage","Example workflow")
text22 = translate("StartPage","Part Design")
text23 = translate("StartPage","Designing architectural elements")
text24 = translate("StartPage","The <b>Architectural Design</b> workbench is specially designed for working with architectural elements such as walls or windows. Start by drawing 2D shapes, and use them as guides to build architecutral objects.")
text25 = translate("StartPage","Architectural Design")
text26 = translate("StartPage","Working with Meshes")
text27 = translate("StartPage","The <b>Mesh Workbench</b> is used to work with Mesh objects. Meshes are simpler 3D objects than Part objects, but they are often easier to import and export to/from other applications.")
text28 = translate("StartPage","FreeCAD offers you several tools to convert between Mesh and Part objects.")
text29 = translate("StartPage","Work with Meshes")
text30 = translate("StartPage","The complete workbench")
text31 = translate("StartPage","FreeCAD Complete workbench")
text32 = translate("StartPage","populated with some of the most commonly used tools.")
text33 = translate("StartPage","file size:")
text34 = translate("StartPage","creation time:")
text35 = translate("StartPage","last modified:")
text36 = translate("StartPage","location:")
text37 = translate("StartPage","User manual")
text38 = translate("StartPage","http://www.freecadweb.org/wiki/index.php?title=Online_Help_Toc")
text39 = translate("StartPage","Tutorials")
text40 = translate("StartPage","Python resources")
text41 = translate("StartPage","File not found")
text42 = translate("StartPage","from <a href=http://twitter.com/FreeCADNews>@FreeCADNews</a>")
text43 = translate("StartPage","The FreeCAD-tutorial blog")
text44 = translate("StartPage","from <a href=http://www.youtube.com/user/FreeCADNews?feature=mhee>FreeCADNews channel</a>")
text45 = translate("StartPage","This is the official user manual of FreeCAD, built, maintained and translated by the FreeCAD community.")
text46 = translate("StartPage","The tutorials section on the FreeCAD website")
text47 = translate("StartPage","The section of the FreeCAD website dedicated to python scripting, with examples, explanations, and API commands.")
text48 = translate("StartPage","A blog dedicated to teaching FreeCAD, maintained by members of the FreeCAD community")
text49 = translate("StartPage","Getting started")
text50 = translate("StartPage","The FreeCAD interface is divided in workbenches, which are sets of tools suited for a specific task. You can start with one of the workbenches in this list, or with the complete workbench, which presents you with some of the most used tools gathered from other workbenches. Click to read more about workbenches on the FreeCAD website.")
text51 = translate("StartPage","http://www.freecadweb.org/wiki/index.php?title=Workbenches")
text52 = translate("StartPage","Ship Design")
text53 = translate("StartPage","Designing and calculating ships")
text54 = translate("StartPage","The <b>Ship Design</b> module offers several tools to help ship designers to view, model and calculate profiles and other specific properties of ship hulls.")
text55 = translate("StartPage","Load an Architectural example model")
text56 = translate("StartPage","http://www.freecadweb.org/wiki/index.php?title=Tutorials")
text57 = translate("StartPage","http://www.freecadweb.org/wiki/index.php?title=Power_users_hub")
text58 = translate("StartPage","Your version of FreeCAD is up to date.")
text59 = translate("StartPage","There is a new release of FreeCAD available.")
text60 = translate("StartPage","Load an FEM example analysis")
text61 = translate("StartPage","Obtain a development version")
text62 = translate("StartPage","<b>Development versions</b> are made available by community members from time to time and usually contain the latest changes, but are more likely to contain bugs.")
text63 = translate("StartPage","See all commits")
# get FreeCAD version
v = FreeCAD.Version()
vmajor = v[0]
vminor = v[1]
vbuild = v[2].split(" ")[0]
# here is the html page skeleton
page = """
<html>
<head>
<title>FreeCAD - Start page</title>
<script language="javascript">
var linkDescriptions = [];
function JSONscriptRequest(fullUrl) {
// REST request path
this.fullUrl = fullUrl;
// Get the DOM location to put the script tag
this.headLoc = document.getElementsByTagName("head").item(0);
// Generate a unique script tag id
this.scriptId = 'JscriptId' + JSONscriptRequest.scriptCounter++;
}
// Static script ID counter
JSONscriptRequest.scriptCounter = 1;
JSONscriptRequest.prototype.buildScriptTag = function () {
// Create the script tag
this.scriptObj = document.createElement("script");
// Add script object attributes
this.scriptObj.setAttribute("type", "text/javascript");
this.scriptObj.setAttribute("charset", "utf-8");
this.scriptObj.setAttribute("src", this.fullUrl);
this.scriptObj.setAttribute("id", this.scriptId);
}
JSONscriptRequest.prototype.removeScriptTag = function () {
// Destroy the script tag
this.headLoc.removeChild(this.scriptObj);
}
JSONscriptRequest.prototype.addScriptTag = function () {
// Create the script tag
this.headLoc.appendChild(this.scriptObj);
}
function show(theText) {
ddiv = document.getElementById("description");
if (theText == "") theText = " ";
ddiv.innerHTML = theText;
}
function checkVersion(data) {
vdiv = document.getElementById("versionbox");
var cmajor = """ + vmajor + """;
var cminor = """ + vminor + """;
var cbuild = """ + vbuild + """;
var amajor = data[0]['major'];
var aminor = data[0]['minor'];
var abuild = data[0]['build'];
if (cmajor >= amajor && cminor >= aminor && cbuild >= abuild) {
vdiv.innerHTML=" """ + text58 + """: """ + vmajor + """.""" + vminor + """.""" + vbuild + """";
} else {
vdiv.innerHTML="<a href=exthttp://github.com/FreeCAD/FreeCAD/releases/latest> """ + text59 + """:"+amajor+"."+aminor+"."+abuild+"</a>";
}
}
function load() {
// load latest news
ddiv = document.getElementById("news");
ddiv.innerHTML = "Connecting...";
var tobj=new JSONscriptRequest('https://api.github.com/repos/FreeCAD/FreeCAD/commits?callback=showTweets');
tobj.buildScriptTag(); // Build the script tag
tobj.addScriptTag(); // Execute (add) the script tag
ddiv.innerHTML = "Downloading latest news...";
// load version
var script = document.createElement('script');
script.src = 'http://www.freecadweb.org/version.php?callback=checkVersion';
document.body.appendChild(script);
}
function stripTags(text) {
// from http://www.pagecolumn.com/tool/all_about_html_tags.htm /<\s*\/?\s*span\s*.*?>/g
stripped = text.replace("<table", "<div");
stripped = stripped.replace("</table", "</div");
stripped = stripped.replace("<tr", "<tr");
stripped = stripped.replace("</tr", "</tr");
stripped = stripped.replace("<td", "<td");
stripped = stripped.replace("</td", "</td");
stripped = stripped.replace("555px", "auto");
stripped = stripped.replace("border:1px", "border:0px");
stripped = stripped.replace("color:#000000;","");
return stripped;
}
function showTweets(data) {
ddiv = document.getElementById('news');
ddiv.innerHTML = "Received";
var html = ['<ul>'];
for (var i = 0; i < 15; i++) {
html.push('<li><img src="web.png"> <a href="ext', data.data[i].commit.url, '" onMouseOver="showDescr(', i+1, ')" onMouseOut="showDescr()">', data.data[i].commit.message, '</a></li>');
if ("message" in data.data[i].commit) {
linkDescriptions.push(stripTags(data.data[i].commit.message)+'<br/>'+data.data[i].commit.author.name+'<br/>'+data.data[i].commit.author.date);
} else {
linkDescriptions.push("");
}
}
html.push('</ul>');
html.push('<a href="exthttp://github.com/FreeCAD/FreeCAD/commits/master">""" + text63 + """<a/>');
ddiv.innerHTML = html.join('');
}
function showDescr(d) {
if (d) {
show(linkDescriptions[d-1]);
} else {
show("");
}
}
function scroller() {
desc = document.getElementById("description");
base = document.getElementById("column").offsetTop;
scro = window.scrollY;
if (scro > base) {
desc.className = "stick";
} else {
desc.className = "";
}
}
document.onmousemove=scroller;
</script>
<style type="text/css">
body {
background: #basecolor;
color: #textcolor;
font-family: Arial, Helvetica, Sans;
font-size: 11px;
}
a {
color: #linkcolor;
font-weight: bold;
text-decoration: none;
padding: 2px;
}
a:hover {
color: white;
background: #linkcolor;
border-radius: 5px;
}
p {
text-align: justify;
}
.left {
text-align: left;
}
h1 {
font-size: 3em;
letter-spacing: 2px;
padding: 20px 0 0 80px;
align: bottom;
color: #ffffff;
}
h2 {
font-size: 1.2em;
}
ul {
list-style-type: none;
padding: 0;
}
#column {
margin: 0 350px 0 10px;
}
#column img {
max-width: 14px;
}
.block {
background: #windowcolor;
border-radius: 5px;
padding: 8px;
margin-bottom: 10px;
color: #windowtextcolor;
width: auto;
}
.options {
clear: both;
}
.from {
font-size: 0.7em;
font-weight: normal;
}
#versionbox {
float: right;
text-align: right;
font-size: 0.33em;
font-weight: normal;
padding-right: 20px;
letter-spacing: 0;
color: #ffffff;
}
#description {
background: #windowcolor;
border-radius: 5px;
padding: 8px;
color: #windowtextcolor;
float: right;
width: 316px;
right: 10px;
height: 100%;
position: relative;
}
#description img {
max-width: 300px;
clear: both;
}
pre {
width: 300px !important;
white-space: pre-wrap;
}
.stick {
position: fixed !important;
top: 0px;
right: 18px !important;
}
</style>
</head>
<body onload="load()">
<h1><img src="FreeCAD.png"> """ + text01 + """<div id=versionbox> </div></h1>
<div id="description">
</div>
<div id="column">
<div class="block">
<h2>""" + text02 + """</h2>
defaultworkbenches
</div>
<div class="block">
<h2>""" + text03 + """</h2>
recentfiles
</div>
<div class="block">
<h2>""" + text05 + """</h2>
<div id="news">news feed</div>
</div>
<div class="block">
<h2>""" + text06 + """</h2>
defaultlinks
</div>
<div class="block">
<h2>""" + text09 + """</h2>
defaultexamples
</div>
customblocks
</div>
<!--
<form class="options">
<input type="checkbox" name="closeThisDialog">
""" + text17 + """<br/>
<input type="checkbox" name="dontShowAgain">
""" + text18 + """
</form>
-->
</body>
</html>
"""
def getWebExamples():
return """
<ul>
<li><a href="http://freecad-project.de/svn/ExampleData/FileFormates/Schenkel.stp">""" + text15 + """</a></li>
<li><a href="http://freecad-project.de/svn/ExampleData/Examples/CAD/Complex.FCStd">""" + text16 + """</a></li>
</ul>"""
def getExamples():
return """
<ul>
<li><img src="FreeCAD.png" style="width: 16px"> <a href="LoadSchenkel.py">""" + text10 + """</a></li>
<li><img src="FreeCAD.png" style="width: 16px"> <a href="LoadPartDesignExample.py">""" + text11 + """</a></li>
<li><img src="FreeCAD.png" style="width: 16px"> <a href="LoadDrawingExample.py">""" + text12 + """</a></li>
<li><img src="FreeCAD.png" style="width: 16px"> <a href="LoadRobotExample.py">""" + text13 + """</a></li>
<li><img src="FreeCAD.png" style="width: 16px"> <a href="LoadArchExample.py">""" + text55 + """</a></li>
<li><img src="FreeCAD.png" style="width: 16px"> <a href="LoadFemExample.py">""" + text60 + """</a></li>
</ul>"""
def getLinks():
return """
<ul>
<li><img src="web.png">
<a onMouseover="show('<p>""" + text07 + """</p>')"
onMouseout="show('')"
href="exthttp://www.freecadweb.org/">""" + text08 + """</a></li>
<li><img src="web.png">
<a onMouseover="show('<p>""" + text45 + """</p>')"
onMouseout="show('')"
href=ext""" + text38 + """>""" + text37 + """</a></li>
<li><img src="web.png">
<a onMouseover="show('<p>""" + text46 + """</p>')"
onMouseout="show('')"
href=ext""" + text56 + """>""" + text39 + """</a></li>
<li><img src="web.png">
<a onMouseover="show('<p>""" + text47 + """</p>')"
onMouseout="show('')"
href=ext""" + text57 + """>""" + text40 + """</a></li>
<li><img src="web.png">
<a onMouseover="show('<p>""" + text48 + """</p>')"
onMouseout="show('')"
href="exthttp://freecad-tutorial.blogspot.com/">""" + text43 + """</a></li>
<li><img src="web.png">
<a href="exthttp://github.com/FreeCAD/FreeCAD/releases"
onMouseOver="show('<p>""" + text62 + """</p>')"
onMouseOut="show('')">""" + text61 + """</a></li>
</ul>"""
def getWorkbenches():
return """
<ul>
<li><img src="blank.png">
<a onMouseover="show('<h3>""" + text49 + """</h3> \
<p>""" + text50 + """</p>')"
onMouseout="show('')"
href=""" + text51 + """>""" + text49 + """</a>
</li>
<li><img src="PartDesign.png">
<a onMouseover="show('<h3>""" + text19 + """</h3> \
<p>""" + text20 + """</p><p><small>""" + text21 + """ \
:</small></p><img src=PartDesignExample.png>')"
onMouseout="show('')"
href="PartDesign.py">""" + text22 + """</a>
</li>
<li><img src="ArchDesign.png">
<a onMouseover="show('<h3>""" + text23 + """</h3> \
<p>""" + text24 + """</p><p><small>""" + text21 + """ \
:</small></p><img src=ArchExample.png>')"
onMouseout="show('')"
href="ArchDesign.py">""" + text25 + """</a>
</li>
<li><img src="Ship.png">
<a onMouseover="show('<h3>""" + text53 + """</h3> \
<p>""" + text54 + """</p><p><small>""" + text21 + """ \
:</small></p><img src=ShipExample.png>')"
onMouseout="show('')"
href="Ship.py">""" + text52 + """</a>
</li>
<li><img src="Mesh.png">
<a onMouseover="show('<h3>""" + text26 + """</h3> \
<p>""" + text27 + """</p><p>""" + text28 + """</p>')"
onMouseout="show('')"
href="Mesh.py">""" + text29 + """</a>
</li>
</ul>"""
def getInfo(filename):
"returns available file information"
def getLocalTime(timestamp):
"returns a local time from a timestamp"
return time.strftime("%m/%d/%Y %H:%M:%S",time.localtime(timestamp))
def getSize(size):
"returns a human-readable size"
if size > 1024*1024:
hsize = str(size/(1024*1024)) + "Mb"
elif size > 1024:
hsize = str(size/1024) + "Kb"
else:
hsize = str(size) + "b"
return hsize
html = '<h3>'+os.path.basename(filename)+'</h3>'
if os.path.exists(filename):
# get normal file info
s = os.stat(filename)
html += "<p>" + text33 + " " + getSize(s.st_size) + "<br/>"
html += text34 + " " + getLocalTime(s.st_ctime) + "<br/>"
html += text35 + " " + getLocalTime(s.st_mtime) + "<br/>"
html += "<span>" + text36 + " " + filename + "</span></p>"
# get additional info from fcstd files
if os.path.splitext(filename)[1].upper() in [".FCSTD"]:
zfile=zipfile.ZipFile(filename)
files=zfile.namelist()
# check for meta-file if it's really a FreeCAD document
if files[0] == "Document.xml":
html += "<p>FreeCAD Standard File</p>"
image="thumbnails/Thumbnail.png"
if image in files:
image=zfile.read(image)
thumbfile = tempfile.mkstemp(suffix='.png')[1]
thumb = open(thumbfile,"wb")
thumb.write(image)
thumb.close()
html += '<img src=file://'
html += thumbfile + '><br/>'
else:
html += "<p>" + text41 + "</p>"
return html
def getRecentFiles():
"returns a list of 3 latest recent files"
rf = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/RecentFiles")
ct = rf.GetInt("RecentFiles")
html = '<ul>'
for i in range(3):
if i < ct:
mr = rf.GetString("MRU%d" % (i))
if os.path.exists(mr):
fn = os.path.basename(mr)
html += '<li>'
if mr[-5:].upper() == "FCSTD":
html += '<img src="freecad-doc.png" style="width: 16px"> '
else:
html += '<img src="blank.png" style="width: 16px"> '
html += '<a '
html += 'onMouseover="show(\''+getInfo(mr)+'\')" '
html += 'onMouseout="show(\'\')" '
html += 'href="LoadMRU'+str(i)+'.py">'
html += fn
html += '</a></li>'
html += '</ul>'
return html
def getFeed(url,numitems=3):
"returns a html list with links from the given RSS feed url"
xml = parse(urllib.urlopen(url)).getroot()
items = []
channel = xml.find('channel')
for element in channel.findall('item'):
items.append({'title': element.find('title').text,
'description': element.find('description').text,
'link': element.find('link').text})
if len(items) > numitems:
items = items[:numitems]
resp = '<ul>'
for item in items:
descr = re.compile("style=\".*?\"").sub('',item['description'])
descr = re.compile("alt=\".*?\"").sub('',descr)
descr = re.compile("\"").sub('',descr)
d1 = re.findall("<img.*?>",descr)[0]
d2 = re.findall("<span>.*?</span>",descr)[0]
descr = "<h3>" + item['title'] + "</h3>"
descr += d1 + "<br/>"
descr += d2
resp += '<li><a onMouseover="show(\''
resp += descr
resp += '\')" onMouseout="show(\'\')" href="'
resp += item['link']
resp += '">'
resp += item['title']
resp += '</a></li>'
resp += '</ul>'
print resp
return resp
def getCustomBlocks():
"fetches custom html files in FreeCAD user dir"
output = ""
return output
def setColors(html):
"gets theme colors from the system, and sets appropriate styles"
defaults = {"#basecolor":"#191B26",
"#linkcolor":"#0092E8",
"#textcolor":"#FFFFFF",
"#windowcolor":"#FFFFFF",
"#windowtextcolor":"#000000"}
try:
palette = QtGui.qApp.palette()
except:
pass
else:
#defaults["#basecolor"] = palette.base().color().name()
defaults["#basecolor"] = "#171A2B url(Background.jpg)"
#defaults["#linkcolor"] = palette.link().color().name() # UGLY!!
defaults["#textcolor"] = palette.text().color().name()
defaults["#windowcolor"] = palette.window().color().name()
defaults["#windowtextcolor"] = palette.windowText().color().name()
for k,v in defaults.iteritems():
html = html.replace(k,str(v))
return html
def handle():
"returns the complete html startpage"
# add recent files
recentfiles = getRecentFiles()
html = page.replace("recentfiles",recentfiles)
# add default workbenches
html = html.replace("defaultworkbenches",getWorkbenches())
# add default web links
html = html.replace("defaultlinks",getLinks())
# add default examples
html = html.replace("defaultexamples",getExamples())
# add web examples
#html = html.replace("webexamples",getWebExamples())
# add custom blocks
html = html.replace("customblocks",getCustomBlocks())
# fetches system colors
html = setColors(html)
return html
def exportTestFile():
f = open(os.path.expanduser("~")+os.sep+"freecad-startpage.html","wb")
f.write(handle())
f.close()
| lgpl-2.1 | -881,293,649,298,753,800 | 38.252616 | 368 | 0.527461 | false | 3.922651 | false | false | false |
Nikita1710/ANUFifty50-Online-Mentoring-Platform | project/fifty_fifty/webcore/views.py | 1 | 4115 | from django.shortcuts import render, get_object_or_404
from django.core.mail import send_mail, BadHeaderError
from django.contrib import messages
from django.conf import settings
from django.contrib.auth.decorators import login_required
from content.models import Mentee, Mentor, Content_Summary
from blog.models import Post
from webcore.models import Profile
from feedback.forms import FeedbackForm
from feedback.models import Feedback_contact
from django.utils import timezone
#from content
# Create your views here.
def home(request):
context = locals()
template = 'index.html'
return render(request,template,context)
@login_required
def userProfile(request):
user = request.user
context = {'user':user, 'summary_list':Content_Summary.objects.all()}
template = 'menteelogin.html'
return render(request,template,context)
@login_required
def userProfileNews(request):
user = request.user
posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('published_date')
template = 'blog/post_list.html'
return render(request,template, {'posts': posts})
## post_detail views the blog posts individually
@login_required
def post_detail(request, pk):
post = get_object_or_404(Post, pk=pk)
template = 'blog/post_detail.html'
return render(request, template, {'post': post})
@login_required
def userProfileMentor(request):
user = request.user
template = 'mentor.html'
return render(request,template)
@login_required
def userProfileResources(request):
user = request.user
context = {'user':user, 'post_list':Post.objects.all(), 'mentee_list':Mentee.objects.all(), 'mentor_list':Mentor.objects.all(), 'Content_Summary_list':Content_Summary.objects.all()}
template = 'resources.html'
return render(request,template,context)
@login_required
def userProfileFAQ(request):
user = request.user
context = {'user':user}
template = 'FAQ.html'
return render(request,template,context)
@login_required
def userProfileProfile(request):
user = request.user
context = {'user':user}
template = 'profile.html'
return render(request,template,context)
@login_required
def userProfileContent(request):
user = request.user
context = {'user':user, 'mentee_list':Mentee.objects.all(), 'mentor_list':Mentor.objects.all()}
template = 'content.html'
return render(request,template,context)
@login_required
def userProfileSettings(request):
user = request.user
context = {'user':user}
template = 'settings.html'
return render(request,template,context)
@login_required
def feedback_process(request):
User = get_object_or_404(Profile, pk=request.user.pk)
contact_template = 'feedback/feedback_contact.html'
# success_template = 'thanks.html'
# if this is a POST request we need to process the form data
if request.method == 'POST':
# create a form instance and populate it with data from the request:
form = FeedbackForm(request.POST)
# check whether it's valid:
if form.is_valid():
receiver_email = settings.EMAIL_HOST_USER
subject = form.subject(User.role)
message = form.cleaned_data['message']
# handle email exceptions
try:
send_mail(subject, message, request.user.email, [receiver_email])
except Exception as ex:
data = messages.add_message(request, messages.ERROR,'An error occurred. {}'.format(str(ex)))
else:
feedback_form = form.save(commit=False)
# feedback_form.receiver_email = receiver_email
feedback_form.user = User
feedback_form.save()
data = messages.add_message(request, messages.INFO, 'Thanks for sending a feedback.')
# render thank you message
return render(request, contact_template, {'message': data})
# if a GET (or any other method) we'll create a blank form
else:
form = FeedbackForm(user=User.user)
return render(request, contact_template, {'form': form})
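# Illustrative URL configuration for the views above (a sketch, not part of the
# original file). The route patterns, names, and the "webcore" import path are
# assumptions about how this app is wired into the project's urls.py:
#
#   from django.conf.urls import url
#   from webcore import views
#
#   urlpatterns = [
#       url(r'^$', views.home, name='home'),
#       url(r'^me/$', views.userProfile, name='profile'),
#       url(r'^news/$', views.userProfileNews, name='news'),
#       url(r'^news/(?P<pk>\d+)/$', views.post_detail, name='post_detail'),
#       url(r'^feedback/$', views.feedback_process, name='feedback'),
#   ]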
| apache-2.0 | 8,039,884,220,214,451,000 | 33.579832 | 185 | 0.687728 | false | 3.849392 | false | false | false |
NicoVarg99/daf-recipes | ckan/ckan/ckan/ckan/tests/logic/action/test_delete.py | 1 | 20446 | # encoding: utf-8
import nose.tools
import ckan.tests.helpers as helpers
import ckan.tests.factories as factories
import ckan.logic as logic
import ckan.model as model
import ckan.plugins as p
import ckan.lib.search as search
assert_equals = nose.tools.assert_equals
assert_raises = nose.tools.assert_raises
class TestDelete:
def setup(self):
helpers.reset_db()
def test_resource_delete(self):
user = factories.User()
sysadmin = factories.Sysadmin()
resource = factories.Resource(user=user)
context = {}
params = {'id': resource['id']}
helpers.call_action('resource_delete', context, **params)
# Not even a sysadmin can see it now
assert_raises(logic.NotFound, helpers.call_action, 'resource_show',
{'user': sysadmin['name']}, **params)
# It is still there but with state=deleted
res_obj = model.Resource.get(resource['id'])
assert_equals(res_obj.state, 'deleted')
class TestDeleteResourceViews(object):
@classmethod
def setup_class(cls):
if not p.plugin_loaded('image_view'):
p.load('image_view')
helpers.reset_db()
@classmethod
def teardown_class(cls):
p.unload('image_view')
def test_resource_view_delete(self):
resource_view = factories.ResourceView()
params = {'id': resource_view['id']}
helpers.call_action('resource_view_delete', context={}, **params)
assert_raises(logic.NotFound, helpers.call_action,
'resource_view_show',
context={}, **params)
# The model object is actually deleted
resource_view_obj = model.ResourceView.get(resource_view['id'])
assert_equals(resource_view_obj, None)
def test_delete_no_id_raises_validation_error(self):
params = {}
assert_raises(logic.ValidationError, helpers.call_action,
'resource_view_delete',
context={}, **params)
def test_delete_wrong_id_raises_not_found_error(self):
params = {'id': 'does_not_exist'}
assert_raises(logic.NotFound, helpers.call_action,
'resource_view_delete',
context={}, **params)
class TestClearResourceViews(object):
@classmethod
def setup_class(cls):
if not p.plugin_loaded('image_view'):
p.load('image_view')
if not p.plugin_loaded('recline_view'):
p.load('recline_view')
helpers.reset_db()
@classmethod
def teardown_class(cls):
p.unload('image_view')
p.unload('recline_view')
def test_resource_view_clear(self):
factories.ResourceView(view_type='image_view')
factories.ResourceView(view_type='image_view')
factories.ResourceView(view_type='recline_view')
factories.ResourceView(view_type='recline_view')
count = model.Session.query(model.ResourceView).count()
assert_equals(count, 4)
helpers.call_action('resource_view_clear', context={})
count = model.Session.query(model.ResourceView).count()
assert_equals(count, 0)
def test_resource_view_clear_with_types(self):
factories.ResourceView(view_type='image_view')
factories.ResourceView(view_type='image_view')
factories.ResourceView(view_type='recline_view')
factories.ResourceView(view_type='recline_view')
count = model.Session.query(model.ResourceView).count()
assert_equals(count, 4)
helpers.call_action('resource_view_clear', context={},
view_types=['image_view'])
view_types = model.Session.query(model.ResourceView.view_type).all()
assert_equals(len(view_types), 2)
for view_type in view_types:
assert_equals(view_type[0], 'recline_view')
class TestDeleteTags(object):
def test_tag_delete_with_unicode_returns_unicode_error(self):
# There is not a lot of call for it, but in theory there could be
# unicode in the ActionError error message, so ensure that comes
# through in NotFound as unicode.
try:
helpers.call_action('tag_delete', id=u'Delta symbol: \u0394')
except logic.NotFound, e:
assert u'Delta symbol: \u0394' in unicode(e)
else:
assert 0, 'Should have raised NotFound'
class TestGroupPurge(object):
def setup(self):
helpers.reset_db()
def test_a_non_sysadmin_cant_purge_group(self):
user = factories.User()
group = factories.Group(user=user)
assert_raises(logic.NotAuthorized,
helpers.call_action,
'group_purge',
context={'user': user['name'], 'ignore_auth': False},
id=group['name'])
def test_purged_group_does_not_show(self):
group = factories.Group()
helpers.call_action('group_purge', id=group['name'])
assert_raises(logic.NotFound, helpers.call_action, 'group_show',
context={}, id=group['name'])
def test_purged_group_is_not_listed(self):
group = factories.Group()
helpers.call_action('group_purge', id=group['name'])
assert_equals(helpers.call_action('group_list', context={}), [])
def test_dataset_in_a_purged_group_no_longer_shows_that_group(self):
group = factories.Group()
dataset = factories.Dataset(groups=[{'name': group['name']}])
helpers.call_action('group_purge', id=group['name'])
dataset_shown = helpers.call_action('package_show', context={},
id=dataset['id'])
assert_equals(dataset_shown['groups'], [])
def test_purged_group_is_not_in_search_results_for_its_ex_dataset(self):
search.clear_all()
group = factories.Group()
dataset = factories.Dataset(groups=[{'name': group['name']}])
def get_search_result_groups():
results = helpers.call_action('package_search',
q=dataset['title'])['results']
return [g['name'] for g in results[0]['groups']]
assert_equals(get_search_result_groups(), [group['name']])
helpers.call_action('group_purge', id=group['name'])
assert_equals(get_search_result_groups(), [])
def test_purged_group_leaves_no_trace_in_the_model(self):
factories.Group(name='parent')
user = factories.User()
group1 = factories.Group(name='group1',
extras=[{'key': 'key1', 'value': 'val1'}],
users=[{'name': user['name']}],
groups=[{'name': 'parent'}])
factories.Dataset(name='ds', groups=[{'name': 'group1'}])
factories.Group(name='child', groups=[{'name': 'group1'}])
num_revisions_before = model.Session.query(model.Revision).count()
helpers.call_action('group_purge', id=group1['name'])
num_revisions_after = model.Session.query(model.Revision).count()
# the Group and related objects are gone
assert_equals(sorted([g.name for g in
model.Session.query(model.Group).all()]),
['child', 'parent'])
assert_equals(model.Session.query(model.GroupExtra).all(), [])
# the only members left are the users for the parent and child
assert_equals(sorted([
(m.table_name, m.group.name)
for m in model.Session.query(model.Member).join(model.Group)]),
[('user', 'child'), ('user', 'parent')])
# the dataset is still there though
assert_equals([p.name for p in model.Session.query(model.Package)],
['ds'])
# the group's object revisions were purged too
assert_equals(sorted(
[gr.name for gr in model.Session.query(model.GroupRevision)]),
['child', 'parent'])
assert_equals(model.Session.query(model.GroupExtraRevision).all(),
[])
# Member is not revisioned
# No Revision objects were purged, in fact 1 is created for the purge
assert_equals(num_revisions_after - num_revisions_before, 1)
def test_missing_id_returns_error(self):
assert_raises(logic.ValidationError,
helpers.call_action, 'group_purge')
def test_bad_id_returns_404(self):
assert_raises(logic.NotFound,
helpers.call_action, 'group_purge', id='123')
class TestOrganizationPurge(object):
def setup(self):
helpers.reset_db()
def test_a_non_sysadmin_cant_purge_org(self):
user = factories.User()
org = factories.Organization(user=user)
assert_raises(logic.NotAuthorized,
helpers.call_action,
'organization_purge',
context={'user': user['name'], 'ignore_auth': False},
id=org['name'])
def test_purged_org_does_not_show(self):
org = factories.Organization()
helpers.call_action('organization_purge', id=org['name'])
assert_raises(logic.NotFound, helpers.call_action, 'organization_show',
context={}, id=org['name'])
def test_purged_org_is_not_listed(self):
org = factories.Organization()
helpers.call_action('organization_purge', id=org['name'])
assert_equals(helpers.call_action('organization_list', context={}), [])
def test_dataset_in_a_purged_org_no_longer_shows_that_org(self):
org = factories.Organization()
dataset = factories.Dataset(owner_org=org['id'])
helpers.call_action('organization_purge', id=org['name'])
dataset_shown = helpers.call_action('package_show', context={},
id=dataset['id'])
assert_equals(dataset_shown['owner_org'], None)
def test_purged_org_is_not_in_search_results_for_its_ex_dataset(self):
search.clear_all()
org = factories.Organization()
dataset = factories.Dataset(owner_org=org['id'])
def get_search_result_owner_org():
results = helpers.call_action('package_search',
q=dataset['title'])['results']
return results[0]['owner_org']
assert_equals(get_search_result_owner_org(), org['id'])
helpers.call_action('organization_purge', id=org['name'])
assert_equals(get_search_result_owner_org(), None)
def test_purged_organization_leaves_no_trace_in_the_model(self):
factories.Organization(name='parent')
user = factories.User()
org1 = factories.Organization(
name='org1',
extras=[{'key': 'key1', 'value': 'val1'}],
users=[{'name': user['name']}],
groups=[{'name': 'parent'}])
factories.Dataset(name='ds', owner_org=org1['id'])
factories.Organization(name='child', groups=[{'name': 'org1'}])
num_revisions_before = model.Session.query(model.Revision).count()
helpers.call_action('organization_purge', id=org1['name'])
num_revisions_after = model.Session.query(model.Revision).count()
# the Organization and related objects are gone
assert_equals(sorted([o.name for o in
model.Session.query(model.Group).all()]),
['child', 'parent'])
assert_equals(model.Session.query(model.GroupExtra).all(), [])
# the only members left are the users for the parent and child
assert_equals(sorted([
(m.table_name, m.group.name)
for m in model.Session.query(model.Member).join(model.Group)]),
[('user', 'child'), ('user', 'parent')])
# the dataset is still there though
assert_equals([p.name for p in model.Session.query(model.Package)],
['ds'])
# the organization's object revisions were purged too
assert_equals(sorted(
[gr.name for gr in model.Session.query(model.GroupRevision)]),
['child', 'parent'])
assert_equals(model.Session.query(model.GroupExtraRevision).all(),
[])
# Member is not revisioned
# No Revision objects were purged, in fact 1 is created for the purge
assert_equals(num_revisions_after - num_revisions_before, 1)
def test_missing_id_returns_error(self):
assert_raises(logic.ValidationError,
helpers.call_action, 'organization_purge')
def test_bad_id_returns_404(self):
assert_raises(logic.NotFound,
helpers.call_action, 'organization_purge', id='123')
class TestDatasetPurge(object):
def setup(self):
helpers.reset_db()
def test_a_non_sysadmin_cant_purge_dataset(self):
user = factories.User()
dataset = factories.Dataset(user=user)
assert_raises(logic.NotAuthorized,
helpers.call_action,
'dataset_purge',
context={'user': user['name'], 'ignore_auth': False},
id=dataset['name'])
def test_purged_dataset_does_not_show(self):
dataset = factories.Dataset()
helpers.call_action('dataset_purge',
context={'ignore_auth': True},
id=dataset['name'])
assert_raises(logic.NotFound, helpers.call_action, 'package_show',
context={}, id=dataset['name'])
def test_purged_dataset_is_not_listed(self):
dataset = factories.Dataset()
helpers.call_action('dataset_purge', id=dataset['name'])
assert_equals(helpers.call_action('package_list', context={}), [])
def test_group_no_longer_shows_its_purged_dataset(self):
group = factories.Group()
dataset = factories.Dataset(groups=[{'name': group['name']}])
helpers.call_action('dataset_purge', id=dataset['name'])
dataset_shown = helpers.call_action('group_show', context={},
id=group['id'],
include_datasets=True)
assert_equals(dataset_shown['packages'], [])
def test_purged_dataset_is_not_in_search_results(self):
search.clear_all()
dataset = factories.Dataset()
def get_search_results():
results = helpers.call_action('package_search',
q=dataset['title'])['results']
return [d['name'] for d in results]
assert_equals(get_search_results(), [dataset['name']])
helpers.call_action('dataset_purge', id=dataset['name'])
assert_equals(get_search_results(), [])
def test_purged_dataset_leaves_no_trace_in_the_model(self):
factories.Group(name='group1')
org = factories.Organization()
dataset = factories.Dataset(
tags=[{'name': 'tag1'}],
groups=[{'name': 'group1'}],
owner_org=org['id'],
extras=[{'key': 'testkey', 'value': 'testvalue'}])
factories.Resource(package_id=dataset['id'])
num_revisions_before = model.Session.query(model.Revision).count()
helpers.call_action('dataset_purge',
context={'ignore_auth': True},
id=dataset['name'])
num_revisions_after = model.Session.query(model.Revision).count()
# the Package and related objects are gone
assert_equals(model.Session.query(model.Package).all(), [])
assert_equals(model.Session.query(model.Resource).all(), [])
assert_equals(model.Session.query(model.PackageTag).all(), [])
# there is no clean-up of the tag object itself, just the PackageTag.
assert_equals([t.name for t in model.Session.query(model.Tag).all()],
['tag1'])
assert_equals(model.Session.query(model.PackageExtra).all(), [])
# the only member left is for the user created in factories.Group() and
# factories.Organization()
assert_equals(sorted(
[(m.table_name, m.group.name)
for m in model.Session.query(model.Member).join(model.Group)]),
[('user', 'group1'), ('user', org['name'])])
# all the object revisions were purged too
assert_equals(model.Session.query(model.PackageRevision).all(), [])
assert_equals(model.Session.query(model.ResourceRevision).all(), [])
assert_equals(model.Session.query(model.PackageTagRevision).all(), [])
assert_equals(model.Session.query(model.PackageExtraRevision).all(),
[])
# Member is not revisioned
# No Revision objects were purged or created
assert_equals(num_revisions_after - num_revisions_before, 0)
def test_purged_dataset_removed_from_relationships(self):
child = factories.Dataset()
parent = factories.Dataset()
grandparent = factories.Dataset()
helpers.call_action('package_relationship_create',
subject=child['id'],
type='child_of',
object=parent['id'])
helpers.call_action('package_relationship_create',
subject=parent['id'],
type='child_of',
object=grandparent['id'])
assert_equals(len(
model.Session.query(model.PackageRelationship).all()), 2)
helpers.call_action('dataset_purge',
context={'ignore_auth': True},
id=parent['name'])
assert_equals(model.Session.query(model.PackageRelationship).all(), [])
def test_missing_id_returns_error(self):
assert_raises(logic.ValidationError,
helpers.call_action, 'dataset_purge')
def test_bad_id_returns_404(self):
assert_raises(logic.NotFound,
helpers.call_action, 'dataset_purge', id='123')
class TestUserDelete(object):
def setup(self):
helpers.reset_db()
def test_user_delete(self):
user = factories.User()
context = {}
params = {u'id': user[u'id']}
helpers.call_action(u'user_delete', context, **params)
# It is still there but with state=deleted
user_obj = model.User.get(user[u'id'])
assert_equals(user_obj.state, u'deleted')
def test_user_delete_removes_memberships(self):
user = factories.User()
factories.Organization(
users=[{u'name': user[u'id'], u'capacity': u'admin'}])
factories.Group(
users=[{u'name': user[u'id'], u'capacity': u'admin'}])
user_memberships = model.Session.query(model.Member).filter(
model.Member.table_id == user[u'id']).all()
assert_equals(len(user_memberships), 2)
assert_equals([m.state for m in user_memberships],
[u'active', u'active'])
context = {}
params = {u'id': user[u'id']}
helpers.call_action(u'user_delete', context, **params)
user_memberships = model.Session.query(model.Member).filter(
model.Member.table_id == user[u'id']).all()
# Member objects are still there, but flagged as deleted
assert_equals(len(user_memberships), 2)
assert_equals([m.state for m in user_memberships],
[u'deleted', u'deleted'])
def test_user_delete_removes_memberships_when_using_name(self):
user = factories.User()
factories.Organization(
users=[{u'name': user[u'id'], u'capacity': u'admin'}])
factories.Group(
users=[{u'name': user[u'id'], u'capacity': u'admin'}])
context = {}
params = {u'id': user[u'name']}
helpers.call_action(u'user_delete', context, **params)
user_memberships = model.Session.query(model.Member).filter(
model.Member.table_id == user[u'id']).all()
# Member objects are still there, but flagged as deleted
assert_equals(len(user_memberships), 2)
assert_equals([m.state for m in user_memberships],
[u'deleted', u'deleted'])
| gpl-3.0 | -774,451,038,322,788,900 | 36.039855 | 79 | 0.581923 | false | 4.063196 | true | false | false |
arnaldog12/Manual-Pratico-Deep-Learning | utils/samples_generator.py | 1 | 1868 | import numpy as np
def make_cubic(n_samples, x_min, x_max, a=1, b=0, c=0, d=0, noise=0.0, random_state=None):
np.random.seed(random_state)
x = np.linspace(x_min, x_max, n_samples)
y = a*x**3 + b*x**2 + c*x + d + (2*noise*np.random.random(n_samples) - noise)
return x.reshape(-1,1), y.reshape(-1,1)
def make_exp(n_samples, x_min, x_max, noise=0.0, random_state=None):
np.random.seed(random_state)
x = np.linspace(x_min, x_max, n_samples)
y = np.exp(x) + 2*noise*np.random.random(n_samples) - noise
return x.reshape(-1,1), y.reshape(-1,1)
def make_log10(n_samples, x_min, x_max, noise=0.0, random_state=None):
np.random.seed(random_state)
x = np.logspace(np.log10(x_min), np.log10(x_max), n_samples)
y = np.log10(x) + 2*noise*np.random.random(n_samples) - noise
return x.reshape(-1,1), y.reshape(-1,1)
def make_spiral(n_samples, n_class=2, radius=1, laps=1.0, noise=0.0, random_state=None):
np.random.seed(random_state)
x = np.zeros((n_samples * n_class, 2))
y = np.zeros((n_samples * n_class))
pi_2 = 2 * np.math.pi
points = np.linspace(0, 1, n_samples)
r = points * radius
t = points * pi_2 * laps
for label, delta_t in zip(range(n_class), np.arange(0, pi_2, pi_2/n_class)):
random_noise = (2 * np.random.rand(n_samples) - 1) * noise
index = np.arange(label*n_samples, (label+1)*n_samples)
x[index] = np.c_[r * np.sin(t + delta_t) + random_noise,
r * np.cos(t + delta_t) + random_noise]
y[index] = label
return x, y.reshape(-1, 1)
def make_square(n_samples, x_min, x_max, a=1, b=0, c=0, noise=0.0, random_state=None):
np.random.seed(random_state)
x = np.linspace(x_min, x_max, n_samples)
y = a*x**2 + b*x + c + (2*noise*np.random.random(n_samples) - noise)
return x.reshape(-1,1), y.reshape(-1,1)
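# Minimal smoke test for the generators above (illustrative; the sample counts
# and noise levels are arbitrary):
if __name__ == '__main__':
    x, y = make_cubic(n_samples=100, x_min=-2, x_max=2, noise=0.5, random_state=42)
    print('cubic:', x.shape, y.shape)    # (100, 1) (100, 1)
    x, y = make_spiral(n_samples=100, n_class=3, radius=2, laps=1.5, noise=0.1, random_state=42)
    print('spiral:', x.shape, y.shape)   # (300, 2) (300, 1)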
| mit | 8,612,343,678,604,408,000 | 43.47619 | 90 | 0.600107 | false | 2.461133 | false | false | false |
sthyme/ZFSchizophrenia | BehaviorAnalysis/HSMovieAnalysis/setResolutionWidget.py | 1 | 5960 | #-----------------------
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'selectUI.ui'
# Created: Thu Feb 26 13:45:32 2015 by: PyQt4 UI code generator 4.11.3
#
# Created by Emily Conklin
# February 2015
# This program is connected to the main widget (NeuroGUI.py) and is a sub-user interface
# Called from imageTools.setCameraResolution
# Allows the user to specify:
# 1) default resolution
# 2) fit-to-screen resolution
# 3) fit-to-projector resolution
#-----------------------
from PyQt4 import QtCore, QtGui
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import sys
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_setResolutionWidget(QtGui.QDialog):
'''
sub-window class - QDialog type
'''
def __init__(self):
'''
initializes the dialog, data member
'''
QtGui.QDialog.__init__(self)
self.setupUi(self)
self.videoType=0
def setupUi(self, setResolutionWidget):
'''
called in the initialization method
sets up each layout, labels, buttons, etc.
'''
setResolutionWidget.setObjectName(_fromUtf8("setResolutionWidget"))
setResolutionWidget.resize(404, 300)
self.verticalLayout_2 = QtGui.QVBoxLayout(setResolutionWidget)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
#line 1: label for desired resolution
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.desiredResolutionLabel = QtGui.QLabel(setResolutionWidget)
self.desiredResolutionLabel.setObjectName(_fromUtf8("desiredResolutionLabel"))
self.horizontalLayout.addWidget(self.desiredResolutionLabel)
#lines 2,3,4: resolution options
self.verticalLayout_3 = QtGui.QVBoxLayout()
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.defaultResRB = QtGui.QRadioButton(setResolutionWidget)
self.defaultResRB.setObjectName(_fromUtf8("defaultResRB"))
self.verticalLayout_3.addWidget(self.defaultResRB)
self.fitToScreenLE = QtGui.QRadioButton(setResolutionWidget)
self.fitToScreenLE.setObjectName(_fromUtf8("fitToScreenLE"))
self.verticalLayout_3.addWidget(self.fitToScreenLE)
self.fitToProjectorLE = QtGui.QRadioButton(setResolutionWidget)
self.fitToProjectorLE.setObjectName(_fromUtf8("fitToProjectorLE"))
self.verticalLayout_3.addWidget(self.fitToProjectorLE)
self.horizontalLayout.addLayout(self.verticalLayout_3)
self.verticalLayout_2.addLayout(self.horizontalLayout)
self.defaultResRB.setChecked(True) #defaults default resolution
#sets up button group with the three options
self.buttonGroup = QtGui.QButtonGroup()
self.buttonGroup.addButton(self.defaultResRB,0)
self.buttonGroup.addButton(self.fitToScreenLE,1)
self.buttonGroup.addButton(self.fitToProjectorLE,2)
#line 5: submit button
self.horizontalLayout_4 = QtGui.QHBoxLayout()
self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
spacerItem4 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem4)
self.Submit = QtGui.QPushButton(setResolutionWidget)
self.Submit.setObjectName(_fromUtf8("Submit"))
self.horizontalLayout_4.addWidget(self.Submit)
spacerItem5 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem5)
self.verticalLayout_2.addLayout(self.horizontalLayout_4)
self.retranslateUi(setResolutionWidget)
QtCore.QMetaObject.connectSlotsByName(setResolutionWidget)
def retranslateUi(self, setResolutionWidget):
'''
called in the setup method
sets label/button text and window titles
links buttons to other methods
'''
setResolutionWidget.setWindowTitle(_translate("setResolutionWidget", "Resolution Options", None))
self.desiredResolutionLabel.setText(_translate("setResolutionWidget", "Choose desired resolution:", None))
self.defaultResRB.setText(_translate("setResolutionWidget", "Default resolution", None))
self.fitToScreenLE.setText(_translate("setResolutionWidget", "Fit to screen (~720p)", None))
self.fitToProjectorLE.setText(_translate("setResolutionWidget", "Fit to projector (~480p)", None))
self.Submit.setText(_translate("setResolutionWidget", "Submit",None))
#finds out which radio button was pressed
self.defaultResRB.clicked.connect(self.readSignal)
self.fitToScreenLE.clicked.connect(self.readSignal)
self.fitToProjectorLE.clicked.connect(self.readSignal)
self.Submit.clicked.connect(self.submitClose) #connects submit button to submitClose
def readSignal(self):
'''
checks button group signal to determine radio button clicked
'''
self.videoType = self.buttonGroup.checkedId() #checks radio button signal
def submitClose(self):
'''
closes window when user hits submit, passes videoType
'''
self.accept()
if __name__=='__main__':
'''
main function to test widget as a standalone
'''
app=QtGui.QApplication(sys.argv)
ex=Ui_setResolutionWidget()
ex.show()
sys.exit(app.exec_())
| mit | -8,555,149,342,993,067,000 | 41.571429 | 114 | 0.69245 | false | 4.238976 | false | false | false |
Detry322/map-creator | app/random.py | 1 | 1453 | from app.models import all_models
from app.utils import mkdir_p
from app import GENERATED_TILES_FOLDER, RANDOM_FOLDER, BACKPROPS_FOLDER
from scipy import misc
import glob
import numpy as np
import os
from keras.models import load_model, Model
from keras.optimizers import Adam, SGD, Adagrad
from keras.layers import LocallyConnected1D, Input, Reshape
from app import BACKPROPS_FOLDER, FORWARDPROPS_FOLDER, RANDOM_FOLDER
from app.utils import mkdir_p
from app.forwardprop import forwardprop_single_image
NOISE_SIZE = 100
import time
def random(model_file):
model = load_model(model_file)
generator = model.layers[0]
generator.trainable = False
for layer in generator.layers:
layer.trainable = False
api_key_water = [np.loadtxt(filename) for filename in glob.glob(os.path.join(BACKPROPS_FOLDER, 'api_key', 'water', '*.txt'))]
no_api_key_water = [np.loadtxt(filename) for filename in glob.glob(os.path.join(BACKPROPS_FOLDER, 'no_api_key', 'water', '*.txt'))]
no_api_key_trees = np.loadtxt(os.path.join(BACKPROPS_FOLDER, 'no_api_key', 'trees', '3391.png.txt'))
folder = os.path.join(RANDOM_FOLDER, '{}'.format(time.time()))
mkdir_p(folder)
for a in api_key_water:
for na in no_api_key_water:
api_key_trees = a - na + no_api_key_trees
image = forwardprop_single_image(generator, api_key_trees)
misc.imsave(os.path.join(folder, 'land-{}.png'.format(time.time())), ((image + 1)*128).astype('uint8'))
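# Illustrative invocation (a sketch; the checkpoint path is hypothetical and
# must be a saved GAN whose first layer is the generator, as random() assumes):
#
#   from app.random import random
#   random('models/gan_checkpoint.h5')
#
# Each call writes the generated tiles into a fresh timestamped subfolder of
# RANDOM_FOLDER.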
| mit | -2,502,770,354,397,162,500 | 32.022727 | 133 | 0.722643 | false | 3.091489 | false | false | false |
prefetchnta/questlab | bin/x64bin/python/36/Lib/calendar.py | 1 | 23926 | """Calendar printing functions
Note when comparing these calendars to the ones printed by cal(1): By
default, these calendars have Monday as the first day of the week, and
Sunday as the last (the European convention). Use setfirstweekday() to
set the first day of the week (0=Monday, 6=Sunday)."""
import sys
import datetime
import locale as _locale
from itertools import repeat
__all__ = ["IllegalMonthError", "IllegalWeekdayError", "setfirstweekday",
"firstweekday", "isleap", "leapdays", "weekday", "monthrange",
"monthcalendar", "prmonth", "month", "prcal", "calendar",
"timegm", "month_name", "month_abbr", "day_name", "day_abbr",
"Calendar", "TextCalendar", "HTMLCalendar", "LocaleTextCalendar",
"LocaleHTMLCalendar", "weekheader"]
# Exception raised for bad input (with string parameter for details)
error = ValueError
# Exceptions raised for bad input
class IllegalMonthError(ValueError):
def __init__(self, month):
self.month = month
def __str__(self):
return "bad month number %r; must be 1-12" % self.month
class IllegalWeekdayError(ValueError):
def __init__(self, weekday):
self.weekday = weekday
def __str__(self):
return "bad weekday number %r; must be 0 (Monday) to 6 (Sunday)" % self.weekday
# Constants for months referenced later
January = 1
February = 2
# Number of days per month (except for February in leap years)
mdays = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
# This module used to have hard-coded lists of day and month names, as
# English strings. The classes following emulate a read-only version of
# that, but supply localized names. Note that the values are computed
# fresh on each call, in case the user changes locale between calls.
class _localized_month:
_months = [datetime.date(2001, i+1, 1).strftime for i in range(12)]
_months.insert(0, lambda x: "")
def __init__(self, format):
self.format = format
def __getitem__(self, i):
funcs = self._months[i]
if isinstance(i, slice):
return [f(self.format) for f in funcs]
else:
return funcs(self.format)
def __len__(self):
return 13
class _localized_day:
# January 1, 2001, was a Monday.
_days = [datetime.date(2001, 1, i+1).strftime for i in range(7)]
def __init__(self, format):
self.format = format
def __getitem__(self, i):
funcs = self._days[i]
if isinstance(i, slice):
return [f(self.format) for f in funcs]
else:
return funcs(self.format)
def __len__(self):
return 7
# Full and abbreviated names of weekdays
day_name = _localized_day('%A')
day_abbr = _localized_day('%a')
# Full and abbreviated names of months (1-based arrays!!!)
month_name = _localized_month('%B')
month_abbr = _localized_month('%b')
# Constants for weekdays
(MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY) = range(7)
def isleap(year):
"""Return True for leap years, False for non-leap years."""
return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def leapdays(y1, y2):
"""Return number of leap years in range [y1, y2).
Assume y1 <= y2."""
y1 -= 1
y2 -= 1
return (y2//4 - y1//4) - (y2//100 - y1//100) + (y2//400 - y1//400)
def weekday(year, month, day):
"""Return weekday (0-6 ~ Mon-Sun) for year (1970-...), month (1-12),
day (1-31)."""
return datetime.date(year, month, day).weekday()
def monthrange(year, month):
"""Return weekday (0-6 ~ Mon-Sun) and number of days (28-31) for
year, month."""
if not 1 <= month <= 12:
raise IllegalMonthError(month)
day1 = weekday(year, month, 1)
ndays = mdays[month] + (month == February and isleap(year))
return day1, ndays
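# Worked example (illustrative): January 1, 2001 was a Monday, so
#   weekday(2001, 2, 1) == 3           # Thursday
#   monthrange(2001, 2) == (3, 28)     # starts on a Thursday, 28 days (not a leap year)
#   isleap(2000) is True; isleap(1900) is False
#   leapdays(2001, 2101) == 24         # 2100 is not a leap year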
class Calendar(object):
"""
Base calendar class. This class doesn't do any formatting. It simply
provides data to subclasses.
"""
def __init__(self, firstweekday=0):
self.firstweekday = firstweekday # 0 = Monday, 6 = Sunday
def getfirstweekday(self):
return self._firstweekday % 7
def setfirstweekday(self, firstweekday):
self._firstweekday = firstweekday
firstweekday = property(getfirstweekday, setfirstweekday)
def iterweekdays(self):
"""
Return an iterator for one week of weekday numbers starting with the
configured first one.
"""
for i in range(self.firstweekday, self.firstweekday + 7):
yield i%7
def itermonthdates(self, year, month):
"""
Return an iterator for one month. The iterator will yield datetime.date
values and will always iterate through complete weeks, so it will yield
dates outside the specified month.
"""
date = datetime.date(year, month, 1)
# Go back to the beginning of the week
days = (date.weekday() - self.firstweekday) % 7
date -= datetime.timedelta(days=days)
oneday = datetime.timedelta(days=1)
while True:
yield date
try:
date += oneday
except OverflowError:
# Adding one day could fail after datetime.MAXYEAR
break
if date.month != month and date.weekday() == self.firstweekday:
break
def itermonthdays2(self, year, month):
"""
Like itermonthdates(), but will yield (day number, weekday number)
tuples. For days outside the specified month the day number is 0.
"""
for i, d in enumerate(self.itermonthdays(year, month), self.firstweekday):
yield d, i % 7
def itermonthdays(self, year, month):
"""
Like itermonthdates(), but will yield day numbers. For days outside
the specified month the day number is 0.
"""
day1, ndays = monthrange(year, month)
days_before = (day1 - self.firstweekday) % 7
yield from repeat(0, days_before)
yield from range(1, ndays + 1)
days_after = (self.firstweekday - day1 - ndays) % 7
yield from repeat(0, days_after)
def monthdatescalendar(self, year, month):
"""
Return a matrix (list of lists) representing a month's calendar.
Each row represents a week; week entries are datetime.date values.
"""
dates = list(self.itermonthdates(year, month))
return [ dates[i:i+7] for i in range(0, len(dates), 7) ]
def monthdays2calendar(self, year, month):
"""
Return a matrix representing a month's calendar.
Each row represents a week; week entries are
(day number, weekday number) tuples. Day numbers outside this month
are zero.
"""
days = list(self.itermonthdays2(year, month))
return [ days[i:i+7] for i in range(0, len(days), 7) ]
def monthdayscalendar(self, year, month):
"""
Return a matrix representing a month's calendar.
Each row represents a week; days outside this month are zero.
"""
days = list(self.itermonthdays(year, month))
return [ days[i:i+7] for i in range(0, len(days), 7) ]
def yeardatescalendar(self, year, width=3):
"""
Return the data for the specified year ready for formatting. The return
value is a list of month rows. Each month row contains up to width months.
Each month contains between 4 and 6 weeks and each week contains 1-7
days. Days are datetime.date objects.
"""
months = [
self.monthdatescalendar(year, i)
for i in range(January, January+12)
]
return [months[i:i+width] for i in range(0, len(months), width) ]
def yeardays2calendar(self, year, width=3):
"""
Return the data for the specified year ready for formatting (similar to
yeardatescalendar()). Entries in the week lists are
(day number, weekday number) tuples. Day numbers outside this month are
zero.
"""
months = [
self.monthdays2calendar(year, i)
for i in range(January, January+12)
]
return [months[i:i+width] for i in range(0, len(months), width) ]
def yeardayscalendar(self, year, width=3):
"""
Return the data for the specified year ready for formatting (similar to
yeardatescalendar()). Entries in the week lists are day numbers.
Day numbers outside this month are zero.
"""
months = [
self.monthdayscalendar(year, i)
for i in range(January, January+12)
]
return [months[i:i+width] for i in range(0, len(months), width) ]
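# Illustrative use of the raw data interface above, assuming weeks that start
# on Sunday:
#   c = Calendar(firstweekday=6)
#   c.monthdayscalendar(2024, 2)[0] -> [0, 0, 0, 0, 1, 2, 3]
# (the zeros are the trailing January days padding out the first week).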
class TextCalendar(Calendar):
"""
Subclass of Calendar that outputs a calendar as a simple plain text
similar to the UNIX program cal.
"""
def prweek(self, theweek, width):
"""
Print a single week (no newline).
"""
print(self.formatweek(theweek, width), end=' ')
def formatday(self, day, weekday, width):
"""
Returns a formatted day.
"""
if day == 0:
s = ''
else:
s = '%2i' % day # right-align single-digit days
return s.center(width)
def formatweek(self, theweek, width):
"""
Returns a single week in a string (no newline).
"""
return ' '.join(self.formatday(d, wd, width) for (d, wd) in theweek)
def formatweekday(self, day, width):
"""
Returns a formatted week day name.
"""
if width >= 9:
names = day_name
else:
names = day_abbr
return names[day][:width].center(width)
def formatweekheader(self, width):
"""
Return a header for a week.
"""
return ' '.join(self.formatweekday(i, width) for i in self.iterweekdays())
def formatmonthname(self, theyear, themonth, width, withyear=True):
"""
Return a formatted month name.
"""
s = month_name[themonth]
if withyear:
s = "%s %r" % (s, theyear)
return s.center(width)
def prmonth(self, theyear, themonth, w=0, l=0):
"""
Print a month's calendar.
"""
print(self.formatmonth(theyear, themonth, w, l), end='')
def formatmonth(self, theyear, themonth, w=0, l=0):
"""
Return a month's calendar string (multi-line).
"""
w = max(2, w)
l = max(1, l)
s = self.formatmonthname(theyear, themonth, 7 * (w + 1) - 1)
s = s.rstrip()
s += '\n' * l
s += self.formatweekheader(w).rstrip()
s += '\n' * l
for week in self.monthdays2calendar(theyear, themonth):
s += self.formatweek(week, w).rstrip()
s += '\n' * l
return s
def formatyear(self, theyear, w=2, l=1, c=6, m=3):
"""
Returns a year's calendar as a multi-line string.
"""
w = max(2, w)
l = max(1, l)
c = max(2, c)
colwidth = (w + 1) * 7 - 1
v = []
a = v.append
a(repr(theyear).center(colwidth*m+c*(m-1)).rstrip())
a('\n'*l)
header = self.formatweekheader(w)
for (i, row) in enumerate(self.yeardays2calendar(theyear, m)):
# months in this row
months = range(m*i+1, min(m*(i+1)+1, 13))
a('\n'*l)
names = (self.formatmonthname(theyear, k, colwidth, False)
for k in months)
a(formatstring(names, colwidth, c).rstrip())
a('\n'*l)
headers = (header for k in months)
a(formatstring(headers, colwidth, c).rstrip())
a('\n'*l)
# max number of weeks for this row
height = max(len(cal) for cal in row)
for j in range(height):
weeks = []
for cal in row:
if j >= len(cal):
weeks.append('')
else:
weeks.append(self.formatweek(cal[j], w))
a(formatstring(weeks, colwidth, c).rstrip())
a('\n' * l)
return ''.join(v)
def pryear(self, theyear, w=0, l=0, c=6, m=3):
"""Print a year's calendar."""
print(self.formatyear(theyear, w, l, c, m))
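# Illustrative use: TextCalendar(firstweekday=0).formatmonth(2024, 2) returns a
# multi-line string similar to the Unix cal output, with a centered
# "February 2024" header, a weekday header row and one line per week.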
class HTMLCalendar(Calendar):
"""
This calendar returns complete HTML pages.
"""
# CSS classes for the day <td>s
cssclasses = ["mon", "tue", "wed", "thu", "fri", "sat", "sun"]
def formatday(self, day, weekday):
"""
Return a day as a table cell.
"""
if day == 0:
return '<td class="noday"> </td>' # day outside month
else:
return '<td class="%s">%d</td>' % (self.cssclasses[weekday], day)
def formatweek(self, theweek):
"""
Return a complete week as a table row.
"""
s = ''.join(self.formatday(d, wd) for (d, wd) in theweek)
return '<tr>%s</tr>' % s
def formatweekday(self, day):
"""
Return a weekday name as a table header.
"""
return '<th class="%s">%s</th>' % (self.cssclasses[day], day_abbr[day])
def formatweekheader(self):
"""
Return a header for a week as a table row.
"""
s = ''.join(self.formatweekday(i) for i in self.iterweekdays())
return '<tr>%s</tr>' % s
def formatmonthname(self, theyear, themonth, withyear=True):
"""
Return a month name as a table row.
"""
if withyear:
s = '%s %s' % (month_name[themonth], theyear)
else:
s = '%s' % month_name[themonth]
return '<tr><th colspan="7" class="month">%s</th></tr>' % s
def formatmonth(self, theyear, themonth, withyear=True):
"""
Return a formatted month as a table.
"""
v = []
a = v.append
a('<table border="0" cellpadding="0" cellspacing="0" class="month">')
a('\n')
a(self.formatmonthname(theyear, themonth, withyear=withyear))
a('\n')
a(self.formatweekheader())
a('\n')
for week in self.monthdays2calendar(theyear, themonth):
a(self.formatweek(week))
a('\n')
a('</table>')
a('\n')
return ''.join(v)
def formatyear(self, theyear, width=3):
"""
Return a formatted year as a table of tables.
"""
v = []
a = v.append
width = max(width, 1)
a('<table border="0" cellpadding="0" cellspacing="0" class="year">')
a('\n')
a('<tr><th colspan="%d" class="year">%s</th></tr>' % (width, theyear))
for i in range(January, January+12, width):
# months in this row
months = range(i, min(i+width, 13))
a('<tr>')
for m in months:
a('<td>')
a(self.formatmonth(theyear, m, withyear=False))
a('</td>')
a('</tr>')
a('</table>')
return ''.join(v)
def formatyearpage(self, theyear, width=3, css='calendar.css', encoding=None):
"""
Return a formatted year as a complete HTML page.
"""
if encoding is None:
encoding = sys.getdefaultencoding()
v = []
a = v.append
a('<?xml version="1.0" encoding="%s"?>\n' % encoding)
a('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n')
a('<html>\n')
a('<head>\n')
a('<meta http-equiv="Content-Type" content="text/html; charset=%s" />\n' % encoding)
if css is not None:
a('<link rel="stylesheet" type="text/css" href="%s" />\n' % css)
a('<title>Calendar for %d</title>\n' % theyear)
a('</head>\n')
a('<body>\n')
a(self.formatyear(theyear, width))
a('</body>\n')
a('</html>\n')
return ''.join(v).encode(encoding, "xmlcharrefreplace")
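# Illustrative use: HTMLCalendar().formatmonth(2024, 2) returns an HTML <table>
# fragment (CSS class "month"), while formatyearpage(2024) wraps a full year
# in a complete XHTML document.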
class different_locale:
def __init__(self, locale):
self.locale = locale
def __enter__(self):
self.oldlocale = _locale.getlocale(_locale.LC_TIME)
_locale.setlocale(_locale.LC_TIME, self.locale)
def __exit__(self, *args):
_locale.setlocale(_locale.LC_TIME, self.oldlocale)
class LocaleTextCalendar(TextCalendar):
"""
This class can be passed a locale name in the constructor and will return
month and weekday names in the specified locale. If this locale includes
an encoding all strings containing month and weekday names will be returned
as unicode.
"""
def __init__(self, firstweekday=0, locale=None):
TextCalendar.__init__(self, firstweekday)
if locale is None:
locale = _locale.getdefaultlocale()
self.locale = locale
def formatweekday(self, day, width):
with different_locale(self.locale):
if width >= 9:
names = day_name
else:
names = day_abbr
name = names[day]
return name[:width].center(width)
def formatmonthname(self, theyear, themonth, width, withyear=True):
with different_locale(self.locale):
s = month_name[themonth]
if withyear:
s = "%s %r" % (s, theyear)
return s.center(width)
class LocaleHTMLCalendar(HTMLCalendar):
"""
This class can be passed a locale name in the constructor and will return
month and weekday names in the specified locale. If this locale includes
an encoding all strings containing month and weekday names will be returned
as unicode.
"""
def __init__(self, firstweekday=0, locale=None):
HTMLCalendar.__init__(self, firstweekday)
if locale is None:
locale = _locale.getdefaultlocale()
self.locale = locale
def formatweekday(self, day):
with different_locale(self.locale):
s = day_abbr[day]
return '<th class="%s">%s</th>' % (self.cssclasses[day], s)
def formatmonthname(self, theyear, themonth, withyear=True):
with different_locale(self.locale):
s = month_name[themonth]
if withyear:
s = '%s %s' % (s, theyear)
return '<tr><th colspan="7" class="month">%s</th></tr>' % s
# Support for old module level interface
c = TextCalendar()
firstweekday = c.getfirstweekday
def setfirstweekday(firstweekday):
if not MONDAY <= firstweekday <= SUNDAY:
raise IllegalWeekdayError(firstweekday)
c.firstweekday = firstweekday
monthcalendar = c.monthdayscalendar
prweek = c.prweek
week = c.formatweek
weekheader = c.formatweekheader
prmonth = c.prmonth
month = c.formatmonth
calendar = c.formatyear
prcal = c.pryear
# Spacing of month columns for multi-column year calendar
_colwidth = 7*3 - 1 # Amount printed by prweek()
_spacing = 6 # Number of spaces between columns
def format(cols, colwidth=_colwidth, spacing=_spacing):
"""Prints multi-column formatting for year calendars"""
print(formatstring(cols, colwidth, spacing))
def formatstring(cols, colwidth=_colwidth, spacing=_spacing):
"""Returns a string formatted from n strings, centered within n columns."""
spacing *= ' '
return spacing.join(c.center(colwidth) for c in cols)
EPOCH = 1970
_EPOCH_ORD = datetime.date(EPOCH, 1, 1).toordinal()
def timegm(tuple):
"""Unrelated but handy function to calculate Unix timestamp from GMT."""
year, month, day, hour, minute, second = tuple[:6]
days = datetime.date(year, month, 1).toordinal() - _EPOCH_ORD + day - 1
hours = days*24 + hour
minutes = hours*60 + minute
seconds = minutes*60 + second
return seconds
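# Illustrative value: timegm((1970, 1, 2, 0, 0, 0)) -> 86400, i.e. one day
# after the Unix epoch.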
def main(args):
import argparse
parser = argparse.ArgumentParser()
textgroup = parser.add_argument_group('text only arguments')
htmlgroup = parser.add_argument_group('html only arguments')
textgroup.add_argument(
"-w", "--width",
type=int, default=2,
help="width of date column (default 2)"
)
textgroup.add_argument(
"-l", "--lines",
type=int, default=1,
help="number of lines for each week (default 1)"
)
textgroup.add_argument(
"-s", "--spacing",
type=int, default=6,
help="spacing between months (default 6)"
)
textgroup.add_argument(
"-m", "--months",
type=int, default=3,
help="months per row (default 3)"
)
htmlgroup.add_argument(
"-c", "--css",
default="calendar.css",
help="CSS to use for page"
)
parser.add_argument(
"-L", "--locale",
default=None,
help="locale to be used from month and weekday names"
)
parser.add_argument(
"-e", "--encoding",
default=None,
help="encoding to use for output"
)
parser.add_argument(
"-t", "--type",
default="text",
choices=("text", "html"),
help="output type (text or html)"
)
parser.add_argument(
"year",
nargs='?', type=int,
help="year number (1-9999)"
)
parser.add_argument(
"month",
nargs='?', type=int,
help="month number (1-12, text only)"
)
options = parser.parse_args(args[1:])
if options.locale and not options.encoding:
parser.error("if --locale is specified --encoding is required")
sys.exit(1)
locale = options.locale, options.encoding
if options.type == "html":
if options.locale:
cal = LocaleHTMLCalendar(locale=locale)
else:
cal = HTMLCalendar()
encoding = options.encoding
if encoding is None:
encoding = sys.getdefaultencoding()
optdict = dict(encoding=encoding, css=options.css)
write = sys.stdout.buffer.write
if options.year is None:
write(cal.formatyearpage(datetime.date.today().year, **optdict))
elif options.month is None:
write(cal.formatyearpage(options.year, **optdict))
else:
parser.error("incorrect number of arguments")
sys.exit(1)
else:
if options.locale:
cal = LocaleTextCalendar(locale=locale)
else:
cal = TextCalendar()
optdict = dict(w=options.width, l=options.lines)
if options.month is None:
optdict["c"] = options.spacing
optdict["m"] = options.months
if options.year is None:
result = cal.formatyear(datetime.date.today().year, **optdict)
elif options.month is None:
result = cal.formatyear(options.year, **optdict)
else:
result = cal.formatmonth(options.year, options.month, **optdict)
write = sys.stdout.write
if options.encoding:
result = result.encode(options.encoding)
write = sys.stdout.buffer.write
write(result)
if __name__ == "__main__":
main(sys.argv)
| lgpl-2.1 | 7,043,734,647,202,540,000 | 31.556802 | 124 | 0.557302 | false | 3.896743 | false | false | false |
linsalrob/EdwardsLab | phage_protein_blast_genera/tax_violin_plots.py | 1 | 2239 | """
"""
import os
import sys
import argparse
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="")
    parser.add_argument('-f', help='Genome average output file (from genera_per_phage_protein.py)', default='/home/redwards/Desktop/gav_all_host.out')
parser.add_argument('-n', help='taxonomy name one of: kingdom / phylum / genus / species', default='genus')
parser.add_argument('-v', help='verbose output', action="store_true")
args = parser.parse_args()
ynames = {'kingdom' : 'kingdoms', 'phylum' : 'phyla', 'genus' : 'genera', 'species' : 'species'}
col = None
colkey = {'kingdom' : 3, 'phylum' : 4, 'genus' : 5, 'species' : 6}
if args.n not in colkey:
sys.stderr.write("Sorry, taxonomy name must be one of {}\n".format("|".join(list(colkey.keys()))))
sys.exit(-1)
col = colkey[args.n]
want = {'Gut', 'Mouth', 'Nose', 'Skin', 'Lungs'}
data = {}
with open(args.f, 'r') as fin:
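        # Each input line is tab-separated; field 2 (0-based) is the body site
        # and fields 3-6 hold the average kingdom/phylum/genus/species counts.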
for l in fin:
p=l.strip().split("\t")
if p[2] not in want:
p[2] = 'All phages'
#continue ## comment or uncomment this to include/exclude all data
if p[2] not in data:
data[p[2]] = []
data[p[2]].append(float(p[col]))
labels = sorted(data.keys())
scores = []
count = 1
ticks = []
for l in labels:
scores.append(data[l])
ticks.append(count)
count += 1
fig = plt.figure()
ax = fig.add_subplot(111)
# ax.boxplot(alldata)
vp = ax.violinplot(scores, showmeans=True)
for i, j in enumerate(vp['bodies']):
if i == 0:
j.set_color('gray')
elif i == 1:
j.set_color('sandybrown')
else:
j.set_color('lightpink')
ax.set_xlabel("Body Site")
ax.set_ylabel("Average number of {}".format(ynames[args.n]))
ax.set_xticks(ticks)
ax.set_xticklabels(labels, rotation='vertical')
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
fig.set_facecolor('white')
plt.tight_layout()
#plt.show()
fig.savefig("/home/redwards/Desktop/bodysites.png")
| mit | 952,300,114,054,625,500 | 28.853333 | 149 | 0.571237 | false | 3.226225 | false | false | false |
euccas/CodingPuzzles-Python | leet/source/searchDFS/permutations.py | 1 | 1421 | class Solution():
def permute(self, nums):
if nums is None:
return [[]]
elif len(nums) <= 1:
return [nums]
result = []
for i, item in enumerate(nums):
#print("i={0}, item={1}".format(i, item))
            for p in self.permute(nums[:i] + nums[i + 1:]):
#print("p={0}, item={1}, append {2}".format(p, item, p + [item]))
result.append([item] + p)
#print("now result is ... {0}".format(result))
return result
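# Note: Solution builds permutations by recursing on list slices, while
# Solution1 below does the same job with explicit DFS backtracking and a
# visited list; both enumerate all n! orderings (e.g. 6 results for [1, 5, 9]).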
class Solution1(object):
def permute(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
if nums is None:
return []
if len(nums) == 0:
return [[]]
self.result = []
visited = [False for i in nums]
self.dfs(nums, visited, [])
return self.result
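    # DFS backtracking: try each unused element, recurse, then undo the choice
    # (pop + clear the visited flag) so later branches can reuse it.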
def dfs(self, nums, visited, permutation):
if len(nums) == len(permutation):
self.result.append(permutation[:])
for i in range(0, len(nums)):
if visited[i] == True:
continue
permutation.append(nums[i])
visited[i] = True
self.dfs(nums, visited, permutation)
visited[i] = False
permutation.pop()
if __name__ == "__main__":
sln = Solution1()
result = sln.permute([1, 5, 9])
print(result)
| mit | 405,264,178,974,588,860 | 26.326923 | 81 | 0.474314 | false | 3.861413 | false | false | false |
ganeti-github-testing/ganeti-test-1 | lib/client/gnt_instance.py | 1 | 62250 | #
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2014 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Instance related commands"""
# pylint: disable=W0401,W0614,C0103
# W0401: Wildcard import ganeti.cli
# W0614: Unused import %s from wildcard import (since we need cli)
# C0103: Invalid name gnt-instance
import copy
import itertools
import simplejson
import logging
from ganeti.cli import *
from ganeti import opcodes
from ganeti import constants
from ganeti import compat
from ganeti import utils
from ganeti import errors
from ganeti import netutils
from ganeti import ssh
from ganeti import objects
from ganeti import ht
_EXPAND_CLUSTER = "cluster"
_EXPAND_NODES_BOTH = "nodes"
_EXPAND_NODES_PRI = "nodes-pri"
_EXPAND_NODES_SEC = "nodes-sec"
_EXPAND_NODES_BOTH_BY_TAGS = "nodes-by-tags"
_EXPAND_NODES_PRI_BY_TAGS = "nodes-pri-by-tags"
_EXPAND_NODES_SEC_BY_TAGS = "nodes-sec-by-tags"
_EXPAND_INSTANCES = "instances"
_EXPAND_INSTANCES_BY_TAGS = "instances-by-tags"
_EXPAND_NODES_TAGS_MODES = compat.UniqueFrozenset([
_EXPAND_NODES_BOTH_BY_TAGS,
_EXPAND_NODES_PRI_BY_TAGS,
_EXPAND_NODES_SEC_BY_TAGS,
])
#: default list of options for L{ListInstances}
_LIST_DEF_FIELDS = [
"name", "hypervisor", "os", "pnode", "status", "oper_ram",
]
_MISSING = object()
_ENV_OVERRIDE = compat.UniqueFrozenset(["list"])
_INST_DATA_VAL = ht.TListOf(ht.TDict)
def _ExpandMultiNames(mode, names, client=None):
"""Expand the given names using the passed mode.
For _EXPAND_CLUSTER, all instances will be returned. For
_EXPAND_NODES_PRI/SEC, all instances having those nodes as
primary/secondary will be returned. For _EXPAND_NODES_BOTH, all
instances having those nodes as either primary or secondary will be
returned. For _EXPAND_INSTANCES, the given instances will be
returned.
@param mode: one of L{_EXPAND_CLUSTER}, L{_EXPAND_NODES_BOTH},
L{_EXPAND_NODES_PRI}, L{_EXPAND_NODES_SEC} or
L{_EXPAND_INSTANCES}
@param names: a list of names; for cluster, it must be empty,
and for node and instance it must be a list of valid item
names (short names are valid as usual, e.g. node1 instead of
node1.example.com)
@rtype: list
@return: the list of names after the expansion
@raise errors.ProgrammerError: for unknown selection type
@raise errors.OpPrereqError: for invalid input parameters
"""
# pylint: disable=W0142
if client is None:
client = GetClient()
if mode == _EXPAND_CLUSTER:
if names:
raise errors.OpPrereqError("Cluster filter mode takes no arguments",
errors.ECODE_INVAL)
idata = client.QueryInstances([], ["name"], False)
inames = [row[0] for row in idata]
elif (mode in _EXPAND_NODES_TAGS_MODES or
mode in (_EXPAND_NODES_BOTH, _EXPAND_NODES_PRI, _EXPAND_NODES_SEC)):
if mode in _EXPAND_NODES_TAGS_MODES:
if not names:
raise errors.OpPrereqError("No node tags passed", errors.ECODE_INVAL)
ndata = client.QueryNodes([], ["name", "pinst_list",
"sinst_list", "tags"], False)
ndata = [row for row in ndata if set(row[3]).intersection(names)]
else:
if not names:
raise errors.OpPrereqError("No node names passed", errors.ECODE_INVAL)
ndata = client.QueryNodes(names, ["name", "pinst_list", "sinst_list"],
False)
ipri = [row[1] for row in ndata]
pri_names = list(itertools.chain(*ipri))
isec = [row[2] for row in ndata]
sec_names = list(itertools.chain(*isec))
if mode in (_EXPAND_NODES_BOTH, _EXPAND_NODES_BOTH_BY_TAGS):
inames = pri_names + sec_names
elif mode in (_EXPAND_NODES_PRI, _EXPAND_NODES_PRI_BY_TAGS):
inames = pri_names
elif mode in (_EXPAND_NODES_SEC, _EXPAND_NODES_SEC_BY_TAGS):
inames = sec_names
else:
raise errors.ProgrammerError("Unhandled shutdown type")
elif mode == _EXPAND_INSTANCES:
if not names:
raise errors.OpPrereqError("No instance names passed",
errors.ECODE_INVAL)
idata = client.QueryInstances(names, ["name"], False)
inames = [row[0] for row in idata]
elif mode == _EXPAND_INSTANCES_BY_TAGS:
if not names:
raise errors.OpPrereqError("No instance tags passed",
errors.ECODE_INVAL)
idata = client.QueryInstances([], ["name", "tags"], False)
inames = [row[0] for row in idata if set(row[1]).intersection(names)]
else:
raise errors.OpPrereqError("Unknown mode '%s'" % mode, errors.ECODE_INVAL)
return inames
def _EnsureInstancesExist(client, names):
"""Check for and ensure the given instance names exist.
This function will raise an OpPrereqError in case they don't
exist. Otherwise it will exit cleanly.
@type client: L{ganeti.luxi.Client}
@param client: the client to use for the query
@type names: list
@param names: the list of instance names to query
@raise errors.OpPrereqError: in case any instance is missing
"""
  # TODO: change LUInstanceQuery so that it actually returns None
# instead of raising an exception, or devise a better mechanism
result = client.QueryInstances(names, ["name"], False)
for orig_name, row in zip(names, result):
if row[0] is None:
raise errors.OpPrereqError("Instance '%s' does not exist" % orig_name,
errors.ECODE_NOENT)
def GenericManyOps(operation, fn):
"""Generic multi-instance operations.
  This will return a wrapper that processes the options and arguments
given, and uses the passed function to build the opcode needed for
the specific operation. Thus all the generic loop/confirmation code
is abstracted into this function.
"""
def realfn(opts, args):
if opts.multi_mode is None:
opts.multi_mode = _EXPAND_INSTANCES
cl = GetClient()
inames = _ExpandMultiNames(opts.multi_mode, args, client=cl)
if not inames:
if opts.multi_mode == _EXPAND_CLUSTER:
ToStdout("Cluster is empty, no instances to shutdown")
return 0
raise errors.OpPrereqError("Selection filter does not match"
" any instances", errors.ECODE_INVAL)
multi_on = opts.multi_mode != _EXPAND_INSTANCES or len(inames) > 1
if not (opts.force_multi or not multi_on
or ConfirmOperation(inames, "instances", operation)):
return 1
jex = JobExecutor(verbose=multi_on, cl=cl, opts=opts)
for name in inames:
op = fn(name, opts)
jex.QueueJob(name, op)
results = jex.WaitOrShow(not opts.submit_only)
rcode = compat.all(row[0] for row in results)
return int(not rcode)
return realfn
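# A command entry is then typically wired up as, for example,
# GenericManyOps("shutdown", _ShutdownInstance): the returned realfn expands
# the selection, asks for confirmation and queues one job per instance.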
def ListInstances(opts, args):
"""List instances and their properties.
@param opts: the command line options selected by the user
@type args: list
@param args: should be an empty list
@rtype: int
@return: the desired exit code
"""
selected_fields = ParseFields(opts.output, _LIST_DEF_FIELDS)
fmtoverride = dict.fromkeys(["tags", "disk.sizes", "nic.macs", "nic.ips",
"nic.modes", "nic.links", "nic.bridges",
"nic.networks",
"snodes", "snodes.group", "snodes.group.uuid"],
(lambda value: ",".join(str(item)
for item in value),
False))
cl = GetClient()
return GenericList(constants.QR_INSTANCE, selected_fields, args, opts.units,
opts.separator, not opts.no_headers,
format_override=fmtoverride, verbose=opts.verbose,
force_filter=opts.force_filter, cl=cl)
def ListInstanceFields(opts, args):
"""List instance fields.
@param opts: the command line options selected by the user
@type args: list
@param args: fields to list, or empty for all
@rtype: int
@return: the desired exit code
"""
return GenericListFields(constants.QR_INSTANCE, args, opts.separator,
not opts.no_headers)
def AddInstance(opts, args):
"""Add an instance to the cluster.
This is just a wrapper over L{GenericInstanceCreate}.
"""
return GenericInstanceCreate(constants.INSTANCE_CREATE, opts, args)
def BatchCreate(opts, args):
"""Create instances using a definition file.
This function reads a json file with L{opcodes.OpInstanceCreate}
serialisations.
@param opts: the command line options selected by the user
@type args: list
@param args: should contain one element, the json filename
@rtype: int
@return: the desired exit code
"""
(json_filename,) = args
cl = GetClient()
try:
instance_data = simplejson.loads(utils.ReadFile(json_filename))
except Exception, err: # pylint: disable=W0703
ToStderr("Can't parse the instance definition file: %s" % str(err))
return 1
if not _INST_DATA_VAL(instance_data):
ToStderr("The instance definition file is not %s" % _INST_DATA_VAL)
return 1
instances = []
possible_params = set(opcodes.OpInstanceCreate.GetAllSlots())
for (idx, inst) in enumerate(instance_data):
unknown = set(inst.keys()) - possible_params
if unknown:
# TODO: Suggest closest match for more user friendly experience
raise errors.OpPrereqError("Unknown fields in definition %s: %s" %
(idx, utils.CommaJoin(unknown)),
errors.ECODE_INVAL)
op = opcodes.OpInstanceCreate(**inst) # pylint: disable=W0142
op.Validate(False)
instances.append(op)
op = opcodes.OpInstanceMultiAlloc(iallocator=opts.iallocator,
instances=instances)
result = SubmitOrSend(op, opts, cl=cl)
# Keep track of submitted jobs
jex = JobExecutor(cl=cl, opts=opts)
for (status, job_id) in result[constants.JOB_IDS_KEY]:
jex.AddJobId(None, status, job_id)
results = jex.GetResults()
bad_cnt = len([row for row in results if not row[0]])
if bad_cnt == 0:
ToStdout("All instances created successfully.")
rcode = constants.EXIT_SUCCESS
else:
ToStdout("There were %s errors during the creation.", bad_cnt)
rcode = constants.EXIT_FAILURE
return rcode
def ReinstallInstance(opts, args):
"""Reinstall an instance.
@param opts: the command line options selected by the user
@type args: list
@param args: should contain only one element, the name of the
instance to be reinstalled
@rtype: int
@return: the desired exit code
"""
# first, compute the desired name list
if opts.multi_mode is None:
opts.multi_mode = _EXPAND_INSTANCES
inames = _ExpandMultiNames(opts.multi_mode, args)
if not inames:
raise errors.OpPrereqError("Selection filter does not match any instances",
errors.ECODE_INVAL)
# second, if requested, ask for an OS
if opts.select_os is True:
op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[])
result = SubmitOpCode(op, opts=opts)
if not result:
ToStdout("Can't get the OS list")
return 1
ToStdout("Available OS templates:")
number = 0
choices = []
for (name, variants) in result:
for entry in CalculateOSNames(name, variants):
ToStdout("%3s: %s", number, entry)
choices.append(("%s" % number, entry, entry))
number += 1
choices.append(("x", "exit", "Exit gnt-instance reinstall"))
selected = AskUser("Enter OS template number (or x to abort):",
choices)
if selected == "exit":
ToStderr("User aborted reinstall, exiting")
return 1
os_name = selected
os_msg = "change the OS to '%s'" % selected
else:
os_name = opts.os
if opts.os is not None:
os_msg = "change the OS to '%s'" % os_name
else:
os_msg = "keep the same OS"
# third, get confirmation: multi-reinstall requires --force-multi,
# single-reinstall either --force or --force-multi (--force-multi is
# a stronger --force)
multi_on = opts.multi_mode != _EXPAND_INSTANCES or len(inames) > 1
if multi_on:
warn_msg = ("Note: this will remove *all* data for the"
" below instances! It will %s.\n" % os_msg)
if not (opts.force_multi or
ConfirmOperation(inames, "instances", "reinstall", extra=warn_msg)):
return 1
else:
if not (opts.force or opts.force_multi):
usertext = ("This will reinstall the instance '%s' (and %s) which"
" removes all data. Continue?") % (inames[0], os_msg)
if not AskUser(usertext):
return 1
jex = JobExecutor(verbose=multi_on, opts=opts)
for instance_name in inames:
op = opcodes.OpInstanceReinstall(instance_name=instance_name,
os_type=os_name,
force_variant=opts.force_variant,
osparams=opts.osparams,
osparams_private=opts.osparams_private,
osparams_secret=opts.osparams_secret)
jex.QueueJob(instance_name, op)
results = jex.WaitOrShow(not opts.submit_only)
if compat.all(map(compat.fst, results)):
return constants.EXIT_SUCCESS
else:
return constants.EXIT_FAILURE
def RemoveInstance(opts, args):
"""Remove an instance.
@param opts: the command line options selected by the user
@type args: list
@param args: should contain only one element, the name of
the instance to be removed
@rtype: int
@return: the desired exit code
"""
instance_name = args[0]
force = opts.force
cl = GetClient()
if not force:
_EnsureInstancesExist(cl, [instance_name])
usertext = ("This will remove the volumes of the instance %s"
" (including mirrors), thus removing all the data"
" of the instance. Continue?") % instance_name
if not AskUser(usertext):
return 1
op = opcodes.OpInstanceRemove(instance_name=instance_name,
ignore_failures=opts.ignore_failures,
shutdown_timeout=opts.shutdown_timeout)
SubmitOrSend(op, opts, cl=cl)
return 0
def RenameInstance(opts, args):
"""Rename an instance.
@param opts: the command line options selected by the user
@type args: list
@param args: should contain two elements, the old and the
new instance names
@rtype: int
@return: the desired exit code
"""
if not opts.name_check:
if not AskUser("As you disabled the check of the DNS entry, please verify"
" that '%s' is a FQDN. Continue?" % args[1]):
return 1
op = opcodes.OpInstanceRename(instance_name=args[0],
new_name=args[1],
ip_check=opts.ip_check,
name_check=opts.name_check)
result = SubmitOrSend(op, opts)
if result:
ToStdout("Instance '%s' renamed to '%s'", args[0], result)
return 0
def ActivateDisks(opts, args):
"""Activate an instance's disks.
This serves two purposes:
- it allows (as long as the instance is not running)
mounting the disks and modifying them from the node
- it repairs inactive secondary drbds
@param opts: the command line options selected by the user
@type args: list
@param args: should contain only one element, the instance name
@rtype: int
@return: the desired exit code
"""
instance_name = args[0]
op = opcodes.OpInstanceActivateDisks(instance_name=instance_name,
ignore_size=opts.ignore_size,
wait_for_sync=opts.wait_for_sync)
disks_info = SubmitOrSend(op, opts)
for host, iname, nname in disks_info:
ToStdout("%s:%s:%s", host, iname, nname)
return 0
def DeactivateDisks(opts, args):
"""Deactivate an instance's disks.
This function takes the instance name, looks for its primary node
and the tries to shutdown its block devices on that node.
@param opts: the command line options selected by the user
@type args: list
@param args: should contain only one element, the instance name
@rtype: int
@return: the desired exit code
"""
instance_name = args[0]
op = opcodes.OpInstanceDeactivateDisks(instance_name=instance_name,
force=opts.force)
SubmitOrSend(op, opts)
return 0
def RecreateDisks(opts, args):
"""Recreate an instance's disks.
@param opts: the command line options selected by the user
@type args: list
@param args: should contain only one element, the instance name
@rtype: int
@return: the desired exit code
"""
instance_name = args[0]
disks = []
if opts.disks:
for didx, ddict in opts.disks:
didx = int(didx)
if not ht.TDict(ddict):
msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
if constants.IDISK_SIZE in ddict:
try:
ddict[constants.IDISK_SIZE] = \
utils.ParseUnit(ddict[constants.IDISK_SIZE])
except ValueError, err:
raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
(didx, err), errors.ECODE_INVAL)
if constants.IDISK_SPINDLES in ddict:
try:
ddict[constants.IDISK_SPINDLES] = \
int(ddict[constants.IDISK_SPINDLES])
except ValueError, err:
raise errors.OpPrereqError("Invalid spindles for disk %d: %s" %
(didx, err), errors.ECODE_INVAL)
disks.append((didx, ddict))
  # TODO: Verify modifiable parameters (already done in
# LUInstanceRecreateDisks, but it'd be nice to have in the client)
if opts.node:
if opts.iallocator:
msg = "At most one of either --nodes or --iallocator can be passed"
raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
pnode, snode = SplitNodeOption(opts.node)
nodes = [pnode]
if snode is not None:
nodes.append(snode)
else:
nodes = []
op = opcodes.OpInstanceRecreateDisks(instance_name=instance_name,
disks=disks, nodes=nodes,
iallocator=opts.iallocator)
SubmitOrSend(op, opts)
return 0
def GrowDisk(opts, args):
"""Grow an instance's disks.
@param opts: the command line options selected by the user
@type args: list
@param args: should contain three elements, the target instance name,
the target disk id, and the target growth
@rtype: int
@return: the desired exit code
"""
instance = args[0]
disk = args[1]
try:
disk = int(disk)
except (TypeError, ValueError), err:
raise errors.OpPrereqError("Invalid disk index: %s" % str(err),
errors.ECODE_INVAL)
try:
amount = utils.ParseUnit(args[2])
except errors.UnitParseError:
raise errors.OpPrereqError("Can't parse the given amount '%s'" % args[2],
errors.ECODE_INVAL)
op = opcodes.OpInstanceGrowDisk(instance_name=instance,
disk=disk, amount=amount,
wait_for_sync=opts.wait_for_sync,
absolute=opts.absolute)
SubmitOrSend(op, opts)
return 0
def _StartupInstance(name, opts):
"""Startup instances.
This returns the opcode to start an instance, and its decorator will
wrap this into a loop starting all desired instances.
@param name: the name of the instance to act on
@param opts: the command line options selected by the user
@return: the opcode needed for the operation
"""
op = opcodes.OpInstanceStartup(instance_name=name,
force=opts.force,
ignore_offline_nodes=opts.ignore_offline,
no_remember=opts.no_remember,
startup_paused=opts.startup_paused)
# do not add these parameters to the opcode unless they're defined
if opts.hvparams:
op.hvparams = opts.hvparams
if opts.beparams:
op.beparams = opts.beparams
return op
def _RebootInstance(name, opts):
"""Reboot instance(s).
This returns the opcode to reboot an instance, and its decorator
will wrap this into a loop rebooting all desired instances.
@param name: the name of the instance to act on
@param opts: the command line options selected by the user
@return: the opcode needed for the operation
"""
return opcodes.OpInstanceReboot(instance_name=name,
reboot_type=opts.reboot_type,
ignore_secondaries=opts.ignore_secondaries,
shutdown_timeout=opts.shutdown_timeout)
def _ShutdownInstance(name, opts):
"""Shutdown an instance.
This returns the opcode to shutdown an instance, and its decorator
will wrap this into a loop shutting down all desired instances.
@param name: the name of the instance to act on
@param opts: the command line options selected by the user
@return: the opcode needed for the operation
"""
return opcodes.OpInstanceShutdown(instance_name=name,
force=opts.force,
timeout=opts.timeout,
ignore_offline_nodes=opts.ignore_offline,
no_remember=opts.no_remember)
def ReplaceDisks(opts, args):
"""Replace the disks of an instance
@param opts: the command line options selected by the user
@type args: list
@param args: should contain only one element, the instance name
@rtype: int
@return: the desired exit code
"""
new_2ndary = opts.dst_node
iallocator = opts.iallocator
if opts.disks is None:
disks = []
else:
try:
disks = [int(i) for i in opts.disks.split(",")]
except (TypeError, ValueError), err:
raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
errors.ECODE_INVAL)
cnt = [opts.on_primary, opts.on_secondary, opts.auto,
new_2ndary is not None, iallocator is not None].count(True)
if cnt != 1:
raise errors.OpPrereqError("One and only one of the -p, -s, -a, -n and -I"
" options must be passed", errors.ECODE_INVAL)
elif opts.on_primary:
mode = constants.REPLACE_DISK_PRI
elif opts.on_secondary:
mode = constants.REPLACE_DISK_SEC
elif opts.auto:
mode = constants.REPLACE_DISK_AUTO
if disks:
raise errors.OpPrereqError("Cannot specify disks when using automatic"
" mode", errors.ECODE_INVAL)
elif new_2ndary is not None or iallocator is not None:
# replace secondary
mode = constants.REPLACE_DISK_CHG
op = opcodes.OpInstanceReplaceDisks(instance_name=args[0], disks=disks,
remote_node=new_2ndary, mode=mode,
iallocator=iallocator,
early_release=opts.early_release,
ignore_ipolicy=opts.ignore_ipolicy)
SubmitOrSend(op, opts)
return 0
def FailoverInstance(opts, args):
"""Failover an instance.
The failover is done by shutting it down on its present node and
starting it on the secondary.
@param opts: the command line options selected by the user
@type args: list
@param args: should contain only one element, the instance name
@rtype: int
@return: the desired exit code
"""
cl = GetClient()
instance_name = args[0]
force = opts.force
iallocator = opts.iallocator
target_node = opts.dst_node
if iallocator and target_node:
raise errors.OpPrereqError("Specify either an iallocator (-I), or a target"
" node (-n) but not both", errors.ECODE_INVAL)
if not force:
_EnsureInstancesExist(cl, [instance_name])
usertext = ("Failover will happen to image %s."
" This requires a shutdown of the instance. Continue?" %
(instance_name,))
if not AskUser(usertext):
return 1
op = opcodes.OpInstanceFailover(instance_name=instance_name,
ignore_consistency=opts.ignore_consistency,
shutdown_timeout=opts.shutdown_timeout,
iallocator=iallocator,
target_node=target_node,
ignore_ipolicy=opts.ignore_ipolicy)
SubmitOrSend(op, opts, cl=cl)
return 0
def MigrateInstance(opts, args):
"""Migrate an instance.
The migrate is done without shutdown.
@param opts: the command line options selected by the user
@type args: list
@param args: should contain only one element, the instance name
@rtype: int
@return: the desired exit code
"""
cl = GetClient()
instance_name = args[0]
force = opts.force
iallocator = opts.iallocator
target_node = opts.dst_node
if iallocator and target_node:
raise errors.OpPrereqError("Specify either an iallocator (-I), or a target"
" node (-n) but not both", errors.ECODE_INVAL)
if not force:
_EnsureInstancesExist(cl, [instance_name])
if opts.cleanup:
usertext = ("Instance %s will be recovered from a failed migration."
" Note that the migration procedure (including cleanup)" %
(instance_name,))
else:
usertext = ("Instance %s will be migrated. Note that migration" %
(instance_name,))
usertext += (" might impact the instance if anything goes wrong"
" (e.g. due to bugs in the hypervisor). Continue?")
if not AskUser(usertext):
return 1
# this should be removed once --non-live is deprecated
if not opts.live and opts.migration_mode is not None:
raise errors.OpPrereqError("Only one of the --non-live and "
"--migration-mode options can be passed",
errors.ECODE_INVAL)
if not opts.live: # --non-live passed
mode = constants.HT_MIGRATION_NONLIVE
else:
mode = opts.migration_mode
op = opcodes.OpInstanceMigrate(instance_name=instance_name, mode=mode,
cleanup=opts.cleanup, iallocator=iallocator,
target_node=target_node,
allow_failover=opts.allow_failover,
allow_runtime_changes=opts.allow_runtime_chgs,
ignore_ipolicy=opts.ignore_ipolicy,
ignore_hvversions=opts.ignore_hvversions)
SubmitOrSend(op, cl=cl, opts=opts)
return 0
def MoveInstance(opts, args):
"""Move an instance.
@param opts: the command line options selected by the user
@type args: list
@param args: should contain only one element, the instance name
@rtype: int
@return: the desired exit code
"""
cl = GetClient()
instance_name = args[0]
force = opts.force
if not force:
usertext = ("Instance %s will be moved."
" This requires a shutdown of the instance. Continue?" %
(instance_name,))
if not AskUser(usertext):
return 1
op = opcodes.OpInstanceMove(instance_name=instance_name,
target_node=opts.node,
compress=opts.compress,
shutdown_timeout=opts.shutdown_timeout,
ignore_consistency=opts.ignore_consistency,
ignore_ipolicy=opts.ignore_ipolicy)
SubmitOrSend(op, opts, cl=cl)
return 0
def ConnectToInstanceConsole(opts, args):
"""Connect to the console of an instance.
@param opts: the command line options selected by the user
@type args: list
@param args: should contain only one element, the instance name
@rtype: int
@return: the desired exit code
"""
instance_name = args[0]
cl = GetClient()
try:
cluster_name = cl.QueryConfigValues(["cluster_name"])[0]
idata = cl.QueryInstances([instance_name], ["console", "oper_state"], False)
if not idata:
raise errors.OpPrereqError("Instance '%s' does not exist" % instance_name,
errors.ECODE_NOENT)
finally:
# Ensure client connection is closed while external commands are run
cl.Close()
del cl
((console_data, oper_state), ) = idata
if not console_data:
if oper_state:
# Instance is running
raise errors.OpExecError("Console information for instance %s is"
" unavailable" % instance_name)
else:
raise errors.OpExecError("Instance %s is not running, can't get console" %
instance_name)
return _DoConsole(objects.InstanceConsole.FromDict(console_data),
opts.show_command, cluster_name)
def _DoConsole(console, show_command, cluster_name, feedback_fn=ToStdout,
_runcmd_fn=utils.RunCmd):
"""Acts based on the result of L{opcodes.OpInstanceConsole}.
@type console: L{objects.InstanceConsole}
@param console: Console object
@type show_command: bool
@param show_command: Whether to just display commands
@type cluster_name: string
@param cluster_name: Cluster name as retrieved from master daemon
"""
console.Validate()
if console.kind == constants.CONS_MESSAGE:
feedback_fn(console.message)
elif console.kind == constants.CONS_VNC:
feedback_fn("Instance %s has VNC listening on %s:%s (display %s),"
" URL <vnc://%s:%s/>",
console.instance, console.host, console.port,
console.display, console.host, console.port)
elif console.kind == constants.CONS_SPICE:
feedback_fn("Instance %s has SPICE listening on %s:%s", console.instance,
console.host, console.port)
elif console.kind == constants.CONS_SSH:
# Convert to string if not already one
if isinstance(console.command, basestring):
cmd = console.command
else:
cmd = utils.ShellQuoteArgs(console.command)
srun = ssh.SshRunner(cluster_name=cluster_name)
ssh_cmd = srun.BuildCmd(console.host, console.user, cmd,
port=console.port,
batch=True, quiet=False, tty=True)
if show_command:
feedback_fn(utils.ShellQuoteArgs(ssh_cmd))
else:
result = _runcmd_fn(ssh_cmd, interactive=True)
if result.failed:
logging.error("Console command \"%s\" failed with reason '%s' and"
" output %r", result.cmd, result.fail_reason,
result.output)
raise errors.OpExecError("Connection to console of instance %s failed,"
" please check cluster configuration" %
console.instance)
else:
raise errors.GenericError("Unknown console type '%s'" % console.kind)
return constants.EXIT_SUCCESS
def _FormatDiskDetails(dev_type, dev, roman):
"""Formats the logical_id of a disk.
"""
if dev_type == constants.DT_DRBD8:
drbd_info = dev["drbd_info"]
data = [
("nodeA", "%s, minor=%s" %
(drbd_info["primary_node"],
compat.TryToRoman(drbd_info["primary_minor"],
convert=roman))),
("nodeB", "%s, minor=%s" %
(drbd_info["secondary_node"],
compat.TryToRoman(drbd_info["secondary_minor"],
convert=roman))),
("port", str(compat.TryToRoman(drbd_info["port"], roman))),
("auth key", str(drbd_info["secret"])),
]
elif dev_type == constants.DT_PLAIN:
vg_name, lv_name = dev["logical_id"]
data = ["%s/%s" % (vg_name, lv_name)]
else:
data = [str(dev["logical_id"])]
return data
def _FormatBlockDevInfo(idx, top_level, dev, roman):
"""Show block device information.
This is only used by L{ShowInstanceConfig}, but it's too big to be
left for an inline definition.
@type idx: int
@param idx: the index of the current disk
@type top_level: boolean
@param top_level: if this a top-level disk?
@type dev: dict
@param dev: dictionary with disk information
@type roman: boolean
@param roman: whether to try to use roman integers
@return: a list of either strings, tuples or lists
(which should be formatted at a higher indent level)
"""
def helper(dtype, status):
"""Format one line for physical device status.
@type dtype: str
@param dtype: a constant from the L{constants.DTS_BLOCK} set
@type status: tuple
@param status: a tuple as returned from L{backend.FindBlockDevice}
@return: the string representing the status
"""
if not status:
return "not active"
txt = ""
(path, major, minor, syncp, estt, degr, ldisk_status) = status
if major is None:
major_string = "N/A"
else:
major_string = str(compat.TryToRoman(major, convert=roman))
if minor is None:
minor_string = "N/A"
else:
minor_string = str(compat.TryToRoman(minor, convert=roman))
txt += ("%s (%s:%s)" % (path, major_string, minor_string))
if dtype in (constants.DT_DRBD8, ):
if syncp is not None:
sync_text = "*RECOVERING* %5.2f%%," % syncp
if estt:
sync_text += " ETA %ss" % compat.TryToRoman(estt, convert=roman)
else:
sync_text += " ETA unknown"
else:
sync_text = "in sync"
if degr:
degr_text = "*DEGRADED*"
else:
degr_text = "ok"
if ldisk_status == constants.LDS_FAULTY:
ldisk_text = " *MISSING DISK*"
elif ldisk_status == constants.LDS_UNKNOWN:
ldisk_text = " *UNCERTAIN STATE*"
else:
ldisk_text = ""
txt += (" %s, status %s%s" % (sync_text, degr_text, ldisk_text))
elif dtype == constants.DT_PLAIN:
if ldisk_status == constants.LDS_FAULTY:
ldisk_text = " *FAILED* (failed drive?)"
else:
ldisk_text = ""
txt += ldisk_text
return txt
# the header
if top_level:
if dev["iv_name"] is not None:
txt = dev["iv_name"]
else:
txt = "disk %s" % compat.TryToRoman(idx, convert=roman)
else:
txt = "child %s" % compat.TryToRoman(idx, convert=roman)
if isinstance(dev["size"], int):
nice_size = utils.FormatUnit(dev["size"], "h", roman)
else:
nice_size = str(dev["size"])
data = [(txt, "%s, size %s" % (dev["dev_type"], nice_size))]
if top_level:
if dev["spindles"] is not None:
data.append(("spindles", dev["spindles"]))
data.append(("access mode", dev["mode"]))
if dev["logical_id"] is not None:
try:
l_id = _FormatDiskDetails(dev["dev_type"], dev, roman)
except ValueError:
l_id = [str(dev["logical_id"])]
if len(l_id) == 1:
data.append(("logical_id", l_id[0]))
else:
data.extend(l_id)
if dev["pstatus"]:
data.append(("on primary", helper(dev["dev_type"], dev["pstatus"])))
if dev["sstatus"]:
data.append(("on secondary", helper(dev["dev_type"], dev["sstatus"])))
data.append(("name", dev["name"]))
data.append(("UUID", dev["uuid"]))
if dev["children"]:
data.append(("child devices", [
_FormatBlockDevInfo(c_idx, False, child, roman)
for c_idx, child in enumerate(dev["children"])
]))
return data
def _FormatInstanceNicInfo(idx, nic, roman=False):
"""Helper function for L{_FormatInstanceInfo()}"""
(name, uuid, ip, mac, mode, link, vlan, _, netinfo) = nic
network_name = None
if netinfo:
network_name = netinfo["name"]
return [
("nic/%s" % str(compat.TryToRoman(idx, roman)), ""),
("MAC", str(mac)),
("IP", str(ip)),
("mode", str(mode)),
("link", str(link)),
("vlan", str(compat.TryToRoman(vlan, roman))),
("network", str(network_name)),
("UUID", str(uuid)),
("name", str(name)),
]
def _FormatInstanceNodesInfo(instance):
"""Helper function for L{_FormatInstanceInfo()}"""
pgroup = ("%s (UUID %s)" %
(instance["pnode_group_name"], instance["pnode_group_uuid"]))
secs = utils.CommaJoin(("%s (group %s, group UUID %s)" %
(name, group_name, group_uuid))
for (name, group_name, group_uuid) in
zip(instance["snodes"],
instance["snodes_group_names"],
instance["snodes_group_uuids"]))
return [
[
("primary", instance["pnode"]),
("group", pgroup),
],
[("secondaries", secs)],
]
def _GetVncConsoleInfo(instance):
"""Helper function for L{_FormatInstanceInfo()}"""
vnc_bind_address = instance["hv_actual"].get(constants.HV_VNC_BIND_ADDRESS,
None)
if vnc_bind_address:
port = instance["network_port"]
display = int(port) - constants.VNC_BASE_PORT
if display > 0 and vnc_bind_address == constants.IP4_ADDRESS_ANY:
vnc_console_port = "%s:%s (display %s)" % (instance["pnode"],
port,
display)
elif display > 0 and netutils.IP4Address.IsValid(vnc_bind_address):
vnc_console_port = ("%s:%s (node %s) (display %s)" %
(vnc_bind_address, port,
instance["pnode"], display))
else:
# vnc bind address is a file
vnc_console_port = "%s:%s" % (instance["pnode"],
vnc_bind_address)
ret = "vnc to %s" % vnc_console_port
else:
ret = None
return ret
def _FormatInstanceInfo(instance, roman_integers):
"""Format instance information for L{cli.PrintGenericInfo()}"""
istate = "configured to be %s" % instance["config_state"]
if instance["run_state"]:
istate += ", actual state is %s" % instance["run_state"]
info = [
("Instance name", instance["name"]),
("UUID", instance["uuid"]),
("Serial number",
str(compat.TryToRoman(instance["serial_no"], convert=roman_integers))),
("Creation time", utils.FormatTime(instance["ctime"])),
("Modification time", utils.FormatTime(instance["mtime"])),
("State", istate),
("Nodes", _FormatInstanceNodesInfo(instance)),
("Operating system", instance["os"]),
("Operating system parameters",
FormatParamsDictInfo(instance["os_instance"], instance["os_actual"],
roman_integers)),
]
if "network_port" in instance:
info.append(("Allocated network port",
str(compat.TryToRoman(instance["network_port"],
convert=roman_integers))))
info.append(("Hypervisor", instance["hypervisor"]))
console = _GetVncConsoleInfo(instance)
if console:
info.append(("console connection", console))
# deprecated "memory" value, kept for one version for compatibility
# TODO(ganeti 2.7) remove.
be_actual = copy.deepcopy(instance["be_actual"])
be_actual["memory"] = be_actual[constants.BE_MAXMEM]
info.extend([
("Hypervisor parameters",
FormatParamsDictInfo(instance["hv_instance"], instance["hv_actual"],
roman_integers)),
("Back-end parameters",
FormatParamsDictInfo(instance["be_instance"], be_actual,
roman_integers)),
("NICs", [
_FormatInstanceNicInfo(idx, nic, roman_integers)
for (idx, nic) in enumerate(instance["nics"])
]),
("Disk template", instance["disk_template"]),
("Disks", [
_FormatBlockDevInfo(idx, True, device, roman_integers)
for (idx, device) in enumerate(instance["disks"])
]),
])
return info
def ShowInstanceConfig(opts, args):
"""Compute instance run-time status.
@param opts: the command line options selected by the user
@type args: list
@param args: either an empty list, and then we query all
instances, or should contain a list of instance names
@rtype: int
@return: the desired exit code
"""
if not args and not opts.show_all:
ToStderr("No instance selected."
" Please pass in --all if you want to query all instances.\n"
"Note that this can take a long time on a big cluster.")
return 1
elif args and opts.show_all:
ToStderr("Cannot use --all if you specify instance names.")
return 1
retcode = 0
op = opcodes.OpInstanceQueryData(instances=args, static=opts.static,
use_locking=not opts.static)
result = SubmitOpCode(op, opts=opts)
if not result:
ToStdout("No instances.")
return 1
PrintGenericInfo([
_FormatInstanceInfo(instance, opts.roman_integers)
for instance in result.values()
])
return retcode
def _ConvertNicDiskModifications(mods):
"""Converts NIC/disk modifications from CLI to opcode.
When L{opcodes.OpInstanceSetParams} was changed to support adding/removing
disks at arbitrary indices, its parameter format changed. This function
converts legacy requests (e.g. "--net add" or "--disk add:size=4G") to the
newer format and adds support for new-style requests (e.g. "--new 4:add").
@type mods: list of tuples
@param mods: Modifications as given by command line parser
@rtype: list of tuples
@return: Modifications as understood by L{opcodes.OpInstanceSetParams}
"""
result = []
for (identifier, params) in mods:
if identifier == constants.DDM_ADD:
# Add item as last item (legacy interface)
action = constants.DDM_ADD
identifier = -1
elif identifier == constants.DDM_REMOVE:
# Remove last item (legacy interface)
action = constants.DDM_REMOVE
identifier = -1
else:
# Modifications and adding/removing at arbitrary indices
add = params.pop(constants.DDM_ADD, _MISSING)
remove = params.pop(constants.DDM_REMOVE, _MISSING)
modify = params.pop(constants.DDM_MODIFY, _MISSING)
if modify is _MISSING:
if not (add is _MISSING or remove is _MISSING):
raise errors.OpPrereqError("Cannot add and remove at the same time",
errors.ECODE_INVAL)
elif add is not _MISSING:
action = constants.DDM_ADD
elif remove is not _MISSING:
action = constants.DDM_REMOVE
else:
action = constants.DDM_MODIFY
elif add is _MISSING and remove is _MISSING:
action = constants.DDM_MODIFY
else:
raise errors.OpPrereqError("Cannot modify and add/remove at the"
" same time", errors.ECODE_INVAL)
assert not (constants.DDMS_VALUES_WITH_MODIFY & set(params.keys()))
if action == constants.DDM_REMOVE and params:
raise errors.OpPrereqError("Not accepting parameters on removal",
errors.ECODE_INVAL)
result.append((action, identifier, params))
return result
def _ParseExtStorageParams(params):
"""Parses the disk params for ExtStorage conversions.
"""
if params:
if constants.IDISK_PROVIDER not in params:
raise errors.OpPrereqError("Missing required parameter '%s' when"
" converting to an ExtStorage disk template" %
constants.IDISK_PROVIDER, errors.ECODE_INVAL)
else:
for param in params.keys():
if (param != constants.IDISK_PROVIDER and
param in constants.IDISK_PARAMS):
raise errors.OpPrereqError("Invalid parameter '%s' when converting"
" to an ExtStorage template (it is not"
" allowed modifying existing disk"
" parameters)" % param, errors.ECODE_INVAL)
return params
def _ParseDiskSizes(mods):
"""Parses disk sizes in parameters.
"""
for (action, _, params) in mods:
if params and constants.IDISK_SPINDLES in params:
params[constants.IDISK_SPINDLES] = \
int(params[constants.IDISK_SPINDLES])
if params and constants.IDISK_SIZE in params:
params[constants.IDISK_SIZE] = \
utils.ParseUnit(params[constants.IDISK_SIZE])
elif action == constants.DDM_ADD:
raise errors.OpPrereqError("Missing required parameter 'size'",
errors.ECODE_INVAL)
return mods
def SetInstanceParams(opts, args):
"""Modifies an instance.
All parameters take effect only at the next restart of the instance.
@param opts: the command line options selected by the user
@type args: list
@param args: should contain only one element, the instance name
@rtype: int
@return: the desired exit code
"""
if not (opts.nics or opts.disks or opts.disk_template or opts.hvparams or
opts.beparams or opts.os or opts.osparams or opts.osparams_private
or opts.offline_inst or opts.online_inst or opts.runtime_mem or
opts.new_primary_node or opts.instance_communication is not None):
ToStderr("Please give at least one of the parameters.")
return 1
for param in opts.beparams:
if isinstance(opts.beparams[param], basestring):
if opts.beparams[param].lower() == "default":
opts.beparams[param] = constants.VALUE_DEFAULT
utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT,
allowed_values=[constants.VALUE_DEFAULT])
for param in opts.hvparams:
if isinstance(opts.hvparams[param], basestring):
if opts.hvparams[param].lower() == "default":
opts.hvparams[param] = constants.VALUE_DEFAULT
utils.ForceDictType(opts.hvparams, constants.HVS_PARAMETER_TYPES,
allowed_values=[constants.VALUE_DEFAULT])
FixHvParams(opts.hvparams)
nics = _ConvertNicDiskModifications(opts.nics)
for action, _, __ in nics:
if action == constants.DDM_MODIFY and opts.hotplug and not opts.force:
usertext = ("You are about to hot-modify a NIC. This will be done"
" by removing the existing NIC and then adding a new one."
" Network connection might be lost. Continue?")
if not AskUser(usertext):
return 1
disks = _ParseDiskSizes(_ConvertNicDiskModifications(opts.disks))
# verify the user provided parameters for disk template conversions
if opts.disk_template:
if (not opts.node and
opts.disk_template in constants.DTS_INT_MIRROR):
ToStderr("Changing the disk template to a mirrored one requires"
" specifying a secondary node")
return 1
elif (opts.ext_params and
opts.disk_template != constants.DT_EXT):
ToStderr("Specifying ExtStorage parameters requires converting"
" to the '%s' disk template" % constants.DT_EXT)
return 1
elif (not opts.ext_params and
opts.disk_template == constants.DT_EXT):
ToStderr("Provider option is missing, use either the"
" '--ext-params' or '-e' option")
return 1
if ((opts.file_driver or
opts.file_storage_dir) and
      opts.disk_template not in constants.DTS_FILEBASED):
ToStderr("Specifying file-based configuration arguments requires"
" converting to a file-based disk template")
return 1
ext_params = _ParseExtStorageParams(opts.ext_params)
if opts.offline_inst:
offline = True
elif opts.online_inst:
offline = False
else:
offline = None
instance_comm = opts.instance_communication
op = opcodes.OpInstanceSetParams(instance_name=args[0],
nics=nics,
disks=disks,
hotplug=opts.hotplug,
hotplug_if_possible=opts.hotplug_if_possible,
disk_template=opts.disk_template,
ext_params=ext_params,
file_driver=opts.file_driver,
file_storage_dir=opts.file_storage_dir,
remote_node=opts.node,
pnode=opts.new_primary_node,
hvparams=opts.hvparams,
beparams=opts.beparams,
runtime_mem=opts.runtime_mem,
os_name=opts.os,
osparams=opts.osparams,
osparams_private=opts.osparams_private,
force_variant=opts.force_variant,
force=opts.force,
wait_for_sync=opts.wait_for_sync,
offline=offline,
conflicts_check=opts.conflicts_check,
ignore_ipolicy=opts.ignore_ipolicy,
instance_communication=instance_comm)
# even if here we process the result, we allow submit only
result = SubmitOrSend(op, opts)
if result:
ToStdout("Modified instance %s", args[0])
for param, data in result:
ToStdout(" - %-5s -> %s", param, data)
ToStdout("Please don't forget that most parameters take effect"
" only at the next (re)start of the instance initiated by"
" ganeti; restarting from within the instance will"
" not be enough.")
if opts.hvparams:
ToStdout("Note that changing hypervisor parameters without performing a"
" restart might lead to a crash while performing a live"
" migration. This will be addressed in future Ganeti versions.")
return 0
def ChangeGroup(opts, args):
"""Moves an instance to another group.
"""
(instance_name, ) = args
cl = GetClient()
op = opcodes.OpInstanceChangeGroup(instance_name=instance_name,
iallocator=opts.iallocator,
target_groups=opts.to,
early_release=opts.early_release)
result = SubmitOrSend(op, opts, cl=cl)
# Keep track of submitted jobs
jex = JobExecutor(cl=cl, opts=opts)
for (status, job_id) in result[constants.JOB_IDS_KEY]:
jex.AddJobId(None, status, job_id)
results = jex.GetResults()
bad_cnt = len([row for row in results if not row[0]])
if bad_cnt == 0:
ToStdout("Instance '%s' changed group successfully.", instance_name)
rcode = constants.EXIT_SUCCESS
else:
ToStdout("There were %s errors while changing group of instance '%s'.",
bad_cnt, instance_name)
rcode = constants.EXIT_FAILURE
return rcode
# multi-instance selection options
m_force_multi = cli_option("--force-multiple", dest="force_multi",
help="Do not ask for confirmation when more than"
" one instance is affected",
action="store_true", default=False)
m_pri_node_opt = cli_option("--primary", dest="multi_mode",
help="Filter by nodes (primary only)",
const=_EXPAND_NODES_PRI, action="store_const")
m_sec_node_opt = cli_option("--secondary", dest="multi_mode",
help="Filter by nodes (secondary only)",
const=_EXPAND_NODES_SEC, action="store_const")
m_node_opt = cli_option("--node", dest="multi_mode",
help="Filter by nodes (primary and secondary)",
const=_EXPAND_NODES_BOTH, action="store_const")
m_clust_opt = cli_option("--all", dest="multi_mode",
help="Select all instances in the cluster",
const=_EXPAND_CLUSTER, action="store_const")
m_inst_opt = cli_option("--instance", dest="multi_mode",
help="Filter by instance name [default]",
const=_EXPAND_INSTANCES, action="store_const")
m_node_tags_opt = cli_option("--node-tags", dest="multi_mode",
help="Filter by node tag",
const=_EXPAND_NODES_BOTH_BY_TAGS,
action="store_const")
m_pri_node_tags_opt = cli_option("--pri-node-tags", dest="multi_mode",
help="Filter by primary node tag",
const=_EXPAND_NODES_PRI_BY_TAGS,
action="store_const")
m_sec_node_tags_opt = cli_option("--sec-node-tags", dest="multi_mode",
help="Filter by secondary node tag",
const=_EXPAND_NODES_SEC_BY_TAGS,
action="store_const")
m_inst_tags_opt = cli_option("--tags", dest="multi_mode",
help="Filter by instance tag",
const=_EXPAND_INSTANCES_BY_TAGS,
action="store_const")
# this is defined separately due to readability only
add_opts = [
NOSTART_OPT,
OS_OPT,
FORCE_VARIANT_OPT,
NO_INSTALL_OPT,
IGNORE_IPOLICY_OPT,
INSTANCE_COMMUNICATION_OPT,
HELPER_STARTUP_TIMEOUT_OPT,
HELPER_SHUTDOWN_TIMEOUT_OPT,
]
commands = {
"add": (
AddInstance, [ArgHost(min=1, max=1)],
COMMON_CREATE_OPTS + add_opts,
"[...] -t disk-type -n node[:secondary-node] -o os-type <name>",
"Creates and adds a new instance to the cluster"),
"batch-create": (
BatchCreate, [ArgFile(min=1, max=1)],
[DRY_RUN_OPT, PRIORITY_OPT, IALLOCATOR_OPT] + SUBMIT_OPTS,
"<instances.json>",
"Create a bunch of instances based on specs in the file."),
"console": (
ConnectToInstanceConsole, ARGS_ONE_INSTANCE,
[SHOWCMD_OPT, PRIORITY_OPT],
"[--show-cmd] <instance>", "Opens a console on the specified instance"),
"failover": (
FailoverInstance, ARGS_ONE_INSTANCE,
[FORCE_OPT, IGNORE_CONSIST_OPT] + SUBMIT_OPTS +
[SHUTDOWN_TIMEOUT_OPT,
DRY_RUN_OPT, PRIORITY_OPT, DST_NODE_OPT, IALLOCATOR_OPT,
IGNORE_IPOLICY_OPT, CLEANUP_OPT],
"[-f] <instance>", "Stops the instance, changes its primary node and"
" (if it was originally running) starts it on the new node"
" (the secondary for mirrored instances or any node"
" for shared storage)."),
"migrate": (
MigrateInstance, ARGS_ONE_INSTANCE,
[FORCE_OPT, NONLIVE_OPT, MIGRATION_MODE_OPT, CLEANUP_OPT, DRY_RUN_OPT,
PRIORITY_OPT, DST_NODE_OPT, IALLOCATOR_OPT, ALLOW_FAILOVER_OPT,
IGNORE_IPOLICY_OPT, IGNORE_HVVERSIONS_OPT, NORUNTIME_CHGS_OPT]
+ SUBMIT_OPTS,
"[-f] <instance>", "Migrate instance to its secondary node"
" (only for mirrored instances)"),
"move": (
MoveInstance, ARGS_ONE_INSTANCE,
[FORCE_OPT] + SUBMIT_OPTS +
[SINGLE_NODE_OPT, COMPRESS_OPT,
SHUTDOWN_TIMEOUT_OPT, DRY_RUN_OPT, PRIORITY_OPT, IGNORE_CONSIST_OPT,
IGNORE_IPOLICY_OPT],
"[-f] <instance>", "Move instance to an arbitrary node"
" (only for instances of type file and lv)"),
"info": (
ShowInstanceConfig, ARGS_MANY_INSTANCES,
[STATIC_OPT, ALL_OPT, ROMAN_OPT, PRIORITY_OPT],
"[-s] {--all | <instance>...}",
"Show information on the specified instance(s)"),
"list": (
ListInstances, ARGS_MANY_INSTANCES,
[NOHDR_OPT, SEP_OPT, USEUNITS_OPT, FIELDS_OPT, VERBOSE_OPT,
FORCE_FILTER_OPT],
"[<instance>...]",
"Lists the instances and their status. The available fields can be shown"
" using the \"list-fields\" command (see the man page for details)."
" The default field list is (in order): %s." %
utils.CommaJoin(_LIST_DEF_FIELDS),
),
"list-fields": (
ListInstanceFields, [ArgUnknown()],
[NOHDR_OPT, SEP_OPT],
"[fields...]",
"Lists all available fields for instances"),
"reinstall": (
ReinstallInstance, [ArgInstance()],
[FORCE_OPT, OS_OPT, FORCE_VARIANT_OPT, m_force_multi, m_node_opt,
m_pri_node_opt, m_sec_node_opt, m_clust_opt, m_inst_opt, m_node_tags_opt,
m_pri_node_tags_opt, m_sec_node_tags_opt, m_inst_tags_opt, SELECT_OS_OPT]
+ SUBMIT_OPTS + [DRY_RUN_OPT, PRIORITY_OPT, OSPARAMS_OPT,
OSPARAMS_PRIVATE_OPT, OSPARAMS_SECRET_OPT],
"[-f] <instance>", "Reinstall a stopped instance"),
"remove": (
RemoveInstance, ARGS_ONE_INSTANCE,
[FORCE_OPT, SHUTDOWN_TIMEOUT_OPT, IGNORE_FAILURES_OPT] + SUBMIT_OPTS
+ [DRY_RUN_OPT, PRIORITY_OPT],
"[-f] <instance>", "Shuts down the instance and removes it"),
"rename": (
RenameInstance,
[ArgInstance(min=1, max=1), ArgHost(min=1, max=1)],
[NOIPCHECK_OPT, NONAMECHECK_OPT] + SUBMIT_OPTS
+ [DRY_RUN_OPT, PRIORITY_OPT],
"<instance> <new_name>", "Rename the instance"),
"replace-disks": (
ReplaceDisks, ARGS_ONE_INSTANCE,
[AUTO_REPLACE_OPT, DISKIDX_OPT, IALLOCATOR_OPT, EARLY_RELEASE_OPT,
NEW_SECONDARY_OPT, ON_PRIMARY_OPT, ON_SECONDARY_OPT] + SUBMIT_OPTS
+ [DRY_RUN_OPT, PRIORITY_OPT, IGNORE_IPOLICY_OPT],
"[-s|-p|-a|-n NODE|-I NAME] <instance>",
"Replaces disks for the instance"),
"modify": (
SetInstanceParams, ARGS_ONE_INSTANCE,
[BACKEND_OPT, DISK_OPT, FORCE_OPT, HVOPTS_OPT, NET_OPT] + SUBMIT_OPTS +
[DISK_TEMPLATE_OPT, SINGLE_NODE_OPT, OS_OPT, FORCE_VARIANT_OPT,
OSPARAMS_OPT, OSPARAMS_PRIVATE_OPT, DRY_RUN_OPT, PRIORITY_OPT, NWSYNC_OPT,
OFFLINE_INST_OPT, ONLINE_INST_OPT, IGNORE_IPOLICY_OPT, RUNTIME_MEM_OPT,
NOCONFLICTSCHECK_OPT, NEW_PRIMARY_OPT, HOTPLUG_OPT,
HOTPLUG_IF_POSSIBLE_OPT, INSTANCE_COMMUNICATION_OPT,
EXT_PARAMS_OPT, FILESTORE_DRIVER_OPT, FILESTORE_DIR_OPT],
"<instance>", "Alters the parameters of an instance"),
"shutdown": (
GenericManyOps("shutdown", _ShutdownInstance), [ArgInstance()],
[FORCE_OPT, m_node_opt, m_pri_node_opt, m_sec_node_opt, m_clust_opt,
m_node_tags_opt, m_pri_node_tags_opt, m_sec_node_tags_opt,
m_inst_tags_opt, m_inst_opt, m_force_multi, TIMEOUT_OPT] + SUBMIT_OPTS
+ [DRY_RUN_OPT, PRIORITY_OPT, IGNORE_OFFLINE_OPT, NO_REMEMBER_OPT],
"<instance>", "Stops an instance"),
"startup": (
GenericManyOps("startup", _StartupInstance), [ArgInstance()],
[FORCE_OPT, m_force_multi, m_node_opt, m_pri_node_opt, m_sec_node_opt,
m_node_tags_opt, m_pri_node_tags_opt, m_sec_node_tags_opt,
m_inst_tags_opt, m_clust_opt, m_inst_opt] + SUBMIT_OPTS +
[HVOPTS_OPT,
BACKEND_OPT, DRY_RUN_OPT, PRIORITY_OPT, IGNORE_OFFLINE_OPT,
NO_REMEMBER_OPT, STARTUP_PAUSED_OPT],
"<instance>", "Starts an instance"),
"reboot": (
GenericManyOps("reboot", _RebootInstance), [ArgInstance()],
[m_force_multi, REBOOT_TYPE_OPT, IGNORE_SECONDARIES_OPT, m_node_opt,
m_pri_node_opt, m_sec_node_opt, m_clust_opt, m_inst_opt] + SUBMIT_OPTS +
[m_node_tags_opt, m_pri_node_tags_opt, m_sec_node_tags_opt,
m_inst_tags_opt, SHUTDOWN_TIMEOUT_OPT, DRY_RUN_OPT, PRIORITY_OPT],
"<instance>", "Reboots an instance"),
"activate-disks": (
ActivateDisks, ARGS_ONE_INSTANCE,
SUBMIT_OPTS + [IGNORE_SIZE_OPT, PRIORITY_OPT, WFSYNC_OPT],
"<instance>", "Activate an instance's disks"),
"deactivate-disks": (
DeactivateDisks, ARGS_ONE_INSTANCE,
[FORCE_OPT] + SUBMIT_OPTS + [DRY_RUN_OPT, PRIORITY_OPT],
"[-f] <instance>", "Deactivate an instance's disks"),
"recreate-disks": (
RecreateDisks, ARGS_ONE_INSTANCE,
SUBMIT_OPTS +
[DISK_OPT, NODE_PLACEMENT_OPT, DRY_RUN_OPT, PRIORITY_OPT,
IALLOCATOR_OPT],
"<instance>", "Recreate an instance's disks"),
"grow-disk": (
GrowDisk,
[ArgInstance(min=1, max=1), ArgUnknown(min=1, max=1),
ArgUnknown(min=1, max=1)],
SUBMIT_OPTS + [NWSYNC_OPT, DRY_RUN_OPT, PRIORITY_OPT, ABSOLUTE_OPT],
"<instance> <disk> <size>", "Grow an instance's disk"),
"change-group": (
ChangeGroup, ARGS_ONE_INSTANCE,
[TO_GROUP_OPT, IALLOCATOR_OPT, EARLY_RELEASE_OPT, PRIORITY_OPT]
+ SUBMIT_OPTS,
"[-I <iallocator>] [--to <group>]", "Change group of instance"),
"list-tags": (
ListTags, ARGS_ONE_INSTANCE, [],
"<instance_name>", "List the tags of the given instance"),
"add-tags": (
AddTags, [ArgInstance(min=1, max=1), ArgUnknown()],
[TAG_SRC_OPT, PRIORITY_OPT] + SUBMIT_OPTS,
"<instance_name> tag...", "Add tags to the given instance"),
"remove-tags": (
RemoveTags, [ArgInstance(min=1, max=1), ArgUnknown()],
[TAG_SRC_OPT, PRIORITY_OPT] + SUBMIT_OPTS,
"<instance_name> tag...", "Remove tags from given instance"),
}
#: dictionary with aliases for commands
aliases = {
"start": "startup",
"stop": "shutdown",
"show": "info",
}
def Main():
return GenericMain(commands, aliases=aliases,
override={"tag_type": constants.TAG_INSTANCE},
env_override=_ENV_OVERRIDE)
| bsd-2-clause | 8,941,289,388,210,791,000 | 35.149826 | 80 | 0.618715 | false | 3.790647 | false | false | false |
bmazin/ARCONS-pipeline | fluxcal/fluxCal.py | 1 | 29931 | #!/bin/python
'''
fluxCal.py
Created by Seth Meeker on 11-21-2012
Modified on 02-16-2015 to perform absolute fluxCal with point sources
Opens ARCONS observation of a spectrophotometric standard star and
associated wavelength cal file, reads in all photons and converts to energies.
Bins photons to generate a spectrum, then divides this into the known spectrum
of the object to create a Sensitivity curve. This curve is then written out to
h5 file.
Flags are associated with each pixel - see headers/pipelineFlags
for descriptions. Note some flags are set here, others are set
later on when creating photon lists.
'''
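# Usage sketch (inferred from the __main__ block at the end of this file, not
# an official interface):
#   python fluxCal.py /path/to/fluxCal.dict
# where the .dict parameter file provides keys such as 'run', 'object',
# 'method' ('relative' or 'absolute') and 'photometry' ('PSF' or 'aperture'),
# all of which are read in FluxCal.__init__ below.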
import sys,os
import tables
import numpy as np
from scipy import interpolate
from scipy.optimize.minpack import curve_fit
import matplotlib.pyplot as plt
from photometry import LightCurve
from util.FileName import FileName
from util.ObsFile import ObsFile
from util import MKIDStd
from util.readDict import readDict
from util.utils import rebin
from util.utils import gaussianConvolution
from util.utils import makeMovie
from util.utils import fitBlackbody
import hotpix.hotPixels as hp
import matplotlib
from matplotlib.backends.backend_pdf import PdfPages
from headers import pipelineFlags
import figureHeader
class FluxCal:
def __init__(self,paramFile,plots=False,verbose=False):
"""
Opens flux file, prepares standard spectrum, and calculates flux factors for the file.
Method is provided in param file. If 'relative' is selected, an obs file with standard star defocused over
the entire array is expected, with accompanying sky file to do sky subtraction.
If any other method is provided, 'absolute' will be done by default, wherein a point source is assumed
to be present. The obs file is then broken into spectral frames with photometry (psf or aper) performed
on each frame to generate the ARCONS observed spectrum.
"""
self.verbose=verbose
self.plots = plots
self.params = readDict()
self.params.read_from_file(paramFile)
run = self.params['run']
sunsetDate = self.params['fluxSunsetLocalDate']
self.fluxTstamp = self.params['fluxTimestamp']
skyTstamp = self.params['skyTimestamp']
wvlSunsetDate = self.params['wvlCalSunsetLocalDate']
wvlTimestamp = self.params['wvlCalTimestamp']
flatCalFileName = self.params['flatCalFileName']
needTimeAdjust = self.params['needTimeAdjust']
self.deadtime = float(self.params['deadtime']) #from firmware pulse detection
self.timeSpacingCut = self.params['timeSpacingCut']
bLoadBeammap = self.params.get('bLoadBeammap',False)
self.method = self.params['method']
self.objectName = self.params['object']
self.r = float(self.params['energyResolution'])
self.photometry = self.params['photometry']
self.centroidRow = self.params['centroidRow']
self.centroidCol = self.params['centroidCol']
self.aperture = self.params['apertureRad']
self.annulusInner = self.params['annulusInner']
self.annulusOuter = self.params['annulusOuter']
self.collectingArea = self.params['collectingArea']
self.startTime = self.params['startTime']
self.intTime = self.params['integrationTime']
fluxFN = FileName(run=run,date=sunsetDate,tstamp=self.fluxTstamp)
self.fluxFileName = fluxFN.obs()
self.fluxFile = ObsFile(self.fluxFileName)
if self.plots:
self.plotSavePath = os.environ['MKID_PROC_PATH']+os.sep+'fluxCalSolnFiles'+os.sep+run+os.sep+sunsetDate+os.sep+'plots'+os.sep
if not os.path.exists(self.plotSavePath): os.mkdir(self.plotSavePath)
if self.verbose: print "Created directory %s"%self.plotSavePath
obsFNs = [fluxFN]
self.obsList = [self.fluxFile]
if self.startTime in ['',None]: self.startTime=0
if self.intTime in ['',None]: self.intTime=-1
if self.method=="relative":
try:
print "performing Relative Flux Calibration"
skyFN = FileName(run=run,date=sunsetDate,tstamp=skyTstamp)
self.skyFileName = skyFN.obs()
self.skyFile = ObsFile(self.skyFileName)
obsFNs.append(skyFN)
self.obsList.append(self.skyFile)
except:
print "For relative flux calibration a sky file must be provided in param file"
self.__del__()
else:
self.method='absolute'
print "performing Absolute Flux Calibration"
if self.photometry not in ['aperture','PSF']: self.photometry='PSF' #default to PSF fitting if no valid photometry selected
timeMaskFileNames = [fn.timeMask() for fn in obsFNs]
timeAdjustFileName = FileName(run=run).timeAdjustments()
#make filename for output fluxCalSoln file
self.fluxCalFileName = FileName(run=run,date=sunsetDate,tstamp=self.fluxTstamp).fluxSoln()
print "Creating flux cal: %s"%self.fluxCalFileName
if wvlSunsetDate != '':
wvlCalFileName = FileName(run=run,date=wvlSunsetDate,tstamp=wvlTimestamp).calSoln()
if flatCalFileName =='':
flatCalFileName=FileName(obsFile=self.fluxFile).flatSoln()
#load cal files for flux file and, if necessary, sky file
for iObs,obs in enumerate(self.obsList):
if bLoadBeammap:
print 'loading beammap',os.environ['MKID_BEAMMAP_PATH']
obs.loadBeammapFile(os.environ['MKID_BEAMMAP_PATH'])
if wvlSunsetDate != '':
obs.loadWvlCalFile(wvlCalFileName)
else:
obs.loadBestWvlCalFile()
obs.loadFlatCalFile(flatCalFileName)
obs.setWvlCutoffs(-1,-1)
if needTimeAdjust:
obs.loadTimeAdjustmentFile(timeAdjustFileName)
timeMaskFileName = timeMaskFileNames[iObs]
print timeMaskFileName
if not os.path.exists(timeMaskFileName):
print 'Running hotpix for ',obs
hp.findHotPixels(obsFile=obs,outputFileName=timeMaskFileName,fwhm=np.inf,useLocalStdDev=True)
print "Flux cal/sky file pixel mask saved to %s"%(timeMaskFileName)
obs.loadHotPixCalFile(timeMaskFileName)
if self.verbose: print "Loaded hot pixel file %s"%timeMaskFileName
#get flat cal binning information since flux cal will need to match it
self.wvlBinEdges = self.fluxFile.flatCalFile.root.flatcal.wavelengthBins.read()
self.nWvlBins = self.fluxFile.flatWeights.shape[2]
self.binWidths = np.empty((self.nWvlBins),dtype=float)
self.binCenters = np.empty((self.nWvlBins),dtype=float)
for i in xrange(self.nWvlBins):
self.binWidths[i] = self.wvlBinEdges[i+1]-self.wvlBinEdges[i]
self.binCenters[i] = (self.wvlBinEdges[i]+(self.binWidths[i]/2.0))
if self.method=='relative':
print "Extracting ARCONS flux and sky spectra"
self.loadRelativeSpectrum()
print "Flux Spectrum loaded"
self.loadSkySpectrum()
print "Sky Spectrum loaded"
elif self.method=='absolute':
print "Extracting ARCONS point source spectrum"
self.loadAbsoluteSpectrum()
print "Loading standard spectrum"
try:
self.loadStdSpectrum(self.objectName)
except KeyError:
print "Invalid spectrum object name"
self.__del__()
sys.exit()
print "Generating sensitivity curve"
self.calculateFactors()
print "Sensitivity Curve calculated"
print "Writing fluxCal to file %s"%self.fluxCalFileName
self.writeFactors(self.fluxCalFileName)
if self.plots: self.makePlots()
print "Done"
def __del__(self):
try:
self.fluxFile.close()
self.calFile.close()
except AttributeError:#fluxFile was never defined
pass
def getDeadTimeCorrection(self, obs): #WRONG RIGHT NOW. NEEDS TO HAVE RAW COUNTS SUMMED, NOT CUBE WHICH EXCLUDES NOISE TAIL
if self.verbose: print "Making raw cube to get dead time correction"
cubeDict = obs.getSpectralCube(firstSec=self.startTime, integrationTime=self.intTime, weighted=False, fluxWeighted=False)
cube= np.array(cubeDict['cube'], dtype=np.double)
wvlBinEdges= cubeDict['wvlBinEdges']
effIntTime= cubeDict['effIntTime']
if self.verbose: print "median effective integration time = ", np.median(effIntTime)
nWvlBins=len(wvlBinEdges)-1
if self.verbose: print "cube shape ", np.shape(cube)
if self.verbose: print "effIntTime shape ", np.shape(effIntTime)
#add third dimension to effIntTime for broadcasting
effIntTime = np.reshape(effIntTime,np.shape(effIntTime)+(1,))
#put cube into counts/s in each pixel
cube /= effIntTime
#CALCULATE DEADTIME CORRECTION
#NEED TOTAL COUNTS PER SECOND FOR EACH PIXEL TO DO PROPERLY
#ASSUMES SAME CORRECTION FACTOR APPLIED FOR EACH WAVELENGTH, MEANING NO WL DEPENDANCE ON DEAD TIME EFFECT
DTCorr = np.zeros((np.shape(cube)[0],np.shape(cube)[1]),dtype=float)
for f in range(0,np.shape(cube)[2]):
#if self.verbose: print cube[:,:,f]
#if self.verbose: print '-----------------------'
DTCorr += cube[:,:,f]
#if self.verbose: print DTCorr
#if self.verbose: print '\n=====================\n'
#Correct for firmware dead time (100us in 2012 ARCONS firmware)
DTCorrNew=DTCorr/(1-DTCorr*self.deadtime)
CorrFactors = DTCorrNew/DTCorr #This is what the frames need to be multiplied by to get their true values
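        # Worked example (illustrative, using the 100us firmware dead time noted
        # above): a pixel measuring 1000 counts/s gives
        # CorrFactors = 1/(1 - 1000*1e-4) ~= 1.11, i.e. its frames are scaled up
        # by about 11% to recover the true rate.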
if self.verbose: print "Dead time correction factors: ", CorrFactors
#add third dimension to CorrFactors for broadcasting
CorrFactors = np.reshape(CorrFactors,np.shape(CorrFactors)+(1,))
return CorrFactors
def loadAbsoluteSpectrum(self):
'''
extract the ARCONS measured spectrum of the spectrophotometric standard by breaking data into spectral cube
and performing photometry (aper or psf) on each spectral frame
'''
if self.verbose:print "Making spectral cube"
cubeDict = self.fluxFile.getSpectralCube(firstSec=self.startTime, integrationTime=self.intTime, weighted=True, fluxWeighted=False)
cube= np.array(cubeDict['cube'], dtype=np.double)
effIntTime= cubeDict['effIntTime']
if self.verbose: print "median effective integration time in flux file cube = ", np.median(effIntTime)
if self.verbose: print "cube shape ", np.shape(cube)
if self.verbose: print "effIntTime shape ", np.shape(effIntTime)
#add third dimension to effIntTime for broadcasting
effIntTime = np.reshape(effIntTime,np.shape(effIntTime)+(1,))
#put cube into counts/s in each pixel
cube /= effIntTime
#get dead time correction factors
DTCorr = self.getDeadTimeCorrection(self.fluxFile)
cube*=DTCorr #cube now in units of counts/s and corrected for dead time
if self.plots and not 'figureHeader' in sys.modules:
if self.verbose: print "Saving spectral frames as movie..."
movieCube = np.zeros((self.nWvlBins,np.shape(cube)[0],np.shape(cube)[1]),dtype=float)
for i in xrange(self.nWvlBins):
movieCube[i,:,:] = cube[:,:,i]
makeMovie(movieCube,frameTitles=self.binCenters,cbar=True,outName=self.plotSavePath+'FluxCal_Cube_%s.gif'%(self.objectName), normMin=0, normMax=50)
if self.verbose: print "Movie saved in %s"%self.plotSavePath
LCplot=False #light curve pop-ups not compatible with FLuxCal plotting 2/18/15
#if self.photometry=='PSF': LCplot = False
LC = LightCurve.LightCurve(verbose=self.verbose, showPlot=LCplot)
self.fluxSpectrum=np.empty((self.nWvlBins),dtype=float)
self.skySpectrum=np.zeros((self.nWvlBins),dtype=float)
for i in xrange(self.nWvlBins):
frame = cube[:,:,i]
if self.verbose: print "%s photometry on frame %i of cube, central wvl = %f Angstroms"%(self.photometry,i,self.binCenters[i])
if self.photometry == 'aperture':
fDict = LC.performPhotometry(self.photometry,frame,[[self.centroidCol,self.centroidRow]],expTime=None,aper_radius = self.aperture, annulus_inner = self.annulusInner, annulus_outer = self.annulusOuter, interpolation="linear")
self.fluxSpectrum[i] = fDict['flux']
self.skySpectrum[i] = fDict['skyFlux']
print "Sky estimate = ", fDict['skyFlux']
else:
fDict = LC.performPhotometry(self.photometry,frame,[[self.centroidCol,self.centroidRow]],expTime=None,aper_radius = self.aperture)
self.fluxSpectrum[i] = fDict['flux']
self.fluxSpectrum=self.fluxSpectrum/self.binWidths/self.collectingArea #spectrum now in counts/s/Angs/cm^2
self.skySpectrum=self.skySpectrum/self.binWidths/self.collectingArea
return self.fluxSpectrum, self.skySpectrum
def loadRelativeSpectrum(self):
self.fluxSpectra = [[[] for i in xrange(self.nCol)] for j in xrange(self.nRow)]
self.fluxEffTime = [[[] for i in xrange(self.nCol)] for j in xrange(self.nRow)]
for iRow in xrange(self.nRow):
for iCol in xrange(self.nCol):
count = self.fluxFile.getPixelCount(iRow,iCol)
fluxDict = self.fluxFile.getPixelSpectrum(iRow,iCol,weighted=True,firstSec=0,integrationTime=-1)
self.fluxSpectra[iRow][iCol],self.fluxEffTime[iRow][iCol] = fluxDict['spectrum'],fluxDict['effIntTime']
self.fluxSpectra = np.array(self.fluxSpectra)
self.fluxEffTime = np.array(self.fluxEffTime)
DTCorr = self.getDeadTimeCorrection(self.fluxFile)
#print "Bin widths = ",self.binWidths
self.fluxSpectra = self.fluxSpectra/self.binWidths/self.fluxEffTime*DTCorr
self.fluxSpectrum = self.calculateMedian(self.fluxSpectra) #find median of subtracted spectra across whole array
return self.fluxSpectrum
def loadSkySpectrum(self):
self.skySpectra = [[[] for i in xrange(self.nCol)] for j in xrange(self.nRow)]
self.skyEffTime = [[[] for i in xrange(self.nCol)] for j in xrange(self.nRow)]
for iRow in xrange(self.nRow):
for iCol in xrange(self.nCol):
count = self.skyFile.getPixelCount(iRow,iCol)
skyDict = self.skyFile.getPixelSpectrum(iRow,iCol,weighted=True,firstSec=0,integrationTime=-1)
self.skySpectra[iRow][iCol],self.skyEffTime[iRow][iCol] = skyDict['spectrum'],skyDict['effIntTime']
self.skySpectra = np.array(self.skySpectra)
self.skyEffTime = np.array(self.skyEffTime)
DTCorr = self.getDeadTimeCorrection(self.skyFile)
self.skySpectra = self.skySpectra/self.binWidths/self.skyEffTime*DTCorr
self.skySpectrum = self.calculateMedian(self.skySpectra) #find median of subtracted spectra across whole array
return self.skySpectrum
def loadStdSpectrum(self, objectName="G158-100"):
#import the known spectrum of the calibrator and rebin to the histogram parameters given
#must be imported into array with dtype float so division later does not have error
std = MKIDStd.MKIDStd()
a = std.load(objectName)
a = std.countsToErgs(a) #convert std spectrum to ergs/s/Angs/cm^2 for BB fitting and cleaning
self.stdWvls = np.array(a[:,0])
self.stdFlux = np.array(a[:,1]) #std object spectrum in ergs/s/Angs/cm^2
if self.plots:
#create figure for plotting standard spectrum modifications
self.stdFig = plt.figure()
self.stdAx = self.stdFig.add_subplot(111)
plt.xlim(3500,12000)
plt.plot(self.stdWvls,self.stdFlux*1E15,linewidth=1,color='grey',alpha=0.75)
convX_rev,convY_rev = self.cleanSpectrum(self.stdWvls,self.stdFlux)
convX = convX_rev[::-1] #convolved spectrum comes back sorted backwards, from long wvls to low which screws up rebinning
convY = convY_rev[::-1]
#rebin cleaned spectrum to flat cal's wvlBinEdges
newa = rebin(convX,convY,self.wvlBinEdges)
rebinnedWvl = np.array(newa[:,0])
rebinnedFlux = np.array(newa[:,1])
if self.plots:
#plot final resampled spectrum
plt.plot(convX,convY*1E15,color='blue')
plt.step(rebinnedWvl,rebinnedFlux*1E15,color = 'black',where='mid')
plt.legend(['%s Spectrum'%self.objectName,'Blackbody Fit','Gaussian Convolved Spectrum','Rebinned Spectrum'],'upper right', numpoints=1)
plt.xlabel(ur"Wavelength (\r{A})")
plt.ylabel(ur"Flux (10$^{-15}$ ergs s$^{-1}$ cm$^{-2}$ \r{A}$^{-1}$)")
plt.ylim(0.9*min(rebinnedFlux)*1E15, 1.1*max(rebinnedFlux)*1E15)
plt.savefig(self.plotSavePath+'FluxCal_StdSpectrum_%s.eps'%self.objectName,format='eps')
#convert standard spectrum back into counts/s/angstrom/cm^2
newa = std.ergsToCounts(newa)
self.binnedSpectrum = np.array(newa[:,1])
def cleanSpectrum(self,x,y):
##=============== BB Fit to extend spectrum beyond 11000 Angstroms ==================
fraction = 1.0/3.0
nirX = np.arange(int(x[(1.0-fraction)*len(x)]),20000)
T, nirY = fitBlackbody(x,y,fraction=fraction,newWvls=nirX,tempGuess=5600)
if self.plots: plt.plot(nirX,nirY*1E15,linestyle='--',linewidth=2, color="black",alpha=0.5)
extendedWvl = np.concatenate((x,nirX[nirX>max(x)]))
extendedFlux = np.concatenate((y,nirY[nirX>max(x)]))
##======= Gaussian convolution to smooth std spectrum to MKIDs median resolution ========
newX, newY = gaussianConvolution(extendedWvl,extendedFlux,xEnMin=0.005,xEnMax=6.0,xdE=0.001,fluxUnits = "lambda",r=self.r,plots=False)
return newX, newY
def calculateFactors(self):
"""
Calculate the sensitivity spectrum: the weighting factors that correct the flat calibrated spectra to the real spectra
For relative calibration:
First subtract sky spectrum from ARCONS observed spectrum. Then take median of this spectrum as it should be identical
across the array, assuming the flat cal has done its job. Then divide this into the known spectrum of the object.
For absolute calibration:
self.fluxSpectra already has sky subtraction included. Simply divide this spectrum into the known standard spectrum.
"""
self.subtractedSpectrum = self.fluxSpectrum - self.skySpectrum
self.subtractedSpectrum = np.array(self.subtractedSpectrum,dtype=float) #cast as floats so division does not fail later
if self.method=='relative':
normWvl = 5500 #Angstroms. Choose an arbitrary wvl to normalize the relative correction at
ind = np.where(self.wvlBinEdges >= normWvl)[0][0]-1
self.subtractedSpectrum = self.subtractedSpectrum/(self.subtractedSpectrum[ind]) #normalize
self.binnedSpectrum = self.binnedSpectrum/(self.binnedSpectrum[ind]) #normalize treated Std spectrum while we are at it
#Calculate FluxCal factors
self.fluxFactors = self.binnedSpectrum/self.subtractedSpectrum
#self.fluxFlags = np.zeros(np.shape(self.fluxFactors),dtype='int')
self.fluxFlags = np.empty(np.shape(self.fluxFactors),dtype='int')
self.fluxFlags.fill(pipelineFlags.fluxCal['good']) #Initialise flag array filled with 'good' flags. JvE 5/1/2013.
#set factors that will cause trouble to 1
#self.fluxFlags[self.fluxFactors == np.inf] = 1
self.fluxFlags[self.fluxFactors == np.inf] = pipelineFlags.fluxCal['infWeight'] #Modified to use flag dictionary - JvE 5/1/2013
self.fluxFactors[self.fluxFactors == np.inf]=1.0
self.fluxFlags[np.isnan(self.fluxFactors)] = pipelineFlags.fluxCal['nanWeight'] #Modified to use flag dictionary - JvE 5/1/2013
self.fluxFactors[np.isnan(self.fluxFactors)]=1.0
self.fluxFlags[self.fluxFactors <= 0]=pipelineFlags.fluxCal['LEzeroWeight'] #Modified to use flag dictionary - JvE 5/1/2013
self.fluxFactors[self.fluxFactors <= 0]=1.0
def calculateMedian(self, spectra):
spectra2d = np.reshape(spectra,[self.nRow*self.nCol,self.nWvlBins])
wvlMedian = np.empty(self.nWvlBins,dtype=float)
for iWvl in xrange(self.nWvlBins):
spectrum = spectra2d[:,iWvl]
goodSpectrum = spectrum[spectrum != 0]#dead pixels need to be taken out before calculating medians
wvlMedian[iWvl] = np.median(goodSpectrum)
return wvlMedian
def makePlots(self):
"""
Output all debugging plots of ARCONS sky and object spectra, known calibrator spectrum, and sensitivity curve
"""
scratchDir = os.getenv('MKID_PROC_PATH')
fluxDir = self.plotSavePath
fluxCalBase = 'FluxCal_%s'%self.objectName
plotFileName = fluxCalBase+".pdf"
fullFluxPlotFileName = os.path.join(fluxDir,plotFileName)
        #uncomment to make some plots for the paper. Proper formatting will also require figureheader to be imported and for movie making to be turned off
self.paperFig = plt.figure()
self.paperAx = self.paperFig.add_subplot(111)
plt.xlim(4000,11000)
plt.plot(self.binCenters,self.fluxFactors,linewidth=3,color='black')
plt.xlabel(ur"Wavelength (\r{A})")
plt.ylabel(ur"Spectral Calibration Curve")
plt.ylim(0,150)
plt.savefig(self.plotSavePath+'FluxCal_Sensitivity_%s.eps'%self.objectName,format='eps')
#save throughput as a .npz file that other code uses when making paper plots
np.savez(self.plotSavePath+'%s_%s_throughput.npz'%(self.objectName.strip(),self.fluxTstamp),throughput=1.0/self.fluxFactors,wvls=self.binCenters)
pp = PdfPages(fullFluxPlotFileName)
#plt.rcParams['font.size'] = 2
wvls = self.binCenters
plt.figure()
ax1 = plt.subplot(111)
ax1.set_title('ARCONS median flat cal\'d flux in counts')
plt.plot(wvls,self.fluxSpectrum)
pp.savefig()
plt.figure()
ax2 = plt.subplot(111)
ax2.set_title('ARCONS median flat cal\'d sky in counts')
plt.plot(wvls,self.skySpectrum)
pp.savefig()
plt.figure()
ax3 = plt.subplot(111)
ax3.set_title('Flux data minus sky in counts')
plt.plot(wvls,self.subtractedSpectrum)
pp.savefig()
plt.figure()
ax4 = plt.subplot(111)
ax4.set_title('Std Spectrum of %s'%(self.objectName))
plt.plot(self.stdWvls,self.stdFlux)
pp.savefig()
plt.figure()
ax5 = plt.subplot(111)
ax5.set_title('Binned Std Spectrum')
plt.plot(wvls,self.binnedSpectrum)
pp.savefig()
plt.figure()
ax6 = plt.subplot(111)
ax6.set_title('Median Sensitivity Spectrum')
ax6.set_xlim((3500,12000))
#ax6.set_ylim((0,5))
plt.plot(wvls,self.fluxFactors)
pp.savefig()
plt.figure()
ax7 = plt.subplot(111)
ax7.set_title('1/Sensitivity (Throughput)')
ax7.set_xlim((3500,12000))
ax7.set_ylim((0,.04))
plt.plot(wvls,1.0/self.fluxFactors)
pp.savefig()
plt.figure()
ax8 = plt.subplot(111)
ax8.set_title('Flux Cal\'d ARCONS Spectrum of Std')
plt.plot(wvls,self.fluxFactors*self.subtractedSpectrum)
pp.savefig()
pp.close()
print "Saved Flux Cal plots to %s"%(fullFluxPlotFileName)
def writeFactors(self,fluxCalFileName):
"""
Write flux cal weights to h5 file
"""
if os.path.isabs(fluxCalFileName) == True:
fullFluxCalFileName = fluxCalFileName
else:
scratchDir = os.getenv('MKID_PROC_PATH')
fluxDir = os.path.join(scratchDir,'fluxCalSolnFiles')
fullFluxCalFileName = os.path.join(fluxDir,fluxCalFileName)
try:
fluxCalFile = tables.openFile(fullFluxCalFileName,mode='w')
except:
print 'Error: Couldn\'t create flux cal file, ',fullFluxCalFileName
return
calgroup = fluxCalFile.createGroup(fluxCalFile.root,'fluxcal','Table of flux calibration weights by wavelength')
caltable = tables.Array(calgroup,'weights',object=self.fluxFactors,title='Flux calibration Weights indexed by wavelengthBin')
flagtable = tables.Array(calgroup,'flags',object=self.fluxFlags,title='Flux cal flags indexed by wavelengthBin. 0 is Good')
bintable = tables.Array(calgroup,'wavelengthBins',object=self.wvlBinEdges,title='Wavelength bin edges corresponding to third dimension of weights array')
fluxCalFile.flush()
fluxCalFile.close()
print "Finished Flux Cal, written to %s"%(fullFluxCalFileName)
def cleanSpectrum_old(self,x,y,objectName):
'''
function to take high resolution spectrum of standard star, extend IR coverage with
an exponential tail, then rebin down to ARCONS resolution. This function has since been
deprecated with the current cleanSpectrum which uses a BB fit to extend IR coverage,
and does the rebinning using a gaussian convolution. This is left in for reference.
'''
#locations and widths of absorption features in Angstroms
#features = [3890,3970,4099,4340,4860,6564,6883,7619]
#widths = [50,50,50,50,50,50,50,50]
#for i in xrange(len(features)):
# #check for absorption feature in std spectrum
# ind = np.where((x<(features[i]+15)) & (x>(features[i]-15)))[0]
# if len(ind)!=0:
# ind = ind[len(ind)/2]
# #if feature is found (flux is higher on both sides of the specified wavelength where the feature should be)
# if y[ind]<y[ind+1] and y[ind]<y[ind-1]:
# #cut out width[i] around feature[i]
# inds = np.where((x >= features[i]+widths[i]) | (x <= features[i]-widths[i]))
# x = x[inds]
# y = y[inds]
#fit a tail to the end of the spectrum to interpolate out to desired wavelength in angstroms
fraction = 3.0/4.0
newx = np.arange(int(x[fraction*len(x)]),20000)
slopeguess = (np.log(y[-1])-np.log(y[fraction*len(x)]))/(x[-1]-x[fraction*len(x)])
print "Guess at exponential slope is %f"%(slopeguess)
guess_a, guess_b, guess_c = float(y[fraction*len(x)]), x[fraction*len(x)], slopeguess
guess = [guess_a, guess_b, guess_c]
fitx = x[fraction*len(x):]
fity = y[fraction*len(x):]
exp_decay = lambda fx, A, x0, t: A * np.exp((fx-x0) * t)
params, cov = curve_fit(exp_decay, fitx, fity, p0=guess, maxfev=2000)
A, x0, t= params
print "A = %s\nx0 = %s\nt = %s\n"%(A, x0, t)
best_fit = lambda fx: A * np.exp((fx-x0)*t)
calcx = np.array(newx,dtype=float)
newy = best_fit(calcx)
        #func = interpolate.splrep(x[fraction*len(x):],y[fraction*len(x):],s=smooth)
#newx = np.arange(int(x[fraction*len(x)]),self.wvlBinEdges[-1])
#newy = interpolate.splev(newx,func)
wl = np.concatenate((x,newx[newx>max(x)]))
flux = np.concatenate((y,newy[newx>max(x)]))
#new method, rebin data to grid of wavelengths generated from a grid of evenly spaced energy bins
#R=7.0 at 4500
#R=E/dE -> dE = R/E
dE = 0.3936 #eV
start = 1000 #Angs
stop = 20000 #Angs
enBins = ObsFile.makeWvlBins(dE,start,stop)
rebinned = rebin(wl,flux,enBins)
re_wl = rebinned[:,0]
re_flux = rebinned[:,1]
#plt.plot(re_wl,re_flux,color='r')
re_wl = re_wl[np.isnan(re_flux)==False]
re_flux = re_flux[np.isnan(re_flux)==False]
start1 = self.wvlBinEdges[0]
stop1 = self.wvlBinEdges[-1]
#regrid downsampled data
new_wl = np.arange(start1,stop1)
#print re_wl
#print re_flux
#print new_wl
#weight=1.0/(re_flux)**(2/1.00)
print len(re_flux)
weight = np.ones(len(re_flux))
#decrease weights near peak
ind = np.where(re_flux == max(re_flux))[0]
weight[ind] = 0.3
for p in [1,2,3]:
if p==1:
wt = 0.3
elif p==2:
wt = 0.6
elif p==3:
wt = 0.7
try:
weight[ind+p] = wt
except IndexError:
pass
try:
if ind-p >= 0:
weight[ind-p] = wt
except IndexError:
pass
weight[-4:] = 1.0
#weight = [0.7,1,0.3,0.3,0.5,0.7,1,1,1]
#print len(weight)
#weight = re_flux/min(re_flux)
#weight = 1.0/weight
#weight = weight/max(weight)
#print weight
f = interpolate.splrep(re_wl,re_flux,w=weight,k=3,s=max(re_flux)**1.71)
new_flux = interpolate.splev(new_wl,f,der=0)
return new_wl, new_flux
if __name__ == '__main__':
try:
paramFile = sys.argv[1]
except:
paramFile = '/home/srmeeker/ARCONS-pipeline/params/fluxCal.dict'
fc = FluxCal(paramFile, plots=True, verbose=True)
| gpl-2.0 | 1,007,656,973,070,526,800 | 43.016176 | 240 | 0.640406 | false | 3.514678 | false | false | false |
emc-openstack/storops | storops_test/lib/test_tasks.py | 1 | 3524 | # coding=utf-8
# Copyright (c) 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
import shutil
from unittest import TestCase
import tempfile
from hamcrest import assert_that, equal_to, raises
import persistqueue
from storops.lib import tasks
from storops_test.vnx.cli_mock import patch_cli, t_vnx
import time
class TestPQueue(TestCase):
def setUp(self):
self.path = tempfile.mkdtemp(suffix='storops')
self.q = tasks.PQueue(self.path)
def tearDown(self):
self.q.stop()
self.q = None
time.sleep(0.1)
shutil.rmtree(self.path, ignore_errors=True)
def test_queue_path(self):
assert_that(self.q.path, equal_to(self.path))
def test_put(self):
fake_vnx = t_vnx()
self.q.put(fake_vnx.delete_lun, name='l1')
def test_get(self):
fake_vnx = t_vnx()
self.q.put(fake_vnx.delete_lun, name='l1')
pickled_item = self.q.get()
assert_that(pickled_item['object']._ip, equal_to(fake_vnx._ip))
assert_that(pickled_item['method'], equal_to('delete_lun'))
assert_that(pickled_item['params']['name'], equal_to('l1'))
self.q.task_done()
self.q = None
self.q = tasks.PQueue(self.path)
assert_that(self.q.get, raises(persistqueue.Empty))
def test_run_empty_queue(self):
self.q.set_interval(0.01)
self.q.start()
# Make sure restart is fine
self.q.start()
@patch_cli
def test_run_tasks(self):
self.q.set_interval(0.01)
fake_vnx = t_vnx()
self.q.put(fake_vnx.delete_lun, name='l1')
self.q.start()
def test_re_enqueue(self):
fake_vnx = t_vnx()
item = {'object': fake_vnx, 'method': 'delete_lun',
'params': {'name': 'l1'}}
self.q.re_enqueue(item)
assert_that(item['retries'], equal_to(1))
def test_re_enqueue_max_retries(self):
fake_vnx = t_vnx()
item = {'object': fake_vnx, 'method': 'delete_lun', 'params': 'l1'}
for i in range(100):
self.q.re_enqueue(item)
self.q.get()
self.q.re_enqueue(item)
assert_that(item['retries'], equal_to(100))
@patch_cli
def test_enqueue_expected_error(self):
self.q.set_interval(0.1)
fake_vnx = t_vnx()
uid = '00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:01'
self.q.put(fake_vnx.delete_hba, hba_uid=uid)
self.q.start()
time.sleep(0.2)
assert_that(self.q.get, raises(persistqueue.Empty))
@patch_cli
def test_enqueue_storops_error(self):
self.q.set_interval(0.1)
fake_vnx = t_vnx()
self.q.put(fake_vnx.create_block_user,
name='b', password='b', role='operator')
self.q.start()
time.sleep(0.2)
reenqueued_item = self.q.get()
assert_that('b', equal_to(reenqueued_item['params']['name']))
| apache-2.0 | -1,893,215,132,781,401,000 | 31.036364 | 78 | 0.608116 | false | 3.186257 | true | false | false |
rossumai/keras-multi-gpu | keras_tf_multigpu/examples/kuza55/cifar10_cnn_functional_multigpu.py | 1 | 4556 | '''Train a simple deep CNN on the CIFAR10 small images dataset.
GPU run command with Theano backend (with TensorFlow, the GPU is automatically used):
THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatx=float32 python cifar10_cnn.py
It gets down to 0.65 test logloss in 25 epochs, and down to 0.55 after 50 epochs.
(it's still underfitting at that point, though).
'''
from __future__ import print_function
import keras
from keras import backend as K
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model
from keras.layers import Input, Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.layers.merge import concatenate
from keras.layers.core import Lambda
import os
import tensorflow as tf
from keras_tf_multigpu.kuza55 import make_parallel
# sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
sess = tf.Session()
K.set_session(sess)
ps_device = '/gpu:0'
gpu_count = len([dev for dev in os.environ.get('CUDA_VISIBLE_DEVICES', '').split(',') if len(dev.strip()) > 0])
batch_size = 128
num_classes = 10
epochs = 6
data_augmentation = True
# The data, shuffled and split between train and test sets:
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
def basic_model():
input = Input(shape=x_train.shape[1:])
x = Conv2D(32, (3, 3), padding='same')(input)
x = Activation('relu')(x)
x = Conv2D(32, (3, 3))(x)
x = Activation('relu')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Dropout(0.25)(x)
x = Conv2D(64, (3, 3), padding='same')(x)
x = Activation('relu')(x)
x = Conv2D(64, (3, 3))(x)
x = Activation('relu')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Dropout(0.25)(x)
x = Flatten()(x)
x = Dense(512)(x)
x = Activation('relu')(x)
x = Dropout(0.5)(x)
x = Dense(num_classes)(x)
output = Activation('softmax')(x)
model = Model(inputs=input, outputs=output)
print('Single tower model:')
model.summary()
return model
with tf.device(ps_device):
serial_model = basic_model()
print('Serial model:')
serial_model.summary()
model = make_parallel(serial_model, gpu_count, ps_device)
print('Multi-GPU model:')
model.summary()
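# Editorial sketch of the expected behaviour (assuming the usual kuza55-style
# data parallelism implemented by make_parallel): the single-tower model is
# replicated on every visible GPU, each input batch is sliced across the
# replicas and the tower outputs are concatenated on ps_device, so with
# batch_size = 128 and two GPUs each tower sees 64 samples per step.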
# initiate RMSprop optimizer
opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)
# Let's train the model using RMSprop
model.compile(loss='categorical_crossentropy',
optimizer=opt,
metrics=['accuracy'])
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
if not data_augmentation:
print('Not using data augmentation.')
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test),
shuffle=True)
else:
print('Using real-time data augmentation.')
# This will do preprocessing and realtime data augmentation:
datagen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=0, # randomly rotate images in the range (degrees, 0 to 180)
width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
horizontal_flip=True, # randomly flip images
vertical_flip=False) # randomly flip images
# Compute quantities required for feature-wise normalization
# (std, mean, and principal components if ZCA whitening is applied).
datagen.fit(x_train)
# Fit the model on the batches generated by datagen.flow().
model.fit_generator(datagen.flow(x_train, y_train,
batch_size=batch_size),
steps_per_epoch=x_train.shape[0] // batch_size,
epochs=epochs,
validation_data=(x_test, y_test))
| mit | -8,435,183,062,125,755,000 | 33.778626 | 111 | 0.663082 | false | 3.394933 | true | false | false |
lumig242/Video-Share-System | video/views.py | 1 | 4497 | from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from video.form import *
from video.models import Video,Comment
from django.contrib.auth.decorators import login_required
import json
@login_required
def upload(request):
uploadFlag = True
if request.method == 'POST':
form = UploadFileForm(request.POST, request.FILES)
if form.is_valid():
video = Video()
video.owner = request.user
video.title = form.cleaned_data['title']
video.file = request.FILES['file']
video.description = form.cleaned_data["description"]
video.save()
return HttpResponseRedirect('success/')
else:
form = UploadFileForm()
return render_to_response('upload.html', locals(),context_instance=RequestContext(request))
def uploadSuccess(request):
return render_to_response('upload_Success.html',context_instance=RequestContext(request))
def homepage_video_list(request):
highscore = Video.objects.all()
highscore = sorted(highscore, key=lambda x: 1. * x.rating_sum / (1 + x.rating_person))[0:5]
latest = Video.objects.all()[0:5]
return render_to_response('homepage.html', locals(), context_instance=RequestContext(request))
def video_play(request,video_id):
video_object = Video.objects.get(id=video_id)
video_path = video_object.file.url
own = True if request.user == video_object.owner else False
if video_object.rating_person:
points = round(1.0*video_object.rating_sum/video_object.rating_person,1)
else:
points = "Not rated"
# Comment display
commentList = Comment.objects.filter(video=video_object).order_by('-time')
return render_to_response('videoplay.html', locals(),context_instance=RequestContext(request))
def rate_video(request,video_id):
print request.method, video_id
if request.method == 'POST':
print 'hello2'
form = RatingForm(request.POST)
if form.is_valid():
print 'hello3'
video_object = Video.objects.get(id=video_id)
video_object.rating_person += 1
video_object.rating_sum += form.cleaned_data['rate']
video_object.save()
HasRated = True
points = round(1.0*video_object.rating_sum/video_object.rating_person,1)
return HttpResponse(points)
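# Illustrative example (not part of the original code): if a video currently
# has rating_sum == 9 from rating_person == 2 voters and a new POST rates it 5,
# the stored totals become 14 and 3 and the response body is
# round(1.0 * 14 / 3, 1) == 4.7.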
def comment_video(request, video_id):
print request.method, video_id
if request.method == 'POST':
print "hello2"
form = SendCommentForm(request.POST)
if form.is_valid():
print "hello3"
comment = Comment()
comment.author = request.user
comment.video = Video.objects.filter(id=video_id)[0]
comment.content = form.cleaned_data['content']
comment.save()
print str(comment.author.username), str(comment.time), str(comment.content)
            s = '<p>'+str(comment.author.username)+ comment.time.strftime(" %b. %d, %Y, %I:%M %p ")+ str(comment.content) + '</p>'
#return HttpResponse(json.dumps({"name":str(comment.author.username), "date":str(comment.time), "content": str(comment.content)}))
return HttpResponse(s)
def video_modify(request,video_id):
modifyFlag = True
video_object = Video.objects.get(id=video_id)
if request.method == 'POST':
uploadFlag = True
form = ModifyVideoForm(request.POST)
if form.is_valid():
video_object.title = form.cleaned_data['title']
video_object.description = form.cleaned_data["description"]
video_object.save()
return HttpResponseRedirect('/videoplay/{}'.format(video_id))
else:
form = ModifyVideoForm()
return render_to_response('upload.html', locals(),context_instance=RequestContext(request))
def video_delete(request,video_id):
video_object = Video.objects.get(id=video_id)
video_object.delete()
return HttpResponseRedirect('/timeline')
def video_share(request,video_id):
video_object = Video.objects.get(id=video_id)
video = Video()
video.owner = request.user
video.title = video_object.title
video.file = video_object.file
video.description = video_object.description
video.save()
return HttpResponseRedirect('/videoplay/{}'.format(video_id))
| mit | -4,826,796,093,921,239,000 | 35.266129 | 142 | 0.653769 | false | 3.893506 | false | false | false |
scott48074/Restorative-Justice-App | app/facesheet.py | 1 | 4506 | '''
Takes in a list of values from the database and creates a facesheet.
'''
import os
from docx import Document
from docx.enum.text import WD_ALIGN_PARAGRAPH
def assemble_address(street, apartment, city, state, zip_code):
address = street.title()
if apartment:
address += f' APT: {apartment.title()}'
address += f' {city.title()}, '
address += state.upper()
address += ' ' + zip_code
return address
def parse_row(row_list):
info = {'case_number': row_list[1],
'occurred_date': row_list[2],
'incident_type': row_list[3].title(),
'age': row_list[5],
'name': row_list[7].title(),
'address': assemble_address(row_list[8], row_list[9],
row_list[10], row_list[11],
row_list[12],
),
'DOB': row_list[13],
'phone': row_list[14],
'race': row_list[15].title(),
'sex': row_list[16].title(),
'district': row_list[18].title()}
return info
def district_line(document, district):
p = document.add_paragraph()
p.alignment = WD_ALIGN_PARAGRAPH.RIGHT
p.add_run(f'District: {district}').bold = True
def approval_line(document):
p = document.add_paragraph()
p.alignment = WD_ALIGN_PARAGRAPH.RIGHT
p.add_run('Selection: ').bold = True
p.add_run('Pass').bold = True
p.add_run(' | ').bold = True
p.add_run('Fail').bold = True
p.add_run().add_break()
p.add_run('Background: ').bold = True
p.add_run('Pass').bold = True
p.add_run(' | ').bold = True
p.add_run('Fail').bold = True
p.add_run().add_break()
def case_number_line(document, case_number):
p = document.add_paragraph()
p.add_run(f'Case Number: {case_number}')
def name_line(document, name):
p = document.add_paragraph()
p.add_run(f'Name: {name}')
def bio_line(document, sex, race, dob, age):
lines = ['Sex:\t', 'Race:\t', 'DOB:\t', 'Age:\t']
bio_list = [sex, race, dob, age]
p = document.add_paragraph()
for line, bio in zip(lines, bio_list):
p.add_run(f'{line}{bio}')
p.add_run().add_break()
def charge_line(document):
lines = ['Charge Type: State | Municipal',
'Description:', 'Court Date:', 'Citation#:']
p = document.add_paragraph()
for line in lines:
p.add_run(line)
p.add_run().add_break()
def address_line(document, address):
p = document.add_paragraph()
p.add_run(f'Address: {address}')
def phone_line(document, phone):
p = document.add_paragraph()
p.add_run(f'Phone: {phone}')
p.add_run().add_break()
p.add_run('Email:')
def background_line(document):
lines = ['Court Records:', 'Out of State Records:',
'Local Records:', 'Notes:']
for line in lines:
p = document.add_paragraph()
p.add_run(line).bold = True
def last_name_first(name):
suffix = ['II', 'IV', 'JR', 'SR']
name_list = name.split()
name_list.insert(0, name_list.pop())
if name_list[0][:2].upper() in suffix:
name_list.insert(0, name_list.pop())
name = "_".join(name_list)
return name
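# Illustrative example (not part of the original module):
#   last_name_first("John Smith Jr") -> "Smith_Jr_John"
# so the folder and file names built in save_facesheet below start with the
# surname and keep any suffix next to it.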
def save_facesheet(document, directory, name, district, district_folders):
name = last_name_first(name)
if district_folders:
path = f'{directory}/results/{district}/{name}/{name}.docx'
if not os.path.isdir(f'{directory}/results/{district}/{name}'):
os.makedirs(f'{directory}/results/{district}/{name}')
else:
path = f'{directory}/results/{name}/{name}.docx'
if not os.path.isdir(f'{directory}/results/{name}'):
os.makedirs(f'{directory}/results/{name}')
document.save(path)
def assemble_sheet(row_list, directory, district_folders):
info_dict = parse_row(row_list)
document = Document()
district_line(document, info_dict['district'])
approval_line(document)
case_number_line(document, info_dict['case_number'])
name_line(document, info_dict['name'])
bio_line(document, info_dict['sex'], info_dict['race'], info_dict['DOB'], info_dict['age'])
charge_line(document)
address_line(document, info_dict['address'])
phone_line(document, info_dict['phone'])
background_line(document)
save_facesheet(document, directory, info_dict['name'], info_dict['district'], district_folders)
def main():
pass
if __name__ == '__main__':
main()
| mit | 5,163,028,321,451,028,000 | 28.644737 | 99 | 0.583666 | false | 3.272331 | false | false | false |
wtgme/labeldoc2vec | gensim/models/labeldoc2vec.py | 1 | 45979 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Deep learning via the distributed memory and distributed bag of words models from
[1]_, using either hierarchical softmax or negative sampling [2]_ [3]_.
**Make sure you have a C compiler before installing gensim, to use optimized (compiled)
doc2vec training** (70x speedup [blog]_).
Initialize a model with e.g.::
>>> model = Doc2Vec(documents, size=100, window=8, min_count=5, workers=4)
Persist a model to disk with::
>>> model.save(fname)
>>> model = Doc2Vec.load(fname) # you can continue training with the loaded model!
The model can also be instantiated from an existing file on disk in the word2vec C format::
>>> model = Doc2Vec.load_word2vec_format('/tmp/vectors.txt', binary=False) # C text format
>>> model = Doc2Vec.load_word2vec_format('/tmp/vectors.bin', binary=True) # C binary format
.. [1] Quoc Le and Tomas Mikolov. Distributed Representations of Sentences and Documents. http://arxiv.org/pdf/1405.4053v2.pdf
.. [2] Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. Efficient Estimation of Word Representations in Vector Space. In Proceedings of Workshop at ICLR, 2013.
.. [3] Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg Corrado, and Jeffrey Dean. Distributed Representations of Words and Phrases and their Compositionality.
In Proceedings of NIPS, 2013.
.. [blog] Optimizing word2vec in gensim, http://radimrehurek.com/2013/09/word2vec-in-python-part-two-optimizing/
"""
import logging
import os
import warnings
try:
from queue import Queue
except ImportError:
from Queue import Queue
from collections import namedtuple, defaultdict
from timeit import default_timer
from numpy import zeros, exp, random, sum as np_sum, outer, add as np_add, concatenate, \
repeat as np_repeat, array, float32 as REAL, empty, ones, memmap as np_memmap, \
sqrt, newaxis, ndarray, dot, vstack, dtype, divide as np_divide
from gensim import utils, matutils # utility fnc for pickling, common scipy operations etc
from word2vec import Word2Vec, Vocab, train_cbow_pair, train_sg_pair, train_batch_sg
from six.moves import xrange, zip
from six import string_types, integer_types, itervalues
import random
logger = logging.getLogger(__name__)
try:
from gensim.models.labeldoc2vec_inner import train_label_document_dbow, train_label_document_dm, train_label_document_dm_concat
from gensim.models.word2vec_inner import FAST_VERSION # blas-adaptation shared from word2vec
logger.info('Fast version of {0} is being used'.format(__name__))
except ImportError:
logger.info('Slow version of {0} is being used'.format(__name__))
# failed... fall back to plain numpy (20-80x slower training than the above)
FAST_VERSION = -1
# def train_dl_pair(negative, label_index, context_index, alpha, learn_vectors=True, learn_hidden=True,
# context_vectors=None, context_locks=None, label_vectors=None, label_locks=None):
# print '-----------------------------------'
# print '------------Lower version------------'
# print '-----------------------------------'
# l1 = context_vectors[context_index] # input word (NN input/projection layer)
# lock_factor = context_locks[context_index]
#
# neu1e = zeros(l1.shape)
#
# # use this word (label = 1) + `negative` other random words not from this sentence (label = 0)
# neg_size = min(negative+1, len(label_vectors))
# word_indices = random.sample(range(len(label_vectors)), neg_size)
# final_labels = zeros(neg_size)
# if label_index not in word_indices:
# word_indices[0] = label_index
# final_labels[0] = 1
# else:
# index_pos = word_indices.index(label_index)
# final_labels[index_pos] = 1
# l2b = label_vectors[word_indices] # 2d matrix, k+1 x layer1_size
# fb = 1. / (1. + exp(-dot(l1, l2b.T))) # propagate hidden -> output
# gb = (final_labels - fb) * alpha # vector of error gradients multiplied by the learning rate
# if learn_hidden:
# label_vectors[word_indices] += outer(gb, l1) # learn hidden -> output
# neu1e += dot(gb, l2b) # save error
#
# if learn_vectors:
# # l1 += neu1e * lock_factor # learn input -> hidden (mutates model.syn0[word2.index], if that is l1)
# context_vectors[context_index] += neu1e * lock_factor # learn input -> hidden (mutates model.syn0[word2.index], if that is l1)
# return neu1e
#
#
# def train_label_document_dbow(model, doc_words, doctag_indexes, doclabel_indexes, alpha, work=None,
# train_words=False, learn_doctags=True, learn_words=True, learn_hidden=True,
# word_vectors=None, word_locks=None, doctag_vectors=None, doctag_locks=None,
# doclabel_vectors=None, doclabel_locks=None):
# """
# Update distributed bag of words model ("PV-DBOW") by training on a single document.
#
# Called internally from `Doc2Vec.train()` and `Doc2Vec.infer_vector()`.
#
# The document is provided as `doc_words`, a list of word tokens which are looked up
# in the model's vocab dictionary, and `doctag_indexes`, which provide indexes
# into the doctag_vectors array.
#
# If `train_words` is True, simultaneously train word-to-word (not just doc-to-word)
# examples, exactly as per Word2Vec skip-gram training. (Without this option,
# word vectors are neither consulted nor updated during DBOW doc vector training.)
#
# Any of `learn_doctags', `learn_words`, and `learn_hidden` may be set False to
# prevent learning-updates to those respective model weights, as if using the
# (partially-)frozen model to infer other compatible vectors.
#
# This is the non-optimized, Python version. If you have cython installed, gensim
# will use the optimized version from doc2vec_inner instead.
#
# """
# if doctag_vectors is None:
# doctag_vectors = model.docvecs.doctag_syn0
# if doctag_locks is None:
# doctag_locks = model.docvecs.doctag_syn0_lockf
#
# if doclabel_vectors is None:
# doclabel_vectors = model.labelvecs.doctag_syn0
# if doclabel_locks is None:
# doclabel_locks = model.labelvecs.doctag_syn0_lockf
#
# if train_words and learn_words:
# train_batch_sg(model, [doc_words], alpha, work)
# for doctag_index in doctag_indexes:
# for word in doc_words:
# train_sg_pair(model, word, doctag_index, alpha, learn_vectors=learn_doctags,
# learn_hidden=learn_hidden, context_vectors=doctag_vectors,
# context_locks=doctag_locks)
# for doclabel_index in doclabel_indexes:
# train_dl_pair(model.negative, doclabel_index, doctag_index, alpha, learn_vectors=learn_doctags,
# learn_hidden=learn_hidden, context_vectors=doctag_vectors,
# context_locks=doctag_locks, label_vectors=doclabel_vectors, label_locks=doclabel_locks)
#
#
# return len(doc_words)
#
# def train_label_document_dm(model, doc_words, doctag_indexes, doclabel_indexes, alpha, work=None, neu1=None,
# learn_doctags=True, learn_words=True, learn_hidden=True,
# word_vectors=None, word_locks=None, doctag_vectors=None, doctag_locks=None,
# doclabel_vectors=None, doclabel_locks=None):
# """
# Update distributed memory model ("PV-DM") by training on a single document.
#
# Called internally from `Doc2Vec.train()` and `Doc2Vec.infer_vector()`. This
# method implements the DM model with a projection (input) layer that is
# either the sum or mean of the context vectors, depending on the model's
# `dm_mean` configuration field. See `train_label_document_dm_concat()` for the DM
# model with a concatenated input layer.
#
# The document is provided as `doc_words`, a list of word tokens which are looked up
# in the model's vocab dictionary, and `doctag_indexes`, which provide indexes
# into the doctag_vectors array.
#
# Any of `learn_doctags', `learn_words`, and `learn_hidden` may be set False to
# prevent learning-updates to those respective model weights, as if using the
# (partially-)frozen model to infer other compatible vectors.
#
# This is the non-optimized, Python version. If you have a C compiler, gensim
# will use the optimized version from doc2vec_inner instead.
#
# """
# if word_vectors is None:
# word_vectors = model.syn0
# if word_locks is None:
# word_locks = model.syn0_lockf
#
# if doctag_vectors is None:
# doctag_vectors = model.docvecs.doctag_syn0
# if doctag_locks is None:
# doctag_locks = model.docvecs.doctag_syn0_lockf
#
# if doclabel_vectors is None:
# doclabel_vectors = model.labelvecs.doctag_syn0
# if doclabel_locks is None:
# doclabel_locks = model.labelvecs.doctag_syn0_lockf
#
# word_vocabs = [model.vocab[w] for w in doc_words if w in model.vocab and
# model.vocab[w].sample_int > model.random.rand() * 2**32]
#
# for pos, word in enumerate(word_vocabs):
# reduced_window = model.random.randint(model.window) # `b` in the original doc2vec code
# start = max(0, pos - model.window + reduced_window)
# window_pos = enumerate(word_vocabs[start:(pos + model.window + 1 - reduced_window)], start)
# word2_indexes = [word2.index for pos2, word2 in window_pos if pos2 != pos]
# l1 = np_sum(word_vectors[word2_indexes], axis=0) + np_sum(doctag_vectors[doctag_indexes], axis=0)
# count = len(word2_indexes) + len(doctag_indexes)
# if model.cbow_mean and count > 1 :
# l1 /= count
# neu1e = train_cbow_pair(model, word, word2_indexes, l1, alpha,
# learn_vectors=False, learn_hidden=learn_hidden)
# if not model.cbow_mean and count > 1:
# neu1e /= count
# if learn_doctags:
# for i in doctag_indexes:
# doctag_vectors[i] += neu1e * doctag_locks[i]
# if learn_words:
# for i in word2_indexes:
# word_vectors[i] += neu1e * word_locks[i]
# for doctag_index in doctag_indexes:
# for doclabel_index in doclabel_indexes:
# train_dl_pair(model.negative, doclabel_index, doctag_index, alpha, learn_vectors=learn_doctags,
# learn_hidden=learn_hidden, context_vectors=doctag_vectors,
# context_locks=doctag_locks, label_vectors=doclabel_vectors, label_locks=doclabel_locks)
#
# return len(word_vocabs)
#
# def train_label_document_dm_concat(model, doc_words, doctag_indexes, doclabel_indexes, alpha, work=None, neu1=None,
# learn_doctags=True, learn_words=True, learn_hidden=True,
# word_vectors=None, word_locks=None, doctag_vectors=None, doctag_locks=None,
# doclabel_vectors=None, doclabel_locks=None):
# """
# Update distributed memory model ("PV-DM") by training on a single document, using a
# concatenation of the context window word vectors (rather than a sum or average).
#
# Called internally from `Doc2Vec.train()` and `Doc2Vec.infer_vector()`.
#
# The document is provided as `doc_words`, a list of word tokens which are looked up
# in the model's vocab dictionary, and `doctag_indexes`, which provide indexes
# into the doctag_vectors array.
#
# Any of `learn_doctags', `learn_words`, and `learn_hidden` may be set False to
# prevent learning-updates to those respective model weights, as if using the
# (partially-)frozen model to infer other compatible vectors.
#
# This is the non-optimized, Python version. If you have a C compiler, gensim
# will use the optimized version from doc2vec_inner instead.
#
# """
# if word_vectors is None:
# word_vectors = model.syn0
# if word_locks is None:
# word_locks = model.syn0_lockf
#
# if doctag_vectors is None:
# doctag_vectors = model.docvecs.doctag_syn0
# if doctag_locks is None:
# doctag_locks = model.docvecs.doctag_syn0_lockf
#
# if doclabel_vectors is None:
# doclabel_vectors = model.labelvecs.doctag_syn0
# if doclabel_locks is None:
# doclabel_locks = model.labelvecs.doctag_syn0_lockf
#
# word_vocabs = [model.vocab[w] for w in doc_words if w in model.vocab and
# model.vocab[w].sample_int > model.random.rand() * 2**32]
# doctag_len = len(doctag_indexes)
# if doctag_len != model.dm_tag_count:
# return 0 # skip doc without expected number of doctag(s) (TODO: warn/pad?)
#
# null_word = model.vocab['\0']
# pre_pad_count = model.window
# post_pad_count = model.window
# padded_document_indexes = (
# (pre_pad_count * [null_word.index]) # pre-padding
# + [word.index for word in word_vocabs if word is not None] # elide out-of-Vocabulary words
# + (post_pad_count * [null_word.index]) # post-padding
# )
#
# for pos in range(pre_pad_count, len(padded_document_indexes) - post_pad_count):
# word_context_indexes = (
# padded_document_indexes[(pos - pre_pad_count): pos] # preceding words
# + padded_document_indexes[(pos + 1):(pos + 1 + post_pad_count)] # following words
# )
# word_context_len = len(word_context_indexes)
# predict_word = model.vocab[model.index2word[padded_document_indexes[pos]]]
# # numpy advanced-indexing copies; concatenate, flatten to 1d
# l1 = concatenate((doctag_vectors[doctag_indexes], word_vectors[word_context_indexes])).ravel()
# neu1e = train_cbow_pair(model, predict_word, None, l1, alpha,
# learn_hidden=learn_hidden, learn_vectors=False)
#
# # filter by locks and shape for addition to source vectors
# e_locks = concatenate((doctag_locks[doctag_indexes], word_locks[word_context_indexes]))
# neu1e_r = (neu1e.reshape(-1, model.vector_size)
# * np_repeat(e_locks, model.vector_size).reshape(-1, model.vector_size))
#
# if learn_doctags:
# np_add.at(doctag_vectors, doctag_indexes, neu1e_r[:doctag_len])
# if learn_words:
# np_add.at(word_vectors, word_context_indexes, neu1e_r[doctag_len:])
# for doctag_index in doctag_indexes:
# for doclabel_index in doclabel_indexes:
# train_dl_pair(model.negative, doclabel_index, doctag_index, alpha, learn_vectors=learn_doctags,
# learn_hidden=learn_hidden, context_vectors=doctag_vectors,
# context_locks=doctag_locks, label_vectors=doclabel_vectors, label_locks=doclabel_locks)
#
# return len(padded_document_indexes) - pre_pad_count - post_pad_count
class LabeledTaggedDocument(namedtuple('LabeledTaggedDocument', 'words tags labels')):
"""
    A single document, made up of `words` (a list of unicode string tokens),
    `tags` (a list of tokens) and `labels` (a list of label tokens, tracked by
    LabelDoc2Vec's `labelvecs`). Tags may be one or more unicode string
tokens, but typical practice (which will also be most memory-efficient) is
for the tags list to include a unique integer id as the only tag.
Replaces "sentence as a list of words" from Word2Vec.
"""
def __str__(self):
        return '%s(%s, %s, %s)' % (self.__class__.__name__, self.words, self.tags, self.labels)
class DocvecsArray(utils.SaveLoad):
"""
Default storage of doc vectors during/after training, in a numpy array.
As the 'docvecs' property of a Doc2Vec model, allows access and
comparison of document vectors.
>>> docvec = d2v_model.docvecs[99]
>>> docvec = d2v_model.docvecs['SENT_99'] # if string tag used in training
>>> sims = d2v_model.docvecs.most_similar(99)
>>> sims = d2v_model.docvecs.most_similar('SENT_99')
>>> sims = d2v_model.docvecs.most_similar(docvec)
If only plain int tags are presented during training, the dict (of
string tag -> index) and list (of index -> string tag) stay empty,
saving memory.
Supplying a mapfile_path (as by initializing a Doc2Vec model with a
'docvecs_mapfile' value) will use a pair of memory-mapped
files as the array backing for doctag_syn0/doctag_syn0_lockf values.
The Doc2Vec model automatically uses this class, but a future alternative
implementation, based on another persistence mechanism like LMDB, LevelDB,
or SQLite, should also be possible.
"""
def __init__(self, mapfile_path=None):
self.doctags = {} # string -> Doctag (only filled if necessary)
self.max_rawint = -1 # highest rawint-indexed doctag
self.offset2doctag = [] # int offset-past-(max_rawint+1) -> String (only filled if necessary)
self.count = 0
self.mapfile_path = mapfile_path
def note_doctag(self, key, document_no, document_length):
"""Note a document tag during initial corpus scan, for structure sizing."""
if isinstance(key, int):
self.max_rawint = max(self.max_rawint, key)
else:
if key in self.doctags:
self.doctags[key] = self.doctags[key].repeat(document_length)
else:
self.doctags[key] = Doctag(len(self.offset2doctag), document_length, 1)
self.offset2doctag.append(key)
self.count = self.max_rawint + 1 + len(self.offset2doctag)
def indexed_doctags(self, doctag_tokens):
"""Return indexes and backing-arrays used in training examples."""
return ([self._int_index(index) for index in doctag_tokens if index in self],
self.doctag_syn0, self.doctag_syn0_lockf, doctag_tokens)
def trained_item(self, indexed_tuple):
"""Persist any changes made to the given indexes (matching tuple previously
returned by indexed_doctags()); a no-op for this implementation"""
pass
def _int_index(self, index):
"""Return int index for either string or int index"""
if isinstance(index, int):
return index
else:
return self.max_rawint + 1 + self.doctags[index].offset
def _key_index(self, i_index, missing=None):
"""Return string index for given int index, if available"""
warnings.warn("use DocvecsArray.index_to_doctag", DeprecationWarning)
return self.index_to_doctag(i_index)
def index_to_doctag(self, i_index):
"""Return string key for given i_index, if available. Otherwise return raw int doctag (same int)."""
candidate_offset = i_index - self.max_rawint - 1
if 0 <= candidate_offset < len(self.offset2doctag):
return self.offset2doctag[candidate_offset]
else:
return i_index
def __getitem__(self, index):
"""
Accept a single key (int or string tag) or list of keys as input.
If a single string or int, return designated tag's vector
representation, as a 1D numpy array.
If a list, return designated tags' vector representations as a
2D numpy array: #tags x #vector_size.
"""
if isinstance(index, string_types + (int,)):
return self.doctag_syn0[self._int_index(index)]
return vstack([self[i] for i in index])
def __len__(self):
return self.count
def __contains__(self, index):
if isinstance(index, int):
return index < self.count
else:
return index in self.doctags
def borrow_from(self, other_docvecs):
self.count = other_docvecs.count
self.doctags = other_docvecs.doctags
self.offset2doctag = other_docvecs.offset2doctag
def clear_sims(self):
self.doctag_syn0norm = None
def estimated_lookup_memory(self):
"""Estimated memory for tag lookup; 0 if using pure int tags."""
return 60 * len(self.offset2doctag) + 140 * len(self.doctags)
def reset_weights(self, model):
length = max(len(self.doctags), self.count)
if self.mapfile_path:
self.doctag_syn0 = np_memmap(self.mapfile_path+'.doctag_syn0', dtype=REAL,
mode='w+', shape=(length, model.vector_size))
self.doctag_syn0_lockf = np_memmap(self.mapfile_path+'.doctag_syn0_lockf', dtype=REAL,
mode='w+', shape=(length,))
self.doctag_syn0_lockf.fill(1.0)
else:
self.doctag_syn0 = empty((length, model.vector_size), dtype=REAL)
self.doctag_syn0_lockf = ones((length,), dtype=REAL) # zeros suppress learning
for i in xrange(length):
# construct deterministic seed from index AND model seed
seed = "%d %s" % (model.seed, self.index_to_doctag(i))
self.doctag_syn0[i] = model.seeded_vector(seed)
def init_sims(self, replace=False):
"""
Precompute L2-normalized vectors.
If `replace` is set, forget the original vectors and only keep the normalized
ones = saves lots of memory!
Note that you **cannot continue training or inference** after doing a replace.
The model becomes effectively read-only = you can call `most_similar`, `similarity`
etc., but not `train` or `infer_vector`.
"""
if getattr(self, 'doctag_syn0norm', None) is None or replace:
logger.info("precomputing L2-norms of doc weight vectors")
if replace:
for i in xrange(self.doctag_syn0.shape[0]):
self.doctag_syn0[i, :] /= sqrt((self.doctag_syn0[i, :] ** 2).sum(-1))
self.doctag_syn0norm = self.doctag_syn0
else:
if self.mapfile_path:
self.doctag_syn0norm = np_memmap(
self.mapfile_path+'.doctag_syn0norm', dtype=REAL,
mode='w+', shape=self.doctag_syn0.shape)
else:
self.doctag_syn0norm = empty(self.doctag_syn0.shape, dtype=REAL)
np_divide(self.doctag_syn0, sqrt((self.doctag_syn0 ** 2).sum(-1))[..., newaxis], self.doctag_syn0norm)
def most_similar(self, positive=[], negative=[], topn=10, clip_start=0, clip_end=None, indexer=None):
"""
Find the top-N most similar docvecs known from training. Positive docs contribute
positively towards the similarity, negative docs negatively.
This method computes cosine similarity between a simple mean of the projection
weight vectors of the given docs. Docs may be specified as vectors, integer indexes
of trained docvecs, or if the documents were originally presented with string tags,
by the corresponding tags.
The 'clip_start' and 'clip_end' allow limiting results to a particular contiguous
range of the underlying doctag_syn0norm vectors. (This may be useful if the ordering
there was chosen to be significant, such as more popular tag IDs in lower indexes.)
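        Example (illustrative; assumes a trained model `m` whose documents were tagged
        with string tags such as 'SENT_0')::
            sims = m.docvecs.most_similar('SENT_0', topn=5)            # query by string tag
            sims = m.docvecs.most_similar(positive=[0], negative=[7])  # query by int indexes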
"""
self.init_sims()
clip_end = clip_end or len(self.doctag_syn0norm)
if isinstance(positive, string_types + integer_types) and not negative:
# allow calls like most_similar('dog'), as a shorthand for most_similar(['dog'])
positive = [positive]
# add weights for each doc, if not already present; default to 1.0 for positive and -1.0 for negative docs
positive = [
(doc, 1.0) if isinstance(doc, string_types + (ndarray,) + integer_types)
else doc for doc in positive
]
negative = [
(doc, -1.0) if isinstance(doc, string_types + (ndarray,) + integer_types)
else doc for doc in negative
]
# compute the weighted average of all docs
all_docs, mean = set(), []
for doc, weight in positive + negative:
if isinstance(doc, ndarray):
mean.append(weight * doc)
elif doc in self.doctags or doc < self.count:
mean.append(weight * self.doctag_syn0norm[self._int_index(doc)])
all_docs.add(self._int_index(doc))
else:
raise KeyError("doc '%s' not in trained set" % doc)
if not mean:
raise ValueError("cannot compute similarity with no input")
mean = matutils.unitvec(array(mean).mean(axis=0)).astype(REAL)
if indexer is not None:
return indexer.most_similar(mean, topn)
dists = dot(self.doctag_syn0norm[clip_start:clip_end], mean)
if not topn:
return dists
best = matutils.argsort(dists, topn=topn + len(all_docs), reverse=True)
# ignore (don't return) docs from the input
result = [(self.index_to_doctag(sim), float(dists[sim])) for sim in best if sim not in all_docs]
return result[:topn]
def doesnt_match(self, docs):
"""
Which doc from the given list doesn't go with the others?
(TODO: Accept vectors of out-of-training-set docs, as if from inference.)
"""
self.init_sims()
docs = [doc for doc in docs if doc in self.doctags or 0 <= doc < self.count] # filter out unknowns
logger.debug("using docs %s" % docs)
if not docs:
raise ValueError("cannot select a doc from an empty list")
        vectors = vstack([self.doctag_syn0norm[self._int_index(doc)] for doc in docs]).astype(REAL)
mean = matutils.unitvec(vectors.mean(axis=0)).astype(REAL)
dists = dot(vectors, mean)
return sorted(zip(dists, docs))[0][1]
def similarity(self, d1, d2):
"""
Compute cosine similarity between two docvecs in the trained set, specified by int index or
string tag. (TODO: Accept vectors of out-of-training-set docs, as if from inference.)
"""
return dot(matutils.unitvec(self[d1]), matutils.unitvec(self[d2]))
def n_similarity(self, ds1, ds2):
"""
Compute cosine similarity between two sets of docvecs from the trained set, specified by int
index or string tag. (TODO: Accept vectors of out-of-training-set docs, as if from inference.)
"""
v1 = [self[doc] for doc in ds1]
v2 = [self[doc] for doc in ds2]
return dot(matutils.unitvec(array(v1).mean(axis=0)), matutils.unitvec(array(v2).mean(axis=0)))
def similarity_unseen_docs(self, model, doc_words1, doc_words2, alpha=0.1, min_alpha=0.0001, steps=5):
"""
Compute cosine similarity between two post-bulk out of training documents.
Document should be a list of (word) tokens.
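        Example (illustrative; assumes a trained model `model`)::
            sim = model.docvecs.similarity_unseen_docs(model, ['deep', 'learning'], ['machine', 'learning'])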
"""
d1 = model.infer_vector(doc_words=doc_words1, alpha=alpha, min_alpha=min_alpha, steps=steps)
d2 = model.infer_vector(doc_words=doc_words2, alpha=alpha, min_alpha=min_alpha, steps=steps)
return dot(matutils.unitvec(d1), matutils.unitvec(d2))
class Doctag(namedtuple('Doctag', 'offset, word_count, doc_count')):
"""A string document tag discovered during the initial vocabulary
scan. (The document-vector equivalent of a Vocab object.)
Will not be used if all presented document tags are ints.
The offset is only the true index into the doctags_syn0/doctags_syn0_lockf
if-and-only-if no raw-int tags were used. If any raw-int tags were used,
string Doctag vectors begin at index (max_rawint + 1), so the true index is
    (max_rawint + 1 + offset). See also DocvecsArray.index_to_doctag().
"""
__slots__ = ()
def repeat(self, word_count):
return self._replace(word_count=self.word_count + word_count, doc_count=self.doc_count + 1)
class LabelDoc2Vec(Word2Vec):
"""Class for training, using and evaluating neural networks described in http://arxiv.org/pdf/1405.4053v2.pdf"""
def __init__(self, documents=None, size=300, alpha=0.025, window=8, min_count=5,
max_vocab_size=None, sample=0, seed=1, workers=1, min_alpha=0.0001,
dm=1, hs=1, negative=0, dbow_words=0, dm_mean=0, dm_concat=0, dm_tag_count=1,
docvecs=None, docvecs_mapfile=None, labelvecs=None, labelvecs_mapfile=None,
comment=None, trim_rule=None, **kwargs):
"""
Initialize the model from an iterable of `documents`. Each document is a
LabeledTaggedDocument object that will be used for training.
The `documents` iterable can be simply a list of LabeledTaggedDocument elements, but for larger corpora,
consider an iterable that streams the documents directly from disk/network.
If you don't supply `documents`, the model is left uninitialized -- use if
you plan to initialize it in some other way.
`dm` defines the training algorithm. By default (`dm=1`), 'distributed memory' (PV-DM) is used.
Otherwise, `distributed bag of words` (PV-DBOW) is employed.
`size` is the dimensionality of the feature vectors.
`window` is the maximum distance between the predicted word and context words used for prediction
within a document.
        `alpha` is the initial learning rate (will linearly drop to `min_alpha` as training progresses).
`seed` = for the random number generator.
Note that for a fully deterministically-reproducible run, you must also limit the model to
a single worker thread, to eliminate ordering jitter from OS thread scheduling. (In Python
3, reproducibility between interpreter launches also requires use of the PYTHONHASHSEED
environment variable to control hash randomization.)
`min_count` = ignore all words with total frequency lower than this.
`max_vocab_size` = limit RAM during vocabulary building; if there are more unique
words than this, then prune the infrequent ones. Every 10 million word types
need about 1GB of RAM. Set to `None` for no limit (default).
`sample` = threshold for configuring which higher-frequency words are randomly downsampled;
default is 0 (off), useful value is 1e-5.
`workers` = use this many worker threads to train the model (=faster training with multicore machines).
`iter` = number of iterations (epochs) over the corpus. The default inherited from Word2Vec is 5,
but values of 10 or 20 are common in published 'Paragraph Vector' experiments.
        `hs` = if 1 (default), hierarchical softmax will be used for model training (else set to 0).
`negative` = if > 0, negative sampling will be used, the int for negative
specifies how many "noise words" should be drawn (usually between 5-20).
`dm_mean` = if 0 (default), use the sum of the context word vectors. If 1, use the mean.
Only applies when dm is used in non-concatenative mode.
`dm_concat` = if 1, use concatenation of context vectors rather than sum/average;
default is 0 (off). Note concatenation results in a much-larger model, as the input
        is no longer the size of one (sampled or arithmetically combined) word vector, but the
size of the tag(s) and all words in the context strung together.
`dm_tag_count` = expected constant number of document tags per document, when using
dm_concat mode; default is 1.
`dbow_words` if set to 1 trains word-vectors (in skip-gram fashion) simultaneous with DBOW
doc-vector training; default is 0 (faster training of doc-vectors only).
`trim_rule` = vocabulary trimming rule, specifies whether certain words should remain
in the vocabulary, be trimmed away, or handled using the default (discard if word count < min_count).
Can be None (min_count will be used), or a callable that accepts parameters (word, count, min_count) and
        returns either utils.RULE_DISCARD, utils.RULE_KEEP or utils.RULE_DEFAULT.
        Note: The rule, if given, is only used to prune the vocabulary during build_vocab() and is not stored as part
of the model.
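        Example (illustrative only; any iterable of LabeledTaggedDocument objects works)::
            documents = [LabeledTaggedDocument(['human', 'interface'], ['DOC_0'], ['ui'])]
            model = LabelDoc2Vec(documents, size=100, window=5, min_count=1, negative=5, dm=1, workers=2)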
"""
super(LabelDoc2Vec, self).__init__(
size=size, alpha=alpha, window=window, min_count=min_count, max_vocab_size=max_vocab_size,
sample=sample, seed=seed, workers=workers, min_alpha=min_alpha,
sg=(1+dm) % 2, hs=hs, negative=negative, cbow_mean=dm_mean,
null_word=dm_concat, **kwargs)
self.dbow_words = dbow_words
self.dm_concat = dm_concat
self.dm_tag_count = dm_tag_count
if self.dm and self.dm_concat:
self.layer1_size = (self.dm_tag_count + (2 * self.window)) * self.vector_size
else:
self.layer1_size = size
self.docvecs = docvecs or DocvecsArray(docvecs_mapfile)
self.labelvecs = labelvecs or DocvecsArray(labelvecs_mapfile)
self.comment = comment
if documents is not None:
self.build_vocab(documents, trim_rule=trim_rule)
self.train(documents)
@property
def dm(self):
return not self.sg # opposite of SG
@property
def dbow(self):
return self.sg # same as SG
def clear_sims(self):
super(LabelDoc2Vec, self).clear_sims()
self.docvecs.clear_sims()
self.labelvecs.clear_sims()
def reset_weights(self):
if self.dm and self.dm_concat:
# expand l1 size to match concatenated tags+words length
self.layer1_size = (self.dm_tag_count + (2 * self.window)) * self.vector_size
logger.info("using concatenative %d-dimensional layer1" % (self.layer1_size))
super(LabelDoc2Vec, self).reset_weights()
self.docvecs.reset_weights(self)
self.labelvecs.reset_weights(self)
def reset_from(self, other_model):
"""Reuse shareable structures from other_model."""
self.docvecs.borrow_from(other_model.docvecs)
self.labelvecs.borrow_from(other_model.labelvecs)
super(LabelDoc2Vec, self).reset_from(other_model)
def scan_vocab(self, documents, progress_per=10000, trim_rule=None, update=False):
logger.info("collecting all words and their counts")
document_no = -1
total_words = 0
min_reduce = 1
interval_start = default_timer() - 0.00001 # guard against next sample being identical
interval_count = 0
checked_string_types = 0
vocab = defaultdict(int)
for document_no, document in enumerate(documents):
if not checked_string_types:
if isinstance(document.words, string_types):
logger.warn("Each 'words' should be a list of words (usually unicode strings)."
"First 'words' here is instead plain %s." % type(document.words))
checked_string_types += 1
if document_no % progress_per == 0:
interval_rate = (total_words - interval_count) / (default_timer() - interval_start)
logger.info("PROGRESS: at example #%i, processed %i words (%i/s), %i word types, %i tags, %i labels",
document_no, total_words, interval_rate, len(vocab), len(self.docvecs), len(self.labelvecs))
interval_start = default_timer()
interval_count = total_words
document_length = len(document.words)
for tag in document.tags:
self.docvecs.note_doctag(tag, document_no, document_length)
for label in document.labels:
self.labelvecs.note_doctag(label, document_no, document_length)
for word in document.words:
vocab[word] += 1
total_words += len(document.words)
if self.max_vocab_size and len(vocab) > self.max_vocab_size:
utils.prune_vocab(vocab, min_reduce, trim_rule=trim_rule)
min_reduce += 1
logger.info("collected %i word types and %i unique tags and %i unique labels from a corpus of %i examples and %i words",
len(vocab), len(self.docvecs), len(self.labelvecs), document_no + 1, total_words)
self.corpus_count = document_no + 1
self.raw_vocab = vocab
def _do_train_job(self, job, alpha, inits):
work, neu1 = inits
tally = 0
for doc in job:
indexed_doctags = self.docvecs.indexed_doctags(doc.tags)
indexed_doclabels = self.labelvecs.indexed_doctags(doc.labels)
doctag_indexes, doctag_vectors, doctag_locks, ignored = indexed_doctags
doclabel_indexes, doclabel_vectors, doclabel_locks, ignored = indexed_doclabels
if self.sg:
tally += train_label_document_dbow(self, doc.words, doctag_indexes, doclabel_indexes, alpha, work,
train_words=self.dbow_words, doctag_vectors=doctag_vectors,
doctag_locks=doctag_locks, doclabel_vectors=doclabel_vectors,
doclabel_locks=doclabel_locks)
elif self.dm_concat:
tally += train_label_document_dm_concat(self, doc.words, doctag_indexes, doclabel_indexes, alpha, work, neu1,
doctag_vectors=doctag_vectors, doctag_locks=doctag_locks,
doclabel_vectors=doclabel_vectors, doclabel_locks=doclabel_locks)
else:
tally += train_label_document_dm(self, doc.words, doctag_indexes, doclabel_indexes, alpha, work, neu1,
doctag_vectors=doctag_vectors, doctag_locks=doctag_locks,
doclabel_vectors=doclabel_vectors, doclabel_locks=doclabel_locks)
self.docvecs.trained_item(indexed_doctags)
self.labelvecs.trained_item(indexed_doclabels)
return tally, self._raw_word_count(job)
def _raw_word_count(self, job):
"""Return the number of words in a given job."""
return sum(len(sentence.words) for sentence in job)
def infer_vector_label(self, doc_words, alpha=0.1, min_alpha=0.0001, steps=5):
"""
Infer a vector for given post-bulk training document.
Document should be a list of (word) tokens.
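        Example (illustrative; assumes a trained model `m`)::
            vec = m.infer_vector_label(['system', 'response', 'time'], alpha=0.1, steps=5)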
"""
doctag_vectors = empty((1, self.vector_size), dtype=REAL)
doctag_vectors[0] = self.seeded_vector(' '.join(doc_words))
doctag_locks = ones(1, dtype=REAL)
doctag_indexes = [0]
doclabel_vectors = empty((1, self.vector_size), dtype=REAL)
doclabel_vectors[0] = self.seeded_vector(' '.join(doc_words))
doclabel_locks = ones(1, dtype=REAL)
doclabel_indexes = [0]
work = zeros(self.layer1_size, dtype=REAL)
if not self.sg:
neu1 = matutils.zeros_aligned(self.layer1_size, dtype=REAL)
for i in range(steps):
if self.sg:
train_label_document_dbow(self, doc_words, doctag_indexes, doclabel_indexes, alpha, work,
learn_words=False, learn_hidden=False,
doctag_vectors=doctag_vectors, doctag_locks=doctag_locks,
doclabel_vectors=doclabel_vectors, doclabel_locks=doclabel_locks)
elif self.dm_concat:
train_label_document_dm_concat(self, doc_words, doctag_indexes, doclabel_indexes, alpha, work, neu1,
learn_words=False, learn_hidden=False,
doctag_vectors=doctag_vectors, doctag_locks=doctag_locks,
doclabel_vectors=doclabel_vectors, doclabel_locks=doclabel_locks)
else:
train_label_document_dm(self, doc_words, doctag_indexes, doclabel_indexes, alpha, work, neu1,
learn_words=False, learn_hidden=False,
doctag_vectors=doctag_vectors, doctag_locks=doctag_locks,
doclabel_vectors=doclabel_vectors, doclabel_locks=doclabel_locks)
alpha = ((alpha - min_alpha) / (steps - i)) + min_alpha
return doctag_vectors[0]
def estimate_memory(self, vocab_size=None, report=None):
"""Estimate required memory for a model using current settings."""
report = report or {}
report['doctag_lookup'] = self.docvecs.estimated_lookup_memory()
report['doctag_syn0'] = self.docvecs.count * self.vector_size * dtype(REAL).itemsize
report['doclabel_lookup'] = self.labelvecs.estimated_lookup_memory()
report['doclabel_syn0'] = self.labelvecs.count * self.vector_size * dtype(REAL).itemsize
return super(LabelDoc2Vec, self).estimate_memory(vocab_size, report=report)
def __str__(self):
"""Abbreviated name reflecting major configuration paramaters."""
segments = []
if self.comment:
segments.append('"%s"' % self.comment)
if self.sg:
if self.dbow_words:
segments.append('dbow+w') # also training words
else:
segments.append('dbow') # PV-DBOW (skip-gram-style)
else: # PV-DM...
if self.dm_concat:
segments.append('dm/c') # ...with concatenative context layer
else:
if self.cbow_mean:
segments.append('dm/m')
else:
segments.append('dm/s')
segments.append('d%d' % self.vector_size) # dimensions
if self.negative:
segments.append('n%d' % self.negative) # negative samples
if self.hs:
segments.append('hs')
if not self.sg or (self.sg and self.dbow_words):
segments.append('w%d' % self.window) # window size, when relevant
if self.min_count > 1:
segments.append('mc%d' % self.min_count)
if self.sample > 0:
segments.append('s%g' % self.sample)
if self.workers > 1:
segments.append('t%d' % self.workers)
return '%s(%s)' % (self.__class__.__name__, ','.join(segments))
class TaggedBrownCorpus(object):
"""Iterate over documents from the Brown corpus (part of NLTK data), yielding
each document out as a LabeledTaggedDocument object."""
def __init__(self, dirname):
self.dirname = dirname
def __iter__(self):
for fname in os.listdir(self.dirname):
fname = os.path.join(self.dirname, fname)
if not os.path.isfile(fname):
continue
for item_no, line in enumerate(utils.smart_open(fname)):
line = utils.to_unicode(line)
# each file line is a single document in the Brown corpus
# each token is WORD/POS_TAG
token_tags = [t.split('/') for t in line.split() if len(t.split('/')) == 2]
# ignore words with non-alphabetic tags like ",", "!" etc (punctuation, weird stuff)
words = ["%s/%s" % (token.lower(), tag[:2]) for token, tag in token_tags if tag[:2].isalpha()]
if not words: # don't bother sending out empty documents
continue
yield LabeledTaggedDocument(words, ['%s_SENT_%s' % (fname, item_no)], [])
class TaggedLineDocument(object):
"""Simple format: one document = one line = one LabeledTaggedDocument object.
Words are expected to be already preprocessed and separated by whitespace,
tags are constructed automatically from the document line number."""
def __init__(self, source):
"""
`source` can be either a string (filename) or a file object.
Example::
documents = TaggedLineDocument('myfile.txt')
Or for compressed files::
documents = TaggedLineDocument('compressed_text.txt.bz2')
documents = TaggedLineDocument('compressed_text.txt.gz')
"""
self.source = source
def __iter__(self):
"""Iterate through the lines in the source."""
try:
# Assume it is a file-like object and try treating it as such
# Things that don't have seek will trigger an exception
self.source.seek(0)
for item_no, line in enumerate(self.source):
yield LabeledTaggedDocument(utils.to_unicode(line).split(), [item_no], [])
except AttributeError:
# If it didn't work like a file, use it as a string filename
with utils.smart_open(self.source) as fin:
for item_no, line in enumerate(fin):
yield LabeledTaggedDocument(utils.to_unicode(line).split(), [item_no], [])
| lgpl-2.1 | -8,098,908,495,692,754,000 | 48.760823 | 167 | 0.614911 | false | 3.679792 | false | false | false |
grahamking/goodenergy | campaign/management/commands/ge_copy_campaign.py | 1 | 4417 | """Copies the contents (indicators and actions) of one campaign into another
"""
# Copyright 2010,2011 Good Energy Research Inc. <[email protected]>, <[email protected]>
#
# This file is part of Good Energy.
#
# Good Energy is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Good Energy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Good Energy. If not, see <http://www.gnu.org/licenses/>.
#
# Disable the pylint check for dynamically added attributes. This happens a lot
# with Django DB model usage.
# pylint: disable-msg=E1101
# pylint: disable-msg=E1103
from django.core.management.base import BaseCommand, CommandError
from profile.models import Profile
from campaign.models import Campaign
from indicator.models import IndicatorLikert, Option
from action.models import Action
def copy_indicators(from_campaign, to_campaign):
"""Copies indicators and options from from_campaign to to_campaign"""
for indicator in IndicatorLikert.objects.filter(campaign=from_campaign):
new_indicator, is_created = IndicatorLikert.objects.get_or_create(
campaign = to_campaign,
position = indicator.position,
name = indicator.name,
question = indicator.question,
is_synthetic = indicator.is_synthetic,
description = indicator.description)
for option in indicator.option_set.all():
Option.objects.get_or_create(
indicator = new_indicator,
value = option.value,
position = option.position)
if is_created:
print('Created indicator %s' % new_indicator)
def copy_actions(from_campaign, to_campaign, action_owner):
"""Copies Actions from from_campaign to to_campaign"""
for action in from_campaign.action_set.all():
new_action, is_created = Action.objects.get_or_create(
campaign = to_campaign,
title = action.title,
description = action.description,
learn_more = action.learn_more,
created_by = action_owner)
if is_created:
print('Created action %s' % new_action)
class Command(BaseCommand):
"""Copies the contents (indicators and actions) of one campaign into another"""
option_list = BaseCommand.option_list
help = 'Copies the contents (indicators and actions) from one campaign into another'
args = '<from_campaign_id> <to_campaign_id> <action_owner_username>'
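    # Example invocation (illustrative ids/username): python manage.py ge_copy_campaign 3 7 ge_admin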
def handle(
self,
from_campaign_id=None,
to_campaign_id=None,
action_username=None,
*args,
**options):
"""Main entry point for command"""
if not from_campaign_id or not to_campaign_id or not action_username:
raise CommandError('Usage is ge_copy_campaign %s' % self.args)
try:
from_campaign = Campaign.objects.get(id=from_campaign_id)
except Campaign.DoesNotExist:
raise CommandError('FROM Campaign with id %s not found' % from_campaign_id)
try:
to_campaign = Campaign.objects.get(id=to_campaign_id)
except Campaign.DoesNotExist:
raise CommandError('TO Campaign with id %s not found' % to_campaign_id)
try:
action_user = Profile.objects.get(user__username=action_username)
except Profile.DoesNotExist:
raise CommandError("Profile for username %s not found" % action_username)
print('Copying contents of {from_c} into {to_c}.'.\
format(from_c=from_campaign, to_c = to_campaign))
confirm = raw_input('Continue? [y|n]')
if confirm != 'y':
raise CommandError('Abort')
copy_indicators(from_campaign, to_campaign)
copy_actions(from_campaign, to_campaign, action_user)
| agpl-3.0 | -979,541,411,536,322,800 | 37.745614 | 97 | 0.644329 | false | 4.128037 | false | false | false |
dpnishant/appmon | tracer/android_tracer.py | 1 | 12107 | #!/usr/bin/python
###
# Copyright (c) 2016 Nishant Das Patnaik.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
import os, sys, frida, re, argparse, codecs, json
from termcolor import colored
print("""
___ .______ .______ .___ ___. ______ .__ __.
/ \ | _ \ | _ \ | \/ | / __ \ | \ | |
/ ^ \ | |_) | | |_) | | \ / | | | | | | \| |
/ /_\ \ | ___/ | ___/ | |\/| | | | | | | . ` |
/ _____ \ | | | | | | | | | `--" | | |\ |
/__/ \__\ | _| | _| |__| |__| \______/ |__| \__|
github.com/dpnishant
""")
parser = argparse.ArgumentParser()
parser.add_argument("-a", action="store", dest="app_name", default="",
help='''Process Name;
Accepts "com.twitter.android"''')
parser.add_argument("-c", action="store", dest="class_name", default="",
help='''Class Name;
Example: "OpenSSL*SHA*"''')
parser.add_argument("-m", action="store", dest="method_name", default="",
help='''Method Name;
Example: "*digest*";''')
parser.add_argument("-v", action="version", version="AppMon Android Method Tracer v0.2, Copyright 2016 Nishant Das Patnaik")
if len(sys.argv) < 2:
parser.print_help()
sys.exit(1)
results = parser.parse_args()
appName = results.app_name
className = results.class_name
classCandidates = []
method = results.method_name
if len(className) >= 1 and len(className) < 3:
print(colored("[ERROR] Class Name should be at least 3 characters", "red"))
sys.exit(1)
def on_message(message, data):
if message["type"] == "send":
payload = json.loads(message["payload"])
if payload["type"] == "classEnum":
if "overloads" in payload and "className" in payload and "methodName" in payload and "argCount" in payload:
classCandidates.append([ payload["className"], payload["overloads"], payload["methodName"], payload["argCount"] ])
print('[FOUND] "%s" in "%s"' % (colored(payload['methodName'], "yellow", attrs=["bold"]), colored(payload['className'], "magenta", attrs=["bold"])))
elif "className" in payload and not "overloads" in payload and not "methodName" in payload:
print('[FOUND] "%s"' % colored(payload['className'], "magenta", attrs=["bold"]))
elif payload['type'] == "methodTrace":
print("%(methodName)s \n\tCalled by: %(caller)s \n\tDefined at: %(className)s [%(overloadIndex)s]\n" % { "methodName": colored(payload['methodName'], "green", attrs=["bold"]), "caller": colored(payload['caller'].split("class ")[1], "blue", attrs=["bold"]), "className": colored(payload['className'], "magenta", attrs=["bold"]), "overloadIndex": colored(payload['overloadIndex'], "red", attrs=["bold"]) })
def build_search_script(className, method):
if className and className != "" and not method or method == "":
script = """Java.perform(function (){
function wildcard_search(string, search) {
var prevIndex = -1,
array = search.split('*'),
result = true;
for (var i = 0; i < array.length && result; i++) {
var index = string.indexOf(array[i]);
if (index == -1 || index < prevIndex) {
return false;
}
}
return result;
}
var classes = Java.enumerateLoadedClassesSync();
classes = classes.sort();
for(var i=0; i < classes.length; i++ ) {
if(wildcard_search(classes[i], '%(className)s')) {
var payload = {
"type": "classEnum",
"className": classes[i].replace(/\//gi, '.').replace(/\[/gi, '').replace(/^L/, '').replace(/;$/, '')
};
send(JSON.stringify(payload));
}
}
});
""" % { "className": className }
else:
script = """Java.perform(function() {
function wildcard_search(string, search) {
var prevIndex = -1,
array = search.split('*'),
result = true;
for (var i = 0; i < array.length && result; i++) {
var index = string.indexOf(array[i]);
if (index == -1 || index < prevIndex) {
return false;
}
}
return result;
}
Java.enumerateLoadedClasses({
onMatch: function(name) {
name = name.replace(/\//gi, '.').replace(/\[/gi, '').replace(/^L/, '').replace(/;$/, '');
if (wildcard_search(name, '%(className)s')) {
try {
var handle = Java.use(name);
var currentMethods = handle.class.getMethods();
for (var i = 0; i < currentMethods.length; i++) {
var argsCount = currentMethods[i].toString().split('(')[1].split(')')[0].split(',').length;
var items = currentMethods[i].toString().split('(')[0].split(' ');
var currentMethodName = items[items.length - 1];
currentMethodName = currentMethodName.replace(name.toString(), '');
if (currentMethodName.split('.').length-1 > 1) {
continue
} else {
currentMethodName = currentMethodName.replace('.', '');
}
if (wildcard_search(currentMethodName, '%(methodName)s')) {
if (currentMethodName in handle) {
var overload_count = handle[currentMethodName].overloads.length;
var payload = {
"type": "classEnum",
"className": name,
"overloads": overload_count,
"methodName": currentMethodName,
"argCount": argsCount
};
send(JSON.stringify(payload));
} else {
console.log(currentMethodName + ' not found in ' + name);
}
}
}
} catch (e) { console.log(e.stack); }
}
},
onComplete: function() {}
});
});
""" % { "className": className, "methodName": method }
return script
def begin_instrumentation(appName, script_source):
device = frida.get_usb_device()
try:
session = device.attach(appName)
except Exception as e:
print(colored('[ERROR]: ' + str(e), "red"))
sys.exit()
try:
script = session.create_script(script_source)
script.on('message', on_message)
script.load()
except Exception as e:
print(colored('[ERROR]: ' + str(e), "red"))
sys.exit()
def enumerate_overloads(overloadIndx, currentClassName, overload_count, methodName):
generated_overloads = []
template ="""
var class_%(overloadIndx)s = "%(currentClassName)s";
var c_%(overloadIndx)s = Java.use(class_%(overloadIndx)s);
c_%(overloadIndx)s.%(methodName)s.overloads[i].implementation = function(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15) {
var methodName = c_%(overloadIndx)s.%(methodName)s.overloads[i].toString().split("function")[1].split("{")[0].trim().split("(")[0];
var argTypes = getType(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15);
var args = "";
for (var i = 0; i < argTypes.length; i++) {
if (i != argTypes.length - 1) {
args += argTypes[i] + " arg" + i + ", ";
} else {
args += argTypes[i] + " arg" + i;
}
}
var methodName = methodName + "(" + args + ")";
var payload = {
"type": "methodTrace",
"methodName": methodName,
"className": class_%(overloadIndx)s,
"overloadIndex": ovrldindexplaceholder,
"caller": this.getClass().toString()
};
send(JSON.stringify(payload));
return this.%(methodName)s.overloads[i].apply(this, arguments);
};""" % { "overloadIndx": overloadIndx, "currentClassName": currentClassName, "methodName": methodName }
for index in range(0, overload_count):
argString = ""
current_template = ""
current_overload = ""
current_template = template
current_template = current_template.replace("overloads[i]", "overloads[" + str(index) +"]")
current_template = current_template.replace("ovrldindexplaceholder", str(index))
generated_overloads.append(current_template)
return generated_overloads
def build_trace_script(candidates, methodName):
all_overloads = ""
generated_trace_scripts = []
for candidate in candidates:
overloadIndx = str(candidates.index(candidate))
for overload_variant in enumerate_overloads(overloadIndx, candidate[0], candidate[1], candidate[2]):
if overload_variant == "":
continue
all_overloads += overload_variant
tracer_template = """'use strict';
var checkType = function(arg) {
var type = "";
if (arg.getClass) {
type = arg.getClass().toString().split("class ")[1];
} else if (typeof arg === "string") {
type = "String";
} else if (typeof arg === "number") {
type = "Number";
} else if (typeof arg === "boolean") {
type = "Boolean";
} else if (arg.length) {
type = "Array";
} else if (typeof arg === "object") {
type = "Object";
} else {
type = typeof arg;
}
return type;
}
var getType = function(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15) {
var type = [];
if (a1) {
type.push(checkType(a1));
}
if (a2) {
type.push(checkType(a2));
}
if (a3) {
type.push(checkType(a3));
}
if (a4) {
type.push(checkType(a4));
}
if (a5) {
type.push(checkType(a5));
}
if (a6) {
type.push(checkType(a6));
}
if (a7) {
type.push(checkType(a7));
}
if (a8) {
type.push(checkType(a8));
}
if (a9) {
type.push(checkType(a9));
}
if (a10) {
type.push(checkType(a10));
}
if (a11) {
type.push(checkType(a11));
}
if (a12) {
type.push(checkType(a12));
}
if (a13) {
type.push(checkType(a13));
}
if (a14) {
type.push(checkType(a14));
}
if (a15) {
type.push(checkType(a15));
}
return type;
}
Java.perform(function () {
%s
});
""" % (all_overloads)
generated_trace_scripts.append(tracer_template)
return generated_trace_scripts
def generate_tracer_js(scriptName, txtScript):
script_dir = "__handlers__"
if not os.path.exists(script_dir):
os.makedirs(script_dir)
tracer_file_path = os.path.join(script_dir, scriptName + ".js")
with codecs.open(tracer_file_path, 'w', 'utf-8') as f:
f.write(txtScript)
return tracer_file_path
if not method or method == "" and not className or className == "":
print(colored('Enumerating loaded classes...', "green", attrs=["bold"]))
else:
print('Searching method "%s" in loaded classes...' % colored(method, "green", attrs=["bold"]))
begin_instrumentation(appName, build_search_script(className, method))
if len(classCandidates) > 0:
tracer_script_source = ""
for script in build_trace_script(classCandidates, method):
tracer_script_source += script
begin_instrumentation(appName, tracer_script_source)
print(colored("\nTracing methods...\n", "blue", attrs=["bold"]))
try:
sys.stdin.readlines()
except KeyboardInterrupt:
sys.exit()
else:
print(colored('Didn\'t find anything...quitting!', "red"))
sys.exit() | apache-2.0 | 7,836,066,327,096,006,000 | 36.255385 | 416 | 0.550508 | false | 3.500145 | false | false | false |
andrew-lundgren/gwpy | gwpy/cli/spectrum.py | 1 | 5127 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) Joseph Areeda (2015)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
#
""" Spectrum plots
"""
from cliproduct import CliProduct
class Spectrum(CliProduct):
def get_action(self):
"""Return the string used as "action" on command line."""
return 'spectrum'
def init_cli(self, parser):
"""Set up the argument list for this product"""
self.arg_chan1(parser)
self.arg_freq(parser)
self.arg_ax_xlf(parser)
self.arg_ax_logy(parser)
self.arg_plot(parser)
self.xaxis_is_freq = True
return
def get_ylabel(self, args):
"""Text for y-axis label"""
if args.nology:
ylabel = r'$\mathrm{log_{10} ASD}$ ' \
r'$\left( \frac{\mathrm{Counts}}' \
r'{\sqrt{\mathrm{Hz}}}\right)$'
else:
ylabel = r'$\mathrm{ASD}$ $\left( \frac{\mathrm{Counts}}' \
r'{\sqrt{\mathrm{Hz}}}\right)$'
return ylabel
def get_title(self):
"""Start of default super title, first channel is appended to it"""
return 'Spectrum: '
def get_xlabel(self):
xlabel = 'Frequency (Hz)'
return xlabel
def freq_is_y(self):
"""This plot puts frequency on the y-axis of the graph"""
return False
def gen_plot(self, arg_list):
"""Generate the plot from time series and arguments"""
self.is_freq_plot = True
fftlen = 1.0
if arg_list.secpfft:
fftlen = float(arg_list.secpfft)
self.secpfft = fftlen
ovlap = 0.5
if arg_list.overlap:
ovlap = float(arg_list.overlap)
self.overlap = ovlap
self.log(2, "Calculating spectrum secpfft: %.2f, overlap: %.2f" %
(fftlen, ovlap))
spectra = []
# calculate and plot the first spectrum
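        # note: TimeSeries.asd(fftlength, overlap) expects the overlap in seconds,
        # hence fftlen * ovlap (a fraction of the FFT length) below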
spectrum = self.timeseries[0].asd(fftlen, fftlen*ovlap)
spectra.append(spectrum)
fs = self.timeseries[0].sample_rate.value
self.fmin = 1/self.secpfft
self.fmax = fs/2
self.ymin = spectrum.value.min()
self.ymax = spectrum.value.max()
label = self.timeseries[0].channel.name
if len(self.start_list) > 1:
label += ", %s" % self.timeseries[0].epoch.gps
spectrum.name = label
self.plot = spectrum.plot()
# if we have more time series calculate and add to the first plot
if len(self.timeseries) > 1:
for idx in range(1, len(self.timeseries)):
specb = self.timeseries[idx].asd(fftlen, ovlap*fftlen)
spectra.append(specb)
fsb = self.timeseries[idx].sample_rate.value
self.fmax = max(self.fmax, fsb/2)
self.ymin = min(self.ymin, specb.value.min())
self.ymax = max(self.ymax, specb.value.max())
label = self.timeseries[idx].channel.name
if len(self.start_list) > 1:
label += ", %s" % self.timeseries[idx].epoch.gps
specb.name = label
self.plot.add_frequencyseries(specb)
self.log(2, ('Frequency range: [%f, %f]' % (self.fmin, self.fmax)))
        # if frequency limits were specified, adjust our ymin and ymax values;
# at this point self.ymin and self.ymax represent the full spectra
if arg_list.fmin or arg_list.fmax:
import numpy
mymin = self.ymax # guaranteed to be >= anything we look at
mymax = self.ymin # guaranteed to be <= anything we look at
myfmin = self.fmin
myfmax = self.fmax
if arg_list.fmin:
myfmin = float(arg_list.fmin)
if arg_list.fmax:
myfmax = float(arg_list.fmax)
for idx in range(0, len(spectra)):
t = numpy.where(spectra[idx].frequencies.value >= myfmin)
if t[0].size:
strt = t[0][0]
t = numpy.where(spectra[idx].frequencies.value >= myfmax)
if t[0].size:
stop = t[0][0]
else:
stop = spectra[idx].frequencies.size - 1
mymin = min(mymin, numpy.min(spectra[idx].value[strt:stop]))
mymax = max(mymax, numpy.max(spectra[idx].value[strt:stop]))
self.ymin = mymin
self.ymax = mymax
return
| gpl-3.0 | 1,358,993,382,451,397,000 | 35.105634 | 80 | 0.561537 | false | 3.667382 | false | false | false |
shpakoo/YAP | YAP_MiSeq.py | 1 | 29538 | ########################################################################################
## This file is a part of YAP package of scripts. https://github.com/shpakoo/YAP
## Distributed under the MIT license: http://www.opensource.org/licenses/mit-license.php
## Copyright (c) 2011-2013 Sebastian Szpakowski
########################################################################################
#################################################
## A pipeline for miseq data
## OTUs (certain regions of 16S and ITS supported)
## This is for demultiplexed MiSeq data
#################################################
import sys, os.path
from optparse import OptionParser, OptionGroup
from StepsLibrary import *
from StepsLibrary_EXP import *
from collections import defaultdict
from Queue import Queue
_author="Sebastian Szpakowski"
_date="2013/04/01"
_version="Version 5"
#################################################
## Classes
##
class InfoValidator:
def __init__(self,filename):
self.filename = filename
self.info = GeneralPurposeParser(filename, sep=",")
self.URI = "http://confluence/display/~sszpakow/YAP"
self.epilogue = "\n***\tPlease correct before continuing...\n***\t{0}\n".format(self.URI)
self.header = ""
self.tech = ""
self.files, self.barcodes ,self.primersF, self.primersR, self.sampleIDs = self.parse()
print ("***\tValidation complete, no obvious errors found.\n")
def parse(self):
counter=0;
print ("\n***\tValidating your template\n\t{0} ...\n".format(self.filename))
files = set()
barcodes = set()
primersF = set()
primersR = set()
sampleIDs = set()
for line in self.info:
if counter == 0:
self.header = line
has = ",".join (self.header)
needed454 = "path,file,barcode,forward,reverse,use,both,SampleID"
neededMiSeq = "path,file1,file2,forward,reverse,SampleID"
if has.lower().startswith( needed454.lower()) :
self.tech = "454"
elif has.lower().startswith( neededMiSeq.lower()) :
self.tech = "MiSeq"
else:
self.error( "Your template's header is incorrect or missing:\nhas :\t{0}\nneed (454):\t{1}\n\t(illumina)\t{2}".format(has, needed454, neededMiSeq), 101)
if not ("SampleID" in self.header):
self.error( "Your template has\n\t'{0}' instead of \n\t'SampleID' in the column's header.".format(self.header[7]), 102)
else:
files.add("{0}/{1}".format(line[0], line[1].strip()))
if self.tech == "454":
barcodes.add(line[2])
primersF.add(line[3])
primersR.add(line[4])
sampleIDs.add(line[7])
elif self.tech == "MiSeq":
if line[2].strip() != "":
files.add("{0}/{1}".format(line[0], line[2].strip()))
primersF.add(line[3])
primersR.add(line[4])
sampleIDs.add(line[5])
counter+=1
##### files
for f in files:
if not os.path.isfile(f):
self.error("file doesn't exist\n\t{0}".format(f), 103)
##### F primers
if len(primersF)>1:
self.error("Multiple forward primers specified:\n\t{0}\n\tnot supported in the current version of YAP".format("\n\t".join(primersF)), 104)
if list(primersF)[0].strip() =="" :
self.error("Forward primer should not be empty", 104)
##### R primers
if len(primersF)>1:
self.error("Multiple reverse primers specified:\n\t{0}\n\tnot supported in the current version of YAP".format("\n\t".join(primersR)), 105)
if list(primersR)[0].strip() =="" :
self.error("Reverse primer should not be empty", 105)
##### sampleIDs
spaces = set()
ill = ("\\","/", "~", "-", "+", "#")
illegalchars = set()
digitstart = set()
for s in sampleIDs:
if s.count(" ")>0:
spaces.add(s)
for k in ill:
if s.count(k)>0:
illegalchars.add(s)
if s[0].isdigit():
digitstart.add(s)
hint = "*You could create two columns: \n\tSampleID, compliant with YAP (excel function: SUBSTITUTE()) and\n\tOriginalIDs, where any character is allowed."
if len(spaces)>0:
M = "The following samplesID(s) have spaces in them:\n\t"
for s in spaces:
M = "{0}'{1}',".format(M, s)
M = "{0}\n\n\t{1}".format(M, hint)
self.error(M, 106)
if len(illegalchars)>0:
M = "The following samplesID(s) have illegal chars in them {0}:\n\t".format(", ".join(ill))
for s in illegalchars:
M = "{0}'{1}',".format(M, s)
M = "{0}\n\n\t{1}".format(M, hint)
self.error(M, 107)
if len(digitstart)>0:
M = "The following samplesID(s) start with numbers:\n\t".format(", ".join(ill))
for s in digitstart:
M = "{0}'{1}',".format(M, s)
M = "{0}\n\n\t{1}".format(M, hint)
self.error(M, 108)
return (files, barcodes, primersF, primersR, sampleIDs)
def error(self, message, code):
print "!!!\t{0}\n{1}".format(message, self.epilogue)
sys.exit(code)
def getTrimpoints(self):
primers = self.primersF.union(self.primersR)
if "AGAGTTTGATYMTGGCTCAG" in primers and "ATTACCGCGGCTGCTGG" in primers:
return "1044", "13127", "1044-13127"
else:
return "0", "0", "unknown"
def getTech(self):
return self.tech
class InfoParserMiSeq:
def __init__(self, filename):
self.filename = filename
self.info = GeneralPurposeParser(filename, sep=",", skip=1)
self.store = list()
self.IDs = defaultdict(str)
self.primers = set()
self.forward = ""
self.reverse = ""
for line in self.info:
path = line[0]
file1 = line[1]
file2 = line[2]
forward = line[3]
reverse = line[4]
if path.endswith("/"):
path = path[:-1]
path1 = "%s/%s" % (path, file1)
path2 = "%s/%s" % (path, file2)
if file2=="":
self.store.append([path1])
self.IDs[path1] = line[5]
else:
self.store.append([path1, path2])
self.IDs[path1] = line[5]
self.IDs[path2] = line[5]
if reverse =="" or forward =="":
print "%s: please provide both primers for file(s):'%s' " % (x, ",".join(file1, file2))
sys.exit(11)
else:
self.primers.add(">_primer_F\n%s\n" % (forward))
self.primers.add(">_primer_F_rc\n%s\n" % (revComp(forward)))
self.primers.add(">_primer_R\n%s\n" % (reverse))
self.primers.add(">_primer_R_rc\n%s\n" % (revComp(reverse)))
self.forward = forward
self.reverse = reverse
def getFiles(self):
return (self.store)
def getSampleID(self, file):
return self.IDs[file]
def getPrimerFilename(self):
primerfilename = "primers.fasta"
if len(self.primers)>4:
print "The annotation file has more than 2 primers !"
for p in self.primers:
print "%s" % (p.strip())
sys.exit(15)
primerfile = open(primerfilename , "w")
for p in self.primers:
primerfile.write(p)
primerfile.close()
return (primerfilename)
#################################################
## Functions
##
def preprocess():
forprocessing = InfoParserMiSeq(options.fn_info)
PREPROCESS = list()
for files in forprocessing.getFiles():
INS = {}
if len(files) == 2:
M1 = files[0]
M2 = files[1]
sampleid = forprocessing.getSampleID(M1)
INS = {"mate1": ["%s~%s" % (M1, sampleid)], "mate2": ["%s~%s" % (M2, sampleid)]}
else:
M1 = files[0]
sampleid = forprocessing.getSampleID(M1)
INS = {"fastq": ["%s~%s" % (M1, sampleid)]}
#### import files
if options.head == 0:
x = FileImport(INS)
else:
x = FileMiniImport(INS, {"lines": options.head})
#### determine the encoding of fastQ
Q = getQ(M1)
if Q == "":
print (Q)
print "Q issues"
print files
sys.exit(1)
### generate quality information:
ARGS = {
"-h": options.minqual,
"-m": "",
"-v": ""
}
qc = SQA(ARGS, [x])
supplementary.append(qc)
### split into smaller files for parallelization
### 100,000 sequences (x4 since fastq)
ARGS = {
"types": "mate1,mate2,fastq",
"chunk": "400000"
}
P0 = FileSplit(ARGS, [x])
#### trim fastQ files
ARGS = {
"-h": options.minqual,
}
P1 = SQAtrim(ARGS, [P0])
#### overlap mates if available
if len(files)==2:
ARGS = {
"-M": "200",
"-p": Q,
"-r": "250"
}
P2 = Flash({}, ARGS, [P1])
else:
P2 = P1
#### convert fastq to fasta
ARGS = {
"-Q": Q
}
P3 = fastq2fasta(dict(), ARGS, [P2])
#### use fuzznuc to find cut primer sequences
ARGS = {
"-f": forprocessing.forward,
"-r": forprocessing.reverse,
"-m": "1"
}
P4 = PrimerClipper ( {}, ARGS, [P3])
### make fastA headers less problematic
P5 = FastaHeadHash({}, {}, [P4])
P6 = FileMerger("fasta", [P5])
P7 = MakeGroupsFile([P6], sampleid)
P8 = MakeNamesFile([P6])
PREPROCESS.extend([P6,P7,P8])
A1 = FileMerger("fasta,group,name", PREPROCESS)
args = {"mingroupmembers": options.mingroupmembers,
"report": "failing"}
A2 = GroupRetriever(args, [A1])
args = {
"force" : "fasta,name,group",
"find": "groups"
}
A3 = MothurStep("remove.groups", options.nodesize, dict(), args, [A2])
return (A3)
def finalize(input):
clean = CleanFasta(dict(), [input])
####### remove sequences that are too short, and with ambiguous bases
args = { "minlength" : "%s" % ( options.minlength ),
"maxambig" : "0",
"force": "fasta,name,group"}
clean2 = MothurStep("screen.seqs", options.nodesize, dict(), args, [clean])
args = {"mingroupmembers": 0,
"report": "passing"}
clean2a = GroupRetriever(args, [clean2])
OutputStep("2-NOISY", "groupstats,fasta,group,name,list,svg,pdf,tiff,taxsummary,globalsummary,localsummary", clean2a)
###################### CDHIT-454
#### unique and de-noise
args = {}
### strictly unique collapsing
if options.strictlevel==1:
args= {
"c" : "1.0",
"b" : "8",
"aS": "1.0",
"g" : "1",
"M" : "50000",
"T" : "%s" % (options.nodesize)
}
### aggressive de-noising:
elif options.strictlevel==2:
args= {
"c" : "0.98",
"b" : "10",
"aS": "0.0",
"g" : "1",
"M" : "0",
"T" : "%s" % (options.nodesize)
}
#### de-noise/unique collapse
CD_1 = CDHIT_454(options.nodesize, args, [clean2])
CD_2 = CDHIT_Mothurize(dict(), CD_1)
args = {"mingroupmembers": 0,
"report": "passing"}
CD_2a = GroupRetriever(args, [CD_2])
OutputStep("3-UNIQUE", "groupstats,tre,fasta,group,name,list,svg,pdf,tiff,taxsummary,globalsummary,localsummary", CD_2a)
#### add reference sequences to the merged experiments' file
CD_3 = FileMerger("fasta,name,group,qfile", [CD_2, REF_1, REF_2, REF_3])
#### align to reference database
inputs = {"reference": ["%s/%s" % (options.dir_anno, _alignment)] }
args = { "flip":"t",
"ksize": "8"
}
CD_4 = MothurStep("align.seqs", options.nodesize, inputs, args, [CD_3])
#### AlignmentSummary determining alignment trimming options
#### sets trimstart and trimend variables that can be used by in subsequent steps.
#### threshold means to keep the center part of the alignment with at least
#### the fraction of maximum coverage
args = {"ref": _referenceseqname, "thresh": options.dynthresh}
CD_5 = AlignmentSummary(args,[CD_4])
#### alignment plots
if _trimstart != _trimend:
args = {"ref": _referenceseqname,
"trimstart" : _trimstart,
"trimend" : _trimend
}
else:
args = {"ref": _referenceseqname,
"trimstart" : "find",
"trimend" : "find"
}
CD_6 = AlignmentPlot(args,[CD_5])
#supplementary.append(CD_5)
supplementary.append(CD_6)
###########################
args = {"mingroupmembers": 0,
"report": "passing"}
CD_4a = GroupRetriever(args, [CD_4])
OutputStep("4-ALIGNED", "groupstats,tre,fasta,group,name,list,svg,pdf,tiff,taxsummary,globalsummary,localsummary", CD_4a)
cleanCD = cleanup(CD_5)
args = {"mingroupmembers": 0,
"report": "passing"}
cleanCDa = GroupRetriever(args, [cleanCD])
OutputStep("5-CLEAN", "groupstats,fasta,group,name,list,svg,pdf,tiff,taxsummary,globalsummary,localsummary", cleanCDa)
clusterCD = CDHITCluster(cleanCD)
x = plotsAndStats(clusterCD)
INS = {"annotation" : [options.fn_info]}
ARGS = {"dist": "0.03"}
output1 = R_defaultplots(INS, ARGS, x)
output2 = AnnotateClusters(dict(), dict(), output1)
return (output2)
def cleanup(input):
### remove the "ref" group
args = {
"force" : "fasta,name,group",
"groups": "ref"
}
s15 = MothurStep("remove.groups", options.nodesize, dict(), args, [input])
####### remove sequences that are too short (bad alignment?)
args = {
"minlength" : "%s" % (options.minlength),
"maxambig" : "0",
"force" : "fasta,name,group" ,
}
s16 = MothurStep("screen.seqs", options.nodesize, dict(), args, [s15])
####### find chimeric sequences
toremove = list()
for ch in [ "uchime" ]:
### chimeras against reference
args = {"force" : "fasta,reference"}
inputs = {"reference": ["%s/%s" % (options.dir_anno, _alignment)] }
A = MothurStep("chimera.%s" % (ch),options.nodesize, inputs, args, [s16])
toremove.append(A)
if not options.quickmode:
### chimeras against self
args ={"force": "name,group,fasta"}
inputs = {}
A = MothurStep("chimera.%s" % (ch),options.nodesize, inputs, args, [s16])
toremove.append(A)
### merge all accnos files and remove ALL chimeras
allchimeras = FileMerger("accnos", toremove)
s17 = MothurStep("remove.seqs",options.nodesize, dict(), dict(), allchimeras)
#### if primer trimming points are not unknown
if _trimstart!=_trimend:
### primer cut
args = {
"s" : _trimstart,
"e": _trimend,
}
else:
args = {
"s" : "find:trimstart",
"e" : "find:trimend"
}
s18a = AlignmentTrim(dict(), args, [s17])
####### remove sequence fragments, bad alignments (?)
args = {}
if options.dynamic:
args = { "minlength" : "50" ,
"force": "fasta,name,group"}
else:
args = { "minlength" : "%s" % (options.minlength),
"force": "fasta,name,group"}
s18b = MothurStep("screen.seqs", options.nodesize, dict(), args, [s18a])
### build a tree
#s18b_tree = ClearcutTree({}, s18b)
####### remove empty columns
args = {"vertical" : "T"}
s19 = MothurStep("filter.seqs",options.nodesize, dict(), args, [s18b])
####### taxonomy
inputs = { "reference": ["%s/%s" % (options.dir_anno,_trainset)],
"taxonomy": ["%s/%s" % (options.dir_anno, _taxonomy )]
}
args = { "iters" : "100",
"cutoff": "60"
}
s20 = MothurStep("classify.seqs", options.nodesize, inputs, args, [s19])
### remove - and . for subsequent clustering efforts
s21 = CleanFasta(dict(), [s20])
return (s21)
def CDHITCluster(input):
cdhits = list()
for arg in ["0.99", "0.97", "0.95", "0.90"]:
args = {"c": arg,
"d" : "0",
"n": "8",
"g": "1",
"M": "10000",
"T": "%s" % (options.nodesize)
}
CD_1 = CDHIT_EST(options.nodesize, args, [input])
### make sth. analogous to mothur's labels
arg = 1.0 - float(arg)
if arg == 0:
arg = "unique"
else:
arg = "%s" % (arg)
args = {"mode": arg
}
CD_2 = CDHIT_Mothurize(args, CD_1)
CD_2a = CDHIT_Perls({}, CD_2)
cdhits.append(CD_2)
READY = FileMerger("list,rabund,sabund", cdhits)
SORTED = FileSort("list,rabund,sabund", READY)
return (SORTED)
def plotsAndStats(input):
### all groups!
args = {"mingroupmembers": 0,
"report": "passing"}
s23 = GroupRetriever(args, [input])
######## make a shared file
args = {"label" : "0.01-0.03-0.05-0.1", "find": "groups"}
s24 = MothurStep("make.shared", options.nodesize, dict(), args, [s23])
args = {
"label" : "0.01-0.03-0.05-0.1",
"basis" : "otu"
}
s25a= MothurStep("classify.otu", options.nodesize, dict(), args, [s24])
args = {
"taxonomy": "otu.taxonomy",
"taxsummary": "otu.taxsummary"
}
s25aa = FileType(args, [s25a])
args = {
"label" : "0.01-0.03-0.05-0.1",
"basis" : "sequence"
}
s25b = MothurStep("classify.otu", options.nodesize, dict(), args, [s24])
args = {
"taxonomy": "seq.taxonomy",
"taxsummary": "seq.taxsummary"
}
s25bb = FileType(args, [s25b])
args = {"force" : "list", "calc": "nseqs-sobs-simpson-invsimpson-chao-shannon-shannoneven-coverage"}
s26 = MothurStep("summary.single",options.nodesize, dict(), args, [s25bb])
args = {"summary": "globalsummary"}
s26a = FileType(args, [s26])
args = {"force" : "shared", "calc": "nseqs-sobs-simpson-invsimpson-chao-shannon-shannoneven-coverage"}
s27 = MothurStep("summary.single", options.nodesize, dict(), args, [s25bb])
args = {"summary": "localsummary"}
s27a = FileType(args, [s27])
args = {"force" : "shared", "calc": "thetayc-jclass-braycurtis"}
s28 = MothurStep("tree.shared", options.nodesize, dict(), args, [s24])
supplementary.append(s28)
args = {"force" : "list", "calc": "nseqs-sobs-simpson-invsimpson-chao-shannon-shannoneven-coverage", "freq": "0.01"}
s29 = MothurStep("rarefaction.single", options.nodesize, dict(), args, [s24])
#return ([s23, s24, s25aa, s25bb, s26a, s27a, s28, s29])
if options.quickmode:
return ([s23, s24, s25aa, s25bb, s26a, s27a, s28, s29])
else:
args = {"force" : "shared", "calc": "nseqs-sobs-simpson-invsimpson-chao-shannon-shannoneven-coverage", "freq": "0.05"}
s30 = MothurStep("rarefaction.single",options.nodesize, dict(), args, [s24])
return ([s23, s24, s25aa, s25bb, s26a, s27a, s28, s29, s30])
#################################################
## Arguments
##
parser = OptionParser()
group = OptionGroup(parser, "Required", description="Will not run without these !")
group.add_option("-P", "--PROJECT", dest="project", default="",
help="project code", metavar="#")
group.add_option("-E", "--EMAIL", dest="email", default="",
help="e-mail address", metavar="@")
group.add_option("-i", "--info", dest="fn_info", default="",
help="mapping: file, barcode, primer, sample information. File should be in CSV format", metavar="allinfo.csv")
parser.add_option_group(group)
group = OptionGroup(parser, "Optional Configuration", description="parameters to alter if necessary")
group.add_option("-Y", "--Yap", dest="mode", default="16S",
help="""Which Pipeline: 16S ITS [%default]""", metavar="#")
group.add_option("-D", "--dynamic", dest="dynamic", action = "store_true", default=False,
help="""If specified, alignment will be scanned for primer locations and trimmed accordingly. Otherwise a database of known primers and trimming points will be used. [%default]""", metavar="#")
group.add_option("-d", "--thresh", dest="dynthresh", default=0.75, type="float",
help="""in conjunction with -D, otherwise this is ignored. This allows to specify how much of the alignment to keep using the per-base coverage. The [%default] value indicates that ends of the alignment are trimmed until a base has a coverage of [%default] * peak coverage.""", metavar="#")
group.add_option("-a", "--annotations", dest="dir_anno", default="/usr/local/devel/ANNOTATION/sszpakow/ANNOTATION/",
help="directory that stores auxilliary files\n[%default]", metavar="annotations")
group.add_option("-S", "--SAMPLE", dest="sampletimes", default=0, type="int",
help="perform sub.sampling of all reads based on the number of reads in smallest group. if 0 - all reads are used. if 1 - the sampling will be performed once, if 2 or more, then 2 or more independent samplings are going to be performed.\n[%default]", metavar="#")
group.add_option("-m", "--minlen", dest="minlength", default=200, type="int",
help="what is the minimum length of reads to process\n[%default]", metavar="#")
group.add_option("-g", "--mingroupsize", dest="mingroupmembers", default=100, type="int",
help="after demultiplexing, discard groups with fewer reads than #\n[%default]", metavar="#")
group.add_option("-Q", "--minqual", dest="minqual", default=30, type="int",
help="Keep stretches of reads this good or better #\n[%default]", metavar="#")
group.add_option("-q", "--quick", dest="quickmode", action = "store_true", default=False,
help="""If specified, only single, reference DB based chimera checking will be used. [%default]""", metavar="#")
parser.add_option("-H", "--head", dest="head", default=0, type="int",
help="For dry runs, import only # of lines from the input files")
group.add_option("-x", "--strict", dest="strictlevel", default=2, type="int",
help="""how strict to be at pre-clustering:
1 very strict, conservative denoising (precluster identical sequences)
2 less strict, aggressive denoising (precluster using 98% similarity)
[%default]""", metavar="#")
parser.add_option_group(group)
group = OptionGroup(parser, "Technical", description="could be useful sometimes")
group.add_option("-C", "--NODESIZE", dest="nodesize", default=30,
help="maximum number of grid node's CPUs to use\n[%default]", metavar="#")
parser.add_option_group(group)
(options, args) = parser.parse_args()
#################################################
## Begin
##
if options.fn_info == "" or options.email == "" or options.project =="":
parser.print_help()
sys.exit(1)
if not options.mode in ("16S", "ITS"):
parser.print_help()
sys.exit(2)
### parameters specific to YAP incarnations
### 16S V1-V3
if options.mode=="16S":
### file in the annotations directory that has reference sequences
_referenceseq = "ecolis.fasta"
### which fasta ID use as the reference (if file has more than one)
_referenceseqname = "e_coli2_genbank"
### mothur's compendium of ALIGNED 16S sequences
_alignment = "silva.bacteria.fasta"
### mothur's curated version of RDP's curated train set and corresponding taxonomy
_trainset = "trainset9_032012.pds.fasta"
_taxonomy = "trainset9_032012.pds.tax"
### until automatic primer detection is implemented, these are coordinates of primers
### when aligned to the silva.bacteria.fasta (for in-silico PCR and subsequent primer trimming)
#_trimstart = "1044"
#_trimend = "13127"
### ITS NSI1 - NLB4 (barcoded)
elif options.mode=="ITS":
_referenceseq = "yeastITS.fasta"
_referenceseqname = "AF293_reference"
_alignment = "FungalITSseed.092012.1.aln.fasta"
_trainset = "FungalITSdb.092012.1.fasta"
_taxonomy = "FungalITSdb.092012.1.tax"
#_trimstart = "1716"
#_trimend = "2795"
else:
parser.print_help()
sys.exit(2)
validator = InfoValidator(options.fn_info)
_trimstart , _trimend, _region = validator.getTrimpoints()
_tech = validator.getTech()
BOH = init(options.project, options.email)
BOH.toPrint("-----", "GLOBAL", "We are in %s mode" % (options.mode))
BOH.toPrint("-----", "GLOBAL", "We will be processing %s data" % (_tech))
if options.dynamic or _region == "unknown":
BOH.toPrint("-----", "GLOBAL", "Dynamic alignment trimming enabled")
BOH.toPrint("-----", "GLOBAL", "Alignment will be trimmed using %s * peak coverage threshold" % (options.dynthresh))
_trimstart = "0"
_trimend = "0"
else:
BOH.toPrint("-----", "GLOBAL", "Alignment trimming predefined: %s - %s" % (_trimstart, _trimend))
#############################
#######################
##### reference:
inputs = {"fasta": ["%s/%s" % (options.dir_anno, _referenceseq)] }
REF = FileImport(inputs)
REF_1 = MakeNamesFile([REF])
REF_2 = MakeGroupsFile([REF], "ref")
REF_3 = MakeQualFile ([REF], "40" )
##############################
supplementary = list()
READY = preprocess()
OutputStep("1-PREPROCESS", "groupstats,fasta,group,name,list,pdf,svg,tiff,taxsummary,globalsummary,localsummary", READY)
if options.sampletimes==0:
tmp = finalize(READY)
y = R_rarefactions(dict(), dict(), tmp)
z = R_OTUplots(dict(), dict(), tmp)
supplementary.append(y)
supplementary.append(z)
OutputStep("6-ENTIRE", "groupstats,fasta,group,name,list,pdf,svg,tiff,taxsummary,globalsummary,localsummary,phylotax", [tmp])
OutputStep("8-TBC", "phylotax,group,list,fasta", [tmp])
#else:
# thefinalset = list()
# for k in xrange(0, options.sampletimes):
# args = {
# "force" : "fasta,name,group",
# "persample": "T",
# "iter": "%s" % (k)
# }
# sampled = MothurStep("sub.sample", options.nodesize, dict(), args, [READY])
# tmp = finalize(sampled)
# y = R_rarefactions(dict(), dict(), tmp)
# z = R_OTUplots(dict(), dict(), tmp)
# supplementary.append(y)
# supplementary.append(z)
# OutputStep("SAMPLED_%s" % (k), "groupstats,fasta,group,name,list,pdf,svg,tiff,taxsummary,globalsummary,localsummary", [tmp])
# thefinalset.append(tmp)
#
OutputStep("7-SUPP_PLOTS", "tre,pdf,png,svg,tiff,r_nseqs,rarefaction,r_simpson,r_invsimpson,r_chao,r_shannon,r_shannoneven,r_coverage", supplementary)
###########################################################################
##
##################################################
### Finish
##################################################
| mit | -7,766,473,033,472,721,000 | 36.201511 | 308 | 0.511138 | false | 3.61012 | false | false | false |
varlog00/Sigil | src/Resource_Files/python3lib/xmlprocessor.py | 1 | 16367 | #!/usr/bin/env python3
import sys
import os
from sigil_bs4 import BeautifulSoup
from sigil_bs4.builder._lxml import LXMLTreeBuilderForXML
import re
from urllib.parse import unquote
from urllib.parse import urlsplit
from lxml import etree
from io import BytesIO
from opf_newparser import Opf_Parser
ASCII_CHARS = set(chr(x) for x in range(128))
URL_SAFE = set('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'abcdefghijklmnopqrstuvwxyz'
'0123456789' '#' '_.-/~')
IRI_UNSAFE = ASCII_CHARS - URL_SAFE
TEXT_FOLDER_NAME = "Text"
ebook_xml_empty_tags = ["meta", "item", "itemref", "reference", "content"]
def get_void_tags(mtype):
voidtags = []
if mtype == "application/oebps-package+xml":
voidtags = ["item", "itemref", "mediatype", "mediaType", "reference"]
elif mtype == "application/x-dtbncx+xml":
voidtags = ["meta", "reference", "content"]
elif mtype == "application/smil+xml":
voidtags = ["text", "audio"]
elif mtype == "application/oebps-page-map+xml":
voidtags = ["page"]
else:
voidtags = ebook_xml_empty_tags
return voidtags
# returns a quoted IRI (not a URI)
def quoteurl(href):
if isinstance(href,bytes):
href = href.decode('utf-8')
(scheme, netloc, path, query, fragment) = urlsplit(href, scheme="", allow_fragments=True)
if scheme != "":
scheme += "://"
href = href[len(scheme):]
result = []
for char in href:
if char in IRI_UNSAFE:
char = "%%%02x" % ord(char)
result.append(char)
return scheme + ''.join(result)
# unquotes url/iri
def unquoteurl(href):
if isinstance(href,bytes):
href = href.decode('utf-8')
href = unquote(href)
return href
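
# Illustrative usage sketch (added example, not part of the original module):
# quoteurl() percent-encodes any character outside URL_SAFE, so spaces become
# %20 while '#', '/', '.' and alphanumerics pass through untouched; unquoteurl()
# reverses the encoding. The file name below is made up purely for demonstration.
def _demo_quote_roundtrip():
    quoted = quoteurl("Text/My Chapter.xhtml#sec 1")
    assert quoted == "Text/My%20Chapter.xhtml#sec%201"
    assert unquoteurl("Text/My%20Chapter.xhtml") == "Text/My Chapter.xhtml"
    return quoted
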
def _remove_xml_header(data):
newdata = data
return re.sub(r'<\s*\?xml\s*[^\?>]*\?*>\s*','',newdata, count=1,flags=re.I)
def _well_formed(data):
result = True
newdata = data
if isinstance(newdata, str):
newdata = newdata.encode('utf-8')
try:
parser = etree.XMLParser(encoding='utf-8', recover=False)
tree = etree.parse(BytesIO(newdata), parser)
except Exception:
result = False
pass
return result
def _reformat(data):
newdata = data
if isinstance(newdata, str):
newdata = newdata.encode('utf-8')
parser = etree.XMLParser(encoding='utf-8', recover=True, ns_clean=True,
remove_comments=True, remove_pis=True, strip_cdata=True, resolve_entities=False)
tree = etree.parse(BytesIO(newdata), parser)
newdata = etree.tostring(tree.getroot(),encoding='UTF-8', xml_declaration=False)
return newdata
# does not support cdata sections yet
def _make_it_sane(data):
# first remove all comments as they may contain unescaped xml reserved characters
# that will confuse the remaining _make_it_sane regular expressions
comments = re.compile(r'''<!--.*?-->''', re.DOTALL)
data = comments.sub("",data)
# remove invalid tags that freak out lxml
emptytag = re.compile(r'''(<\s*[/]*\s*>)''')
data=emptytag.sub("", data);
# handle double tag start
badtagstart = re.compile(r'''(<[^>]*<)''')
extrastart = re.compile(r'''<\s*<''');
missingend = re.compile(r'''<\s*[a-zA-Z:]+[^<]*\s<''')
startinattrib = re.compile(r'''<\s*[a-z:A-Z]+[^<]*["'][^<"']*<''')
mo = badtagstart.search(data)
while mo is not None:
fixdata = data[mo.start(1):mo.end(1)]
mextra = extrastart.match(fixdata)
mmiss = missingend.match(fixdata)
mattr = startinattrib.match(fixdata)
if mextra is not None:
fixdata = fixdata[1:]
elif mattr is not None:
fixdata = fixdata[0:-1] + "<"
elif mmiss is not None:
fixdata = fixdata[0:-1].rstrip() + "> <"
else:
fixdata = "<" + fixdata[1:]
data = data[0:mo.start(1)] + fixdata + data[mo.end(1):]
mo = badtagstart.search(data)
# handle double tag end
badtagend = re.compile(r'''(>[^<]*>)''')
extraend = re.compile(r'''>\s*>''');
missingstart = re.compile(r'''>\s[^>]*[a-zA-Z:]+[^>]*>''')
endinattrib = re.compile(r'''>[^>]*["'][^>'"]*>''')
mo = badtagend.search(data)
while mo is not None:
fixdata = data[mo.start(1):mo.end(1)]
mextra = extraend.match(fixdata)
mmiss = missingstart.match(fixdata)
mattr = endinattrib.match(fixdata)
if mextra is not None:
fixdata = fixdata[0:-1]
elif mattr is not None:
fixdata = ">" + fixdata[1:]
elif mmiss is not None:
fixdata = "> <" + fixdata[1:].lstrip()
else:
fixdata = fixdata[0:-1] + ">"
data = data[0:mo.start(1)] + fixdata + data[mo.end(1):]
mo = badtagend.search(data)
return data
# ncx_text_pattern = re.compile(r'''(<text>)\s*(\S[^<]*\S)\s*(</text>)''',re.IGNORECASE)
# re.sub(ncx_text_pattern,r'\1\2\3',newdata)
# data is expectedd to be in unicode
def WellFormedXMLErrorCheck(data, mtype=""):
newdata = _remove_xml_header(data)
if isinstance(newdata, str):
newdata = newdata.encode('utf-8')
line = "-1"
column = "-1"
message = "well-formed"
try:
parser = etree.XMLParser(encoding='utf-8', recover=False)
tree = etree.parse(BytesIO(newdata), parser)
except Exception:
line = "0"
column = "0"
message = "exception"
if len(parser.error_log) > 0:
error = parser.error_log[0]
message = error.message
if isinstance(message, bytes):
message = message.decode('utf-8')
line = "%d" % error.line
column = "%d" % error.column
pass
result = [line, column, message]
return result
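
# Illustrative usage sketch (added example, not part of the original module):
# for well-formed XML the check returns ["-1", "-1", "well-formed"]; for broken
# XML it returns the lxml parser's line, column and message as strings.
def _demo_wellformed_check():
    ok = WellFormedXMLErrorCheck("<root><a>text</a></root>")
    bad = WellFormedXMLErrorCheck("<root><a>text</b></root>")
    assert ok == ["-1", "-1", "well-formed"]
    assert bad[0] != "-1"
    return ok, bad
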
def IsWellFormedXML(data, mtype=""):
[line, column, message] = WellFormedXMLErrorCheck(data, mtype)
result = line == "-1"
return result
# data is expected to be in unicode
# note: bs4 with lxml for xml strips whitespace so always prettyprint xml
def repairXML(data, mtype="", indent_chars=" "):
newdata = _remove_xml_header(data)
# if well-formed - don't mess with it
if _well_formed(newdata):
return data
newdata = _make_it_sane(newdata)
if not _well_formed(newdata):
newdata = _reformat(newdata)
if mtype == "application/oebps-package+xml":
newdata = newdata.decode('utf-8')
newdata = Opf_Parser(newdata).rebuild_opfxml()
# lxml requires utf-8 on Mac, won't work with unicode
if isinstance(newdata, str):
newdata = newdata.encode('utf-8')
voidtags = get_void_tags(mtype)
xmlbuilder = LXMLTreeBuilderForXML(parser=None, empty_element_tags=voidtags)
soup = BeautifulSoup(newdata, features=None, from_encoding="utf-8", builder=xmlbuilder)
newdata = soup.decodexml(indent_level=0, formatter='minimal', indent_chars=indent_chars)
return newdata
def anchorNCXUpdates(data, originating_filename, keylist, valuelist):
data = _remove_xml_header(data)
# lxml on a Mac does not seem to handle full unicode properly, so encode as utf-8
data = data.encode('utf-8')
# rebuild serialized lookup dictionary
id_dict = {}
for i in range(0, len(keylist)):
id_dict[ keylist[i] ] = valuelist[i]
xmlbuilder = LXMLTreeBuilderForXML(parser=None, empty_element_tags=ebook_xml_empty_tags)
soup = BeautifulSoup(data, features=None, from_encoding="utf-8", builder=xmlbuilder)
original_filename_with_relative_path = TEXT_FOLDER_NAME + "/" + originating_filename
for tag in soup.find_all("content"):
if "src" in tag.attrs:
src = tag["src"]
if src.find(":") == -1:
parts = src.split('#')
if (parts is not None) and (len(parts) > 1) and (parts[0] == original_filename_with_relative_path) and (parts[1] != ""):
fragment_id = parts[1]
if fragment_id in id_dict:
attribute_value = TEXT_FOLDER_NAME + "/" + quoteurl(id_dict[fragment_id]) + "#" + fragment_id
tag["src"] = attribute_value
newdata = soup.decodexml(indent_level=0, formatter='minimal', indent_chars=" ")
return newdata
def performNCXSourceUpdates(data, currentdir, keylist, valuelist):
data = _remove_xml_header(data)
# lxml on a Mac does not seem to handle full unicode properly, so encode as utf-8
data = data.encode('utf-8')
# rebuild serialized lookup dictionary
updates = {}
for i in range(0, len(keylist)):
updates[ keylist[i] ] = valuelist[i]
xmlbuilder = LXMLTreeBuilderForXML(parser=None, empty_element_tags=ebook_xml_empty_tags)
soup = BeautifulSoup(data, features=None, from_encoding="utf-8", builder=xmlbuilder)
for tag in soup.find_all("content"):
if "src" in tag.attrs:
src = tag["src"]
if src.find(":") == -1:
parts = src.split('#')
url = parts[0]
fragment = ""
if len(parts) > 1:
fragment = parts[1]
bookrelpath = os.path.join(currentdir, unquoteurl(url))
bookrelpath = os.path.normpath(bookrelpath)
bookrelpath = bookrelpath.replace(os.sep, "/")
if bookrelpath in updates:
attribute_value = updates[bookrelpath]
if fragment != "":
attribute_value = attribute_value + "#" + fragment
attribute_value = quoteurl(attribute_value)
tag["src"] = attribute_value
newdata = soup.decodexml(indent_level=0, formatter='minimal', indent_chars=" ")
return newdata
def performOPFSourceUpdates(data, currentdir, keylist, valuelist):
data = _remove_xml_header(data)
# lxml on a Mac does not seem to handle full unicode properly, so encode as utf-8
data = data.encode('utf-8')
# rebuild serialized lookup dictionary
updates = {}
for i in range(0, len(keylist)):
updates[ keylist[i] ] = valuelist[i]
xmlbuilder = LXMLTreeBuilderForXML(parser=None, empty_element_tags=ebook_xml_empty_tags)
soup = BeautifulSoup(data, features=None, from_encoding="utf-8", builder=xmlbuilder)
for tag in soup.find_all(["item","reference","site"]):
if "href" in tag.attrs :
href = tag["href"]
if href.find(":") == -1 :
parts = href.split('#')
url = parts[0]
fragment = ""
if len(parts) > 1:
fragment = parts[1]
bookrelpath = os.path.join(currentdir, unquoteurl(url))
bookrelpath = os.path.normpath(bookrelpath)
bookrelpath = bookrelpath.replace(os.sep, "/")
if bookrelpath in updates:
attribute_value = updates[bookrelpath]
if fragment != "":
attribute_value = attribute_value + "#" + fragment
attribute_value = quoteurl(attribute_value)
tag["href"] = attribute_value
newdata = soup.decodexml(indent_level=0, formatter='minimal', indent_chars=" ")
return newdata
# Note xml_updates has paths relative to the OEBPS folder as base
# As if they were meant only for OEBPS/content.opf and OEBPS/toc.ncx
# So adjust them to be relative to the Misc directory where .smil files live in Sigil
def performSMILUpdates(data, currentdir, keylist, valuelist):
data = _remove_xml_header(data)
# lxml on a Mac does not seem to handle full unicode properly, so encode as utf-8
data = data.encode('utf-8')
# rebuild serialized lookup dictionary of xml_updates, properly adjusted
updates = {}
for i in range(0, len(keylist)):
updates[ keylist[i] ] = "../" + valuelist[i]
xml_empty_tags = ["text", "audio"]
xmlbuilder = LXMLTreeBuilderForXML(parser=None, empty_element_tags=xml_empty_tags)
soup = BeautifulSoup(data, features=None, from_encoding="utf-8", builder=xmlbuilder)
for tag in soup.find_all(["body","seq","text","audio"]):
for att in ["src", "epub:textref"]:
if att in tag.attrs :
ref = tag[att]
if ref.find(":") == -1 :
parts = ref.split('#')
url = parts[0]
fragment = ""
if len(parts) > 1:
fragment = parts[1]
bookrelpath = os.path.join(currentdir, unquoteurl(url))
bookrelpath = os.path.normpath(bookrelpath)
bookrelpath = bookrelpath.replace(os.sep, "/")
if bookrelpath in updates:
attribute_value = updates[bookrelpath]
if fragment != "":
attribute_value = attribute_value + "#" + fragment
attribute_value = quoteurl(attribute_value)
tag[att] = attribute_value
newdata = soup.decodexml(indent_level=0, formatter='minimal', indent_chars=" ")
return newdata
# Note xml_updates has urls/iris relative to the OEBPS folder as base
# As if they were meant only for OEBPS/content.opf and OEBPS/toc.ncx
# So adjust them to be relative to the Misc directory where page-map.xml lives
def performPageMapUpdates(data, currentdir, keylist, valuelist):
data = _remove_xml_header(data)
# lxml on a Mac does not seem to handle full unicode properly, so encode as utf-8
data = data.encode('utf-8')
# rebuild serialized lookup dictionary of xml_updates properly adjusted
updates = {}
for i in range(0, len(keylist)):
updates[ keylist[i] ] = "../" + valuelist[i]
xml_empty_tags = ["page"]
xmlbuilder = LXMLTreeBuilderForXML(parser=None, empty_element_tags=xml_empty_tags)
soup = BeautifulSoup(data, features=None, from_encoding="utf-8", builder=xmlbuilder)
for tag in soup.find_all(["page"]):
for att in ["href"]:
if att in tag.attrs :
ref = tag[att]
if ref.find(":") == -1 :
parts = ref.split('#')
url = parts[0]
fragment = ""
if len(parts) > 1:
fragment = parts[1]
bookrelpath = os.path.join(currentdir, unquoteurl(url))
bookrelpath = os.path.normpath(bookrelpath)
bookrelpath = bookrelpath.replace(os.sep, "/")
if bookrelpath in updates:
attribute_value = updates[bookrelpath]
if fragment != "":
attribute_value = attribute_value + "#" + fragment
attribute_value = quoteurl(attribute_value)
tag[att] = attribute_value
newdata = soup.decodexml(indent_level=0, formatter='minimal', indent_chars=" ")
return newdata
def main():
argv = sys.argv
opfxml = '''
<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<package xmlns="http://www.idpf.org/2007/opf" unique-identifier="BookId" version="2.0">
<metadata xmlns:mydc="http://purl.org/dc/elements/1.1/" xmlns:opf="http://www.idpf.org/2007/opf">
<mydc:identifier id="BookId" opf:scheme="UUID">urn:uuid:a418a8f1-dcbc-4c5d-a18f-533765e34ee8</mydc:identifier>
</metadata>
<manifest>
<!-- this has a lot of bad characters & < > \" \'-->
<item href="toc.ncx" id="ncx" media-type="application/x-dtbncx+xml" />
<item href="Text/Section0001.xhtml" id="Section0001.xhtml" media-type="application/xhtml+xml" />
</manifest>
<
<spine toc="ncx">
<itemref idref="Section0001.xhtml">
</spine>
<text>
this is a bunch of nonsense
</text>
<text>
this is a bunch of nonsense 1
</text>
<text>
this is a bunch of nonsense 2
</text>
<guide />
</package>
'''
print(argv)
if not argv[-1].endswith("xmlprocessor.py"):
with open(argv[-1],'rb') as f:
opfxml = f.read();
if isinstance(opfxml, bytes):
opfxml = opfxml.decode('utf-8')
print(repairXML(opfxml, "application/oebps-package+xml"))
return 0
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 | -1,157,109,961,533,576,400 | 39.412346 | 136 | 0.587646 | false | 3.627438 | false | false | false |
rdcrt/pystarling | test/api_objects/test_Account.py | 1 | 1419 | import dateutil
import pytest
from pystarling.api_objects.Account import Account
class TestAccount(object):
test_data = {
'id': 'ee8152d7-6ff2-4f79-b9de-39861bdec427',
'number': '99999999',
'sortCode': '608371',
'iban': 'GB26SRLG60837199999999',
'bic': 'SRLGGB2L',
'currency': 'GBP',
'createdAt': '2017-05-16T12:00:00.000Z'
}
incomplete_data = {
'id': 'ee8152d7-6ff2-4f79-b9de-39861bdec427'
}
def test_incomplete_data_raises_error(self):
with pytest.raises(KeyError):
Account(self.incomplete_data)
def test_data_parsed_correctly(self):
account = Account(self.test_data)
assert account.id == 'ee8152d7-6ff2-4f79-b9de-39861bdec427'
assert account.sort_code == '608371'
assert account.number == '99999999'
assert account.iban == 'GB26SRLG60837199999999'
assert account.bic == 'SRLGGB2L'
assert account.currency == 'GBP'
assert account.created_at == dateutil.parser.parse('2017-05-16T12:00:00.000Z')
def test_get_readable_sort_code_formatted_correctly(self):
account = Account(self.test_data)
assert account.get_readable_sort_code() == '60-83-71'
def test_get_readable_iban_formatted_correctly(self):
account = Account(self.test_data)
assert account.get_readable_iban() == "GB26 SRLG 6083 7199 9999 99"
| mit | 2,434,672,469,967,569,000 | 32.785714 | 86 | 0.639183 | false | 3.118681 | true | false | false |
APMonitor/arduino | 2_Regression/2nd_order_MIMO/GEKKO/tclab_2nd_order_linear.py | 1 | 3283 | import numpy as np
import time
import matplotlib.pyplot as plt
import random
# get gekko package with:
# pip install gekko
from gekko import GEKKO
import pandas as pd
# import data
data = pd.read_csv('data.txt')
tm = data['Time (sec)'].values
Q1s = data[' Heater 1'].values
Q2s = data[' Heater 2'].values
T1s = data[' Temperature 1'].values
T2s = data[' Temperature 2'].values
#########################################################
# Initialize Model as Estimator
#########################################################
m = GEKKO(name='tclab-mhe')
#m.server = 'http://127.0.0.1' # if local server is installed
# time horizon taken from the imported data file (Time column)
m.time = tm
# Parameters to Estimate
K1 = m.FV(value=0.5)
K1.STATUS = 1
K1.FSTATUS = 0
K1.LOWER = 0.1
K1.UPPER = 1.0
K2 = m.FV(value=0.3)
K2.STATUS = 1
K2.FSTATUS = 0
K2.LOWER = 0.1
K2.UPPER = 1.0
K3 = m.FV(value=0.1)
K3.STATUS = 1
K3.FSTATUS = 0
K3.LOWER = 0.0001
K3.UPPER = 1.0
tau12 = m.FV(value=150)
tau12.STATUS = 1
tau12.FSTATUS = 0
tau12.LOWER = 50.0
tau12.UPPER = 250
tau3 = m.FV(value=15)
tau3.STATUS = 0
tau3.FSTATUS = 0
tau3.LOWER = 10
tau3.UPPER = 20
# Measured inputs
Q1 = m.MV(value=0)
Q1.FSTATUS = 1 # measured
Q1.value = Q1s
Q2 = m.MV(value=0)
Q2.FSTATUS = 1 # measured
Q2.value = Q2s
# Ambient temperature
Ta = m.Param(value=23.0) # degC
# State variables
TH1 = m.SV(value=T1s[0])
TH2 = m.SV(value=T2s[0])
# Measurements for model alignment
TC1 = m.CV(value=T1s)
TC1.STATUS = 1 # minimize error between simulation and measurement
TC1.FSTATUS = 1 # receive measurement
TC1.MEAS_GAP = 0.1 # measurement deadband gap
TC2 = m.CV(value=T1s[0])
TC2.STATUS = 1 # minimize error between simulation and measurement
TC2.FSTATUS = 1 # receive measurement
TC2.MEAS_GAP = 0.1 # measurement deadband gap
TC2.value = T2s
# Heat transfer between two heaters
DT = m.Intermediate(TH2-TH1)
# Empirical correlations
m.Equation(tau12 * TH1.dt() + (TH1-Ta) == K1*Q1 + K3*DT)
m.Equation(tau12 * TH2.dt() + (TH2-Ta) == K2*Q2 - K3*DT)
m.Equation(tau3 * TC1.dt() + TC1 == TH1)
m.Equation(tau3 * TC2.dt() + TC2 == TH2)
# Global Options
m.options.IMODE = 5 # MHE
m.options.EV_TYPE = 2 # Objective type
m.options.NODES = 3 # Collocation nodes
m.options.SOLVER = 3 # IPOPT
m.options.COLDSTART = 0 # COLDSTART on first cycle
# Predict Parameters and Temperatures
# use remote=False for local solve
m.solve()
# Create plot
plt.figure(figsize=(10,7))
ax=plt.subplot(2,1,1)
ax.grid()
plt.plot(tm,T1s,'ro',label=r'$T_1$ measured')
plt.plot(tm,TC1.value,'k-',label=r'$T_1$ predicted')
plt.plot(tm,T2s,'bx',label=r'$T_2$ measured')
plt.plot(tm,TC2.value,'k--',label=r'$T_2$ predicted')
plt.ylabel('Temperature (degC)')
plt.legend(loc=2)
ax=plt.subplot(2,1,2)
ax.grid()
plt.plot(tm,Q1s,'r-',label=r'$Q_1$')
plt.plot(tm,Q2s,'b:',label=r'$Q_2$')
plt.ylabel('Heaters')
plt.xlabel('Time (sec)')
plt.legend(loc='best')
# Print optimal values
print('K1: ' + str(K1.newval))
print('K2: ' + str(K2.newval))
print('K3: ' + str(K3.newval))
print('tau12: ' + str(tau12.newval))
print('tau3: ' + str(tau3.newval))
# Save figure
plt.savefig('tclab_estimation.png')
plt.show()
| apache-2.0 | -3,404,360,134,149,806,000 | 22.318519 | 70 | 0.624733 | false | 2.355093 | false | false | false |
DistrictDataLabs/yellowbrick | yellowbrick/contrib/scatter.py | 1 | 11862 | # yellowbrick.contrib.scatter
# Implements a 2d scatter plot for feature analysis.
#
# Author: Nathan Danielsen
# Created: Fri Feb 26 19:40:00 2017 -0400
#
# Copyright (C) 2017 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: scatter.py [a89633e] [email protected] $
"""
Implements a 2D scatter plot for feature analysis.
"""
##########################################################################
# Imports
##########################################################################
import itertools
import numpy as np
from yellowbrick.features.base import DataVisualizer
from yellowbrick.utils import is_dataframe, is_structured_array
from yellowbrick.utils import has_ndarray_int_columns
from yellowbrick.exceptions import YellowbrickValueError
from yellowbrick.style.colors import resolve_colors
##########################################################################
# Quick Methods
##########################################################################
def scatterviz(
X,
y=None,
ax=None,
features=None,
classes=None,
color=None,
colormap=None,
markers=None,
alpha=1.0,
**kwargs
):
"""Displays a bivariate scatter plot.
This helper function is a quick wrapper to utilize the ScatterVisualizer
(Transformer) for one-off analysis.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n, default: None
An array or series of target or class values
ax : matplotlib axes, default: None
The axes to plot the figure on.
features : list of strings, default: None
The names of two features or columns.
More than that will raise an error.
classes : list of strings, default: None
The names of the classes in the target
color : list or tuple of colors, default: None
Specify the colors for each individual class
colormap : string or matplotlib cmap, default: None
Sequential colormap for continuous target
markers : iterable of strings, default: ,+o*vhd
Matplotlib style markers for points on the scatter plot points
alpha : float, default: 1.0
Specify a transparency where 1 is completely opaque and 0 is completely
transparent. This property makes densely clustered points more visible.
Returns
-------
viz : ScatterVisualizer
Returns the fitted, finalized visualizer
"""
# Instantiate the visualizer
visualizer = ScatterVisualizer(
ax=ax,
features=features,
classes=classes,
color=color,
colormap=colormap,
markers=markers,
alpha=alpha,
**kwargs
)
# Fit and transform the visualizer (calls draw)
visualizer.fit(X, y, **kwargs)
visualizer.transform(X)
# Return the visualizer object
return visualizer
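
# Illustrative usage sketch (added example, not part of the original module):
# the quick method only needs a two-column feature matrix and an integer class
# vector; the random data below is made up purely for demonstration, and the
# returned visualizer would normally be rendered with its show() method.
def _demo_scatterviz():
    X = np.random.uniform(-1, 1, size=(100, 2))
    y = np.random.randint(0, 2, size=100)
    return scatterviz(X, y=y, features=["feature one", "feature two"])
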
##########################################################################
# Static ScatterVisualizer Visualizer
##########################################################################
class ScatterVisualizer(DataVisualizer):
"""
ScatterVisualizer is a bivariate feature data visualization algorithm that
plots using the Cartesian coordinates of each point.
Parameters
----------
ax : a matplotlib plot, default: None
The axis to plot the figure on.
x : string, default: None
        The feature name that corresponds to a column name or index position
in the matrix that will be plotted against the x-axis
y : string, default: None
        The feature name that corresponds to a column name or index position
in the matrix that will be plotted against the y-axis
features : a list of two feature names to use, default: None
List of two features that correspond to the columns in the array.
The order of the two features correspond to X and Y axes on the
graph. More than two feature names or columns will raise an error.
If a DataFrame is passed to fit and features is None, feature names
are selected that are the columns of the DataFrame.
classes : a list of class names for the legend, default: None
If classes is None and a y value is passed to fit then the classes
are selected from the target vector.
color : optional list or tuple of colors to colorize points, default: None
Use either color to colorize the points on a per class basis or
colormap to color them on a continuous scale.
colormap : optional string or matplotlib cmap to colorize points, default: None
Use either color to colorize the points on a per class basis or
colormap to color them on a continuous scale.
markers : iterable of strings, default: ,+o*vhd
Matplotlib style markers for points on the scatter plot points
alpha : float, default: 1.0
Specify a transparency where 1 is completely opaque and 0 is completely
transparent. This property makes densely clustered points more visible.
kwargs : keyword arguments passed to the super class.
These parameters can be influenced later on in the visualization
process, but can and should be set as early as possible.
"""
def __init__(
self,
ax=None,
x=None,
y=None,
features=None,
classes=None,
color=None,
colormap=None,
markers=None,
alpha=1.0,
**kwargs
):
"""
Initialize the base scatter with many of the options required in order
to make the visualization work.
"""
super(ScatterVisualizer, self).__init__(
ax=ax,
features=features,
classes=classes,
color=color,
colormap=colormap,
**kwargs
)
self.x = x
self.y = y
self.alpha = alpha
self.markers = itertools.cycle(
kwargs.pop("markers", (",", "+", "o", "*", "v", "h", "d"))
)
self.color = color
self.colormap = colormap
if self.x is not None and self.y is not None and self.features is not None:
raise YellowbrickValueError("Please specify x,y or features, not both.")
if self.x is not None and self.y is not None and self.features is None:
self.features = [self.x, self.y]
# Ensure with init that features doesn't have more than two features
if features is not None:
if len(features) != 2:
raise YellowbrickValueError(
"ScatterVisualizer only accepts two features."
)
def fit(self, X, y=None, **kwargs):
"""
        The fit method is the primary drawing input for the scatter
visualization since it has both the X and y data required for the
viz and the transform method does not.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with 2 features
y : ndarray or Series of length n
An array or series of target or class values
kwargs : dict
Pass generic arguments to the drawing method
Returns
-------
self : instance
Returns the instance of the transformer/visualizer
"""
_, ncols = X.shape
# NOTE: Do not call super for this class, it conflicts with the fit.
# Setting these variables is similar to the old behavior of DataVisualizer.
# TODO: refactor to make use of the new DataVisualizer functionality
self.features_ = self.features
self.classes_ = self.classes
if ncols == 2:
X_two_cols = X
if self.features_ is None:
self.features_ = ["Feature One", "Feature Two"]
# Handle the feature names if they're None.
elif self.features_ is not None and is_dataframe(X):
X_two_cols = X[self.features_].values
# handle numpy named/ structured array
elif self.features_ is not None and is_structured_array(X):
X_selected = X[self.features_]
X_two_cols = X_selected.copy().view(
(np.float64, len(X_selected.dtype.names))
)
# handle features that are numeric columns in ndarray matrix
elif self.features_ is not None and has_ndarray_int_columns(self.features_, X):
f_one, f_two = self.features_
X_two_cols = X[:, [int(f_one), int(f_two)]]
else:
raise YellowbrickValueError(
"""
ScatterVisualizer only accepts two features, please
explicitly set these two features in the init kwargs or
pass a matrix/ dataframe in with only two columns."""
)
# Store the classes for the legend if they're None.
if self.classes_ is None:
# TODO: Is this the most efficient method?
self.classes_ = [str(label) for label in np.unique(y)]
# Draw the instances
self.draw(X_two_cols, y, **kwargs)
# Fit always returns self.
return self
def draw(self, X, y, **kwargs):
"""Called from the fit method, this method creates a scatter plot that
draws each instance as a class or target colored point, whose location
is determined by the feature data set.
"""
# Set the axes limits
self.ax.set_xlim([-1, 1])
self.ax.set_ylim([-1, 1])
# set the colors
color_values = resolve_colors(
n_colors=len(self.classes_), colormap=self.colormap, colors=self.color
)
colors = dict(zip(self.classes_, color_values))
# Create a data structure to hold the scatter plot representations
to_plot = {}
for kls in self.classes_:
to_plot[kls] = [[], []]
# Add each row of the data set to to_plot for plotting
# TODO: make this an independent function for override
for i, row in enumerate(X):
row_ = np.repeat(np.expand_dims(row, axis=1), 2, axis=1)
x_, y_ = row_[0], row_[1]
kls = self.classes_[y[i]]
to_plot[kls][0].append(x_)
to_plot[kls][1].append(y_)
# Add the scatter plots from the to_plot function
# TODO: store these plots to add more instances to later
# TODO: make this a separate function
for i, kls in enumerate(self.classes_):
self.ax.scatter(
to_plot[kls][0],
to_plot[kls][1],
marker=next(self.markers),
color=colors[kls],
label=str(kls),
alpha=self.alpha,
**kwargs
)
self.ax.axis("equal")
def finalize(self, **kwargs):
"""
Adds a title and a legend and ensures that the axis labels are set as
the feature names being visualized.
Parameters
----------
kwargs: generic keyword arguments.
Notes
-----
Generally this method is called from show and not directly by the user.
"""
# Divide out the two features
feature_one, feature_two = self.features_
# Set the title
self.set_title(
"Scatter Plot: {0} vs {1}".format(str(feature_one), str(feature_two))
)
# Add the legend
self.ax.legend(loc="best")
self.ax.set_xlabel(str(feature_one))
self.ax.set_ylabel(str(feature_two))
# Alias for ScatterViz
ScatterViz = ScatterVisualizer
| apache-2.0 | -159,934,294,901,768,700 | 32.041783 | 87 | 0.583966 | false | 4.587007 | false | false | false |
dichen001/Go4Jobs | JackChen/hash/18. 4Sum.py | 1 | 1449 | """
Given an array S of n integers, are there elements a, b, c, and d in S such that a + b + c + d = target? Find all unique quadruplets in the array which gives the sum of target.
Note: The solution set must not contain duplicate quadruplets.
For example, given array S = [1, 0, -1, 0, -2, 2], and target = 0.
A solution set is:
[
[-1, 0, 0, 1],
[-2, -1, 1, 2],
[-2, 0, 0, 2]
]
"""
class Solution(object):
def fourSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[List[int]]
"""
nums.sort()
results = []
for i in range(len(nums)-3):
if i > 0 and nums[i] == nums[i-1]: continue
sum_3 = target - nums[i]
for j in range(i+1, len(nums) -2):
if j > i+1 and nums[j] == nums[j-1]: continue
l, h, sum_2 = j+1, len(nums) - 1, sum_3 - nums[j]
while l < h:
if nums[l] + nums[h] < sum_2:
l += 1
elif nums[l] + nums[h] > sum_2:
h -= 1
else:
results.append([nums[i], nums[j], nums[l], nums[h]])
while l < h and nums[l] == nums[l+1]: l += 1
while l < h and nums[h] == nums[h-1]: h -= 1
l, h = l+1, h-1
return results | gpl-3.0 | 125,500,663,320,398,770 | 34.275 | 176 | 0.429952 | false | 3.331034 | false | false | false |
santiago-salas-v/walas | node_images.py | 1 | 1746 | import matplotlib
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
patch1 = matplotlib.patches.Circle(
[0.5,0.5],0.05
)
patch2 = matplotlib.patches.Rectangle(
[0.3,0.3],0.4, 0.4, alpha=0.5,
fill=False, edgecolor='black',
linestyle = '--'
)
arrow1 = matplotlib.patches.Arrow(
0, 0.5,0.45,0, width=0.05,
color='black'
)
arrow2 = matplotlib.patches.Arrow(
0.55, 0.5,0.45,0, width=0.05,
color='black'
)
line1 = matplotlib.lines.Line2D(
[0.5,0.5], [0,0.45],
linestyle='--', color='black'
)
text1 = matplotlib.text.Text(
0, 0.45, '$n_{A0}$\n$V_0$\n$U_A=0$'
)
text2 = matplotlib.text.Text(
0.8, 0.45, '$n_{A1}$\n$V_1$\n$U_{A1}$'
)
for artist in [
patch1,patch2,arrow1,arrow2,
line1,text1,text2
]:
ax.add_artist(artist)
ax.set_frame_on(False)
ax.set_axis_off()
ax.set_aspect(1.0)
fig = plt.figure()
ax = fig.add_subplot(111)
patch1 = matplotlib.patches.Circle(
[0.5,0.5],0.05
)
patch2 = matplotlib.patches.Rectangle(
[0.3,0.3],0.4, 0.4, alpha=0.5,
fill=False, edgecolor='black',
linestyle = '--'
)
arrow1 = matplotlib.patches.Arrow(
0, 0.5,0.45,0, width=0.05,
color='black'
)
arrow2 = matplotlib.patches.Arrow(
0.55, 0.5,0.45,0, width=0.05,
color='black'
)
arrow3 = matplotlib.patches.Arrow(
0.5, 0.0, 0,0.45, width=0.05,
color='black'
)
text1 = matplotlib.text.Text(
0, 0.45, '$n_{A0}$\n$V_0$\n$U_A=0$'
)
text2 = matplotlib.text.Text(
0.8, 0.45, '$n_{A1}$\n$V_1$\n$U_{A1}$'
)
text3 = matplotlib.text.Text(
0.55, 0.1, '$n_{Ar}$\n$V_r$'
)
for artist in [
patch1,patch2,arrow1,arrow2,
arrow3,text1,text2,text3
]:
ax.add_artist(artist)
ax.set_frame_on(False)
ax.set_axis_off()
ax.set_aspect(1.0) | mit | -8,638,347,603,755,213,000 | 20.567901 | 42 | 0.611111 | false | 2.282353 | false | false | false |
audiohacked/pyBusPirate | src/buspirate/uart.py | 1 | 5375 | # Created by Sean Nelson on 2018-08-19.
# Copyright 2018 Sean Nelson <[email protected]>
#
# This file is part of pyBusPirate.
#
# pyBusPirate is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# pyBusPirate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyBusPirate. If not, see <https://www.gnu.org/licenses/>.
""" UART class """
from enum import IntEnum
from buspirate.base import BusPirate
class UartSpeed(IntEnum):
""" UART Speed Enum """
BAUD_300 = 0b0000
BAUD_1200 = 0b0001
BAUD_2400 = 0b0010
BAUD_4800 = 0b0011
BAUD_9600 = 0b0100
BAUD_19200 = 0b0101
BAUD_31250 = 0b0110
BAUD_MIDI = 0b0110
MIDI = 0b0110
BAUD_38400 = 0b0111
BAUD_57600 = 0b1000
BAUD_115200 = 0b1010
class UartConfiguration(object):
""" UART Configuration Enum Base """
class PinOutput(IntEnum):
""" Enum for Pin Output """
HIZ = 0b00000
V3P3 = 0b10000
PIN_HIZ = 0b00000
PIN_3P3V = 0b10000
class DataBitsAndParity(IntEnum):
""" Enum for Data bits and Parity """
EIGHT_NONE = 0b0000
EIGHT_EVEN = 0b0100
EIGHT_ODD = 0b1000
NINE_NONE = 0b1100
class StopBits(IntEnum):
""" Enum for Stop bits """
ONE = 0b00
TWO = 0b10
class RxPolarity(IntEnum):
""" Enum for Rx Polarity """
IDLE_1 = 0b0
IDLE_0 = 0b1
class UART(BusPirate):
""" UART BitBanging on the BusPirate """
@property
def enter(self) -> bool:
"""
Enter UART Mode on the BusPirate
:returns: returns Success or Failure
"""
self.write(0x03)
return self.read(4) == "ART1"
def echo_rx(self, start_stop: int = 0) -> bool:
"""
Enable disable RX Echoing
:param start_stop: Give 0 for Start Echo, Give 1 to Stop Echo
:type start_stop: int
:returns: Success or Failure
:rtype: bool
"""
self.write(0x02|start_stop)
return self.read(1) == 0x01
def manual_baudrate(self, brg_register: int = 0x0000) -> bool:
"""
Set Baudrate Manually
:param brg_register: BRG Register value based on 32mhz osc, divider = 2, and BRGH = 1
:type brg_register: int
:returns: Success or Failure
:rtype: bool
"""
data = [0x07, brg_register]
self.write(data)
return self.read(3) == [0x01, 0x01, 0x01]
@property
def bridge_mode(self) -> bool:
"""
Enable Bridge mode. Hard Reset BP to exit.
:returns: Success or Failure
:rtype: bool
"""
self.write(0x0F)
return self.read(1) == 0x01
@property
def speed(self):
""" Speed Property Getter """
return self._speed
@speed.setter
def speed(self, value):
""" Speed Property Setter """
self._speed = value
return self.uart_speed(value)
def uart_speed(self, baudrate: int = UartSpeed.BAUD_115200) -> bool:
"""
Set UART Speed
:param baudrate: Uart Baud Rates
:type baudrate: int
:returns: Success or Failure
:rtype: bool
"""
self.write(0x60|baudrate)
return self.read(1) == 0x01
@property
def config(self):
""" Configuration Property Getter """
return self._config
@config.setter
def config(self, value):
""" Configuration Property Setter """
self._config = value
pin_outputs = value & 0b1000
data_parity = value & 0b0100
uastop_bits = value & 0b0010
rx_polarity = value & 0b0001
return self.uart_configuration(pin_outputs, data_parity, uastop_bits, rx_polarity)
def uart_configuration(self,
pin_output: int = UartConfiguration.PinOutput.HIZ,
databits_parity: int = UartConfiguration.DataBitsAndParity.EIGHT_NONE,
stop_bits: int = UartConfiguration.StopBits.ONE,
rx_polarity: int = UartConfiguration.RxPolarity.IDLE_1) -> bool:
"""
UART Configuration
:param pin_output: The Pin Configuration for Power Pins
:type pin_output: int.
:param clock_phase: The Pin Configuration for Pull Up Pins
:type clock_phase: int.
:param clock_edge: The Pin Configuration for AUX pin
:type clock_edge: int.
:param sample_time: The Pin Configuration for Chip Select Pin
:type sample_time: int.
:returns: returns Success or Failure
:rtype: bool.
"""
uart_configuration = 0
uart_configuration += pin_output
uart_configuration += databits_parity
uart_configuration += stop_bits
uart_configuration += rx_polarity
self.write(0x80|uart_configuration)
return self.read(1) == 0x01
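
# Illustrative usage sketch (added example, not part of the original module):
# it assumes `bp` is an already-constructed and connected UART instance (the
# constructor arguments live in the BusPirate base class and are not shown
# here), and simply walks through the binary-mode setup calls defined above.
def _example_uart_session(bp: "UART") -> bool:
    if not bp.enter:  # enter raw UART mode
        return False
    ok = bp.uart_speed(UartSpeed.BAUD_115200)
    ok &= bp.uart_configuration(
        pin_output=UartConfiguration.PinOutput.V3P3,
        databits_parity=UartConfiguration.DataBitsAndParity.EIGHT_NONE,
        stop_bits=UartConfiguration.StopBits.ONE,
        rx_polarity=UartConfiguration.RxPolarity.IDLE_1,
    )
    ok &= bp.echo_rx(0)  # start echoing received bytes
    return bool(ok)
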
if __name__ == '__main__':
pass
| gpl-2.0 | 5,007,357,886,985,508,000 | 27.439153 | 97 | 0.599256 | false | 3.583333 | true | false | false |
Re4son/Kali-Pi | Menus/menu-9p.py | 1 | 2924 | #!/usr/bin/env python
import kalipi
from kalipi import *
#############################
## Local Functions ##
## Local Functions ##
#############################
#############################
## Buttons ##
# define all of the buttons
label1 = Button(labelPadding * " " + " ", originX, originX, buttonHeight, buttonWidth * 3 + spacing * 2, tron_ora, tron_yel, labelFont)
label2 = Button(labelPadding * " " + " ", originX, originY, buttonHeight, buttonWidth * 3 + spacing * 2, tron_ora, tron_yel, labelFont)
label3 = Button(labelPadding * " " + " ", originX, originY + buttonHeight + spacing, buttonHeight, buttonWidth * 3 + spacing * 2, tron_ora, tron_yel, labelFont)
button7 = Button(labelPadding * " " + " <<<", originX, originY + (buttonHeight * 2) + (spacing * 2), buttonHeight, buttonWidth, tron_ora, tron_yel, labelFont)
button9 = Button(labelPadding * " " + " Refresh", originX + (buttonWidth * 2) + (spacing * 2), originY + (buttonHeight * 2) + (spacing * 2), buttonHeight, buttonWidth, tron_ora, tron_yel, labelFont)
# Define each button press action
def button(number):
if number == 7:
if button7.disable == 1:
return
# Previous page
pygame.quit()
page=os.environ["MENUDIR"] + "menu-pin.py"
retPage=kalipi.get_retPage()
args = [page, retPage]
os.execvp("python", ["python"] + args)
sys.exit()
if number == 9:
if button9.disable == 1:
return
# Refresh
pygame.quit()
menu9p()
## Buttons ##
#############################
def menu9p():
# Init screen
kalipi.screen()
# Outer Border
kalipi.border(tron_ora)
#############################
## Buttons ##
# Buttons and labels
# See variables at the top of the document to adjust the menu
# First Row
# label 1
label1.text=labelPadding * " " + kalipi.get_clock()
label1.draw()
# Second Row
# Button 2
label2.text=labelPadding * " " + kalipi.get_temp()
label2.draw()
# Third Row
# Label 3
label3.text=labelPadding * " " + kalipi.get_volts()
label3.draw()
# Fourth Row
# Button 7
button7.disable = 0 # "1" disables button
if button7.disable == 1:
button7.draw()
else:
# Add button launch code here
button7.draw()
# Button 9
button9.disable = 0 # "1" disables button
if button9.disable == 1:
button9.draw()
else:
# Add button launch code here
button9.draw()
## Buttons ##
#############################
#############################
## Input loop ##
while 1:
butNo=kalipi.inputLoop("menu-9p.py")
button(butNo)
## Input loop ##
#############################
if __name__ == "__main__":
menu9p()
| gpl-3.0 | 1,339,044,619,860,689,200 | 25.107143 | 202 | 0.515048 | false | 3.578947 | false | false | false |
JKarathiya/Lean | Algorithm.Python/InceptionDateSelectionRegressionAlgorithm.py | 1 | 2432 | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import *
from QuantConnect.Algorithm.Framework.Selection import *
from QuantConnect.Data import *
from QuantConnect.Data.UniverseSelection import *
from datetime import timedelta
### <summary>
### Regression algorithm to test universe additions and removals with open positions
### </summary>
### <meta name="tag" content="regression test" />
class InceptionDateSelectionRegressionAlgorithm(QCAlgorithm):
def Initialize(self):
self.SetStartDate(2013,10,1)
self.SetEndDate(2013,10,31)
self.SetCash(100000)
self.changes = None
self.UniverseSettings.Resolution = Resolution.Hour
# select IBM once a week, empty universe the other days
self.AddUniverseSelection(CustomUniverseSelectionModel("my-custom-universe", lambda dt: ["IBM"] if dt.day % 7 == 0 else []))
# Adds SPY 5 days after StartDate and keep it in Universe
self.AddUniverseSelection(InceptionDateUniverseSelectionModel("spy-inception", {"SPY": self.StartDate + timedelta(5)}));
def OnData(self, slice):
if self.changes is None:
return
# we'll simply go long each security we added to the universe
for security in self.changes.AddedSecurities:
self.SetHoldings(security.Symbol, .5)
self.changes = None
def OnSecuritiesChanged(self, changes):
# liquidate removed securities
for security in changes.RemovedSecurities:
self.Liquidate(security.Symbol, "Removed from Universe")
self.changes = changes | apache-2.0 | -5,988,067,062,033,275,000 | 38.209677 | 132 | 0.733333 | false | 3.996711 | false | false | false |
kret0s/gnuhealth-live | tryton/server/trytond-3.8.3/trytond/ir/property.py | 1 | 5773 | # This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
from decimal import Decimal
from ..model import ModelView, ModelSQL, fields
from ..transaction import Transaction
from ..cache import Cache
from ..pool import Pool
__all__ = [
'Property',
]
_CAST = {
'numeric': Decimal,
'integer': int,
'float': float,
}
class Property(ModelSQL, ModelView):
"Property"
__name__ = 'ir.property'
_rec_name = 'field'
value = fields.Reference('Value', selection='models_get')
res = fields.Reference('Resource', selection='models_get', select=True)
field = fields.Many2One('ir.model.field', 'Field',
ondelete='CASCADE', required=True, select=True)
_models_get_cache = Cache('ir_property.models_get', context=False)
@classmethod
def models_get(cls):
pool = Pool()
Model = pool.get('ir.model')
models = cls._models_get_cache.get(None)
if models:
return models
cursor = Transaction().cursor
model = Model.__table__()
cursor.execute(*model.select(model.model, model.name,
order_by=model.name.asc))
models = cursor.fetchall() + [('', '')]
cls._models_get_cache.set(None, models)
return models
@classmethod
def get(cls, names, model, res_ids=None):
"""
Return named property values for each res_ids of model
"""
pool = Pool()
ModelAccess = pool.get('ir.model.access')
res = {}
ModelAccess.check(model, 'read')
names_list = True
if not isinstance(names, list):
names_list = False
names = [names]
if res_ids is None:
res_ids = []
properties = cls.search([
('field.name', 'in', names),
['OR',
('res', '=', None),
('res', 'in', ['%s,%s' % (model, x) for x in res_ids]),
],
], order=[])
default_vals = dict((x, None) for x in names)
for property_ in (x for x in properties if not x.res):
value = property_.value
val = None
if value is not None:
if not isinstance(value, basestring):
val = int(value)
else:
if property_.field.ttype in _CAST:
cast = _CAST[property_.field.ttype]
val = cast(value.split(',')[1])
elif property_.field.ttype in ('char', 'selection'):
val = value.split(',')[1]
else:
raise Exception('Not implemented')
default_vals[property_.field.name] = val
if not res_ids:
if not names_list:
return default_vals[names[0]]
return default_vals
for name in names:
res[name] = dict((x, default_vals[name]) for x in res_ids)
for property_ in (x for x in properties if x.res):
val = None
if property_.value is not None:
if not isinstance(property_.value, basestring):
val = int(property_.value)
else:
if property_.field.ttype in _CAST:
cast = _CAST[property_.field.ttype]
val = cast(property_.value.split(',')[1])
elif property_.field.ttype in ('char', 'selection'):
val = property_.value.split(',')[1]
else:
raise Exception('Not implemented')
res[property_.field.name][int(property_.res)] = val
if not names_list:
return res[names[0]]
return res
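# Illustrative call of the classmethod above (not part of the original
# module; the field and model names are hypothetical):
#   Property.get('standard_price', 'product.product', [1, 2])
#   # returns a dict keyed by record id with the stored (or default) values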
@staticmethod
def _set_values(model, res_id, val, field_id):
return {
'value': val,
'res': model + ',' + str(res_id),
'field': field_id,
}
@classmethod
def set(cls, name, model, ids, val):
"""
Set named property value for ids of model
Return the id of the record created
"""
pool = Pool()
ModelField = pool.get('ir.model.field')
ModelAccess = pool.get('ir.model.access')
ModelAccess.check(model, 'write')
model_field, = ModelField.search([
('name', '=', name),
('model.model', '=', model),
], order=[], limit=1)
Model = pool.get(model)
field = Model._fields[name]
properties = cls.search([
('field', '=', model_field.id),
('res', 'in', [model + ',' + str(res_id) for res_id in ids]),
], order=[])
cls.delete(properties)
defaults = cls.search([
('field', '=', model_field.id),
('res', '=', None),
], order=[], limit=1)
default_val = None
if defaults:
value = cls(defaults[0].id).value
default_val = None
if value is not None:
if not isinstance(value, basestring):
default_val = int(value)
else:
if field._type in _CAST:
cast = _CAST[field._type]
default_val = cast(value.split(',')[1])
elif field._type in ('char', 'selection'):
default_val = value.split(',')[1]
else:
raise Exception('Not implemented')
if (val != default_val):
for res_id in ids:
vals = cls._set_values(model, res_id, val, model_field.id)
cls.create([vals])
| gpl-3.0 | -8,129,677,329,808,463,000 | 32.760234 | 75 | 0.493677 | false | 4.327586 | false | false | false |
larsks/cloud-init | cloudinit/sources/DataSourceBigstep.py | 2 | 1917 | # Copyright (C) 2015-2016 Bigstep Cloud Ltd.
#
# Author: Alexandru Sirbu <[email protected]>
#
# This file is part of cloud-init. See LICENSE file for license information.
import errno
import json
from cloudinit import log as logging
from cloudinit import sources
from cloudinit import url_helper
from cloudinit import util
LOG = logging.getLogger(__name__)
class DataSourceBigstep(sources.DataSource):
dsname = 'Bigstep'
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.metadata = {}
self.vendordata_raw = ""
self.userdata_raw = ""
def _get_data(self, apply_filter=False):
url = get_url_from_file()
if url is None:
return False
response = url_helper.readurl(url)
decoded = json.loads(response.contents.decode())
self.metadata = decoded["metadata"]
self.vendordata_raw = decoded["vendordata_raw"]
self.userdata_raw = decoded["userdata_raw"]
return True
def _get_subplatform(self):
"""Return the subplatform metadata source details."""
return 'metadata (%s)' % get_url_from_file()
def get_url_from_file():
try:
content = util.load_file("/var/lib/cloud/data/seed/bigstep/url")
except IOError as e:
# If the file doesn't exist, then the server probably isn't a Bigstep
# instance; otherwise, another problem exists which needs investigation
if e.errno == errno.ENOENT:
return None
else:
raise
return content
# Used to match classes to dependencies
datasources = [
(DataSourceBigstep, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]
# Return a list of data sources that match this set of dependencies
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
# vi: ts=4 expandtab
| gpl-3.0 | 2,950,398,994,128,199,700 | 27.61194 | 79 | 0.664058 | false | 3.766208 | false | false | false |
praphull27/diskBasedLdaBenchmarkingTools | readXmlAndOutputToTxt.py | 1 | 1444 | from bs4 import BeautifulSoup
import re
import os
import multiprocessing
def read_and_tokenize (file_name):
xml_file_handle = open(file_name, 'rb')
xml_file_contents = xml_file_handle.read()
xml_file_handle.close()
xml_file_text = ''
full_text_all = BeautifulSoup(xml_file_contents).find_all(class_="full_text")
for full_text in full_text_all:
xml_file_text += full_text.get_text(" ")
xml_file_text = re.sub(r'[^a-zA-Z]', ' ', xml_file_text)
xml_file_text = (xml_file_text.strip()).lower()
xml_file_text_tokenized = xml_file_text.split()
xml_file_filtered_words = [word for word in xml_file_text_tokenized if len(word) >=3]
xml_file_filtered_text = " ".join(xml_file_filtered_words)
return xml_file_filtered_text
root_path = "/Users/praphull/Desktop/msProject/nyt_corpus/"
paths = [os.path.join(root, name) for root, dirs, files in os.walk(root_path) for name in files]
paths_list = []
num = 10000
no_of_parts = len(paths) / num
if len(paths) % num != 0:
no_of_parts += 1
paths_list = [paths[a*num:(a+1)*num] for a in range(no_of_parts)]
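# The collected file paths are processed in chunks of `num` (10000) entries;
# each chunk below is tokenized in parallel and appended to the output file.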
out_handle = open("nyt_corpus_original.txt", 'wb')
file_count = 0
for paths in paths_list:
p = multiprocessing.Pool(processes=(multiprocessing.cpu_count() - 1))
results = p.map(read_and_tokenize, paths)
p.close()
p.join()
out_handle.write("\n".join(results) + "\n")
file_count += 1
if file_count % 10 == 0:
print file_count*num
else:
print '.'
out_handle.close()
#1855658 | mit | 5,477,891,135,625,652,000 | 28.489796 | 96 | 0.687673 | false | 2.674074 | false | false | false |
dc3-plaso/dfvfs | dfvfs/credentials/keychain.py | 1 | 2743 | # -*- coding: utf-8 -*-
"""The path specification key chain.
The key chain is used to manage credentials for path specifications.
E.g. BitLocker Drive Encryption (BDE) encrypted volumes can require a
credential (e.g. password) to access the unencrypted data (unlock).
"""
from dfvfs.credentials import manager
class KeyChain(object):
"""Class that implements the key chain."""
def __init__(self):
"""Initializes the key chain."""
super(KeyChain, self).__init__()
self._credentials_per_path_spec = {}
def Empty(self):
"""Empties the key chain."""
self._credentials_per_path_spec = {}
def ExtractCredentialsFromPathSpec(self, path_spec):
"""Extracts credentials from a path specification.
Args:
path_spec (PathSpec): path specification to extract credentials from.
"""
credentials = manager.CredentialsManager.GetCredentials(path_spec)
for identifier in credentials.CREDENTIALS:
value = getattr(path_spec, identifier, None)
if value is None:
continue
self.SetCredential(path_spec, identifier, value)
def GetCredential(self, path_spec, identifier):
"""Retrieves a specific credential from the key chain.
Args:
path_spec (PathSpec): path specification.
identifier (str): credential identifier.
Returns:
object: credential or None if the credential for the path specification
is not set.
"""
credentials = self._credentials_per_path_spec.get(path_spec.comparable, {})
return credentials.get(identifier, None)
def GetCredentials(self, path_spec):
"""Retrieves all credentials for the path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
dict[str,object]: credentials for the path specification.
"""
return self._credentials_per_path_spec.get(path_spec.comparable, {})
def SetCredential(self, path_spec, identifier, data):
"""Sets a specific credential for the path specification.
Args:
path_spec (PathSpec): path specification.
identifier (str): credential identifier.
data (object): credential data.
Raises:
KeyError: if the credential is not supported by the path specification
type.
"""
supported_credentials = manager.CredentialsManager.GetCredentials(path_spec)
if identifier not in supported_credentials.CREDENTIALS:
raise KeyError((
u'Unsuppored credential: {0:s} for path specification type: '
u'{1:s}').format(identifier, path_spec.type_indicator))
credentials = self._credentials_per_path_spec.get(path_spec.comparable, {})
credentials[identifier] = data
self._credentials_per_path_spec[path_spec.comparable] = credentials
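# Illustrative usage sketch (not part of the original module; `bde_path_spec`
# is a hypothetical path specification whose supported credentials include
# 'password'):
#   key_chain = KeyChain()
#   key_chain.SetCredential(bde_path_spec, 'password', 'my-secret')
#   key_chain.GetCredential(bde_path_spec, 'password')  # -> 'my-secret'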
| apache-2.0 | -8,760,681,104,130,690,000 | 31.654762 | 80 | 0.692308 | false | 4.265941 | false | false | false |
hbldh/skboost | skboost/stumps/decision_stump.py | 1 | 17561 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:mod:`decision_stump`
==================
.. module:: decision_stump
:platform: Unix, Windows
:synopsis:
.. moduleauthor:: hbldh <[email protected]>
Created on 2014-08-31, 01:52
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from warnings import warn
from operator import itemgetter
import concurrent.futures as cfut
import psutil
import numpy as np
from scipy.sparse import issparse
import six
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state, check_array
from numpy.lib.arraysetops import unique
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import _tree
try:
import skboost.stumps.ext.classifiers as c_classifiers
except ImportError as e:
c_classifiers = None
__all__ = ["DecisionStump", ]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
class DecisionStump(DecisionTreeClassifier):
"""A decision tree classifier.
Parameters
----------
criterion : string, optional (default="gini")
Not used in Stratos Decision Stump.
max_features : int, float, string or None, optional (default=None)
Not used in Stratos Decision Stump.
max_depth : integer or None, optional (default=None)
Not used in Stratos Decision Stump. Always a depth 1 tree.
min_samples_split : integer, optional (default=2)
Not used in Stratos Decision Stump.
min_samples_leaf : integer, optional (default=1)
Not used in Stratos Decision Stump.
random_state : int, RandomState instance or None, optional (default=None)
Not used in Stratos Decision Stump. Nothing random in learning.
Attributes
----------
`tree_` : Tree object
The underlying Tree object.
`classes_` : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
`n_classes_` : int or list
Always 2 for this class.
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
max_features=None,
random_state=None,
min_density=None,
compute_importances=None,
distributed_learning=True,
calculate_probabilites=False,
method='bp'):
super(DecisionStump, self).__init__(criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
max_features=max_features,
random_state=random_state)
if min_density is not None:
warn("The min_density parameter is deprecated as of version 0.14 "
"and will be removed in 0.16.", DeprecationWarning)
if compute_importances is not None:
warn("Setting compute_importances is no longer required as "
"version 0.14. Variable importances are now computed on the "
"fly when accessing the feature_importances_ attribute. "
"This parameter will be removed in 0.16.",
DeprecationWarning)
self.distributed_learning = distributed_learning
self.calculate_probabilites = calculate_probabilites
self.method = method
def fit(self, X, y, sample_mask=None,
X_argsorted=None, check_input=True, sample_weight=None):
# Deprecations
if sample_mask is not None:
warn("The sample_mask parameter is deprecated as of version 0.14 "
"and will be removed in 0.16.", DeprecationWarning)
# Convert data
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity against vs
# [:, np.newaxis] that does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
for k in six.moves.range(self.n_outputs_):
classes_k, y[:, k] = unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
max_depth = 1
max_features = 10
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if self.method == 'bp':
self.tree_ = _fit_binary_decision_stump_breakpoint(
X, y, sample_weight, X_argsorted, self.calculate_probabilites)
elif self.method == 'bp_threaded':
self.tree_ = _fit_binary_decision_stump_breakpoint_threaded(
X, y, sample_weight, X_argsorted, self.calculate_probabilites)
else:
self.tree_ = _fit_binary_decision_stump_breakpoint(
X, y, sample_weight, X_argsorted, self.calculate_probabilites)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
X = check_array(X, dtype=DTYPE)
n_samples, n_features = X.shape
if self.tree_ is None:
raise Exception("Tree not initialized. Perform a fit first")
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
if self.tree_.get('direction') > 0:
return ((X[:, self.tree_.get('best_dim')] > self.tree_.get('threshold')) * 2) - 1
else:
return ((X[:, self.tree_.get('best_dim')] <= self.tree_.get('threshold')) * 2) - 1
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by arithmetical order.
"""
if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
X = check_array(X, dtype=DTYPE)
n_samples, n_features = X.shape
if self.tree_ is None:
raise Exception("Tree not initialized. Perform a fit first.")
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
proba = np.array(self.tree_['probabilities']).take(self.predict(X) > 0, axis=0)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in six.moves.range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def _fit_binary_decision_stump_breakpoint(X, y, sample_weight,
argsorted_X=None,
calculate_probabilities=False):
Y = (y.flatten() * 2) - 1
results = {
'min_value': None,
'best_dim': 0,
'threshold': 0,
'direction': 0,
'probabilities': []
}
if sample_weight is None:
sample_weight = np.ones(shape=(X.shape[0],), dtype='float') / (X.shape[0],)
else:
sample_weight /= np.sum(sample_weight)
classifier_result = []
for dim in six.moves.range(X.shape[1]):
if argsorted_X is not None:
sorted_x = X[argsorted_X[:, dim], dim]
w = sample_weight[argsorted_X[:, dim]]
sorted_y = Y[argsorted_X[:, dim]]
else:
data_order = np.argsort(X[:, dim])
sorted_x = X[data_order, dim]
w = sample_weight[data_order]
sorted_y = Y[data_order]
breakpoint_indices = np.where(np.diff(sorted_x))[0] + 1
w_pos_c = (w * (sorted_y > 0)).cumsum()
w_neg_c = (w * (sorted_y < 0)).cumsum()
left_errors = w_pos_c[breakpoint_indices] - w_neg_c[breakpoint_indices] + w_neg_c[-1]
right_errors = w_neg_c[breakpoint_indices] - w_pos_c[breakpoint_indices] + w_pos_c[-1]
best_left_point = np.argmin(left_errors)
best_right_point = np.argmin(right_errors)
if best_left_point < best_right_point:
output = [dim,
left_errors[best_left_point],
(sorted_x[breakpoint_indices[best_left_point] + 1] +
sorted_x[breakpoint_indices[best_left_point]]) / 2,
1]
else:
output = [dim,
right_errors[best_right_point],
(sorted_x[breakpoint_indices[best_right_point] + 1] +
sorted_x[breakpoint_indices[best_right_point]]) / 2,
-1]
classifier_result.append(output)
del sorted_x, sorted_y, left_errors, right_errors, w, w_pos_c, w_neg_c
# Sort the returned data after lowest error.
classifier_result = sorted(classifier_result, key=itemgetter(1))
best_result = classifier_result[0]
results['best_dim'] = int(best_result[0])
results['min_value'] = float(best_result[1])
# If the data is in integers, then set the threshold in integer as well.
if X.dtype.kind in ('u', 'i'):
results['threshold'] = int(best_result[2])
else:
results['threshold'] = float(best_result[2])
# Direction is defined as 1 if the positives labels are at
# higher values and -1 otherwise.
results['direction'] = int(best_result[3])
if calculate_probabilities:
results['probabilities'] = _calculate_probabilities(
X[:, results['best_dim']], Y, results)
return results
def _fit_binary_decision_stump_breakpoint_threaded(X, y, sample_weight,
argsorted_X=None,
calculate_probabilities=False):
Y = y.flatten() * 2 - 1
results = {
'min_value': None,
'best_dim': 0,
'threshold': 0,
'direction': 0,
'probabilities': []
}
if sample_weight is None:
sample_weight = np.ones(shape=(X.shape[0],), dtype='float') / (X.shape[0],)
else:
sample_weight /= np.sum(sample_weight)
classifier_result = []
tpe = cfut.ThreadPoolExecutor(max_workers=psutil.cpu_count())
futures = []
if argsorted_X is not None:
for dim in six.moves.range(X.shape[1]):
futures.append(
tpe.submit(_breakpoint_learn_one_dimension, dim, X[:, dim], Y, sample_weight, argsorted_X[:, dim]))
else:
for dim in six.moves.range(X.shape[1]):
futures.append(tpe.submit(_breakpoint_learn_one_dimension, dim, X[:, dim], Y, sample_weight))
for future in cfut.as_completed(futures):
classifier_result.append(future.result())
# Sort the returned data after lowest error.
classifier_result = sorted(classifier_result, key=itemgetter(1))
best_result = classifier_result[0]
results['best_dim'] = int(best_result[0])
results['min_value'] = float(best_result[1])
# If the data is in integers, then set the threshold in integer as well.
if X.dtype.kind in ('u', 'i'):
results['threshold'] = int(best_result[2])
else:
results['threshold'] = float(best_result[2])
# Direction is defined as 1 if the positives labels are at
# higher values and -1 otherwise.
results['direction'] = int(best_result[3])
if calculate_probabilities:
results['probabilities'] = _calculate_probabilities(X[:, results['best_dim']], Y, results)
return results
def _calculate_probabilities(X, Y, results):
if results['direction'] > 0:
labels = X > results['threshold']
else:
labels = X <= results['threshold']
n_correct_negs = sum(Y[-labels] < 0)
n_false_negs = sum(Y[-labels] > 0)
n_false_pos = sum(Y[labels] < 0)
n_correct_pos = sum(Y[labels] > 0)
return [[n_correct_negs / len(Y), n_false_negs / len(Y)],
[n_false_pos / len(Y), n_correct_pos / len(Y)]]
def _breakpoint_learn_one_dimension(dim_nbr, x, y, sample_weights, sorting_argument=None):
if sorting_argument is None:
sorting_argument = np.argsort(x)
sorted_x = x[sorting_argument]
w = sample_weights[sorting_argument]
sorted_y = y[sorting_argument]
breakpoint_indices = np.where(np.diff(sorted_x))[0] + 1
w_pos_c = (w * (sorted_y > 0)).cumsum()
w_neg_c = (w * (sorted_y < 0)).cumsum()
left_errors = w_pos_c[breakpoint_indices] - w_neg_c[breakpoint_indices] + w_neg_c[-1]
right_errors = w_neg_c[breakpoint_indices] - w_pos_c[breakpoint_indices] + w_pos_c[-1]
best_left_point = np.argmin(left_errors)
best_right_point = np.argmin(right_errors)
if best_left_point < best_right_point:
output = [dim_nbr,
left_errors[best_left_point],
(sorted_x[breakpoint_indices[best_left_point] - 1] +
sorted_x[breakpoint_indices[best_left_point]]) / 2,
1]
else:
output = [dim_nbr,
right_errors[best_right_point],
(sorted_x[breakpoint_indices[best_right_point] + 1] +
sorted_x[breakpoint_indices[best_right_point]]) / 2,
-1]
return output
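# Illustrative usage sketch (not part of the original module; X_train, y_train
# and X_test are hypothetical numpy arrays, y_train holding binary labels):
#   stump = DecisionStump(method='bp', calculate_probabilites=True)
#   stump.fit(X_train, y_train)
#   predictions = stump.predict(X_test)          # values in {-1, +1}
#   probabilities = stump.predict_proba(X_test)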
| mit | -5,267,852,490,259,074,000 | 35.509356 | 115 | 0.553442 | false | 3.937444 | false | false | false |
deepmind/lab2d | dmlab2d/lib/game_scripts/levels/clean_up/play.py | 1 | 3449 | # Copyright 2020 The DMLab2D Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple human player for testing the `clean_up` level.
Use `WASD` keys to move the character around.
Use `Q and E` to turn the character.
Use `SPACE` to fire clean.
Use `LEFT_CTRL` to fire fine.
Use `TAB` to switch between players.
Use `[]` to switch between levels.
Use `R` to restart a level.
Use `ESCAPE` to quit.
"""
import argparse
import collections
import json
from typing import Mapping
from dmlab2d import ui_renderer
_ACTION_MAP = {
'move': ui_renderer.get_direction_pressed,
'turn': ui_renderer.get_turn_pressed,
'fireClean': ui_renderer.get_space_key_pressed,
'fireFine': ui_renderer.get_left_control_pressed
}
_FRAMES_PER_SECOND = 8
def _run(rgb_observation: str, config: Mapping[str, str]):
"""Run multiplayer environment, with per player rendering and actions."""
player_count = int(config.get('numPlayers', '1'))
score = collections.defaultdict(float)
total_contrib = collections.defaultdict(float)
prefixes = [str(i + 1) + '.' for i in range(player_count)]
ui = ui_renderer.Renderer(
config=config,
action_map=_ACTION_MAP,
rgb_observation=rgb_observation,
player_prefixes=[str(i + 1) + '.' for i in range(player_count)],
frames_per_second=_FRAMES_PER_SECOND)
def player_printer(idx: int):
print(f'Player({idx}) contrib({total_contrib[idx]}) score({score[idx]})')
for step in ui.run():
if step.type == ui_renderer.StepType.FIRST:
print(f'=== Start episode {step.episode} ===')
print_player = False
for idx, prefix in enumerate(prefixes):
reward = step.env.observation(prefix + 'REWARD')
score[idx] += reward
contrib = step.env.observation(prefix + 'CONTRIB')
total_contrib[idx] += contrib
if step.player == idx and (reward != 0 or contrib != 0):
print_player = True
if print_player:
player_printer(step.player)
if step.type == ui_renderer.StepType.LAST:
print(f'=== End episode {step.episode} ===')
for idx in range(player_count):
player_printer(idx)
print('======')
print('=== Exiting ===')
for idx in range(player_count):
player_printer(idx)
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--observation', type=str, default='RGB', help='Observation to render')
parser.add_argument(
'--settings', type=json.loads, default={}, help='Settings as JSON string')
parser.add_argument(
'--players', type=int, default=4, help='Number of players.')
args = parser.parse_args()
if 'levelName' not in args.settings:
args.settings['levelName'] = 'clean_up'
if 'numPlayers' not in args.settings:
args.settings['numPlayers'] = args.players
for k in args.settings:
args.settings[k] = str(args.settings[k])
_run(args.observation, args.settings)
if __name__ == '__main__':
main()
| apache-2.0 | 7,325,948,526,025,038,000 | 30.642202 | 80 | 0.677298 | false | 3.548354 | false | false | false |
cdiener/pyart | asciinator.py | 1 | 1723 | #!/usr/bin/env python
# asciinator.py
#
# Copyright 2014 Christian Diener <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
from __future__ import print_function # for python2 compat
import sys;
from PIL import Image;
import numpy as np
# ascii chars sorted by "density"
chars = np.asarray(list(' .,:;irsXA253hMHGS#9B&@'))
# check command line arguments
if len(sys.argv) != 4:
print( 'Usage: asciinator.py image scale factor' )
sys.exit()
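# Illustrative invocation (not part of the original script; the file name and
# numeric factors are made up): python asciinator.py photo.jpg 0.1 1.5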
# set basic program parameters
# f = filename, SC = scale, GCF = gamma correction factor, WCF = width correction factor
f, SC, GCF, WCF = sys.argv[1], float(sys.argv[2]), float(sys.argv[3]), 7.0/4.0
# open, scale and normalize image by pixel intensities
img = Image.open(f)
S = (int(img.size[0]*SC*WCF), int(img.size[1]*SC))
img = np.sum( np.asarray(img.resize(S), dtype="float"), axis=2)
img -= img.min()
img = (1.0 - img/img.max())**GCF*(chars.size-1)
# Assemble and print ascii art
print( "\n".join(("".join(r) for r in chars[img.astype(int)])))
print()
| gpl-3.0 | -7,175,192,178,625,269,000 | 32.784314 | 89 | 0.702263 | false | 3.098921 | false | false | false |
katchengli/tech-interview-prep | interview_cake/ic3.py | 1 | 1451 | #constraint: list_of_ints will always have at least 3 integers
#can have negative numbers
def highest_product_three_ints(list_of_ints):
biggest_int = max(list_of_ints)
list_of_ints.remove(biggest_int)
max_int1 = max(list_of_ints)
list_of_ints.remove(max_int1)
max_int2 = max(list_of_ints)
list_of_ints.remove(max_int2)
if list_of_ints:
min_int1 = min(list_of_ints)
list_of_ints.remove(min_int1)
else:
return biggest_int * max_int1 * max_int2
if list_of_ints:
min_int2 = min(list_of_ints)
#list_of_ints.remove(min_int2)
else:
min_int2 = max_int2
potent_highest_product1 = biggest_int * min_int1 * min_int2
potent_highest_product2 = biggest_int * max_int1 * max_int2
if potent_highest_product1 > potent_highest_product2:
return potent_highest_product1
else:
return potent_highest_product2
print(highest_product_three_ints([3, 4, 5, 6]))
#should return 120
print(highest_product_three_ints([-10, -10, 5, 6]))
#should return 600
print(highest_product_three_ints([-60, -100, -1, -2]))
#should return -120
print(highest_product_three_ints([600, 200, -1, -2]))
#should return 1200
print(highest_product_three_ints([1000, -1000, -1, 1]))
#should return 1000000
print(highest_product_three_ints([1000, -1000, -1, 1, 800]))
#should return 1000000
print(highest_product_three_ints([1000, -1000, -1, 1, -800]))
#should return 800000000
| apache-2.0 | 2,525,693,542,367,842,300 | 30.543478 | 63 | 0.671261 | false | 2.890438 | false | false | false |
hemidactylus/flaskbiblio | config.py | 1 | 1074 | import os
# directories and so on
basedir = os.path.abspath(os.path.dirname(__file__))
DB_DIRECTORY=os.path.join(basedir,'app/database')
DB_NAME='biblio.db'
# stuff for Flask
WTF_CSRF_ENABLED = True
from sensible_config import SECRET_KEY
# formats, etc
DATETIME_STR_FORMAT = '%Y-%m-%d %H:%M:%S'
SHORT_DATETIME_STR_FORMAT = '%d/%m/%y'
FILENAME_DATETIME_STR_FORMAT = '%Y_%m_%d'
USERS_TIMEZONE='Europe/Rome'
# similarity thresholds for author (last- and complete-) names
SIMILAR_USE_DIGRAMS=True # otherwise: use single-letter grams
# Different thresholds are required depending on the type of vectoring
if SIMILAR_USE_DIGRAMS:
SIMILAR_AUTHOR_THRESHOLD=0.7
SIMILAR_BOOK_THRESHOLD=0.7
else:
SIMILAR_AUTHOR_THRESHOLD=0.90
SIMILAR_BOOK_THRESHOLD=0.93
# what are the smallest tokens to employ in similar-search in book titles?
MINIMUM_SIMILAR_BOOK_TOKEN_SIZE=4
# Are multiple books with the same title allowed? (suggested: yes)
ALLOW_DUPLICATE_BOOKS=True
# temporary directory for storing import-related files
TEMP_DIRECTORY=os.path.join(basedir,'app/temp')
| gpl-3.0 | -1,613,392,918,894,984,700 | 29.685714 | 74 | 0.752328 | false | 3.016854 | false | false | false |
mharrys/sudoku | sudoku.py | 1 | 7848 | import fileinput
from dlx import DLX
from numpy import array, unique
from optparse import OptionParser
class SudokuError(Exception):
"""Raised when any error related to Sudoku is found during construction
and validation such as unexpected values or contradictions.
"""
def __init__(self, value):
self.value = value
def __str__(self):
return self.value.encode('string_escape')
class Sudoku(object):
"""Complete all necessary steps to solve a Sudoku challenge using
Dancing Links (DLX) including validating the challenge and building and
validating the possible solution found by DLX.
The expected input is one line of 81 characters where each unknown digit
is represented as a '.' (dot).
"""
def __init__(self, validate, pretty):
self.validate = validate
self.pretty = pretty
def solve(self, line):
"""Return list of solutions from specified line.
Return empty list if no solutions are found and return at most
one solution if validation is enabled or all solutions if validation
is disabled. It is possible for a Sudoku challenge to have more than
one solution, but such a challenge is considered to be invalid.
"""
grid = self.build_challenge(line)
self.validate_challenge(grid)
self.grids = []
dlx = DLX.from_sudoku(grid, self.result)
dlx.run(self.validate)
return self.grids
def build_challenge(self, line):
"""Returns 9x9 numpy array from specified line.
SudokuError is raised if unexpected value is found.
"""
grid = []
for c in line:
if c != '.':
if c < '1' or c > '9':
msg = 'Unexpected value "%s" when building challenge.' % c
raise SudokuError(msg)
grid.append(int(c))
else:
grid.append(0)
return array(grid).reshape(9, 9)
def validate_challenge(self, grid):
"""Search specified grid (9x9 numpy array) for contradictions.
SudokuError is raised if a contradiction is found.
"""
# validate rows
for row in grid:
cells = []
for cell in row:
if cell != 0:
if cell in cells:
msg = 'Row digits are not unique in challenge.'
raise SudokuError(msg)
else:
cells.append(cell)
# validate columns
for column in grid.transpose():
cells = []
for cell in column:
if cell != 0:
if cell in cells:
msg = 'Column digits are not unique in challenge.'
raise SudokuError(msg)
else:
cells.append(cell)
# validate boxes
for i in range(3):
# row slice
rs = i * 3
re = i * 3 + 3
for j in range(3):
# column slice
cs = j * 3
ce = j * 3 + 3
# box slice
box = grid[rs:re, cs:ce]
cells = []
for cell in box.flatten():
if cell != 0:
if cell in cells:
msg = 'Box digits are not unique in challenge.'
raise SudokuError(msg)
else:
cells.append(cell)
def build_solution(self, s):
"""Return 9x9 grid from a solution found by DLX.
"""
rows = []
for k in s:
rows.append(k.ID)
rows.sort()
grid = []
for row in rows:
grid.append(row % 9 + 1)
return array(grid).reshape(9, 9)
def validate_solution(self, grid):
"""Search specified grid (9x9 numpy array) for contradictions.
SudokuError is raised if a contradiction is found.
"""
# validate cells
for cell in grid.flatten():
if cell not in range(1, 10):
msg = 'Cell digit is not between 1 and 9 in solution.'
raise SudokuError(msg)
# validate rows
for row in grid:
if unique(row).size != 9:
msg = 'Row digits are not unique in solution.'
raise SudokuError(msg)
# validate columns
for col in grid.transpose():
if unique(col).size != 9:
msg = 'Column digits are not unique in solution.'
raise SudokuError(msg)
# validate boxes
for i in range(3):
# row slice
rs = i * 3
re = i * 3 + 3
for j in range(3):
# column slice
cs = j * 3
ce = j * 3 + 3
# box slice
box = grid[rs:re, cs:ce]
if unique(box.flatten()).size != 9:
msg = 'Box digits are not unique in solution.'
raise SudokuError(msg)
def result(self, solutions, s):
"""Build, validate and save recieved solution.
SudokuError is raised if validation is enabled and more than one
solution exists or a contradiction is found in the solution.
"""
grid = self.build_solution(s)
if self.validate:
if solutions > 1:
msg = 'More than one solution exist.'
raise SudokuError(msg)
self.validate_solution(grid)
if self.pretty:
self.grids.append(self.format_pretty(grid))
else:
self.grids.append(self.format_simple(grid))
def format_simple(self, grid):
"""Return solution in the same format as expected input line.
"""
f = ''
for s in grid.ravel():
f += str(s)
return f
def format_pretty(self, grid):
"""Return solution in a more human readable format.
"""
f = '+-------+-------+-------+\n'
for i, s in enumerate(grid):
num = str(s)[1:-1].replace(',', '')
f += '| %s | %s | %s |\n' % (num[0:5], num[6:11], num[12:17])
if (i + 1) % 3 == 0:
f += '+-------+-------+-------+'
if (i + 1) < len(grid):
f += '\n'
return f
def print_error(n, msg):
print('sudoku: Error on line %s: %s' % (n, msg))
def print_solutions(grids):
for grid in grids:
print(grid)
def solve_line(sudoku, line, line_num):
if len(line) < 82 or line[81] != '\n':
print_error(line_num, 'Input line must be exactly 81 chars long.')
else:
grids = []
try:
grids = sudoku.solve(line[:81]) # slice off '\n'
except SudokuError as e:
print_error(line_num, e)
else:
print_solutions(grids)
def solve_line_by_line(options, args):
sudoku = Sudoku(options.validate, options.pretty)
for line in fileinput.input(args):
solve_line(sudoku, line, fileinput.lineno())
if __name__ == '__main__':
parser = OptionParser()
parser.add_option(
'-v',
'--validate',
dest='validate',
help='validate solution (longer search time)',
action='store_true'
)
parser.add_option(
'-p',
'--pretty',
dest='pretty',
help='pretty print solution',
action='store_true'
)
options, args = parser.parse_args()
try:
solve_line_by_line(options, args)
except IOError as e:
print('sudoku: %s' % e)
except (KeyboardInterrupt, SystemExit) as e:
print('')
print('sudoku: Interrupt caught ... exiting')
| gpl-3.0 | 3,664,689,965,295,890,400 | 28.727273 | 78 | 0.507645 | false | 4.208043 | false | false | false |
Bekt/tweetement | src/service.py | 1 | 3578 | import logging
import string
import tweepy
from credentials import (consumer_key, consumer_secret)
from models import Stopword
from collections import Counter
class Service(object):
# Maps uppercase to lowercase and deletes any punctuation.
trans = {ord(string.ascii_uppercase[i]): ord(string.ascii_lowercase[i])
for i in range(26)}
trans.update({ord(c): None for c in string.punctuation})
def __init__(self, access_token='', access_token_secret=''):
self._tw_api = None
self._access_token = access_token
self._access_token_secret = access_token_secret
@property
def tw_api(self):
"""Tweepy API client."""
if self._tw_api is None:
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(self._access_token, self._access_token_secret)
self._tw_api = tweepy.API(auth)
return self._tw_api
def fetch(self, query, limit=100):
"""Fetches search results for the given query."""
# Cursor doesn't work with dev_appserver.py :(
# return list(tweepy.Cursor(self.tw_api.search, q=query, lang='en',
# result_type='popular').items(limit))
query += ' -filter:retweets'
# Try to get as many 'popular' posts as possible.
# Twitter limits this really hard.
res_type = 'popular'
last_id = -1
tweets = []
while len(tweets) < limit:
count = limit - len(tweets)
try:
t = self.tw_api.search(q=query, count=count, result_type=res_type,
lang='en', max_id=str(last_id - 1))
if len(t) < 3 and res_type == 'popular':
tweets.extend(t)
res_type = 'mixed'
last_id = -1
continue
if len(t) < 3 and res_type == 'mixed':
tweets.extend(t)
break
tweets.extend(t)
last_id = t[-1].id
except tweepy.TweepError as e:
logging.exception(e)
break
return tweets
@staticmethod
def top_hashtags(tweets, limit=5):
"""Extracts most frequent hashtags from given tweets."""
hashtags = Counter()
for t in tweets:
for h in t.entities['hashtags']:
if 'text' in h:
hashtags[h['text'].lower()] += 1
top = hashtags.most_common(limit)
return ['#' + t[0] for t in top]
@staticmethod
def top_keywords(tweets, limit=5, exclude=set()):
"""Extracts most frequent keywords from given tweets."""
exc = set()
for w in exclude:
ok, text = _token_okay(w)
if ok:
exc.add(text)
words = Counter()
for t in tweets:
for token in set(t.text.split()):
ok, text = _token_okay(token)
if ok and text not in exc:
words[text] += 1
top = words.most_common(limit)
return [t[0] for t in top]
def _token_okay(text):
"""Decides whether the given token is a valid expandable query."""
text = ''.join(c for c in text if 127 > ord(c) > 31)
try:
text = text.translate(Service.trans)
except TypeError:
return False, text
if (len(text) < 2 or text.isdigit()
or Stopword.gql('WHERE token = :1', text).get() is not None):
return False, text
return True, text
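# Illustrative usage sketch (not part of the original module; the OAuth tokens
# are placeholders supplied by the caller):
#   service = Service(access_token='<token>', access_token_secret='<secret>')
#   tweets = service.fetch('python', limit=50)
#   Service.top_hashtags(tweets, limit=5)
#   Service.top_keywords(tweets, limit=5, exclude={'python'})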
| mit | -3,306,388,078,736,274,000 | 34.425743 | 82 | 0.536333 | false | 3.906114 | false | false | false |
nbessi/pyhiccup | pyhiccup/page.py | 1 | 3037 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi
# Copyright 2014
# Original concept by James Reeves
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License 3
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from __future__ import unicode_literals
DOC_TYPES = {
'html4': "<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 4.01//EN\" "
"\"http://www.w3.org/TR/html4/strict.dtd\">\n",
'xhtml-strict': "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 ""Strict//EN\" "
"\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">\n",
'xhtml-transitional': "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" "
"\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n",
'html5': "<!DOCTYPE html>\n",
}
DEFAULT_XMLNS = 'http://www.w3.org/1999/xhtml'
XMl_DECLARATION = '<?xml version="1.0" encoding="UTF-8"?>'
def get_doc_type(doc_type):
"""Return a DOCTYPE declaration
:param doc_type: doc type string must be in ``page.DOC_TYPES``
:type doc_type: str
:return: DOCTYPE declaration
:rtype: str
"""
if doc_type not in DOC_TYPES:
raise ValueError(
'Invalid DOCTYPE %s available values are %s' %
(doc_type, DOC_TYPES.keys())
)
return DOC_TYPES[doc_type]
def build_html_enclosing_tag(etype, **kwargs):
"""Generate html tag list representation
:param etype: html doc type `html5, html4, xhtml-strict,
xhtml-transitional`
:type etype: str
:param kwargs: dict of attribute for HTML tag will override defaults
:type kwargs: dict
:return: html tag list representation ['html', {'xmlns': ...}]
:rtype: dict
"""
attrs = {}
if etype in DOC_TYPES:
attrs['lang'] = 'en'
attrs['dir'] = 'rtl'
attrs['xml:lang'] = 'en'
if 'xhtml' in etype:
attrs[u'xmlns'] = DEFAULT_XMLNS
attrs.update(kwargs)
return ['html', attrs]
def build_xml_enclosing_tag(etype, **kwargs):
"""Generate XML root tag list representation
:param etype: root tag name
:type etype: str
:param kwargs: dict of attribute for root tag
:type kwargs: dict
:return: root xml tag list representation ['atag', {'attr': ...}]
:rtype: dict
"""
return [etype, kwargs]
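# Illustrative usage sketch (not part of the original module):
#   get_doc_type('html5')                        # -> '<!DOCTYPE html>\n'
#   build_html_enclosing_tag('html5', id='app')
#   # -> ['html', {'lang': 'en', 'dir': 'rtl', 'xml:lang': 'en', 'id': 'app'}]
#   build_xml_enclosing_tag('feed', version='1.0')  # -> ['feed', {'version': '1.0'}]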
| agpl-3.0 | 384,448,600,303,087,940 | 32.01087 | 93 | 0.591373 | false | 3.667874 | false | false | false |
docusign/docusign-python-client | docusign_esign/models/external_file.py | 1 | 7550 | # coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ExternalFile(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'_date': 'str',
'id': 'str',
'img': 'str',
'name': 'str',
'size': 'str',
'supported': 'str',
'type': 'str',
'uri': 'str'
}
attribute_map = {
'_date': 'date',
'id': 'id',
'img': 'img',
'name': 'name',
'size': 'size',
'supported': 'supported',
'type': 'type',
'uri': 'uri'
}
def __init__(self, _date=None, id=None, img=None, name=None, size=None, supported=None, type=None, uri=None): # noqa: E501
"""ExternalFile - a model defined in Swagger""" # noqa: E501
self.__date = None
self._id = None
self._img = None
self._name = None
self._size = None
self._supported = None
self._type = None
self._uri = None
self.discriminator = None
if _date is not None:
self._date = _date
if id is not None:
self.id = id
if img is not None:
self.img = img
if name is not None:
self.name = name
if size is not None:
self.size = size
if supported is not None:
self.supported = supported
if type is not None:
self.type = type
if uri is not None:
self.uri = uri
@property
def _date(self):
"""Gets the _date of this ExternalFile. # noqa: E501
# noqa: E501
:return: The _date of this ExternalFile. # noqa: E501
:rtype: str
"""
return self.__date
@_date.setter
def _date(self, _date):
"""Sets the _date of this ExternalFile.
# noqa: E501
:param _date: The _date of this ExternalFile. # noqa: E501
:type: str
"""
self.__date = _date
@property
def id(self):
"""Gets the id of this ExternalFile. # noqa: E501
# noqa: E501
:return: The id of this ExternalFile. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ExternalFile.
# noqa: E501
:param id: The id of this ExternalFile. # noqa: E501
:type: str
"""
self._id = id
@property
def img(self):
"""Gets the img of this ExternalFile. # noqa: E501
# noqa: E501
:return: The img of this ExternalFile. # noqa: E501
:rtype: str
"""
return self._img
@img.setter
def img(self, img):
"""Sets the img of this ExternalFile.
# noqa: E501
:param img: The img of this ExternalFile. # noqa: E501
:type: str
"""
self._img = img
@property
def name(self):
"""Gets the name of this ExternalFile. # noqa: E501
# noqa: E501
:return: The name of this ExternalFile. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ExternalFile.
# noqa: E501
:param name: The name of this ExternalFile. # noqa: E501
:type: str
"""
self._name = name
@property
def size(self):
"""Gets the size of this ExternalFile. # noqa: E501
Reserved: TBD # noqa: E501
:return: The size of this ExternalFile. # noqa: E501
:rtype: str
"""
return self._size
@size.setter
def size(self, size):
"""Sets the size of this ExternalFile.
Reserved: TBD # noqa: E501
:param size: The size of this ExternalFile. # noqa: E501
:type: str
"""
self._size = size
@property
def supported(self):
"""Gets the supported of this ExternalFile. # noqa: E501
# noqa: E501
:return: The supported of this ExternalFile. # noqa: E501
:rtype: str
"""
return self._supported
@supported.setter
def supported(self, supported):
"""Sets the supported of this ExternalFile.
# noqa: E501
:param supported: The supported of this ExternalFile. # noqa: E501
:type: str
"""
self._supported = supported
@property
def type(self):
"""Gets the type of this ExternalFile. # noqa: E501
# noqa: E501
:return: The type of this ExternalFile. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this ExternalFile.
# noqa: E501
:param type: The type of this ExternalFile. # noqa: E501
:type: str
"""
self._type = type
@property
def uri(self):
"""Gets the uri of this ExternalFile. # noqa: E501
# noqa: E501
:return: The uri of this ExternalFile. # noqa: E501
:rtype: str
"""
return self._uri
@uri.setter
def uri(self, uri):
"""Sets the uri of this ExternalFile.
# noqa: E501
:param uri: The uri of this ExternalFile. # noqa: E501
:type: str
"""
self._uri = uri
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ExternalFile, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ExternalFile):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| mit | 7,504,362,721,954,708,000 | 23.121406 | 140 | 0.513907 | false | 4.046088 | false | false | false |
edx/ecommerce | ecommerce/extensions/voucher/migrations/0001_initial.py | 1 | 3161 | # -*- coding: utf-8 -*-
from decimal import Decimal
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('order', '0001_initial'),
('offer', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Voucher',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(verbose_name='Name', max_length=128, help_text='This will be shown in the checkout and basket once the voucher is entered')),
('code', models.CharField(max_length=128, verbose_name='Code', unique=True, db_index=True, help_text='Case insensitive / No spaces allowed')),
('usage', models.CharField(default='Multi-use', max_length=128, verbose_name='Usage', choices=[('Single use', 'Can be used once by one customer'), ('Multi-use', 'Can be used multiple times by multiple customers'), ('Once per customer', 'Can only be used once per customer')])),
('start_datetime', models.DateTimeField(verbose_name='Start datetime')),
('end_datetime', models.DateTimeField(verbose_name='End datetime')),
('num_basket_additions', models.PositiveIntegerField(default=0, verbose_name='Times added to basket')),
('num_orders', models.PositiveIntegerField(default=0, verbose_name='Times on orders')),
('total_discount', models.DecimalField(default=Decimal('0.00'), max_digits=12, decimal_places=2, verbose_name='Total discount')),
('date_created', models.DateField(auto_now_add=True)),
('offers', models.ManyToManyField(related_name='vouchers', verbose_name='Offers', to='offer.ConditionalOffer')),
],
options={
'verbose_name_plural': 'Vouchers',
'get_latest_by': 'date_created',
'verbose_name': 'Voucher',
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='VoucherApplication',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_created', models.DateField(auto_now_add=True, verbose_name='Date Created')),
('order', models.ForeignKey(verbose_name='Order', to='order.Order', on_delete=models.CASCADE)),
('user', models.ForeignKey(null=True, verbose_name='User', to=settings.AUTH_USER_MODEL, blank=True, on_delete=models.CASCADE)),
('voucher', models.ForeignKey(verbose_name='Voucher', related_name='applications', to='voucher.Voucher', on_delete=models.CASCADE)),
],
options={
'verbose_name_plural': 'Voucher Applications',
'verbose_name': 'Voucher Application',
'abstract': False,
},
bases=(models.Model,),
),
]
| agpl-3.0 | 9,208,904,800,063,817,000 | 53.5 | 293 | 0.598545 | false | 4.348006 | false | false | false |
AlfioEmanueleFresta/practical-ecb-lib | cp_ecb/image.py | 1 | 4402 | from PIL import Image
class InMemoryImage:
"""
A very simple class to represent an image.
"""
def __init__(self, w, h, c=3,
b=b'', encrypted=False):
"""
Instantiate a new image.
:param w: The width of the image (px).
:param h: The height of the image (px).
:param c: The number of colour channels of the image. Default is 3.
:param b: A byte literal for the body of the image.
:param encrypted: A flag to say whether the image is encrypted or not.
"""
self.w = w
self.h = h
self.c = c
self.b = b
self.encrypted = encrypted
def __repr__(self):
return "<InMemoryImage(%s): channels=%d, width=%d, height=%d>" % (
"encrypted" if self.encrypted else "unencrypted",
self.c, self.w, self.h
)
def load_image(input_file, encrypted=False):
"""
Load an image file into memory as a InMemoryImage object.
:param input_file: The file to load.
:param encrypted: Whether to flag the file as an encrypted image or not.
:return: An instantiated InMemoryImage object.
"""
image_file = Image.open(input_file)
image = image_file.convert('RGB')
image_size = image.size
image_b = b''
for y in range(image_size[1]):
for x in range(image_size[0]):
r, g, b = image.getpixel((x, y))
image_b += bytes([r, g, b])
image_file.close()
return InMemoryImage(w=image_size[0], h=image_size[1],
c=3, b=image_b, encrypted=encrypted)
def save_image(image, output_file):
output = Image.new("RGB", (image.w, image.h))
maxlen = len(image.b) - (len(image.b) % image.c)
data = tuple(tuple(image.b[i:i + image.c]) for i in range(0, maxlen, image.c))
data = data[:(image.w * image.h)]
output.putdata(data)
output.save(output_file)
def _crypt_image(encrypt, image, function):
if type(image) is not InMemoryImage:
raise ValueError("You need to pass this function a valid InMemoryImage object.")
if encrypt and image.encrypted:
raise ValueError("The input image is already encrypted.")
elif (not encrypt) and (not image.encrypted):
raise ValueError("The input image is not flagged as encrypted and can't be decrypted.")
image.b = function(image.b)
# Allow return list of ordinals
if type(image.b) is list:
image.b = bytes(image.b)
image.encrypted = encrypt
return image
def encrypt_image(image, function):
"""
Encrypt the content of an InMemoryImage using a given function.
:param image: The unencrypted InMemoryImage object.
:param function: An encryption function which takes a single bytes literal and returns a single bytes literal.
:return: An encrypted InMemoryImage object.
"""
return _crypt_image(encrypt=True, image=image, function=function)
def decrypt_image(image, function):
"""
Decrypt the content of an InMemoryImage using a given function.
:param image: The encrypted InMemoryImage object.
:param function: A decryption function which takes a single bytes literal and returns a single bytes literal.
:return: An unencrypted InMemoryImage object.
"""
return _crypt_image(encrypt=False, image=image, function=function)
def encrypt_image_file(input_file, function, output_file):
"""
Loads an image file, encrypts its contents and saves it as another image file.
:param input_file: The original unencrytped image file.
:param function: The encryption function to use. This must take a single bytes literal and return a single bytes literal.
:param output_file: The file name for the encrypted image.
"""
image = load_image(input_file)
image = encrypt_image(image, function)
save_image(image, output_file)
def decrypt_image_file(input_file, function, output_file):
"""
Loads an encrypted image file, decrypts its contents and saves it as another image file.
:param input_file: The encrypted image file.
:param function: The decryption function to use. This must take a single bytes literal and return a single bytes literal.
:param output_file: The file name for the decrypted image.
"""
image = load_image(input_file, encrypted=True)
image = decrypt_image(image, function)
save_image(image, output_file)
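

# --- Illustrative usage (a sketch added for clarity, not part of the original API) ---
# The helper below and the file names are assumptions made for this example; any
# callable mapping a bytes literal to a bytes literal (e.g. an ECB block cipher)
# can be passed as the encryption/decryption function instead.
if __name__ == '__main__':
    def invert_bytes(data):
        # Toy, reversible, *insecure* transform: bitwise-NOT every byte.
        return bytes((~b) & 0xFF for b in data)

    # encrypt_image_file('input.png', invert_bytes, 'encrypted.png')
    # decrypt_image_file('encrypted.png', invert_bytes, 'decrypted.png')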
| gpl-3.0 | 2,049,984,958,980,118,500 | 33.124031 | 125 | 0.655838 | false | 3.841187 | false | false | false |
jamesiter/JimV-N | models/event_process.py | 1 | 9567 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import libvirt
from models.initialize import guest_event_emit
from models import Guest
__author__ = 'James Iter'
__date__ = '2017/6/15'
__contact__ = '[email protected]'
__copyright__ = '(c) 2017 by James Iter.'
class EventProcess(object):
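    """Registers libvirt domain-event callbacks and forwards guest state changes to JimV's event emitter."""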
conn = None
guest_callbacks = list()
VIR_DOMAIN_EVENT_SHUTDOWN_GUEST = 1
VIR_DOMAIN_EVENT_SHUTDOWN_HOST = 2
def __init__(self):
pass
@classmethod
def guest_event_callback(cls, conn, dom, event, detail, opaque):
if not isinstance(dom, libvirt.virDomain):
            # Skip guests that are no longer on this host
return
if event == libvirt.VIR_DOMAIN_EVENT_STOPPED and detail == libvirt.VIR_DOMAIN_EVENT_STOPPED_MIGRATED:
            # Do not send a state notification after the guest has finished migrating off this host
return
Guest.guest_state_report(dom=dom)
if event == libvirt.VIR_DOMAIN_EVENT_DEFINED:
if detail == libvirt.VIR_DOMAIN_EVENT_DEFINED_ADDED:
                # Triggered after a guest is created
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_DEFINED_UPDATED:
                # Triggered after the guest configuration is updated
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_DEFINED_RENAMED:
                # Guest renamed; untested. Presumably triggered when the guest takes its new name
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_DEFINED_FROM_SNAPSHOT:
                # Config was restored from a snapshot; untested. Presumably a new guest is created from the current config of a guest snapshot
pass
else:
pass
elif event == libvirt.VIR_DOMAIN_EVENT_UNDEFINED:
if detail == libvirt.VIR_DOMAIN_EVENT_UNDEFINED_REMOVED:
                # A guest definition was removed
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_UNDEFINED_RENAMED:
                # Guest renamed; untested. Presumably triggered when the old guest name disappears
pass
else:
pass
elif event == libvirt.VIR_DOMAIN_EVENT_STARTED:
if detail == libvirt.VIR_DOMAIN_EVENT_STARTED_BOOTED:
                # Normal boot
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_STARTED_MIGRATED:
                # Triggered when the guest is migrated in from another host
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_STARTED_RESTORED:
                # Guest restored from a state file
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_STARTED_FROM_SNAPSHOT:
                # Triggered when the guest is restored from a snapshot
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_STARTED_WAKEUP:
                # Triggered on wakeup; untested.
pass
else:
pass
elif event == libvirt.VIR_DOMAIN_EVENT_SUSPENDED:
if detail == libvirt.VIR_DOMAIN_EVENT_SUSPENDED_PAUSED:
                # Triggered when an administrator pauses the guest
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_SUSPENDED_MIGRATED:
                # Triggered when the guest that is about to migrate out is temporarily paused for live migration
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_SUSPENDED_IOERROR:
                # Triggered when the guest is paused because of a disk I/O error; untested
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_SUSPENDED_WATCHDOG:
                # Triggered when the watchdog fires; untested
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_SUSPENDED_RESTORED:
                # Triggered when restoring from a paused guest state file
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_SUSPENDED_FROM_SNAPSHOT:
                # Triggered when restoring from a paused guest snapshot
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_SUSPENDED_API_ERROR:
                # Triggered after a libvirt API call fails
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_SUSPENDED_POSTCOPY:
                # Triggered when the guest is paused while migrating in post-copy mode
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_SUSPENDED_POSTCOPY_FAILED:
                # Triggered when a post-copy migration fails
pass
else:
pass
elif event == libvirt.VIR_DOMAIN_EVENT_RESUMED:
if detail == libvirt.VIR_DOMAIN_EVENT_RESUMED_UNPAUSED:
                # Unpaused; triggered on a normal resume
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_RESUMED_MIGRATED:
                # Triggered on the migration target host when the migration completes
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_RESUMED_FROM_SNAPSHOT:
                # Triggered when resuming from a snapshot
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_RESUMED_POSTCOPY:
                # Resumed, but the migration is still running in post-copy mode; untested
pass
else:
pass
elif event == libvirt.VIR_DOMAIN_EVENT_STOPPED:
if detail == libvirt.VIR_DOMAIN_EVENT_STOPPED_SHUTDOWN:
                # Triggered on a normal shutdown
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_STOPPED_DESTROYED:
                # Triggered when the guest's power is forcibly cut from the host
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_STOPPED_CRASHED:
                # Triggered when the guest crashes
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_STOPPED_MIGRATED:
                # Triggered after the guest has finished migrating off this host
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_STOPPED_SAVED:
                # Triggered after the guest is saved to a state file
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_STOPPED_FAILED:
                # Triggered when the emulator or manager on the host fails
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_STOPPED_FROM_SNAPSHOT:
                # Triggered after an offline snapshot has been loaded; untested
pass
elif event == libvirt.VIR_DOMAIN_EVENT_SHUTDOWN:
if detail == libvirt.VIR_DOMAIN_EVENT_SHUTDOWN_FINISHED:
                # Triggered after the guest shuts down normally
pass
elif detail == cls.VIR_DOMAIN_EVENT_SHUTDOWN_GUEST:
                # Triggered when the guest itself raises the shutdown signal (the hardware is still running but the OS has already shut down, unlike poweroff); untested
pass
elif detail == cls.VIR_DOMAIN_EVENT_SHUTDOWN_HOST:
                # Triggered after the guest is shut down from the host via a signal
pass
else:
pass
elif event == libvirt.VIR_DOMAIN_EVENT_PMSUSPENDED:
if detail == libvirt.VIR_DOMAIN_EVENT_PMSUSPENDED_MEMORY:
                # The guest's memory was suspended by the power manager
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_PMSUSPENDED_DISK:
                # The guest's disk was suspended by the power manager
pass
else:
pass
elif event == libvirt.VIR_DOMAIN_EVENT_CRASHED:
if detail == libvirt.VIR_DOMAIN_EVENT_CRASHED_PANICKED:
                # Triggered when the guest crashes
pass
else:
pass
else:
pass
@staticmethod
def guest_event_migration_iteration_callback(conn, dom, iteration, opaque):
try:
migrate_info = dict()
migrate_info['type'], migrate_info['time_elapsed'], migrate_info['time_remaining'], \
migrate_info['data_total'], migrate_info['data_processed'], migrate_info['data_remaining'], \
migrate_info['mem_total'], migrate_info['mem_processed'], migrate_info['mem_remaining'], \
migrate_info['file_total'], migrate_info['file_processed'], migrate_info['file_remaining'] = \
dom.jobInfo()
guest_event_emit.migrating(uuid=dom.UUIDString(), migrating_info=migrate_info)
except libvirt.libvirtError as e:
pass
@staticmethod
def guest_event_device_added_callback(conn, dom, dev, opaque):
Guest.update_xml(dom=dom)
@staticmethod
def guest_event_device_removed_callback(conn, dom, dev, opaque):
Guest.update_xml(dom=dom)
@classmethod
def guest_event_register(cls):
cls.conn = libvirt.open()
cls.conn.domainEventRegister(cls.guest_event_callback, None)
        # Reference: https://libvirt.org/html/libvirt-libvirt-domain.html#virDomainEventID
cls.guest_callbacks.append(cls.conn.domainEventRegisterAny(
None, libvirt.VIR_DOMAIN_EVENT_ID_MIGRATION_ITERATION,
cls.guest_event_migration_iteration_callback, None))
cls.guest_callbacks.append(cls.conn.domainEventRegisterAny(
None, libvirt.VIR_DOMAIN_EVENT_ID_DEVICE_ADDED,
cls.guest_event_device_added_callback, None))
cls.guest_callbacks.append(cls.conn.domainEventRegisterAny(
None, libvirt.VIR_DOMAIN_EVENT_ID_DEVICE_REMOVED,
cls.guest_event_device_removed_callback, None))
@classmethod
def guest_event_deregister(cls):
cls.conn.domainEventDeregister(cls.guest_event_callback)
for eid in cls.guest_callbacks:
cls.conn.domainEventDeregisterAny(eid)
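

# --- Illustrative usage (a sketch added for clarity, not part of the original module) ---
# libvirt only delivers domain events once a default event loop implementation has been
# registered and is being pumped; the thread setup below is an assumption made for this
# example and may differ from JimV-N's actual startup code.
if __name__ == '__main__':
    import threading
    import time

    def run_default_event_loop():
        # Pump the libvirt default event loop so the registered callbacks fire.
        while True:
            libvirt.virEventRunDefaultImpl()

    libvirt.virEventRegisterDefaultImpl()
    loop_thread = threading.Thread(target=run_default_event_loop)
    loop_thread.daemon = True
    loop_thread.start()

    EventProcess.guest_event_register()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        EventProcess.guest_event_deregister()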
| gpl-3.0 | 7,831,306,273,110,688,000 | 35.497872 | 110 | 0.55847 | false | 3.11099 | false | false | false |
petrvanblokland/Xierpa3 | xierpa3/sites/examples/helloworldblueprint/make.py | 1 | 9548 | # -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
# xierpa server
# Copyright (c) 2014+ [email protected], www.petr.com, www.xierpa.com
#
# X I E R P A 3
# Distribution by the MIT License.
#
# -----------------------------------------------------------------------------
#
# make.py
#
import webbrowser
from xierpa3.toolbox.transformer import TX
from xierpa3.components import Theme, Page, Column
from xierpa3.builders.cssbuilder import CssBuilder
from xierpa3.builders.htmlbuilder import HtmlBuilder
from xierpa3.attributes import Em, Margin, Perc, Color
from xierpa3.descriptors.media import Media
from xierpa3.descriptors.blueprint import BluePrint
BODYFAMILY = 'Impact, Verdana, sans'
CAPTIONFAMILY = 'Georgia, serif'
class HelloWorldBluePrintText(Column):
# Get Constants->Config as class variable, so inheriting classes can redefine values.
C = Theme.C
# The BluePrint defined the parameters for the component. They can be adjusted by parent
# components who implement this component on a page, or by inheriting classes that
# only want to redefine part of the parameters. The actual self.style is created during
# compilation of the start (not during run-time) as cascading result of all parent BLUEPRINT
# dictionaries.
# Furthermore the documentation builder is using the BluePrint instance to visualize
# the interface of each component available.
#
BLUEPRINT = BluePrint(
# Attribute, documentation about the attribute.
# Main div block
bodyFamily=BODYFAMILY, doc_bodyFamily=u'Body font family of this example. For now, in this example we only use system fonts.',
fontSize=Em(4), doc_fontSize=u'Font size of the body text, relative to the body font size.',
lineHeight=Em(1.2), doc_lineHeight=u'Line height (leading) of body text.',
textAlign=C.CENTER, doc_textAlign=u'Horizontal alignment of text.',
color=Color('yellow'), doc_color=u'Color of the main column.',
colorTablet=Color('orange'), doc_colorTablet=u'Text color of the main column for tablet.',
colorMobile=Color('red'), doc_colorMobile=u'Text color of the main column for mobile.',
backgroundColor=Color('red'), doc_backgroundColor=u'Background color of the main column',
backgroundColorTablet=Color('green'), doc_backgroundColorTablet=u'Background color of the main column for tablet.',
backgroundColorMobile=Color('#BBB'), doc_backgroundColorMobile=u'Background color of the main column for mobile.',
paddingTop=Em(0.5), doc_paddingTop=u'Padding on top of the page',
paddingBottom=Em(0.5), doc_paddingBottom=u'Padding at bottom of the page.',
margin=Margin(0, C.AUTO), doc_margin=u'Page margin of the column. In this case, horizontally centered on the page.',
width=Perc(80), doc_width=u'Width of the main column. Default is 80% os the page with.',
maxWidth=700, doc_maxWidth=u'Maximal width of the column.',
minWidth=300, doc_minWidth=u'Minimal width of the column.',
# Caption
captionFont=CAPTIONFAMILY, doc_captionFont=u'Caption font family for this example. For now, in this example we only use system fonts.',
captionColor=Color('#888'), doc_captionColor=u'Color of the caption.',
captionPaddingTop=Em(0.2), doc_captionPaddingTop=u'Padding top of the caption.',
)
def buildBlock(self, b):
u"""Build the column, using the parameters from the class BluePrint instance.
        This builds the **self.style()** by cascading all BluePrint instances
        of the parent classes. The result is a complete specification of all the parameters
        that direct the style and behavior of this component."""
s = self.style
b.div(class_=self.getClassName(), color=s.color, margin=s.margin,
width=s.width, maxwidth=s.maxWidth, minwidth=s.minWidth, backgroundcolor=s.backgroundColor,
paddingtop=s.paddingTop, paddingbottom=s.paddingBottom, fontfamily=s.bodyFamily,
fontsize=s.fontSize, textalign=s.textAlign, lineheight=s.lineHeight,
# Now define the @media parameters, where they belong: inside the definition of the element.
# The media parameters are collected and sorted for output at the end of the CSS document.
media=(
# Example for table, show lighter background, change color of text and smaller size.
Media(min=self.C.M_TABLET_MIN, max=self.C.M_TABLET_MAX, backgroundcolor=s.backgroundColorTablet,
color=s.colorTablet, fontsize=Em(3), width=self.C.AUTO, float=self.C.NONE),
# For mobile, even more lighter background, change color of text and smaller size.
Media(max=self.C.M_MOBILE_MAX, backgroundcolor=s.backgroundColorMobile,
color=s.colorMobile, fontsize=Em(2), width=self.C.AUTO, float=self.C.NONE)
))
b.text('Hello parametric world.')
# One of the advantages of using a real programming language to generate
        # HTML/CSS code is that repetitions can be written as a loop. Not necessarily
        # fewer lines, but more expandable and less redundant distribution of
# knowledge in the code.
data = (
# class, minWidth, maxWidth, text
('c1', self.C.M_DESKTOP_MIN, None, 'Responsive desktop mode.' ),
('c2', self.C.M_TABLET_MIN, self.C.M_TABLET_MAX, 'Responsive tablet mode.' ),
('c3', None, self.C.M_MOBILE_MAX, 'Responsive mobile mode..' ),
)
for class_, minWidth, maxWidth, text in data:
b.div(class_=class_, display=self.C.NONE, fontsize=Em(0.7), color=Color(self.C.WHITE),
media=Media(min=minWidth, max=maxWidth, display=self.C.BLOCK))
b.text(text)
b._div()
b._div()
b.div(class_=self.C.CLASS_CAPTION, color=s.captionColor, margin=Margin(0, self.C.AUTO),
width=Perc(100), maxwidth=700, minwidth=300,
paddingtop=s.captionPaddingTop, fontfamily=s.captionFont, fontsize=Em(0.9),
textalign=s.textAlign, fontstyle=self.C.ITALIC,
# Change background color of the line to indicate the illustrate the difference for mobile size.
#media=Media(max=self.M_MOBILE_MAX, backgroundcolor='yellow', color='#222', fontsize=Em(1),
# margin=0, width=Perc(100),
)
b.text('Responsive page, generated by Xierpa3. Using BluePrint parameters.')
b._div()
class HelloWorldBluePrint(Theme):
u"""The **HelloWorldResponsive** class implements a basic "Hello, world!" page, running as
batch process, saving the result as an HTML file. Double click the generated file or
drag to a browser see the result."""
TITLE = u'The responsive "Hello, world!" page using BluePrint styling.' # Use as title of window.
def baseComponents(self):
u"""Create a theme site with just one single template home page. Answer a list
of page instances that are used as templates for this site."""
# Create an instance (=object) of the text component to be placed on the page.
hw = HelloWorldBluePrintText()
# Create an instance (=object) of the page, containing the "hw" component.
# The class is also the page name in the url.
# Components can be a single component or a list of components.
homePage = Page(class_=self.C.TEMPLATE_INDEX, components=hw, title=self.TITLE)
# Answer a list of types of pages for this site.
return [homePage]
def make(self, root):
u"""The instance of this class builds CSS and HTML files at the optional path **root**.
If not defined, then the default ~/Desktop/Xierpa3Examples/[component.name] is used as export path,
as set by Builder.DEFAULT_ROOTPATH"""
# Create an "instance" (=object) of type "HelloWorldLayout". The type (=class) defines
# the behavior of the object that is made by calling the class.
if root is None:
root = TX.asDir(self.C.PATH_EXAMPLES) # Expand user path to full directory path.
# C S S
# Create the main CSS builder instance to build the SASS/CSS part of the site.
cssBuilder = CssBuilder()
# Compile (=build) the SCSS to CSS and save the file in "css/style.css".
self.build(cssBuilder) # Build from entire site theme, not just from template. Result is stream in builder.
cssBuilder.save(self, root)
# H T M L
# Create the main HTML builder instance to build the HTML part of the site.
htmlBuilder = HtmlBuilder()
# Compile the HTML and save the resulting HTML file in "helloWorld.html".
self.build(htmlBuilder) # Build from entire site theme, not just from template. Result is stream in builder.
# Answer the path, so we can directly open the file with a browser.
return htmlBuilder.save(self, root)
if __name__ == '__main__':
# This construction "__name__ == '__main__'" makes this Python file only
# be executed when called in direct mode, such as "python make.py" in the terminal.
# Since no rootPath is added to make(), the file export is in ~/Desktop/Xierpa3Examples/HelloWorldBluePrint/
site = HelloWorldBluePrint()
path = site.make()
webbrowser.open(path) # Open file path with browser
| mit | -8,731,468,573,358,479,000 | 59.43038 | 143 | 0.662233 | false | 3.897143 | false | false | false |
bd-j/magellanic | magellanic/sfhs/prediction_scripts/predicted_total.py | 1 | 5894 | import sys, pickle, copy
import numpy as np
import matplotlib.pyplot as pl
import astropy.io.fits as pyfits
import magellanic.regionsed as rsed
import magellanic.mcutils as utils
from magellanic.lfutils import *
try:
import fsps
from sedpy import observate
except ImportError:
    # you won't be able to predict the integrated spectrum or magnitudes
# filterlist must be set to None in calls to total_cloud_data
sps = None
wlengths = {'2': '{4.5\mu m}',
'4': '{8\mu m}'}
dmod = {'smc':18.9,
'lmc':18.5}
cloud_info = {}
cloud_info['smc'] = [utils.smc_regions(), 20, 23, [7, 13, 16], [3,5,6]]
cloud_info['lmc'] = [utils.lmc_regions(), 48, 38, [7, 11, 13, 16], [3,4,5,6]]
def total_cloud_data(cloud, filternames = None, basti=False,
lfstring=None, agb_dust=1.0,
one_metal=None):
#########
# SPS
#########
#
if filternames is not None:
sps = fsps.StellarPopulation(add_agb_dust_model=True)
sps.params['sfh'] = 0
sps.params['agb_dust'] = agb_dust
dust = ['nodust', 'agbdust']
sps.params['imf_type'] = 0.0 #salpeter
filterlist = observate.load_filters(filternames)
else:
filterlist = None
##########
# SFHs
##########
regions, nx, ny, zlist, zlist_basti = cloud_info[cloud.lower()]
if basti:
        zlist = zlist_basti
if 'header' in regions.keys():
rheader = regions.pop('header') #dump the header info from the reg. dict
total_sfhs = None
for n, dat in regions.iteritems():
total_sfhs = sum_sfhs(total_sfhs, dat['sfhs'])
total_zmet = dat['zmet']
#collapse SFHs to one metallicity
if one_metal is not None:
        # Collapse the per-metallicity SFHs into a single-element list holding their sum.
        ts = None
        for sfh in total_sfhs:
            ts = sum_sfhs(ts, [sfh])
        total_sfhs = ts
zlist = [zlist[one_metal]]
total_zmet = [total_zmet[one_metal]]
#############
# LFs
############
bins = rsed.lfbins
if lfstring is not None:
# these are stored as a list of different metallicities
lffiles = [lfstring.format(z) for z in zlist]
lf_base = [read_villaume_lfs(f) for f in lffiles]
#get LFs broken out by age and metallicity as well as the total
lfs_zt, lf, logages = rsed.one_region_lfs(copy.deepcopy(total_sfhs), lf_base)
else:
lfs_zt, lf, logages = None, None, None
###########
# SED
############
if filterlist is not None:
spec, wave, mass = rsed.one_region_sed(copy.deepcopy(total_sfhs), total_zmet, sps)
mags = observate.getSED(wave, spec*rsed.to_cgs, filterlist=filterlist)
maggies = 10**(-0.4 * np.atleast_1d(mags))
else:
maggies, mass = None, None
#############
# Write output
############
total_values = {}
total_values['agb_clf'] = lf
total_values['agb_clfs_zt'] = lfs_zt
total_values['clf_mags'] = bins
total_values['logages'] = logages
total_values['sed_ab_maggies'] = maggies
total_values['sed_filters'] = filternames
total_values['lffile'] = lfstring
total_values['mstar'] = mass
total_values['zlist'] = zlist
return total_values, total_sfhs
def sum_sfhs(sfhs1, sfhs2):
"""
Accumulate individual sets of SFHs into a total set of SFHs. This
assumes that the individual SFH sets all have the same number and
order of metallicities, and the same time binning.
"""
if sfhs1 is None:
return copy.deepcopy(sfhs2)
elif sfhs2 is None:
return copy.deepcopy(sfhs1)
else:
out = copy.deepcopy(sfhs1)
for s1, s2 in zip(out, sfhs2):
s1['sfr'] += s2['sfr']
return out
if __name__ == '__main__':
filters = ['galex_NUV', 'spitzer_irac_ch2',
'spitzer_irac_ch4', 'spitzer_mips_24']
#filters = None
ldir, cdir = 'lf_data/', 'composite_lfs/'
outst = '{0}_n2teffcut.p'
# total_cloud_data will loop over the appropriate (for the
# isochrone) metallicities for a given lfst filename template
lfst = '{0}z{{0:02.0f}}_tau{1:2.1f}_vega_irac{2}_n2_teffcut_lf.txt'
basti = False
agb_dust=1.0
agebins = np.arange(9)*0.3 + 7.4
#loop over clouds (and bands and agb_dust) to produce clfs
for cloud in ['smc']:
rdir = '{0}cclf_{1}_'.format(cdir, cloud)
for band in ['2','4']:
lfstring = lfst.format(ldir, agb_dust, band)
dat, sfhs = total_cloud_data(cloud, filternames=filters, agb_dust=agb_dust,
lfstring=lfstring, basti=basti)
agebins = sfhs[0]['t1'][3:-1]
outfile = lfstring.replace(ldir, rdir).replace('z{0:02.0f}_','').replace('.txt','.dat')
write_clf_many([dat['clf_mags'], dat['agb_clf']], outfile, lfstring)
#fig, ax = plot_weighted_lfs(dat, agebins = agebins, dm=dmod[cloud])
#fig.suptitle('{0} @ IRAC{1}'.format(cloud.upper(), band))
#fig.savefig('byage_clfs/{0}_clfs_by_age_and_Z_irac{1}'.format(cloud, band))
#pl.close(fig)
colheads = (len(agebins)-1) * ' N<m(t={})'
colheads = colheads.format(*(agebins[:-1]+agebins[1:])/2.)
tbin_lfs = np.array([rebin_lfs(lf, ages, agebins) for lf, ages
in zip(dat['agb_clfs_zt'], dat['logages'])])
write_clf_many([dat['clf_mags'], tbin_lfs.sum(axis=0)],
outfile.replace(cdir,'byage_clfs/'), lfstring,
colheads=colheads)
pl.figure()
for s, z in zip(sfhs, dat['zlist']):
pl.step(s['t1'], s['sfr'], where='post', label='zind={0}'.format(z), linewidth=3)
pl.legend(loc=0)
pl.title(cloud.upper())
print(cloud, dat['mstar'])
| gpl-2.0 | 5,455,226,854,893,435,000 | 34.721212 | 99 | 0.557686 | false | 3.055469 | false | false | false |
reverse-CSE-591/reverse | driver.py | 1 | 19133 | #!/usr/bin/python -tt
#####################################################################################################################
# CSE 591: Security and Vulnerability Analysis
#
# Team 5:
#
# Kartheek Nallepalli
# Bhargavi Rajagopalan
# Priya Pipada
# Ayush Maheshwari
# Nikhil Aourpally
#
#
# This is the driver program. Run the main function here to find potential vulnerabilities in the website
#####################################################################################################################
# Python Imports
from __future__ import division
from bs4 import BeautifulSoup
from lxml import html
from os import system, path
from random import randint
from urlparse import urlparse
import ast
import json
import math
import nltk
import re
import requests
import sys
import time
import urllib
import urllib2
# This is a global set that contains all the URLs crawled from the website.
urls = set()
stopWords = []
#####################################################################################################################
# This method takes in a form to be filled and the url and tries to guess valid inputs that would result in a
# successful response from the server
# Inputs:
# params[] (List[String]): list of parameters along with the types in the following format.
# ex: ["username::text", "password::password"]
# action (String): The action the form should take when submitted
# url (String): The page URL for getting the HTML data and figuring out what to fill
# Output:
# validResponse (String): returns the HTML string of the valid response
#####################################################################################################################
def getValidResponse(params, action, url, cookies):
formInput={}
for key in params:
value = params[key]
formInput[key] = generateValue(value['label'],value['type'])
#print cookies, type(cookies)
(header,validResponse) = constructPostRequest(formInput, cookies, action)
return validResponse
#####################################################################################################################
# This method constructs an HTTP POST request to submit the form.
# Inputs:
#   formInput (dict): form field names mapped to the values to submit
#   input_cookies (dict): cookies to send along with the request
#   action (String): the URL the form posts to
# Output:
#   (headers, text): the response headers and the HTML body of the response
#####################################################################################################################
def constructPostRequest(formInput, input_cookies, action):
r = requests.post(action, data=formInput, verify=False, cookies=input_cookies)
return (r.headers,r.text)
#####################################################################################################################
# This method takes in a form to be filled and the url and inserts <scripts> into the fields.
# Inputs:
# params{} (Dictionary): list of parameters along with the types in the following format.
# ex: ["username::text", "password::password"]
# action (String): The action the form should take when submitted
# Output:
# xssResponse (String): returns the HTML response
#####################################################################################################################
def getXssResponse(params, action, url, cookies):
formInput={}
for key in params:
value = params[key]
formInput[key]="<sCript>xssAttack</sCript>"
(header,xssInjResponse) = constructPostRequest(formInput,cookies,action)
return xssInjResponse
#####################################################################################################################
# This method computes the XSS injection score for the given response
# Inputs:
#   xssResponse (String): the HTML response from the injected form (the crawled URLs are re-fetched)
#   input_cookies (dict): cookies to send when re-fetching the crawled URLs
# Output:
#   score (int): 1 if the injected <sCript> marker is reflected on any crawled page, otherwise 0
#####################################################################################################################
def getXssScore(xssResponse, input_cookies):
urls = open("crawledURLs.txt")
for url in urls:
response = requests.get(re.sub("\n","",url), verify=False, cookies=input_cookies).text
if bool(re.search('<sCript>xssAttack</sCript>', response)):
return 1
return 0
#####################################################################################################################
# This method takes in a form to be filled and the url and tries SQL injection in the fields
# Inputs:
# params[] (List[String]): list of parameters along with the types in the following format.
# ex: ["username::text", "password::password"]
# action (String): The action the form should take when submitted
# Output:
#         sqlInjResponse (String): returns the HTML response
#####################################################################################################################
def getSqlInjResponse(params, action, url, cookies):
formInput={}
for key in params:
value = params[key]
formInput[key] ="' or 1=1 --'"
(header,sqlInjResponse) = constructPostRequest(formInput,cookies,action)
return sqlInjResponse
#####################################################################################################################
# This method takes in two HTML strings, compares them and assigns a similarity score. The idea is to use this
# score to see how similar pages with valid and invalid outputs are.
# Inputs:
# html_1 (String): The first HTML page
# html_2 (String): The second HTML page
# Output:
# score (double): similarity between pages
#####################################################################################################################
def getSimilarityScore(html_1, html_2):
cleanResponse1 = BeautifulSoup(html_1).get_text()
cleanResponse2 = BeautifulSoup(html_2).get_text()
return calculateCosineSimilarity(formatVector(cleanResponse1), formatVector(cleanResponse2))
#####################################################################################################################
# The method calculates the cosine similarity between two groups
# Inputs:
#   group1, group2 (dict): term-frequency vectors mapping word -> count
# Output:
#   score (float): cosine similarity between the two vectors
#####################################################################################################################
def calculateCosineSimilarity(group1, group2):
doc1sq = doc2sq = frequency = 0
for i in group1:
if i in group2:
frequency += group1[i] * group2[i]
for j in group1:
doc1sq += math.pow(group1[j], 2)
for k in group2:
doc2sq += math.pow(group2[k], 2)
score = float(frequency) / (math.sqrt(doc1sq) * math.sqrt(doc2sq))
return score
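# Worked example (added for clarity): for two small term-frequency vectors such as
# {'a': 2, 'b': 1} and {'a': 1, 'c': 1}, the only shared term is 'a', so the dot
# product is 2 * 1 = 2; the norms are sqrt(2^2 + 1^2) = sqrt(5) and
# sqrt(1^2 + 1^2) = sqrt(2), giving a score of 2 / sqrt(10) ~= 0.63.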
#####################################################################################################################
# This method converts a plain-text response into a term-frequency vector, skipping empty tokens and stopwords.
# Inputs:
#   response (String): plain text extracted from an HTML response
# Output:
#   vectorDict (dict): mapping of word -> occurrence count
#####################################################################################################################
def formatVector(response):
global stopWords
cleanResponse = map(lambda x:re.split(" ", x), re.split("\n", response))
vectorList = []
vectorDict = {}
for i in cleanResponse:
vectorList.extend(i)
vector = []
for i in vectorList:
        if str(i) != '' and str(i) not in stopWords:
vector.append(i.lower())
for j in vector:
if j in vectorDict:
vectorDict[j] += 1
else:
vectorDict[j] = 1
return vectorDict
#####################################################################################################################
# This method takes in the original label extracted, gets the similarity score and predicts the valid form entries
# by understanding meaning of the labes and mapping them to known labels using dictionary similarity and edit-
# distance score.
#
# TODO : Faced problems with wu-palmer similarity over wordNet (flase positives and not all terms present)
# Currently using just the edit distance
#
# Inputs:
# label (String): Label generated from the scarppy code extended
# Output:
# generated value (String): Valid generated form input value
#####################################################################################################################
def getLabel(orglabel):
    userset = ['user', 'username', 'user_name']
    # getEdidDistanceScore returns a normalized edit distance, so lower means more similar.
    minscore = min(getEdidDistanceScore(orglabel, field) for field in userset)
    # Map the label to 'username' only when it is close enough to a known user field.
    if minscore < 0.5:
        return 'username'
    return orglabel
#####################################################################################################################
# This method generates random values based on the form field type and implements intelligent form filling
# Inputs:
#   label (String): the label text associated with the form field
#   labeltype (String): the HTML input type (text, password, email or number)
# Output:
#   a generated value appropriate for the field type
#####################################################################################################################
def generateValue(label, labeltype):
if labeltype == 'text':
newlabel = getLabel(label)
if newlabel == 'username':
return 'reverse'+ str(time.time())
else:
return 'reverserandom'+ str(time.time())
elif labeltype == 'password':
return 'reversePass'+ str(time.time())
elif labeltype == 'email':
return 'reverse'+str(time.time())+'@reverse.com'
elif labeltype == 'number':
return randint(0,10000)
#####################################################################################################################
# Helper methods
#####################################################################################################################
# Get the specific form parameters
def getFormParams(link):
params = {}
labels = []
source = link['source'].replace("\n","")
for i in range(0, len(source)):
label = ''
if source[i] == '>':
while source[i] != '<':
label += source[i]
i = i + 1
if i >= len(source) - 1:
break;
if label[1:] and not label[1:].isspace():
labels.append(label[1:])
i = 0
for j in link['form']:
params[j['name']] = {}
params[j['name']]['type'] = j['type']
        params[j['name']]['label'] = labels[i] if i < len(labels) else labels[0]
i = i + 1
return (link['target'], params)
# This method gets the list of stopwords
def getStopWords():
global stopWords
f = open("stopwords.en")
for i in f:
stopWords.append(re.sub("\n","",i))
# Get the edit-distance score between two words
def getEdidDistanceScore(word1, word2):
distance = nltk.metrics.distance.edit_distance(word1, word2, transpositions=False)
avgLength = (len(word1) + len(word2))/2
score = distance/avgLength
return score
#Get cookies from user
def getCookies():
flag = 0
cookies = {}
print "Enter cookies(Press X to exit): "
while True:
if not flag:
key = raw_input("Enter Key: ")
flag = 1
if key == 'X':
break;
else:
value = raw_input("Enter value: ")
flag = 0
if value == 'X':
break;
cookies[key] = value
return cookies
#####################################################################################################################
# Method to inject malicious input values into the application to check if nth order SQL injection is possible
#####################################################################################################################
def nthOrderSQLInjection(params, action, url, cookies, index, urlForms):
UserName = "reverse_12345"
Password = "aXb675hjWF@"
SQLKeyWord = "' union select "
TableInfo = 'from dual;--'
responseString = None
for i in range(0,5):
formInput = {}
ParameterPadding = 'Null,' * i
Parameter = '"Evilmax"' + str(index) + ' '
MaliciousInputValue = UserName + SQLKeyWord + ParameterPadding + Parameter + TableInfo
for key in params:
value = params[key]
if value['type'] != 'password':
formInput[key] = MaliciousInputValue
else:
formInput[key] = Password
constructPostRequest(formInput, cookies, action)
for urlForm in urlForms:
(newAction, newParams) = getFormParams(urlForm)
newFormInput = {}
for newParam in newParams:
value = newParams[newParam]
if value['type'] != 'password':
newFormInput[newParam] = UserName
else:
newFormInput[newParam] = Password
            (header, response) = constructPostRequest(newFormInput, cookies, newAction)
            # The injected payload embeds the marker '"Evilmax"<index> '.
            if 'Evilmax' in response:
                SplitString = response.split("Evilmax")
                Index = SplitString[1].lstrip('"').split(' ')[0]
                if str(index) != Index:
                    responseString = (responseString or '') + "nth Order SQL injection present in " + newAction + "\n"
return responseString
#####################################################################################################################
# The method takes the URLs extracted from the crawler scrapy and performs a "deeper" crawling by seeing if the
# server is setting any cookies after login and adds that to the list of cookies.
#Output: Updates cookies (Dictionary)
#####################################################################################################################
def deepCrawling(urlForms,cookies):
storedFormInputs=[]
formInput={}
login=False
for urlForm in urlForms:
(action, params) = getFormParams(urlForm)
credentials = {'username': None, 'password' : None}
for key in params:
value = params[key]
if value['type'] != 'submit':
formInput[key] = generateValue(value['label'],value['type'])
newLabel = getLabel(value['label'])
if newLabel == 'username':
credentials['username'] = formInput[key]
if value['type'] == 'password':
credentials['password'] = formInput[key]
if credentials:
storedFormInputs.append(credentials)
(header,response) = constructPostRequest(formInput,cookies,action)
if "registered" in response.lower() or "created" in response.lower() or "authenticated" in response.lower():
login=True
if login == True:
for urlForm in urlForms:
(action, params) = getFormParams(urlForm)
for storedFormInput in storedFormInputs:
formInput = {}
for key in params:
value = params[key]
newLabel = getLabel(value['label'])
if newLabel == 'username':
formInput[key] = storedFormInput['username']
if value['type'] == 'password' and storedFormInput['password']:
formInput[key] = storedFormInput['password']
(header, response) = constructPostRequest(formInput,cookies,action)
if 'set-cookie' in header.keys():
newCookie = str(header['set-cookie']).split(';')[0]
CookieSplit = str(newCookie).split('=')
cookies[CookieSplit[0]] = CookieSplit[1]
return cookies
#####################################################################################################################
# This is the main method that gets called and submits the report on possible vulnerabilities
#####################################################################################################################
def main():
# Init Global variables
getStopWords()
# Add the required headers, most likely its just the login cookie for the page.
#opener = urllib2.build_opener()
#opener.addheaders.append(('Cookie', 'cse591=kP047iYtubEZ6ZnMKmxO'))
# domain = "129.219.253.30:80"
url = raw_input("Enter the web address: ")
cookies = getCookies()
domain = urlparse(url).netloc
# Remove any residual files
system("rm items.json")
system("rm crawledURLs.txt")
system("rm reverse_report")
system("rm reverse_response")
# Use Scrapy to get recursively get all URLs, Stores the
system("scrapy crawl ReverseCrawler -a domain="+domain+" -a start_urls="+url+" -a cookies=\""+str(cookies)+"\" -o items.json")
#cookies = ast.literal_eval(cookies)
# Iterate over all the URL's and their forms
UrlForms = json.load(open("items.json"))
print "\n\n\n"
# Open report, response file
reportFile = open('reverse_report','w')
responseFile = open('reverse_response','w')
# Perform a deeper crawling and re-crawl using scrapy to fetch more URLs
cookies = deepCrawling(UrlForms,cookies)
system("rm -f items.json")
system("scrapy crawl ReverseCrawler -a domain="+domain+" -a start_urls="+url+" -a cookies=\""+str(cookies)+"\" -o items.json")
UrlForms = json.load(open("items.json"))
# Iterate through all possible forms
index = 0
for urlForm in UrlForms:
(action, params) = getFormParams(urlForm)
print "[INFO] action: ", action
# Get the valid response
validResponse = getValidResponse(params, action, url, cookies)
# Append the resposes to response file
responseFile.write("%%%%%%%%%%%%%%%%%%%%%%%%%% Start Valid Response %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n")
responseFile.write(action + "\n")
responseFile.write(str(params) + "\n")
responseFile.write(BeautifulSoup(validResponse).get_text() + "\n")
responseFile.write("############################ Start SQL Injection response ###########################\n")
# Attempt SQL Injection and Get the score
sqlInjResponse = getSqlInjResponse(params, action, url, cookies)
responseFile.write(BeautifulSoup(sqlInjResponse).get_text() + "\n")
responseFile.write("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ Start XSS response @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n")
sqlInjectionScore = float(1) - getSimilarityScore(validResponse, sqlInjResponse)
print "[INFO] SQL_INJ_Score = ", sqlInjectionScore
# Attempt nth Order SQL injection
responseString = nthOrderSQLInjection(params, action, url, cookies, index, UrlForms)
# Attempt XSS and get the score
xssResponse = getXssResponse(params, action, url, cookies)
responseFile.write("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n")
xssScore = getXssScore(xssResponse, cookies)
print "[INFO] XSS_Score = ", xssScore
# Add scores to the report
reportFile.write("[Params]:: " + str(params) + "\n")
reportFile.write("[Action]:: " + action + "\n")
reportFile.write("[SQL_Inj_Score]:: " + str(sqlInjectionScore) + "\n")
reportFile.write("[XSS_Inj_Score]:: " + str(xssScore) + "\n\n")
if responseString is not None:
reportFile.write("[nth Order SQL Injection]::" + responseString + "\n")
print "\n\n"
index = index + 1
# Close the report
reportFile.close()
responseFile.close()
if __name__ == '__main__':
main()
| mit | 6,833,721,684,920,172,000 | 39.111111 | 130 | 0.516176 | false | 4.391324 | false | false | false |
woelfware/BluMote | test/button_tx.py | 1 | 1963 | #!/usr/bin/env python
# Copyright (C) 2011 Woelfware
from bluetooth import *
import blumote
import cPickle
from glob import glob
import os
import sys
import time
class Blumote_Client(blumote.Services):
def __init__(self):
blumote.Services.__init__(self)
self.addr = None
def find_blumote_pods(self, pod_name = None):
if pod_name is None:
pod_name = self.service["name"]
print "Searching for \"%s\" service..." % (pod_name)
return find_service(name = pod_name)
def connect_to_blumote_pod(self, addr):
self.client_sock = BluetoothSocket(RFCOMM)
self.client_sock.connect((addr, 1))
def transport_tx(self, cmd, msg):
full_msg = struct.pack("B", cmd)
full_msg += msg
self.client_sock.send(full_msg)
def ir_transmit(self, msg):
self.transport_tx(self.cmd_codes.ir_transmit, msg)
return self.client_sock.recv(128)
if __name__ == "__main__":
bm_remote = Blumote_Client()
found = False
while not found:
try:
nearby_devices = discover_devices(lookup_names = True)
except:
print 'failed to find a blumote... retrying'
nearby_devices = ()
print 'found %d device(s)' % len(nearby_devices)
for addr, name in nearby_devices:
if name[:len('BluMote')] == 'BluMote':
print 'connecting to', addr, name
bm_remote.connect_to_blumote_pod(addr)
found = True
break
buttons = glob('*.pkl')
print 'Available buttons:'
for i, button in enumerate(buttons):
print '\t%i: %s' % (i, os.path.splitext(button)[0])
print
while True:
selection = raw_input('Select a button to transmit (-1 to quit): ')
try:
selection = int(selection)
except ValueError:
print 'Invalid selection'
continue
if selection == -1:
break
if ((selection < 0) or (selection >= len(buttons))):
      print 'Invalid selection'
continue
button = open(buttons[selection], 'rb')
key_code = cPickle.load(button)
button.close()
bm_remote.ir_transmit(''.join(['\x03', key_code]))
bm_remote.client_sock.close()
| gpl-3.0 | -4,726,947,572,065,195,000 | 23.5375 | 69 | 0.671931 | false | 2.903846 | false | false | false |
VcamX/grpc | src/python/grpcio/grpc/framework/alpha/_face_utilities.py | 1 | 7822 | # Copyright 2015-2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import abc
import collections
import six
# face_interfaces is referenced from specification in this module.
from grpc.framework.common import cardinality
from grpc.framework.face import interfaces as face_interfaces # pylint: disable=unused-import
from grpc.framework.face import utilities as face_utilities
from grpc.framework.alpha import _reexport
from grpc.framework.alpha import interfaces
def _qualified_name(service_name, method_name):
return '/%s/%s' % (service_name, method_name)
# TODO(nathaniel): This structure is getting bloated; it could be shrunk if
# implementations._Stub used a generic rather than a dynamic underlying
# face-layer stub.
class InvocationBreakdown(six.with_metaclass(abc.ABCMeta)):
"""An intermediate representation of invocation-side views of RPC methods.
Attributes:
cardinalities: A dictionary from RPC method name to interfaces.Cardinality
value.
qualified_names: A dictionary from unqualified RPC method name to
service-qualified RPC method name.
face_cardinalities: A dictionary from service-qualified RPC method name to
to cardinality.Cardinality value.
request_serializers: A dictionary from service-qualified RPC method name to
callable behavior to be used serializing request values for the RPC.
response_deserializers: A dictionary from service-qualified RPC method name
to callable behavior to be used deserializing response values for the
RPC.
"""
class _EasyInvocationBreakdown(
InvocationBreakdown,
collections.namedtuple(
'_EasyInvocationBreakdown',
('cardinalities', 'qualified_names', 'face_cardinalities',
'request_serializers', 'response_deserializers'))):
pass
class ServiceBreakdown(six.with_metaclass(abc.ABCMeta)):
"""An intermediate representation of service-side views of RPC methods.
Attributes:
implementations: A dictionary from service-qualified RPC method name to
face_interfaces.MethodImplementation implementing the RPC method.
request_deserializers: A dictionary from service-qualified RPC method name
to callable behavior to be used deserializing request values for the RPC.
response_serializers: A dictionary from service-qualified RPC method name
to callable behavior to be used serializing response values for the RPC.
"""
class _EasyServiceBreakdown(
ServiceBreakdown,
collections.namedtuple(
'_EasyServiceBreakdown',
('implementations', 'request_deserializers', 'response_serializers'))):
pass
def break_down_invocation(service_name, method_descriptions):
"""Derives an InvocationBreakdown from several RPC method descriptions.
Args:
service_name: The package-qualified full name of the service.
method_descriptions: A dictionary from RPC method name to
interfaces.RpcMethodInvocationDescription describing the RPCs.
Returns:
An InvocationBreakdown corresponding to the given method descriptions.
"""
cardinalities = {}
qualified_names = {}
face_cardinalities = {}
request_serializers = {}
response_deserializers = {}
for name, method_description in six.iteritems(method_descriptions):
qualified_name = _qualified_name(service_name, name)
method_cardinality = method_description.cardinality()
cardinalities[name] = method_description.cardinality()
qualified_names[name] = qualified_name
face_cardinalities[qualified_name] = _reexport.common_cardinality(
method_cardinality)
request_serializers[qualified_name] = method_description.serialize_request
response_deserializers[qualified_name] = (
method_description.deserialize_response)
return _EasyInvocationBreakdown(
cardinalities, qualified_names, face_cardinalities, request_serializers,
response_deserializers)
def break_down_service(service_name, method_descriptions):
"""Derives a ServiceBreakdown from several RPC method descriptions.
Args:
method_descriptions: A dictionary from RPC method name to
interfaces.RpcMethodServiceDescription describing the RPCs.
Returns:
A ServiceBreakdown corresponding to the given method descriptions.
"""
implementations = {}
request_deserializers = {}
response_serializers = {}
for name, method_description in six.iteritems(method_descriptions):
qualified_name = _qualified_name(service_name, name)
method_cardinality = method_description.cardinality()
if method_cardinality is interfaces.Cardinality.UNARY_UNARY:
def service(
request, face_rpc_context,
service_behavior=method_description.service_unary_unary):
return service_behavior(
request, _reexport.rpc_context(face_rpc_context))
implementations[qualified_name] = face_utilities.unary_unary_inline(
service)
elif method_cardinality is interfaces.Cardinality.UNARY_STREAM:
def service(
request, face_rpc_context,
service_behavior=method_description.service_unary_stream):
return service_behavior(
request, _reexport.rpc_context(face_rpc_context))
implementations[qualified_name] = face_utilities.unary_stream_inline(
service)
elif method_cardinality is interfaces.Cardinality.STREAM_UNARY:
def service(
request_iterator, face_rpc_context,
service_behavior=method_description.service_stream_unary):
return service_behavior(
request_iterator, _reexport.rpc_context(face_rpc_context))
implementations[qualified_name] = face_utilities.stream_unary_inline(
service)
elif method_cardinality is interfaces.Cardinality.STREAM_STREAM:
def service(
request_iterator, face_rpc_context,
service_behavior=method_description.service_stream_stream):
return service_behavior(
request_iterator, _reexport.rpc_context(face_rpc_context))
implementations[qualified_name] = face_utilities.stream_stream_inline(
service)
request_deserializers[qualified_name] = (
method_description.deserialize_request)
response_serializers[qualified_name] = (
method_description.serialize_response)
return _EasyServiceBreakdown(
implementations, request_deserializers, response_serializers)
| bsd-3-clause | 5,831,767,619,180,549,000 | 41.743169 | 94 | 0.749297 | false | 4.531866 | false | false | false |
ovnicraft/openerp-server | openerp/addons/base/module/wizard/base_update_translations.py | 1 | 2901 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv, fields
import tools
import cStringIO
from tools.translate import _
class base_update_translations(osv.osv_memory):
def _get_languages(self, cr, uid, context):
lang_obj = self.pool.get('res.lang')
ids = lang_obj.search(cr, uid, ['&', ('active', '=', True), ('translatable', '=', True),])
langs = lang_obj.browse(cr, uid, ids)
return [(lang.code, lang.name) for lang in langs]
def _get_lang_name(self, cr, uid, lang_code):
lang_obj = self.pool.get('res.lang')
ids = lang_obj.search(cr, uid, [('code', '=', lang_code)])
if not ids:
raise osv.except_osv(_('Error!'), _('No language with code "%s" exists') % lang_code)
lang = lang_obj.browse(cr, uid, ids[0])
return lang.name
def act_update(self, cr, uid, ids, context=None):
this = self.browse(cr, uid, ids)[0]
lang_name = self._get_lang_name(cr, uid, this.lang)
buf = cStringIO.StringIO()
tools.trans_export(this.lang, ['all'], buf, 'csv', cr)
tools.trans_load_data(cr, buf, 'csv', this.lang, lang_name=lang_name)
buf.close()
return {'type': 'ir.actions.act_window_close'}
def default_get(self, cr, uid, fields, context=None):
if context is None:
context = {}
res = super(base_update_translations, self).default_get(cr, uid, fields, context=context)
if context.get('active_model') != "res.lang":
return res
record_id = context.get('active_id', False) or False
if record_id:
lang = self.pool.get('res.lang').browse(cr, uid, record_id).code
res.update(lang=lang)
return res
_name = 'base.update.translations'
_columns = {
'lang': fields.selection(_get_languages, 'Language', required=True),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -5,133,423,672,400,212,000 | 40.442857 | 98 | 0.592554 | false | 3.79712 | false | false | false |
PageArkanis/StEVE | steve/constellation.py | 1 | 1217 | from steve.backend.sqlitedb import SDB
from steve.system import System
class Constellation(object):
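    """Represents a constellation: wraps one row of static map data and lazily loads its solar systems."""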
def __init__(self, universe, data):
self.universe = universe
self.regionID = data[0]
self.uid = data[1]
self.name = data[2]
self.x = data[3]
self.y = data[4]
self.z = data[5]
self.xMin = data[6]
self.xMax = data[7]
self.yMin = data[8]
self.yMax = data[9]
self.zMin = data[10]
self.zMax = data[11]
self.factionID = data[12]
self.radius = data[13]
self._systems = {}
@property
def system(self):
        if len(self._systems) == 0:
            query = 'SELECT * from mapSolarSystems WHERE constellationID = %s' % self.uid
for entry in SDB.queryAll(query):
system = System(self.universe, entry)
self._systems[system.name] = system
self._systems[system.uid] = system
return self._systems
@property
def region(self):
return self.universe.regions[self.regionID]
| agpl-3.0 | 3,538,158,798,730,244,000 | 26.659091 | 88 | 0.501233 | false | 3.744615 | false | false | false |
blueyed/coveragepy | tests/test_templite.py | 1 | 10970 | # coding: utf-8
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Tests for coverage.templite."""
import re
from coverage.templite import Templite, TempliteSyntaxError, TempliteValueError
from tests.coveragetest import CoverageTest
# pylint: disable=unused-variable
class AnyOldObject(object):
"""Simple testing object.
Use keyword arguments in the constructor to set attributes on the object.
"""
def __init__(self, **attrs):
for n, v in attrs.items():
setattr(self, n, v)
class TempliteTest(CoverageTest):
"""Tests for Templite."""
run_in_temp_dir = False
def try_render(self, text, ctx=None, result=None):
"""Render `text` through `ctx`, and it had better be `result`.
Result defaults to None so we can shorten the calls where we expect
an exception and never get to the result comparison.
"""
actual = Templite(text).render(ctx or {})
# If result is None, then an exception should have prevented us getting
# to here.
assert result is not None
self.assertEqual(actual, result)
def assertSynErr(self, msg):
"""Assert that a `TempliteSyntaxError` will happen.
A context manager, and the message should be `msg`.
"""
pat = "^" + re.escape(msg) + "$"
return self.assertRaisesRegex(TempliteSyntaxError, pat)
def test_passthrough(self):
# Strings without variables are passed through unchanged.
self.assertEqual(Templite("Hello").render(), "Hello")
self.assertEqual(
Templite("Hello, 20% fun time!").render(),
"Hello, 20% fun time!"
)
def test_variables(self):
# Variables use {{var}} syntax.
self.try_render("Hello, {{name}}!", {'name':'Ned'}, "Hello, Ned!")
def test_undefined_variables(self):
# Using undefined names is an error.
with self.assertRaises(Exception):
self.try_render("Hi, {{name}}!")
def test_pipes(self):
# Variables can be filtered with pipes.
data = {
'name': 'Ned',
'upper': lambda x: x.upper(),
'second': lambda x: x[1],
}
self.try_render("Hello, {{name|upper}}!", data, "Hello, NED!")
# Pipes can be concatenated.
self.try_render("Hello, {{name|upper|second}}!", data, "Hello, E!")
def test_reusability(self):
# A single Templite can be used more than once with different data.
globs = {
'upper': lambda x: x.upper(),
'punct': '!',
}
template = Templite("This is {{name|upper}}{{punct}}", globs)
self.assertEqual(template.render({'name':'Ned'}), "This is NED!")
self.assertEqual(template.render({'name':'Ben'}), "This is BEN!")
def test_attribute(self):
# Variables' attributes can be accessed with dots.
obj = AnyOldObject(a="Ay")
self.try_render("{{obj.a}}", locals(), "Ay")
obj2 = AnyOldObject(obj=obj, b="Bee")
self.try_render("{{obj2.obj.a}} {{obj2.b}}", locals(), "Ay Bee")
def test_member_function(self):
# Variables' member functions can be used, as long as they are nullary.
class WithMemberFns(AnyOldObject):
"""A class to try out member function access."""
def ditto(self):
"""Return twice the .txt attribute."""
return self.txt + self.txt
obj = WithMemberFns(txt="Once")
self.try_render("{{obj.ditto}}", locals(), "OnceOnce")
def test_item_access(self):
# Variables' items can be used.
d = {'a':17, 'b':23}
self.try_render("{{d.a}} < {{d.b}}", locals(), "17 < 23")
def test_loops(self):
# Loops work like in Django.
nums = [1,2,3,4]
self.try_render(
"Look: {% for n in nums %}{{n}}, {% endfor %}done.",
locals(),
"Look: 1, 2, 3, 4, done."
)
# Loop iterables can be filtered.
def rev(l):
"""Return the reverse of `l`."""
l = l[:]
l.reverse()
return l
self.try_render(
"Look: {% for n in nums|rev %}{{n}}, {% endfor %}done.",
locals(),
"Look: 4, 3, 2, 1, done."
)
def test_empty_loops(self):
self.try_render(
"Empty: {% for n in nums %}{{n}}, {% endfor %}done.",
{'nums':[]},
"Empty: done."
)
def test_multiline_loops(self):
self.try_render(
"Look: \n{% for n in nums %}\n{{n}}, \n{% endfor %}done.",
{'nums':[1,2,3]},
"Look: \n\n1, \n\n2, \n\n3, \ndone."
)
def test_multiple_loops(self):
self.try_render(
"{% for n in nums %}{{n}}{% endfor %} and "
"{% for n in nums %}{{n}}{% endfor %}",
{'nums': [1,2,3]},
"123 and 123"
)
def test_comments(self):
# Single-line comments work:
self.try_render(
"Hello, {# Name goes here: #}{{name}}!",
{'name':'Ned'}, "Hello, Ned!"
)
# and so do multi-line comments:
self.try_render(
"Hello, {# Name\ngoes\nhere: #}{{name}}!",
{'name':'Ned'}, "Hello, Ned!"
)
def test_if(self):
self.try_render(
"Hi, {% if ned %}NED{% endif %}{% if ben %}BEN{% endif %}!",
{'ned': 1, 'ben': 0},
"Hi, NED!"
)
self.try_render(
"Hi, {% if ned %}NED{% endif %}{% if ben %}BEN{% endif %}!",
{'ned': 0, 'ben': 1},
"Hi, BEN!"
)
self.try_render(
"Hi, {% if ned %}NED{% if ben %}BEN{% endif %}{% endif %}!",
{'ned': 0, 'ben': 0},
"Hi, !"
)
self.try_render(
"Hi, {% if ned %}NED{% if ben %}BEN{% endif %}{% endif %}!",
{'ned': 1, 'ben': 0},
"Hi, NED!"
)
self.try_render(
"Hi, {% if ned %}NED{% if ben %}BEN{% endif %}{% endif %}!",
{'ned': 1, 'ben': 1},
"Hi, NEDBEN!"
)
def test_complex_if(self):
class Complex(AnyOldObject):
"""A class to try out complex data access."""
def getit(self):
"""Return it."""
return self.it
obj = Complex(it={'x':"Hello", 'y': 0})
self.try_render(
"@"
"{% if obj.getit.x %}X{% endif %}"
"{% if obj.getit.y %}Y{% endif %}"
"{% if obj.getit.y|str %}S{% endif %}"
"!",
{ 'obj': obj, 'str': str },
"@XS!"
)
def test_loop_if(self):
self.try_render(
"@{% for n in nums %}{% if n %}Z{% endif %}{{n}}{% endfor %}!",
{'nums': [0,1,2]},
"@0Z1Z2!"
)
self.try_render(
"X{%if nums%}@{% for n in nums %}{{n}}{% endfor %}{%endif%}!",
{'nums': [0,1,2]},
"X@012!"
)
self.try_render(
"X{%if nums%}@{% for n in nums %}{{n}}{% endfor %}{%endif%}!",
{'nums': []},
"X!"
)
def test_nested_loops(self):
self.try_render(
"@"
"{% for n in nums %}"
"{% for a in abc %}{{a}}{{n}}{% endfor %}"
"{% endfor %}"
"!",
{'nums': [0,1,2], 'abc': ['a', 'b', 'c']},
"@a0b0c0a1b1c1a2b2c2!"
)
def test_whitespace_handling(self):
self.try_render(
"@{% for n in nums %}\n"
" {% for a in abc %}{{a}}{{n}}{% endfor %}\n"
"{% endfor %}!\n",
{'nums': [0, 1, 2], 'abc': ['a', 'b', 'c']},
"@\n a0b0c0\n\n a1b1c1\n\n a2b2c2\n!\n"
)
self.try_render(
"@{% for n in nums -%}\n"
" {% for a in abc -%}\n"
" {# this disappears completely -#}\n"
" {{a -}}\n"
" {{n -}}\n"
" {% endfor %}\n"
"{% endfor %}!\n",
{'nums': [0, 1, 2], 'abc': ['a', 'b', 'c']},
"@a0b0c0\na1b1c1\na2b2c2\n!\n"
)
def test_non_ascii(self):
self.try_render(
u"{{where}} ollǝɥ",
{ 'where': u'ǝɹǝɥʇ' },
u"ǝɹǝɥʇ ollǝɥ"
)
def test_exception_during_evaluation(self):
# TypeError: Couldn't evaluate {{ foo.bar.baz }}:
msg = "Couldn't evaluate None.bar"
with self.assertRaisesRegex(TempliteValueError, msg):
self.try_render(
"Hey {{foo.bar.baz}} there", {'foo': None}, "Hey ??? there"
)
def test_bad_names(self):
with self.assertSynErr("Not a valid name: 'var%&!@'"):
self.try_render("Wat: {{ var%&!@ }}")
with self.assertSynErr("Not a valid name: 'filter%&!@'"):
self.try_render("Wat: {{ foo|filter%&!@ }}")
with self.assertSynErr("Not a valid name: '@'"):
self.try_render("Wat: {% for @ in x %}{% endfor %}")
def test_bogus_tag_syntax(self):
with self.assertSynErr("Don't understand tag: 'bogus'"):
self.try_render("Huh: {% bogus %}!!{% endbogus %}??")
def test_malformed_if(self):
with self.assertSynErr("Don't understand if: '{% if %}'"):
self.try_render("Buh? {% if %}hi!{% endif %}")
with self.assertSynErr("Don't understand if: '{% if this or that %}'"):
self.try_render("Buh? {% if this or that %}hi!{% endif %}")
def test_malformed_for(self):
with self.assertSynErr("Don't understand for: '{% for %}'"):
self.try_render("Weird: {% for %}loop{% endfor %}")
with self.assertSynErr("Don't understand for: '{% for x from y %}'"):
self.try_render("Weird: {% for x from y %}loop{% endfor %}")
with self.assertSynErr("Don't understand for: '{% for x, y in z %}'"):
self.try_render("Weird: {% for x, y in z %}loop{% endfor %}")
def test_bad_nesting(self):
with self.assertSynErr("Unmatched action tag: 'if'"):
self.try_render("{% if x %}X")
with self.assertSynErr("Mismatched end tag: 'for'"):
self.try_render("{% if x %}X{% endfor %}")
with self.assertSynErr("Too many ends: '{% endif %}'"):
self.try_render("{% if x %}{% endif %}{% endif %}")
def test_malformed_end(self):
with self.assertSynErr("Don't understand end: '{% end if %}'"):
self.try_render("{% if x %}X{% end if %}")
with self.assertSynErr("Don't understand end: '{% endif now %}'"):
self.try_render("{% if x %}X{% endif now %}")
| apache-2.0 | 1,905,987,912,981,123,000 | 33.670886 | 79 | 0.478094 | false | 3.555988 | true | false | false |
RyanSkraba/beam | sdks/python/apache_beam/coders/typecoders.py | 1 | 8078 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Type coders registration.
This module contains functionality to define and use coders for custom classes.
Let's say we have a class Xyz and we are processing a PCollection with elements
of type Xyz. If we do not register a coder for Xyz, a default pickle-based
fallback coder will be used. This can be undesirable for two reasons. First, we
may want a faster coder or a more space efficient one. Second, the pickle-based
coder is not deterministic in the sense that objects like dictionaries or sets
are not guaranteed to be encoded in the same way every time (elements are not
really ordered).
Two (sometimes three) steps are needed to define and use a custom coder:
- define the coder class
- associate the coder with the class (a.k.a. coder registration)
- typehint DoFns or transforms with the new class or composite types using
the class.
A coder class is defined by subclassing from CoderBase and defining the
encode_to_bytes and decode_from_bytes methods. The framework uses duck-typing
for coders so it is not strictly required to subclass from CoderBase as long as
the encode/decode methods are defined.
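
For illustration only (``Xyz`` and ``XyzCoder`` are hypothetical names, not part
of Beam), a minimal coder might look like::

  class XyzCoder(coders.Coder):
    def encode(self, value):
      return value.to_bytes()        # assuming Xyz can serialize itself to bytes

    def decode(self, encoded):
      return Xyz.from_bytes(encoded)  # assuming Xyz can be rebuilt from bytes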
Registering a coder class is made with a register_coder() call::
from apache_beam import coders
...
coders.registry.register_coder(Xyz, XyzCoder)
Additionally, DoFns and PTransforms may need type hints. This is not always
necessary since there is functionality to infer the return types of DoFns by
analyzing the code. For instance, for the function below the return type of
'Xyz' will be inferred::
def MakeXyzs(v):
return Xyz(v)
If Xyz is inferred then its coder will be used whenever the framework needs to
serialize data (e.g., writing to the shuffler subsystem responsible for group by
key operations). If a typehint is needed it can be specified by decorating the
DoFns or using with_input_types/with_output_types methods on PTransforms. For
example, the above function can be decorated::
@with_output_types(Xyz)
def MakeXyzs(v):
return complex_operation_returning_Xyz(v)
See apache_beam.typehints.decorators module for more details.
"""
from __future__ import absolute_import
from builtins import object
from typing import Any
from typing import Dict
from typing import Iterable
from typing import List
from typing import Type
from past.builtins import unicode
from apache_beam.coders import coders
from apache_beam.typehints import typehints
__all__ = ['registry']
class CoderRegistry(object):
"""A coder registry for typehint/coder associations."""
def __init__(self, fallback_coder=None):
self._coders = {} # type: Dict[Any, Type[coders.Coder]]
self.custom_types = [] # type: List[Any]
self.register_standard_coders(fallback_coder)
def register_standard_coders(self, fallback_coder):
"""Register coders for all basic and composite types."""
self._register_coder_internal(int, coders.VarIntCoder)
self._register_coder_internal(float, coders.FloatCoder)
self._register_coder_internal(bytes, coders.BytesCoder)
self._register_coder_internal(bool, coders.BooleanCoder)
self._register_coder_internal(unicode, coders.StrUtf8Coder)
self._register_coder_internal(typehints.TupleConstraint, coders.TupleCoder)
# Default fallback coders applied in that order until the first matching
# coder found.
default_fallback_coders = [coders.ProtoCoder, coders.FastPrimitivesCoder]
self._fallback_coder = fallback_coder or FirstOf(default_fallback_coders)
def _register_coder_internal(self, typehint_type, typehint_coder_class):
# type: (Any, Type[coders.Coder]) -> None
self._coders[typehint_type] = typehint_coder_class
def register_coder(self, typehint_type, typehint_coder_class):
# type: (Any, Type[coders.Coder]) -> None
if not isinstance(typehint_coder_class, type):
raise TypeError('Coder registration requires a coder class object. '
'Received %r instead.' % typehint_coder_class)
if typehint_type not in self.custom_types:
self.custom_types.append(typehint_type)
self._register_coder_internal(typehint_type, typehint_coder_class)
def get_coder(self, typehint):
# type: (Any) -> coders.Coder
coder = self._coders.get(
typehint.__class__ if isinstance(typehint, typehints.TypeConstraint)
else typehint, None)
if isinstance(typehint, typehints.TypeConstraint) and coder is not None:
return coder.from_type_hint(typehint, self)
if coder is None:
# We use the fallback coder when there is no coder registered for a
# typehint. For example a user defined class with no coder specified.
if not hasattr(self, '_fallback_coder'):
raise RuntimeError(
'Coder registry has no fallback coder. This can happen if the '
'fast_coders module could not be imported.')
if isinstance(typehint, (typehints.IterableTypeConstraint,
typehints.ListConstraint)):
return coders.IterableCoder.from_type_hint(typehint, self)
elif typehint is None:
# In some old code, None is used for Any.
# TODO(robertwb): Clean this up.
pass
elif typehint is object or typehint == typehints.Any:
# We explicitly want the fallback coder.
pass
elif isinstance(typehint, typehints.TypeVariable):
# TODO(robertwb): Clean this up when type inference is fully enabled.
pass
else:
# TODO(robertwb): Re-enable this warning when it's actionable.
# warnings.warn('Using fallback coder for typehint: %r.' % typehint)
pass
coder = self._fallback_coder
return coder.from_type_hint(typehint, self)
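  # For example (illustrative only): registry.get_coder(typehints.Tuple[int, bytes])
  # resolves through TupleCoder.from_type_hint to a TupleCoder wrapping a
  # VarIntCoder and a BytesCoder, since both element types are registered above.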
def get_custom_type_coder_tuples(self, types):
"""Returns type/coder tuples for all custom types passed in."""
return [(t, self._coders[t]) for t in types if t in self.custom_types]
def verify_deterministic(self, key_coder, op_name, silent=True):
if not key_coder.is_deterministic():
error_msg = ('The key coder "%s" for %s '
'is not deterministic. This may result in incorrect '
'pipeline output. This can be fixed by adding a type '
'hint to the operation preceding the GroupByKey step, '
'and for custom key classes, by writing a '
'deterministic custom Coder. Please see the '
'documentation for more details.' % (key_coder, op_name))
return key_coder.as_deterministic_coder(op_name, error_msg)
else:
return key_coder
class FirstOf(object):
"""For internal use only; no backwards-compatibility guarantees.
A class used to get the first matching coder from a list of coders."""
def __init__(self, coders):
# type: (Iterable[Type[coders.Coder]]) -> None
self._coders = coders
def from_type_hint(self, typehint, registry):
messages = []
for coder in self._coders:
try:
return coder.from_type_hint(typehint, self)
except Exception as e:
msg = ('%s could not provide a Coder for type %s: %s' %
(coder, typehint, e))
messages.append(msg)
raise ValueError('Cannot provide coder for %s: %s' %
(typehint, ';'.join(messages)))
registry = CoderRegistry()
| apache-2.0 | -3,578,803,315,472,674,000 | 41.072917 | 80 | 0.709953 | false | 3.87806 | false | false | false |
sunshinelover/chanlun | vn.trader/ctaAlgo/uiChanlunWidget.py | 1 | 68647 | # encoding: UTF-8
"""
GUI control components for the Chanlun (Chan theory) module.
"""
from vtGateway import VtSubscribeReq
from uiBasicWidget import QtGui, QtCore, BasicCell,BasicMonitor,TradingWidget
from eventEngine import *
from ctaBase import *
import pyqtgraph as pg
import numpy as np
import pymongo
from pymongo.errors import *
from datetime import datetime, timedelta
from ctaHistoryData import HistoryDataEngine
import time
import types
import pandas as pd
########################################################################
class MyStringAxis(pg.AxisItem):
def __init__(self, xdict, *args, **kwargs):
pg.AxisItem.__init__(self, *args, **kwargs)
self.x_values = np.asarray(xdict.keys())
self.x_strings = xdict.values()
def tickStrings(self, values, scale, spacing):
strings = []
for v in values:
# vs is the original tick value
vs = v * scale
# if we have vs in our values, show the string
# otherwise show nothing
if vs in self.x_values:
# Find the string with x_values closest to vs
vstr = self.x_strings[np.abs(self.x_values - vs).argmin()]
else:
vstr = ""
strings.append(vstr)
return strings
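# Usage sketch (illustrative): the x-axis mapping is built from the bar index,
# mirroring what PriceWidget.initplotKline() does below:
#   xdict = dict(enumerate(data.index))
#   axis = MyStringAxis(xdict, orientation='bottom')
#   pw = pg.PlotWidget(axisItems={'bottom': axis})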
########################################################################
class ChanlunEngineManager(QtGui.QWidget):
"""chanlun引擎管理组件"""
signal = QtCore.pyqtSignal(type(Event()))
# ----------------------------------------------------------------------
def __init__(self, chanlunEngine, eventEngine, mainEngine, parent=None):
"""Constructor"""
super(ChanlunEngineManager, self).__init__(parent)
self.chanlunEngine = chanlunEngine
self.eventEngine = eventEngine
self.mainEngine = mainEngine
self.penLoaded = False
self.segmentLoaded = False
self.tickLoaded = False
self.zhongShuLoaded = False
self.instrumentid = ''
self.initUi()
self.registerEvent()
        # Write a startup log entry
self.chanlunEngine.writeChanlunLog(u'缠论引擎启动成功')
# ----------------------------------------------------------------------
def initUi(self):
"""初始化界面"""
self.setWindowTitle(u'缠论策略')
        # Futures contract code input box
self.codeEdit = QtGui.QLineEdit()
self.codeEdit.setPlaceholderText(u'在此输入期货代码')
self.codeEdit.setMaximumWidth(200)
        self.data = pd.DataFrame()  # data used for plotting (important)
        self.fenX = []  # x coordinates of the pen/segment points
        self.fenY = []  # y coordinates of the pen/segment points
        self.zhongshuPos = []  # positions of the zhongshu (central pivots)
        self.zhongShuType = []  # direction of each zhongshu
        # Price chart widgets
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data)
self.TickW = None
        # MongoDB related
self.__mongoConnected = False
self.__mongoConnection = None
        # Call setup functions
self.__connectMongo()
        # Buttons
penButton = QtGui.QPushButton(u'分笔')
segmentButton = QtGui.QPushButton(u'分段')
zhongshuButton = QtGui.QPushButton(u'走势中枢')
shopButton = QtGui.QPushButton(u'买卖点')
restoreButton = QtGui.QPushButton(u'还原')
penButton.clicked.connect(self.pen)
segmentButton.clicked.connect(self.segment)
zhongshuButton.clicked.connect(self.zhongShu)
shopButton.clicked.connect(self.shop)
restoreButton.clicked.connect(self.restore)
        # Log monitor for the chanlun component
self.chanlunLogMonitor = QtGui.QTextEdit()
self.chanlunLogMonitor.setReadOnly(True)
self.chanlunLogMonitor.setMaximumHeight(180)
        # Layout
self.hbox2 = QtGui.QHBoxLayout()
self.hbox2.addWidget(self.codeEdit)
self.hbox2.addWidget(penButton)
self.hbox2.addWidget(segmentButton)
self.hbox2.addWidget(zhongshuButton)
self.hbox2.addWidget(shopButton)
self.hbox2.addWidget(restoreButton)
self.hbox2.addStretch()
tickButton = QtGui.QPushButton(u'Tick')
oneMButton = QtGui.QPushButton(u"1分")
fiveMButton = QtGui.QPushButton(u'5分')
fifteenMButton = QtGui.QPushButton(u'15分')
thirtyMButton = QtGui.QPushButton(u'30分')
sixtyMButton = QtGui.QPushButton(u'60分')
dayButton = QtGui.QPushButton(u'日')
weekButton = QtGui.QPushButton(u'周')
monthButton = QtGui.QPushButton(u'月')
oneMButton.checked = True
self.vbox1 = QtGui.QVBoxLayout()
tickButton.clicked.connect(self.openTick)
oneMButton.clicked.connect(self.oneM)
fiveMButton.clicked.connect(self.fiveM)
fifteenMButton.clicked.connect(self.fifteenM)
thirtyMButton.clicked.connect(self.thirtyM)
sixtyMButton.clicked.connect(self.sixtyM)
dayButton.clicked.connect(self.daily)
weekButton.clicked.connect(self.weekly)
monthButton.clicked.connect(self.monthly)
self.vbox2 = QtGui.QVBoxLayout()
self.vbox1.addWidget(self.PriceW)
self.vbox2.addWidget(tickButton)
self.vbox2.addWidget(oneMButton)
self.vbox2.addWidget(fiveMButton)
self.vbox2.addWidget(fifteenMButton)
self.vbox2.addWidget(thirtyMButton)
self.vbox2.addWidget(sixtyMButton)
self.vbox2.addWidget(dayButton)
self.vbox2.addWidget(weekButton)
self.vbox2.addWidget(monthButton)
self.vbox2.addStretch()
self.hbox3 = QtGui.QHBoxLayout()
self.hbox3.addStretch()
self.hbox3.addLayout(self.vbox1)
self.hbox3.addLayout(self.vbox2)
self.vbox = QtGui.QVBoxLayout()
self.vbox.addLayout(self.hbox2)
self.vbox.addLayout(self.hbox3)
self.vbox.addWidget(self.chanlunLogMonitor)
self.setLayout(self.vbox)
self.codeEdit.returnPressed.connect(self.updateSymbol)
#-----------------------------------------------------------------------
    # Fetch historical data from the DataYes (通联) data service
def downloadData(self, symbol, unit):
        listBar = []  # bar (candlestick) data
num = 0
        # Fetch bar data from the DataYes client
historyDataEngine = HistoryDataEngine()
        # An int unit fetches minute bars; a string unit fetches daily/weekly/monthly bars
if type(unit) is types.IntType:
            # Download today's minute bars from DataYes and store them in the database
historyDataEngine.downloadFuturesIntradayBar(symbol, unit)
            # Load the previous days' minute bars from the database
cx = self.getDbData(symbol, unit)
if cx:
for data in cx:
barOpen = data['open']
barClose = data['close']
barLow = data['low']
barHigh = data['high']
barTime = data['datetime']
listBar.append((num, barTime, barOpen, barClose, barLow, barHigh))
num += 1
elif type(unit) is types.StringType:
data = historyDataEngine.downloadFuturesBar(symbol, unit)
if data:
for d in data:
barOpen = d.get('openPrice', 0)
barClose = d.get('closePrice', 0)
barLow = d.get('lowestPrice', 0)
barHigh = d.get('highestPrice', 0)
if unit == "daily":
barTime = d.get('tradeDate', '').replace('-', '')
else:
barTime = d.get('endDate', '').replace('-', '')
listBar.append((num, barTime, barOpen, barClose, barLow, barHigh))
num += 1
if unit == "monthly" or unit == "weekly":
listBar.reverse()
else:
print "参数格式错误"
return
        # Convert the list into a DataFrame for easier handling
df = pd.DataFrame(listBar, columns=['num', 'time', 'open', 'close', 'low', 'high'])
df.index = df['time'].tolist()
df = df.drop('time', 1)
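        # df is now indexed by bar time and keeps the columns
        # ['num', 'open', 'close', 'low', 'high'].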
return df
#-----------------------------------------------------------------------
    # Load the last few days' minute bars from the database
def getDbData(self, symbol, unit):
        # No trading on Saturday/Sunday, so there is no minute data for those days
        # Pick the database name for the bar period
dbname = ''
days = 7
if unit == 1:
dbname = MINUTE_DB_NAME
elif unit == 5:
dbname = MINUTE5_DB_NAME
elif unit == 15:
dbname = MINUTE15_DB_NAME
elif unit == 30:
dbname = MINUTE30_DB_NAME
elif unit == 60:
dbname = MINUTE60_DB_NAME
        weekday = datetime.now().weekday()  # weekday() returns 0-6 for Monday through Sunday
if days == 2:
if weekday == 6:
aDay = timedelta(days=3)
elif weekday == 0 or weekday == 1:
aDay = timedelta(days=4)
else:
aDay = timedelta(days=2)
else:
aDay = timedelta(days=7)
startDate = (datetime.now() - aDay).strftime('%Y%m%d')
print startDate
if self.__mongoConnected:
collection = self.__mongoConnection[dbname][symbol]
cx = collection.find({'date': {'$gte': startDate}})
return cx
else:
return None
#----------------------------------------------------------------------------------
    # Handle a change of contract symbol
def updateSymbol(self):
        # Read the symbol entered in the input box
instrumentid = str(self.codeEdit.text())
self.chanlunEngine.writeChanlunLog(u'查询合约%s' % (instrumentid))
        # Fetch today's minute bars from the DataYes client
self.data = self.downloadData(instrumentid, 1)
if self.data.empty:
self.chanlunEngine.writeChanlunLog(u'合约%s 不存在' % (instrumentid))
else:
if self.tickLoaded:
self.vbox1.removeWidget(self.TickW)
self.TickW.deleteLater()
else:
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data)
self.vbox1.addWidget(self.PriceW)
            # Plot the candlestick chart
self.PriceW.plotHistorticData()
self.chanlunEngine.writeChanlunLog(u'打开合约%s 1分钟K线图' % (instrumentid))
self.penLoaded = False
self.segmentLoaded = False
self.tickLoaded = False
self.zhongShuLoaded = False
# # 订阅合约[仿照ctaEngine.py写的]
# # 先取消订阅之前的合约,再订阅最新输入的合约
# contract = self.mainEngine.getContract(self.instrumentid)
# if contract:
# req = VtSubscribeReq()
# req.symbol = contract.symbol
# self.mainEngine.unsubscribe(req, contract.gatewayName)
#
# contract = self.mainEngine.getContract(instrumentid)
# if contract:
# req = VtSubscribeReq()
# req.symbol = contract.symbol
# self.mainEngine.subscribe(req, contract.gatewayName)
# else:
# self.chanlunEngine.writeChanlunLog(u'交易合约%s无法找到' % (instrumentid))
#
# # 重新注册事件监听
# self.eventEngine.unregister(EVENT_TICK + self.instrumentid, self.signal.emit)
# self.eventEngine.register(EVENT_TICK + instrumentid, self.signal.emit)
        # Update the current contract
self.instrumentid = instrumentid
def oneM(self):
"打开1分钟K线图"
self.chanlunEngine.writeChanlunLog(u'打开合约%s 1分钟K线图' % (self.instrumentid))
# 从通联数据客户端获取数据
self.data = self.downloadData(self.instrumentid, 1)
if self.tickLoaded:
self.vbox1.removeWidget(self.TickW)
self.TickW.deleteLater()
else:
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data)
self.vbox1.addWidget(self.PriceW)
# 画K线图
self.PriceW.plotHistorticData()
self.tickLoaded = False
self.penLoaded = False
self.segmentLoaded = False
self.zhongShuLoaded = False
# ----------------------------------------------------------------------
def fiveM(self):
"打开5分钟K线图"
self.chanlunEngine.writeChanlunLog(u'打开合约%s 5分钟K线图' % (self.instrumentid))
# 从通联数据客户端获取数据
self.data = self.downloadData(self.instrumentid, 5)
if self.tickLoaded:
self.vbox1.removeWidget(self.TickW)
self.TickW.deleteLater()
else:
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data)
self.vbox1.addWidget(self.PriceW)
# 画K线图
self.PriceW.plotHistorticData()
self.tickLoaded = False
self.penLoaded = False
self.segmentLoaded = False
self.zhongShuLoaded = False
# ----------------------------------------------------------------------
def fifteenM(self):
"打开15分钟K线图"
self.chanlunEngine.writeChanlunLog(u'打开合约%s 15分钟K线图' % (self.instrumentid))
# 从通联数据客户端获取数据
self.data = self.downloadData(self.instrumentid, 15)
if self.tickLoaded:
self.vbox1.removeWidget(self.TickW)
self.TickW.deleteLater()
else:
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data)
self.vbox1.addWidget(self.PriceW)
# 画K线图
self.PriceW.plotHistorticData()
self.tickLoaded = False
self.penLoaded = False
self.segmentLoaded = False
self.zhongShuLoaded = False
# ----------------------------------------------------------------------
def thirtyM(self):
"打开30分钟K线图"
self.chanlunEngine.writeChanlunLog(u'打开合约%s 30分钟K线图' % (self.instrumentid))
# 从通联数据客户端获取数据
self.data = self.downloadData(self.instrumentid, 30)
if self.tickLoaded:
self.vbox1.removeWidget(self.TickW)
self.TickW.deleteLater()
else:
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data)
self.vbox1.addWidget(self.PriceW)
# 画K线图
self.PriceW.plotHistorticData()
self.tickLoaded = False
self.penLoaded = False
self.segmentLoaded = False
self.zhongShuLoaded = False
# ----------------------------------------------------------------------
def sixtyM(self):
"打开60分钟K线图"
self.chanlunEngine.writeChanlunLog(u'打开合约%s 60分钟K线图' % (self.instrumentid))
# 从通联数据客户端获取数据
self.data = self.downloadData(self.instrumentid, 60)
if self.tickLoaded:
self.vbox1.removeWidget(self.TickW)
self.TickW.deleteLater()
else:
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data)
self.vbox1.addWidget(self.PriceW)
# 画K线图
self.PriceW.plotHistorticData()
self.tickLoaded = False
self.penLoaded = False
self.segmentLoaded = False
self.zhongShuLoaded = False
# ----------------------------------------------------------------------
def daily(self):
"""打开日K线图"""
self.chanlunEngine.writeChanlunLog(u'打开合约%s 日K线图' % (self.instrumentid))
# 从通联数据客户端获取数据
self.data = self.downloadData(self.instrumentid, "daily")
if self.tickLoaded:
self.vbox1.removeWidget(self.TickW)
self.TickW.deleteLater()
else:
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data)
self.vbox1.addWidget(self.PriceW)
# 画K线图
self.PriceW.plotHistorticData()
self.tickLoaded = False
self.penLoaded = False
self.segmentLoaded = False
self.zhongShuLoaded = False
# ----------------------------------------------------------------------
def weekly(self):
"""打开周K线图"""
self.chanlunEngine.writeChanlunLog(u'打开合约%s 周K线图' % (self.instrumentid))
# 从通联数据客户端获取数据
self.data = self.downloadData(self.instrumentid, "weekly")
if self.tickLoaded:
self.vbox1.removeWidget(self.TickW)
self.TickW.deleteLater()
else:
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data)
self.vbox1.addWidget(self.PriceW)
# 画K线图
self.PriceW.plotHistorticData()
self.tickLoaded = False
self.penLoaded = False
self.segmentLoaded = False
self.zhongShuLoaded = False
def monthly(self):
"""打开月K线图"""
self.chanlunEngine.writeChanlunLog(u'打开合约%s 月K线图' % (self.instrumentid))
# 从通联数据客户端获取数据并画图
self.data = self.downloadData(self.instrumentid, "monthly")
if self.tickLoaded:
self.vbox1.removeWidget(self.TickW)
self.TickW.deleteLater()
else:
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data)
self.vbox1.addWidget(self.PriceW)
# 画K线图
self.PriceW.plotHistorticData()
self.tickLoaded = False
self.penLoaded = False
self.segmentLoaded = False
self.zhongShuLoaded = False
# ----------------------------------------------------------------------
def openTick(self):
"""切换成tick图"""
self.chanlunEngine.writeChanlunLog(u'打开tick图')
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.TickW = TickWidget(self.eventEngine, self.chanlunEngine)
self.vbox1.addWidget(self.TickW)
self.tickLoaded = True
self.penLoaded = False
self.segmentLoaded = False
self.zhongShuLoaded = False
# ----------------------------------------------------------------------
def restore(self):
"""还原初始k线状态"""
self.chanlunEngine.writeChanlunLog(u'还原加载成功')
if self.tickLoaded:
self.vbox1.removeWidget(self.TickW)
self.TickW.deleteLater()
else:
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.data = self.downloadData(self.instrumentid, 1)
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data, self)
self.vbox1.addWidget(self.PriceW)
# 画K线图
self.PriceW.plotHistorticData()
self.chanlunEngine.writeChanlunLog(u'还原为1分钟k线图')
self.penLoaded = False
self.segmentLoaded = False
self.tickLoaded = False
# ----------------------------------------------------------------------
def pen(self):
"""加载分笔"""
        # First merge the K-lines and keep the merged data before rebuilding PriceW
if not self.penLoaded:
            after_fenxing = self.judgeInclude()  # resolve inclusion relations between the K-lines in self.data
            # Remove the existing widget and create a new one to clear the canvas
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, after_fenxing)
self.vbox1.addWidget(self.PriceW)
            # Re-plot the candlesticks using the merged K-line data
self.plotAfterFenXing(after_fenxing)
        # Find the tops and bottoms (fractals)
fenxing_data, fenxing_type = self.findTopAndLow(after_fenxing)
arrayFenxingdata = np.array(fenxing_data)
arrayTypedata = np.array(fenxing_type)
self.fenY = []
self.fenX = [m[0] for m in arrayFenxingdata]
        fenbiY1 = [m[4] for m in arrayFenxingdata]  # a top fractal is marked by its high
        fenbiY2 = [m[3] for m in arrayFenxingdata]  # a bottom fractal is marked by its low
for i in xrange(len(self.fenX)):
if arrayTypedata[i] == 1:
self.fenY.append(fenbiY1[i])
else:
self.fenY.append(fenbiY2[i])
if not self.penLoaded:
if self.fenX:
self.fenX.append(self.fenX[-1])
self.fenY.append(self.fenY[-1])
print "self.fenX: ", self.fenX
print "self.fenY: ", self.fenY
self.fenbi(self.fenX, self.fenY)
self.fenX.pop()
self.fenY.pop()
self.chanlunEngine.writeChanlunLog(u'分笔加载成功')
self.penLoaded = True
# ----------------------------------------------------------------------
def segment(self):
if not self.penLoaded:
            self.pen()  # pens must be drawn before segments
        segmentX = []  # x values of the segment points
        segmentY = []  # y values of the segment points
        temp_type = 0  # segment direction: 1 up, -1 down, 0 until the first three pens are checked for overlap
i = 0
while i < len(self.fenX) - 4:
if temp_type == 0:
if self.fenY[i] > self.fenY[i+1] and self.fenY[i] > self.fenY[i+3]:
temp_type = -1 #向下线段,三笔重合
segmentX.append(self.fenX[i])
segmentY.append(self.fenY[i])
elif self.fenY[i] < self.fenY[i+1] and self.fenY[i] < self.fenY[i+3]:
temp_type = 1 #向上线段,三笔重合
segmentX.append(self.fenX[i])
segmentY.append(self.fenY[i])
else:
temp_type = 0
i += 1
continue
if temp_type == 1: #向上线段
j = i+1
high = [] # 记录顶
low = [] # 记录低
while j < len(self.fenX) - 1: #记录顶底
high.append(self.fenY[j])
low.append(self.fenY[j+1])
j += 2
if self.fenY[i+4] < self.fenY[i+1]: #向上线段被向下笔破坏
j = 0
while j < len(high)-2:
# 顶底出现顶分型,向上线段结束
if high[j+1] > high[j] and high[j+1] > high[j+2]:
num = i + 2 * j + 3 #线段结束点位置
segmentX.append(self.fenX[num])
segmentY.append(self.fenY[num])
i = num
temp_type = -1 #向上线段一定由向下线段结束
break
j += 1
if j == len(high)-2:
break
else: #向上线段未被向下笔破坏
j = 1
while j < len(high)-2:
# 顶底出现底分型,向上线段结束
if low[j + 1] < low[j] and low[j + 1] < low[j + 2]:
num = i + 2 * j + 1 # 线段结束点位置
segmentX.append(self.fenX[num])
segmentY.append(self.fenY[num])
i = num
temp_type = -1 # 向上线段一定由向下线段结束
break
j += 1
if j == len(high)-2:
break
elif temp_type == -1: # 向下线段
j = i + 1
high = [] # 记录顶
low = [] # 记录低
while j < len(self.fenX) - 1: # 记录顶底
high.append(self.fenY[j + 1])
low.append(self.fenY[j])
j += 2
if self.fenY[i + 4] > self.fenY[i + 1]: # 向下线段被向上笔破坏
j = 0
while j < len(high) - 2:
# 顶底出现底分型,向下线段结束
if low[j + 1] < low[j] and low[j + 1] < low[j + 2]:
num = i + 2 * j + 3 # 线段结束点位置
segmentX.append(self.fenX[num])
segmentY.append(self.fenY[num])
i = num
temp_type = 1 # 向下线段一定由向上线段结束
break
j += 1
if j == len(high) - 2:
break
else: # 向下线段未被向上笔破坏
j = 1
while j < len(high) - 2:
# 顶底出现顶分型,向下线段结束
if high[j + 1] > high[j] and high[j + 1] > high[j + 2]:
num = i + 2 * j + 1 # 线段结束点位置
segmentX.append(self.fenX[num])
segmentY.append(self.fenY[num])
i = num
temp_type = 1 # 向下线段一定由向上线段结束
break
j += 1
if j == len(high) - 2:
break
print "segmentX: ", segmentX
print "segmentY: ", segmentY
if not self.segmentLoaded:
if len(segmentX) > 1:
segmentX.append(segmentX[-1])
segmentY.append(segmentY[-1])
segmentX = [int(x) for x in segmentX]
segmentY = [int(y) for y in segmentY]
self.fenduan(segmentX, segmentY)
self.chanlunEngine.writeChanlunLog(u'分段加载成功')
self.segmentLoaded = True
# ----------------------------------------------------------------------
def updateChanlunLog(self, event):
"""更新缠论相关日志"""
log = event.dict_['data']
# print type(log)
if(log.logTime):
content = '\t'.join([log.logTime, log.logContent])
self.chanlunLogMonitor.append(content)
else:
print 0
#-----------------------------------------------------------------------
def zhongShu(self):
if not self.penLoaded:
            self.pen()  # pens must be drawn before the zhongshu can be found
        # temp_type = 0  # zhongshu direction: 1 up, -1 down
i = 0
temp_high, temp_low = 0, 0
        minX, maxX = 0, 0
        self.zhongshuPos = []  # start/end segment positions of every zhongshu
        self.zhongShuType = []  # direction of every zhongshu
while i < len(self.fenX) - 4:
if (self.fenY[i] > self.fenY[i + 1] and self.fenY[i + 1] < self.fenY[i + 4]): #判断进入段方向
temp_low = max(self.fenY[i + 1], self.fenY[i + 3])
temp_high = min(self.fenY[i + 2], self.fenY[i + 4]) #记录中枢内顶的最小值与底的最大值
minX = self.fenX[i+1]
self.zhongshuPos.append(i)
self.zhongShuType.append(-1)
j = i
while i < len(self.fenX) - 4:
j = i
if self.fenY[i + 1] < self.fenY[i + 4] and self.fenY[i + 4] > temp_low and self.fenY[i + 3] < temp_high :
maxX = self.fenX[i+4]
if self.fenY[i + 3] > temp_low:
temp_low = self.fenY[i + 3]
if self.fenY[i + 4] < temp_high:
temp_high = self.fenY[i + 4]
i = i + 1
elif self.fenY[i + 1] > self.fenY[i + 4] and self.fenY[i + 4] < temp_high and self.fenY[i + 3] > temp_low :
maxX = self.fenX[i + 4]
if self.fenY[i + 3] < temp_high:
temp_high = self.fenY[i + 3]
if self.fenY[i + 4] > temp_low:
temp_low = self.fenY[i + 4]
i = i + 1
if j == i:
break
elif (self.fenY[i] < self.fenY[i + 1] and self.fenY[i + 1] > self.fenY[i + 4]):
temp_high = min(self.fenY[i + 1], self.fenY[i + 3])
temp_low = max(self.fenY[i + 2], self.fenY[i + 4])
minX = self.fenX[i + 1]
self.zhongshuPos.append(i)
self.zhongShuType.append(1)
j = i
while i < len(self.fenX) - 4:
j = i
if self.fenY[i + 1] > self.fenY[i + 4] and self.fenY[i + 4] < temp_high and self.fenY[i + 3] > temp_low:
maxX = self.fenX[i + 4]
if self.fenY[i + 3] < temp_high:
temp_high = self.fenY[i + 3]
if self.fenY[i + 4] > temp_low:
temp_low = self.fenY[i + 4]
i = i + 1
elif self.fenY[i + 1] < self.fenY[i + 4] and self.fenY[i + 4] > temp_low and self.fenY[i + 3] < temp_high:
maxX = self.fenX[i + 4]
if self.fenY[i + 3] > temp_low:
temp_low = self.fenY[i + 3]
if self.fenY[i + 4] < temp_high:
temp_high = self.fenY[i + 4]
i = i + 1
if i == j:
break
else:
i += 1
continue
            # Draw the zhongshu that has just been identified
if minX != 0 and maxX == 0:
maxX = self.fenX[i+4]
i = i + 1
self.zhongshuPos.append(i + 4)
else:
self.zhongshuPos.append(i + 3)
minY, maxY = temp_low, temp_high
print minX, minY, maxX, maxY
if int(maxY) > int(minY):
plotX = [minX, minX, maxX, maxX, minX]
plotY = [minY, maxY, maxY, minY, minY]
plotX = [int(x) for x in plotX]
plotY = [int(y) for y in plotY]
self.zhongshu(plotX, plotY)
i = i + 4
self.zhongShuLoaded = True
self.chanlunEngine.writeChanlunLog(u'走势中枢加载成功')
# ----------------------------------------------------------------------
def shop(self):
"""加载买卖点"""
if not self.zhongShuLoaded:
self.zhongShu()
i = 0
while i < len(self.zhongShuType) - 1:
startPos, endPos = self.zhongshuPos[2*i], self.zhongshuPos[2*i + 1] # 中枢开始段的位置和结束段的位置
startY = self.fenY[startPos + 1] - self.fenY[startPos] # 开始段Y轴距离
startX = self.fenX[startPos + 1] - self.fenX[startPos] # 开始段X轴距离
startK = abs(startY * startX) # 开始段投影面积
endY = self.fenY[endPos + 1] - self.fenY[endPos] # 结束段Y轴距离
endX = self.fenX[endPos + 1] - self.fenX[endPos] # 结束段段X轴距离
endK = abs(endY * endX) # 开始段投影面积
if endK < startK:
print startPos, endPos
if self.zhongShuType[i] == 1 and self.zhongShuType[i + 1] == -1:
# 一卖
self.sellpoint([self.fenX[endPos + 1]], [self.fenY[endPos + 1]], 1)
# 二卖,一卖后一个顶点
self.sellpoint([self.fenX[endPos + 3]], [self.fenY[endPos + 3]], 2)
# 三卖,一卖之后中枢结束段的第一个顶
i = i + 1
nextPos = self.zhongshuPos[2*i + 1] # 下一个中枢结束位置
if nextPos + 1 < len(self.fenY):
if self.fenY[nextPos + 1] > self.fenY[nextPos]:
self.sellpoint([self.fenX[nextPos + 1]], [self.fenY[nextPos + 1]], 3)
else:
self.sellpoint([self.fenX[nextPos]], [self.fenY[nextPos]], 3)
elif self.zhongShuType[i] == -1 and self.zhongShuType[i + 1] == 1:
# 一买
self.buypoint([self.fenX[endPos + 1]], [self.fenY[endPos + 1]], 1)
# 二买,一买后一个底点
self.buypoint([self.fenX[endPos + 3]], [self.fenY[endPos + 3]], 2)
# 三买,一买之后中枢结束段的第一个顶
i = i + 1
nextPos = self.zhongshuPos[2*i + 1] # 下一个中枢结束位置
if nextPos + 1 < len(self.fenY):
if self.fenY[nextPos + 1] < self.fenY[nextPos]:
self.buypoint([self.fenX[nextPos + 1]], [self.fenY[nextPos + 1]], 3)
else:
self.buypoint([self.fenX[nextPos]], [self.fenY[nextPos]], 3)
i = i + 1 # 接着判断之后的中枢是否出现背驰
self.chanlunEngine.writeChanlunLog(u'买卖点加载成功')
# ----------------------------------------------------------------------
def fenbi(self, fenbix, fenbiy):
self.PriceW.pw2.plotItem.plot(x=fenbix, y=fenbiy, pen=QtGui.QPen(QtGui.QColor(255, 236, 139)))
def fenduan(self, fenduanx, fenduany):
self.PriceW.pw2.plot(x=fenduanx, y=fenduany, symbol='o', pen=QtGui.QPen(QtGui.QColor(131, 111, 255)))
def zhongshu(self, zhongshux, zhongshuy):
self.PriceW.pw2.plot(x=zhongshux, y=zhongshuy, pen=QtGui.QPen(QtGui.QColor(255,165,0)))
def buypoint(self, buyx, buyy, point):
if point == 1:
self.PriceW.pw2.plot(x=buyx, y=buyy, symbolSize=18, symbolBrush=(255,0,0), symbolPen=(255,0,0), symbol='star')
elif point == 2:
self.PriceW.pw2.plot(x=buyx, y=buyy, symbolSize=18, symbolBrush=(238,130,238), symbolPen=(238,130,238),symbol='star')
elif point == 3:
self.PriceW.pw2.plot(x=buyx, y=buyy, symbolSize=18, symbolBrush=(138,43,226), symbolPen=(138,43,226),symbol='star')
def sellpoint(self, sellx, selly, point):
if point == 1:
self.PriceW.pw2.plot(x=sellx, y=selly, symbolSize=18, symbolBrush=(119,172,48), symbolPen=(119,172,48), symbol='star')
elif point == 2:
self.PriceW.pw2.plot(x=sellx, y=selly, symbolSize=18, symbolBrush=(221,221,34), symbolPen=(221,221,34),symbol='star')
elif point == 3:
self.PriceW.pw2.plot(x=sellx, y=selly, symbolSize=18, symbolBrush=(179,158,77), symbolPen=(179,158,77),symbol='star')
# ----------------------------------------------------------------------
    # Determine inclusion relations between K-lines and merge the contained bars
def judgeInclude(self):
        ## Determine the inclusion relations
k_data = self.data
        # DataFrame holding the bars after inclusion merging
after_fenxing = pd.DataFrame()
temp_data = k_data[:1]
        zoushi = [3]  # 3 - flat, 4 - down, 5 - up
for i in xrange(len(k_data)):
            case1 = temp_data.high[-1] >= k_data.high[i] and temp_data.low[-1] <= k_data.low[i]  # bar 1 contains bar 2
            case2 = temp_data.high[-1] <= k_data.high[i] and temp_data.low[-1] >= k_data.low[i]  # bar 2 contains bar 1
            case3 = temp_data.high[-1] == k_data.high[i] and temp_data.low[-1] == k_data.low[i]  # bar 1 equals bar 2
            case4 = temp_data.high[-1] > k_data.high[i] and temp_data.low[-1] > k_data.low[i]  # downtrend
            case5 = temp_data.high[-1] < k_data.high[i] and temp_data.low[-1] < k_data.low[i]  # uptrend
if case3:
zoushi.append(3)
continue
elif case1:
print temp_data
if zoushi[-1] == 4:
temp_data.ix[0, 4] = k_data.high[i] #向下走取高点的低点
else:
temp_data.ix[0, 3] = k_data.low[i] #向上走取低点的高点
elif case2:
temp_temp = temp_data[-1:]
temp_data = k_data[i:i + 1]
if zoushi[-1] == 4:
temp_data.ix[0, 4] = temp_temp.high[0]
else:
temp_data.ix[0, 3] = temp_temp.low[0]
elif case4:
zoushi.append(4)
after_fenxing = pd.concat([after_fenxing, temp_data], axis=0)
temp_data = k_data[i:i + 1]
elif case5:
zoushi.append(5)
after_fenxing = pd.concat([after_fenxing, temp_data], axis=0)
temp_data = k_data[i:i + 1]
return after_fenxing
# ----------------------------------------------------------------------
#画出合并后的K线图,分笔
def plotAfterFenXing(self, after_fenxing):
#判断包含关系,合并K线
for i in xrange(len(after_fenxing)):
#处理k线的最大最小值、开盘收盘价,合并后k线不显示影线。
after_fenxing.iloc[i, 0] = i
if after_fenxing.open[i] > after_fenxing.close[i]:
after_fenxing.iloc[i, 1] = after_fenxing.high[i]
after_fenxing.iloc[i, 2] = after_fenxing.low[i]
else:
after_fenxing.iloc[i, 1] = after_fenxing.low[i]
after_fenxing.iloc[i, 2] = after_fenxing.high[i]
self.PriceW.onBarAfterFenXing(i, after_fenxing.index[i], after_fenxing.open[i], after_fenxing.close[i], after_fenxing.low[i], after_fenxing.high[i])
self.PriceW.plotKlineAfterFenXing()
print "plotKLine after fenxing"
# ----------------------------------------------------------------------
    # Find the tops and bottoms (fractal points)
def findTopAndLow(self, after_fenxing):
        temp_num = 0  # position of the previous top or bottom
        temp_high = 0  # high of the previous top
        temp_low = 0  # low of the previous bottom
        temp_type = 0  # type of the previously recorded point
i = 1
        fenxing_type = []  # fractal types: 1 for a top fractal, -1 for a bottom fractal
        fenxing_data = pd.DataFrame()  # DataFrame of the fractal points
while (i < len(after_fenxing) - 1):
            case1 = after_fenxing.high[i - 1] < after_fenxing.high[i] and after_fenxing.high[i] > after_fenxing.high[i + 1]  # top fractal
            case2 = after_fenxing.low[i - 1] > after_fenxing.low[i] and after_fenxing.low[i] < after_fenxing.low[i + 1]  # bottom fractal
if case1:
if temp_type == 1: # 如果上一个分型为顶分型,则进行比较,选取高点更高的分型
if after_fenxing.high[i] <= temp_high:
i += 1
else:
temp_high = after_fenxing.high[i]
temp_num = i
temp_type = 1
i += 1
elif temp_type == 2: # 如果上一个分型为底分型,则记录上一个分型,用当前分型与后面的分型比较,选取同向更极端的分型
if temp_low >= after_fenxing.high[i]: # 如果上一个底分型的底比当前顶分型的顶高,则跳过当前顶分型。
i += 1
elif i < temp_num + 4: # 顶和底至少5k线
i += 1
else:
fenxing_type.append(-1)
fenxing_data = pd.concat([fenxing_data, after_fenxing[temp_num:temp_num + 1]], axis=0)
temp_high = after_fenxing.high[i]
temp_num = i
temp_type = 1
i += 1
else:
temp_high = after_fenxing.high[i]
temp_num = i
temp_type = 1
i += 1
elif case2:
if temp_type == 2: # 如果上一个分型为底分型,则进行比较,选取低点更低的分型
if after_fenxing.low[i] >= temp_low:
i += 1
else:
temp_low = after_fenxing.low[i]
temp_num = i
temp_type = 2
i += 1
elif temp_type == 1: # 如果上一个分型为顶分型,则记录上一个分型,用当前分型与后面的分型比较,选取同向更极端的分型
if temp_high <= after_fenxing.low[i]: # 如果上一个顶分型的底比当前底分型的底低,则跳过当前底分型。
i += 1
elif i < temp_num + 4: # 顶和底至少5k线
i += 1
else:
fenxing_type.append(1)
fenxing_data = pd.concat([fenxing_data, after_fenxing[temp_num:temp_num + 1]], axis=0)
temp_low = after_fenxing.low[i]
temp_num = i
temp_type = 2
i += 1
else:
temp_low = after_fenxing.low[i]
temp_num = i
temp_type = 2
i += 1
else:
i += 1
# if fenxing_type:
# if fenxing_type[-1] == 1 and temp_type == 2:
# fenxing_type.append(-1)
# fenxing_data = pd.concat([fenxing_data, after_fenxing[temp_num:temp_num + 1]], axis=0)
#
# if fenxing_type[-1] == -1 and temp_type == 1:
# fenxing_type.append(1)
# fenxing_data = pd.concat([fenxing_data, after_fenxing[temp_num:temp_num + 1]], axis=0)
return fenxing_data, fenxing_type
# ----------------------------------------------------------------------
    # Connect to the MongoDB database
def __connectMongo(self):
try:
self.__mongoConnection = pymongo.MongoClient("localhost", 27017)
self.__mongoConnected = True
except ConnectionFailure:
pass
# ----------------------------------------------------------------------
def registerEvent(self):
"""注册事件监听"""
self.signal.connect(self.updateChanlunLog)
self.eventEngine.register(EVENT_CHANLUN_LOG, self.signal.emit)
########################################################################
class PriceWidget(QtGui.QWidget):
"""用于显示价格走势图"""
signal = QtCore.pyqtSignal(type(Event()))
symbol = ''
class CandlestickItem(pg.GraphicsObject):
def __init__(self, data):
pg.GraphicsObject.__init__(self)
self.data = data ## data must have fields: time, open, close, min, max
self.generatePicture()
def generatePicture(self):
## pre-computing a QPicture object allows paint() to run much more quickly,
## rather than re-drawing the shapes every time.
self.picture = QtGui.QPicture()
p = QtGui.QPainter(self.picture)
p.setPen(pg.mkPen(color='w', width=0.4)) # 0.4 means w*2
# w = (self.data[1][0] - self.data[0][0]) / 3.
w = 0.2
for (n, t, open, close, min, max) in self.data:
p.drawLine(QtCore.QPointF(n, min), QtCore.QPointF(n, max))
if open > close:
p.setBrush(pg.mkBrush('g'))
else:
p.setBrush(pg.mkBrush('r'))
p.drawRect(QtCore.QRectF(n-w, open, w*2, close-open))
pg.setConfigOption('leftButtonPan', False)
p.end()
def paint(self, p, *args):
p.drawPicture(0, 0, self.picture)
def boundingRect(self):
## boundingRect _must_ indicate the entire area that will be drawn on
## or else we will get artifacts and possibly crashing.
            ## (in this case, QPicture does all the work of computing the bounding rect for us)
return QtCore.QRectF(self.picture.boundingRect())
#----------------------------------------------------------------------
def __init__(self, eventEngine, chanlunEngine, data, parent=None):
"""Constructor"""
super(PriceWidget, self).__init__(parent)
        # Parameters and state for the EMA lines on the candlestick chart
        self.EMAFastAlpha = 0.0167  # fast EMA parameter, ~60 bars
        self.EMASlowAlpha = 0.0083  # slow EMA parameter, ~120 bars
        self.fastEMA = 0  # current fast EMA value
        self.slowEMA = 0  # current slow EMA value
self.listfastEMA = []
self.listslowEMA = []
        # Lists that hold the bar data
self.listBar = []
self.listClose = []
self.listHigh = []
self.listLow = []
self.listOpen = []
        # Whether historical data has finished loading
self.initCompleted = False
self.__eventEngine = eventEngine
self.__chanlunEngine = chanlunEngine
        self.data = data  # data used for plotting
        # MongoDB related
self.__mongoConnected = False
self.__mongoConnection = None
        # Call setup functions
self.__connectMongo()
self.initUi()
# self.registerEvent()
#----------------------------------------------------------------------
def initUi(self):
"""初始化界面"""
self.setWindowTitle(u'Price')
self.vbl_1 = QtGui.QHBoxLayout()
        self.initplotKline()  # initialize the candlestick plot
self.setLayout(self.vbl_1)
#----------------------------------------------------------------------
def initplotKline(self):
"""Kline"""
        s = self.data.index  # x axis values
print "numbers of KLine: ", len(s)
xdict = dict(enumerate(s))
self.__axisTime = MyStringAxis(xdict, orientation='bottom')
self.pw2 = pg.PlotWidget(axisItems={'bottom': self.__axisTime}) # K线图
pw2x = self.pw2.getAxis('bottom')
pw2x.setGrid(150) # 设置默认x轴网格
pw2y = self.pw2.getAxis('left')
pw2y.setGrid(150) # 设置默认y轴网格
self.vbl_1.addWidget(self.pw2)
self.pw2.setMinimumWidth(1500)
self.pw2.setMaximumWidth(1800)
self.pw2.setDownsampling(mode='peak')
self.pw2.setClipToView(True)
self.curve5 = self.pw2.plot()
self.curve6 = self.pw2.plot()
self.candle = self.CandlestickItem(self.listBar)
self.pw2.addItem(self.candle)
## Draw an arrowhead next to the text box
# self.arrow = pg.ArrowItem()
# self.pw2.addItem(self.arrow)
    # Read one-minute data from the database and plot the minute chart
def plotMin(self, symbol):
self.initCompleted = True
cx = self.__mongoMinDB[symbol].find()
print cx.count()
if cx:
for data in cx:
self.barOpen = data['open']
self.barClose = data['close']
self.barLow = data['low']
self.barHigh = data['high']
self.barOpenInterest = data['openInterest']
# print self.num, self.barOpen, self.barClose, self.barLow, self.barHigh, self.barOpenInterest
self.onBar(self.num, self.barOpen, self.barClose, self.barLow, self.barHigh, self.barOpenInterest)
self.num += 1
    # Plot candlesticks from the historical data
def plotHistorticData(self):
self.initCompleted = True
for i in xrange(len(self.data)):
self.onBar(i, self.data.index[i], self.data.open[i], self.data.close[i], self.data.low[i], self.data.high[i])
self.plotKline()
print "plotKLine success"
#----------------------------------------------------------------------
def initHistoricalData(self):
"""初始历史数据"""
if self.symbol!='':
print "download histrical data:",self.symbol
self.initCompleted = True # 读取历史数据完成
td = timedelta(days=1) # 读取3天的历史TICK数据
# if startDate:
# cx = self.loadTick(self.symbol, startDate-td)
# else:
# today = datetime.today().replace(hour=0, minute=0, second=0, microsecond=0)
# cx = self.loadTick(self.symbol, today-td)
print cx.count()
if cx:
for data in cx:
tick = Tick(data['symbol'])
tick.openPrice = data['lastPrice']
tick.highPrice = data['upperLimit']
tick.lowPrice = data['lowerLimit']
tick.lastPrice = data['lastPrice']
tick.volume = data['volume']
tick.openInterest = data['openInterest']
tick.upperLimit = data['upperLimit']
tick.lowerLimit = data['lowerLimit']
tick.time = data['time']
# tick.ms = data['UpdateMillisec']
tick.bidPrice1 = data['bidPrice1']
tick.bidPrice2 = data['bidPrice2']
tick.bidPrice3 = data['bidPrice3']
tick.bidPrice4 = data['bidPrice4']
tick.bidPrice5 = data['bidPrice5']
tick.askPrice1 = data['askPrice1']
tick.askPrice2 = data['askPrice2']
tick.askPrice3 = data['askPrice3']
tick.askPrice4 = data['askPrice4']
tick.askPrice5 = data['askPrice5']
tick.bidVolume1 = data['bidVolume1']
tick.bidVolume2 = data['bidVolume2']
tick.bidVolume3 = data['bidVolume3']
tick.bidVolume4 = data['bidVolume4']
tick.bidVolume5 = data['bidVolume5']
tick.askVolume1 = data['askVolume1']
tick.askVolume2 = data['askVolume2']
tick.askVolume3 = data['askVolume3']
tick.askVolume4 = data['askVolume4']
tick.askVolume5 = data['askVolume5']
self.onTick(tick)
print('load historic data completed')
#----------------------------------------------------------------------
def plotKline(self):
"""K线图"""
if self.initCompleted:
            # EMA lines
self.curve5.setData(self.listfastEMA, pen=(255, 0, 0), name="Red curve")
self.curve6.setData(self.listslowEMA, pen=(0, 255, 0), name="Green curve")
            # Draw the candlesticks
self.pw2.removeItem(self.candle)
self.candle = self.CandlestickItem(self.listBar)
self.pw2.addItem(self.candle)
#----------------------------------------------------------------------
def plotText(self):
lenClose = len(self.listClose)
if lenClose >= 5: # Fractal Signal
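            # Marks an entry when the latest close is an extreme relative to the
            # previous four closes and the fast EMA sits on the matching side of
            # the slow EMA (see the two conditions below).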
if self.listClose[-1] > self.listClose[-2] and self.listClose[-3] > self.listClose[-2] and self.listClose[-4] > self.listClose[-2] and self.listClose[-5] > self.listClose[-2] and self.listfastEMA[-1] > self.listslowEMA[-1]:
## Draw an arrowhead next to the text box
# self.pw2.removeItem(self.arrow)
self.arrow = pg.ArrowItem(pos=(lenClose-1, self.listLow[-1]), angle=90, brush=(255, 0, 0))#红色
self.pw2.addItem(self.arrow)
elif self.listClose[-1] < self.listClose[-2] and self.listClose[-3] < self.listClose[-2] and self.listClose[-4] < self.listClose[-2] and self.listClose[-5] < self.listClose[-2] and self.listfastEMA[-1] < self.listslowEMA[-1]:
## Draw an arrowhead next to the text box
# self.pw2.removeItem(self.arrow)
self.arrow = pg.ArrowItem(pos=(lenClose-1, self.listHigh[-1]), angle=-90, brush=(0, 255, 0))#绿色
self.pw2.addItem(self.arrow)
#----------------------------------------------------------------------
def onBar(self, n, t, o, c, l, h):
self.listBar.append((n, t, o, c, l, h))
self.listOpen.append(o)
self.listClose.append(c)
self.listHigh.append(h)
self.listLow.append(l)
        # Update the EMA values for the candlestick chart
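        # Standard exponential smoothing:
        #   EMA_t = alpha * close_t + (1 - alpha) * EMA_{t-1}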
if self.fastEMA:
self.fastEMA = c*self.EMAFastAlpha + self.fastEMA*(1-self.EMAFastAlpha)
self.slowEMA = c*self.EMASlowAlpha + self.slowEMA*(1-self.EMASlowAlpha)
else:
self.fastEMA = c
self.slowEMA = c
self.listfastEMA.append(self.fastEMA)
self.listslowEMA.append(self.slowEMA)
        self.plotText()  # show the entry markers
# ----------------------------------------------------------------------
    # Append a bar of the merged K-line data
def onBarAfterFenXing(self, n, t, o, c, l, h):
self.listBar.append((n, t, o, c, l, h))
def plotKlineAfterFenXing(self):
        # Draw the candlesticks
self.pw2.removeItem(self.candle)
self.candle = self.CandlestickItem(self.listBar)
self.pw2.addItem(self.candle)
#----------------------------------------------------------------------
def __connectMongo(self):
"""连接MongoDB数据库"""
try:
self.__mongoConnection = pymongo.MongoClient("localhost", 27017)
self.__mongoConnected = True
self.__mongoMinDB = self.__mongoConnection['VnTrader_1Min_Db']
except ConnectionFailure:
pass
########################################################################
class TickWidget(QtGui.QWidget):
"""用于显示价格走势图"""
signal = QtCore.pyqtSignal(type(Event()))
    # Parameters and state for the tick chart
listlastPrice = np.empty(1000)
fastMA = 0
midMA = 0
slowMA = 0
listfastMA = np.empty(1000)
listmidMA = np.empty(1000)
listslowMA = np.empty(1000)
    tickFastAlpha = 0.0333  # fast MA parameter, ~30 ticks
    tickMidAlpha = 0.0167  # medium MA parameter, ~60 ticks
    tickSlowAlpha = 0.0083  # slow MA parameter, ~120 ticks
ptr = 0
    ticktime = None  # timestamp of the tick data
class CandlestickItem(pg.GraphicsObject):
def __init__(self, data):
pg.GraphicsObject.__init__(self)
self.data = data ## data must have fields: time, open, close, min, max
self.generatePicture()
def generatePicture(self):
## pre-computing a QPicture object allows paint() to run much more quickly,
## rather than re-drawing the shapes every time.
self.picture = QtGui.QPicture()
p = QtGui.QPainter(self.picture)
p.setPen(pg.mkPen(color='w', width=0.4)) # 0.4 means w*2
a = pg.AxisItem('bottom', pen=None, linkView=None, parent=None, maxTickLength=-5, showValues=True)
a.setFixedWidth(1)
a.setWidth(1)
a.setLabel(show=True)
a.setGrid(grid=True)
labelStyle = {'color': '#FFF', 'font-size': '14pt'}
a.setLabel('label text', units='V', **labelStyle)
# w = (self.data[1][0] - self.data[0][0]) / 3.
w = 0.2
for (t, open, close, min, max) in self.data:
p.drawLine(QtCore.QPointF(t, min), QtCore.QPointF(t, max))
if open > close:
p.setBrush(pg.mkBrush('g'))
else:
p.setBrush(pg.mkBrush('r'))
p.drawRect(QtCore.QRectF(t-w, open, w*2, close-open))
pg.setConfigOption('leftButtonPan', False)
p.end()
def paint(self, p, *args):
p.drawPicture(0, 0, self.picture)
def boundingRect(self):
## boundingRect _must_ indicate the entire area that will be drawn on
## or else we will get artifacts and possibly crashing.
            ## (in this case, QPicture does all the work of computing the bounding rect for us)
return QtCore.QRectF(self.picture.boundingRect())
#----------------------------------------------------------------------
def __init__(self, eventEngine, chanlunEngine, parent=None):
"""Constructor"""
super(TickWidget, self).__init__(parent)
self.__eventEngine = eventEngine
self.__chanlunEngine = chanlunEngine
        # MongoDB related
self.__mongoConnected = False
self.__mongoConnection = None
self.__mongoTickDB = None
        # Call setup functions
self.initUi()
self.registerEvent()
#----------------------------------------------------------------------
def initUi(self):
"""初始化界面"""
self.setWindowTitle(u'Tick')
self.vbl_1 = QtGui.QHBoxLayout()
        self.initplotTick()  # initialize the tick plot
self.setLayout(self.vbl_1)
#----------------------------------------------------------------------
def initplotTick(self):
""""""
self.pw1 = pg.PlotWidget(name='Plot1')
self.vbl_1.addWidget(self.pw1)
self.pw1.setMinimumWidth(1500)
self.pw1.setMaximumWidth(1800)
self.pw1.setRange(xRange=[-360, 0])
self.pw1.setLimits(xMax=5)
self.pw1.setDownsampling(mode='peak')
self.pw1.setClipToView(True)
self.curve1 = self.pw1.plot()
self.curve2 = self.pw1.plot()
self.curve3 = self.pw1.plot()
self.curve4 = self.pw1.plot()
# #----------------------------------------------------------------------
# def initHistoricalData(self,startDate=None):
# """初始历史数据"""
# print "download histrical data"
# self.initCompleted = True # 读取历史数据完成
# td = timedelta(days=1) # 读取3天的历史TICK数据
#
# if startDate:
# cx = self.loadTick(self.symbol, startDate-td)
# else:
# today = datetime.today().replace(hour=0, minute=0, second=0, microsecond=0)
# cx = self.loadTick(self.symbol, today-td)
#
# print cx.count()
#
# if cx:
# for data in cx:
# tick = Tick(data['symbol'])
#
# tick.openPrice = data['lastPrice']
# tick.highPrice = data['upperLimit']
# tick.lowPrice = data['lowerLimit']
# tick.lastPrice = data['lastPrice']
#
# tick.volume = data['volume']
# tick.openInterest = data['openInterest']
#
# tick.upperLimit = data['upperLimit']
# tick.lowerLimit = data['lowerLimit']
#
# tick.time = data['time']
# # tick.ms = data['UpdateMillisec']
#
# tick.bidPrice1 = data['bidPrice1']
# tick.bidPrice2 = data['bidPrice2']
# tick.bidPrice3 = data['bidPrice3']
# tick.bidPrice4 = data['bidPrice4']
# tick.bidPrice5 = data['bidPrice5']
#
# tick.askPrice1 = data['askPrice1']
# tick.askPrice2 = data['askPrice2']
# tick.askPrice3 = data['askPrice3']
# tick.askPrice4 = data['askPrice4']
# tick.askPrice5 = data['askPrice5']
#
# tick.bidVolume1 = data['bidVolume1']
# tick.bidVolume2 = data['bidVolume2']
# tick.bidVolume3 = data['bidVolume3']
# tick.bidVolume4 = data['bidVolume4']
# tick.bidVolume5 = data['bidVolume5']
#
# tick.askVolume1 = data['askVolume1']
# tick.askVolume2 = data['askVolume2']
# tick.askVolume3 = data['askVolume3']
# tick.askVolume4 = data['askVolume4']
# tick.askVolume5 = data['askVolume5']
#
# self.onTick(tick)
#
# print('load historic data completed')
#----------------------------------------------------------------------
def plotTick(self):
"""画tick图"""
self.curve1.setData(self.listlastPrice[:self.ptr])
self.curve2.setData(self.listfastMA[:self.ptr], pen=(255, 0, 0), name="Red curve")
self.curve3.setData(self.listmidMA[:self.ptr], pen=(0, 255, 0), name="Green curve")
self.curve4.setData(self.listslowMA[:self.ptr], pen=(0, 0, 255), name="Blue curve")
self.curve1.setPos(-self.ptr, 0)
self.curve2.setPos(-self.ptr, 0)
self.curve3.setPos(-self.ptr, 0)
self.curve4.setPos(-self.ptr, 0)
#----------------------------------------------------------------------
def updateMarketData(self, event):
"""更新行情"""
data = event.dict_['data']
print "update", data['InstrumentID']
symbol = data['InstrumentID']
tick = Tick(symbol)
tick.openPrice = data['OpenPrice']
tick.highPrice = data['HighestPrice']
tick.lowPrice = data['LowestPrice']
tick.lastPrice = data['LastPrice']
tick.volume = data['Volume']
tick.openInterest = data['OpenInterest']
tick.upperLimit = data['UpperLimitPrice']
tick.lowerLimit = data['LowerLimitPrice']
tick.time = data['UpdateTime']
tick.ms = data['UpdateMillisec']
tick.bidPrice1 = data['BidPrice1']
tick.bidPrice2 = data['BidPrice2']
tick.bidPrice3 = data['BidPrice3']
tick.bidPrice4 = data['BidPrice4']
tick.bidPrice5 = data['BidPrice5']
tick.askPrice1 = data['AskPrice1']
tick.askPrice2 = data['AskPrice2']
tick.askPrice3 = data['AskPrice3']
tick.askPrice4 = data['AskPrice4']
tick.askPrice5 = data['AskPrice5']
tick.bidVolume1 = data['BidVolume1']
tick.bidVolume2 = data['BidVolume2']
tick.bidVolume3 = data['BidVolume3']
tick.bidVolume4 = data['BidVolume4']
tick.bidVolume5 = data['BidVolume5']
tick.askVolume1 = data['AskVolume1']
tick.askVolume2 = data['AskVolume2']
tick.askVolume3 = data['AskVolume3']
tick.askVolume4 = data['AskVolume4']
tick.askVolume5 = data['AskVolume5']
        self.onTick(tick)  # update with the new tick
        self.__recordTick(tick)  # record the tick data
#----------------------------------------------------------------------
def onTick(self, tick):
"""tick数据更新"""
from datetime import time
        # Build a datetime.time value from the string time (easier to compare)
hh, mm, ss = tick.time.split(':')
self.ticktime = time(int(hh), int(mm), int(ss), microsecond=tick.ms)
        # Update the moving averages for the tick chart
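        # Same exponential-smoothing form as the K-line EMAs:
        #   MA_t = (1 - alpha) * MA_{t-1} + alpha * lastPrice_t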
if self.ptr == 0:
self.fastMA = tick.lastPrice
self.midMA = tick.lastPrice
self.slowMA = tick.lastPrice
else:
self.fastMA = (1-self.tickFastAlpha) * self.fastMA + self.tickFastAlpha * tick.lastPrice
self.midMA = (1-self.tickMidAlpha) * self.midMA + self.tickMidAlpha * tick.lastPrice
self.slowMA = (1-self.tickSlowAlpha) * self.slowMA + self.tickSlowAlpha * tick.lastPrice
self.listlastPrice[self.ptr] = int(tick.lastPrice)
self.listfastMA[self.ptr] = int(self.fastMA)
self.listmidMA[self.ptr] = int(self.midMA)
self.listslowMA[self.ptr] = int(self.slowMA)
self.ptr += 1
print(self.ptr)
if self.ptr >= self.listlastPrice.shape[0]:
tmp = self.listlastPrice
self.listlastPrice = np.empty(self.listlastPrice.shape[0] * 2)
self.listlastPrice[:tmp.shape[0]] = tmp
tmp = self.listfastMA
self.listfastMA = np.empty(self.listfastMA.shape[0] * 2)
self.listfastMA[:tmp.shape[0]] = tmp
tmp = self.listmidMA
self.listmidMA = np.empty(self.listmidMA.shape[0] * 2)
self.listmidMA[:tmp.shape[0]] = tmp
tmp = self.listslowMA
self.listslowMA = np.empty(self.listslowMA.shape[0] * 2)
self.listslowMA[:tmp.shape[0]] = tmp
        # Redraw the chart
        self.plotTick()  # tick chart
#----------------------------------------------------------------------
def __connectMongo(self):
"""连接MongoDB数据库"""
try:
self.__mongoConnection = pymongo.MongoClient("localhost", 27017)
self.__mongoConnected = True
self.__mongoTickDB = self.__mongoConnection['VnTrader_Tick_Db']
except ConnectionFailure:
pass
#----------------------------------------------------------------------
def __recordTick(self, data):
"""将Tick数据插入到MongoDB中"""
if self.__mongoConnected:
symbol = data['InstrumentID']
data['date'] = datetime.now().strftime('%Y%m%d')
self.__mongoTickDB[symbol].insert(data)
# #----------------------------------------------------------------------
# def loadTick(self, symbol, startDate, endDate=None):
# """从MongoDB中读取Tick数据"""
# cx = self.__mongoTickDB[symbol].find()
# print cx.count()
# return cx
# # if self.__mongoConnected:
# # collection = self.__mongoTickDB[symbol]
# #
    # #     # If an end date for reading ticks was given
# # if endDate:
# # cx = collection.find({'date': {'$gte': startDate, '$lte': endDate}})
# # else:
# # cx = collection.find({'date': {'$gte': startDate}})
# # return cx
# # else:
# # return None
#----------------------------------------------------------------------
def registerEvent(self):
"""注册事件监听"""
print "connect"
self.signal.connect(self.updateMarketData)
self.__eventEngine.register(EVENT_MARKETDATA, self.signal.emit)
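# Illustrative sketch (not part of the original demo): the moving averages
# maintained in onTick above follow the standard exponential update
# ma = (1 - alpha) * ma + alpha * price. The helper below shows that update in
# isolation; the alpha value and price series are made-up examples.
def _ema_update_example(alpha=0.1):
    prices = [100.0, 101.0, 100.5, 102.0]
    ma = prices[0]                       # seed with the first price, as onTick does
    for price in prices[1:]:
        ma = (1 - alpha) * ma + alpha * price
    return ma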
class Tick:
"""Tick数据对象"""
#----------------------------------------------------------------------
def __init__(self, symbol):
"""Constructor"""
        self.symbol = symbol        # contract (instrument) code
self.openPrice = 0 # OHLC
self.highPrice = 0
self.lowPrice = 0
self.lastPrice = 0
        self.volume = 0             # traded volume
        self.openInterest = 0       # open interest
        self.upperLimit = 0         # upper (limit-up) price
        self.lowerLimit = 0         # lower (limit-down) price
        self.time = ''              # update time and milliseconds
        self.ms = 0
        self.bidPrice1 = 0          # market depth quotes
self.bidPrice2 = 0
self.bidPrice3 = 0
self.bidPrice4 = 0
self.bidPrice5 = 0
self.askPrice1 = 0
self.askPrice2 = 0
self.askPrice3 = 0
self.askPrice4 = 0
self.askPrice5 = 0
self.bidVolume1 = 0
self.bidVolume2 = 0
self.bidVolume3 = 0
self.bidVolume4 = 0
self.bidVolume5 = 0
self.askVolume1 = 0
self.askVolume2 = 0
self.askVolume3 = 0
self.askVolume4 = 0
self.askVolume5 = 0 | mit | 1,005,031,568,701,190,500 | 37.755556 | 237 | 0.49587 | false | 3.238819 | false | false | false |
blsmit5728/PokeAlarm | PokeAlarm/Events/MonEvent.py | 1 | 13089 | # Standard Library Imports
from datetime import datetime
# 3rd Party Imports
# Local Imports
from PokeAlarm import Unknown
from PokeAlarm.Utilities import MonUtils
from PokeAlarm.Utils import (
get_gmaps_link, get_move_type, get_move_damage, get_move_dps,
get_move_duration, get_move_energy, get_pokemon_size,
get_applemaps_link, get_time_as_str, get_seconds_remaining,
get_base_types, get_dist_as_str, get_weather_emoji,
get_type_emoji)
from . import BaseEvent
class MonEvent(BaseEvent):
""" Event representing the discovery of a Pokemon. """
def __init__(self, data):
""" Creates a new Monster Event based on the given dict. """
super(MonEvent, self).__init__('monster')
check_for_none = BaseEvent.check_for_none
# Identification
self.enc_id = data['encounter_id']
self.monster_id = int(data['pokemon_id'])
# Time Left
self.disappear_time = datetime.utcfromtimestamp(data['disappear_time'])
self.time_left = get_seconds_remaining(self.disappear_time)
# Spawn Data
self.spawn_start = check_for_none(
int, data.get('spawn_start'), Unknown.REGULAR)
self.spawn_end = check_for_none(
int, data.get('spawn_end'), Unknown.REGULAR)
self.spawn_verified = check_for_none(bool, data.get('verified'), False)
# Location
self.lat = float(data['latitude'])
self.lng = float(data['longitude'])
self.distance = Unknown.SMALL # Completed by Manager
self.direction = Unknown.TINY # Completed by Manager
self.weather_id = check_for_none(
int, data.get('weather'), Unknown.TINY)
self.boosted_weather_id = check_for_none(
int, data.get('boosted_weather')
or data.get('weather_boosted_condition'), 0)
# Encounter Stats
self.mon_lvl = check_for_none(
int, data.get('pokemon_level'), Unknown.TINY)
self.cp = check_for_none(int, data.get('cp'), Unknown.TINY)
# IVs
self.atk_iv = check_for_none(
int, data.get('individual_attack'), Unknown.TINY)
self.def_iv = check_for_none(
int, data.get('individual_defense'), Unknown.TINY)
self.sta_iv = check_for_none(
int, data.get('individual_stamina'), Unknown.TINY)
if Unknown.is_not(self.atk_iv, self.def_iv, self.sta_iv):
self.iv = \
100 * (self.atk_iv + self.def_iv + self.sta_iv) / float(45)
else:
self.iv = Unknown.SMALL
# Quick Move
self.quick_id = check_for_none(
int, data.get('move_1'), Unknown.TINY)
self.quick_type = get_move_type(self.quick_id)
self.quick_damage = get_move_damage(self.quick_id)
self.quick_dps = get_move_dps(self.quick_id)
self.quick_duration = get_move_duration(self.quick_id)
self.quick_energy = get_move_energy(self.quick_id)
# Charge Move
self.charge_id = check_for_none(
int, data.get('move_2'), Unknown.TINY)
self.charge_type = get_move_type(self.charge_id)
self.charge_damage = get_move_damage(self.charge_id)
self.charge_dps = get_move_dps(self.charge_id)
self.charge_duration = get_move_duration(self.charge_id)
self.charge_energy = get_move_energy(self.charge_id)
# Catch Probs
self.base_catch = check_for_none(
float, data.get('base_catch'), Unknown.TINY)
self.great_catch = check_for_none(
float, data.get('great_catch'), Unknown.TINY)
self.ultra_catch = check_for_none(
float, data.get('ultra_catch'), Unknown.TINY)
# Attack Rating
self.atk_grade = check_for_none(
str, data.get('atk_grade'), Unknown.TINY)
self.def_grade = check_for_none(
str, data.get('def_grade'), Unknown.TINY)
# Cosmetic
self.gender = MonUtils.get_gender_sym(
check_for_none(int, data.get('gender'), Unknown.TINY))
self.height = check_for_none(float, data.get('height'), Unknown.SMALL)
self.weight = check_for_none(float, data.get('weight'), Unknown.SMALL)
if Unknown.is_not(self.height, self.weight):
self.size_id = get_pokemon_size(
self.monster_id, self.height, self.weight)
else:
self.size_id = Unknown.SMALL
self.types = get_base_types(self.monster_id)
# Form
self.form_id = check_for_none(int, data.get('form'), 0)
# Costume
self.costume_id = check_for_none(int, data.get('costume'), 0)
# Correct this later
self.name = self.monster_id
self.geofence = Unknown.REGULAR
self.custom_dts = {}
def generate_dts(self, locale, timezone, units):
""" Return a dict with all the DTS for this event. """
time = get_time_as_str(self.disappear_time, timezone)
form_name = locale.get_form_name(self.monster_id, self.form_id)
costume_name = locale.get_costume_name(
self.monster_id, self.costume_id)
weather_name = locale.get_weather_name(self.weather_id)
boosted_weather_name = locale.get_weather_name(self.boosted_weather_id)
type1 = locale.get_type_name(self.types[0])
type2 = locale.get_type_name(self.types[1])
dts = self.custom_dts.copy()
dts.update({
# Identification
'encounter_id': self.enc_id,
'mon_name': locale.get_pokemon_name(self.monster_id),
'mon_id': self.monster_id,
'mon_id_3': "{:03}".format(self.monster_id),
# Time Remaining
'time_left': time[0],
'12h_time': time[1],
'24h_time': time[2],
# Spawn Data
'spawn_start': self.spawn_start,
'spawn_end': self.spawn_end,
'spawn_verified': self.spawn_verified,
# Location
'lat': self.lat,
'lng': self.lng,
'lat_5': "{:.5f}".format(self.lat),
'lng_5': "{:.5f}".format(self.lng),
'distance': (
get_dist_as_str(self.distance, units)
if Unknown.is_not(self.distance) else Unknown.SMALL),
'direction': self.direction,
'gmaps': get_gmaps_link(self.lat, self.lng),
'applemaps': get_applemaps_link(self.lat, self.lng),
'geofence': self.geofence,
# Weather
'weather_id': self.weather_id,
'weather': weather_name,
'weather_or_empty': Unknown.or_empty(weather_name),
'weather_emoji': get_weather_emoji(self.weather_id),
'boosted_weather_id': self.boosted_weather_id,
'boosted_weather': boosted_weather_name,
'boosted_weather_or_empty': (
'' if self.boosted_weather_id == 0
else Unknown.or_empty(boosted_weather_name)),
'boosted_weather_emoji':
get_weather_emoji(self.boosted_weather_id),
'boosted_or_empty': locale.get_boosted_text() if \
Unknown.is_not(self.boosted_weather_id) and
self.boosted_weather_id != 0 else '',
# Encounter Stats
'mon_lvl': self.mon_lvl,
'cp': self.cp,
# IVs
'iv_0': (
"{:.0f}".format(self.iv) if Unknown.is_not(self.iv)
else Unknown.TINY),
'iv': (
"{:.1f}".format(self.iv) if Unknown.is_not(self.iv)
else Unknown.SMALL),
'iv_2': (
"{:.2f}".format(self.iv) if Unknown.is_not(self.iv)
else Unknown.SMALL),
'atk': self.atk_iv,
'def': self.def_iv,
'sta': self.sta_iv,
# Type
'type1': type1,
'type1_or_empty': Unknown.or_empty(type1),
'type1_emoji': Unknown.or_empty(get_type_emoji(self.types[0])),
'type2': type2,
'type2_or_empty': Unknown.or_empty(type2),
'type2_emoji': Unknown.or_empty(get_type_emoji(self.types[1])),
'types': (
"{}/{}".format(type1, type2)
if Unknown.is_not(type2) else type1),
'types_emoji': (
"{}{}".format(
get_type_emoji(self.types[0]),
get_type_emoji(self.types[1]))
if Unknown.is_not(type2) else get_type_emoji(self.types[0])),
# Form
'form': form_name,
'form_or_empty': Unknown.or_empty(form_name),
'form_id': self.form_id,
'form_id_3': "{:03d}".format(self.form_id),
# Costume
'costume': costume_name,
'costume_or_empty': Unknown.or_empty(costume_name),
'costume_id': self.costume_id,
'costume_id_3': "{:03d}".format(self.costume_id),
# Quick Move
'quick_move': locale.get_move_name(self.quick_id),
'quick_id': self.quick_id,
'quick_type_id': self.quick_type,
'quick_type': locale.get_type_name(self.quick_type),
'quick_type_emoji': get_type_emoji(self.quick_type),
'quick_damage': self.quick_damage,
'quick_dps': self.quick_dps,
'quick_duration': self.quick_duration,
'quick_energy': self.quick_energy,
# Charge Move
'charge_move': locale.get_move_name(self.charge_id),
'charge_id': self.charge_id,
'charge_type_id': self.charge_type,
'charge_type': locale.get_type_name(self.charge_type),
'charge_type_emoji': get_type_emoji(self.charge_type),
'charge_damage': self.charge_damage,
'charge_dps': self.charge_dps,
'charge_duration': self.charge_duration,
'charge_energy': self.charge_energy,
# Cosmetic
'gender': self.gender,
'height_0': (
"{:.0f}".format(self.height) if Unknown.is_not(self.height)
else Unknown.TINY),
'height': (
"{:.1f}".format(self.height) if Unknown.is_not(self.height)
else Unknown.SMALL),
'height_2': (
"{:.2f}".format(self.height) if Unknown.is_not(self.height)
else Unknown.SMALL),
'weight_0': (
"{:.0f}".format(self.weight) if Unknown.is_not(self.weight)
else Unknown.TINY),
'weight': (
"{:.1f}".format(self.weight) if Unknown.is_not(self.weight)
else Unknown.SMALL),
'weight_2': (
"{:.2f}".format(self.weight) if Unknown.is_not(self.weight)
else Unknown.SMALL),
'size': locale.get_size_name(self.size_id),
# Attack rating
'atk_grade': (
Unknown.or_empty(self.atk_grade, Unknown.TINY)),
'def_grade': (
Unknown.or_empty(self.def_grade, Unknown.TINY)),
# Catch Prob
'base_catch_0': (
"{:.0f}".format(self.base_catch * 100)
if Unknown.is_not(self.base_catch)
else Unknown.TINY),
'base_catch': (
"{:.1f}".format(self.base_catch * 100)
if Unknown.is_not(self.base_catch)
else Unknown.SMALL),
'base_catch_2': (
"{:.2f}".format(self.base_catch * 100)
if Unknown.is_not(self.base_catch)
else Unknown.SMALL),
'great_catch_0': (
"{:.0f}".format(self.great_catch * 100)
if Unknown.is_not(self.great_catch)
else Unknown.TINY),
'great_catch': (
"{:.1f}".format(self.great_catch * 100)
if Unknown.is_not(self.great_catch)
else Unknown.SMALL),
'great_catch_2': (
"{:.2f}".format(self.great_catch * 100)
if Unknown.is_not(self.great_catch)
else Unknown.SMALL),
'ultra_catch_0': (
"{:.0f}".format(self.ultra_catch * 100)
if Unknown.is_not(self.ultra_catch)
else Unknown.TINY),
'ultra_catch': (
"{:.1f}".format(self.ultra_catch * 100)
if Unknown.is_not(self.ultra_catch)
else Unknown.SMALL),
'ultra_catch_2': (
"{:.2f}".format(self.ultra_catch * 100)
if Unknown.is_not(self.ultra_catch)
else Unknown.SMALL),
# Misc
'big_karp': (
'big' if self.monster_id == 129 and Unknown.is_not(self.weight)
and self.weight >= 13.13 else ''),
'tiny_rat': (
'tiny' if self.monster_id == 19 and Unknown.is_not(self.weight)
and self.weight <= 2.41 else '')
})
return dts
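# Illustrative sketch (not part of the upstream code): MonEvent reads five keys
# directly (everything else falls back through check_for_none / .get), so a
# minimal payload needs at least those. The values below are made-up examples
# and real webhooks normally carry many more fields.
def _example_mon_event():
    payload = {
        'encounter_id': 'abc123',
        'pokemon_id': 149,
        'disappear_time': 1500000000,
        'latitude': 37.7749,
        'longitude': -122.4194,
    }
    return MonEvent(payload)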
| agpl-3.0 | 1,737,988,250,561,061,600 | 39.150307 | 79 | 0.533578 | false | 3.544273 | false | false | false |
PLyczkowski/Sticky-Keymap | 2.74/scripts/addons/io_anim_bvh/__init__.py | 1 | 8032 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8-80 compliant>
bl_info = {
"name": "BioVision Motion Capture (BVH) format",
"author": "Campbell Barton",
"blender": (2, 74, 0),
"location": "File > Import-Export",
"description": "Import-Export BVH from armature objects",
"warning": "",
"wiki_url": "http://wiki.blender.org/index.php/Extensions:2.6/Py/"
"Scripts/Import-Export/MotionCapture_BVH",
"support": 'OFFICIAL',
"category": "Import-Export"}
if "bpy" in locals():
import importlib
if "import_bvh" in locals():
importlib.reload(import_bvh)
if "export_bvh" in locals():
importlib.reload(export_bvh)
import bpy
from bpy.props import (StringProperty,
FloatProperty,
IntProperty,
BoolProperty,
EnumProperty,
)
from bpy_extras.io_utils import (ImportHelper,
ExportHelper,
orientation_helper_factory,
axis_conversion,
)
ImportBVHOrientationHelper = orientation_helper_factory("ImportBVHOrientationHelper", axis_forward='-Z', axis_up='Y')
class ImportBVH(bpy.types.Operator, ImportHelper, ImportBVHOrientationHelper):
"""Load a BVH motion capture file"""
bl_idname = "import_anim.bvh"
bl_label = "Import BVH"
bl_options = {'REGISTER', 'UNDO'}
filename_ext = ".bvh"
filter_glob = StringProperty(default="*.bvh", options={'HIDDEN'})
target = EnumProperty(items=(
('ARMATURE', "Armature", ""),
('OBJECT', "Object", ""),
),
name="Target",
description="Import target type",
default='ARMATURE')
global_scale = FloatProperty(
name="Scale",
description="Scale the BVH by this value",
min=0.0001, max=1000000.0,
soft_min=0.001, soft_max=100.0,
default=1.0,
)
frame_start = IntProperty(
name="Start Frame",
description="Starting frame for the animation",
default=1,
)
use_fps_scale = BoolProperty(
name="Scale FPS",
description=("Scale the framerate from the BVH to "
"the current scenes, otherwise each "
"BVH frame maps directly to a Blender frame"),
default=False,
)
use_cyclic = BoolProperty(
name="Loop",
description="Loop the animation playback",
default=False,
)
rotate_mode = EnumProperty(
name="Rotation",
description="Rotation conversion",
items=(('QUATERNION', "Quaternion",
"Convert rotations to quaternions"),
('NATIVE', "Euler (Native)", ("Use the rotation order "
"defined in the BVH file")),
('XYZ', "Euler (XYZ)", "Convert rotations to euler XYZ"),
('XZY', "Euler (XZY)", "Convert rotations to euler XZY"),
('YXZ', "Euler (YXZ)", "Convert rotations to euler YXZ"),
('YZX', "Euler (YZX)", "Convert rotations to euler YZX"),
('ZXY', "Euler (ZXY)", "Convert rotations to euler ZXY"),
('ZYX', "Euler (ZYX)", "Convert rotations to euler ZYX"),
),
default='NATIVE',
)
def execute(self, context):
keywords = self.as_keywords(ignore=("axis_forward",
"axis_up",
"filter_glob",
))
global_matrix = axis_conversion(from_forward=self.axis_forward,
from_up=self.axis_up,
).to_4x4()
keywords["global_matrix"] = global_matrix
from . import import_bvh
return import_bvh.load(self, context, **keywords)
class ExportBVH(bpy.types.Operator, ExportHelper):
"""Save a BVH motion capture file from an armature"""
bl_idname = "export_anim.bvh"
bl_label = "Export BVH"
filename_ext = ".bvh"
filter_glob = StringProperty(
default="*.bvh",
options={'HIDDEN'},
)
global_scale = FloatProperty(
name="Scale",
description="Scale the BVH by this value",
min=0.0001, max=1000000.0,
soft_min=0.001, soft_max=100.0,
default=1.0,
)
frame_start = IntProperty(
name="Start Frame",
description="Starting frame to export",
default=0,
)
frame_end = IntProperty(
name="End Frame",
description="End frame to export",
default=0,
)
rotate_mode = EnumProperty(
name="Rotation",
description="Rotation conversion",
items=(('NATIVE', "Euler (Native)",
"Use the rotation order defined in the BVH file"),
('XYZ', "Euler (XYZ)", "Convert rotations to euler XYZ"),
('XZY', "Euler (XZY)", "Convert rotations to euler XZY"),
('YXZ', "Euler (YXZ)", "Convert rotations to euler YXZ"),
('YZX', "Euler (YZX)", "Convert rotations to euler YZX"),
('ZXY', "Euler (ZXY)", "Convert rotations to euler ZXY"),
('ZYX', "Euler (ZYX)", "Convert rotations to euler ZYX"),
),
default='NATIVE',
)
root_transform_only = BoolProperty(
name="Root Translation Only",
description="Only write out translation channels for the root bone",
default=False,
)
@classmethod
def poll(cls, context):
obj = context.object
return obj and obj.type == 'ARMATURE'
def invoke(self, context, event):
self.frame_start = context.scene.frame_start
self.frame_end = context.scene.frame_end
return super().invoke(context, event)
def execute(self, context):
if self.frame_start == 0 and self.frame_end == 0:
self.frame_start = context.scene.frame_start
self.frame_end = context.scene.frame_end
keywords = self.as_keywords(ignore=("check_existing", "filter_glob"))
from . import export_bvh
return export_bvh.save(self, context, **keywords)
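# Illustrative sketch (not part of the add-on): once register() has run, the
# operators above are reachable through their bl_idname from scripts or the
# Python console. The file paths and settings below are made-up examples.
def _example_bvh_roundtrip():
    import bpy
    bpy.ops.import_anim.bvh(filepath="/tmp/walk.bvh", global_scale=1.0)
    bpy.ops.export_anim.bvh(filepath="/tmp/walk_copy.bvh", frame_start=1, frame_end=250)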
def menu_func_import(self, context):
self.layout.operator(ImportBVH.bl_idname, text="Motion Capture (.bvh)")
def menu_func_export(self, context):
self.layout.operator(ExportBVH.bl_idname, text="Motion Capture (.bvh)")
def register():
bpy.utils.register_module(__name__)
bpy.types.INFO_MT_file_import.append(menu_func_import)
bpy.types.INFO_MT_file_export.append(menu_func_export)
def unregister():
bpy.utils.unregister_module(__name__)
bpy.types.INFO_MT_file_import.remove(menu_func_import)
bpy.types.INFO_MT_file_export.remove(menu_func_export)
if __name__ == "__main__":
register()
| gpl-2.0 | 8,315,018,521,340,759,000 | 35.017937 | 117 | 0.54868 | false | 4.058615 | false | false | false |
desihub/desimodel | py/desimodel/weather.py | 1 | 15591 | # See LICENSE.rst for BSD 3-clause license info
# -*- coding: utf-8 -*-
"""
desimodel.weather
=================
Model of the expected weather conditions at KPNO during the DESI survey.
To generate a random time series of expected FWHM seeing in arcsecs and
atmospheric transparency, use, for example::
n = 10000
dt = 300 # seconds
t = np.arange(n) * dt
gen = np.random.RandomState(seed=123)
seeing = sample_seeing(n, dt_sec=dt, gen=gen)
transp = sample_transp(n, dt_sec=dt, gen=gen)
The resulting arrays are randomly sampled from models of the 1D probability
density and 2-point power spectral density derived from MzLS observations.
See `DESI-doc-3087
<https://desi.lbl.gov/DocDB/cgi-bin/private/ShowDocument?docid=3087>`__
for details.
Used by :mod:`surveysim.weather` for simulations of DESI observing and
survey strategy studies.
"""
from __future__ import print_function, division
import os
import datetime
import calendar
import numpy as np
import scipy.interpolate
import scipy.special
import astropy.table
def whiten_transforms_from_cdf(x, cdf):
"""
Calculate a pair of transforms to whiten and unwhiten a distribution.
The whitening transform is monotonic and invertible.
Parameters
----------
x : array
1D array of non-decreasing values giving bin edges for the distribution
to whiten and unwhiten.
cdf : array
1D array of non-decreasing values giving the cummulative probability
density associated with each bin edge. Does not need to be normalized.
Must have the same length as x.
Returns
-------
tuple
Tuple (F,G) of callable objects that whiten y=F(x) and unwhiten x=G(y)
samples x of the input distribution, so that y has a Gaussian
distribution with zero mean and unit variance.
"""
x = np.asarray(x)
cdf = np.asarray(cdf)
if x.shape != cdf.shape:
raise ValueError('Input arrays must have same shape.')
if len(x.shape) != 1:
raise ValueError('Input arrays must be 1D.')
if not np.all(np.diff(x) >= 0):
raise ValueError('Values of x must be non-decreasing.')
if not np.all(np.diff(cdf) >= 0):
raise ValueError('Values of cdf must be non-decreasing.')
# Normalize.
cdf /= cdf[-1]
# Use linear interpolation for the forward and inverse transforms between
# the input range and Gaussian CDF values.
args = dict(
kind='linear', assume_sorted=True, copy=False, bounds_error=True)
forward = scipy.interpolate.interp1d(x, cdf, **args)
backward = scipy.interpolate.interp1d(cdf, x, **args)
# Add wrappers to convert between CDF and PDF samples.
root2 = np.sqrt(2)
forward_transform = (
lambda x: root2 * scipy.special.erfinv(2 * forward(x) - 1))
inverse_transform = (
lambda y: backward(0.5 * (1 + scipy.special.erf(y / root2))))
return forward_transform, inverse_transform
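# Illustrative sketch (not part of the original module): whitening a uniform
# distribution on [0, 1], whose CDF is simply x, maps the median 0.5 to ~0 and
# unwhitening maps it back.
def _example_whiten_uniform():
    x = np.linspace(0., 1., 101)
    forward, backward = whiten_transforms_from_cdf(x, x.copy())
    y = forward(0.5)            # ~0 for the median of a uniform distribution
    return backward(y)          # ~0.5 again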
def whiten_transforms(data, data_min=None, data_max=None):
"""Calculate a pair of transforms to whiten and unwhiten a distribution.
Uses :func:`desimodel.weather.whiten_transforms_from_cdf`.
Parameters
----------
data : array
1D array of samples from the distribution to whiten.
data_min : float or None
Clip the distribution to this minimum value, or at min(data) if None.
Must be <= min(data).
data_max : float or None
Clip the distribution to this maximum value, or at max(data) if None.
Must be >= max(data).
Returns
-------
tuple
See :func:`desimodel.weather.whiten_transforms_from_cdf`.
"""
n_data = len(data)
# Sort the input data with padding at each end for the min/max values.
sorted_data = np.empty(shape=n_data + 2, dtype=data.dtype)
sorted_data[1:-1] = np.sort(data)
if data_min is None:
sorted_data[0] = sorted_data[1]
else:
if data_min > sorted_data[1]:
raise ValueError('data_min > min(data)')
sorted_data[0] = data_min
if data_max is None:
sorted_data[-1] = sorted_data[-2]
else:
if data_max < sorted_data[-2]:
raise ValueError('data_max < max(data)')
sorted_data[-1] = data_max
# Calculate the Gaussian CDF value associated with each input value in
# sorted order. The pad values are associated with CDF = 0, 1 respectively.
cdf = np.arange(n_data + 2) / (n_data + 1.)
return whiten_transforms_from_cdf(sorted_data, cdf)
def _seeing_fit_model(x):
"""Evalute the fit to MzLS seeing described in DESI-doc-3087.
"""
p = np.array([ 0.07511146, 0.44276671, 23.02442192, 38.07691498])
y = (1 + ((x - p[0]) / p[1]) ** 2) ** (-p[2]) * x ** p[3]
return y / (y.sum() * np.gradient(x))
def get_seeing_pdf(median_seeing=1.1, max_seeing=2.5, n=250):
"""Return PDF of FWHM seeing for specified clipped median value.
Note that this is atmospheric seeing, not delivered image quality.
The reference wavelength for seeing values is 6355A, in the r band,
and the observed wavelength dependence in Dey & Valdes is closer to
``lambda ** (-1/15)`` than the ``lambda ** (-1/5)`` predicted by
Kolmogorov theory. See DESI-doc-3087 for details.
Scales the clipped MzLS seeing PDF in order to achieve the requested
median value. Note that clipping is applied before scaling, so
the output PDF is clipped at scale * max_seeing.
Parameters
----------
median_seeing : float
Target FWHM seeing value in arcsec. Must be in the range [0.95, 1.30].
max_seeing : float
Calculate scaled median using unscaled values below this value.
n : int
Size of grid to use for tabulating the returned arrays.
Returns
-------
tuple
Tuple (fwhm, pdf) that tabulates pdf[fwhm]. Normalized so that
``np.sum(pdf * np.gradient(fwhm)) = 1``.
"""
# Tabulate the nominal (scale=1) seeing PDF.
fwhm = np.linspace(0., max_seeing, n)
pdf = _seeing_fit_model(fwhm)
pdf /= (pdf.sum() * np.gradient(fwhm))
cdf = np.cumsum(pdf)
cdf /= cdf[-1]
# Tabulate the median as a function of FWHM scale.
scale = np.linspace(0.9, 1.4, 11)
median = np.empty_like(scale)
for i, s in enumerate(scale):
median[i] = np.interp(0.5, cdf, s * fwhm)
if median_seeing < median[0] or median_seeing > median[-1]:
raise ValueError('Requested median is outside allowed range.')
# Interpolate to find the scale factor that gives the requested median.
s = np.interp(median_seeing, median, scale)
return fwhm * s, pdf / s
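# Illustrative sketch (not part of the original module): the returned arrays
# tabulate a PDF, so the median recovered from the implied CDF should match the
# requested value to good accuracy.
def _example_seeing_median(median_seeing=1.1):
    fwhm, pdf = get_seeing_pdf(median_seeing)
    cdf = np.cumsum(pdf * np.gradient(fwhm))
    return np.interp(0.5, cdf, fwhm)    # approximately median_seeing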
def sample_timeseries(x_grid, pdf_grid, psd, n_sample, dt_sec=180., gen=None):
"""Sample a time series specified by a power spectrum and 1D PDF.
The PSD should describe the temporal correlations of whitened samples.
Generated samples will then be unwhitened to recover the input 1D PDF.
See DESI-doc-3087 for details.
Uses :func:`whiten_transforms_from_cdf`.
Parameters
----------
x_grid : array
1D array of N increasing grid values covering the parameter range
to sample from.
pdf_grid : array
1D array of N increasing PDF values corresponding to each x_grid.
Does not need to be normalized.
psd : callable
Function of frequency in 1/days that returns the power-spectral
        density of whitened temporal fluctuations to sample from. Will only be
called for positive frequencies. Normalization does not matter.
n_sample : int
Number of equally spaced samples to generate.
dt_sec : float
Time interval between samples in seconds.
gen : np.random.RandomState or None
Provide an existing RandomState for full control of reproducible random
numbers, or None for non-reproducible random numbers.
"""
x_grid = np.array(x_grid)
pdf_grid = np.array(pdf_grid)
if not np.all(np.diff(x_grid) > 0):
raise ValueError('x_grid values are not increasing.')
if x_grid.shape != pdf_grid.shape:
raise ValueError('x_grid and pdf_grid arrays have different shapes.')
# Initialize random numbers if necessary.
if gen is None:
gen = np.random.RandomState()
# Calculate the CDF.
cdf_grid = np.cumsum(pdf_grid)
cdf_grid /= cdf_grid[-1]
# Calculate whitening / unwhitening transforms.
whiten, unwhiten = whiten_transforms_from_cdf(x_grid, cdf_grid)
# Build a linear grid of frequencies present in the Fourier transform
# of the requested time series. Frequency units are 1/day.
dt_day = dt_sec / (24. * 3600.)
df_day = 1. / (n_sample * dt_day)
f_grid = np.arange(1 + (n_sample // 2)) * df_day
# Tabulate the power spectral density at each frequency. The PSD
# describes seeing fluctuations that have been "whitened", i.e., mapped
# via a non-linear monotonic transform to have unit Gaussian probability
# density.
psd_grid = np.empty_like(f_grid)
psd_grid[1:] = psd(f_grid[1:])
# Force the mean to zero.
psd_grid[0] = 0.
# Force the variance to one.
psd_grid[1:] /= psd_grid[1:].sum() * df_day ** 2
# Generate random whitened samples.
n_psd = len(psd_grid)
x_fft = np.ones(n_psd, dtype=complex)
x_fft[1:-1].real = gen.normal(size=n_psd - 2)
x_fft[1:-1].imag = gen.normal(size=n_psd - 2)
x_fft *= np.sqrt(psd_grid) / (2 * dt_day)
x_fft[0] *= np.sqrt(2)
x = np.fft.irfft(x_fft, n_sample)
# Un-whiten the samples to recover the desired 1D PDF.
x_cdf = 0.5 * (1 + scipy.special.erf(x / np.sqrt(2)))
return np.interp(x_cdf, cdf_grid, x_grid)
def _seeing_psd(freq):
"""Evaluate the 'chi-by-eye' fit of the seeing PSD described in
DESI-doc-3087.
"""
N, f0, a0, a1 = 8000, 0.10, 2.8, -1.1
return (N * (freq/f0)**a0 / (1 + (freq/f0)**a0) *
(freq/f0) ** a1 / (10 + (freq/f0) ** a1))
def sample_seeing(n_sample, dt_sec=180., median_seeing=1.1, max_seeing=2.5,
gen=None):
"""Generate a random time series of FWHM seeing values.
See DESI-doc-3087 for details. Uses :func:`get_seeing_pdf`,
:func:`_seeing_psd` and :func:`sample_timeseries`.
Parameters
----------
n_sample : int
Number of equally spaced samples to generate.
dt_sec : float
Time interval between samples in seconds.
median_seeing : float
See :func:`get_seeing_pdf`.
    max_seeing : float
See :func:`get_seeing_pdf`.
gen : np.random.RandomState or None
Provide an existing RandomState for full control of reproducible random
numbers, or None for non-reproducible random numbers.
Returns
-------
array
1D array of randomly generated samples.
"""
fwhm_grid, pdf_grid = get_seeing_pdf(median_seeing, max_seeing)
return sample_timeseries(
fwhm_grid, pdf_grid, _seeing_psd, n_sample, dt_sec, gen)
_transp_pdf_cum = np.array([0.06,0.11,1.0])
_transp_pdf_powers = np.array([0., 2.5, 35.])
def get_transp_pdf(n=250):
"""Return PDF of atmospheric transparency.
Derived from MzLS observations, but corrected for dust accumulation and
measurement error. See DESI-doc-3087 for details.
Parameters
----------
n : int
Size of grid to use for tabulating the returned arrays.
Returns
-------
tuple
Tuple (transp, pdf) that tabulates pdf[transp]. Normalized so that
``np.sum(pdf * np.gradient(transp)) = 1``.
"""
transp = np.linspace(0., 1., n)
pdf = np.zeros_like(transp)
last_c = 0.
for c, p in zip(_transp_pdf_cum, _transp_pdf_powers):
pdf += (c - last_c) * np.power(transp, p) * (p + 1)
last_c = c
pdf /= pdf.sum() * np.gradient(transp)
return transp, pdf
def _transp_psd(freq):
"""Evaluate the 'chi-by-eye' fit of the transparency PSD described in
DESI-doc-3087.
"""
N, f0, a0, a1 = 500, 1.5, 0.0, -1.5
return (N * (freq/f0)**a0 / (1 + (freq/f0)**a0) *
(freq/f0) ** a1 / (1 + (freq/f0) ** a1))
def sample_transp(n_sample, dt_sec=180., gen=None):
"""Generate a random time series of atmospheric transparency values.
See DESI-doc-3087 for details. Uses :func:`get_transp_pdf`,
:func:`_transp_psd` and :func:`sample_timeseries`.
Parameters
----------
n_sample : int
Number of equally spaced samples to generate.
dt_sec : float
Time interval between samples in seconds.
gen : np.random.RandomState or None
Provide an existing RandomState for full control of reproducible random
numbers, or None for non-reproducible random numbers.
Returns
-------
array
1D array of randomly generated samples.
"""
transp_grid, pdf_grid = get_transp_pdf()
return sample_timeseries(
transp_grid, pdf_grid, _transp_psd, n_sample, dt_sec, gen)
def dome_closed_fractions(start_date, stop_date,
replay='Y2007,Y2008,Y2009,Y2010,Y2011,Y2012,Y2013,Y2014'):
"""Return dome-closed fractions for each night of the survey.
Years can be replayed in any order. If the number of years to replay is less
than the survey duration, they are repeated.
Parameters
----------
start_date : datetime.date or None
Survey starts on the evening of this date. Use the ``first_day``
config parameter if None (the default).
stop_date : datetime.date or None
Survey stops on the morning of this date. Use the ``last_day``
config parameter if None (the default).
replay : str
Comma-separated list of years to replay, identified by arbitrary strings
that must match column names in the DESIMODEL weather history.
Returns
-------
numpy array
1D array of N probabilities between 0-1, where N is the number of nights
spanned by the start and stop dates.
"""
# Check the inputs.
num_nights = (stop_date - start_date).days
if num_nights <= 0:
raise ValueError('Expected start_date < stop_date.')
replay = replay.split(',')
# Load tabulated daily weather history.
DESIMODEL = os.getenv('DESIMODEL')
path = os.path.join(DESIMODEL, 'data', 'weather', 'daily-2007-2017.csv')
t = astropy.table.Table.read(path)
if not len(t) == 365:
raise ValueError('Invalid weather history length (expected 365).')
years = t.colnames
lostfracs = []
for yr in replay:
if yr not in years:
raise ValueError('Replay year "{}" not in weather history.'.format(yr))
lostfrac = t[yr].data
if not np.all((lostfrac >= 0) & (lostfrac <= 1)):
raise ValueError('Invalid weather history for replay year "{}".'.format(yr))
lostfracs.append(lostfrac)
# Replay the specified years (with wrap-around if necessary),
# overlaid on the actual survey dates.
probs = np.zeros(num_nights)
start = start_date
for year_num, year in enumerate(range(start_date.year, stop_date.year + 1)):
first = datetime.date(year=year, month=1, day=1)
stop = datetime.date(year=year + 1, month=1, day=1)
if stop > stop_date:
stop = stop_date
n = (stop - start).days
if n == 0:
break
if calendar.isleap(year):
n -= 1
idx = (start - start_date).days
jdx = (start - first).days
lostfrac = lostfracs[year_num % len(replay)]
probs[idx:idx + n] = lostfrac[jdx:jdx + n]
start = stop
return probs
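# Illustrative sketch (not part of the original module): one month of nightly
# dome-closed fractions. This needs the $DESIMODEL data checkout on disk; the
# dates and replay years below are made-up examples.
def _example_dome_closed():
    start = datetime.date(2020, 1, 1)
    stop = datetime.date(2020, 2, 1)
    return dome_closed_fractions(start, stop, replay='Y2013,Y2014')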
| bsd-3-clause | -3,878,208,059,081,789,000 | 35.174014 | 88 | 0.635944 | false | 3.441722 | false | false | false |
taoliu/taolib | Scripts/kmeans2image.py | 1 | 1598 | #!/usr/bin/env python
# Time-stamp: <2009-04-14 14:07:21 Tao Liu>
import os
import sys
import re
from PIL import Image, ImageDraw
# ------------------------------------
# Main function
# ------------------------------------
help_message = """
Draw the K-means clustering result.
needs 6 parameters: %s <kmeans_file> <lim> <x_points> <y_points> <x_ext> <y_ext>
kmeans_file : tab-delimited plain text file. First column is cluster number by k-means, and following columns are data columns.
lim : data value limit
x_points : number of data value columns
y_points : number of rows
x_ext : pixels extended in x-axis
y_ext : pixels extended in y-axis
""" % sys.argv[0]
def main():
if len(sys.argv) < 7:
sys.stderr.write(help_message)
sys.exit(1)
fhd = open (sys.argv[1])
lim = int(sys.argv[2])
x_points = int(sys.argv[3])
y_points = int(sys.argv[4])
x_ext = int(sys.argv[5])
y_ext = int(sys.argv[6])
a = Image.new("RGB",(x_points*x_ext,y_points*y_ext),"white")
d = ImageDraw.Draw(a)
y = 0
for i in fhd:
y += 1
        i = i.strip()
if not re.search("^\d+",i):
continue
values = map(float,i.split())
x = 0
cl = values[0]
for v in values[1:]:
x += 1
c = "hsl(%d,100%%,%d%%)" % (cl*70,min(1,v/lim)*90.0)
d.rectangle([(int(x*x_ext),int(y*y_ext)),(int((x+1)*x_ext),int((y+1)*y_ext))],outline=c,fill=c)
a.save(sys.argv[1]+".png")
print "check %s!" % (sys.argv[1]+".png")
if __name__ == '__main__':
main()
| bsd-3-clause | -4,875,349,459,735,880,000 | 25.196721 | 127 | 0.530663 | false | 2.910747 | false | false | false |
TripleSnail/blender-zombie | python/text.py | 1 | 1754 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bge
import bgl
import blf
DPI = 72
class TextObject(object):
def __init__(self, text, px, py, size, time=0):
self.text = text
self.px = px
self.py = py
self.size = size
self.time = time
text_objects = []
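# Illustrative sketch (not part of the original module): anything appended to
# text_objects is drawn by write() every frame. Positions are fractions of the
# window size; the text and numbers below are made-up examples.
def _example_add_text():
    text_objects.append(TextObject("Zombie incoming!", 0.1, 0.9, 32))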
def init(controller):
font_path = bge.logic.expandPath('//fonts/DejaVuSans.ttf')
bge.logic.font_id = blf.load(font_path)
scene = bge.logic.getCurrentScene()
scene.post_draw = [write]
def write():
width = bge.render.getWindowWidth()
height = bge.render.getWindowHeight()
bgl.glMatrixMode(bgl.GL_PROJECTION)
bgl.glLoadIdentity()
bgl.gluOrtho2D(0, width, 0, height)
bgl.glMatrixMode(bgl.GL_MODELVIEW)
bgl.glLoadIdentity()
font_id = bge.logic.font_id
for text_obj in text_objects:
blf.position(font_id, width * text_obj.px , height * text_obj.py, 0)
blf.size(font_id, text_obj.size, DPI)
blf.draw(font_id, text_obj.text)
| gpl-2.0 | 8,312,866,228,532,016,000 | 28.728814 | 76 | 0.676739 | false | 3.31569 | false | false | false |
renyi/drum | drum/links/templatetags/drum_tags.py | 1 | 1472 | from __future__ import unicode_literals
from collections import defaultdict
from django.template.defaultfilters import timesince
from mezzanine import template
from mezzanine.generic.models import ThreadedComment
from drum.links.utils import order_by_score
from drum.links.models import LinkCategory
from drum.links.views import CommentList, USER_PROFILE_RELATED_NAME
register = template.Library()
@register.filter
def get_profile(user):
"""
Returns the profile object associated with the given user.
"""
return getattr(user, USER_PROFILE_RELATED_NAME)
@register.simple_tag(takes_context=True)
def order_comments_by_score_for(context, link):
"""
Preloads threaded comments in the same way Mezzanine initially does,
but here we order them by score.
"""
comments = defaultdict(list)
qs = link.comments.visible().select_related(
"user",
"user__%s" % (USER_PROFILE_RELATED_NAME)
)
for comment in order_by_score(qs, CommentList.score_fields, "submit_date"):
comments[comment.replied_to_id].append(comment)
context["all_comments"] = comments
return ""
@register.filter
def short_timesince(date):
return timesince(date).split(",")[0]
@register.as_tag
def link_category_list(*args):
return LinkCategory.objects.all()
@register.as_tag
def latest_comments(limit=5, *args):
qs = ThreadedComment.objects.filter(is_removed=False, is_public=True)
return qs.reverse()[:limit]
| bsd-2-clause | -4,074,832,187,691,731,000 | 25.763636 | 79 | 0.724185 | false | 3.607843 | false | false | false |
gdsfactory/gdsfactory | pp/layers.py | 1 | 9564 | """A GDS layer is a tuple of two integers.
You can:
- Define your layers in a dataclass
- Load it from Klayout XML file (.lyp)
LayerSet adapted from phidl.device_layout
load_lyp, name_to_description, name_to_short_name adapted from phidl.utilities
preview_layerset adapted from phidl.geometry
"""
import pathlib
from pathlib import Path
from typing import Optional, Tuple
import xmltodict
from phidl.device_layout import Layer as LayerPhidl
from phidl.device_layout import LayerSet as LayerSetPhidl
from pp.component import Component
from pp.name import clean_name
from pp.tech import TECH
from pp.types import PathType
LAYER = TECH.layer
class LayerSet(LayerSetPhidl):
def add_layer(
self,
name: str = "unnamed",
gds_layer: int = 0,
gds_datatype: int = 0,
description: Optional[str] = None,
color: Optional[str] = None,
inverted: bool = False,
alpha: float = 0.6,
dither: bool = None,
):
"""Adds a layer to an existing LayerSet object for nice colors.
Args:
name: Name of the Layer.
gds_layer: GDSII Layer number.
gds_datatype: GDSII datatype.
description: Layer description.
color: Hex code of color for the Layer.
inverted: If true, inverts the Layer.
alpha: layer opacity between 0 and 1.
dither: KLayout dither style, only used in phidl.utilities.write_lyp().
"""
new_layer = LayerPhidl(
gds_layer=gds_layer,
gds_datatype=gds_datatype,
name=name,
description=description,
inverted=inverted,
color=color,
alpha=alpha,
dither=dither,
)
if name in self._layers:
raise ValueError(
f"Adding {name} already defined {list(self._layers.keys())}"
)
else:
self._layers[name] = new_layer
# def __getitem__(self, val: str) -> Tuple[int, int]:
# """Returns gds layer tuple."""
# if val not in self._layers:
# raise ValueError(f"Layer {val} not in {list(self._layers.keys())}")
# else:
# layer = self._layers[val]
# return layer.gds_layer, layer.gds_datatype
def __repr__(self):
"""Prints the number of Layers in the LayerSet object."""
return (
f"LayerSet ({len(self._layers)} layers total) \n"
+ f"{list(self._layers.keys())}"
)
def get(self, name: str) -> LayerPhidl:
"""Returns Layer from name."""
if name not in self._layers:
raise ValueError(f"Layer {name} not in {list(self._layers.keys())}")
else:
return self._layers[name]
def get_from_tuple(self, layer_tuple: Tuple[int, int]) -> LayerPhidl:
"""Returns Layer from layer tuple (gds_layer, gds_datatype)."""
tuple_to_name = {
(v.gds_layer, v.gds_datatype): k for k, v in self._layers.items()
}
if layer_tuple not in tuple_to_name:
raise ValueError(f"Layer {layer_tuple} not in {list(tuple_to_name.keys())}")
name = tuple_to_name[layer_tuple]
return self._layers[name]
LAYER_COLORS = LayerSet() # Layerset makes plotgds look good
LAYER_COLORS.add_layer("WG", LAYER.WG[0], 0, "wg", color="gray", alpha=1)
LAYER_COLORS.add_layer("WGCLAD", LAYER.WGCLAD[0], 0, "", color="gray", alpha=0)
LAYER_COLORS.add_layer("SLAB150", LAYER.SLAB150[0], 0, "", color="lightblue", alpha=0.6)
LAYER_COLORS.add_layer("SLAB90", LAYER.SLAB90[0], 0, "", color="lightblue", alpha=0.2)
LAYER_COLORS.add_layer("WGN", LAYER.WGN[0], 0, "", color="orange", alpha=1)
LAYER_COLORS.add_layer("WGN_CLAD", LAYER.WGN_CLAD[0], 0, "", color="gray", alpha=0)
LAYER_COLORS.add_layer("DEVREC", LAYER.DEVREC[0], 0, "", color="gray", alpha=0.1)
PORT_LAYER_TO_TYPE = {
LAYER.PORT: "optical",
LAYER.PORTE: "dc",
LAYER.PORTH: "heater",
LAYER.TE: "vertical_te",
LAYER.TM: "vertical_tm",
}
PORT_TYPE_TO_LAYER = {v: k for k, v in PORT_LAYER_TO_TYPE.items()}
def preview_layerset(
ls: LayerSet = LAYER_COLORS, size: float = 100.0, spacing: float = 100.0
) -> Component:
"""Generates a preview Device with representations of all the layers,
used for previewing LayerSet color schemes in quickplot or saved .gds
files
"""
import numpy as np
import pp
D = Component(name="layerset")
scale = size / 100
num_layers = len(ls._layers)
matrix_size = int(np.ceil(np.sqrt(num_layers)))
sorted_layers = sorted(
ls._layers.values(), key=lambda x: (x.gds_layer, x.gds_datatype)
)
for n, layer in enumerate(sorted_layers):
R = pp.components.rectangle(size=(100 * scale, 100 * scale), layer=layer)
T = pp.components.text(
text="%s\n%s / %s" % (layer.name, layer.gds_layer, layer.gds_datatype),
size=20 * scale,
position=(50 * scale, -20 * scale),
justify="center",
layer=layer,
)
xloc = n % matrix_size
yloc = int(n // matrix_size)
D.add_ref(R).movex((100 + spacing) * xloc * scale).movey(
-(100 + spacing) * yloc * scale
)
D.add_ref(T).movex((100 + spacing) * xloc * scale).movey(
-(100 + spacing) * yloc * scale
)
return D
def _name_to_short_name(name_str: str) -> str:
"""Maps the name entry of the lyp element to a name of the layer,
i.e. the dictionary key used to access it.
Default format of the lyp name is
key - layer/datatype - description
or
key - description
"""
if name_str is None:
raise IOError(f"layer {name_str} has no name")
fields = name_str.split("-")
name = fields[0].split()[0].strip()
return clean_name(name)
def _name_to_description(name_str) -> str:
"""Gets the description of the layer contained in the lyp name field.
It is not strictly necessary to have a description. If none there, it returns ''.
Default format of the lyp name is
key - layer/datatype - description
or
key - description
"""
if name_str is None:
raise IOError(f"layer {name_str} has no name")
fields = name_str.split()
description = ""
if len(fields) > 1:
description = " ".join(fields[1:])
return description
def _add_layer(entry, lys: LayerSet) -> LayerSet:
"""Entry is a dict of one element of 'properties'.
No return value. It adds it to the lys variable directly
"""
info = entry["source"].split("@")[0]
# skip layers without name or with */*
if "'" in info or "*" in info:
return
name = entry.get("name") or entry.get("source")
if not name:
return
gds_layer, gds_datatype = info.split("/")
gds_layer = gds_layer.split()[-1]
gds_datatype = gds_datatype.split()[-1]
settings = dict()
settings["gds_layer"] = int(gds_layer)
settings["gds_datatype"] = int(gds_datatype)
settings["color"] = entry["fill-color"]
settings["dither"] = entry["dither-pattern"]
settings["name"] = _name_to_short_name(name)
settings["description"] = _name_to_description(name)
lys.add_layer(**settings)
return lys
def load_lyp(filepath: Path) -> LayerSet:
"""Returns a LayerSet object from a Klayout lyp file in XML format."""
with open(filepath, "r") as fx:
lyp_dict = xmltodict.parse(fx.read(), process_namespaces=True)
# lyp files have a top level that just has one dict: layer-properties
# That has multiple children 'properties', each for a layer. So it gives a list
lyp_list = lyp_dict["layer-properties"]["properties"]
if not isinstance(lyp_list, list):
lyp_list = [lyp_list]
lys = LayerSet()
for entry in lyp_list:
try:
group_members = entry["group-members"]
except KeyError: # it is a real layer
_add_layer(entry, lys)
else: # it is a group of other entries
if not isinstance(group_members, list):
group_members = [group_members]
for member in group_members:
_add_layer(member, lys)
return lys
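# Illustrative sketch (not part of the original module): loading a Klayout
# layer-properties file and looking a layer up by name or by GDS tuple. The
# file name is a made-up example and assumes it defines a layer named "WG".
def _example_load_lyp():
    lys = load_lyp("layers.lyp")
    wg = lys.get("WG")
    same = lys.get_from_tuple((wg.gds_layer, wg.gds_datatype))
    return wg, same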
# For port labelling purpose
# LAYERS_OPTICAL = [LAYER.WG]
# LAYERS_ELECTRICAL = [LAYER.M1, LAYER.M2, LAYER.M3]
# LAYERS_HEATER = [LAYER.HEATER]
def lyp_to_dataclass(lyp_filepath: PathType, overwrite: bool = True) -> str:
filepathin = pathlib.Path(lyp_filepath)
filepathout = filepathin.with_suffix(".py")
if filepathout.exists() and not overwrite:
raise FileExistsError(f"You can delete {filepathout}")
script = """
import dataclasses
@dataclasses.dataclass
class LayerMap():
"""
lys = load_lyp(filepathin)
for layer_name, layer in sorted(lys._layers.items()):
script += (
f" {layer_name}: Layer = ({layer.gds_layer}, {layer.gds_datatype})\n"
)
filepathout.write_text(script)
return script
def test_load_lyp():
from pp.config import layer_path
lys = load_lyp(layer_path)
assert len(lys._layers) == 82
return lys
if __name__ == "__main__":
pass
# print(LAYER_STACK.get_from_tuple((1, 0)))
# print(LAYER_STACK.get_layer_to_material())
# lys = test_load_lyp()
# c = preview_layerset(ls)
# c.show()
# print(LAYERS_OPTICAL)
# print(layer("wgcore"))
# print(layer("wgclad"))
# print(layer("padding"))
# print(layer("TEXT"))
# print(type(layer("wgcore")))
| mit | 7,863,857,862,018,327,000 | 30.564356 | 88 | 0.603199 | false | 3.429186 | false | false | false |
alissonperez/django-onmydesk | onmydesk/utils.py | 1 | 1501 | """Module with common utilities to this package"""
import re
from datetime import timedelta
import importlib
def my_import(class_name):
"""
Usage example::
Report = my_import('myclass.models.Report')
model_instance = Report()
model_instance.name = 'Test'
model_instance.save()
:param str class_name: Class name
:returns: Class object
"""
*packs, class_name = class_name.split('.')
try:
module = importlib.import_module('.'.join(packs))
klass = getattr(module, class_name)
return klass
except (ImportError, AttributeError) as e:
msg = 'Could not import "{}" from {}: {}.'.format(
class_name, e.__class__.__name__, e)
raise ImportError(msg)
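# Illustrative sketch (not part of the original module): how str_to_date,
# defined just below, resolves 'D'-style offsets. The reference date is a
# made-up example.
def _example_str_to_date():
    from datetime import date
    ref = date(2020, 1, 15)
    return str_to_date('D-1', ref), str_to_date('D', ref), str_to_date('D+2', ref)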
def str_to_date(value, reference_date):
'''
Convert a string like 'D-1' to a "reference_date - timedelta(days=1)"
:param str value: String like 'D-1', 'D+1', 'D'...
:param date reference_date: Date to be used as 'D'
:returns: Result date
:rtype: date
'''
n_value = value.strip(' ').replace(' ', '').upper()
if not re.match('^D[\-+][0-9]+$|^D$', n_value):
raise ValueError('Wrong value "{}"'.format(value))
if n_value == 'D':
return reference_date
elif n_value[:2] == 'D-':
days = int(n_value[2:])
return reference_date - timedelta(days=days)
elif n_value[:2] == 'D+':
days = int(n_value[2:])
return reference_date + timedelta(days=days)
| mit | -7,885,954,554,795,849,000 | 26.290909 | 73 | 0.578281 | false | 3.643204 | false | false | false |
bradallred/gemrb | gemrb/GUIScripts/iwd2/Abilities.py | 1 | 7433 | # GemRB - Infinity Engine Emulator
# Copyright (C) 2003 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
#character generation, ability (GUICG4)
import GemRB
from GUIDefines import *
import CharOverview
import CommonTables
from ie_stats import IE_STR, IE_DEX, IE_CON, IE_INT, IE_WIS, IE_CHR
AbilityWindow = 0
TextAreaControl = 0
DoneButton = 0
AbilityTable = 0
PointsLeft = 0
Minimum = 0
Maximum = 0
Add = 0
KitIndex = 0
CharGen = 0
Stats = [ IE_STR, IE_DEX, IE_CON, IE_INT, IE_WIS, IE_CHR ]
def CalcLimits(Abidx):
global Minimum, Maximum, Add
if not CharGen:
pc = GemRB.GameGetSelectedPCSingle ()
Minimum = GemRB.GetPlayerStat (pc, Stats[Abidx], 1)
Maximum = 25
return
Abracead = GemRB.LoadTable("ABRACEAD")
RaceID = GemRB.GetVar("Race")
RowIndex = CommonTables.Races.FindValue(3, RaceID)
RaceName = CommonTables.Races.GetRowName(RowIndex)
Minimum = 3
Maximum = 18
Abclasrq = GemRB.LoadTable("ABCLASRQ")
tmp = Abclasrq.GetValue(KitIndex, Abidx)
if tmp!=0 and tmp>Minimum:
Minimum = tmp
Abracerq = GemRB.LoadTable("ABRACERQ")
Race = Abracerq.GetRowIndex(RaceName)
tmp = Abracerq.GetValue(Race, Abidx*2)
if tmp!=0 and tmp>Minimum:
Minimum = tmp
tmp = Abracerq.GetValue(Race, Abidx*2+1)
if tmp!=0 and tmp>Maximum:
Maximum = tmp
Race = Abracead.GetRowIndex(RaceName)
Add = Abracead.GetValue(Race, Abidx)
Maximum = Maximum + Add
Minimum = Minimum + Add
if Minimum<1:
Minimum=1
return
def GetModColor(mod):
if mod < 0:
return {'r' : 255, 'g' : 0, 'b' : 0}
elif mod > 0:
return {'r' : 0, 'g' : 255, 'b' : 0}
else:
return {'r' : 255, 'g' : 255, 'b' : 255}
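# Illustrative sketch (not part of the original script): the ability modifier
# used throughout this file is (score // 2) - 5, so a score of 18 gives +4 and
# a score of 9 gives -1.
def _example_modifier(score=18):
    return score // 2 - 5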
def RollPress():
global Add
GemRB.SetVar("Ability",0)
SumLabel = AbilityWindow.GetControl(0x10000002)
SumLabel.SetTextColor ({'r' : 255, 'g' : 255, 'b' : 0})
SumLabel.SetUseRGB(1)
SumLabel.SetText(str(PointsLeft))
for i in range(0,6):
CalcLimits(i)
v = 10+Add
if not CharGen:
v = Minimum
b = v//2-5
GemRB.SetVar("Ability "+str(i), v )
Label = AbilityWindow.GetControl(0x10000003+i)
Label.SetText(str(v) )
Label = AbilityWindow.GetControl(0x10000024+i)
Label.SetUseRGB(1)
Label.SetTextColor (GetModColor (b))
Label.SetText("%+d"%(b))
return
def OnLoad():
OpenAbilitiesWindow (1, 16)
def OpenAbilitiesWindow(chargen, points):
global AbilityWindow, TextAreaControl, DoneButton
global CharGen, PointsLeft
global AbilityTable
global KitIndex, Minimum, Maximum
CharGen = chargen
PointsLeft = points
AbilityTable = GemRB.LoadTable ("ability")
if chargen:
Kit = GemRB.GetVar("Class Kit")
Class = GemRB.GetVar("Class")-1
if Kit == 0:
KitName = CommonTables.Classes.GetRowName(Class)
else:
#rowname is just a number, first value row what we need here
KitName = CommonTables.KitList.GetValue(Kit, 0)
Abclasrq = GemRB.LoadTable("ABCLASRQ")
KitIndex = Abclasrq.GetRowIndex(KitName)
# in a fit of clarity, they used the same ids in both windowpacks
if chargen:
AbilityWindow = GemRB.LoadWindow (4, "GUICG")
else:
AbilityWindow = GemRB.LoadWindow (7, "GUIREC")
CharOverview.PositionCharGenWin(AbilityWindow)
RollPress ()
for i in range(0,6):
Button = AbilityWindow.GetControl(i+30)
Button.SetEvent(IE_GUI_BUTTON_ON_PRESS, JustPress)
Button.SetVarAssoc("Ability", i)
Button = AbilityWindow.GetControl(i*2+16)
Button.SetEvent(IE_GUI_BUTTON_ON_PRESS, LeftPress)
Button.SetVarAssoc("Ability", i )
Button.SetActionInterval (200)
Button = AbilityWindow.GetControl(i*2+17)
Button.SetEvent(IE_GUI_BUTTON_ON_PRESS, RightPress)
Button.SetVarAssoc("Ability", i )
Button.SetActionInterval (200)
if chargen:
BackButton = AbilityWindow.GetControl (36)
BackButton.SetText (15416)
BackButton.MakeEscape()
BackButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, BackPress)
else:
AbilityWindow.DeleteControl (36)
DoneButton = AbilityWindow.GetControl(0)
DoneButton.SetText(36789)
DoneButton.MakeDefault()
DoneButton.SetState(IE_GUI_BUTTON_DISABLED)
DoneButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, NextPress)
TextAreaControl = AbilityWindow.GetControl(29)
TextAreaControl.SetText(17247)
if not chargen:
AbilityWindow.ShowModal (MODAL_SHADOW_GRAY)
else:
AbilityWindow.Focus()
return
def RightPress(btn, Abidx):
global PointsLeft
Ability = GemRB.GetVar("Ability "+str(Abidx) )
#should be more elaborate
CalcLimits(Abidx)
GemRB.SetToken("MINIMUM",str(Minimum) )
GemRB.SetToken("MAXIMUM",str(Maximum) )
TextAreaControl.SetText(AbilityTable.GetValue(Abidx, 1) )
if Ability<=Minimum:
return
Ability -= 1
GemRB.SetVar("Ability "+str(Abidx), Ability)
PointsLeft = PointsLeft + 1
SumLabel = AbilityWindow.GetControl(0x10000002)
SumLabel.SetText(str(PointsLeft) )
SumLabel.SetTextColor ({'r' : 255, 'g' : 255, 'b' : 0})
Label = AbilityWindow.GetControl(0x10000003+Abidx)
Label.SetText(str(Ability) )
Label = AbilityWindow.GetControl(0x10000024+Abidx)
b = Ability // 2 - 5
Label.SetTextColor (GetModColor (b))
Label.SetText("%+d"%(b))
DoneButton.SetState(IE_GUI_BUTTON_DISABLED)
return
def JustPress(btn, Abidx):
Ability = GemRB.GetVar("Ability "+str(Abidx) )
#should be more elaborate
CalcLimits(Abidx)
GemRB.SetToken("MINIMUM",str(Minimum) )
GemRB.SetToken("MAXIMUM",str(Maximum) )
TextAreaControl.SetText(AbilityTable.GetValue(Abidx, 1) )
return
def LeftPress(btn, Abidx):
global PointsLeft
CalcLimits(Abidx)
GemRB.SetToken("MINIMUM",str(Minimum) )
GemRB.SetToken("MAXIMUM",str(Maximum) )
Ability = GemRB.GetVar("Ability "+str(Abidx) )
TextAreaControl.SetText(AbilityTable.GetValue(Abidx, 1) )
if PointsLeft == 0:
return
if Ability>=Maximum: #should be more elaborate
return
Ability += 1
GemRB.SetVar("Ability "+str(Abidx), Ability)
PointsLeft = PointsLeft - 1
SumLabel = AbilityWindow.GetControl(0x10000002)
if PointsLeft == 0:
SumLabel.SetTextColor({'r' : 255, 'g' : 255, 'b' : 255})
SumLabel.SetText(str(PointsLeft) )
Label = AbilityWindow.GetControl(0x10000003+Abidx)
Label.SetText(str(Ability) )
Label = AbilityWindow.GetControl(0x10000024+Abidx)
b = Ability // 2 - 5
Label.SetTextColor (GetModColor (b))
Label.SetText("%+d"%(b))
if PointsLeft == 0:
DoneButton.SetState(IE_GUI_BUTTON_ENABLED)
return
def BackPress():
if AbilityWindow:
AbilityWindow.Unload()
GemRB.SetNextScript("CharGen5")
for i in range(6):
GemRB.SetVar("Ability "+str(i),0) #scrapping the abilities
return
def NextPress():
if AbilityWindow:
AbilityWindow.Unload()
if CharGen:
GemRB.SetNextScript("CharGen6") #skills
else:
# set the upgraded stats
pc = GemRB.GameGetSelectedPCSingle ()
for i in range (len(Stats)):
newValue = GemRB.GetVar ("Ability "+str(i))
GemRB.SetPlayerStat (pc, Stats[i], newValue)
# open up the next levelup window
import Enemy
Enemy.OpenEnemyWindow ()
return
| gpl-2.0 | 7,460,413,711,233,489,000 | 26.428044 | 81 | 0.727297 | false | 2.695069 | false | false | false |
Southpaw-TACTIC/Team | src/python/Lib/site-packages/pythonwin/pywin/tools/browseProjects.py | 1 | 8295 | import hierlist, string, regutil, os
import win32con, win32ui, win32api
import commctrl
from pywin.mfc import dialog
import glob
import pyclbr
import pywin.framework.scriptutils
import afxres
class HLIErrorItem(hierlist.HierListItem):
def __init__(self, text):
self.text = text
hierlist.HierListItem.__init__(self)
def GetText(self):
return self.text
class HLICLBRItem(hierlist.HierListItem):
def __init__(self, name, file, lineno, suffix = ""):
# If the 'name' object itself has a .name, use it. Not sure
# how this happens, but seems pyclbr related.
# See PyWin32 bug 817035
self.name = getattr(name, "name", name)
self.file = file
self.lineno = lineno
self.suffix = suffix
def __cmp__(self, other):
return cmp(self.name, other.name)
def GetText(self):
return self.name + self.suffix
def TakeDefaultAction(self):
if self.file:
pywin.framework.scriptutils.JumpToDocument(self.file, self.lineno, bScrollToTop=1)
else:
win32ui.SetStatusText("The source of this object is unknown")
def PerformItemSelected(self):
if self.file is None:
msg = "%s - source can not be located." % (self.name, )
else:
msg = "%s defined at line %d of %s" % (self.name, self.lineno, self.file)
win32ui.SetStatusText(msg)
class HLICLBRClass(HLICLBRItem):
def __init__(self, clbrclass, suffix = ""):
try:
name = clbrclass.name
file = clbrclass.file
lineno = clbrclass.lineno
self.super = clbrclass.super
self.methods = clbrclass.methods
except AttributeError:
name = clbrclass
file = lineno = None
self.super = []; self.methods = {}
HLICLBRItem.__init__(self, name, file, lineno, suffix)
def GetSubList(self):
ret = []
for c in self.super:
ret.append(HLICLBRClass(c, " (Parent class)"))
for meth, lineno in self.methods.items():
ret.append(HLICLBRMethod(meth, self.file, lineno, " (method)"))
return ret
def IsExpandable(self):
return len(self.methods) + len(self.super)
def GetBitmapColumn(self):
return 21
class HLICLBRFunction(HLICLBRClass):
def GetBitmapColumn(self):
return 22
class HLICLBRMethod(HLICLBRItem):
def GetBitmapColumn(self):
return 22
class HLIModuleItem(hierlist.HierListItem):
def __init__(self, path):
hierlist.HierListItem.__init__(self)
self.path = path
def GetText(self):
return os.path.split(self.path)[1] + " (module)"
def IsExpandable(self):
return 1
def TakeDefaultAction(self):
win32ui.GetApp().OpenDocumentFile( self.path )
def GetBitmapColumn(self):
col = 4 # Default
try:
if win32api.GetFileAttributes(self.path) & win32con.FILE_ATTRIBUTE_READONLY:
col = 5
except win32api.error:
pass
return col
def GetSubList(self):
mod, path = pywin.framework.scriptutils.GetPackageModuleName(self.path)
win32ui.SetStatusText("Building class list - please wait...", 1)
win32ui.DoWaitCursor(1)
try:
try:
reader = pyclbr.readmodule_ex # Post 1.5.2 interface.
extra_msg = " or functions"
except AttributeError:
reader = pyclbr.readmodule
extra_msg = ""
data = reader(mod, [path])
if data:
ret = []
for item in data.values():
if item.__class__ != pyclbr.Class: # ie, it is a pyclbr Function instance (only introduced post 1.5.2)
ret.append(HLICLBRFunction( item, " (function)" ) )
else:
ret.append(HLICLBRClass( item, " (class)") )
ret.sort()
return ret
else:
return [HLIErrorItem("No Python classes%s in module." % (extra_msg,))]
finally:
win32ui.DoWaitCursor(0)
win32ui.SetStatusText(win32ui.LoadString(afxres.AFX_IDS_IDLEMESSAGE))
def MakePathSubList(path):
ret = []
for filename in glob.glob(os.path.join(path,'*')):
if os.path.isdir(filename) and os.path.isfile(os.path.join(filename, "__init__.py")):
ret.append(HLIDirectoryItem(filename, os.path.split(filename)[1]))
else:
if string.lower(os.path.splitext(filename)[1]) in ['.py', '.pyw']:
ret.append(HLIModuleItem(filename))
return ret
class HLIDirectoryItem(hierlist.HierListItem):
def __init__(self, path, displayName = None, bSubDirs = 0):
hierlist.HierListItem.__init__(self)
self.path = path
self.bSubDirs = bSubDirs
if displayName:
self.displayName = displayName
else:
self.displayName = path
def IsExpandable(self):
return 1
def GetText(self):
return self.displayName
def GetSubList(self):
ret = MakePathSubList(self.path)
if os.path.split(self.path)[1] == "win32com": # Complete and utter hack for win32com.
try:
path = win32api.GetFullPathName(os.path.join(self.path, "..\\win32comext"))
ret = ret + MakePathSubList(path)
except win32ui.error:
pass
return ret
class HLIProjectRoot(hierlist.HierListItem):
def __init__(self, projectName, displayName = None):
hierlist.HierListItem.__init__(self)
self.projectName = projectName
self.displayName = displayName or projectName
def GetText(self):
return self.displayName
def IsExpandable(self):
return 1
def GetSubList(self):
paths = regutil.GetRegisteredNamedPath(self.projectName)
pathList = string.split(paths,";")
if len(pathList)==1: # Single dir - dont bother putting the dir in
ret = MakePathSubList(pathList[0])
else:
ret = map( HLIDirectoryItem, pathList )
return ret
class HLIRoot(hierlist.HierListItem):
def __init__(self):
hierlist.HierListItem.__init__(self)
def IsExpandable(self):
return 1
def GetSubList(self):
keyStr = regutil.BuildDefaultPythonKey() + "\\PythonPath"
hKey = win32api.RegOpenKey(regutil.GetRootKey(), keyStr)
try:
ret = []
ret.append(HLIProjectRoot("", "Standard Python Library")) # The core path.
index = 0
while 1:
try:
ret.append(HLIProjectRoot(win32api.RegEnumKey(hKey, index)))
index = index + 1
except win32api.error:
break
return ret
finally:
win32api.RegCloseKey(hKey)
class dynamic_browser (dialog.Dialog):
style = win32con.WS_OVERLAPPEDWINDOW | win32con.WS_VISIBLE
cs = (
win32con.WS_CHILD |
win32con.WS_VISIBLE |
commctrl.TVS_HASLINES |
commctrl.TVS_LINESATROOT |
commctrl.TVS_HASBUTTONS
)
dt = [
["Python Projects", (0, 0, 200, 200), style, None, (8, "MS Sans Serif")],
["SysTreeView32", None, win32ui.IDC_LIST1, (0, 0, 200, 200), cs]
]
def __init__ (self, hli_root):
dialog.Dialog.__init__ (self, self.dt)
self.hier_list = hierlist.HierListWithItems (
hli_root,
win32ui.IDB_BROWSER_HIER
)
self.HookMessage (self.on_size, win32con.WM_SIZE)
def OnInitDialog (self):
self.hier_list.HierInit (self)
return dialog.Dialog.OnInitDialog (self)
def on_size (self, params):
lparam = params[3]
w = win32api.LOWORD(lparam)
h = win32api.HIWORD(lparam)
self.GetDlgItem (win32ui.IDC_LIST1).MoveWindow((0,0,w,h))
def BrowseDialog():
root = HLIRoot()
if not root.IsExpandable():
raise TypeError, "Browse() argument must have __dict__ attribute, or be a Browser supported type"
dlg = dynamic_browser (root)
dlg.CreateWindow()
def DockableBrowserCreator(parent):
root = HLIRoot()
hl = hierlist.HierListWithItems (
root,
win32ui.IDB_BROWSER_HIER
)
style = win32con.WS_CHILD | win32con.WS_VISIBLE | win32con.WS_BORDER | commctrl.TVS_HASLINES | commctrl.TVS_LINESATROOT | commctrl.TVS_HASBUTTONS
control = win32ui.CreateTreeCtrl()
control.CreateWindow(style, (0, 0, 150, 300), parent, win32ui.IDC_LIST1)
list = hl.HierInit (parent, control)
return control
def DockablePathBrowser():
import pywin.docking.DockingBar
bar = pywin.docking.DockingBar.DockingBar()
bar.CreateWindow(win32ui.GetMainFrame(), DockableBrowserCreator, "Path Browser", 0x8e0a)
bar.SetBarStyle( bar.GetBarStyle()|afxres.CBRS_TOOLTIPS|afxres.CBRS_FLYBY|afxres.CBRS_SIZE_DYNAMIC)
bar.EnableDocking(afxres.CBRS_ALIGN_ANY)
win32ui.GetMainFrame().DockControlBar(bar)
# The "default" entry point
Browse = DockablePathBrowser
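# Usage sketch (illustrative only, inside the Pythonwin environment): calling
# Browse() -- i.e. DockablePathBrowser() -- docks the path browser bar in the
# main frame, while BrowseDialog() shows the same hierarchy in a stand-alone
# dialog window.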
| epl-1.0 | 5,805,571,004,573,647,000 | 30.276265 | 146 | 0.66522 | false | 3.014172 | false | false | false |
Commonists/SurfaceImageContentGap | surfaceimagecontentgap/rc.py | 1 | 2812 | from argparse import ArgumentParser
import datetime
import time
from surfaceimagecontentgap.imagegap import isthereanimage
from surfaceimagecontentgap.bot import SurfaceContentGapBot
def last_rc_time(site):
"""Datetime of last change."""
rc = site.recentchanges()
last_rev = rc.next()
return datetime.datetime \
.utcfromtimestamp(time.mktime(last_rev['timestamp']))
def previoushour(dt):
"""One hour previous given datetime."""
delta = datetime.timedelta(hours=1)
return dt - delta
def previousday(dt):
"""One day before given datetime."""
delta = datetime.timedelta(days=1)
return dt - delta
def rc_from(site, dt):
"""Recent changes from a given datetime."""
kwargs = {
'end': dt.strftime('%Y%m%d%H%M%S'),
'namespace': 0
}
rc = site.recentchanges(**kwargs)
# revisions
changes = []
# page titles
pages = []
for rev in rc:
changes.append(rev)
title = rev['title'].encode('utf-8')
if title not in pages:
pages.append(title)
return {
'list_revisions': changes,
'list_pages': pages
}
def articles_from_titles(site, titles):
"""Articles object from list of titles"""
return [site.Pages[title.decode('utf-8')] for title in titles]
def list_articles(bot):
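    """Return Page objects for the articles changed during the last hour."""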
# site
site = bot.site
    # recent changes from the last hour
end_dt = previoushour(last_rc_time(site))
recent_changes = rc_from(site, end_dt)
pages = recent_changes['list_pages']
return articles_from_titles(site, pages)
def main():
description = 'Analyzing Wikipedia to surface image content gap (rc).'
parser = ArgumentParser(description=description)
parser.add_argument('-w', '--wikipedia',
type=str,
dest='lang',
required=False,
default='fr',
help='Language code for Wikipedia')
parser.add_argument('-r', '--report',
type=str,
dest='report',
required=True,
help='Page name to write a report.')
parser.add_argument('-f', '--configfile',
type=str,
dest='config',
required=True,
help='Config file with login and password.')
args = parser.parse_args()
kwargs = {
'config_file': args.config,
'lang': args.lang,
'report': args.report,
'list_fun': list_articles,
'filter_fun': lambda bot, x: not isthereanimage(x),
'rank_fun': lambda bot, x: 0,
'frequency': 60
}
rc_bot = SurfaceContentGapBot(**kwargs)
rc_bot.run()
if __name__ == '__main__':
main()
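# Illustrative invocation only (the flag names are the ones declared in main()
# above; the report page and config file names are placeholders):
#   python rc.py -w fr -r "Wikipedia:Report page" -f surfaceimagecontentgap.conf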
| mit | 8,370,749,980,757,185,000 | 26.568627 | 74 | 0.558677 | false | 4.111111 | false | false | false |
Purg/SMQTK | python/smqtk/bin/classifyFiles.py | 1 | 5843 | """
Based on an input trained classifier configuration, classify a number of media
files whose descriptors are computed by the configured descriptor generator.
Input files that classify as the given label are then output to standard out.
Thus, this script acts like a filter.
"""
import glob
import json
import logging
import os
from smqtk.algorithms import get_classifier_impls
from smqtk.algorithms import get_descriptor_generator_impls
from smqtk.representation import ClassificationElementFactory
from smqtk.representation import DescriptorElementFactory
from smqtk.representation.data_element.file_element import DataFileElement
from smqtk.utils import plugin
from smqtk.utils.bin_utils import (
initialize_logging,
output_config,
basic_cli_parser,
)
__author__ = "[email protected]"
def get_cli_parser():
parser = basic_cli_parser(__doc__)
g_classifier = parser.add_argument_group("Classification")
g_classifier.add_argument('--overwrite',
action='store_true', default=False,
help='When generating a configuration file, '
'overwrite an existing file.')
g_classifier.add_argument('-l', '--label',
type=str, default=None,
help='The class to filter by. This is based on '
'the classifier configuration/model used. '
'If this is not provided, we will list the '
'available labels in the provided '
'classifier configuration.')
# Positional
parser.add_argument("file_globs",
nargs='*',
metavar='GLOB',
help='Series of shell globs specifying the files to '
'classify.')
return parser
def get_default_config():
return {
"descriptor_factory":
DescriptorElementFactory.get_default_config(),
"descriptor_generator":
plugin.make_config(get_descriptor_generator_impls()),
"classification_factory":
ClassificationElementFactory.get_default_config(),
"classifier":
plugin.make_config(get_classifier_impls()),
}
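# For orientation only: the configuration consumed by this script (and emitted
# by its generate-config option) is a JSON object with the four sections built
# above. The nested contents of each section depend on which SMQTK plugin
# implementations are installed locally, so only the top level is shown here:
#   {
#       "descriptor_factory": {...},
#       "descriptor_generator": {...},
#       "classification_factory": {...},
#       "classifier": {...}
#   }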
def main():
log = logging.getLogger(__name__)
parser = get_cli_parser()
args = parser.parse_args()
config_path = args.config
generate_config = args.generate_config
config_overwrite = args.overwrite
is_debug = args.verbose
label = args.label
file_globs = args.file_globs
initialize_logging(logging.getLogger(__name__),
is_debug and logging.DEBUG or logging.INFO)
initialize_logging(logging.getLogger('smqtk'),
is_debug and logging.DEBUG or logging.INFO)
log.debug("Showing debug messages.")
config = get_default_config()
config_loaded = False
if config_path and os.path.isfile(config_path):
with open(config_path) as f:
log.info("Loading configuration: %s", config_path)
config.update(
json.load(f)
)
config_loaded = True
output_config(generate_config, config, log, config_overwrite, 100)
if not config_loaded:
log.error("No configuration provided")
exit(101)
classify_files(config, label, file_globs)
def classify_files(config, label, file_globs):
log = logging.getLogger(__name__)
#: :type: smqtk.algorithms.Classifier
classifier = \
plugin.from_plugin_config(config['classifier'],
get_classifier_impls())
    def log_available_labels():
        log.info("Available classifier labels:")
        for l in classifier.get_labels():
            log.info("- %s", l)
    if label is None:
        log_available_labels()
        return
    elif label not in classifier.get_labels():
        log.error("Invalid classification label provided to compute and filter "
                  "on: '%s'", label)
        log_available_labels()
        return
log.info("Collecting files from globs")
#: :type: list[DataFileElement]
data_elements = []
uuid2filepath = {}
for g in file_globs:
if os.path.isfile(g):
d = DataFileElement(g)
data_elements.append(d)
uuid2filepath[d.uuid()] = g
else:
log.debug("expanding glob: %s", g)
for fp in glob.iglob(g):
d = DataFileElement(fp)
data_elements.append(d)
uuid2filepath[d.uuid()] = fp
if not data_elements:
raise RuntimeError("No files provided for classification.")
log.info("Computing descriptors")
descriptor_factory = \
DescriptorElementFactory.from_config(config['descriptor_factory'])
#: :type: smqtk.algorithms.DescriptorGenerator
descriptor_generator = \
plugin.from_plugin_config(config['descriptor_generator'],
get_descriptor_generator_impls())
descr_map = descriptor_generator\
.compute_descriptor_async(data_elements, descriptor_factory)
log.info("Classifying descriptors")
classification_factory = ClassificationElementFactory \
.from_config(config['classification_factory'])
classification_map = classifier\
.classify_async(descr_map.values(), classification_factory)
log.info("Printing input file paths that classified as the given label.")
# map of UUID to filepath:
uuid2c = dict((c.uuid, c) for c in classification_map.itervalues())
for data in data_elements:
if uuid2c[data.uuid()].max_label() == label:
print uuid2filepath[data.uuid()]
if __name__ == '__main__':
main()
| bsd-3-clause | -9,202,389,211,392,059,000 | 33.169591 | 80 | 0.610303 | false | 4.403165 | true | false | false |
bgris/ODL_bgris | lib/python3.5/site-packages/odl/util/graphics.py | 1 | 15419 | # Copyright 2014-2016 The ODL development group
#
# This file is part of ODL.
#
# ODL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ODL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ODL. If not, see <http://www.gnu.org/licenses/>.
"""Functions for graphical output."""
# Imports for common Python 2/3 codebase
from __future__ import print_function, division, absolute_import
from future import standard_library
standard_library.install_aliases()
import numpy as np
from odl.util.testutils import run_doctests
from odl.util.utility import is_real_dtype
__all__ = ('show_discrete_data',)
def _safe_minmax(values):
"""Calculate min and max of array with guards for nan and inf."""
# Nan and inf guarded min and max
minval = np.min(values[np.isfinite(values)])
maxval = np.max(values[np.isfinite(values)])
return minval, maxval
def _colorbar_ticks(minval, maxval):
"""Return the ticks (values show) in the colorbar."""
return [minval, (maxval + minval) / 2., maxval]
def _digits(minval, maxval):
"""Digits needed to comforatbly display values in [minval, maxval]"""
if minval == maxval:
return 3
else:
return min(10, max(2, int(1 + abs(np.log10(maxval - minval)))))
def _colorbar_format(minval, maxval):
"""Return the format string for the colorbar."""
return '%.{}f'.format(_digits(minval, maxval))
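# Worked example (illustrative): for minval=0.0 and maxval=0.25 the helpers
# above give _digits = min(10, max(2, int(1 + abs(log10(0.25))))) = 2, so
# _colorbar_format returns '%.2f' and _colorbar_ticks returns [0.0, 0.125, 0.25].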
def _axes_info(grid, npoints=5):
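    """Return a list of (tick_positions, tick_labels) tuples, one per grid axis."""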
result = []
min_pt = grid.min()
max_pt = grid.max()
for axis in range(grid.ndim):
xmin = min_pt[axis]
xmax = max_pt[axis]
points = np.linspace(xmin, xmax, npoints)
indices = np.linspace(0, grid.shape[axis] - 1, npoints, dtype=int)
tick_values = grid.coord_vectors[axis][indices]
# Do not use corner point in case of a partition, use outer corner
tick_values[[0, -1]] = xmin, xmax
format_str = '{:.' + str(_digits(xmin, xmax)) + 'f}'
tick_labels = [format_str.format(f) for f in tick_values]
result += [(points, tick_labels)]
return result
def show_discrete_data(values, grid, title=None, method='',
force_show=False, fig=None, **kwargs):
"""Display a discrete 1d or 2d function.
Parameters
----------
values : `numpy.ndarray`
The values to visualize
grid : `TensorGrid` or `RectPartition`
Grid of the values
title : string, optional
Set the title of the figure
method : string, optional
1d methods:
'plot' : graph plot
'scatter' : scattered 2d points
(2nd axis <-> value)
2d methods:
'imshow' : image plot with coloring according to value,
including a colorbar.
'scatter' : cloud of scattered 3d points
(3rd axis <-> value)
'wireframe', 'plot_wireframe' : surface plot
force_show : bool, optional
Whether the plot should be forced to be shown now or deferred until
        later. Note that some backends always display the plot, regardless
of this value.
fig : `matplotlib.figure.Figure`, optional
        The figure to show in. Expected to be of the same "style" as a figure
        given by this function. The most common use case is that fig is the
        return value of an earlier call to this function.
Default: New figure
interp : {'nearest', 'linear'}, optional
Interpolation method to use.
Default: 'nearest'
axis_labels : string, optional
Axis labels, default: ['x', 'y']
update_in_place : bool, optional
Update the content of the figure in place. Intended for faster real
time plotting, typically ~5 times faster.
This is only performed for ``method == 'imshow'`` with real data and
``fig != None``. Otherwise this parameter is treated as False.
Default: False
axis_fontsize : int, optional
Fontsize for the axes. Default: 16
kwargs : {'figsize', 'saveto', ...}
Extra keyword arguments passed on to display method
See the Matplotlib functions for documentation of extra
options.
Returns
-------
fig : `matplotlib.figure.Figure`
The resulting figure. It is also shown to the user.
See Also
--------
matplotlib.pyplot.plot : Show graph plot
matplotlib.pyplot.imshow : Show data as image
matplotlib.pyplot.scatter : Show scattered 3d points
"""
# Importing pyplot takes ~2 sec, only import when needed.
import matplotlib.pyplot as plt
args_re = []
args_im = []
dsp_kwargs = {}
sub_kwargs = {}
    arrange_subplots = (121, 122)  # horizontal arrangement
# Create axis labels which remember their original meaning
axis_labels = kwargs.pop('axis_labels', ['x', 'y'])
values_are_complex = not is_real_dtype(values.dtype)
figsize = kwargs.pop('figsize', None)
saveto = kwargs.pop('saveto', None)
interp = kwargs.pop('interp', 'nearest')
axis_fontsize = kwargs.pop('axis_fontsize', 16)
# Check if we should and can update the plot in place
update_in_place = kwargs.pop('update_in_place', False)
if (update_in_place and
(fig is None or values_are_complex or values.ndim != 2 or
(values.ndim == 2 and method not in ('', 'imshow')))):
update_in_place = False
if values.ndim == 1: # TODO: maybe a plotter class would be better
if not method:
if interp == 'nearest':
method = 'step'
dsp_kwargs['where'] = 'mid'
elif interp == 'linear':
method = 'plot'
else:
method = 'plot'
if method == 'plot' or method == 'step' or method == 'scatter':
args_re += [grid.coord_vectors[0], values.real]
args_im += [grid.coord_vectors[0], values.imag]
else:
raise ValueError('`method` {!r} not supported'
''.format(method))
elif values.ndim == 2:
if not method:
method = 'imshow'
if method == 'imshow':
args_re = [np.rot90(values.real)]
args_im = [np.rot90(values.imag)] if values_are_complex else []
extent = [grid.min()[0], grid.max()[0],
grid.min()[1], grid.max()[1]]
if interp == 'nearest':
interpolation = 'nearest'
elif interp == 'linear':
interpolation = 'bilinear'
else:
interpolation = 'none'
dsp_kwargs.update({'interpolation': interpolation,
'cmap': 'bone',
'extent': extent,
'aspect': 'auto'})
elif method == 'scatter':
pts = grid.points()
args_re = [pts[:, 0], pts[:, 1], values.ravel().real]
args_im = ([pts[:, 0], pts[:, 1], values.ravel().imag]
if values_are_complex else [])
sub_kwargs.update({'projection': '3d'})
elif method in ('wireframe', 'plot_wireframe'):
method = 'plot_wireframe'
x, y = grid.meshgrid
args_re = [x, y, np.rot90(values.real)]
args_im = ([x, y, np.rot90(values.imag)] if values_are_complex
else [])
sub_kwargs.update({'projection': '3d'})
else:
raise ValueError('`method` {!r} not supported'
''.format(method))
else:
raise NotImplementedError('no method for {}d display implemented'
''.format(values.ndim))
# Additional keyword args are passed on to the display method
dsp_kwargs.update(**kwargs)
if fig is not None:
# Reuse figure if given as input
if not isinstance(fig, plt.Figure):
raise TypeError('`fig` {} not a matplotlib figure'.format(fig))
if not plt.fignum_exists(fig.number):
# If figure does not exist, user either closed the figure or
# is using IPython, in this case we need a new figure.
fig = plt.figure(figsize=figsize)
updatefig = False
else:
# Set current figure to given input
fig = plt.figure(fig.number)
updatefig = True
if values.ndim > 1 and not update_in_place:
                # If the data is more than 1d, we can clear the figure since we
                # don't reuse anything. Keeping it causes performance problems.
fig.clf()
else:
fig = plt.figure(figsize=figsize)
updatefig = False
if values_are_complex:
# Real
if len(fig.axes) == 0:
# Create new axis if needed
sub_re = plt.subplot(arrange_subplots[0], **sub_kwargs)
sub_re.set_title('Real part')
sub_re.set_xlabel(axis_labels[0], fontsize=axis_fontsize)
if values.ndim == 2:
sub_re.set_ylabel(axis_labels[1], fontsize=axis_fontsize)
else:
sub_re.set_ylabel('value')
else:
sub_re = fig.axes[0]
display_re = getattr(sub_re, method)
csub_re = display_re(*args_re, **dsp_kwargs)
# Axis ticks
if method == 'imshow' and not grid.is_uniform:
(xpts, xlabels), (ypts, ylabels) = _axes_info(grid)
plt.xticks(xpts, xlabels)
plt.yticks(ypts, ylabels)
if method == 'imshow' and len(fig.axes) < 2:
# Create colorbar if none seems to exist
# Use clim from kwargs if given
if 'clim' not in kwargs:
minval_re, maxval_re = _safe_minmax(values.real)
else:
minval_re, maxval_re = kwargs['clim']
ticks_re = _colorbar_ticks(minval_re, maxval_re)
format_re = _colorbar_format(minval_re, maxval_re)
plt.colorbar(csub_re, orientation='horizontal',
ticks=ticks_re, format=format_re)
# Imaginary
if len(fig.axes) < 3:
sub_im = plt.subplot(arrange_subplots[1], **sub_kwargs)
sub_im.set_title('Imaginary part')
sub_im.set_xlabel(axis_labels[0], fontsize=axis_fontsize)
if values.ndim == 2:
sub_im.set_ylabel(axis_labels[1], fontsize=axis_fontsize)
else:
sub_im.set_ylabel('value')
else:
sub_im = fig.axes[2]
display_im = getattr(sub_im, method)
csub_im = display_im(*args_im, **dsp_kwargs)
# Axis ticks
if method == 'imshow' and not grid.is_uniform:
(xpts, xlabels), (ypts, ylabels) = _axes_info(grid)
plt.xticks(xpts, xlabels)
plt.yticks(ypts, ylabels)
if method == 'imshow' and len(fig.axes) < 4:
# Create colorbar if none seems to exist
# Use clim from kwargs if given
if 'clim' not in kwargs:
minval_im, maxval_im = _safe_minmax(values.imag)
else:
minval_im, maxval_im = kwargs['clim']
ticks_im = _colorbar_ticks(minval_im, maxval_im)
format_im = _colorbar_format(minval_im, maxval_im)
plt.colorbar(csub_im, orientation='horizontal',
ticks=ticks_im, format=format_im)
else:
if len(fig.axes) == 0:
# Create new axis object if needed
sub = plt.subplot(111, **sub_kwargs)
sub.set_xlabel(axis_labels[0], fontsize=axis_fontsize)
if values.ndim == 2:
sub.set_ylabel(axis_labels[1], fontsize=axis_fontsize)
else:
sub.set_ylabel('value')
try:
# For 3d plots
sub.set_zlabel('z')
except AttributeError:
pass
else:
sub = fig.axes[0]
if update_in_place:
import matplotlib as mpl
imgs = [obj for obj in sub.get_children()
if isinstance(obj, mpl.image.AxesImage)]
if len(imgs) > 0 and updatefig:
imgs[0].set_data(args_re[0])
csub = imgs[0]
# Update min-max
if 'clim' not in kwargs:
minval, maxval = _safe_minmax(values)
else:
minval, maxval = kwargs['clim']
csub.set_clim(minval, maxval)
else:
display = getattr(sub, method)
csub = display(*args_re, **dsp_kwargs)
else:
display = getattr(sub, method)
csub = display(*args_re, **dsp_kwargs)
# Axis ticks
if method == 'imshow' and not grid.is_uniform:
(xpts, xlabels), (ypts, ylabels) = _axes_info(grid)
plt.xticks(xpts, xlabels)
plt.yticks(ypts, ylabels)
if method == 'imshow':
# Add colorbar
# Use clim from kwargs if given
if 'clim' not in kwargs:
minval, maxval = _safe_minmax(values)
else:
minval, maxval = kwargs['clim']
ticks = _colorbar_ticks(minval, maxval)
format = _colorbar_format(minval, maxval)
if len(fig.axes) < 2:
# Create colorbar if none seems to exist
plt.colorbar(mappable=csub, ticks=ticks, format=format)
elif update_in_place:
# If it exists and we should update it
csub.colorbar.set_clim(minval, maxval)
csub.colorbar.set_ticks(ticks)
csub.colorbar.set_ticklabels([format % tick for tick in ticks])
csub.colorbar.draw_all()
# Fixes overlapping stuff at the expense of potentially squashed subplots
if not update_in_place:
fig.tight_layout()
if title is not None:
if not values_are_complex:
# Do not overwrite title for complex values
plt.title(title)
fig.canvas.manager.set_window_title(title)
if updatefig or plt.isinteractive():
# If we are running in interactive mode, we can always show the fig
# This causes an artifact, where users of `CallbackShow` without
# interactive mode only shows the figure after the second iteration.
plt.show(block=False)
if not update_in_place:
plt.draw()
plt.pause(0.0001)
else:
try:
sub.draw_artist(csub)
fig.canvas.blit(fig.bbox)
fig.canvas.update()
fig.canvas.flush_events()
except AttributeError:
plt.draw()
plt.pause(0.0001)
if force_show:
plt.show()
if saveto is not None:
fig.savefig(saveto)
return fig
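# A minimal usage sketch for show_discrete_data(). Illustrative only: it assumes
# odl.uniform_partition is available and yields the RectPartition described in
# the docstring above; this helper is not called anywhere in this module.
def _example_show_discrete_data():
    import odl
    part = odl.uniform_partition([0, 0], [1, 1], (100, 100))
    values = np.random.rand(100, 100)
    return show_discrete_data(values, part, title='random values',
                              method='imshow', force_show=True)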
if __name__ == '__main__':
run_doctests()
| gpl-3.0 | -3,538,194,548,052,486,700 | 33.037528 | 79 | 0.560672 | false | 4.01223 | false | false | false |
kylejusticemagnuson/pyti | tests/test_stochastic.py | 1 | 15612 | from __future__ import absolute_import
import unittest
import numpy as np
from tests.sample_data import SampleData
from pyti import stochastic
class TestStochastic(unittest.TestCase):
def setUp(self):
"""Create data to use for testing."""
self.data = SampleData().get_sample_close_data()
self.percent_k_period_6_expected = [np.nan, np.nan, np.nan, np.nan,
np.nan, 0.9856979405034324, 1.0, 1.0, 0.63513513513513342,
0.27567567567568274, 1.0, 1.0, 0.68322981366460012, 0.0,
0.15515515515516184, 0.0, 0.0, 0.0, 0.06131650135257203, 0.0, 0.0,
0.4255711127487089, 1.0, 0.85463958582237798, 0.63201911589008342,
0.58422939068100166, 0.67256637168141331, 0.55555555555554825, 0.0, 1.0,
0.39352306182532032, 0.0, 0.0, 0.56253794778384958, 0.82179720704310821,
1.0, 1.0, 0.83066712049012859, 0.23241362167536711,
0.059955822025878437, 0.23704663212435031, 0.78950777202072531, 1.0,
1.0, 0.94086165373294273, 1.0, 1.0, 1.0, 0.36487221315932178,
0.23273518216421837, 0.38695960311835798, 0.0, 0.0, 0.0, 0.0,
0.33420252064319617, 0.31533601378518206, 1.0, 0.0, 0.17607726597325543,
0.038632986627041961, 0.15453194650816784, 0.0, 1.0,
0.61413043478261453, 1.0, 1.0, 0.21932367149758231, 1.0, 1.0,
0.17894736842105138, 0.0, 0.0, 0.12548638132295883, 0.2840466926070046,
0.0, 0.0, 0.80735411670663715, 0.0, 1.0, 1.0, 1.0, 0.42937563971340847,
0.14943705220061232, 0.0, 0.11392405063290814, 0.32856356631810901,
0.48005698005698194, 0.24288107202678813, 0.62814070351758511, 1.0, 1.0,
1.0, 1.0, 1.0, 0.52095130237826281, 1.0, 1.0, 1.0, 1.0,
0.86164383561643876, 0.0, 0.52147239263801737, 0.0, 0.14857651245551226,
0.28054740957966762, 0.3811983471074456, 0.0, 0.0, 0.0, 0.0, 0.0,
0.052040212891779666, 0.0, 0.35317460317461002, 0.0, 0.0, 0.0,
0.0079254079254060007, 0.0, 0.12661930631007018, 0.0, 0.0, 0.0,
0.067722772277229157, 0.0, 0.24025100851636036]
self.percent_k_period_8_expected = [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, 1.0, 0.78084415584415301, 0.49576669802445755,
1.0, 1.0, 0.68940316686967806, 0.0, 0.15515515515516184, 0.0, 0.0, 0.0,
0.048909134500121687, 0.0, 0.0, 0.25598404255319046,
0.81420233463035285, 0.79071481208548022, 0.63201911589008342,
0.58422939068100166, 0.82317801672640178, 0.81521306252488657,
0.0066371681415952387, 0.75649591685225837, 0.39352306182532032, 0.0,
0.0, 0.56253794778384958, 0.82179720704310821, 1.0, 1.0,
0.83066712049012859, 0.47447243022464258, 0.49302246426140284,
0.41436738752174873, 0.79488797727989935, 0.93264248704663077, 1.0,
0.94253770150806226, 1.0, 1.0, 1.0, 0.61401189689358671,
0.45394736842105277, 0.52963567156063163, 0.22512234910277268, 0.0, 0.0,
0.0, 0.33420252064319617, 0.23859191655801873, 0.43850499782702834, 0.0,
0.17607726597325543, 0.038632986627041961, 0.15453194650816784, 0.0,
0.26686004350978676, 0.16388687454677281, 1.0, 1.0, 0.21932367149758231,
1.0, 1.0, 0.17956423741547525, 0.0, 0.0, 0.12548638132295883,
0.2840466926070046, 0.0, 0.0, 0.61925199264255404, 0.0, 1.0, 1.0, 1.0,
0.42937563971340847, 0.14943705220061232, 0.070112589559877536,
0.17604912998976188, 0.32856356631810901, 0.18547055586131053,
0.079801871216287013, 0.53418803418803562, 1.0, 1.0, 1.0, 1.0, 1.0,
0.7004249291784771, 1.0, 1.0, 1.0, 1.0, 0.86164383561643876,
0.55342465753424508, 0.78630136986300425, 0.0, 0.14857651245551226,
0.25533807829181515, 0.32829181494662379, 0.0, 0.0, 0.0, 0.0, 0.0,
0.040534315983417502, 0.0, 0.07229894394801159, 0.0, 0.0, 0.0,
0.0071881606765310463, 0.0, 0.1097826086956511, 0.0, 0.0, 0.0,
0.059915907498249425, 0.0, 0.19406227371469995]
self.percent_k_period_10_expected = [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan, 0.76439560439560383, 1.0, 1.0,
0.74727452923687354, 0.009910802775026999, 0.15515515515516184, 0.0,
0.0, 0.0, 0.048909134500121687, 0.0, 0.0, 0.22642619094295152,
0.55651595744680871, 0.47562056737588476, 0.51459143968871746,
0.54053058216654259, 0.82317801672640178, 0.81521306252488657,
0.46356033452807566, 0.86937475109517781, 0.30235988200590008, 0.0, 0.0,
0.56253794778384958, 0.82179720704310821, 1.0, 1.0, 0.83066712049012859,
0.47447243022464258, 0.49302246426140284, 0.59904697072838564,
0.88938053097345127, 0.94829729057916878, 1.0, 0.94253770150806226, 1.0,
1.0, 1.0, 0.78188608776843938, 0.70181741335587489, 0.7141440846001329,
0.44852941176470656, 0.0, 0.0, 0.0, 0.24289324068224727,
0.17340492735312743, 0.43850499782702834, 0.0, 0.089840788476118455,
0.025024061597689246, 0.15453194650816784, 0.0, 0.26686004350978676,
0.16388687454677281, 0.70195794053661897, 0.75054387237128717,
0.21932367149758231, 1.0, 1.0, 0.2986512524084754, 0.0, 0.0,
0.12548638132295883, 0.2840466926070046, 0.0, 0.0, 0.3709144326110913,
0.0, 0.86767371601208776, 1.0, 1.0, 0.42937563971340847,
0.14943705220061232, 0.070112589559877536, 0.17604912998976188,
0.37563971340839536, 0.24257932446264166, 0.079801871216287013,
0.2063841496973037, 0.37094111172262106, 1.0, 1.0, 1.0, 1.0,
0.7004249291784771, 1.0, 1.0, 1.0, 1.0, 0.9124783362218376,
0.63122171945701588, 0.78630136986300425, 0.0, 0.14857651245551226,
0.25533807829181515, 0.32829181494662379, 0.0, 0.0, 0.0, 0.0, 0.0,
0.040534315983417502, 0.0, 0.057382333978080118, 0.0, 0.0, 0.0,
0.0064540622627167372, 0.0, 0.10167785234899253, 0.0, 0.0, 0.0,
0.037053087757313918, 0.0, 0.17340666450986797]
self.percent_d_period_6_expected = [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, 0.99523264683447754, 0.87837837837837773,
0.63693693693693865, 0.63693693693693876, 0.75855855855856091,
0.8944099378882, 0.56107660455486663, 0.27946165627325398,
0.051718385051720613, 0.051718385051720613, 0.0, 0.020438833784190678,
0.020438833784190678, 0.020438833784190678, 0.14185703758290297,
0.47519037091623634, 0.76007023285702902, 0.82888623390415372,
0.69029603079782087, 0.62960495941749939, 0.60411710597265433,
0.40937397574565387, 0.51851851851851605, 0.46450768727510677,
0.46450768727510677, 0.13117435394177343, 0.18751264926128319,
0.46144505160898591, 0.79477838494231923, 0.9405990690143694,
0.94355570683004286, 0.68769358072183184, 0.37434552139712474,
0.17647202527519865, 0.36217007539031804, 0.6755181347150252,
0.9298359240069084, 0.98028721791098095, 0.98028721791098095,
0.98028721791098095, 1.0, 0.78829073771977398, 0.53253579844118004,
0.32818899948063268, 0.20656492842752547, 0.12898653437278598, 0.0, 0.0,
0.11140084021439872, 0.2165128448094594, 0.54984617814279269,
0.43844533792839407, 0.39202575532441847, 0.071570084200099124,
0.12308073303615508, 0.064388311045069938, 0.38484398216938925,
0.53804347826087151, 0.87137681159420488, 0.87137681159420488,
0.73977455716586071, 0.73977455716586071, 0.73977455716586071,
0.7263157894736838, 0.39298245614035049, 0.059649122807017126,
0.041828793774319611, 0.13651102464332113, 0.13651102464332113,
0.09468223086900153, 0.26911803890221236, 0.26911803890221236,
0.60245137223554568, 0.66666666666666663, 1.0, 0.80979187990446944,
0.52627089730467358, 0.19293756397134029, 0.08778703427784014,
0.1474958723170057, 0.307514865669333, 0.35050053946729304,
0.45035958520045166, 0.6236739251814577, 0.87604690117252837, 1.0, 1.0,
1.0, 0.84031710079275435, 0.84031710079275435, 0.84031710079275435, 1.0,
1.0, 0.95388127853881288, 0.62054794520547951, 0.46103874275148532,
0.17382413087933912, 0.22334963503117655, 0.14304130734505996,
0.27010742304754182, 0.22058191889570442, 0.12706611570248186, 0.0, 0.0,
0.0, 0.017346737630593221, 0.017346737630593221, 0.13507160535546323,
0.11772486772487001, 0.11772486772487001, 0.0, 0.0026418026418020004,
0.0026418026418020004, 0.044848238078492059, 0.04220643543669006,
0.04220643543669006, 0.0, 0.022574257425743052, 0.022574257425743052,
0.10265792693119651]
self.percent_d_period_8_expected = [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan, 0.75887028462287021,
0.75887028462287009, 0.83192223267481913, 0.89646772228989269,
0.56313438895655932, 0.28151944067494666, 0.051718385051720613,
0.051718385051720613, 0.0, 0.016303044833373897, 0.016303044833373897,
0.016303044833373897, 0.085328014184396825, 0.35672879239451444,
0.62030039642300794, 0.74564542086863883, 0.66898777288552169,
0.67980884109916229, 0.74087348997742997, 0.54834274913096126,
0.52611538250624668, 0.38555204893972461, 0.38333965955919291,
0.13117435394177343, 0.18751264926128319, 0.46144505160898591,
0.79477838494231923, 0.9405990690143694, 0.94355570683004286,
0.76837985023825706, 0.59938733832539137, 0.46062076066926472,
0.56742594302101701, 0.71396595061609303, 0.9091768214421766,
0.95839339618489772, 0.98084590050268738, 0.98084590050268738, 1.0,
0.87133729896452883, 0.68931975510487975, 0.53253164562509037,
0.40290179636148565, 0.25158600688780147, 0.075040783034257555, 0.0,
0.11140084021439872, 0.19093147906707164, 0.33709981167608111,
0.22569897146168236, 0.20486075460009459, 0.071570084200099124,
0.12308073303615508, 0.064388311045069938, 0.14046399667265153,
0.14358230601885319, 0.4769156393521865, 0.72129562484892418,
0.73977455716586071, 0.73977455716586071, 0.73977455716586071,
0.72652141247182511, 0.3931880791384918, 0.05985474580515842,
0.041828793774319611, 0.13651102464332113, 0.13651102464332113,
0.09468223086900153, 0.20641733088085135, 0.20641733088085135,
0.53975066421418472, 0.66666666666666663, 1.0, 0.80979187990446944,
0.52627089730467358, 0.21630842715796614, 0.13186625725008391,
0.19157509528924946, 0.23002775072306048, 0.19794533113190219,
0.26648682042187771, 0.53799663513477425, 0.84472934472934524, 1.0, 1.0,
1.0, 0.9001416430594924, 0.9001416430594924, 0.9001416430594924, 1.0,
1.0, 0.95388127853881288, 0.80502283105022787, 0.73378995433789607,
0.44657534246574976, 0.31162596077283883, 0.1346381969157758,
0.24406880189798374, 0.19454329774614632, 0.10943060498220793, 0.0, 0.0,
0.0, 0.013511438661139167, 0.013511438661139167, 0.037611086643809695,
0.02409964798267053, 0.02409964798267053, 0.0, 0.0023960535588436823,
0.0023960535588436823, 0.038990256457394047, 0.036594202898550365,
0.036594202898550365, 0.0, 0.019971969166083143, 0.019971969166083143,
0.084659393737649788]
self.percent_d_period_10_expected = [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan,
0.92146520146520128, 0.91575817641229118, 0.58572844400396684,
0.30411349572235413, 0.055021985976729616, 0.051718385051720613, 0.0,
0.016303044833373897, 0.016303044833373897, 0.016303044833373897,
0.075475396980983836, 0.26098071612992008, 0.41952090525521496,
0.51557598817047035, 0.5102475297437149, 0.62610001286055394,
0.72630722047261032, 0.70065047125978808, 0.71604938271604668,
0.54509832254305113, 0.39057821103369261, 0.10078662733530003,
0.18751264926128319, 0.46144505160898591, 0.79477838494231923,
0.9405990690143694, 0.94355570683004286, 0.76837985023825706,
0.59938733832539137, 0.52218062173814372, 0.66048332198774662,
0.81224159742700186, 0.94589260718420665, 0.96361166402907694,
0.98084590050268738, 0.98084590050268738, 1.0, 0.92729536258947975,
0.82790116704143812, 0.73261586190814898, 0.62149696990690473,
0.38755783212161315, 0.14950980392156885, 0.0, 0.080964413560749085,
0.13876605601179157, 0.284934388620801, 0.20396997506005191,
0.17611526210104891, 0.038288283357935902, 0.089798932193991862,
0.059852002701952366, 0.14046399667265153, 0.14358230601885319,
0.37756828619772614, 0.53879622915155967, 0.55727516146849621,
0.65662251462295651, 0.73977455716586071, 0.76621708413615852,
0.43288375080282515, 0.099550417469491795, 0.041828793774319611,
0.13651102464332113, 0.13651102464332113, 0.09468223086900153,
0.12363814420369711, 0.12363814420369711, 0.41286271620772635,
0.62255790533736255, 0.95589123867069592, 0.80979187990446944,
0.52627089730467358, 0.21630842715796614, 0.13186625725008391,
0.20726714431934493, 0.26475605595359963, 0.23267363636244134,
0.17625511512541078, 0.21904237754540393, 0.52577508713997501,
0.79031370390754041, 1.0, 1.0, 0.9001416430594924, 0.9001416430594924,
0.9001416430594924, 1.0, 1.0, 0.97082611207394587, 0.84790001855961783,
0.7766671418472858, 0.47250769644000673, 0.31162596077283883,
0.1346381969157758, 0.24406880189798374, 0.19454329774614632,
0.10943060498220793, 0.0, 0.0, 0.0, 0.013511438661139167,
0.013511438661139167, 0.032638883320499211, 0.019127444659360039,
0.019127444659360039, 0.0, 0.0021513540875722457, 0.0021513540875722457,
0.036043971537236423, 0.033892617449664174, 0.033892617449664174, 0.0,
0.012351029252437973, 0.012351029252437973, 0.070153250755727301]
def test_percent_k_period_6(self):
period = 6
percent_k = stochastic.percent_k(self.data, period)
np.testing.assert_array_equal(percent_k, self.percent_k_period_6_expected)
def test_percent_k_period_8(self):
period = 8
percent_k = stochastic.percent_k(self.data, period)
np.testing.assert_array_equal(percent_k, self.percent_k_period_8_expected)
def test_percent_k_period_10(self):
period = 10
percent_k = stochastic.percent_k(self.data, period)
np.testing.assert_array_equal(percent_k, self.percent_k_period_10_expected)
def test_percent_k_invalid_period(self):
period = 128
with self.assertRaises(Exception):
stochastic.percent_k(self.data, period)
def test_percent_d_period_6(self):
period = 6
percent_d = stochastic.percent_d(self.data, period)
np.testing.assert_array_equal(percent_d, self.percent_d_period_6_expected)
def test_percent_d_period_8(self):
period = 8
percent_d = stochastic.percent_d(self.data, period)
np.testing.assert_array_equal(percent_d, self.percent_d_period_8_expected)
def test_percent_d_period_10(self):
period = 10
percent_d = stochastic.percent_d(self.data, period)
np.testing.assert_array_equal(percent_d, self.percent_d_period_10_expected)
def test_percent_d_invalid_period(self):
period = 128
with self.assertRaises(Exception) as cm:
stochastic.percent_d(self.data, period)
expected = "Error: data_len < period"
self.assertEqual(str(cm.exception), expected)
| mit | 5,619,512,950,334,885,000 | 61.951613 | 83 | 0.711248 | false | 2.240207 | true | false | false |
edcast-inc/edx-platform-edcast | common/djangoapps/student/tests/test_login.py | 1 | 25194 | '''
Tests for student activation and login
'''
import json
import unittest
from unittest import skip
from django.test import TestCase
from django.test.client import Client
from django.test.utils import override_settings
from django.conf import settings
from django.core.cache import cache
from django.core.urlresolvers import reverse, NoReverseMatch
from django.http import HttpResponseBadRequest, HttpResponse
import httpretty
from mock import patch
from social.apps.django_app.default.models import UserSocialAuth
from external_auth.models import ExternalAuthMap
from student.tests.factories import UserFactory, RegistrationFactory, UserProfileFactory
from student.views import login_oauth_token
from third_party_auth.tests.utils import (
ThirdPartyOAuthTestMixin,
ThirdPartyOAuthTestMixinFacebook,
ThirdPartyOAuthTestMixinGoogle
)
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
class LoginTest(TestCase):
'''
Test student.views.login_user() view
'''
def setUp(self):
super(LoginTest, self).setUp()
# Create one user and save it to the database
self.user = UserFactory.build(username='test', email='[email protected]')
self.user.set_password('test_password')
self.user.save()
# Create a registration for the user
RegistrationFactory(user=self.user)
# Create a profile for the user
UserProfileFactory(user=self.user)
# Create the test client
self.client = Client()
cache.clear()
# Store the login url
try:
self.url = reverse('login_post')
except NoReverseMatch:
self.url = reverse('login')
def test_login_success(self):
response, mock_audit_log = self._login_response('[email protected]', 'test_password', patched_audit_log='student.models.AUDIT_LOG')
self._assert_response(response, success=True)
self._assert_audit_log(mock_audit_log, 'info', [u'Login success', u'[email protected]'])
@patch.dict("django.conf.settings.FEATURES", {'SQUELCH_PII_IN_LOGS': True})
def test_login_success_no_pii(self):
response, mock_audit_log = self._login_response('[email protected]', 'test_password', patched_audit_log='student.models.AUDIT_LOG')
self._assert_response(response, success=True)
self._assert_audit_log(mock_audit_log, 'info', [u'Login success'])
self._assert_not_in_audit_log(mock_audit_log, 'info', [u'[email protected]'])
def test_login_success_unicode_email(self):
unicode_email = u'test' + unichr(40960) + u'@edx.org'
self.user.email = unicode_email
self.user.save()
response, mock_audit_log = self._login_response(unicode_email, 'test_password', patched_audit_log='student.models.AUDIT_LOG')
self._assert_response(response, success=True)
self._assert_audit_log(mock_audit_log, 'info', [u'Login success', unicode_email])
def test_login_fail_no_user_exists(self):
nonexistent_email = u'[email protected]'
response, mock_audit_log = self._login_response(nonexistent_email, 'test_password')
self._assert_response(response, success=False,
value='Email or password is incorrect')
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'Unknown user email', nonexistent_email])
@patch.dict("django.conf.settings.FEATURES", {'ADVANCED_SECURITY': True})
def test_login_fail_incorrect_email_with_advanced_security(self):
nonexistent_email = u'[email protected]'
response, mock_audit_log = self._login_response(nonexistent_email, 'test_password')
self._assert_response(response, success=False,
value='Email or password is incorrect')
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'Unknown user email', nonexistent_email])
@patch.dict("django.conf.settings.FEATURES", {'SQUELCH_PII_IN_LOGS': True})
def test_login_fail_no_user_exists_no_pii(self):
nonexistent_email = u'[email protected]'
response, mock_audit_log = self._login_response(nonexistent_email, 'test_password')
self._assert_response(response, success=False,
value='Email or password is incorrect')
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'Unknown user email'])
self._assert_not_in_audit_log(mock_audit_log, 'warning', [nonexistent_email])
def test_login_fail_wrong_password(self):
response, mock_audit_log = self._login_response('[email protected]', 'wrong_password')
self._assert_response(response, success=False,
value='Email or password is incorrect')
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'password for', u'[email protected]', u'invalid'])
@patch.dict("django.conf.settings.FEATURES", {'SQUELCH_PII_IN_LOGS': True})
def test_login_fail_wrong_password_no_pii(self):
response, mock_audit_log = self._login_response('[email protected]', 'wrong_password')
self._assert_response(response, success=False,
value='Email or password is incorrect')
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'password for', u'invalid'])
self._assert_not_in_audit_log(mock_audit_log, 'warning', [u'[email protected]'])
def test_login_not_activated(self):
# De-activate the user
self.user.is_active = False
self.user.save()
# Should now be unable to login
response, mock_audit_log = self._login_response('[email protected]', 'test_password')
self._assert_response(response, success=False,
value="This account has not been activated")
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'Account not active for user'])
@patch.dict("django.conf.settings.FEATURES", {'SQUELCH_PII_IN_LOGS': True})
def test_login_not_activated_no_pii(self):
# De-activate the user
self.user.is_active = False
self.user.save()
# Should now be unable to login
response, mock_audit_log = self._login_response('[email protected]', 'test_password')
self._assert_response(response, success=False,
value="This account has not been activated")
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'Account not active for user'])
self._assert_not_in_audit_log(mock_audit_log, 'warning', [u'test'])
def test_login_unicode_email(self):
unicode_email = u'[email protected]' + unichr(40960)
response, mock_audit_log = self._login_response(unicode_email, 'test_password')
self._assert_response(response, success=False)
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', unicode_email])
def test_login_unicode_password(self):
unicode_password = u'test_password' + unichr(1972)
response, mock_audit_log = self._login_response('[email protected]', unicode_password)
self._assert_response(response, success=False)
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'password for', u'[email protected]', u'invalid'])
def test_logout_logging(self):
response, _ = self._login_response('[email protected]', 'test_password')
self._assert_response(response, success=True)
logout_url = reverse('logout')
with patch('student.models.AUDIT_LOG') as mock_audit_log:
response = self.client.post(logout_url)
self.assertEqual(response.status_code, 302)
self._assert_audit_log(mock_audit_log, 'info', [u'Logout', u'test'])
def test_login_user_info_cookie(self):
response, _ = self._login_response('[email protected]', 'test_password')
self._assert_response(response, success=True)
# Verify the format of the "user info" cookie set on login
cookie = self.client.cookies[settings.EDXMKTG_USER_INFO_COOKIE_NAME]
user_info = json.loads(cookie.value)
# Check that the version is set
self.assertEqual(user_info["version"], settings.EDXMKTG_USER_INFO_COOKIE_VERSION)
# Check that the username and email are set
self.assertEqual(user_info["username"], self.user.username)
self.assertEqual(user_info["email"], self.user.email)
# Check that the URLs are absolute
for url in user_info["header_urls"].values():
self.assertIn("http://testserver/", url)
@skip('we skip in edcast')
def test_logout_deletes_mktg_cookies(self):
response, _ = self._login_response('[email protected]', 'test_password')
self._assert_response(response, success=True)
# Check that the marketing site cookies have been set
self.assertIn(settings.EDXMKTG_LOGGED_IN_COOKIE_NAME, self.client.cookies)
self.assertIn(settings.EDXMKTG_USER_INFO_COOKIE_NAME, self.client.cookies)
# Log out
logout_url = reverse('logout')
response = self.client.post(logout_url)
# Check that the marketing site cookies have been deleted
# (cookies are deleted by setting an expiration date in 1970)
for cookie_name in [settings.EDXMKTG_LOGGED_IN_COOKIE_NAME, settings.EDXMKTG_USER_INFO_COOKIE_NAME]:
cookie = self.client.cookies[cookie_name]
self.assertIn("01-Jan-1970", cookie.get('expires'))
@override_settings(
EDXMKTG_LOGGED_IN_COOKIE_NAME=u"unicode-logged-in",
EDXMKTG_USER_INFO_COOKIE_NAME=u"unicode-user-info",
)
@skip('we skip in edcast')
def test_unicode_mktg_cookie_names(self):
# When logged in cookie names are loaded from JSON files, they may
# have type `unicode` instead of `str`, which can cause errors
# when calling Django cookie manipulation functions.
response, _ = self._login_response('[email protected]', 'test_password')
self._assert_response(response, success=True)
response = self.client.post(reverse('logout'))
self.assertRedirects(response, "/")
@patch.dict("django.conf.settings.FEATURES", {'SQUELCH_PII_IN_LOGS': True})
def test_logout_logging_no_pii(self):
response, _ = self._login_response('[email protected]', 'test_password')
self._assert_response(response, success=True)
logout_url = reverse('logout')
with patch('student.models.AUDIT_LOG') as mock_audit_log:
response = self.client.post(logout_url)
self.assertEqual(response.status_code, 302)
self._assert_audit_log(mock_audit_log, 'info', [u'Logout'])
self._assert_not_in_audit_log(mock_audit_log, 'info', [u'test'])
def test_login_ratelimited_success(self):
# Try (and fail) logging in with fewer attempts than the limit of 30
# and verify that you can still successfully log in afterwards.
for i in xrange(20):
password = u'test_password{0}'.format(i)
response, _audit_log = self._login_response('[email protected]', password)
self._assert_response(response, success=False)
# now try logging in with a valid password
response, _audit_log = self._login_response('[email protected]', 'test_password')
self._assert_response(response, success=True)
def test_login_ratelimited(self):
        # try logging in 30 times, the default limit on the number of failed
        # login attempts in one 5-minute period before the rate gets limited
for i in xrange(30):
password = u'test_password{0}'.format(i)
self._login_response('[email protected]', password)
# check to see if this response indicates that this was ratelimited
response, _audit_log = self._login_response('[email protected]', 'wrong_password')
self._assert_response(response, success=False, value='Too many failed login attempts')
@patch.dict("django.conf.settings.FEATURES", {'PREVENT_CONCURRENT_LOGINS': True})
def test_single_session(self):
creds = {'email': '[email protected]', 'password': 'test_password'}
client1 = Client()
client2 = Client()
response = client1.post(self.url, creds)
self._assert_response(response, success=True)
# Reload the user from the database
self.user = UserFactory.FACTORY_FOR.objects.get(pk=self.user.pk)
self.assertEqual(self.user.profile.get_meta()['session_id'], client1.session.session_key)
# second login should log out the first
response = client2.post(self.url, creds)
self._assert_response(response, success=True)
try:
# this test can be run with either lms or studio settings
# since studio does not have a dashboard url, we should
# look for another url that is login_required, in that case
url = reverse('dashboard')
except NoReverseMatch:
url = reverse('upload_transcripts')
response = client1.get(url)
# client1 will be logged out
self.assertEqual(response.status_code, 302)
@patch.dict("django.conf.settings.FEATURES", {'PREVENT_CONCURRENT_LOGINS': True})
def test_single_session_with_url_not_having_login_required_decorator(self):
        # access the logout url; since it does not have the login-required decorator,
        # it avoids the redirect and goes into enforce_single_login
creds = {'email': '[email protected]', 'password': 'test_password'}
client1 = Client()
client2 = Client()
response = client1.post(self.url, creds)
self._assert_response(response, success=True)
self.assertEqual(self.user.profile.get_meta()['session_id'], client1.session.session_key)
# second login should log out the first
response = client2.post(self.url, creds)
self._assert_response(response, success=True)
url = reverse('logout')
response = client1.get(url)
self.assertEqual(response.status_code, 302)
def test_change_enrollment_400(self):
"""
Tests that a 400 in change_enrollment doesn't lead to a 404
and in fact just logs in the user without incident
"""
# add this post param to trigger a call to change_enrollment
extra_post_params = {"enrollment_action": "enroll"}
with patch('student.views.change_enrollment') as mock_change_enrollment:
mock_change_enrollment.return_value = HttpResponseBadRequest("I am a 400")
response, _ = self._login_response(
'[email protected]',
'test_password',
extra_post_params=extra_post_params,
)
response_content = json.loads(response.content)
self.assertIsNone(response_content["redirect_url"])
self._assert_response(response, success=True)
def test_change_enrollment_200_no_redirect(self):
"""
Tests "redirect_url" is None if change_enrollment returns a HttpResponse
with no content
"""
# add this post param to trigger a call to change_enrollment
extra_post_params = {"enrollment_action": "enroll"}
with patch('student.views.change_enrollment') as mock_change_enrollment:
mock_change_enrollment.return_value = HttpResponse()
response, _ = self._login_response(
'[email protected]',
'test_password',
extra_post_params=extra_post_params,
)
response_content = json.loads(response.content)
self.assertIsNone(response_content["redirect_url"])
self._assert_response(response, success=True)
def _login_response(self, email, password, patched_audit_log='student.views.AUDIT_LOG', extra_post_params=None):
''' Post the login info '''
post_params = {'email': email, 'password': password}
if extra_post_params is not None:
post_params.update(extra_post_params)
with patch(patched_audit_log) as mock_audit_log:
result = self.client.post(self.url, post_params)
return result, mock_audit_log
def _assert_response(self, response, success=None, value=None):
'''
Assert that the response had status 200 and returned a valid
JSON-parseable dict.
If success is provided, assert that the response had that
value for 'success' in the JSON dict.
If value is provided, assert that the response contained that
value for 'value' in the JSON dict.
'''
self.assertEqual(response.status_code, 200)
try:
response_dict = json.loads(response.content)
except ValueError:
self.fail("Could not parse response content as JSON: %s"
% str(response.content))
if success is not None:
self.assertEqual(response_dict['success'], success)
if value is not None:
msg = ("'%s' did not contain '%s'" %
(str(response_dict['value']), str(value)))
self.assertTrue(value in response_dict['value'], msg)
def _assert_audit_log(self, mock_audit_log, level, log_strings):
"""
Check that the audit log has received the expected call as its last call.
"""
method_calls = mock_audit_log.method_calls
name, args, _kwargs = method_calls[-1]
self.assertEquals(name, level)
self.assertEquals(len(args), 1)
format_string = args[0]
for log_string in log_strings:
self.assertIn(log_string, format_string)
def _assert_not_in_audit_log(self, mock_audit_log, level, log_strings):
"""
Check that the audit log has received the expected call as its last call.
"""
method_calls = mock_audit_log.method_calls
name, args, _kwargs = method_calls[-1]
self.assertEquals(name, level)
self.assertEquals(len(args), 1)
format_string = args[0]
for log_string in log_strings:
self.assertNotIn(log_string, format_string)
class ExternalAuthShibTest(ModuleStoreTestCase):
"""
Tests how login_user() interacts with ExternalAuth, in particular Shib
"""
def setUp(self):
super(ExternalAuthShibTest, self).setUp()
self.course = CourseFactory.create(
org='Stanford',
number='456',
display_name='NO SHIB',
user_id=self.user.id,
)
self.shib_course = CourseFactory.create(
org='Stanford',
number='123',
display_name='Shib Only',
enrollment_domain='shib:https://idp.stanford.edu/',
user_id=self.user.id,
)
self.user_w_map = UserFactory.create(email='[email protected]')
self.extauth = ExternalAuthMap(external_id='[email protected]',
external_email='[email protected]',
external_domain='shib:https://idp.stanford.edu/',
external_credentials="",
user=self.user_w_map)
self.user_w_map.save()
self.extauth.save()
self.user_wo_map = UserFactory.create(email='[email protected]')
self.user_wo_map.save()
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
def test_login_page_redirect(self):
"""
Tests that when a shib user types their email address into the login page, they get redirected
to the shib login.
"""
response = self.client.post(reverse('login'), {'email': self.user_w_map.email, 'password': ''})
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertEqual(obj, {
'success': False,
'redirect': reverse('shib-login'),
})
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
def test_login_required_dashboard(self):
"""
        Tests the redirect when @login_required is applied to the dashboard, which should always go
        to the normal login, since there is no course context
"""
response = self.client.get(reverse('dashboard'))
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], 'http://testserver/login?next=/dashboard')
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
def test_externalauth_login_required_course_context(self):
"""
        Tests the redirects when visiting a course-specific URL with @login_required.
        These should vary by course depending on its enrollment_domain
"""
TARGET_URL = reverse('courseware', args=[self.course.id.to_deprecated_string()]) # pylint: disable=invalid-name
noshib_response = self.client.get(TARGET_URL, follow=True)
self.assertEqual(noshib_response.redirect_chain[-1],
('http://testserver/login?next={url}'.format(url=TARGET_URL), 302))
self.assertContains(noshib_response, ("Sign in or Register | {platform_name}"
.format(platform_name=settings.PLATFORM_NAME)))
self.assertEqual(noshib_response.status_code, 200)
TARGET_URL_SHIB = reverse('courseware', args=[self.shib_course.id.to_deprecated_string()]) # pylint: disable=invalid-name
shib_response = self.client.get(**{'path': TARGET_URL_SHIB,
'follow': True,
'REMOTE_USER': self.extauth.external_id,
'Shib-Identity-Provider': 'https://idp.stanford.edu/'})
# Test that the shib-login redirect page with ?next= and the desired page are part of the redirect chain
# The 'courseware' page actually causes a redirect itself, so it's not the end of the chain and we
# won't test its contents
self.assertEqual(shib_response.redirect_chain[-3],
('http://testserver/shib-login/?next={url}'.format(url=TARGET_URL_SHIB), 302))
self.assertEqual(shib_response.redirect_chain[-2],
('http://testserver{url}'.format(url=TARGET_URL_SHIB), 302))
self.assertEqual(shib_response.status_code, 200)
@httpretty.activate
class LoginOAuthTokenMixin(ThirdPartyOAuthTestMixin):
"""
Mixin with tests for the login_oauth_token view. A TestCase that includes
this must define the following:
BACKEND: The name of the backend from python-social-auth
USER_URL: The URL of the endpoint that the backend retrieves user data from
UID_FIELD: The field in the user data that the backend uses as the user id
"""
def setUp(self):
super(LoginOAuthTokenMixin, self).setUp()
self.url = reverse(login_oauth_token, kwargs={"backend": self.BACKEND})
def _assert_error(self, response, status_code, error):
"""Assert that the given response was a 400 with the given error code"""
self.assertEqual(response.status_code, status_code)
self.assertEqual(json.loads(response.content), {"error": error})
self.assertNotIn("partial_pipeline", self.client.session)
def test_success(self):
self._setup_provider_response(success=True)
response = self.client.post(self.url, {"access_token": "dummy"})
self.assertEqual(response.status_code, 204)
self.assertEqual(self.client.session['_auth_user_id'], self.user.id) # pylint: disable=no-member
def test_invalid_token(self):
self._setup_provider_response(success=False)
response = self.client.post(self.url, {"access_token": "dummy"})
self._assert_error(response, 401, "invalid_token")
def test_missing_token(self):
response = self.client.post(self.url)
self._assert_error(response, 400, "invalid_request")
def test_unlinked_user(self):
UserSocialAuth.objects.all().delete()
self._setup_provider_response(success=True)
response = self.client.post(self.url, {"access_token": "dummy"})
self._assert_error(response, 401, "invalid_token")
def test_get_method(self):
response = self.client.get(self.url, {"access_token": "dummy"})
self.assertEqual(response.status_code, 405)
# This is necessary because cms does not implement third party auth
@unittest.skipUnless(settings.FEATURES.get("ENABLE_THIRD_PARTY_AUTH"), "third party auth not enabled")
class LoginOAuthTokenTestFacebook(LoginOAuthTokenMixin, ThirdPartyOAuthTestMixinFacebook, TestCase):
"""Tests login_oauth_token with the Facebook backend"""
pass
# This is necessary because cms does not implement third party auth
@unittest.skipUnless(settings.FEATURES.get("ENABLE_THIRD_PARTY_AUTH"), "third party auth not enabled")
class LoginOAuthTokenTestGoogle(LoginOAuthTokenMixin, ThirdPartyOAuthTestMixinGoogle, TestCase):
"""Tests login_oauth_token with the Google backend"""
pass
| agpl-3.0 | 888,919,011,720,206,700 | 45.828996 | 134 | 0.645114 | false | 3.910897 | true | false | false |
yantrabuddhi/blocos | tabs/UploadTab.py | 1 | 13455 | # -*- coding: utf-8 -*-
# This file is part of the Monitor program
# Monitor is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation (FSF); either version 3 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation (FSF) Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# Centro de Tecnologia da Informação Renato Archer, Campinas-SP, Brazil
# Project carried out with funds from the Conselho Nacional de Desenvolvimento Científico e Tecnológico (CNPQ)
# This code is part of the BR-Gogo project, available at http://sourceforge.net/projects/br-gogo/
import os
if os.name=='nt':
import win32api
import win32con
from gettext import gettext as _
try:
import gtk
except ImportError:
    #print _('GTK+ Runtime Environment needs to be installed:')
    print _('GTK+ Runtime Environment needs to be installed:')
print "http://downloads.sourceforge.net/gladewin32/gtk-2.12.9-win32-1.exe?modtime=1208401479&big_mirror=0"
raw_input()
from Tab import Tab
from pyLogoCompiler.Exceptions import ConnectionProblem
import pango
import math
from cairoplot import plots
from cairoplot.series import Series
# >>>>>>>>>>>>>>>>> temp
# For non-dev machines, a quick hack to attempt to show the traceback in a msg dialog
import sys
import traceback
def logexception(type, value, tb):
text = ' '.join(t for t in traceback.format_exception(type, value, tb))
print text
try:
dialog = gtk.MessageDialog(None, gtk.DIALOG_MODAL, \
gtk.MESSAGE_INFO, \
gtk.BUTTONS_OK, \
text)
dialog.run()
dialog.destroy()
except:
pass
sys.excepthook = logexception
# <<<<<<<<<<<<<<<<<<< temp
class UploadTab(Tab):
LAST_DATA_FILENAME = '.last_data.txt'
defaultTab = 9
def __init__(self, gui, GoGo, liststoreSensorsTypes, sensorTypes):
self.gui = gui
self.GoGo = GoGo
self.sensorTypes = sensorTypes
self.dataFilename = ""
self.data = []
self.colDataRaw = []
self.colDataMapped = []
self.textviewData = self.gui.get_widget('textviewData')
self.textviewData.modify_font(pango.FontDescription('monospace'))
self.textviewBuffer = gtk.TextBuffer()
self.textviewData.set_buffer(self.textviewBuffer)
self.spinbuttonColumns = self.gui.get_widget('spinbuttonColumns')
self.checkbuttonShowHeaders = self.gui.get_widget('checkbuttonShowHeaders')
self.checkbuttonTwoLineHeader = self.gui.get_widget('checkbuttonTwoLineHeader')
self.radiobuttonUploadAuto = self.gui.get_widget("radiobuttonUploadAuto")
self.uploadCount = self.gui.get_widget("spinbuttonUploadCount")
self.progressbar = self.gui.get_widget('progressbarUpload')
self.lblProgress = self.gui.get_widget('labelValuesUploaded')
self.colSpec = []
for c in range(8):
w = self.gui.get_widget('comboboxC%i' % c)
w.set_active(0)
w.set_sensitive(c == 0)
w.set_model(liststoreSensorsTypes)
self.colSpec.append(w)
try:
f=open(self.LAST_DATA_FILENAME,'r')
self.textviewBuffer.set_text(f.read())
f.close()
except:
pass
self.graphContainer = None
self.graphWidth = 50
self.graphHeight = 50
self.graphData = None
self.graph = None
self.graphVisible = False
self.graphUpdateRequired = False
self.notebookDataView = self.gui.get_widget('notebookDataView')
#self.notebookDataView.set_current_page(0)
def buttonStartUpload_clicked_cb(self,widget):
try:
self.progressbar.set_fraction(0.0)
self.lblProgress.set_text(_("%i Values Uploaded") % 0)
while gtk.events_pending():
gtk.main_iteration(False)
if self.radiobuttonUploadAuto.get_active():
self.data = self.GoGo.autoUpload(None, self.uploadProgress_cb)
else:
count = self.uploadCount.get_value_as_int()
self.data = self.GoGo.autoUpload(count, self.uploadProgress_cb)
except ConnectionProblem:
self.showWarning(_("Check GoGo plugged in, turned on and connected"))
return
except:
self.showError(_("Error communicating"))
return
else:
self.lblProgress.set_text(_("%i Values Uploaded") % len(self.data))
if self.refreshTextView():
self.showInfo(_("Data successfully uploaded."), self.gui.get_widget('mainWindow'))
def buttonSaveData_clicked_cb(self,widget):
if len(self.data) == 0:
return
dialog = gtk.FileChooserDialog(_("Save As.."), None, gtk.FILE_CHOOSER_ACTION_SAVE,
(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_SAVE, gtk.RESPONSE_OK))
dialog.set_default_response(gtk.RESPONSE_OK)
response = dialog.run()
if response == gtk.RESPONSE_OK:
self.dataFilename = dialog.get_filename()
try:
FILE = open(self.dataFilename,"w")
FILE.write(self.dataFormattedForSaving())
FILE.close()
except:
self.showError(Exception.__str__())
dialog.destroy()
def buttonClearData_clicked_cb(self,widget):
self.data = []
self.colDataRaw = []
self.colDataMapped = []
self.dataFilename = ""
self.progressbar.set_fraction(0.0)
self.lblProgress.set_text(_("%i Values Uploaded") % 0)
self.refreshTextView()
def spinbuttonColumns_changed_cb(self,widget):
cc = self.spinbuttonColumns.get_value_as_int()
for c in range(8):
self.colSpec[c].set_sensitive(c < cc)
self.refreshTextView()
def colSpec_changed_cb(self,widget):
self.refreshTextView()
def checkbuttonShowHeaders_toggled_cb(self,widget):
self.checkbuttonTwoLineHeader.set_sensitive(widget.get_active())
self.refreshTextView()
def checkbuttonTwoLineHeader_toggled_cb(self,widget):
self.refreshTextView()
def notebookDataView_switch_page_cb(self,widget,page,page_num):
self.graphVisible = page_num == 1
if self.graphVisible:
self.refreshGraph()
def getSelectedSensors(self):
sensorIndexes = [w.get_active() for w in self.colSpec[:self.spinbuttonColumns.get_value_as_int()]]
for i in [i for i,v in enumerate(sensorIndexes) if v == -1]:
sensorIndexes[i] = 0
try:
return [self.sensorTypes[n] for n in sensorIndexes]
except:
return None
def calibrateData(self):
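        # maps each raw data column through the calibration curve of its selected sensor and
        # pads shorter columns with empty strings so that all columns have the same number of rows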
self.colDataMapped = []
maxRows = max([len(c) for c in self.colDataRaw])
sensors = self.getSelectedSensors()
for c,data in enumerate(self.colDataRaw):
m = [round(sensors[c].get_new_value(v),3) for v in data]
if len(m) < maxRows:
m += [''] * (maxRows - len(m))
self.colDataMapped += [m]
def getSensorHeaders(self):
self.useHdrs = False
self.hdrs = []
if not self.checkbuttonShowHeaders.get_active():
return False
sensors = self.getSelectedSensors()
if not sensors:
return False
self.hdrs = [[s.name,s.unit] for s in sensors]
for i in [i for i,h in enumerate(self.hdrs) if h[1] == None or h[1] == '']:
self.hdrs[i][1] = 'None'
self.useHdrs = True
return True
def csvHeaders(self):
if not self.useHdrs:
return ''
if not self.checkbuttonTwoLineHeader.get_active():
t = ','.join([('%s (%s)' % (h[0],h[1])) for h in self.hdrs]) + '\n'
return t
t = ','.join([h[0] for h in self.hdrs]) + '\n'
t += ','.join([h[1] for h in self.hdrs]) + '\n'
return t
def displayHeaders(self):
if not self.useHdrs:
return ''
t = ''
if not self.checkbuttonTwoLineHeader.get_active():
hdrs = [('%s (%s)' % (h[0],h[1])) for h in self.hdrs]
hdrs = [h.rjust(max(len(h),self.defaultTab), ' ') for h in hdrs]
self.hdrTabs = []
for h in hdrs:
t += h + ' '
self.hdrTabs.extend([len(h)])
return t + '\n' + ('-' * len(t)) + '\n'
hdrs0 = []
hdrs1 = []
for h in self.hdrs:
w = max(len(h[0]), len(h[1]), self.defaultTab)
hdrs0 += [h[0].rjust(w, ' ')]
hdrs1 += [h[1].rjust(w, ' ')]
self.hdrTabs = []
for h in hdrs0:
t += h + ' '
self.hdrTabs.extend([len(h)])
w = len(t)
t += '\n'
for h in hdrs1:
t += h + ' '
return t + '\n' + ('-' * w) + '\n'
def dataFormattedForSaving(self):
t = self.csvHeaders()
for line in self.colDataMapped:
t = t + ','.join(map(str, line)) + '\n'
return t
def dataFormattedForDisplay(self):
t = self.displayHeaders()
if len(self.colDataMapped) == 1:
d = zip(self.colDataMapped[0])
else:
d = zip(*self.colDataMapped)
for r,rowData in enumerate(d):
for c,v in enumerate(rowData):
if self.useHdrs:
t = t + str(v).rjust(self.hdrTabs[c], ' ') + ' '
else:
t = t + str(v).rjust(self.defaultTab, ' ') + ' '
t = t + '\n'
return t
def refreshTextView(self):
if len(self.data) == 0:
self.textviewBuffer.set_text("")
return False
if self.getSensorHeaders():
nCols = self.spinbuttonColumns.get_value_as_int()
if nCols == 1:
self.colDataRaw = [self.data]
else:
self.colDataRaw = list(self.data[i::nCols] for i in range(nCols))
            for i in range(nCols-2, -1, -1):
                # trim any extra trailing sample so all columns end up the same length
                if len(self.colDataRaw[i]) > len(self.colDataRaw[i+1]):
                    self.colDataRaw[i].pop()
self.calibrateData()
self.textviewBuffer.set_text(self.dataFormattedForDisplay())
self.graphUpdateRequired = True
self.refreshGraph()
return True
else:
self.showWarning(_("Please, add at least one sensor in Sensors Tab"))
return False
def refreshGraph(self):
if not (self.graphVisible and self.graphUpdateRequired): return
if self.graphContainer == None:
self.graphContainer = self.gui.get_widget("dataGraphContainer")
if self.graphContainer == None: return
r = self.graphContainer.get_allocation()
self.graphWidth, self.graphHeight = (r.width,r.height)
self.graph = None
data = {}
for c,t in enumerate(self.colDataMapped):
lbl = '%(colNum)i-%(name)s (%(units)s)' % \
{'colNum': c+1, 'name': self.hdrs[c][0], 'units': self.hdrs[c][1]}
data[lbl] = t
#if len(self.data) % self.spinbuttonColumns.get_value_as_int() > 0:
# self.showWarning(_("The graph can not be generated with this configuration.\nPlease check the number of columns."))
#else:
self.drawGraph(data,[str(x) for x in range(len(self.colDataMapped[0]))])
self.graphUpdateRequired = False
def drawGraph(self, data=[], xLabels=[]):
if data == {}: return
if self.graph != None:
self.graphContainer.remove(self.graph.handler)
self.graph = plots.DotLinePlot('gtk', data=data, x_labels=xLabels,
width=self.graphWidth, height=self.graphHeight, background="white",
border=5, axis=True, grid=True, series_legend = True)
self.graphContainer.add(self.graph.handler)
self.graph.handler.show()
def uploadProgress_cb(self, count, total):
self.progressbar.set_fraction(float(count) / total)
self.lblProgress.set_text(_('%i Values Uploaded' % count))
while gtk.events_pending():
gtk.main_iteration(False)
| gpl-3.0 | -6,296,304,755,429,825,000 | 33.242347 | 127 | 0.554943 | false | 3.703918 | false | false | false |
AnumSheraz/IP-Controlled-Robotic-Car | Manual-IP-Controlled-Robotic-Car/Code.py | 1 | 1696 |
import sys
from PyQt4 import QtGui, QtCore
import time, socket, json
from main import Ui_MainWindow
s=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
IP = "localhost"
PORT = 8001
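# each key press/release is sent to the receiver as a small JSON payload over UDP,
# e.g. {"FB": "F", "LR": ""} while W is held, and {"FB": "S", "LR": ""} once it is released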
class main_menu(QtGui.QMainWindow):
def __init__(self):
super(main_menu, self).__init__()
self.ui=Ui_MainWindow()
self.ui.setupUi(self)
self.show()
def keyPressEvent(self, event1):
verbose = {"FB":"", "LR":""}
if event1.key() == QtCore.Qt.Key_W:
#print "Up pressed"
verbose["FB"] = "F"
if event1.key() == QtCore.Qt.Key_S:
#print "D pressed"
verbose["FB"] = "B"
if event1.key() == QtCore.Qt.Key_A:
#print "L pressed"
verbose["LR"] = "L"
if event1.key() == QtCore.Qt.Key_D:
#print "R pressed"
verbose["LR"] = "R"
print verbose
json_data=json.dumps(verbose)
s.sendto((json_data), (IP, PORT))
def keyReleaseEvent(self, event):
verbose = {"FB":"", "LR":""}
if event.key() == QtCore.Qt.Key_W:
#print "Up rel"
verbose["FB"] = "S"
if event.key() == QtCore.Qt.Key_S:
#print "D rel"
verbose["FB"] = "S"
if event.key() == QtCore.Qt.Key_A:
#print "L pressed"
verbose["LR"] = "S"
if event.key() == QtCore.Qt.Key_D:
#print "R pressed"
verbose["LR"] = "S"
print verbose
json_data=json.dumps(verbose)
s.sendto((json_data), (IP, PORT))
def main():
app = QtGui.QApplication(sys.argv)
ex = main_menu()
app.exec_()
if __name__ == '__main__':
main()
| gpl-2.0 | 5,014,913,625,355,572,000 | 23.228571 | 49 | 0.504717 | false | 3.299611 | false | false | false |
be-cloud-be/horizon-addons | horizon/school_evaluations/wizard/evaluation_summary.py | 1 | 3973 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2015 be-cloud.be
# Jerome Sonnet <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from openerp import api, fields, models, _
from openerp.exceptions import UserError
from openerp.tools.safe_eval import safe_eval
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT
from dateutil.relativedelta import relativedelta
from datetime import datetime,date
import openerp.addons.decimal_precision as dp
_logger = logging.getLogger(__name__)
class EvaluationSummaryWizard(models.TransientModel):
_name = "school.evaluation.summary.wizard"
_description = "School Evaluation Summary Wizard"
year_id = fields.Many2one('school.year', string='Year', default=lambda self: self.env.user.current_year_id, ondelete='cascade')
domain_id = fields.Many2one('school.domain', string='Domain', ondelete='cascade')
session = fields.Selection([
('first','First Session'),
('second','Second Session'),
], string="Session")
@api.multi
def generate_summary(self):
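        # collect the wizard selections into a data dict and hand it to the
        # evaluation summary report rendered by ReportEvaluationSummary below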
self.ensure_one()
data = {}
data['year_id'] = self.year_id.id
data['domain_id'] = self.domain_id.id
data['session'] = self.session
return self.env['report'].get_action(self, 'school_evaluations.evaluation_summary_content', data=data)
class ReportEvaluationSummary(models.AbstractModel):
_name = 'report.school_evaluations.evaluation_summary_content'
@api.multi
def render_html(self, data):
_logger.info('render_html')
year_id = data['year_id']
session = data['session']
domain_id = data['domain_id']
if session == 'first':
states = ['postponed','awarded_first_session']
else:
states = ['awarded_second_session','failed']
if domain_id:
records = self.env['school.individual_bloc'].search([('year_id','=',year_id),('source_bloc_domain_id','=',domain_id),('state','in',states)],order="source_bloc_level, name")
else:
records = self.env['school.individual_bloc'].search([('year_id','=',year_id),('state','in',states)],order="source_bloc_level, name")
docs = [
{
"name" : 'Bac 1',
'blocs' : [],
},
{
"name" : 'Bac 2',
'blocs' : [],
},
{
"name" : 'Bac 3',
'blocs' : [],
},
{
"name" : 'Master 1',
'blocs' : [],
},
{
"name" : 'Master 2',
'blocs' : [],
},
]
for record in records:
docs[int(record.source_bloc_level)-1]['blocs'].append(record)
docargs = {
'doc_model': 'school.individual_bloc',
'docs': docs,
'year' : self.env['school.year'].browse(year_id).name,
}
return self.env['report'].render('school_evaluations.evaluation_summary_content', docargs) | agpl-3.0 | 5,508,467,179,672,360,000 | 36.490566 | 184 | 0.558017 | false | 4.151515 | false | false | false |
migihajami/memin | memin/frontend.py | 1 | 10715 | __author__ = 'algol'
import cherrypy
from jinja2 import Environment, PackageLoader
import memin.core as mc
from configparser import ConfigParser
class Menu:
def __init__(self):
self.menu = [
{'name': 'Главная', 'link': '/'},
{'name': 'Персоны', 'link': '/persons'},
{'name': 'Залы', 'link': '/classrooms'},
{'name': 'Занятия', 'link': '/lessons'},
{'name': 'Типы платежей', 'link': '/payment_types'}
]
class FrontendBase:
def __init__(self):
self.env = Environment(loader=PackageLoader('memin', 'templates'))
self.menu = Menu()
def get_template(self, template_name='index.html'):
return self.env.get_template(template_name)
class Main(FrontendBase):
def __init__(self):
super().__init__()
@staticmethod
def checkPassword(realm, username, password):
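        # reads credentials from config.ini; assumes a [users] section mapping user names to
        # single-quoted passwords, e.g.  admin = 'secret'  (the quotes are stripped below)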
c = ConfigParser()
c.read('config.ini')
users = {k: c['users'][k].strip("'") for k in c['users']}
if password == users.get(username, None):
return True
return False
@cherrypy.expose
def index(self, name=''):
return self.get_template('index.html').render(nick=name,
title='Main page',
h1='Главная страница',
menu=self.menu.menu
)
@cherrypy.expose
def halt(self):
cherrypy.engine.exit()
@cherrypy.expose
def persons(self):
return self.get_template('persons.html').render(
title='Main page',
h1='Персоны',
menu=self.menu.menu,
table_title='Персоны',
url_prefix='person',
ptypes=str({a.pk_value: a.name for a in mc.PaymentType.get_all()}),
classrooms=str({a.pk_value: a.name for a in mc.Classroom.get_all()}),
lessons=str({a.pk_value: a.name for a in mc.Lesson.get_all()})
)
@cherrypy.expose
def payment_types(self):
return self.get_template('payment_types.html').render(
title='Типы платежей',
h1='Типы платежей',
menu=self.menu.menu,
table_title='Типы платежей',
url_prefix='ptype'
)
@cherrypy.expose
def classrooms(self):
return self.get_template('classrooms.html').render(
title='Залы для занятий',
h1='Залы для занятий',
menu=self.menu.menu,
table_title='список залов',
url_prefix='classroom'
)
@cherrypy.expose
def lessons(self):
return self.get_template('lessons.html').render(
title='Занятия',
h1='Занятия',
menu=self.menu.menu,
table_title='Список занятий',
url_prefix='lesson'
)
class MeminCrud(FrontendBase):
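    # Common base class for the JSON CRUD endpoints below: subclasses override list/create/update/delete
    # and return dicts of the form {'Result': 'OK', 'Records': [...]}, apparently for a jTable-style
    # AJAX table in the templates (an assumption based on the response shape).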
def __init__(self):
super().__init__()
@cherrypy.expose
@cherrypy.tools.json_out()
def list(self, **args):
raise Exception("Not implemented yet")
@cherrypy.expose
@cherrypy.tools.json_out()
def create(self, **args):
raise Exception("Not implemented yet")
@cherrypy.expose
@cherrypy.tools.json_out()
def update(self, **args):
raise Exception("Not implemented yet")
@cherrypy.expose
@cherrypy.tools.json_out()
def delete(self, **args):
raise Exception("Not implemented yet")
class Person(MeminCrud):
def __init__(self):
super().__init__()
@cherrypy.expose
@cherrypy.tools.json_out()
def list(self, **args):
prs = mc.Person.get_all()
persons = [{'PersonID': p.pk_value,
'Fname': p.fname,
'Lname': p.lname,
'Phone': p.phone,
'Email': p.email,
'InsertDate': p.insert_date
} for p in prs]
res = {'Result': 'OK' if prs else 'ERROR', 'Records': persons, 'Args': args}
return res
@cherrypy.expose
@cherrypy.tools.json_out()
def create(self, **args):
p = mc.Person(args['Fname'], args['Lname'], args['Phone'], args['Email'])
args['PersonID'] = p.save()
return {'Result': 'OK', 'Record': args}
@cherrypy.expose
@cherrypy.tools.json_out()
def update(self, **args):
p = mc.Person.load(args['PersonID'])
p.fname = args['Fname']
p.lname = args['Lname']
p.phone = args['Phone']
p.email = args['Email']
p.save()
return {'Result': 'OK'}
class PaymentType(MeminCrud):
def __init__(self):
super().__init__()
@cherrypy.expose
@cherrypy.tools.json_out()
def list(self, **args):
ptypes = mc.PaymentType.get_all()
res = [{'Name': p.name,
'Comment': p.comment,
'PaymentTypeID': p.pk_value} for p in ptypes]
return {'Result': 'OK' if ptypes else 'ERROR', 'Records': res}
@cherrypy.expose
@cherrypy.tools.json_out()
def create(self, **args):
pt = mc.PaymentType(args['Name'], args['Comment'])
args['PaymenTypeID'] = pt.save()
return {'Result': 'OK', 'Record': args}
@cherrypy.expose
@cherrypy.tools.json_out()
def update(self, **args):
pt = mc.PaymentType.load(args['PaymentTypeID'])
pt.name = args['Name']
pt.comment = args['Comment']
pt.save()
return {'Result': 'OK'}
@cherrypy.expose
@cherrypy.tools.json_out()
def delete(self, **args):
raise Exception("Not implemented yet")
class Classroom(MeminCrud):
def __init__(self):
super().__init__()
@cherrypy.expose
@cherrypy.tools.json_out()
def list(self, **args):
cl = mc.Classroom.get_all()
res = [{'Name': c.name,
'Address': c.address,
'Comment': c.comment,
'Active': c.active,
'ClassroomID': c.pk_value} for c in cl]
return {'Result': 'OK' if cl else 'ERROR', 'Records': res}
@cherrypy.expose
@cherrypy.tools.json_out()
def create(self, **args):
cl = mc.Classroom(args['Name'],
args['Address'],
args['Comment'],
args['Active'] if 'Active' in args else 0
)
args['ClassroomID'] = cl.save()
return {'Result': 'OK', 'Record': args}
@cherrypy.expose
@cherrypy.tools.json_out()
def update(self, **args):
cl = mc.Classroom.load(args['ClassroomID'])
cl.comment = args['Comment']
cl.name = args['Name']
cl.active = args['Active'] if 'Active' in args else 0
cl.address = args['Address']
cl.save()
return {'Result': 'OK'}
@cherrypy.expose
@cherrypy.tools.json_out()
def delete(self, **args):
raise Exception("Not implemented yet")
class Lesson(MeminCrud):
def __init__(self):
super().__init__()
@cherrypy.expose
@cherrypy.tools.json_out()
def list(self, **args):
lsns = mc.Lesson.get_all()
res = [{'Name': l.name,
'Comment': l.comment,
'Duration': l.duration,
'LessonID': l.pk_value
} for l in lsns]
return {'Result': 'OK' if lsns else 'ERROR', 'Records': res}
@cherrypy.expose
@cherrypy.tools.json_out()
def create(self, **args):
l = mc.Lesson(args['Name'], args['Duration'], args['Comment'])
args['LessonID'] = l.save()
return {'Result': 'OK', 'Record': args}
@cherrypy.expose
@cherrypy.tools.json_out()
def update(self, **args):
l = mc.Lesson.load(args['LessonID'])
l.name = args['Name']
l.comment = args['Comment']
l.duration = args['Duration']
l.save()
return {'Result': 'OK'}
@cherrypy.expose
@cherrypy.tools.json_out()
def delete(self, **args):
raise Exception("Not implemented yet")
class Payment(MeminCrud):
def __init__(self):
super().__init__()
@cherrypy.expose
@cherrypy.tools.json_out()
def list(self, **args):
pl = mc.Payment.get_all({'PersonID': args['PersonID']})
res = [{'PersonID': p.person_id,
'PaymentType': p.payment_type_id,
'PaymentTypeID': p.payment_type_id,
'PaymentID': p.pk_value,
'Amount': p.amount,
'Date': '-'.join(reversed(p.date.split('.')))
} for p in pl]
return {'Result': 'OK' if pl else 'ERROR', 'Records': res}
@cherrypy.expose
@cherrypy.tools.json_out()
def create(self, **args):
p = mc.Payment(args['PersonID'], args['Amount'], args['PaymentType'])
args['PaymentID'] = p.save()
args['Date'] = p.date
return {'Result': 'OK', 'Record': args}
@cherrypy.expose
@cherrypy.tools.json_out()
def update(self, **args):
raise Exception("Not implemented yet")
@cherrypy.expose
@cherrypy.tools.json_out()
def delete(self, **args):
raise Exception("Not implemented yet")
class Visit(MeminCrud):
def __init__(self):
super().__init__()
@cherrypy.expose
@cherrypy.tools.json_out()
def list(self, PersonID, **args):
visits = mc.Visit.get_all({'PersonID': PersonID})
res = [{'VisitID': a.pk_value,
'Classroom': a.classroom_id,
'Lesson': a.lesson_id,
'Date': '-'.join(reversed(a.date.split('.')))
} for a in visits]
return {'Result': 'OK' if visits else 'ERROR', 'Records': res}
@cherrypy.expose
@cherrypy.tools.json_out()
def create(self, **args):
v = mc.Visit(args['PersonID'], args['Classroom'], args['Lesson'], args['Date'])
args['VisitID'] = v.save()
return {'Result': 'OK', 'Record': args}
@cherrypy.expose
@cherrypy.tools.json_out()
def update(self, **args):
v = mc.Visit.load(args.get('VisitID'))
if v:
v.classroom_id = args['Classroom']
v.lesson_id = args['Lesson']
v.date = args['Date']
v.save()
return {'Result': 'OK'}
return {'Result': 'ERROR'}
@cherrypy.expose
@cherrypy.tools.json_out()
def delete(self, **args):
raise Exception("Not implemented yet")
| bsd-3-clause | -6,257,122,399,343,155,000 | 29.134286 | 87 | 0.526216 | false | 3.623154 | false | false | false |
MShel/ttw | listener/packets/udpPacket.py | 1 | 1408 | from listener.packets.abstractPacket import AbstractPacket
from struct import unpack
class UdpPacket(AbstractPacket):
UNPACK_FORMAT = '!HHHH'
UDP_HEADER_LENGTH = 8
PROTOCOL_NAME = 'UDP'
def __init__(self, binPacket: bytes, margin: int):
self.binPacket = binPacket
self.headerMargin = margin
self.parse()
def parse(self):
AbstractPacket.addMsg(AbstractPacket, 'Started Parsing UDP packet')
binUdpHeader = self.binPacket[self.headerMargin:self.headerMargin + self.UDP_HEADER_LENGTH]
unpackedHeader = unpack(self.UNPACK_FORMAT, binUdpHeader)
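        # the 8-byte UDP header holds four unsigned 16-bit fields in network byte order:
        # source port, destination port, datagram length, and checksum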
self.fromPort = str(unpackedHeader[0])
self.toPort = str(unpackedHeader[1])
self.udpHeaderLength = unpackedHeader[2]
self.udpCheckSum = unpackedHeader[3]
fullHeaderSize = self.headerMargin + self.udpHeaderLength
self.dataSize = len(self.binPacket) - fullHeaderSize
# get data from the packet
self.data = self.binPacket[fullHeaderSize:]
AbstractPacket.addMsg(AbstractPacket, 'Parsed UDP packet from port: ' + self.fromPort + ' to: ' + self.toPort)
AbstractPacket.addMsg(AbstractPacket, 'UDP-PACKET data:\n\n\n ' + str(self.data) +'\n\n')
def getMsg(self):
return self.msg
def getName(self):
return self.PROTOCOL_NAME
def __del__(self):
pass | mit | 2,091,908,008,284,972,500 | 36.078947 | 119 | 0.651989 | false | 3.857534 | false | false | false |
aerler/WRF-Tools | Python/wrfavg/wrfout_average.py | 1 | 79431 | '''
Created on 2013-09-28, revised 2014-06-17, added daily output 2020-05-04
A script to average WRF output; the default settings are meant for my 'fineIO' output configuration and
process the smaller diagnostic files.
The script can run in parallel mode, with each process averaging one filetype and domain, producing
exactly one output file.
@author: Andre R. Erler, GPL v3
'''
#TODO: add time-dependent auxiliary files to file processing (use prerequisites from other files)
#TODO: add option to discard prerequisite variables
#TODO: add base variables for correlation and standard deviation (and (co-)variance).
#TODO: more variables: tropopause height, baroclinicity, PV, water flux (require full 3D fields)
#TODO: add shape-averaged output stream (shapes based on a template file)
## imports
import numpy as np
from collections import OrderedDict
#import numpy.ma as ma
import os, re, sys, shutil, gc
import netCDF4 as nc
# my own netcdf stuff
from utils.nctools import add_coord, copy_dims, copy_ncatts, copy_vars
from processing.multiprocess import asyncPoolEC
# import module providing derived variable classes
import wrfavg.derived_variables as dv
# aliases
days_per_month_365 = dv.days_per_month_365
dtype_float = dv.dtype_float
# thresholds for wet-day variables (from AMS glossary and ETCCDI Climate Change Indices)
from utils.constants import precip_thresholds
# N.B.: importing from WRF Tools to GeoPy causes a name collision
# date error class
class DateError(Exception):
''' Exceptions related to wrfout date strings, e.g. in file names. '''
pass
# date error class
class ArgumentError(Exception):
''' Exceptions related to arguments passed to the script. '''
pass
def getDateRegX(period):
''' function to define averaging period based on argument '''
# use '\d' for any number and [1-3,45] for ranges; '\d\d\d\d'
if period == '1979-1980': prdrgx = '19(79|80)' # 2 year historical period
elif period == '1979-1981': prdrgx = '19(79|8[0-1])' # 3 year historical period
elif period == '1979-1983': prdrgx = '19(79|8[0-3])' # 5 year historical period
elif period == '1979-1988': prdrgx = '19(79|8[0-8])' # 10 year historical period
elif period == '1980-1994': prdrgx = '19(8[0-9]|9[04])' # 15 year historical period
elif period == '2045-2047': prdrgx = '204[5-7]' # 3 year future period
elif period == '2045-2049': prdrgx = '204[5-9]' # 5 year future period
elif period == '2045-2054': prdrgx = '20(4[5-9]|5[0-4])' # 10 year future period
elif period == '2045-2059': prdrgx = '20(4[5-9]|5[0-9])' # 15 year future period
elif period == '2085-2087': prdrgx = '208[5-7]' # 3 year future period
elif period == '2085-2089': prdrgx = '208[5-9]' # 5 year future period
elif period == '2085-2094': prdrgx = '20(8[5-9]|9[0-4])' # 10 year future period
elif period == '2085-2099': prdrgx = '20(8[5-9]|9[0-9])' # 15 year future period
elif period == '2090-2094': prdrgx = '209[0-4]' # 5 year future period
else: prdrgx = None
if prdrgx: print(("\nLoading regular expression for date string: '{:s}'".format(period)))
return prdrgx
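# e.g. getDateRegX('1979-1988') returns '19(79|8[0-8])', a regex matching any year from 1979 through 1988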
## read arguments
# number of processes NP
if 'PYAVG_THREADS' in os.environ:
NP = int(os.environ['PYAVG_THREADS'])
else: NP = None
# only compute derived variables
if 'PYAVG_DERIVEDONLY' in os.environ:
lderivedonly = os.environ['PYAVG_DERIVEDONLY'] == 'DERIVEDONLY'
else: lderivedonly = False # i.e. all
# # scale dry-day threshold
# if os.environ.has_key('PYAVG_DRYDAY') and bool(os.environ['PYAVG_DRYDAY']): # i.e. not empty and non-zero
# dryday_correction = float(os.environ['PYAVG_DRYDAY']) # relative to WMO recommendation
# dv.dryday_threshold = dv.dryday_threshold * dryday_correction # precip treshold for a dry day: 2.3e-7 mm/s
# print("\n *** The dry-day threshold was increased by a factor of {:3.2f} relative to WMO recommendation *** \n".format(dryday_correction))
# recompute last timestep and continue (useful after a crash)
if 'PYAVG_RECOVER' in os.environ:
lrecover = os.environ['PYAVG_RECOVER'] == 'RECOVER'
else: lrecover = False # i.e. normal operation
# just add new and leave old
if 'PYAVG_ADDNEW' in os.environ:
laddnew = os.environ['PYAVG_ADDNEW'] == 'ADDNEW'
else: laddnew = False # i.e. recompute all
# recompute specified variables
if 'PYAVG_RECALC' in os.environ:
if os.environ['PYAVG_RECALC'] == 'DERIVEDONLY':
# recalculate all derived variables and leave others in place
lrecalc = True; lderivedonly = True; recalcvars = []
else:
recalcvars = os.environ['PYAVG_RECALC'].split() # space separated list (other characters cause problems...)
if len(recalcvars) > 0 and len(recalcvars[0]) > 0: lrecalc = True # if there is a variable to recompute
else: lrecalc = False
# lrecalc uses the same pathway, but they can operate independently
else: lrecalc = False # i.e. recompute all
# overwrite existing data
if 'PYAVG_OVERWRITE' in os.environ:
loverwrite = os.environ['PYAVG_OVERWRITE'] == 'OVERWRITE'
if loverwrite: laddnew = False; lrecalc = False
else: loverwrite = False # i.e. append
# N.B.: when loverwrite is True and and prdarg is empty, the entire file is replaced,
# otherwise only the selected months are recomputed
# file types to process
if 'PYAVG_FILETYPES' in os.environ:
filetypes = os.environ['PYAVG_FILETYPES'].split() # space separated list (other characters cause problems...)
if len(filetypes) == 1 and len(filetypes[0]) == 0: filetypes = None # empty string, substitute default
else: filetypes = None # defaults are set below
# domains to process
if 'PYAVG_DOMAINS' in os.environ:
domains = os.environ['PYAVG_DOMAINS'].split() # space separated list (other characters cause problems...)
if len(domains) == 1: domains = [int(i) for i in domains[0]] # string of single-digit indices
else: domains = [int(i) for i in domains] # semi-colon separated list
else: domains = None # defaults are set below
# run script in debug mode
if 'PYAVG_DEBUG' in os.environ:
ldebug = os.environ['PYAVG_DEBUG'] == 'DEBUG'
lderivedonly = ldebug or lderivedonly # usually this is what we are debugging, anyway...
else: ldebug = False # operational mode
# wipe temporary storage after every month (no carry-over)
if 'PYAVG_CARRYOVER' in os.environ:
lcarryover = os.environ['PYAVG_CARRYOVER'] == 'CARRYOVER'
else: lcarryover = True # operational mode
# use simple differences or centered differences for accumulated variables
if 'PYAVG_SMPLDIFF' in os.environ:
lsmplDiff = os.environ['PYAVG_SMPLDIFF'] == 'SMPLDIFF'
else: lsmplDiff = False # default mode: centered differences
# generate formatted daily/sub-daily output files for selected variables
if 'PYAVG_DAILY' in os.environ:
lglobaldaily = os.environ['PYAVG_DAILY'] == 'DAILY'
else: lglobaldaily = False # operational mode
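# N.B.: a typical invocation is configured through these environment variables, e.g. (illustration only):
# export PYAVG_FILETYPES='srfc hydro'; export PYAVG_DOMAINS='12'; export PYAVG_THREADS=4
# export PYAVG_OVERWRITE=OVERWRITE; python wrfout_average.py 1979-1988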
# working directories
exproot = os.getcwd()
exp = exproot.split('/')[-1] # root folder name
infolder = exproot + '/wrfout/' # input folder
outfolder = exproot + '/wrfavg/' # output folder
# figure out time period
# N.B.: values or regex' can be passed for year, month, and day as arguments in this order; alternatively,
# a single argument with the values/regex separated by commas (',') can be used
if len(sys.argv) == 1 or not any(sys.argv[1:]): # treat empty arguments as no argument
period = [] # means recompute everything
elif len(sys.argv) == 2:
period = sys.argv[1].split(',') # regular expression identifying
else:
period = sys.argv[1:]
# prdarg = '1979'; period = prdarg.split('-') # for tests
# default time intervals
yearstr = '\d\d\d\d'; monthstr = '\d\d'; daystr = '\d\d'
# figure out time interval
if len(period) >= 1:
# first try some common expressions
yearstr = getDateRegX(period[0])
if yearstr is None: yearstr = period[0]
if len(period) >= 2: monthstr = period[1]
if len(period) >= 3: daystr = period[2]
# N.B.: the timestr variables are interpreted as strings and support Python regex syntax
if len(period) > 0 or ldebug: print('Date string interpretation:',yearstr,monthstr,daystr)
## definitions
# input files and folders
filetypes = filetypes or ['srfc', 'plev3d', 'xtrm', 'hydro', 'lsm', 'rad', 'snow']
domains = domains or [1,2,3,4]
# filetypes and domains can also be set in an semi-colon-separated environment variable (see above)
# file pattern (WRF output and averaged files)
# inputpattern = 'wrf{0:s}_d{1:02d}_{2:s}-{3:s}-{4:s}_\d\d:\d\d:\d\d.nc' # expanded with format(type,domain,year,month)
inputpattern = '^wrf{0:s}_d{1:s}_{2:s}_\d\d[_:]\d\d[_:]\d\d(?:\.nc$|$)' # expanded with format(type,domain,datestring)
#inputpattern = '^wrf{0:s}_d{1:s}_{2:s}_\d\d[_:]\d\d[_:]\d\d.*$' # expanded with format(type,domain,datestring)
# N.B.: the last section (?:\.nc$|$) matches either .nc at the end or just the end of the string;
# ?: just means that the group defined by () can not be retrieved (it is just to hold "|")
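# e.g. for filetype 'srfc' and domain '01' the pattern matches a file like 'wrfsrfc_d01_1979-01-15_06:00:00.nc'
# (assuming the domain is passed as a two-digit string and the datestring covers year, month, and day)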
constpattern = 'wrfconst_d{0:02d}' # expanded with format(domain), also WRF output
# N.B.: file extension is added automatically for constpattern and handled by regex for inputpattern
monthlypattern = 'wrf{0:s}_d{1:02d}_monthly.nc' # expanded with format(type,domain)
dailypattern = 'wrf{0:s}_d{1:02d}_daily.nc' # expanded with format(type,domain)
# variable attributes
wrftime = 'Time' # time dim in wrfout files
wrfxtime = 'XTIME' # time in minutes since WRF simulation start
wrfaxes = dict(Time='tax', west_east='xax', south_north='yax', num_press_levels_stag='pax')
wrftimestamp = 'Times' # time-stamp variable in WRF
time = 'time' # time dim in monthly mean files
dimlist = ['x','y'] # dimensions we just copy
dimmap = {time:wrftime} #{time:wrftime, 'x':'west_east','y':'south_north'}
midmap = dict(list(zip(list(dimmap.values()),list(dimmap.keys())))) # reverse dimmap
# accumulated variables (only total accumulation since simulation start, not, e.g., daily accumulated)
acclist = dict(RAINNC=100.,RAINC=100.,RAINSH=None,SNOWNC=None,GRAUPELNC=None,SFCEVP=None,POTEVP=None, # srfc vars
SFROFF=None,UDROFF=None,ACGRDFLX=None,ACSNOW=None,ACSNOM=None,ACHFX=None,ACLHF=None, # lsm vars
ACSWUPT=1.e9,ACSWUPTC=1.e9,ACSWDNT=1.e9,ACSWDNTC=1.e9,ACSWUPB=1.e9,ACSWUPBC=1.e9,ACSWDNB=1.e9,ACSWDNBC=1.e9, # rad vars
ACLWUPT=1.e9,ACLWUPTC=1.e9,ACLWDNT=1.e9,ACLWDNTC=1.e9,ACLWUPB=1.e9,ACLWUPBC=1.e9,ACLWDNB=1.e9,ACLWDNBC=1.e9) # rad vars
# N.B.: keys = variables and values = bucket sizes; value = None or 0 means no bucket
bktpfx = 'I_' # prefix for bucket variables; these are processed together with their accumulated variables
# derived variables
derived_variables = {filetype:[] for filetype in filetypes} # derived variable lists by file type
derived_variables['srfc'] = [dv.Rain(), dv.LiquidPrecipSR(), dv.SolidPrecipSR(), dv.NetPrecip(sfcevp='QFX'),
dv.WaterVapor(), dv.OrographicIndex(), dv.CovOIP(), dv.WindSpeed(),
dv.SummerDays(threshold=25., temp='T2'), dv.FrostDays(threshold=0., temp='T2')]
# N.B.: measures the fraction of 6-hourly samples above/below the threshold (day and night)
derived_variables['xtrm'] = [dv.RainMean(), dv.TimeOfConvection(),
dv.SummerDays(threshold=25., temp='T2MAX'), dv.FrostDays(threshold=0., temp='T2MIN')]
derived_variables['hydro'] = [dv.Rain(), dv.LiquidPrecip(), dv.SolidPrecip(),
dv.NetPrecip(sfcevp='SFCEVP'), dv.NetWaterFlux(), dv.WaterForcing()]
derived_variables['rad'] = [dv.NetRadiation(), dv.NetLWRadiation()]
derived_variables['lsm'] = [dv.RunOff()]
derived_variables['plev3d'] = [dv.OrographicIndexPlev(), dv.Vorticity(), dv.WindSpeed(),
dv.WaterDensity(), dv.WaterFlux_U(), dv.WaterFlux_V(), dv.ColumnWater(),
dv.WaterTransport_U(), dv.WaterTransport_V(),
dv.HeatFlux_U(), dv.HeatFlux_V(), dv.ColumnHeat(),
dv.HeatTransport_U(),dv.HeatTransport_V(),
dv.GHT_Var(), dv.Vorticity_Var()]
# add wet-day variables for different thresholds
wetday_variables = [dv.WetDays, dv.WetDayRain, dv.WetDayPrecip]
for threshold in precip_thresholds:
for wetday_var in wetday_variables:
derived_variables['srfc'].append(wetday_var(threshold=threshold, rain='RAIN'))
derived_variables['hydro'].append(wetday_var(threshold=threshold, rain='RAIN'))
derived_variables['xtrm'].append(wetday_var(threshold=threshold, rain='RAINMEAN'))
# N.B.: derived variables need to be listed in order of computation
# Consecutive exceedance variables
consecutive_variables = {filetype:None for filetype in filetypes} # consecutive variable lists by file type
# skip in debug mode (only specific ones for debug)
if ldebug:
print("Skipping 'Consecutive Days of Exceedance' Variables")
else:
consecutive_variables['srfc'] = {'CFD' : ('T2', 'below', 273.14, 'Consecutive Frost Days (< 0C)'),
'CSD' : ('T2', 'above', 273.14+25., 'Consecutive Summer Days (>25C)'),
# N.B.: night temperatures >25C will rarely happen... so this will be very short
'CNWD' : ('NetPrecip', 'above', 0., 'Consecutive Net Wet Days'),
'CNDD' : ('NetPrecip', 'below', 0., 'Consecutive Net Dry Days'),}
consecutive_variables['xtrm'] = {'CFD' : ('T2MIN', 'below', 273.14, 'Consecutive Frost Days (< 0C)'),
'CSD' : ('T2MAX', 'above', 273.14+25., 'Consecutive Summer Days (>25C)'),}
consecutive_variables['hydro'] = {'CNWD' : ('NetPrecip', 'above', 0., 'Consecutive Net Wet Days'),
'CNDD' : ('NetPrecip', 'below', 0., 'Consecutive Net Dry Days'),
'CWGD' : ('NetWaterFlux', 'above', 0., 'Consecutive Water Gain Days'),
'CWLD' : ('NetWaterFlux', 'below', 0., 'Consecutive Water Loss Days'),}
# add wet-day variables for different thresholds
for threshold in precip_thresholds:
for filetype,rain_var in zip(['srfc','hydro','xtrm'],['RAIN','RAIN','RAINMEAN']):
suffix = '_{:03d}'.format(int(10*threshold)); name_suffix = '{:3.1f} mm/day)'.format(threshold)
consecutive_variables[filetype]['CWD'+suffix] = (rain_var, 'above', threshold/86400.,
'Consecutive Wet Days (>'+name_suffix)
consecutive_variables[filetype]['CDD'+suffix] = (rain_var, 'below', threshold/86400. ,
'Consecutive Dry Days (<'+name_suffix)
## single- and multi-step Extrema
maximum_variables = {filetype:[] for filetype in filetypes} # maxima variable lists by file type
daymax_variables = {filetype:[] for filetype in filetypes} # maxima variable lists by file type
daymin_variables = {filetype:[] for filetype in filetypes} # mininma variable lists by file type
weekmax_variables = {filetype:[] for filetype in filetypes} # maxima variable lists by file type
minimum_variables = {filetype:[] for filetype in filetypes} # minima variable lists by file type
weekmin_variables = {filetype:[] for filetype in filetypes} # mininma variable lists by file type
# skip in debug mode (only specific ones for debug)
if ldebug:
print("Skipping Single- and Multi-step Extrema")
else:
# Maxima (just list base variables; derived variables will be created later)
maximum_variables['srfc'] = ['T2', 'U10', 'V10', 'RAIN', 'RAINC', 'RAINNC', 'NetPrecip', 'WindSpeed']
maximum_variables['xtrm'] = ['T2MEAN', 'T2MAX', 'SPDUV10MEAN', 'SPDUV10MAX',
'RAINMEAN', 'RAINNCVMAX', 'RAINCVMAX']
maximum_variables['hydro'] = ['RAIN', 'RAINC', 'RAINNC', 'ACSNOW', 'ACSNOM', 'NetPrecip', 'NetWaterFlux', 'WaterForcing']
maximum_variables['lsm'] = ['SFROFF', 'Runoff']
maximum_variables['plev3d'] = ['S_PL', 'GHT_PL', 'Vorticity']
# daily (smoothed) maxima
daymax_variables['srfc'] = ['T2','RAIN', 'RAINC', 'RAINNC', 'NetPrecip', 'WindSpeed']
# daily (smoothed) minima
daymin_variables['srfc'] = ['T2']
# weekly (smoothed) maxima
weekmax_variables['xtrm'] = ['T2MEAN', 'T2MAX', 'SPDUV10MEAN']
weekmax_variables['hydro'] = ['RAIN', 'RAINC', 'RAINNC', 'ACSNOW', 'ACSNOM', 'NetPrecip', 'NetWaterFlux', 'WaterForcing']
weekmax_variables['lsm'] = ['SFROFF', 'UDROFF', 'Runoff']
# Maxima (just list base variables; derived variables will be created later)
minimum_variables['srfc'] = ['T2']
minimum_variables['xtrm'] = ['T2MEAN', 'T2MIN', 'SPDUV10MEAN']
minimum_variables['hydro'] = ['RAIN', 'NetPrecip', 'NetWaterFlux', 'WaterForcing']
minimum_variables['plev3d'] = ['GHT_PL', 'Vorticity']
# weekly (smoothed) minima
weekmin_variables['xtrm'] = ['T2MEAN', 'T2MIN', 'SPDUV10MEAN']
weekmin_variables['hydro'] = ['RAIN', 'NetPrecip', 'NetWaterFlux', 'WaterForcing']
weekmin_variables['lsm'] = ['SFROFF','UDROFF','Runoff']
# N.B.: it is important that the derived variables are listed in order of dependency!
# set of pre-requisites
prereq_vars = {key:set() for key in derived_variables.keys()} # pre-requisite variable set by file type
for key in prereq_vars.keys():
prereq_vars[key].update(*[devar.prerequisites for devar in derived_variables[key] if not devar.linear])
## daily variables (can also be 6-hourly or hourly, depending on source file)
if lglobaldaily:
daily_variables = {filetype:[] for filetype in filetypes} # daily variable lists by file type
daily_variables['srfc'] = ['T2', 'PSFC', 'WaterVapor', 'WindSpeed',] # surface climate
daily_variables['xtrm'] = ['T2MIN', 'T2MAX'] # min/max T2
daily_variables['hydro'] = ['RAIN', 'RAINC', 'LiquidPrecip', 'WaterForcing', 'SFCEVP', 'POTEVP'] # water budget
daily_variables['rad'] = ['NetRadiation','ACSWDNB','ACLWDNB','NetLWRadiation',] # surface radiation budget
#daily_variables['lsm'] = [] # runoff and soil temperature
## main work function
# N.B.: the loop iterations should be entirely independent, so that they can be run in parallel
def processFileList(filelist, filetype, ndom, lparallel=False, pidstr='', logger=None, ldebug=False):
''' This function is doing the main work, and is supposed to be run in a multiprocessing environment. '''
## setup files and folders
# load first file to copy some meta data
wrfoutfile = infolder+filelist[0]
logger.debug("\n{0:s} Opening first input file '{1:s}'.".format(pidstr,wrfoutfile))
wrfout = nc.Dataset(wrfoutfile, 'r', format='NETCDF4')
# timeless variables (should be empty, since all timeless variables should be in constant files!)
timeless = [varname for varname,var in wrfout.variables.items() if 'Time' not in var.dimensions]
assert len(timeless) == 0 # actually useless, since all WRF variables have a time dimension...
# time-dependent variables
varlist = [] # list of time-dependent variables to be processed
for varname,var in wrfout.variables.items():
if ('Time' in var.dimensions) and np.issubdtype(var.dtype, np.number) and varname[0:len(bktpfx)] != bktpfx:
varlist.append(varname)
varlist.sort() # alphabetical order...
## derived variables, extrema, and dependencies
# derived variable list
derived_vars = OrderedDict() # it is important that the derived variables are computed in order:
# the reason is that derived variables can depend on other derived variables, and the order in
# which they are listed, should take this into account
for devar in derived_variables[filetype]:
derived_vars[devar.name] = devar
# create consecutive extrema variables
if consecutive_variables[filetype] is not None:
for key,value in consecutive_variables[filetype].items():
if value[0] in derived_vars:
derived_vars[key] = dv.ConsecutiveExtrema(derived_vars[value[0]], value[1], threshold=value[2],
name=key, long_name=value[3])
else:
derived_vars[key] = dv.ConsecutiveExtrema(wrfout.variables[value[0]], value[1], threshold=value[2],
name=key, long_name=value[3], dimmap=midmap)
# method to create derived variables for extrema
def addExtrema(new_variables, mode, interval=0):
for exvar in new_variables[filetype]:
# create derived variable instance
if exvar in derived_vars:
if interval == 0: devar = dv.Extrema(derived_vars[exvar],mode)
else: devar = dv.MeanExtrema(derived_vars[exvar],mode,interval=interval)
else:
if interval == 0: devar = dv.Extrema(wrfout.variables[exvar],mode, dimmap=midmap)
else: devar = dv.MeanExtrema(wrfout.variables[exvar],mode, interval=interval, dimmap=midmap)
# append to derived variables
derived_vars[devar.name] = devar # derived_vars is from the parent scope, not local!
# and now add them
addExtrema(maximum_variables, 'max')
addExtrema(minimum_variables, 'min')
addExtrema(daymax_variables, 'max', interval=1)
addExtrema(daymin_variables, 'min', interval=1)
addExtrema(weekmax_variables, 'max', interval=5) # 5 days is the preferred interval, according to
addExtrema(weekmin_variables, 'min', interval=5) # ETCCDI Climate Change Indices
ldaily = False
if lglobaldaily:
# get varlist (does not include dependencies)
daily_varlist_full = daily_variables[filetype]
if len(daily_varlist_full)>0:
ldaily = True
daily_varlist = []; daily_derived_vars = []
for varname in daily_varlist_full:
if varname in wrfout.variables: daily_varlist.append(varname)
elif varname in derived_vars: daily_derived_vars.append(varname)
else:
raise ArgumentError("Variable '{}' not found in wrfout or derived variables; can only output derived variables that are already being computed for monthly output.".format(varname))
else:
logger.info("\n{0:s} Skipping (sub-)daily output for filetype '{1:s}', since variable list is empty.\n".format(pidstr,filetype))
# if we are only computing derived variables, remove all non-prerequisites
prepq = set().union(*[devar.prerequisites for devar in derived_vars.values()])
if ldaily: prepq |= set(daily_varlist)
if lderivedonly: varlist = [var for var in varlist if var in prepq]
# get some meta info and construct title string (printed after file creation)
begindate = str(nc.chartostring(wrfout.variables[wrftimestamp][0,:10])) # first timestamp in first file
beginyear, beginmonth, beginday = [int(tmp) for tmp in begindate.split('-')]
# always need to begin on the first of a month (discard incomplete data of first month)
if beginday != 1:
beginmonth += 1 # move on to next month
beginday = 1 # and start at the first (always...)
begindate = '{0:04d}-{1:02d}-{2:02d}'.format(beginyear, beginmonth, beginday) # rewrite begin date
# open last file and get last date
lastoutfile = infolder+filelist[-1]
logger.debug("{0:s} Opening last input file '{1:s}'.".format(pidstr,lastoutfile))
lastout = nc.Dataset(lastoutfile, 'r', format='NETCDF4')
lstidx = lastout.variables[wrftimestamp].shape[0]-1 # netcdf library has problems with negative indexing
enddate = str(nc.chartostring(lastout.variables[wrftimestamp][lstidx,:10])) # last timestamp in last file
endyear, endmonth, endday = [int(tmp) for tmp in enddate.split('-')]; del endday # make warning go away...
# the last timestamp should be the next month (i.e. that month is not included)
if endmonth == 1:
endmonth = 12; endyear -= 1 # previous year
else: endmonth -= 1
endday = 1 # first day of last month (always 1st..)
assert 1 <= endday <= 31 and 1 <= endmonth <= 12 # this is kinda trivial...
enddate = '{0:04d}-{1:02d}-{2:02d}'.format(endyear, endmonth, endday) # rewrite begin date
## open/create monthly mean output file
monthly_file = monthlypattern.format(filetype,ndom)
if lparallel: tmppfx = 'tmp_wrfavg_{:s}_'.format(pidstr[1:-1])
else: tmppfx = 'tmp_wrfavg_'
monthly_filepath = outfolder + monthly_file
tmp_monthly_filepath = outfolder + tmppfx + monthly_file
if os.path.exists(monthly_filepath):
if loverwrite or os.path.getsize(monthly_filepath) < 1e6: os.remove(monthly_filepath)
# N.B.: NetCDF files smaller than 1MB are usually incomplete header fragments from a previous crashed job
if os.path.exists(tmp_monthly_filepath) and not lrecover: os.remove(tmp_monthly_filepath) # remove old temp files
if os.path.exists(monthly_filepath):
    # make a temporary copy of the file to work on (except if we are recovering a broken temp file)
if not ( lrecover and os.path.exists(tmp_monthly_filepath) ): shutil.copy(monthly_filepath,tmp_monthly_filepath)
# open (temporary) file
logger.debug("{0:s} Opening existing output file '{1:s}'.\n".format(pidstr,monthly_filepath))
monthly_dataset = nc.Dataset(tmp_monthly_filepath, mode='a', format='NETCDF4') # open to append data (mode='a')
# infer start index
meanbeginyear, meanbeginmonth, meanbeginday = [int(tmp) for tmp in monthly_dataset.begin_date.split('-')]
assert meanbeginday == 1, 'always have to begin on the first of a month'
t0 = (beginyear-meanbeginyear)*12 + (beginmonth-meanbeginmonth) + 1
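    # e.g. if the existing file begins 1979-01 and the current batch begins 1980-01,
    # then t0 = (1980-1979)*12 + (1-1) + 1 = 13, i.e. we continue after the first 12 months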
# check time-stamps in old datasets
if monthly_dataset.end_date < begindate: assert t0 == len(monthly_dataset.dimensions[time]) + 1 # another check
else: assert t0 <= len(monthly_dataset.dimensions[time]) + 1 # get time index where we start; in month beginning 1979
##
## *** special functions like adding new and recalculating old variables could be added later for daily output ***
##
# checks for new variables
if laddnew or lrecalc:
if t0 != 1: raise DateError("Have to start at the beginning to add new or recompute old variables!") # t0 starts with 1, not 0
meanendyear, meanendmonth, meanendday = [int(tmp) for tmp in monthly_dataset.end_date.split('-')]
assert meanendday == 1
endyear, endmonth = meanendyear, meanendmonth # just adding new, not extending!
enddate = monthly_dataset.end_date # for printing...
# check base variables
if laddnew or lrecalc: newvars = []
for var in varlist:
if var not in monthly_dataset.variables:
if laddnew: newvars.append(var)
else: varlist.remove(var)
#raise IOError, "{0:s} variable '{1:s}' not found in file '{2:s}'".format(pidstr,var.name,monthly_file)
# add new variables to netcdf file
if laddnew and len(newvars) > 0:
# copy remaining dimensions to new datasets
if midmap is not None:
dimlist = [midmap.get(dim,dim) for dim in wrfout.dimensions.keys() if dim != wrftime]
else: dimlist = [dim for dim in wrfout.dimensions.keys() if dim != wrftime]
dimlist = [dim for dim in dimlist if dim not in monthly_dataset.dimensions] # only the new ones!
copy_dims(monthly_dataset, wrfout, dimlist=dimlist, namemap=dimmap, copy_coords=False) # don't have coordinate variables
# create time-dependent variable in new datasets
copy_vars(monthly_dataset, wrfout, varlist=newvars, dimmap=dimmap, copy_data=False) # do not copy data - need to average
# change units of accumulated variables (per second)
for varname in newvars: # only new vars
assert varname in monthly_dataset.variables
if varname in acclist:
meanvar = monthly_dataset.variables[varname]
meanvar.units = meanvar.units + '/s' # units per second!
# add variables that should be recalculated
if lrecalc:
for var in recalcvars:
if var in monthly_dataset.variables and var in wrfout.variables:
if var not in newvars: newvars.append(var)
#else: raise ArgumentError, "Variable '{:s}' scheduled for recalculation is not present in output file '{:s}'.".format(var,monthly_filepath)
# check derived variables
if laddnew or lrecalc: newdevars = []
for varname,var in derived_vars.items():
if varname in monthly_dataset.variables:
var.checkPrerequisites(monthly_dataset)
      if not var.checked: raise ValueError("Prerequisites for derived variable '{:s}' not found.".format(varname))
if lrecalc:
if ( lderivedonly and len(recalcvars) == 0 ) or ( varname in recalcvars ):
newdevars.append(varname)
var.checkPrerequisites(monthly_dataset) # as long as they are sorted correctly...
#del monthly_dataset.variables[varname]; monthly_dataset.sync()
#var.createVariable(monthly_dataset) # this does not seem to work...
else:
if laddnew:
var.checkPrerequisites(monthly_dataset) # as long as they are sorted correctly...
var.createVariable(monthly_dataset)
newdevars.append(varname)
else: del derived_vars[varname] # don't bother
# N.B.: it is not possible that a previously computed variable depends on a missing variable,
# unless it was purposefully deleted, in which case this will crash!
#raise (dv.DerivedVariableError, "{0:s} Derived variable '{1:s}' not found in file '{2:s}'".format(pidstr,var.name,monthly_file))
# now figure out effective variable list
if laddnew or lrecalc:
varset = set(newvars)
devarset = set(newdevars)
ndv = -1
# check prerequisites
while ndv != len(devarset):
ndv = len(devarset)
for devar in list(devarset): # normal variables don't have prerequisites
for pq in derived_vars[devar].prerequisites:
if pq in derived_vars: devarset.add(pq)
else: varset.add(pq)
# N.B.: this algorithm for dependencies relies on the fact that derived_vars is already ordered correctly,
# and unused variables can simply be removed (below), without changing the order;
# a stand-alone dependency resolution would require soring the derived_vars in order of execution
# consolidate lists
for devar in derived_vars.keys():
if devar not in devarset: del derived_vars[devar] # don't bother with this one...
    varlist = list(varset) # order doesn't really matter... but whatever...
varlist.sort() # ... alphabetical order...
else:
logger.debug("{0:s} Creating new output file '{1:s}'.\n".format(pidstr,monthly_filepath))
monthly_dataset = nc.Dataset(tmp_monthly_filepath, 'w', format='NETCDF4') # open to start a new file (mode='w')
t0 = 1 # time index where we start (first month)
monthly_dataset.createDimension(time, size=None) # make time dimension unlimited
add_coord(monthly_dataset, time, data=None, dtype='i4', atts=dict(units='month since '+begindate)) # unlimited time dimension
# copy remaining dimensions to new datasets
if midmap is not None:
dimlist = [midmap.get(dim,dim) for dim in wrfout.dimensions.keys() if dim != wrftime]
else: dimlist = [dim for dim in wrfout.dimensions.keys() if dim != wrftime]
copy_dims(monthly_dataset, wrfout, dimlist=dimlist, namemap=dimmap, copy_coords=False) # don't have coordinate variables
# copy time-less variable to new datasets
copy_vars(monthly_dataset, wrfout, varlist=timeless, dimmap=dimmap, copy_data=True) # copy data
# create time-dependent variable in new datasets
copy_vars(monthly_dataset, wrfout, varlist=varlist, dimmap=dimmap, copy_data=False) # do not copy data - need to average
# change units of accumulated variables (per second)
for varname in acclist:
if varname in monthly_dataset.variables:
meanvar = monthly_dataset.variables[varname]
meanvar.units = meanvar.units + '/s' # units per second!
# also create variable for time-stamps in new datasets
if wrftimestamp in wrfout.variables:
      copy_vars(monthly_dataset, wrfout, varlist=[wrftimestamp], dimmap=dimmap, copy_data=False) # do not copy data - need to average
# create derived variables
for var in derived_vars.values():
var.checkPrerequisites(monthly_dataset) # as long as they are sorted correctly...
var.createVariable(monthly_dataset) # derived variables need to be added in order of computation
# copy global attributes
copy_ncatts(monthly_dataset, wrfout, prefix='') # copy all attributes (no need for prefix; all upper case are original)
# some new attributes
monthly_dataset.acc_diff_mode = 'simple' if lsmplDiff else 'centered'
monthly_dataset.description = 'wrf{0:s}_d{1:02d} monthly means'.format(filetype,ndom)
monthly_dataset.begin_date = begindate
monthly_dataset.experiment = exp
monthly_dataset.creator = 'Andre R. Erler'
# sync with file
monthly_dataset.sync()
## open/create daily output file
if ldaily:
# get datetime
begindatetime = dv.getTimeStamp(wrfout, 0, wrftimestamp)
# figure out filename
daily_file = dailypattern.format(filetype,ndom)
if lparallel: tmppfx = 'tmp_wrfavg_{:s}_'.format(pidstr[1:-1])
else: tmppfx = 'tmp_wrfavg_'
daily_filepath = outfolder + daily_file
tmp_daily_filepath = outfolder + tmppfx + daily_file
if os.path.exists(daily_filepath):
if loverwrite or os.path.getsize(daily_filepath) < 1e6: os.remove(daily_filepath)
# N.B.: NetCDF files smaller than 1MB are usually incomplete header fragments from a previous crashed job
if os.path.exists(tmp_daily_filepath) and not lrecover: os.remove(tmp_daily_filepath) # remove old temp files
if os.path.exists(daily_filepath):
raise NotImplementedError("Currently, updating of and appending to (sub-)daily output files is not supported.")
else:
logger.debug("{0:s} Creating new (sub-)daily output file '{1:s}'.\n".format(pidstr,daily_filepath))
daily_dataset = nc.Dataset(tmp_daily_filepath, 'w', format='NETCDF4') # open to start a new file (mode='w')
      timestep_start = 0 # time step where we start (first time step)
daily_dataset.createDimension(time, size=None) # make time dimension unlimited
add_coord(daily_dataset, time, data=None, dtype='i8', atts=dict(units='seconds since '+begindatetime)) # unlimited time dimension
# copy remaining dimensions to new datasets
if midmap is not None:
dimlist = [midmap.get(dim,dim) for dim in wrfout.dimensions.keys() if dim != wrftime]
else: dimlist = [dim for dim in wrfout.dimensions.keys() if dim != wrftime]
copy_dims(daily_dataset, wrfout, dimlist=dimlist, namemap=dimmap, copy_coords=False) # don't have coordinate variables
# copy time-less variable to new datasets
copy_vars(daily_dataset, wrfout, varlist=timeless, dimmap=dimmap, copy_data=True) # copy data
# create time-dependent variable in new datasets
copy_vars(daily_dataset, wrfout, varlist=daily_varlist, dimmap=dimmap, copy_data=False) # do not copy data - need to resolve buckets and straighten time
# change units of accumulated variables (per second)
for varname in acclist:
if varname in daily_dataset.variables:
dayvar = daily_dataset.variables[varname]
dayvar.units = dayvar.units + '/s' # units per second!
# also create variable for time-stamps in new datasets
if wrftimestamp in wrfout.variables:
copy_vars(daily_dataset, wrfout, varlist=[wrftimestamp], dimmap=dimmap, copy_data=False) # do not copy data - need to straighten out time axis
if wrfxtime in wrfout.variables:
copy_vars(daily_dataset, wrfout, varlist=[wrfxtime], dimmap=dimmap, copy_data=False) # do not copy data - need to straighten out time axis
# create derived variables
for devarname in daily_derived_vars:
# don't need to check for prerequisites, since they are already being checked and computed for monthly output
derived_vars[devarname].createVariable(daily_dataset) # derived variables need to be added in order of computation
# copy global attributes
copy_ncatts(daily_dataset, wrfout, prefix='') # copy all attributes (no need for prefix; all upper case are original)
# some new attributes
daily_dataset.acc_diff_mode = 'simple' if lsmplDiff else 'centered'
daily_dataset.description = 'wrf{0:s}_d{1:02d} post-processed timestep output'.format(filetype,ndom)
daily_dataset.begin_date = begindatetime
daily_dataset.experiment = exp
daily_dataset.creator = 'Andre R. Erler'
# sync with file
daily_dataset.sync()
## construct dependencies
# update linearity: dependencies of non-linear variables have to be treated as non-linear themselves
lagain = True
# parse through dependencies until nothing changes anymore
while lagain:
lagain = False
for dename,devar in derived_vars.items():
# variables for daily output can be treated as non-linear, so that they are computed at the native timestep
if ldaily and dename in daily_derived_vars: devar.linear = False
if not devar.linear:
# make sure all dependencies are also treated as non-linear
for pq in devar.prerequisites:
if pq in derived_vars and derived_vars[pq].linear:
lagain = True # indicate modification
derived_vars[pq].linear = False
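  # N.B.: this is a simple fixed-point iteration: the non-linear flag is propagated down the
  #       dependency tree until a full pass over derived_vars changes nothing anymore; only
  #       non-linear variables (and daily output variables) require instantaneous time-step data.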
# construct dependency set (should include extrema now)
pqset = set().union(*[devar.prerequisites for devar in derived_vars.values() if not devar.linear])
if ldaily:
# daily output variables need to be treated as prerequisites, so that full timestep fields are loaded for bucket variables
pqset |= set(daily_varlist)
cset = set().union(*[devar.constants for devar in derived_vars.values() if devar.constants is not None])
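  # N.B.: 'pqset' collects all prerequisites that have to be loaded at every output time step
  #       (because a non-linear derived variable or the daily output needs them), while 'cset'
  #       collects the names of constant fields/attributes to be read from the constants file.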
# initialize dictionary for temporary storage
tmpdata = dict() # not allocated - use sparingly
# load constants, if necessary
const = dict()
lconst = len(cset) > 0
if lconst:
constfile = infolder+constpattern.format(ndom)
if not os.path.exists(constfile): constfile += '.nc' # try with extension
if not os.path.exists(constfile): raise IOError("No constants file found! ({:s})".format(constfile))
logger.debug("\n{0:s} Opening constants file '{1:s}'.\n".format(pidstr,constfile))
wrfconst = nc.Dataset(constfile, 'r', format='NETCDF4')
# constant variables
for cvar in cset:
if cvar in wrfconst.variables: const[cvar] = wrfconst.variables[cvar][:]
elif cvar in wrfconst.ncattrs(): const[cvar] = wrfconst.getncattr(cvar)
else: raise ValueError("Constant variable/attribute '{:s}' not found in constants file '{:s}'.".format(cvar,constfile))
else: const = None
  # check axes order of prerequisites and constants
for devar in derived_vars.values():
for pq in devar.prerequisites:
# get dimensions of prerequisite
if pq in varlist: pqax = wrfout.variables[pq].dimensions
elif lconst and pq in wrfconst.variables: pqax = wrfconst.variables[pq].dimensions
elif lconst and pq in const: pqax = () # a scalar value, i.e. no axes
elif pq in derived_vars: pqax = derived_vars[pq].axes
else: raise ValueError("Prerequisite '{:s} for variable '{:s}' not found!".format(pq,devar.name))
# check axes for consistent order
index = -1
for ax in devar.axes:
if ax in pqax:
idx = pqax.index(ax)
if idx > index: index = idx
else: raise IndexError("The axis order of '{:s}' and '{:s}' is inconsistent - this can lead to unexpected results!".format(devar.name,pq))
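  # N.B.: consistent axis order matters because the derived-variable computations operate
  #       element-wise on the prerequisite arrays; a mismatched axis order could broadcast
  #       silently and produce wrong results, hence the hard error above.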
# announcement: format title string and print
varstr = ''; devarstr = '' # make variable list, also for derived variables
for var in varlist: varstr += '{}, '.format(var)
for devar in derived_vars.values(): devarstr += '%s, '%devar.name
titlestr = '\n\n{0:s} *** Processing wrf{1:s} files for domain {2:d}. ***'.format(pidstr,filetype,ndom)
titlestr += '\n (monthly means from {0:s} to {1:s}, incl.)'.format(begindate,enddate)
if varstr: titlestr += '\n Variable list: {0:s}'.format(str(varstr),)
else: titlestr += '\n Variable list: None'
if devarstr: titlestr += '\n Derived variables: {0:s}'.format(str(devarstr),)
# print meta info (print everything in one chunk, so output from different processes does not get mangled)
logger.info(titlestr)
# extend time dimension in monthly average
if (endyear < beginyear) or (endyear == beginyear and endmonth < beginmonth):
raise DateError("End date is before begin date: {:04d}-{:02d} < {:04d}-{:02d}".format(endyear,endmonth,beginyear,beginmonth))
times = np.arange(t0,t0+(endyear-beginyear)*12+endmonth-beginmonth+1)
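  # e.g. beginyear/-month = 1979-01 and endyear/-month = 1980-12 with t0 = 1 gives
  #      np.arange(1, 1 + 12 + 12 - 1 + 1) = np.arange(1, 25), i.e. 24 monthly time indices 1..24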
# handling of time intervals for accumulated variables
if wrfxtime in wrfout.variables:
lxtime = True # simply compute differences from XTIME (assuming minutes)
time_desc = wrfout.variables[wrfxtime].description
assert time_desc.startswith("minutes since "), time_desc
assert "simulation start" in time_desc or begindate in time_desc or '**' in time_desc, time_desc
# N.B.: the last check (**) is for cases where the date in WRF is garbled...
if t0 == 1 and not wrfout.variables[wrfxtime][0] == 0:
raise ValueError( 'XTIME in first input file does not start with 0!\n'+
'(this can happen, when the first input file is missing)' )
elif wrftimestamp in wrfout.variables:
lxtime = False # interpret timestamp in Times using datetime module
  else: raise TypeError("Cannot determine time intervals: neither '{:s}' nor '{:s}' found in input file.".format(wrfxtime,wrftimestamp))
# check if there is a missing_value flag
if 'P_LEV_MISSING' in wrfout.ncattrs():
missing_value = wrfout.P_LEV_MISSING # usually -999.
# N.B.: this is only used in plev3d files, where pressure levels intersect the ground
else: missing_value = None
# allocate fields
data = dict() # temporary data arrays
for var in varlist:
tmpshape = list(wrfout.variables[var].shape)
del tmpshape[wrfout.variables[var].dimensions.index(wrftime)] # allocated arrays have no time dimension
assert len(tmpshape) == len(wrfout.variables[var].shape) -1
data[var] = np.zeros(tmpshape, dtype=dtype_float) # allocate
#if missing_value is not None:
# data[var] += missing_value # initialize with missing value
# allocate derived data arrays (for non-linear variables)
pqdata = {pqvar:None for pqvar in pqset} # temporary data array holding instantaneous values to compute derived variables
# N.B.: since data is only referenced from existing arrays, allocation is not necessary
dedata = dict() # non-linear derived variables
# N.B.: linear derived variables are computed directly from the monthly averages
for dename,devar in derived_vars.items():
if not devar.linear:
tmpshape = [len(wrfout.dimensions[ax]) for ax in devar.axes if ax != time] # infer shape
assert len(tmpshape) == len(devar.axes) -1 # no time dimension
dedata[dename] = np.zeros(tmpshape, dtype=dtype_float) # allocate
# prepare computation of monthly means
filecounter = 0 # number of wrfout file currently processed
i0 = t0-1 # index position we write to: i = i0 + n (zero-based, of course)
if ldaily: daily_start_idx = daily_end_idx = timestep_start # for each file cycle, the time index where to write the data
## start loop over month
if lparallel: progressstr = '' # a string printing the processed dates
else: logger.info('\n Processed dates:')
try:
    # loop over months, progressively stepping through input files
for n,meantime in enumerate(times):
# meantime: (complete) month since simulation start
lasttimestamp = None # carry over start time, when moving to the next file (defined below)
# N.B.: when moving to the next file, the script auto-detects and resets this property, no need to change here!
# However (!) it is necessary to reset this for every month, because it is not consistent!
# extend time array / month counter
meanidx = i0 + n
if meanidx == len(monthly_dataset.variables[time]):
lskip = False # append next data point / time step
elif loverwrite or laddnew or lrecalc:
lskip = False # overwrite this step or add data point for new variables
elif meanidx == len(monthly_dataset.variables[time])-1:
if lrecover or monthly_dataset.variables[time][meanidx] == -1:
lskip = False # recompute last step, because it may be incomplete
else: lskip = True
else:
lskip = True # skip this step, but we still have to verify the timing
# check if we are overwriting existing data
if meanidx != len(monthly_dataset.variables[time]):
assert meanidx < len(monthly_dataset.variables[time])
assert meantime == monthly_dataset.variables[time][meanidx] or monthly_dataset.variables[time][meanidx] == -1
# N.B.: writing records is delayed to avoid incomplete records in case of a crash
# current date
currentyear, currentmonth = divmod(n+beginmonth-1,12)
currentyear += beginyear; currentmonth +=1
# sanity checks
assert meanidx + 1 == meantime
currentdate = '{0:04d}-{1:02d}'.format(currentyear,currentmonth)
# determine appropriate start index
wrfstartidx = 0
while currentdate > str(nc.chartostring(wrfout.variables[wrftimestamp][wrfstartidx,0:7])):
wrfstartidx += 1 # count forward
if wrfstartidx != 0: logger.debug('\n{0:s} {1:s}: Starting month at index {2:d}.'.format(pidstr, currentdate, wrfstartidx))
# save WRF time-stamp for beginning of month for the new file, for record
firsttimestamp_chars = wrfout.variables[wrftimestamp][wrfstartidx,:]
#logger.debug('\n{0:s}{1:s}-01_00:00:00, {2:s}'.format(pidstr, currentdate, str(nc.chartostring(wrfout.variables[wrftimestamp][wrfstartidx,:])))
if '{0:s}-01_00:00:00'.format(currentdate,) == str(nc.chartostring(wrfout.variables[wrftimestamp][wrfstartidx,:])):
pass # proper start of the month
elif meanidx == 0 and '{0:s}-01_06:00:00'.format(currentdate,) == str(nc.chartostring(wrfout.variables[wrftimestamp][wrfstartidx,:])):
pass # for some reanalysis... but only at start of simulation
else: raise DateError("{0:s} Did not find first day of month to compute monthly average.".format(pidstr) +
"file: {0:s} date: {1:s}-01_00:00:00".format(monthly_file,currentdate))
# prepare summation of output time steps
      lcomplete = False # becomes True once the end of the month (or of the last file) is reached
ntime = 0 # accumulated output time steps
# time when accumulation starts (in minutes)
# N.B.: the first value is saved as negative, so that adding the last value yields a positive interval
if lxtime: xtime = -1 * wrfout.variables[wrfxtime][wrfstartidx] # minutes
monthlytimestamps = [] # list of timestamps, also used for time period calculation
# clear temporary arrays
for varname,var in data.items(): # base variables
data[varname] = np.zeros(var.shape, dtype=dtype_float) # reset to zero
for dename,devar in dedata.items(): # derived variables
dedata[dename] = np.zeros(devar.shape, dtype=dtype_float) # reset to zero
## loop over files and average
while not lcomplete:
# determine valid end index by checking dates from the end counting backwards
# N.B.: start index is determined above (if a new file was opened in the same month,
# the start index is automatically set to 0 or 1 when the file is opened, below)
wrfendidx = len(wrfout.dimensions[wrftime])-1
while wrfendidx >= 0 and currentdate < str(nc.chartostring(wrfout.variables[wrftimestamp][wrfendidx,0:7])):
if not lcomplete: lcomplete = True # break loop over file if next month is in this file (critical!)
wrfendidx -= 1 # count backwards
#if wrfendidx < len(wrfout.dimensions[wrftime])-1: # check if count-down actually happened
wrfendidx += 1 # reverse last step so that counter sits at first step of next month
# N.B.: if this is not the last file, there was no iteration and wrfendidx should be the length of the the file;
# in this case, wrfendidx is only used to define Python ranges, which are exclusive to the upper boundary;
# if the first date in the file is already the next month, wrfendidx will be 0 and this is the final step;
assert wrfendidx >= wrfstartidx # i.e. wrfendidx = wrfstartidx = 0 is an empty step to finalize accumulation
assert lcomplete or wrfendidx == len(wrfout.dimensions[wrftime])
# if this is the last file and the month is not complete, we have to forcefully terminate
if filecounter == len(filelist)-1 and not lcomplete:
lcomplete = True # end loop
lskip = True # don't write results for this month!
if not lskip:
## compute monthly averages
# loop over variables
for varname in varlist:
logger.debug('{0:s} {1:s}'.format(pidstr,varname))
if varname not in wrfout.variables:
logger.info("{:s} Variable {:s} missing in file '{:s}' - filling with NaN!".format(pidstr,varname,filelist[filecounter]))
data[varname] *= np.NaN # turn everything into NaN, if variable is missing
# N.B.: this can happen, when an output stream was reconfigured between cycle steps
else:
var = wrfout.variables[varname]
tax = var.dimensions.index(wrftime) # index of time axis
slices = [slice(None)]*len(var.shape)
# construct informative IOError message
ioerror = "An Error occcured in file '{:s}'; variable: '{:s}'\n('{:s}')".format(filelist[filecounter], varname, infolder)
# decide how to average
## Accumulated Variables
if varname in acclist:
if missing_value is not None:
raise NotImplementedError("Can't handle accumulated variables with missing values yet.")
# compute mean as difference between end points; normalize by time difference
if ntime == 0: # first time step of the month
slices[tax] = wrfstartidx # relevant time interval
try: tmp = var.__getitem__(slices) # get array
except: raise IOError(ioerror) # informative IO Error
if acclist[varname] is not None: # add bucket level, if applicable
bkt = wrfout.variables[bktpfx+varname]
tmp += bkt.__getitem__(slices) * acclist[varname]
# check that accumulated fields at the beginning of the simulation are zero
if meanidx == 0 and wrfstartidx == 0:
# note that if we are skipping the first step, there is no check
if np.max(tmp) != 0 or np.min(tmp) != 0:
raise ValueError( 'Accumulated fields were not initialized with zero!\n' +
'(this can happen, when the first input file is missing)' )
data[varname] = -1 * tmp # so we can do an in-place operation later
# N.B.: both, begin and end, can be in the same file, hence elif is not appropriate!
if lcomplete: # last step
slices[tax] = wrfendidx # relevant time interval
try: tmp = var.__getitem__(slices) # get array
except: raise IOError(ioerror) # informative IO Error
if acclist[varname] is not None: # add bucket level, if applicable
bkt = wrfout.variables[bktpfx+varname]
tmp += bkt.__getitem__(slices) * acclist[varname]
data[varname] += tmp # the starting data is already negative
                # if variable is a prerequisite to others, compute instantaneous values
if varname in pqset:
# compute mean via sum over all elements; normalize by number of time steps
if lsmplDiff: slices[tax] = slice(wrfstartidx,wrfendidx+1) # load longer time interval for diff
else: slices[tax] = slice(wrfstartidx,wrfendidx) # relevant time interval
try: tmp = var.__getitem__(slices) # get array
except: raise IOError(ioerror) # informative IO Error
if acclist[varname] is not None: # add bucket level, if applicable
bkt = wrfout.variables[bktpfx+varname]
tmp = tmp + bkt.__getitem__(slices) * acclist[varname]
if lsmplDiff: pqdata[varname] = np.diff(tmp, axis=tax) # simple differences
else: pqdata[varname] = dv.ctrDiff(tmp, axis=tax, delta=1) # normalization comes later
##
## *** daily values for bucket variables are generated here, ***
## *** but should we really use *centered* differences??? ***
##
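                # N.B.: for accumulated fields the monthly mean rate is effectively
                #         ( ACC(first step of next month) - ACC(first step of this month) ) / timeperiod ,
                #       where ACC is the de-bucketed total (raw field + bucket * acclist[varname]);
                #       the difference is built up as data = -start, then data += end, and the
                #       division by 'timeperiod' (in seconds) happens when the month is written out.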
elif varname[0:len(bktpfx)] == bktpfx:
pass # do not process buckets
## Normal Variables
else:
# skip "empty" steps (only needed to difference accumulated variables)
if wrfendidx > wrfstartidx:
# compute mean via sum over all elements; normalize by number of time steps
slices[tax] = slice(wrfstartidx,wrfendidx) # relevant time interval
try: tmp = var.__getitem__(slices) # get array
except: raise IOError(ioerror) # informative IO Error
if missing_value is not None:
# N.B.: missing value handling is really only necessary when missing values are time-dependent
tmp = np.where(tmp == missing_value, np.NaN, tmp) # set missing values to NaN
#tmp = ma.masked_equal(tmp, missing_value, copy=False) # mask missing values
data[varname] = data[varname] + tmp.sum(axis=tax) # add to sum
# N.B.: in-place operations with non-masked array destroy the mask, hence need to use this
# keep data in memory if used in computation of derived variables
if varname in pqset: pqdata[varname] = tmp
## compute derived variables
# but first generate a list of timestamps
if lcomplete: tmpendidx = wrfendidx
else: tmpendidx = wrfendidx -1 # end of file
# assemble list of time stamps
currenttimestamps = [] # relevant timestamps in this file
for i in range(wrfstartidx,tmpendidx+1):
timestamp = str(nc.chartostring(wrfout.variables[wrftimestamp][i,:]))
currenttimestamps.append(timestamp)
monthlytimestamps.extend(currenttimestamps) # add to monthly collection
# write daily timestamps
if ldaily:
nsteps = wrfendidx - wrfstartidx
daily_start_idx = daily_end_idx # from previous step
daily_end_idx = daily_start_idx + nsteps
            # set time values to -1, to indicate they are being worked on
daily_dataset.variables[time][daily_start_idx:daily_end_idx] = -1
ncvar = None; vardata = None # dummies, to prevent crash later on, if varlist is empty
# copy timestamp and xtime data
daily_dataset.variables[wrftimestamp][daily_start_idx:daily_end_idx,:] = wrfout.variables[wrftimestamp][wrfstartidx:wrfendidx,:]
if lxtime:
daily_dataset.variables[wrfxtime][daily_start_idx:daily_end_idx] = wrfout.variables[wrfxtime][wrfstartidx:wrfendidx]
daily_dataset.sync()
# normalize accumulated pqdata with output interval time
if wrfendidx > wrfstartidx:
assert tmpendidx > wrfstartidx, 'There should never be a single value in a file: wrfstartidx={:d}, wrfendidx={:d}, lcomplete={:s}'.format(wrfstartidx,wrfendidx,str(lcomplete))
# compute time delta
delta = dv.calcTimeDelta(currenttimestamps)
if lxtime:
xdelta = wrfout.variables[wrfxtime][tmpendidx] - wrfout.variables[wrfxtime][wrfstartidx]
xdelta *= 60. # convert minutes to seconds
if delta != xdelta: raise ValueError("Time calculation from time stamps and model time are inconsistent: {:f} != {:f}".format(delta,xdelta))
delta /= float(tmpendidx - wrfstartidx) # the average interval between output time steps
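            # N.B.: 'delta' is now the average output interval in seconds (e.g. 21600 s for
            #       6-hourly output); dividing the accumulated differences in pqdata by delta
            #       below turns them into rates per second, consistent with the '/s' units.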
# loop over time-step data
for pqname,pqvar in pqdata.items():
if pqname in acclist: pqvar /= delta # normalize
# write to daily file
if ldaily:
# loop over variables and save data arrays
for varname in daily_varlist:
ncvar = daily_dataset.variables[varname] # destination variable in daily output
vardata = pqdata[varname] # timestep data
if missing_value is not None: # make sure the missing value flag is preserved
vardata = np.where(np.isnan(vardata), missing_value, vardata)
ncvar.missing_value = missing_value # just to make sure
if ncvar.ndim > 1: ncvar[daily_start_idx:daily_end_idx,:] = vardata # here time is always the outermost index
else: ncvar[daily_start_idx:daily_end_idx] = vardata
daily_dataset.sync()
# loop over derived variables
# special treatment for certain string variables
if 'Times' in pqset: pqdata['Times'] = currenttimestamps[:wrfendidx-wrfstartidx] # need same length as actual time dimension
logger.debug('\n{0:s} Available prerequisites: {1:s}'.format(pidstr, str(list(pqdata.keys()))))
for dename,devar in derived_vars.items():
if not devar.linear: # only non-linear ones here, linear one at the end
logger.debug('{0:s} {1:s} {2:s}'.format(pidstr, dename, str(devar.prerequisites)))
tmp = devar.computeValues(pqdata, aggax=tax, delta=delta, const=const, tmp=tmpdata) # possibly needed as pre-requisite
dedata[dename] = devar.aggregateValues(tmp, aggdata=dedata[dename], aggax=tax)
# N.B.: in-place operations with non-masked array destroy the mask, hence need to use this
if dename in pqset: pqdata[dename] = tmp
# save to daily output
if ldaily:
if dename in daily_derived_vars:
ncvar = daily_dataset.variables[dename] # destination variable in daily output
vardata = tmp
if missing_value is not None: # make sure the missing value flag is preserved
vardata = np.where(np.isnan(vardata), missing_value, vardata)
ncvar.missing_value = missing_value # just to make sure
if ncvar.ndim > 1: ncvar[daily_start_idx:daily_end_idx,:] = vardata # here time is always the outermost index
else: ncvar[daily_start_idx:daily_end_idx] = vardata
# N.B.: missing values should be handled implicitly, following missing values in pre-requisites
del tmp # memory hygiene
if ldaily:
# add time in seconds, based on index and time delta
daily_dataset.variables[time][daily_start_idx:daily_end_idx] = np.arange(daily_start_idx,daily_end_idx, dtype='i8')*int(delta)
daily_dataset.end_date = dv.getTimeStamp(wrfout, wrfendidx-1, wrftimestamp) # update current end date
# N.B.: adding the time coordinate and attributes finalized this step
# sync data and clear memory
daily_dataset.sync(); daily_dataset.close() # sync and close dataset
del daily_dataset, ncvar, vardata # remove all other references to data
gc.collect() # clean up memory
# N.B.: the netCDF4 module keeps all data written to a netcdf file in memory; there is no flush command
daily_dataset = nc.Dataset(tmp_daily_filepath, mode='a', format='NETCDF4') # re-open to append more data (mode='a')
# N.B.: flushing the mean file here prevents repeated close/re-open when no data was written (i.e.
            #       the month was skipped); only flush memory when data was actually written.
# increment counters
ntime += wrfendidx - wrfstartidx
if lcomplete:
# N.B.: now wrfendidx should be a valid time step
# check time steps for this month
laststamp = monthlytimestamps[0]
for timestamp in monthlytimestamps[1:]:
if laststamp >= timestamp:
raise DateError('Timestamps not in order, or repetition: {:s}'.format(timestamp))
laststamp = timestamp
# calculate time period and check against model time (if available)
timeperiod = dv.calcTimeDelta(monthlytimestamps)
if lxtime:
xtime += wrfout.variables[wrfxtime][wrfendidx] # get final time interval (in minutes)
xtime *= 60. # convert minutes to seconds
if timeperiod != xtime:
logger.info("Time calculation from time stamps and model time are inconsistent: {:f} != {:f}".format(timeperiod,xtime))
# two possible ends: month is done or reached end of file
# if we reached the end of the file, open a new one and go again
if not lcomplete:
# N.B.: here wrfendidx is not a valid time step, but the length of the file, i.e. wrfendidx-1 is the last valid time step
          lasttimestamp = str(nc.chartostring(wrfout.variables[wrftimestamp][wrfendidx-1,:])) # needed to determine if the first timestep is the same as the last
assert lskip or lasttimestamp == monthlytimestamps[-1]
# lasttimestep is also used for leap-year detection later on
assert len(wrfout.dimensions[wrftime]) == wrfendidx, (len(wrfout.dimensions[wrftime]),wrfendidx) # wrfendidx should be the length of the file, not the last index!
## find first timestep (compare to last of previous file) and (re-)set time step counter
# initialize search
tmptimestamp = lasttimestamp; filelen1 = len(wrfout.dimensions[wrftime]) - 1; wrfstartidx = filelen1;
while tmptimestamp <= lasttimestamp:
if wrfstartidx < filelen1:
wrfstartidx += 1 # step forward in current file
else:
# open next file, if we reach the end
wrfout.close() # close file
#del wrfout; gc.collect() # doesn't seem to work here - strange error
# N.B.: filecounter +1 < len(filelist) is already checked above
filecounter += 1 # move to next file
if filecounter < len(filelist):
logger.debug("\n{0:s} Opening input file '{1:s}'.\n".format(pidstr,filelist[filecounter]))
wrfout = nc.Dataset(infolder+filelist[filecounter], 'r', format='NETCDF4') # ... and open new one
filelen1 = len(wrfout.dimensions[wrftime]) - 1 # length of new file
wrfstartidx = 0 # reset index
# check consistency of missing value flag
assert missing_value is None or missing_value == wrfout.P_LEV_MISSING
else: break # this is not really tested...
tmptimestamp = str(nc.chartostring(wrfout.variables[wrftimestamp][wrfstartidx,:]))
# some checks
firsttimestamp = str(nc.chartostring(wrfout.variables[wrftimestamp][0,:]))
error_string = "Inconsistent time-stamps between files:\n lasttimestamp='{:s}', firsttimestamp='{:s}', wrfstartidx={:d}"
if firsttimestamp == lasttimestamp: # skip the initialization step (was already processed in last step)
if wrfstartidx != 1: raise DateError(error_string.format(lasttimestamp, firsttimestamp, wrfstartidx))
if firsttimestamp > lasttimestamp: # no duplicates: first timestep in next file was not present in previous file
if wrfstartidx != 0: raise DateError(error_string.format(lasttimestamp, firsttimestamp, wrfstartidx))
if firsttimestamp < lasttimestamp: # files overlap: count up to next timestamp in sequence
#if wrfstartidx == 2: warn(error_string.format(lasttimestamp, firsttimestamp, wrfstartidx))
if wrfstartidx == 0: raise DateError(error_string.format(lasttimestamp, firsttimestamp, wrfstartidx))
else: # month complete
# print feedback (the current month) to indicate completion
if lparallel: progressstr += '{0:s}, '.format(currentdate) # bundle output in parallel mode
else: logger.info('{0:s},'.format(currentdate)) # serial mode
# clear temporary storage
if lcarryover:
for devar in list(derived_vars.values()):
if not (devar.tmpdata is None or devar.carryover):
if devar.tmpdata in tmpdata: del tmpdata[devar.tmpdata]
else: tmpdata = dict() # reset entire temporary storage
# N.B.: now wrfendidx is a valid timestep, but indicates the first of the next month
lasttimestamp = str(nc.chartostring(wrfout.variables[wrftimestamp][wrfendidx,:])) # this should be the first timestep of the next month
assert lskip or lasttimestamp == monthlytimestamps[-1]
# open next file (if end of month and file coincide)
if wrfendidx == len(wrfout.dimensions[wrftime])-1: # reach end of file
## find first timestep (compare to last of previous file) and (re-)set time step counter
# initialize search
tmptimestamp = lasttimestamp; filelen1 = len(wrfout.dimensions[wrftime]) - 1; wrfstartidx = filelen1;
while tmptimestamp <= lasttimestamp:
if wrfstartidx < filelen1:
wrfstartidx += 1 # step forward in current file
else:
# open next file, if we reach the end
wrfout.close() # close file
#del wrfout; gc.collect() # doesn't seem to work here - strange error
# N.B.: filecounter +1 < len(filelist) is already checked above
filecounter += 1 # move to next file
if filecounter < len(filelist):
logger.debug("\n{0:s} Opening input file '{1:s}'.\n".format(pidstr,filelist[filecounter]))
wrfout = nc.Dataset(infolder+filelist[filecounter], 'r', format='NETCDF4') # ... and open new one
filelen1 = len(wrfout.dimensions[wrftime]) - 1 # length of new file
wrfstartidx = 0 # reset index
# check consistency of missing value flag
assert missing_value is None or missing_value == wrfout.P_LEV_MISSING
else: break # this is not really tested...
tmptimestamp = str(nc.chartostring(wrfout.variables[wrftimestamp][wrfstartidx,:]))
# N.B.: same code as in "not complete" section
# wrfout.close() # close file
# #del wrfout; gc.collect() # doesn't seem to work here - strange error
# filecounter += 1 # move to next file
# if filecounter < len(filelist):
# logger.debug("\n{0:s} Opening input file '{1:s}'.\n".format(pidstr,filelist[filecounter]))
# wrfout = nc.Dataset(infolder+filelist[filecounter], 'r', format='NETCDF4') # ... and open new one
# firsttimestamp = str(nc.chartostring(wrfout.variables[wrftimestamp][0,:])) # check first timestep (compare to last of previous file)
# wrfstartidx = 0 # always use initialization step (but is reset above anyway)
# if firsttimestamp != lasttimestamp:
# raise NotImplementedError, "If the first timestep of the next month is the last timestep in the file, it has to be duplicated in the next file."
        ## now the loop over files has terminated and we need to normalize and save the results
if not lskip:
# extend time axis
monthly_dataset.variables[time][meanidx] = -1 # mark timestep in progress
ncvar = None; vardata = None # dummies, to prevent crash later on, if varlist is empty
# loop over variable names
for varname in varlist:
vardata = data[varname]
# decide how to normalize
if varname in acclist: vardata /= timeperiod
else: vardata /= ntime
# save variable
ncvar = monthly_dataset.variables[varname] # this time the destination variable
if missing_value is not None: # make sure the missing value flag is preserved
vardata = np.where(np.isnan(vardata), missing_value, vardata)
ncvar.missing_value = missing_value # just to make sure
if ncvar.ndim > 1: ncvar[meanidx,:] = vardata # here time is always the outermost index
else: ncvar[meanidx] = vardata
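        # N.B.: normalization differs by variable type: accumulated fields are divided by the
        #       length of the month in seconds ('timeperiod'), giving mean rates per second,
        #       while all other fields are divided by the number of output steps ('ntime'),
        #       giving plain arithmetic means.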
# compute derived variables
#logger.debug('\n{0:s} Derived Variable Stats: (mean/min/max)'.format(pidstr))
for dename,devar in derived_vars.items():
if devar.linear:
vardata = devar.computeValues(data) # compute derived variable now from averages
elif devar.normalize:
vardata = dedata[dename] / ntime # no accumulated variables here!
else: vardata = dedata[dename] # just the data...
# not all variables are normalized (e.g. extrema)
#if ldebug:
# mmm = (float(np.nanmean(vardata)),float(np.nanmin(vardata)),float(np.nanmax(vardata)),)
# logger.debug('{0:s} {1:s}, {2:f}, {3:f}, {4:f}'.format(pidstr,dename,*mmm))
data[dename] = vardata # add to data array, so that it can be used to compute linear variables
# save variable
ncvar = monthly_dataset.variables[dename] # this time the destination variable
if missing_value is not None: # make sure the missing value flag is preserved
vardata = np.where(np.isnan(vardata), missing_value, vardata)
ncvar.missing_value = missing_value # just to make sure
if ncvar.ndim > 1: ncvar[meanidx,:] = vardata # here time is always the outermost index
else: ncvar[meanidx] = vardata
#raise dv.DerivedVariableError, "%s Derived variable '%s' is not linear."%(pidstr,devar.name)
# update current end date
monthly_dataset.end_date = str(nc.chartostring(firsttimestamp_chars[:10])) # the date of the first day of the last included month
monthly_dataset.variables[wrftimestamp][meanidx,:] = firsttimestamp_chars
monthly_dataset.variables[time][meanidx] = meantime # update time axis (last action)
# sync data and clear memory
monthly_dataset.sync(); monthly_dataset.close() # sync and close dataset
del monthly_dataset, ncvar, vardata # remove all other references to data
gc.collect() # clean up memory
# N.B.: the netCDF4 module keeps all data written to a netcdf file in memory; there is no flush command
monthly_dataset = nc.Dataset(tmp_monthly_filepath, mode='a', format='NETCDF4') # re-open to append more data (mode='a')
# N.B.: flushing the mean file here prevents repeated close/re-open when no data was written (i.e.
        #       the month was skipped); only flush memory when data was actually written.
ec = 0 # set zero exit code for this operation
except Exception:
# report error
    logger.exception('\n # {0:s} WARNING: an error occurred while stepping through files! '.format(pidstr)+
'\n # Last State: month={0:d}, variable={1:s}, file={2:s}'.format(meanidx,varname,filelist[filecounter])+
'\n # Saving current data and exiting\n')
wrfout.close()
#logger.exception(pidstr) # print stack trace of last exception and current process ID
ec = 1 # set non-zero exit code
# N.B.: this enables us to still close the file!
## here the loop over months finishes and we can close the output file
# print progress
# save to file
if not lparallel: logger.info('') # terminate the line (of dates)
else: logger.info('\n{0:s} Processed dates: {1:s}'.format(pidstr, progressstr))
monthly_dataset.sync()
logger.info("\n{0:s} Writing monthly output to: {1:s}\n('{2:s}')\n".format(pidstr, monthly_file, monthly_filepath))
if ldaily:
daily_dataset.sync()
logger.info("\n{0:s} Writing (sub-)daily output to: {1:s}\n('{2:s}')\n".format(pidstr, daily_file, daily_filepath))
# Finalize: close files and rename to proper names, clean up
monthly_dataset.close() # close NetCDF file
os.rename(tmp_monthly_filepath,monthly_filepath) # rename file to proper name
del monthly_dataset, data # clean up memory
if ldaily:
daily_dataset.close() # close NetCDF file
os.rename(tmp_daily_filepath,daily_filepath) # rename file to proper name
del daily_dataset # clean up memory
gc.collect()
# return exit code
return ec
## now begin execution
if __name__ == '__main__':
# print settings
print('')
print(('OVERWRITE: {:s}, RECOVER: {:s}, CARRYOVER: {:s}, SMPLDIFF: {:s}'.format(
str(loverwrite), str(lrecover), str(lcarryover), str(lsmplDiff))))
print(('DERIVEDONLY: {:s}, ADDNEW: {:s}, RECALC: {:s}'.format(
str(lderivedonly), str(laddnew), str(recalcvars) if lrecalc else str(lrecalc))))
print(('DAILY: {:s}, FILETYPES: {:s}, DOMAINS: {:s}'.format(str(lglobaldaily),str(filetypes),str(domains))))
print(('THREADS: {:s}, DEBUG: {:s}'.format(str(NP),str(ldebug))))
print('')
# compile regular expression, used to infer start and end dates and month (later, during computation)
datestr = '{0:s}-{1:s}-{2:s}'.format(yearstr,monthstr,daystr)
datergx = re.compile(datestr)
# get file list
wrfrgx = re.compile(inputpattern.format('.*','\d\d',datestr,)) # for initial search (all filetypes)
# regular expression to match the name pattern of WRF timestep output files
masterlist = [wrfrgx.match(filename) for filename in os.listdir(infolder)] # list folder and match
masterlist = [match.group() for match in masterlist if match is not None] # assemble valid file list
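  # N.B.: 'masterlist' now contains every file in 'infolder' that matches the generic WRF output
  #       pattern (any filetype, any domain); it is filtered again below, per filetype and domain,
  #       to build the actual job list.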
if len(masterlist) == 0:
raise IOError('No matching WRF output files found for date: {0:s}'.format(datestr))
## loop over filetypes and domains to construct job list
args = []
for filetype in filetypes:
# make list of files
filelist = []
for domain in domains:
typergx = re.compile(inputpattern.format(filetype,"{:02d}".format(domain), datestr))
# N.B.: domain has to be inserted as string, because above it is replaced by a regex
# regular expression to also match type and domain index
filelist = [typergx.match(filename) for filename in masterlist] # list folder and match
filelist = [match.group() for match in filelist if match is not None] # assemble valid file list
filelist.sort() # now, when the list is shortest, we can sort...
      # N.B.: sort alphabetically, so that files are in temporal sequence
# now put everything into the lists
if len(filelist) > 0:
args.append( (filelist, filetype, domain) )
else:
print(("Can not process filetype '{:s}' (domain {:d}): no source files.".format(filetype,domain)))
print('\n')
# call parallel execution function
kwargs = dict() # no keyword arguments
ec = asyncPoolEC(processFileList, args, kwargs, NP=NP, ldebug=ldebug, ltrialnerror=True)
# exit with number of failures plus 10 as exit code
exit(int(10+ec) if ec > 0 else 0)
| gpl-3.0 | 659,906,441,429,333,200 | 64.054054 | 196 | 0.6327 | false | 3.803074 | false | false | false |
Aloomaio/googleads-python-lib | examples/ad_manager/v201808/creative_wrapper_service/update_creative_wrappers.py | 1 | 2747 | #!/usr/bin/env python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example updates a creative wrapper to the 'OUTER' wrapping order.
To determine which creative wrappers exist, run get_all_creative_wrappers.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
# Set the ID of the creative wrapper to update.
CREATIVE_WRAPPER_ID = 'INSERT_CREATIVE_WRAPPER_ID_HERE'
def main(client, creative_wrapper_id):
# Initialize appropriate service.
creative_wrapper_service = client.GetService('CreativeWrapperService',
version='v201808')
# Create statement to get a creative wrapper by ID.
statement = (ad_manager.StatementBuilder(version='v201808')
.Where('id = :creativeWrapperId')
.WithBindVariable('creativeWrapperId',
long(creative_wrapper_id)))
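  # The statement uses a PQL bind variable, so the creative wrapper ID is passed as a typed
  # value rather than being concatenated into the WHERE clause.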
# Get creative wrappers.
response = creative_wrapper_service.getCreativeWrappersByStatement(
statement.ToStatement())
if 'results' in response and len(response['results']):
updated_creative_wrappers = []
for creative_wrapper in response['results']:
creative_wrapper['ordering'] = 'OUTER'
updated_creative_wrappers.append(creative_wrapper)
# Update the creative wrappers on the server.
creative_wrappers = creative_wrapper_service.updateCreativeWrappers(
updated_creative_wrappers)
# Display results.
for creative_wrapper in creative_wrappers:
print (('Creative wrapper with ID "%s" and wrapping order "%s" '
'was updated.') % (creative_wrapper['id'],
creative_wrapper['ordering']))
else:
print 'No creative wrappers found to update.'
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client, CREATIVE_WRAPPER_ID)
| apache-2.0 | 8,009,690,733,829,738,000 | 36.630137 | 78 | 0.705497 | false | 4.106129 | false | false | false |
icomms/wqmanager | apps/domain/models.py | 1 | 6972 | from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.db import models
from domain import Permissions
from wqm.models import WqmAuthority
from locations.models import LocationType
##############################################################################################################
#
# Originally had my own hacky global storage of content type, but it turns out that contenttype.models
# wisely caches content types! No hit to the db beyond the first call - no need for us to do our own
# custom caching.
#
# See ContentType.get_for_model() code for details.
class Domain(models.Model):
'''Domain is the highest level collection of people/stuff
in the system. Pretty much everything happens at the
domain-level, including user membership, permission to
see data, reports, charts, etc.'''
name = models.CharField(max_length=64, unique=True)
full_name = models.CharField(max_length = 100, null=True)
is_active = models.BooleanField(default=False)
#description = models.CharField(max_length=255, null=True, blank=True)
#timezone = models.CharField(max_length=64,null=True)
# Utility function - gets active domains in which user has an active membership
# Note that User.is_active is not checked here - we're only concerned about usable
# domains in which the user can theoretically participate, not whether the user
# is cleared to login.
@staticmethod
def active_for_user(user):
return Domain.objects.filter( membership__member_type = ContentType.objects.get_for_model(User),
membership__member_id = user.id,
membership__is_active=True, # Looks in membership table
is_active=True) # Looks in domain table
def save(self, *args, **kwargs):
edit = False
if self.pk is not None:
edit = True
super(Domain, self).save(*args, **kwargs)
if edit:
wqmauthority = WqmAuthority.objects.get(domain=self)
wqmauthority.code = self.name
wqmauthority.name = self.full_name
wqmauthority.save()
else:
type = LocationType.objects.get(name="authority")
wqmauthority = WqmAuthority(name=self.full_name, domain=self, type=type, code=self.name)
wqmauthority.save()
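    # Note: the save() override above keeps a WqmAuthority row in sync with every Domain -
    # it updates the existing authority on edit and creates a new one (with LocationType
    # "authority") when the domain is first saved.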
def __unicode__(self):
return self.name
##############################################################################################################
#
# Use cases:
#
# Get all members in a domain:
# Member.objects.filter(member_type = 3, domain = 1) then iterate - slow, because of one query (for User) per row
# User.objects.filter(membership__domain = 2) - fast, but requires the addition of a GenericRelation to User.
# See UserInDomain, below.
#
# Get all domains to which a member belongs:
# User.objects.get(id = 1).membership.all() and then iterate to pick out domains - slow, because of one query
# (for Domain) per row. Requires GenericRelation on User.
# Member.objects.filter(member_type = 3, member_id = 1).query.as_sql() Generate same SQL, and require same
# slow iteration
# Domain.objects.filter(membership__member_type = 3, membership__member_id = 1) - fast, and requires no new fields
# (as Domain is a FK of Member)
#
member_limits = {'model__in':('user', 'formdatagroup')}
class Membership(models.Model):
domain = models.ForeignKey(Domain)
member_type = models.ForeignKey(ContentType, limit_choices_to=member_limits)
member_id = models.PositiveIntegerField()
member_object = generic.GenericForeignKey('member_type', 'member_id')
is_active = models.BooleanField(default=False)
def __unicode__(self):
return str(self.member_type) + str(self.member_id) + str(self.member_object)
##############################################################################################################
class RegistrationRequest(models.Model):
tos_confirmed = models.BooleanField(default=False)
# No verbose name on times and IPs - filled in on server
request_time = models.DateTimeField()
request_ip = models.IPAddressField()
activation_guid = models.CharField(max_length=32, unique=True)
# confirm info is blank until a confirming click is received
confirm_time = models.DateTimeField(null=True, blank=True)
confirm_ip = models.IPAddressField(null=True, blank=True)
domain = models.OneToOneField(Domain)
new_user = models.ForeignKey(User, related_name='new_user') # Not clear if we'll always create a new user - might be many reqs to one user, thus FK
# requesting_user is only filled in if a logged-in user requests a domain.
requesting_user = models.ForeignKey(User, related_name='requesting_user', null=True, blank=True) # blank and null -> FK is optional.
class Meta:
db_table = 'domain_registration_request'
# To be added:
# language
# number pref
# currency pref
# date pref
# time pref
##############################################################################################################
class Settings(models.Model):
domain = models.OneToOneField(Domain)
max_users = models.PositiveIntegerField()
# To be added - all of the date, time, etc. fields that will go into RegistrationRequest
##############################################################################################################
#
# http://bolhoed.net/blog/how-to-dynamically-add-fields-to-a-django-model shows:
#
# User.add_to_class('membership', generic.GenericRelation(Membership, content_type_field='member_type', object_id_field='member_id'))
#
# Rather than that hackery, I tried to implement a trivial proxy model for User, containing just the
# GenericRelation field. Doesn't work, though! Django complains about a field being defined on a proxy model.
#
# Looks like we have to enable the above hackery if we want an easy means of filtering users in a domain. Makes
# life easier, too, in that views will have access to this information.
#
User.add_to_class('domain_membership',
generic.GenericRelation( Membership, content_type_field='member_type', object_id_field='member_id' ) )
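# With this relation in place, domain-scoped user queries can be written directly; e.g. (sketch):
#   User.objects.filter(domain_membership__domain=some_domain, domain_membership__is_active=True)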
##############################################################################################################
# Monkeypatch a function onto User to tell if user is administrator of selected domain
def _admin_p (self):
dom = getattr(self, 'selected_domain', None)
if dom is not None:
return self.has_row_perm(dom, Permissions.ADMINISTRATOR)
else:
return False
User.is_selected_dom_admin = _admin_p | bsd-3-clause | 1,128,780,121,399,136,400 | 44.575163 | 152 | 0.613884 | false | 4.26683 | false | false | false |
ArcherSys/ArcherSys | Lib/opcode.py | 1 | 16466 | <<<<<<< HEAD
<<<<<<< HEAD
"""
opcode module - potentially shared between dis and other modules which
operate on bytecodes (e.g. peephole optimizers).
"""
__all__ = ["cmp_op", "hasconst", "hasname", "hasjrel", "hasjabs",
"haslocal", "hascompare", "hasfree", "opname", "opmap",
"HAVE_ARGUMENT", "EXTENDED_ARG", "hasnargs"]
# It's a chicken-and-egg I'm afraid:
# We're imported before _opcode's made.
# With exception unheeded
# (stack_effect is not needed)
# Both our chickens and eggs are allayed.
# --Larry Hastings, 2013/11/23
try:
from _opcode import stack_effect
__all__.append('stack_effect')
except ImportError:
pass
cmp_op = ('<', '<=', '==', '!=', '>', '>=', 'in', 'not in', 'is',
'is not', 'exception match', 'BAD')
hasconst = []
hasname = []
hasjrel = []
hasjabs = []
haslocal = []
hascompare = []
hasfree = []
hasnargs = []
opmap = {}
opname = [''] * 256
for op in range(256): opname[op] = '<%r>' % (op,)
del op
def def_op(name, op):
opname[op] = name
opmap[name] = op
def name_op(name, op):
def_op(name, op)
hasname.append(op)
def jrel_op(name, op):
def_op(name, op)
hasjrel.append(op)
def jabs_op(name, op):
def_op(name, op)
hasjabs.append(op)
# Instruction opcodes for compiled code
# Blank lines correspond to available opcodes
def_op('POP_TOP', 1)
def_op('ROT_TWO', 2)
def_op('ROT_THREE', 3)
def_op('DUP_TOP', 4)
def_op('DUP_TOP_TWO', 5)
def_op('NOP', 9)
def_op('UNARY_POSITIVE', 10)
def_op('UNARY_NEGATIVE', 11)
def_op('UNARY_NOT', 12)
def_op('UNARY_INVERT', 15)
def_op('BINARY_POWER', 19)
def_op('BINARY_MULTIPLY', 20)
def_op('BINARY_MODULO', 22)
def_op('BINARY_ADD', 23)
def_op('BINARY_SUBTRACT', 24)
def_op('BINARY_SUBSCR', 25)
def_op('BINARY_FLOOR_DIVIDE', 26)
def_op('BINARY_TRUE_DIVIDE', 27)
def_op('INPLACE_FLOOR_DIVIDE', 28)
def_op('INPLACE_TRUE_DIVIDE', 29)
def_op('STORE_MAP', 54)
def_op('INPLACE_ADD', 55)
def_op('INPLACE_SUBTRACT', 56)
def_op('INPLACE_MULTIPLY', 57)
def_op('INPLACE_MODULO', 59)
def_op('STORE_SUBSCR', 60)
def_op('DELETE_SUBSCR', 61)
def_op('BINARY_LSHIFT', 62)
def_op('BINARY_RSHIFT', 63)
def_op('BINARY_AND', 64)
def_op('BINARY_XOR', 65)
def_op('BINARY_OR', 66)
def_op('INPLACE_POWER', 67)
def_op('GET_ITER', 68)
def_op('PRINT_EXPR', 70)
def_op('LOAD_BUILD_CLASS', 71)
def_op('YIELD_FROM', 72)
def_op('INPLACE_LSHIFT', 75)
def_op('INPLACE_RSHIFT', 76)
def_op('INPLACE_AND', 77)
def_op('INPLACE_XOR', 78)
def_op('INPLACE_OR', 79)
def_op('BREAK_LOOP', 80)
def_op('WITH_CLEANUP', 81)
def_op('RETURN_VALUE', 83)
def_op('IMPORT_STAR', 84)
def_op('YIELD_VALUE', 86)
def_op('POP_BLOCK', 87)
def_op('END_FINALLY', 88)
def_op('POP_EXCEPT', 89)
HAVE_ARGUMENT = 90 # Opcodes from here have an argument:
name_op('STORE_NAME', 90) # Index in name list
name_op('DELETE_NAME', 91) # ""
def_op('UNPACK_SEQUENCE', 92) # Number of tuple items
jrel_op('FOR_ITER', 93)
def_op('UNPACK_EX', 94)
name_op('STORE_ATTR', 95) # Index in name list
name_op('DELETE_ATTR', 96) # ""
name_op('STORE_GLOBAL', 97) # ""
name_op('DELETE_GLOBAL', 98) # ""
def_op('LOAD_CONST', 100) # Index in const list
hasconst.append(100)
name_op('LOAD_NAME', 101) # Index in name list
def_op('BUILD_TUPLE', 102) # Number of tuple items
def_op('BUILD_LIST', 103) # Number of list items
def_op('BUILD_SET', 104) # Number of set items
def_op('BUILD_MAP', 105) # Number of dict entries (upto 255)
name_op('LOAD_ATTR', 106) # Index in name list
def_op('COMPARE_OP', 107) # Comparison operator
hascompare.append(107)
name_op('IMPORT_NAME', 108) # Index in name list
name_op('IMPORT_FROM', 109) # Index in name list
jrel_op('JUMP_FORWARD', 110) # Number of bytes to skip
jabs_op('JUMP_IF_FALSE_OR_POP', 111) # Target byte offset from beginning of code
jabs_op('JUMP_IF_TRUE_OR_POP', 112) # ""
jabs_op('JUMP_ABSOLUTE', 113) # ""
jabs_op('POP_JUMP_IF_FALSE', 114) # ""
jabs_op('POP_JUMP_IF_TRUE', 115) # ""
name_op('LOAD_GLOBAL', 116) # Index in name list
jabs_op('CONTINUE_LOOP', 119) # Target address
jrel_op('SETUP_LOOP', 120) # Distance to target address
jrel_op('SETUP_EXCEPT', 121) # ""
jrel_op('SETUP_FINALLY', 122) # ""
def_op('LOAD_FAST', 124) # Local variable number
haslocal.append(124)
def_op('STORE_FAST', 125) # Local variable number
haslocal.append(125)
def_op('DELETE_FAST', 126) # Local variable number
haslocal.append(126)
def_op('RAISE_VARARGS', 130) # Number of raise arguments (1, 2, or 3)
def_op('CALL_FUNCTION', 131) # #args + (#kwargs << 8)
hasnargs.append(131)
def_op('MAKE_FUNCTION', 132) # Number of args with default values
def_op('BUILD_SLICE', 133) # Number of items
def_op('MAKE_CLOSURE', 134)
def_op('LOAD_CLOSURE', 135)
hasfree.append(135)
def_op('LOAD_DEREF', 136)
hasfree.append(136)
def_op('STORE_DEREF', 137)
hasfree.append(137)
def_op('DELETE_DEREF', 138)
hasfree.append(138)
def_op('CALL_FUNCTION_VAR', 140) # #args + (#kwargs << 8)
hasnargs.append(140)
def_op('CALL_FUNCTION_KW', 141) # #args + (#kwargs << 8)
hasnargs.append(141)
def_op('CALL_FUNCTION_VAR_KW', 142) # #args + (#kwargs << 8)
hasnargs.append(142)
jrel_op('SETUP_WITH', 143)
def_op('LIST_APPEND', 145)
def_op('SET_ADD', 146)
def_op('MAP_ADD', 147)
def_op('LOAD_CLASSDEREF', 148)
hasfree.append(148)
def_op('EXTENDED_ARG', 144)
EXTENDED_ARG = 144
del def_op, name_op, jrel_op, jabs_op
=======
"""
opcode module - potentially shared between dis and other modules which
operate on bytecodes (e.g. peephole optimizers).
"""
__all__ = ["cmp_op", "hasconst", "hasname", "hasjrel", "hasjabs",
"haslocal", "hascompare", "hasfree", "opname", "opmap",
"HAVE_ARGUMENT", "EXTENDED_ARG", "hasnargs"]
# It's a chicken-and-egg I'm afraid:
# We're imported before _opcode's made.
# With exception unheeded
# (stack_effect is not needed)
# Both our chickens and eggs are allayed.
# --Larry Hastings, 2013/11/23
try:
from _opcode import stack_effect
__all__.append('stack_effect')
except ImportError:
pass
cmp_op = ('<', '<=', '==', '!=', '>', '>=', 'in', 'not in', 'is',
'is not', 'exception match', 'BAD')
hasconst = []
hasname = []
hasjrel = []
hasjabs = []
haslocal = []
hascompare = []
hasfree = []
hasnargs = []
opmap = {}
opname = [''] * 256
for op in range(256): opname[op] = '<%r>' % (op,)
del op
def def_op(name, op):
opname[op] = name
opmap[name] = op
def name_op(name, op):
def_op(name, op)
hasname.append(op)
def jrel_op(name, op):
def_op(name, op)
hasjrel.append(op)
def jabs_op(name, op):
def_op(name, op)
hasjabs.append(op)
# Instruction opcodes for compiled code
# Blank lines correspond to available opcodes
def_op('POP_TOP', 1)
def_op('ROT_TWO', 2)
def_op('ROT_THREE', 3)
def_op('DUP_TOP', 4)
def_op('DUP_TOP_TWO', 5)
def_op('NOP', 9)
def_op('UNARY_POSITIVE', 10)
def_op('UNARY_NEGATIVE', 11)
def_op('UNARY_NOT', 12)
def_op('UNARY_INVERT', 15)
def_op('BINARY_POWER', 19)
def_op('BINARY_MULTIPLY', 20)
def_op('BINARY_MODULO', 22)
"""
opcode module - potentially shared between dis and other modules which
operate on bytecodes (e.g. peephole optimizers).
"""
__all__ = ["cmp_op", "hasconst", "hasname", "hasjrel", "hasjabs",
"haslocal", "hascompare", "hasfree", "opname", "opmap",
"HAVE_ARGUMENT", "EXTENDED_ARG", "hasnargs"]
# It's a chicken-and-egg I'm afraid:
# We're imported before _opcode's made.
# With exception unheeded
# (stack_effect is not needed)
# Both our chickens and eggs are allayed.
# --Larry Hastings, 2013/11/23
try:
from _opcode import stack_effect
__all__.append('stack_effect')
except ImportError:
pass
cmp_op = ('<', '<=', '==', '!=', '>', '>=', 'in', 'not in', 'is',
'is not', 'exception match', 'BAD')
hasconst = []
hasname = []
hasjrel = []
hasjabs = []
haslocal = []
hascompare = []
hasfree = []
hasnargs = []
opmap = {}
opname = [''] * 256
for op in range(256): opname[op] = '<%r>' % (op,)
del op
def def_op(name, op):
opname[op] = name
opmap[name] = op
def name_op(name, op):
def_op(name, op)
hasname.append(op)
def jrel_op(name, op):
def_op(name, op)
hasjrel.append(op)
def jabs_op(name, op):
def_op(name, op)
hasjabs.append(op)
# Instruction opcodes for compiled code
# Blank lines correspond to available opcodes
def_op('POP_TOP', 1)
def_op('ROT_TWO', 2)
def_op('ROT_THREE', 3)
def_op('DUP_TOP', 4)
def_op('DUP_TOP_TWO', 5)
def_op('NOP', 9)
def_op('UNARY_POSITIVE', 10)
def_op('UNARY_NEGATIVE', 11)
def_op('UNARY_NOT', 12)
def_op('UNARY_INVERT', 15)
def_op('BINARY_POWER', 19)
def_op('BINARY_MULTIPLY', 20)
def_op('BINARY_MODULO', 22)
def_op('BINARY_ADD', 23)
def_op('BINARY_SUBTRACT', 24)
def_op('BINARY_SUBSCR', 25)
def_op('BINARY_FLOOR_DIVIDE', 26)
def_op('BINARY_TRUE_DIVIDE', 27)
def_op('INPLACE_FLOOR_DIVIDE', 28)
def_op('INPLACE_TRUE_DIVIDE', 29)
def_op('STORE_MAP', 54)
def_op('INPLACE_ADD', 55)
def_op('INPLACE_SUBTRACT', 56)
def_op('INPLACE_MULTIPLY', 57)
def_op('INPLACE_MODULO', 59)
def_op('STORE_SUBSCR', 60)
def_op('DELETE_SUBSCR', 61)
def_op('BINARY_LSHIFT', 62)
def_op('BINARY_RSHIFT', 63)
def_op('BINARY_AND', 64)
def_op('BINARY_XOR', 65)
def_op('BINARY_OR', 66)
def_op('INPLACE_POWER', 67)
def_op('GET_ITER', 68)
def_op('PRINT_EXPR', 70)
def_op('LOAD_BUILD_CLASS', 71)
def_op('YIELD_FROM', 72)
def_op('INPLACE_LSHIFT', 75)
def_op('INPLACE_RSHIFT', 76)
def_op('INPLACE_AND', 77)
def_op('INPLACE_XOR', 78)
def_op('INPLACE_OR', 79)
def_op('BREAK_LOOP', 80)
def_op('WITH_CLEANUP', 81)
def_op('RETURN_VALUE', 83)
def_op('IMPORT_STAR', 84)
def_op('YIELD_VALUE', 86)
def_op('POP_BLOCK', 87)
def_op('END_FINALLY', 88)
def_op('POP_EXCEPT', 89)
HAVE_ARGUMENT = 90 # Opcodes from here have an argument:
name_op('STORE_NAME', 90) # Index in name list
name_op('DELETE_NAME', 91) # ""
def_op('UNPACK_SEQUENCE', 92) # Number of tuple items
jrel_op('FOR_ITER', 93)
def_op('UNPACK_EX', 94)
name_op('STORE_ATTR', 95) # Index in name list
name_op('DELETE_ATTR', 96) # ""
name_op('STORE_GLOBAL', 97) # ""
name_op('DELETE_GLOBAL', 98) # ""
def_op('LOAD_CONST', 100) # Index in const list
hasconst.append(100)
name_op('LOAD_NAME', 101) # Index in name list
def_op('BUILD_TUPLE', 102) # Number of tuple items
def_op('BUILD_LIST', 103) # Number of list items
def_op('BUILD_SET', 104) # Number of set items
def_op('BUILD_MAP', 105) # Number of dict entries (upto 255)
name_op('LOAD_ATTR', 106) # Index in name list
def_op('COMPARE_OP', 107) # Comparison operator
hascompare.append(107)
name_op('IMPORT_NAME', 108) # Index in name list
name_op('IMPORT_FROM', 109) # Index in name list
jrel_op('JUMP_FORWARD', 110) # Number of bytes to skip
jabs_op('JUMP_IF_FALSE_OR_POP', 111) # Target byte offset from beginning of code
jabs_op('JUMP_IF_TRUE_OR_POP', 112) # ""
jabs_op('JUMP_ABSOLUTE', 113) # ""
jabs_op('POP_JUMP_IF_FALSE', 114) # ""
jabs_op('POP_JUMP_IF_TRUE', 115) # ""
name_op('LOAD_GLOBAL', 116) # Index in name list
jabs_op('CONTINUE_LOOP', 119) # Target address
jrel_op('SETUP_LOOP', 120) # Distance to target address
jrel_op('SETUP_EXCEPT', 121) # ""
jrel_op('SETUP_FINALLY', 122) # ""
def_op('LOAD_FAST', 124) # Local variable number
haslocal.append(124)
def_op('STORE_FAST', 125) # Local variable number
haslocal.append(125)
def_op('DELETE_FAST', 126) # Local variable number
haslocal.append(126)
def_op('RAISE_VARARGS', 130) # Number of raise arguments (1, 2, or 3)
def_op('CALL_FUNCTION', 131) # #args + (#kwargs << 8)
hasnargs.append(131)
def_op('MAKE_FUNCTION', 132) # Number of args with default values
def_op('BUILD_SLICE', 133) # Number of items
def_op('MAKE_CLOSURE', 134)
def_op('LOAD_CLOSURE', 135)
hasfree.append(135)
def_op('LOAD_DEREF', 136)
hasfree.append(136)
def_op('STORE_DEREF', 137)
hasfree.append(137)
def_op('DELETE_DEREF', 138)
hasfree.append(138)
def_op('CALL_FUNCTION_VAR', 140) # #args + (#kwargs << 8)
hasnargs.append(140)
def_op('CALL_FUNCTION_KW', 141) # #args + (#kwargs << 8)
hasnargs.append(141)
def_op('CALL_FUNCTION_VAR_KW', 142) # #args + (#kwargs << 8)
hasnargs.append(142)
jrel_op('SETUP_WITH', 143)
def_op('LIST_APPEND', 145)
def_op('SET_ADD', 146)
def_op('MAP_ADD', 147)
def_op('LOAD_CLASSDEREF', 148)
hasfree.append(148)
def_op('EXTENDED_ARG', 144)
EXTENDED_ARG = 144
del def_op, name_op, jrel_op, jabs_op
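# Editor's sketch (not part of the upstream module): a quick cross-check of the
# tables built above against the running interpreter.  It assumes a CPython
# whose bytecode numbering matches these definitions and Python 3.4+ for
# dis.get_instructions(); run the file directly to try it.
if __name__ == "__main__":
    import dis
    demo = compile("x + 1", "<demo>", "eval")
    for instr in dis.get_instructions(demo):
        # opname/opmap defined above should agree with the dis module's view
        assert opmap[instr.opname] == instr.opcode
        print(instr.opname, instr.opcode, instr.opcode >= HAVE_ARGUMENT)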
| mit | -3,256,446,462,933,646,000 | 26.171617 | 80 | 0.635005 | false | 2.584929 | false | false | false |
benagricola/exabgp | lib/exabgp/bgp/message/update/nlri/evpn/multicast.py | 1 | 2426 | """
multicast.py
Created by Thomas Morin on 2014-06-23.
Copyright (c) 2014-2015 Orange. All rights reserved.
"""
from exabgp.protocol.ip import IP
from exabgp.bgp.message.update.nlri.qualifier import RouteDistinguisher
from exabgp.bgp.message.update.nlri.qualifier import EthernetTag
from exabgp.bgp.message.update.nlri.evpn.nlri import EVPN
# +---------------------------------------+
# | RD (8 octets) |
# +---------------------------------------+
# | Ethernet Tag ID (4 octets) |
# +---------------------------------------+
# | IP Address Length (1 octet) |
# +---------------------------------------+
# | Originating Router's IP Addr |
# | (4 or 16 octets) |
# +---------------------------------------+
# ===================================================================== EVPNNLRI
@EVPN.register
class Multicast (EVPN):
CODE = 3
NAME = "Inclusive Multicast Ethernet Tag"
SHORT_NAME = "Multicast"
def __init__ (self, rd, etag, ip, packed=None,nexthop=None,action=None,addpath=None):
EVPN.__init__(self,action,addpath)
self.nexthop = nexthop
self.rd = rd
self.etag = etag
self.ip = ip
self._pack(packed)
def __ne__ (self, other):
return not self.__eq__(other)
def __str__ (self):
return "%s:%s:%s:%s" % (
self._prefix(),
self.rd._str(),
self.etag,
self.ip,
)
def __hash__ (self):
return hash((self.afi,self.safi,self.CODE,self.rd,self.etag,self.ip))
def _pack (self, packed=None):
if self._packed:
return self._packed
if packed:
self._packed = packed
return packed
self._packed = '%s%s%s%s' % (
self.rd.pack(),
self.etag.pack(),
chr(len(self.ip)*8),
self.ip.pack()
)
return self._packed
@classmethod
def unpack (cls, data):
rd = RouteDistinguisher.unpack(data[:8])
etag = EthernetTag.unpack(data[8:12])
iplen = ord(data[12])
if iplen not in (4*8,16*8):
raise Exception("IP len is %d, but EVPN route currently support only IPv4" % iplen)
ip = IP.unpack(data[13:13+iplen/8])
return cls(rd,etag,ip,data)
def json (self, compact=None):
content = ' "code": %d, ' % self.CODE
content += '"parsed": true, '
content += '"raw": "%s", ' % self._raw()
content += '"name": "%s", ' % self.NAME
content += '%s, ' % self.rd.json()
content += self.etag.json()
if self.ip:
content += ', "ip": "%s"' % str(self.ip)
return '{%s }' % content
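# Editor's sketch (not shipped with exabgp): how the wire format drawn at the
# top of this file maps onto unpack().  The payload below is hand-built and
# purely illustrative: an all-zero RD (8 octets), Ethernet Tag 0 (4 octets),
# a length octet announcing 32 bits, then an IPv4 originating router address.
#
#     payload = ('\x00' * 8) + ('\x00' * 4) + chr(32) + '\xc0\x00\x02\x01'
#     route = Multicast.unpack(payload)      # originating router 192.0.2.1
#     assert route._pack() == payload        # packing round-trips the bytes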
| bsd-3-clause | 8,568,785,827,975,385,000 | 25.659341 | 86 | 0.54122 | false | 2.980344 | false | false | false |
emoronayuso/beeton | asterisk-bee/asteriskbee/api_status/scripts_graficas/recoge_marcas_graficas.py | 1 | 2307 | #!/usr/bin/python
import matplotlib.pyplot as plt
import numpy as np
#import calendar
from datetime import datetime
from django.conf import settings
settings.configure()
import os
# for the connection to the beeton (asteriskbee) database
import sqlite3 as dbapi
## Application directory
### STATIC_ROOT = '/var/www/asterisk-bee/asteriskbee/'
#directorio = settings.STATIC_ROOT+"api_status/"
directorio = "/var/www/asterisk-bee/asteriskbee/api_status/"
## Maximum number of tuples per graph
num_cpu_dia = 20
def recoge_marcas():
	# Connection to the statistics database
bbdd = dbapi.connect(directorio+"bbdd/estadisticas.db")
cursor = bbdd.cursor()
os.system("ps -e -o pcpu,cpu,nice,state,cputime,args --sort pcpu | sed '/^ 0.0 /d' > "+directorio+"scripts_graficas/temp/temp_cpu_dia; cat "+directorio+"scripts_graficas/temp/temp_cpu_dia | sed 's/^[ \t]*//;s/[ \t]*$//' | grep -v 'recoge_marcas_graficas.py' | cut -d ' ' -f 1 > "+directorio+"scripts_graficas/temp/temp_cpu_dia2")
total = 0.0
f = open(directorio+'scripts_graficas/temp/temp_cpu_dia2','r')
	## Read the first line to skip the header
linea = f.readline()
while True:
linea = f.readline()
if not linea:
break
		# Exclude the CPU usage of the script that collects the samples
else:
total = total + float(linea)
f.close()
res = total
# print str(res)
	# Build the query ordered by date
con_ordenada = """select * from api_status_marcas_graficas where tipo='cpu_dia' order by fecha_hora;"""
cursor.execute(con_ordenada)
p = cursor.fetchall()
if len(p) < num_cpu_dia:
		# insert into the database
insert = "insert into api_status_marcas_graficas (tipo,valor) values ('cpu_dia',?);"
cursor.execute(insert ,(res,))
bbdd.commit()
else:
		# Order by date, drop the oldest entry and insert the new one
# strftime('%d-%m-%Y %H:%M',calldate)
hora_actual = datetime.now()
con_update = " update api_status_marcas_graficas set fecha_hora=datetime(?),valor=? where id=?; "
# print "Antes del update, hora_actual->"+str(hora_actual)+"valor->"+str(res)+ " id->"+str(p[0][0])
cursor.execute(con_update ,(hora_actual,res,p[0][0]))
bbdd.commit()
	## Close the database connection
cursor.close()
bbdd.close()
if __name__ == "__main__":
recoge_marcas()
| gpl-3.0 | 2,008,526,283,982,816,800 | 24.633333 | 330 | 0.688773 | false | 2.595051 | false | false | false |
diedthreetimes/VCrash | pybindgen-0.15.0.795/pybindgen/typehandlers/inttype.py | 1 | 29684 | # docstrings not needed here (the type handler interfaces are fully
# documented in base.py)
# pylint: disable-msg=C0111
import struct
assert struct.calcsize('i') == 4 # assumption is made that sizeof(int) == 4 for all platforms pybindgen runs on
from base import ReturnValue, Parameter, PointerParameter, PointerReturnValue, \
ReverseWrapperBase, ForwardWrapperBase, TypeConfigurationError, NotSupportedError
class IntParam(Parameter):
DIRECTIONS = [Parameter.DIRECTION_IN]
CTYPES = ['int', 'int32_t']
def convert_c_to_python(self, wrapper):
assert isinstance(wrapper, ReverseWrapperBase)
wrapper.build_params.add_parameter('i', [self.value])
def convert_python_to_c(self, wrapper):
assert isinstance(wrapper, ForwardWrapperBase)
name = wrapper.declarations.declare_variable(self.ctype_no_const, self.name, self.default_value)
wrapper.parse_params.add_parameter('i', ['&'+name], self.name, optional=bool(self.default_value))
wrapper.call_params.append(name)
class UnsignedIntParam(Parameter):
DIRECTIONS = [Parameter.DIRECTION_IN]
CTYPES = ['unsigned int', 'uint32_t']
def convert_c_to_python(self, wrapper):
assert isinstance(wrapper, ReverseWrapperBase)
wrapper.build_params.add_parameter('N', ["PyLong_FromUnsignedLong(%s)" % self.value])
def convert_python_to_c(self, wrapper):
assert isinstance(wrapper, ForwardWrapperBase)
name = wrapper.declarations.declare_variable('unsigned int', self.name, self.default_value)
wrapper.parse_params.add_parameter('I', ['&'+name], self.name, optional=bool(self.default_value))
wrapper.call_params.append(name)
class UnsignedIntPtrParam(PointerParameter):
DIRECTIONS = [Parameter.DIRECTION_IN, Parameter.DIRECTION_OUT, Parameter.DIRECTION_INOUT]
CTYPES = ['unsigned int*', 'uint32_t*']
def __init__(self, ctype, name, direction=Parameter.DIRECTION_IN, is_const=False,
default_value=None, transfer_ownership=False, array_length=None):
super(UnsignedIntPtrParam, self).__init__(ctype, name, direction, is_const, default_value, transfer_ownership)
self.array_length = array_length
if transfer_ownership:
raise NotSupportedError("%s: transfer_ownership=True not yet implemented." % ctype)
def convert_c_to_python(self, wrapper):
if self.direction & self.DIRECTION_IN:
wrapper.build_params.add_parameter('I', ['*'+self.value])
if self.direction & self.DIRECTION_OUT:
wrapper.parse_params.add_parameter('I', [self.value], self.name)
def convert_python_to_c(self, wrapper):
#assert self.ctype == 'unsigned int*'
if self.array_length is None:
name = wrapper.declarations.declare_variable(str(self.type_traits.target), self.name)
wrapper.call_params.append('&'+name)
if self.direction & self.DIRECTION_IN:
wrapper.parse_params.add_parameter('I', ['&'+name], self.name)
if self.direction & self.DIRECTION_OUT:
wrapper.build_params.add_parameter('I', [name])
else: # complicated code path to deal with arrays...
name = wrapper.declarations.declare_variable(str(self.type_traits.target), self.name, array="[%i]" % self.array_length)
py_list = wrapper.declarations.declare_variable("PyObject*", "py_list")
idx = wrapper.declarations.declare_variable("int", "idx")
wrapper.call_params.append(name)
if self.direction & self.DIRECTION_IN:
elem = wrapper.declarations.declare_variable("PyObject*", "element")
wrapper.parse_params.add_parameter('O!', ['&PyList_Type', '&'+py_list], self.name)
wrapper.before_call.write_error_check(
'PyList_Size(%s) != %i' % (py_list, self.array_length),
'PyErr_SetString(PyExc_TypeError, "Parameter `%s\' must be a list of %i ints/longs");'
% (self.name, self.array_length))
wrapper.before_call.write_code(
"for (%s = 0; %s < %i; %s++) {" % (idx, idx, self.array_length, idx))
wrapper.before_call.indent()
wrapper.before_call.write_code("%(elem)s = PyList_GET_ITEM(%(py_list)s, %(idx)s);" % vars())
wrapper.before_call.write_error_check(
                    '!(PyInt_Check(%(elem)s) || PyLong_Check(%(elem)s))' % vars(),
'PyErr_SetString(PyExc_TypeError, "Parameter `%s\' must be a list of %i ints / longs");'
% (self.name, self.array_length))
wrapper.before_call.write_code("%(name)s[%(idx)s] = PyLong_AsUnsignedInt(%(elem)s);" % vars())
wrapper.before_call.unindent()
wrapper.before_call.write_code('}')
if self.direction & self.DIRECTION_OUT:
wrapper.after_call.write_code("%s = PyList_New(%i);" % (py_list, self.array_length))
wrapper.after_call.write_code(
"for (%s = 0; %s < %i; %s++) {" % (idx, idx, self.array_length, idx))
wrapper.after_call.indent()
wrapper.after_call.write_code("PyList_SET_ITEM(%(py_list)s, %(idx)s, PyLong_FromUnsignedLong(%(name)s[%(idx)s]));"
% vars())
wrapper.after_call.unindent()
wrapper.after_call.write_code('}')
wrapper.build_params.add_parameter("N", [py_list])
class IntReturn(ReturnValue):
CTYPES = ['int', 'int32_t']
def get_c_error_return(self):
return "return INT_MIN;"
def convert_python_to_c(self, wrapper):
wrapper.parse_params.add_parameter("i", ["&"+self.value], prepend=True)
def convert_c_to_python(self, wrapper):
wrapper.build_params.add_parameter("i", [self.value], prepend=True)
class UnsignedIntReturn(ReturnValue):
CTYPES = ['unsigned int', 'uint32_t']
def get_c_error_return(self):
return "return 0;"
def convert_python_to_c(self, wrapper):
wrapper.parse_params.add_parameter("I", ["&"+self.value], prepend=True)
def convert_c_to_python(self, wrapper):
wrapper.build_params.add_parameter('N', ["PyLong_FromUnsignedLong(%s)" % self.value], prepend=True)
class IntPtrParam(PointerParameter):
DIRECTIONS = [Parameter.DIRECTION_IN, Parameter.DIRECTION_OUT,
Parameter.DIRECTION_IN|Parameter.DIRECTION_OUT]
CTYPES = ['int*']
def __init__(self, ctype, name, direction=None, is_const=None, transfer_ownership=None):
if direction is None:
if is_const:
direction = Parameter.DIRECTION_IN
else:
raise TypeConfigurationError("direction not given")
super(IntPtrParam, self).__init__(ctype, name, direction, is_const, transfer_ownership)
def convert_c_to_python(self, wrapper):
if self.direction & self.DIRECTION_IN:
wrapper.build_params.add_parameter('i', ['*'+self.value])
if self.direction & self.DIRECTION_OUT:
wrapper.parse_params.add_parameter("i", [self.value], self.name)
def convert_python_to_c(self, wrapper):
name = wrapper.declarations.declare_variable(self.ctype_no_const[:-1], self.name)
wrapper.call_params.append('&'+name)
if self.direction & self.DIRECTION_IN:
wrapper.parse_params.add_parameter('i', ['&'+name], self.name)
if self.direction & self.DIRECTION_OUT:
wrapper.build_params.add_parameter("i", [name])
class IntRefParam(Parameter):
DIRECTIONS = [Parameter.DIRECTION_IN, Parameter.DIRECTION_OUT,
Parameter.DIRECTION_IN|Parameter.DIRECTION_OUT]
CTYPES = ['int&']
def convert_c_to_python(self, wrapper):
if self.direction & self.DIRECTION_IN:
wrapper.build_params.add_parameter('i', [self.value])
if self.direction & self.DIRECTION_OUT:
wrapper.parse_params.add_parameter("i", [self.value], self.name)
def convert_python_to_c(self, wrapper):
#assert self.ctype == 'int&'
name = wrapper.declarations.declare_variable(self.ctype_no_const[:-1], self.name)
wrapper.call_params.append(name)
if self.direction & self.DIRECTION_IN:
wrapper.parse_params.add_parameter('i', ['&'+name], self.name)
if self.direction & self.DIRECTION_OUT:
wrapper.build_params.add_parameter("i", [name])
class UnsignedIntRefParam(Parameter):
DIRECTIONS = [Parameter.DIRECTION_IN, Parameter.DIRECTION_OUT,
Parameter.DIRECTION_IN|Parameter.DIRECTION_OUT]
CTYPES = ['unsigned int&', 'unsigned &']
def convert_c_to_python(self, wrapper):
if self.direction & self.DIRECTION_IN:
wrapper.build_params.add_parameter('I', [self.value])
if self.direction & self.DIRECTION_OUT:
wrapper.parse_params.add_parameter("I", [self.value], self.name)
def convert_python_to_c(self, wrapper):
#assert self.ctype == 'int&'
name = wrapper.declarations.declare_variable(self.ctype_no_const[:-1], self.name)
wrapper.call_params.append(name)
if self.direction & self.DIRECTION_IN:
wrapper.parse_params.add_parameter('I', ['&'+name], self.name)
if self.direction & self.DIRECTION_OUT:
wrapper.build_params.add_parameter("I", [name])
class UInt16Return(ReturnValue):
CTYPES = ['uint16_t', 'unsigned short', 'unsigned short int', 'short unsigned int']
def get_c_error_return(self):
return "return 0;"
def convert_python_to_c(self, wrapper):
tmp_var = wrapper.declarations.declare_variable("int", "tmp")
wrapper.parse_params.add_parameter("i", ["&"+tmp_var], prepend=True)
wrapper.after_call.write_error_check('%s > 0xffff' % tmp_var,
'PyErr_SetString(PyExc_ValueError, "Out of range");')
wrapper.after_call.write_code(
"%s = %s;" % (self.value, tmp_var))
def convert_c_to_python(self, wrapper):
wrapper.build_params.add_parameter("i", [self.value], prepend=True)
class Int16Return(ReturnValue):
CTYPES = ['int16_t', 'short', 'short int']
def get_c_error_return(self):
return "return 0;"
def convert_python_to_c(self, wrapper):
tmp_var = wrapper.declarations.declare_variable("int", "tmp")
wrapper.parse_params.add_parameter("i", ["&"+tmp_var], prepend=True)
wrapper.after_call.write_error_check('%s > 32767 || %s < -32768' % (tmp_var, tmp_var),
'PyErr_SetString(PyExc_ValueError, "Out of range");')
wrapper.after_call.write_code(
"%s = %s;" % (self.value, tmp_var))
def convert_c_to_python(self, wrapper):
wrapper.build_params.add_parameter("i", [self.value], prepend=True)
class UInt16Param(Parameter):
DIRECTIONS = [Parameter.DIRECTION_IN]
CTYPES = ['uint16_t', 'unsigned short', 'unsigned short int']
def convert_c_to_python(self, wrapper):
assert isinstance(wrapper, ReverseWrapperBase)
wrapper.build_params.add_parameter('i', ["(int) "+self.value])
def convert_python_to_c(self, wrapper):
assert isinstance(wrapper, ForwardWrapperBase)
name = wrapper.declarations.declare_variable("int", self.name, self.default_value)
wrapper.parse_params.add_parameter('i', ['&'+name], self.name, optional=bool(self.default_value))
wrapper.before_call.write_error_check('%s > 0xffff' % name,
'PyErr_SetString(PyExc_ValueError, "Out of range");')
wrapper.call_params.append(name)
class UInt16RefParam(Parameter):
DIRECTIONS = [Parameter.DIRECTION_IN, Parameter.DIRECTION_INOUT, Parameter.DIRECTION_OUT]
CTYPES = ['uint16_t&', 'unsigned short&', 'unsigned short int&', 'short unsigned&', 'short unsigned int&']
def convert_c_to_python(self, wrapper):
assert isinstance(wrapper, ReverseWrapperBase)
if self.direction & self.DIRECTION_IN:
wrapper.build_params.add_parameter('H', [self.value])
if self.direction & self.DIRECTION_OUT:
wrapper.parse_params.add_parameter("H", [self.value], self.name)
def convert_python_to_c(self, wrapper):
name = wrapper.declarations.declare_variable(self.ctype_no_const[:-1], self.name)
wrapper.call_params.append(name)
if self.direction & self.DIRECTION_IN:
wrapper.parse_params.add_parameter('H', ['&'+name], self.name)
if self.direction & self.DIRECTION_OUT:
wrapper.build_params.add_parameter("H", [name])
class Int16Param(Parameter):
DIRECTIONS = [Parameter.DIRECTION_IN]
CTYPES = ['int16_t', 'short', 'short int']
def convert_c_to_python(self, wrapper):
assert isinstance(wrapper, ReverseWrapperBase)
wrapper.build_params.add_parameter('i', ["(int) "+self.value])
def convert_python_to_c(self, wrapper):
assert isinstance(wrapper, ForwardWrapperBase)
name = wrapper.declarations.declare_variable("int", self.name, self.default_value)
wrapper.parse_params.add_parameter('i', ['&'+name], self.name, optional=bool(self.default_value))
wrapper.before_call.write_error_check('%s > 0x7fff' % name,
'PyErr_SetString(PyExc_ValueError, "Out of range");')
wrapper.call_params.append(name)
class Int16RefParam(Parameter):
DIRECTIONS = [Parameter.DIRECTION_IN, Parameter.DIRECTION_INOUT, Parameter.DIRECTION_OUT]
CTYPES = ['int16_t&', 'short&', 'short int&']
def convert_c_to_python(self, wrapper):
assert isinstance(wrapper, ReverseWrapperBase)
if self.direction & self.DIRECTION_IN:
wrapper.build_params.add_parameter('h', [self.value])
if self.direction & self.DIRECTION_OUT:
wrapper.parse_params.add_parameter("h", [self.value], self.name)
def convert_python_to_c(self, wrapper):
name = wrapper.declarations.declare_variable(self.ctype_no_const[:-1], self.name)
wrapper.call_params.append(name)
if self.direction & self.DIRECTION_IN:
wrapper.parse_params.add_parameter('h', ['&'+name], self.name)
if self.direction & self.DIRECTION_OUT:
wrapper.build_params.add_parameter("h", [name])
class UInt8Param(Parameter):
DIRECTIONS = [Parameter.DIRECTION_IN]
CTYPES = ['uint8_t', 'unsigned char', 'char unsigned']
def convert_c_to_python(self, wrapper):
assert isinstance(wrapper, ReverseWrapperBase)
wrapper.build_params.add_parameter('i', ["(int) "+self.value])
def convert_python_to_c(self, wrapper):
assert isinstance(wrapper, ForwardWrapperBase)
name = wrapper.declarations.declare_variable("int", self.name, self.default_value)
wrapper.parse_params.add_parameter('i', ['&'+name], self.name, optional=bool(self.default_value))
wrapper.before_call.write_error_check('%s > 0xff' % name,
'PyErr_SetString(PyExc_ValueError, "Out of range");')
wrapper.call_params.append(name)
class UInt8RefParam(Parameter):
DIRECTIONS = [Parameter.DIRECTION_IN, Parameter.DIRECTION_INOUT, Parameter.DIRECTION_OUT]
CTYPES = ['uint8_t&', 'unsigned char&', 'char unsigned&']
def convert_c_to_python(self, wrapper):
assert isinstance(wrapper, ReverseWrapperBase)
if self.direction & self.DIRECTION_IN:
wrapper.build_params.add_parameter('B', [self.value])
if self.direction & self.DIRECTION_OUT:
wrapper.parse_params.add_parameter("B", [self.value], self.name)
def convert_python_to_c(self, wrapper):
name = wrapper.declarations.declare_variable(self.ctype_no_const[:-1], self.name)
wrapper.call_params.append(name)
if self.direction & self.DIRECTION_IN:
wrapper.parse_params.add_parameter('B', ['&'+name], self.name)
if self.direction & self.DIRECTION_OUT:
wrapper.build_params.add_parameter("B", [name])
class UInt8Return(ReturnValue):
CTYPES = ['uint8_t', 'unsigned char', 'char unsigned']
def get_c_error_return(self):
return "return 0;"
def convert_python_to_c(self, wrapper):
tmp_var = wrapper.declarations.declare_variable("int", "tmp")
wrapper.parse_params.add_parameter("i", ["&"+tmp_var], prepend=True)
wrapper.after_call.write_error_check('%s > 0xff' % tmp_var,
'PyErr_SetString(PyExc_ValueError, "Out of range");')
wrapper.after_call.write_code(
"%s = %s;" % (self.value, tmp_var))
def convert_c_to_python(self, wrapper):
wrapper.build_params.add_parameter("i", ['(int)' + self.value], prepend=True)
class Int8Param(Parameter):
DIRECTIONS = [Parameter.DIRECTION_IN]
CTYPES = ['int8_t', 'signed char', 'char signed']
def convert_c_to_python(self, wrapper):
assert isinstance(wrapper, ReverseWrapperBase)
wrapper.build_params.add_parameter('i', ["(int) "+self.value])
def convert_python_to_c(self, wrapper):
assert isinstance(wrapper, ForwardWrapperBase)
name = wrapper.declarations.declare_variable("int", self.name, self.default_value)
wrapper.parse_params.add_parameter('i', ['&'+name], self.name, optional=bool(self.default_value))
wrapper.before_call.write_error_check('%s > 0x7f' % name,
'PyErr_SetString(PyExc_ValueError, "Out of range");')
wrapper.call_params.append(name)
class Int8RefParam(Parameter):
DIRECTIONS = [Parameter.DIRECTION_IN, Parameter.DIRECTION_INOUT, Parameter.DIRECTION_OUT]
CTYPES = ['int8_t&', 'signed char &', 'char signed&']
def convert_c_to_python(self, wrapper):
assert isinstance(wrapper, ReverseWrapperBase)
if self.direction & self.DIRECTION_IN:
wrapper.build_params.add_parameter('b', [self.value])
if self.direction & self.DIRECTION_OUT:
wrapper.parse_params.add_parameter("b", [self.value], self.name)
def convert_python_to_c(self, wrapper):
name = wrapper.declarations.declare_variable(self.ctype_no_const[:-1], self.name)
wrapper.call_params.append(name)
if self.direction & self.DIRECTION_IN:
wrapper.parse_params.add_parameter('b', ['&'+name], self.name)
if self.direction & self.DIRECTION_OUT:
wrapper.build_params.add_parameter("b", [name])
class Int8Return(ReturnValue):
CTYPES = ['int8_t', 'signed char']
def get_c_error_return(self):
return "return 0;"
def convert_python_to_c(self, wrapper):
tmp_var = wrapper.declarations.declare_variable("int", "tmp")
wrapper.parse_params.add_parameter("i", ["&"+tmp_var], prepend=True)
        wrapper.after_call.write_error_check('%s > 127 || %s < -128' % (tmp_var, tmp_var),
'PyErr_SetString(PyExc_ValueError, "Out of range");')
wrapper.after_call.write_code(
"%s = %s;" % (self.value, tmp_var))
def convert_c_to_python(self, wrapper):
wrapper.build_params.add_parameter("i", [self.value], prepend=True)
class UnsignedLongLongParam(Parameter):
DIRECTIONS = [Parameter.DIRECTION_IN]
CTYPES = ['unsigned long long', 'uint64_t', 'unsigned long long int', 'long long unsigned int', 'long long unsigned']
def get_ctype_without_ref(self):
return str(self.type_traits.ctype_no_const)
def convert_c_to_python(self, wrapper):
assert isinstance(wrapper, ReverseWrapperBase)
wrapper.build_params.add_parameter('K', [self.value])
def convert_python_to_c(self, wrapper):
assert isinstance(wrapper, ForwardWrapperBase)
name = wrapper.declarations.declare_variable(self.get_ctype_without_ref(), self.name, self.default_value)
wrapper.parse_params.add_parameter('K', ['&'+name], self.name, optional=bool(self.default_value))
wrapper.call_params.append(name)
class UnsignedLongLongRefParam(UnsignedLongLongParam):
DIRECTIONS = [Parameter.DIRECTION_IN]
CTYPES = ['unsigned long long&', 'uint64_t&', 'long long unsigned int&']
def get_ctype_without_ref(self):
assert self.type_traits.target is not None
return str(self.type_traits.target)
class UnsignedLongLongReturn(ReturnValue):
CTYPES = ['unsigned long long', 'uint64_t', 'long long unsigned int']
def get_c_error_return(self):
return "return 0;"
def convert_python_to_c(self, wrapper):
wrapper.parse_params.add_parameter("K", ["&"+self.value], prepend=True)
def convert_c_to_python(self, wrapper):
wrapper.build_params.add_parameter("K", [self.value], prepend=True)
class UnsignedLongParam(Parameter):
DIRECTIONS = [Parameter.DIRECTION_IN]
CTYPES = ['unsigned long', 'unsigned long int', 'long unsigned', 'long unsigned int']
def get_ctype_without_ref(self):
return str(self.type_traits.ctype_no_const)
def convert_c_to_python(self, wrapper):
assert isinstance(wrapper, ReverseWrapperBase)
wrapper.build_params.add_parameter('k', [self.value])
def convert_python_to_c(self, wrapper):
assert isinstance(wrapper, ForwardWrapperBase)
name = wrapper.declarations.declare_variable(self.get_ctype_without_ref(), self.name, self.default_value)
wrapper.parse_params.add_parameter('k', ['&'+name], self.name, optional=bool(self.default_value))
wrapper.call_params.append(name)
class UnsignedLongRefParam(UnsignedLongParam):
DIRECTIONS = [Parameter.DIRECTION_IN]
CTYPES = ['unsigned long&', 'long unsigned&', 'long unsigned int&', 'unsigned long int&']
def get_ctype_without_ref(self):
assert self.type_traits.target is not None
return str(self.type_traits.target)
class UnsignedLongReturn(ReturnValue):
CTYPES = ['unsigned long', 'long unsigned int']
def get_c_error_return(self):
return "return 0;"
def convert_python_to_c(self, wrapper):
wrapper.parse_params.add_parameter("k", ["&"+self.value], prepend=True)
def convert_c_to_python(self, wrapper):
wrapper.build_params.add_parameter("k", [self.value], prepend=True)
class LongParam(Parameter):
DIRECTIONS = [Parameter.DIRECTION_IN]
CTYPES = ['signed long', 'signed long int', 'long', 'long int', 'long signed', 'long signed int']
def get_ctype_without_ref(self):
return str(self.type_traits.ctype_no_const)
def convert_c_to_python(self, wrapper):
assert isinstance(wrapper, ReverseWrapperBase)
wrapper.build_params.add_parameter('l', [self.value])
def convert_python_to_c(self, wrapper):
assert isinstance(wrapper, ForwardWrapperBase)
name = wrapper.declarations.declare_variable(self.get_ctype_without_ref(), self.name, self.default_value)
wrapper.parse_params.add_parameter('l', ['&'+name], self.name, optional=bool(self.default_value))
wrapper.call_params.append(name)
class LongRefParam(LongParam):
DIRECTIONS = [Parameter.DIRECTION_IN]
CTYPES = ['signed long&', 'long signed&', 'long&', 'long int&', 'long signed int&', 'signed long int&']
def get_ctype_without_ref(self):
assert self.type_traits.target is not None
return str(self.type_traits.target)
class LongReturn(ReturnValue):
CTYPES = ['signed long', 'long signed int', 'long', 'long int']
def get_c_error_return(self):
return "return 0;"
def convert_python_to_c(self, wrapper):
wrapper.parse_params.add_parameter("l", ["&"+self.value], prepend=True)
def convert_c_to_python(self, wrapper):
wrapper.build_params.add_parameter("l", [self.value], prepend=True)
class SizeTReturn(ReturnValue):
CTYPES = ['size_t',]
def get_c_error_return(self):
return "return 0;"
def convert_python_to_c(self, wrapper):
# using the intermediate variable is not always necessary but
# it's safer this way in case of weird platforms where
# sizeof(size_t) != sizeof(unsigned PY_LONG_LONG).
name = wrapper.declarations.declare_variable("unsigned PY_LONG_LONG", "retval_tmp", self.value)
wrapper.parse_params.add_parameter("K", ["&"+name], prepend=True)
wrapper.after_call.write_code("retval = %s;" % (name))
def convert_c_to_python(self, wrapper):
wrapper.build_params.add_parameter("K", ["((unsigned PY_LONG_LONG) %s)" % self.value], prepend=True)
class SizeTParam(Parameter):
DIRECTIONS = [Parameter.DIRECTION_IN]
CTYPES = ['size_t']
def get_ctype_without_ref(self):
return str(self.type_traits.ctype_no_const)
def convert_c_to_python(self, wrapper):
assert isinstance(wrapper, ReverseWrapperBase)
wrapper.build_params.add_parameter('K', ["((unsigned PY_LONG_LONG) %s)" % self.value])
def convert_python_to_c(self, wrapper):
assert isinstance(wrapper, ForwardWrapperBase)
name = wrapper.declarations.declare_variable("unsigned PY_LONG_LONG", self.name, self.default_value)
wrapper.parse_params.add_parameter('K', ['&'+name], self.name, optional=bool(self.default_value))
wrapper.call_params.append(name)
class LongLongParam(Parameter):
DIRECTIONS = [Parameter.DIRECTION_IN]
CTYPES = ['long long', 'int64_t', 'long long int']
def get_ctype_without_ref(self):
return str(self.type_traits.ctype_no_const)
def convert_c_to_python(self, wrapper):
assert isinstance(wrapper, ReverseWrapperBase)
wrapper.build_params.add_parameter('L', [self.value])
def convert_python_to_c(self, wrapper):
assert isinstance(wrapper, ForwardWrapperBase)
name = wrapper.declarations.declare_variable(self.get_ctype_without_ref(), self.name, self.default_value)
wrapper.parse_params.add_parameter('L', ['&'+name], self.name, optional=bool(self.default_value))
wrapper.call_params.append(name)
class LongLongRefParam(LongLongParam):
DIRECTIONS = [Parameter.DIRECTION_IN] # other directions not yet implemented
CTYPES = ['long long&', 'int64_t&', 'long long int&']
def get_ctype_without_ref(self):
assert self.type_traits.target is not None
return str(self.type_traits.target)
class LongLongReturn(ReturnValue):
CTYPES = ['long long', 'int64_t', 'long long int']
def get_c_error_return(self):
return "return 0;"
def convert_python_to_c(self, wrapper):
wrapper.parse_params.add_parameter("L", ["&"+self.value], prepend=True)
def convert_c_to_python(self, wrapper):
wrapper.build_params.add_parameter("L", [self.value], prepend=True)
class Int8PtrParam(PointerParameter):
DIRECTIONS = [Parameter.DIRECTION_IN, Parameter.DIRECTION_OUT,
Parameter.DIRECTION_IN|Parameter.DIRECTION_OUT]
CTYPES = ['int8_t*']
def __init__(self, ctype, name, direction=None, is_const=None, default_value=None, transfer_ownership=None):
if direction is None:
if is_const:
direction = Parameter.DIRECTION_IN
else:
raise TypeConfigurationError("direction not given")
super(Int8PtrParam, self).__init__(ctype, name, direction, is_const, default_value, transfer_ownership)
def convert_c_to_python(self, wrapper):
if self.direction & self.DIRECTION_IN:
wrapper.build_params.add_parameter('b', ['*'+self.value])
if self.direction & self.DIRECTION_OUT:
wrapper.parse_params.add_parameter("b", [self.value], self.name)
def convert_python_to_c(self, wrapper):
name = wrapper.declarations.declare_variable('int8_t', self.name)
wrapper.call_params.append('&'+name)
if self.direction & self.DIRECTION_IN:
wrapper.parse_params.add_parameter('b', ['&'+name], self.name)
if self.direction & self.DIRECTION_OUT:
wrapper.build_params.add_parameter("b", [name])
class UInt8PtrParam(PointerParameter):
DIRECTIONS = [Parameter.DIRECTION_IN, Parameter.DIRECTION_OUT,
Parameter.DIRECTION_IN|Parameter.DIRECTION_OUT]
CTYPES = ['uint8_t*']
def __init__(self, ctype, name, direction=None, is_const=None, default_value=None, transfer_ownership=None):
if direction is None:
if is_const:
direction = Parameter.DIRECTION_IN
else:
raise TypeConfigurationError("direction not given")
super(UInt8PtrParam, self).__init__(ctype, name, direction, is_const, default_value, transfer_ownership)
def convert_c_to_python(self, wrapper):
if self.direction & self.DIRECTION_IN:
wrapper.build_params.add_parameter('B', ['*'+self.value])
if self.direction & self.DIRECTION_OUT:
wrapper.parse_params.add_parameter("B", [self.value], self.name)
def convert_python_to_c(self, wrapper):
name = wrapper.declarations.declare_variable('uint8_t', self.name)
wrapper.call_params.append('&'+name)
if self.direction & self.DIRECTION_IN:
wrapper.parse_params.add_parameter('B', ['&'+name], self.name)
if self.direction & self.DIRECTION_OUT:
wrapper.build_params.add_parameter("B", [name])
| gpl-2.0 | 8,058,617,483,119,672,000 | 41.164773 | 131 | 0.638997 | false | 3.645787 | false | false | false |
persandstrom/home-assistant | homeassistant/components/sensor/netatmo_public.py | 1 | 4390 | """
Support for Sensors using public Netatmo data.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.netatmo_public/.
"""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (CONF_NAME, CONF_TYPE)
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['netatmo']
CONF_AREAS = 'areas'
CONF_LAT_NE = 'lat_ne'
CONF_LON_NE = 'lon_ne'
CONF_LAT_SW = 'lat_sw'
CONF_LON_SW = 'lon_sw'
DEFAULT_NAME = 'Netatmo Public Data'
DEFAULT_TYPE = 'max'
SENSOR_TYPES = {'max', 'avg'}
# NetAtmo Data is uploaded to server every 10 minutes
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=600)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_AREAS): vol.All(cv.ensure_list, [
{
vol.Required(CONF_LAT_NE): cv.latitude,
vol.Required(CONF_LAT_SW): cv.latitude,
vol.Required(CONF_LON_NE): cv.longitude,
vol.Required(CONF_LON_SW): cv.longitude,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_TYPE, default=DEFAULT_TYPE):
vol.In(SENSOR_TYPES)
}
]),
})
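# Editor's note (not part of the upstream component): a configuration.yaml
# entry matching the schema above would look roughly like this; the area name
# and coordinates are placeholders, and the base netatmo component must be
# configured separately.
#
#     sensor:
#       - platform: netatmo_public
#         areas:
#           - name: Home rain
#             lat_ne: 40.719
#             lon_ne: -73.735
#             lat_sw: 40.552
#             lon_sw: -74.105
#             type: avg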
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the access to Netatmo binary sensor."""
netatmo = hass.components.netatmo
sensors = []
areas = config.get(CONF_AREAS)
for area_conf in areas:
data = NetatmoPublicData(netatmo.NETATMO_AUTH,
lat_ne=area_conf.get(CONF_LAT_NE),
lon_ne=area_conf.get(CONF_LON_NE),
lat_sw=area_conf.get(CONF_LAT_SW),
lon_sw=area_conf.get(CONF_LON_SW),
calculation=area_conf.get(CONF_TYPE))
sensors.append(NetatmoPublicSensor(area_conf.get(CONF_NAME), data))
add_entities(sensors)
class NetatmoPublicSensor(Entity):
"""Represent a single sensor in a Netatmo."""
def __init__(self, name, data):
"""Initialize the sensor."""
self.netatmo_data = data
self._name = name
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Icon to use in the frontend."""
return 'mdi:weather-rainy'
@property
def device_class(self):
"""Return the device class of the sensor."""
return None
@property
def state(self):
"""Return true if binary sensor is on."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity."""
return 'mm'
def update(self):
"""Get the latest data from NetAtmo API and updates the states."""
self.netatmo_data.update()
self._state = self.netatmo_data.data
class NetatmoPublicData:
"""Get the latest data from NetAtmo."""
def __init__(self, auth, lat_ne, lon_ne, lat_sw, lon_sw, calculation):
"""Initialize the data object."""
self.auth = auth
self.data = None
self.lat_ne = lat_ne
self.lon_ne = lon_ne
self.lat_sw = lat_sw
self.lon_sw = lon_sw
self.calculation = calculation
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Request an update from the Netatmo API."""
import pyatmo
raindata = pyatmo.PublicData(self.auth,
LAT_NE=self.lat_ne,
LON_NE=self.lon_ne,
LAT_SW=self.lat_sw,
LON_SW=self.lon_sw,
required_data_type="rain")
if raindata.CountStationInArea() == 0:
_LOGGER.warning('No Rain Station available in this area.')
return
raindata_live = raindata.getLive()
if self.calculation == 'avg':
self.data = sum(raindata_live.values()) / len(raindata_live)
else:
self.data = max(raindata_live.values())
| apache-2.0 | -397,387,515,900,327,800 | 30.134752 | 75 | 0.591116 | false | 3.761782 | false | false | false |
ryfeus/lambda-packs | pytorch/source/torch/nn/parallel/deprecated/distributed_cpu.py | 1 | 4290 | import torch
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
import torch.distributed.deprecated as dist
from torch.nn.modules import Module
from collections import defaultdict
from torch.autograd import Variable
import torch.utils.hooks
class DistributedDataParallelCPU(Module):
r"""Implements distributed data parallelism for CPU at the module level.
    This module supports the ``mpi``, ``gloo``, ``tcp`` backends.
This container parallelizes the application of the given module by
splitting the input across the specified devices by chunking in the batch
dimension. The module is replicated on each machine, and each such replica
handles a portion of the input. During the backwards pass, gradients from
each node are averaged.
This module could be used in conjunction with the DistributedSampler,
    (see :class:`torch.utils.data.distributed.DistributedSampler`)
    which will load a subset of the original dataset for each node with the same
batch size. So strong scaling should be configured like this:
n = 1, batch size = 128
n = 2, batch size = 64
n = 4, batch size = 32
n = 8, batch size = 16
Creation of this class requires the distributed package to be already
initialized in the process group mode
(see :func:`torch.distributed.deprecated.init_process_group`).
.. warning::
Constructor, forward method, and differentiation of the output (or a
function of the output of this module) is a distributed synchronization
point. Take that into account in case different node might be
executing different code.
.. warning::
This module assumes all parameters are registered in the model by the
time it is created. No parameters should be added nor removed later.
.. warning::
This module assumes all gradients are dense.
.. warning::
This module doesn't work with :func:`torch.autograd.grad` (i.e. it will
only work if gradients are to be accumulated in ``.grad`` attributes of
parameters).
.. note::
Parameters are broadcast between nodes in the __init__() function. The
module performs an all-reduce step on gradients and assumes that they
will be modified by the optimizer in all nodes in the same way.
.. warning::
Forward and backward hooks defined on :attr:`module` and its submodules
won't be invoked anymore, unless the hooks are initialized in the
:meth:`forward` method.
Args:
module: module to be parallelized
Example::
>>> torch.distributed.deprecated.init_process_group(world_size=4, init_method='...')
>>> net = torch.nn.DistributedDataParallelCPU(model)
"""
def __init__(self, module):
super(DistributedDataParallelCPU, self).__init__()
self.module = module
self.sync_parameters()
def allreduce_params():
if self.needs_reduction:
self.needs_reduction = False
buckets = defaultdict(list)
for param in self.module.parameters():
if param.requires_grad and param.grad is not None:
tp = type(param.data)
buckets[tp].append(param)
for bucket in buckets.values():
grads = [param.grad.data for param in bucket]
coalesced = _flatten_dense_tensors(grads)
dist.all_reduce(coalesced)
coalesced /= dist.get_world_size()
for buf, synced in zip(grads, _unflatten_dense_tensors(coalesced, grads)):
buf.copy_(synced)
for param in list(self.module.parameters()):
@torch.utils.hooks.unserializable_hook
def allreduce_hook(*unused):
Variable._execution_engine.queue_callback(allreduce_params)
if param.requires_grad:
param.register_hook(allreduce_hook)
def sync_parameters(self):
for param in self.module.parameters():
dist.broadcast(param.data, 0)
def forward(self, *inputs, **kwargs):
self.needs_reduction = True
return self.module(*inputs, **kwargs)
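# Editor's sketch (not part of torch): the wiring described in the class
# docstring above.  Names such as MyModel, dataset, criterion, optimizer and
# rank stand in for user code, and the process group is assumed to have been
# initialised with one of the supported backends.
#
#     import torch.distributed.deprecated as dist
#     from torch.utils.data import DataLoader
#     from torch.utils.data.distributed import DistributedSampler
#
#     dist.init_process_group(backend='gloo', init_method='...',
#                             world_size=4, rank=rank)
#     model = DistributedDataParallelCPU(MyModel())
#     sampler = DistributedSampler(dataset)
#     loader = DataLoader(dataset, batch_size=32, sampler=sampler)
#     for inputs, targets in loader:
#         optimizer.zero_grad()
#         loss = criterion(model(inputs), targets)
#         loss.backward()          # gradients are averaged across nodes here
#         optimizer.step()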
| mit | -3,092,668,904,383,977,500 | 39.093458 | 94 | 0.648485 | false | 4.673203 | false | false | false |
geky/pyOCD | pyOCD/target/target_lpc4330.py | 1 | 2872 | """
mbed CMSIS-DAP debugger
Copyright (c) 2006-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cortex_m import CortexM
class LPC4330(CortexM):
memoryMapXML = """<?xml version="1.0"?>
<!DOCTYPE memory-map PUBLIC "+//IDN gnu.org//DTD GDB Memory Map V1.0//EN" "http://sourceware.org/gdb/gdb-memory-map.dtd">
<memory-map>
<memory type="flash" start="0x14000000" length="0x4000000"> <property name="blocksize">0x400</property></memory>
<memory type="ram" start="0x10000000" length="0x20000"> </memory>
<memory type="ram" start="0x10080000" length="0x12000"> </memory>
<memory type="ram" start="0x20000000" length="0x8000"> </memory>
<memory type="ram" start="0x20008000" length="0x8000"> </memory>
</memory-map>
"""
def __init__(self, transport):
super(LPC4330, self).__init__(transport)
self.ignoreReset = False
def setFlash(self, flash):
self.flash = flash
def reset(self, software_reset = False):
# Always use software reset for LPC4330 since the hardware version
# will reset the DAP.
CortexM.reset(self, True)
def resetStopOnReset(self, software_reset = False):
if self.ignoreReset:
return
# Set core up to run some code in RAM that is guaranteed to be valid
# since FLASH could be corrupted and that is what user is trying to fix.
self.writeMemory(0x10000000, 0x10087ff0) # Initial SP
self.writeMemory(0x10000004, 0x1000000d) # Reset Handler
self.writeMemory(0x10000008, 0x1000000d) # Hard Fault Handler
self.writeMemory(0x1000000c, 0xe7fee7fe) # Infinite loop
self.writeMemory(0x40043100, 0x10000000) # Shadow 0x0 to RAM
# Always use software reset for LPC4330 since the hardware version
# will reset the DAP.
CortexM.resetStopOnReset(self, True)
# Map shadow memory to SPIFI FLASH
self.writeMemory(0x40043100, 0x80000000)
# The LPC4330 flash init routine can be used to remount FLASH.
self.ignoreReset = True
self.flash.init()
self.ignoreReset = False
# Set SP and PC based on interrupt vector in SPIFI_FLASH
sp = self.readMemory(0x14000000)
pc = self.readMemory(0x14000004)
self.writeCoreRegisterRaw('sp', sp)
self.writeCoreRegisterRaw('pc', pc)
| apache-2.0 | -5,108,212,287,236,866,000 | 38.342466 | 121 | 0.68071 | false | 3.585518 | false | false | false |
opennode/nodeconductor-assembly-waldur | src/waldur_slurm/apps.py | 1 | 2931 | from django.apps import AppConfig
from django.db.models import signals
class SlurmConfig(AppConfig):
name = 'waldur_slurm'
verbose_name = 'SLURM'
service_name = 'SLURM'
def ready(self):
from waldur_core.quotas.fields import QuotaField, CounterQuotaField
from waldur_core.structure import SupportedServices
from waldur_core.structure import models as structure_models
from waldur_core.structure import signals as structure_signals
from waldur_freeipa import models as freeipa_models
from .backend import SlurmBackend
from . import handlers, models, utils
SupportedServices.register_backend(SlurmBackend)
signals.post_save.connect(
handlers.process_user_creation,
sender=freeipa_models.Profile,
dispatch_uid='waldur_slurm.handlers.process_user_creation',
)
signals.pre_delete.connect(
handlers.process_user_deletion,
sender=freeipa_models.Profile,
dispatch_uid='waldur_slurm.handlers.process_user_deletion',
)
structure_models_with_roles = (
structure_models.Customer,
structure_models.Project,
)
for model in structure_models_with_roles:
structure_signals.structure_role_granted.connect(
handlers.process_role_granted,
sender=model,
dispatch_uid='waldur_slurm.handlers.process_role_granted.%s'
% model.__class__,
)
structure_signals.structure_role_revoked.connect(
handlers.process_role_revoked,
sender=model,
dispatch_uid='waldur_slurm.handlers.process_role_revoked.%s'
% model.__class__,
)
for quota in utils.QUOTA_NAMES:
structure_models.Customer.add_quota_field(
name=quota, quota_field=QuotaField(is_backend=True)
)
structure_models.Project.add_quota_field(
name=quota, quota_field=QuotaField(is_backend=True)
)
structure_models.Project.add_quota_field(
name='nc_allocation_count',
quota_field=CounterQuotaField(
target_models=lambda: [models.Allocation],
path_to_scope='service_project_link.project',
),
)
structure_models.Customer.add_quota_field(
name='nc_allocation_count',
quota_field=CounterQuotaField(
target_models=lambda: [models.Allocation],
path_to_scope='service_project_link.project.customer',
),
)
signals.post_save.connect(
handlers.update_quotas_on_allocation_usage_update,
sender=models.Allocation,
dispatch_uid='waldur_slurm.handlers.update_quotas_on_allocation_usage_update',
)
| mit | -963,469,003,066,524,500 | 34.743902 | 90 | 0.606619 | false | 4.278832 | false | false | false |
addisonElliott/SmartShopTouchScreen | Windows/ExpirationBox_ui.py | 1 | 8236 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ExpirationBox.ui'
#
# Created by: PyQt5 UI code generator 5.7.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_ExpirationBox(object):
def setupUi(self, ExpirationBox):
ExpirationBox.setObjectName("ExpirationBox")
ExpirationBox.resize(506, 364)
font = QtGui.QFont()
font.setPointSize(19)
ExpirationBox.setFont(font)
ExpirationBox.setStyleSheet("QDialog\n"
"{\n"
" border: 1px solid #76797C;\n"
"}")
self.gridLayout = QtWidgets.QGridLayout(ExpirationBox)
self.gridLayout.setContentsMargins(5, 5, 5, 5)
self.gridLayout.setObjectName("gridLayout")
self.day_label = QtWidgets.QLabel(ExpirationBox)
font = QtGui.QFont()
font.setFamily("Segoe UI")
font.setPointSize(15)
self.day_label.setFont(font)
self.day_label.setAlignment(QtCore.Qt.AlignCenter)
self.day_label.setObjectName("day_label")
self.gridLayout.addWidget(self.day_label, 3, 2, 1, 1)
self.day_combo = QtWidgets.QComboBox(ExpirationBox)
font = QtGui.QFont()
font.setFamily("Segoe UI")
font.setPointSize(16)
font.setBold(False)
font.setWeight(50)
self.day_combo.setFont(font)
self.day_combo.setObjectName("day_combo")
self.day_combo.addItem("")
self.day_combo.setItemText(0, "")
self.gridLayout.addWidget(self.day_combo, 4, 2, 1, 1)
self.month_label = QtWidgets.QLabel(ExpirationBox)
font = QtGui.QFont()
font.setFamily("Segoe UI")
font.setPointSize(15)
self.month_label.setFont(font)
self.month_label.setAlignment(QtCore.Qt.AlignCenter)
self.month_label.setObjectName("month_label")
self.gridLayout.addWidget(self.month_label, 3, 1, 1, 1)
self.month_combo = QtWidgets.QComboBox(ExpirationBox)
font = QtGui.QFont()
font.setFamily("Segoe UI")
font.setPointSize(16)
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
self.month_combo.setFont(font)
self.month_combo.setStyleSheet("QDialog\n"
"{\n"
" border: 1px solid #76797C;\n"
"}")
self.month_combo.setObjectName("month_combo")
self.month_combo.addItem("")
self.month_combo.setItemText(0, "")
self.gridLayout.addWidget(self.month_combo, 4, 1, 1, 1)
self.year_combo = QtWidgets.QComboBox(ExpirationBox)
font = QtGui.QFont()
font.setFamily("Segoe UI")
font.setPointSize(16)
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
self.year_combo.setFont(font)
self.year_combo.setObjectName("year_combo")
self.year_combo.addItem("")
self.year_combo.setItemText(0, "")
self.gridLayout.addWidget(self.year_combo, 4, 3, 1, 1)
spacerItem = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
self.gridLayout.addItem(spacerItem, 13, 1, 1, 1)
self.year_label = QtWidgets.QLabel(ExpirationBox)
font = QtGui.QFont()
font.setFamily("Segoe UI")
font.setPointSize(15)
self.year_label.setFont(font)
self.year_label.setAlignment(QtCore.Qt.AlignCenter)
self.year_label.setObjectName("year_label")
self.gridLayout.addWidget(self.year_label, 3, 3, 1, 1)
self.qty_label = QtWidgets.QLabel(ExpirationBox)
font = QtGui.QFont()
font.setFamily("Segoe UI")
font.setPointSize(15)
self.qty_label.setFont(font)
self.qty_label.setObjectName("qty_label")
self.gridLayout.addWidget(self.qty_label, 6, 1, 1, 2)
self.horizontalLayout_1 = QtWidgets.QHBoxLayout()
self.horizontalLayout_1.setContentsMargins(-1, 0, -1, -1)
self.horizontalLayout_1.setSpacing(15)
self.horizontalLayout_1.setObjectName("horizontalLayout_1")
self.cancel_label = QtWidgets.QLabel(ExpirationBox)
font = QtGui.QFont()
font.setFamily("Segoe UI")
font.setPointSize(15)
self.cancel_label.setFont(font)
self.cancel_label.setObjectName("cancel_label")
self.horizontalLayout_1.addWidget(self.cancel_label)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_1.addItem(spacerItem1)
self.accept_button = TouchButton(ExpirationBox)
self.accept_button.setMinimumSize(QtCore.QSize(48, 48))
self.accept_button.setMaximumSize(QtCore.QSize(48, 48))
self.accept_button.setStyleSheet("background-color: transparent;\n"
"border: 0;")
self.accept_button.setText("")
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/Icons/Icons/GreenCheckIcon_Finished.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.accept_button.setIcon(icon)
self.accept_button.setIconSize(QtCore.QSize(48, 48))
self.accept_button.setObjectName("accept_button")
self.horizontalLayout_1.addWidget(self.accept_button)
self.cancel_button = TouchButton(ExpirationBox)
self.cancel_button.setMinimumSize(QtCore.QSize(48, 48))
self.cancel_button.setMaximumSize(QtCore.QSize(48, 48))
self.cancel_button.setStyleSheet("background-color: transparent;\n"
"border: 0;")
self.cancel_button.setText("")
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(":/Icons/Icons/RedCancelIcon_Finished.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.cancel_button.setIcon(icon1)
self.cancel_button.setIconSize(QtCore.QSize(48, 48))
self.cancel_button.setObjectName("cancel_button")
self.horizontalLayout_1.addWidget(self.cancel_button)
self.gridLayout.addLayout(self.horizontalLayout_1, 14, 1, 1, 3)
self.qty_combo = QtWidgets.QComboBox(ExpirationBox)
font = QtGui.QFont()
font.setFamily("Segoe UI")
font.setPointSize(16)
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
self.qty_combo.setFont(font)
self.qty_combo.setObjectName("qty_combo")
self.gridLayout.addWidget(self.qty_combo, 7, 1, 1, 3)
self.label = QtWidgets.QLabel(ExpirationBox)
font = QtGui.QFont()
font.setFamily("Segoe UI")
font.setPointSize(15)
self.label.setFont(font)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 2, 1, 1, 1)
self.itemNameLabel = QtWidgets.QLabel(ExpirationBox)
font = QtGui.QFont()
font.setFamily("Segoe UI")
font.setPointSize(15)
self.itemNameLabel.setFont(font)
self.itemNameLabel.setObjectName("itemNameLabel")
self.gridLayout.addWidget(self.itemNameLabel, 2, 2, 1, 2)
self.exp_label = QtWidgets.QLabel(ExpirationBox)
font = QtGui.QFont()
font.setFamily("Cronus Round")
font.setPointSize(21)
self.exp_label.setFont(font)
self.exp_label.setObjectName("exp_label")
self.gridLayout.addWidget(self.exp_label, 1, 1, 1, 3, QtCore.Qt.AlignHCenter)
self.retranslateUi(ExpirationBox)
QtCore.QMetaObject.connectSlotsByName(ExpirationBox)
def retranslateUi(self, ExpirationBox):
_translate = QtCore.QCoreApplication.translate
ExpirationBox.setWindowTitle(_translate("ExpirationBox", "Dialog"))
self.day_label.setText(_translate("ExpirationBox", "Day"))
self.month_label.setText(_translate("ExpirationBox", "Month"))
self.year_label.setText(_translate("ExpirationBox", "Year"))
self.qty_label.setText(_translate("ExpirationBox", "Quantity"))
self.cancel_label.setText(_translate("ExpirationBox", "Scan to continue"))
self.label.setText(_translate("ExpirationBox", "Item Name:"))
self.itemNameLabel.setText(_translate("ExpirationBox", "Label"))
self.exp_label.setText(_translate("ExpirationBox", "Expiration Date"))
from Widgets.touchButton import TouchButton
import Resource_BY_rc
import style_rc
| agpl-3.0 | -5,871,618,067,304,195,000 | 43.76087 | 119 | 0.662943 | false | 3.571552 | false | false | false |
goddardl/gaffer | apps/gui/gui-1.py | 1 | 5810 | ##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import gc
import IECore
import Gaffer
import GafferUI
class gui( Gaffer.Application ) :
def __init__( self ) :
Gaffer.Application.__init__(
self,
"This application provides a graphical user interface for editing node graphs."
)
self.parameters().addParameters(
[
IECore.StringVectorParameter(
name = "scripts",
description = "A list of scripts to edit.",
defaultValue = IECore.StringVectorData(),
),
IECore.BoolParameter(
name = "fullScreen",
description = "Opens the UI in full screen mode.",
defaultValue = False,
),
]
)
self.parameters().userData()["parser"] = IECore.CompoundObject(
{
"flagless" : IECore.StringVectorData( [ "scripts" ] )
}
)
self.__setupClipboardSync()
def _run( self, args ) :
GafferUI.ScriptWindow.connect( self.root() )
if len( args["scripts"] ) :
for fileName in args["scripts"] :
scriptNode = Gaffer.ScriptNode()
scriptNode["fileName"].setValue( os.path.abspath( fileName ) )
# \todo: Display load errors in a dialog, like in python/GafferUI/FileMenu.py
scriptNode.load( continueOnError = True )
self.root()["scripts"].addChild( scriptNode )
GafferUI.FileMenu.addRecentFile( self, fileName )
del scriptNode
else :
self.root()["scripts"].addChild( Gaffer.ScriptNode() )
if args["fullScreen"].value :
primaryScript = self.root()["scripts"][-1]
primaryWindow = GafferUI.ScriptWindow.acquire( primaryScript )
primaryWindow.setFullScreen( True )
GafferUI.EventLoop.mainEventLoop().start()
return 0
def __setupClipboardSync( self ) :
## This function sets up two way syncing between the clipboard held in the Gaffer::ApplicationRoot
# and the global QtGui.QClipboard which is shared with external applications, and used by the cut and paste
# operations in GafferUI's underlying QWidgets. This is very useful, as it allows nodes to be copied from
# the graph and pasted into emails/chats etc, and then copied out of emails/chats and pasted into the node graph.
#
## \todo I don't think this is the ideal place for this functionality. Firstly, we need it in all apps
# rather than just the gui app. Secondly, we want a way of using the global clipboard using GafferUI
# public functions without needing an ApplicationRoot. Thirdly, it's questionable that ApplicationRoot should
# have a clipboard anyway - it seems like a violation of separation between the gui and non-gui libraries.
# Perhaps we should abolish the ApplicationRoot clipboard and the ScriptNode cut/copy/paste routines, relegating
# them all to GafferUI functionality?
QtGui = GafferUI._qtImport( "QtGui" )
self.__clipboardContentsChangedConnection = self.root().clipboardContentsChangedSignal().connect( Gaffer.WeakMethod( self.__clipboardContentsChanged ) )
QtGui.QApplication.clipboard().dataChanged.connect( Gaffer.WeakMethod( self.__qtClipboardContentsChanged ) )
self.__ignoreQtClipboardContentsChanged = False
def __clipboardContentsChanged( self, applicationRoot ) :
assert( applicationRoot.isSame( self.root() ) )
data = applicationRoot.getClipboardContents()
QtGui = GafferUI._qtImport( "QtGui" )
clipboard = QtGui.QApplication.clipboard()
try :
			self.__ignoreQtClipboardContentsChanged = True # avoid triggering an unnecessary copy back in __qtClipboardContentsChanged
clipboard.setText( str( data ) )
finally :
self.__ignoreQtClipboardContentsChanged = False
def __qtClipboardContentsChanged( self ) :
if self.__ignoreQtClipboardContentsChanged :
return
QtGui = GafferUI._qtImport( "QtGui" )
text = str( QtGui.QApplication.clipboard().text() )
if text :
with Gaffer.BlockedConnection( self.__clipboardContentsChangedConnection ) :
self.root().setClipboardContents( IECore.StringData( text ) )
IECore.registerRunTimeTyped( gui )
| bsd-3-clause | -2,702,933,978,917,300,000 | 36.973856 | 154 | 0.704819 | false | 4.031922 | false | false | false |
MaStanford/AnglishWordbook | Anglish/SyncWikia.py | 1 | 4522 | __author__ = 'm.stanford'
import string
from socket import error as SocketError
import json, httplib
STARTING_PAGE = 72;
ENDING_PAGE = 98;
invalidWords = ["un-English", "Anglish/English", "attested", "unattested", "Class"]
delimiter = "\'\'\'"
wierdfunkInSomeWords = ["\'\' \'\'\'", "\'\'\',", '\'\'\'\'\'', '\"\'\'']
def getWordPage(page):
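    """Fetch the raw MediaWiki revision JSON for the given page id from anglish.wikia.com."""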
connection = httplib.HTTPConnection('anglish.wikia.com', 80)
connection.connect()
connection.request('GET', '/api.php?action=query&prop=revisions&rvprop=content&format=json&pageids=' + str(page))
result = json.loads(connection.getresponse().read())
print result
return result
def processRawPage(page, number):
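    """Split a page's revision wikitext on the ''' delimiter into word/definition chunks, re-attaching fragments that were split mid-entry."""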
words = page['query']
words = words['pages']
words = words[str(number)]
words = words['revisions']
words = words[0]
listOfWords = []
for key, value in words.iteritems():
listOfLines = value
for strings in wierdfunkInSomeWords:
listOfLines = listOfLines.replace(strings, '')
        # NB: the split below operates on the raw `value`, so the substitutions above are effectively unused.
        listOfLines = value.split(delimiter)
print 'Raw Line: ' + str(listOfLines)
length = len(listOfLines)
i = 10;
while not isValidWord(listOfLines[i]):
i += 1
even = i % 2
while i < length:
#Check if we have an invalid word in a place where it should be valid. We then will append that line to the previous line in the list of words.
if not isValidWord(listOfLines[i]) and i % 2 == even:
out = listOfWords[len(listOfWords)-1] + listOfLines[i]
out = out.replace("\'\'", '').replace('|', '\n')
listOfWords.remove(listOfWords[len(listOfWords)-1])
listOfWords.append(out)
print 'Found odd line: ' + out.replace('\n', ' ')
i += 1
even = i % 2
else:
print 'Valid Line: ' + listOfLines[i].replace("\'\'", '').replace('|', '').replace('\n', ' ')
listOfWords.append(listOfLines[i].replace("\'\'", '').replace('|', '\n'))
i += 1
return listOfWords
def buildWordDef(processedHead, processedDef):
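    """Assemble a word dict (word, attested/unattested definitions, type) from a head word and its definition block."""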
word = {}
word['word'] = processedHead.lower()
listOfDefs = [x for x in processedDef.split('\n') if x]
# print 'Def: ' + processedHead + ' : ' + str(listOfDefs)
if len(listOfDefs) > 3:
word['attested_definitions'] = listOfDefs[1].replace('-\n', '').replace('\n', '').replace(' ', '').split(',')
word['unattested_definitions'] = listOfDefs[2].replace('-\n', '').replace('\n', '').replace(' ', '').split(',')
word['type'] = listOfDefs[0].replace("\'", "")
else:
word['attested_definitions'] = []
word['unattested_definitions'] = []
word['type'] = ''
print "buildWordDef" + str(word)
return word
def addWord(wordDef):
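    """POST the word to the Parse-backed wordbook API, retrying on socket errors; returns True when an objectId comes back."""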
word = wordDef['word']
attested = wordDef['attested_definitions']
unattested = wordDef['unattested_definitions']
wordType = wordDef['type']
try:
        # httplib expects a bare host name rather than a full URL.
        connection = httplib.HTTPSConnection('anglishwordbook.herokuapp.com', 443)
connection.connect()
connection.request('POST', '/1/classes/Word', json.dumps({
"Word": word,
"Attested": attested,
"Unattested": unattested,
"Type": wordType
}), {
"X-Parse-Application-Id": "ApuxkukQC9mFuLIdIjG3qC27ms5kZ4XZbopxUohp",
"X-Parse-Master-Key ": "ME6doa9GdB2PTGesScr8DwNQVzlzMwmoEurf3kIX",
"Content-Type": "application/json"
})
result = json.loads(connection.getresponse().read())
if 'objectId' in result:
print result
return True
else:
return False
except SocketError as e:
return addWord(wordDef)
def isValidWord(line):
if len(line.split(' ')) > 2:
return False
if line in invalidWords:
return False
if all(c in string.punctuation for c in line.replace(' ', '').replace('\n','')):
return False
return True
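# e.g. isValidWord("Class") -> False (a table header word), isValidWord("hound") -> True.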
for j in range(STARTING_PAGE, ENDING_PAGE):
rawPage = getWordPage(j)
processedPage = processRawPage(rawPage, j)
index = len(processedPage)
k = 0
while k < index - 1:
# print 'Obj 1 ' + processedPage[i]
# print 'Obj 2 ' + processedPage[i+1]
wordDef = buildWordDef(processedPage[k], processedPage[k+1])
if addWord(wordDef):
k += 2
else:
k = k
| apache-2.0 | -7,765,113,058,141,094,000 | 28.363636 | 156 | 0.570102 | false | 3.586043 | true | false | false |
jsirois/pants | src/python/pants/backend/python/goals/setup_py.py | 1 | 37779 | # Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import enum
import io
import itertools
import logging
import os
import pickle
from abc import ABC, abstractmethod
from collections import abc, defaultdict
from dataclasses import dataclass
from typing import Any, Dict, List, Mapping, Set, Tuple, cast
from pants.backend.python.macros.python_artifact import PythonArtifact
from pants.backend.python.subsystems.setuptools import Setuptools
from pants.backend.python.target_types import (
PexEntryPointField,
PythonProvidesField,
PythonRequirementsField,
PythonSources,
ResolvedPexEntryPoint,
ResolvePexEntryPointRequest,
SetupPyCommandsField,
)
from pants.backend.python.util_rules.pex import (
PexInterpreterConstraints,
PexRequest,
PexRequirements,
VenvPex,
VenvPexProcess,
)
from pants.backend.python.util_rules.python_sources import (
PythonSourceFilesRequest,
StrippedPythonSourceFiles,
)
from pants.backend.python.util_rules.python_sources import rules as python_sources_rules
from pants.base.specs import AddressSpecs, AscendantAddresses
from pants.core.goals.package import BuiltPackage, BuiltPackageArtifact, PackageFieldSet
from pants.core.target_types import FilesSources, ResourcesSources
from pants.engine.addresses import Address, UnparsedAddressInputs
from pants.engine.collection import Collection, DeduplicatedCollection
from pants.engine.fs import (
AddPrefix,
CreateDigest,
Digest,
DigestContents,
DigestSubset,
FileContent,
MergeDigests,
PathGlobs,
RemovePrefix,
Snapshot,
)
from pants.engine.process import ProcessResult
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import (
Dependencies,
DependenciesRequest,
Sources,
Target,
Targets,
TransitiveTargets,
TransitiveTargetsRequest,
)
from pants.engine.unions import UnionMembership, UnionRule, union
from pants.option.subsystem import Subsystem
from pants.python.python_setup import PythonSetup
from pants.util.docutil import docs_url
from pants.util.logging import LogLevel
from pants.util.memo import memoized_property
from pants.util.meta import frozen_after_init
from pants.util.strutil import ensure_text
logger = logging.getLogger(__name__)
class InvalidSetupPyArgs(Exception):
"""Indicates invalid arguments to setup.py."""
class TargetNotExported(Exception):
"""Indicates a target that was expected to be exported is not."""
class InvalidEntryPoint(Exception):
"""Indicates that a specified binary entry point was invalid."""
class OwnershipError(Exception):
"""An error related to target ownership calculation."""
def __init__(self, msg: str):
super().__init__(
f"{msg} See {docs_url('python-distributions')} for "
f"how python_library targets are mapped to distributions."
)
class NoOwnerError(OwnershipError):
"""Indicates an exportable target has no owning exported target."""
class AmbiguousOwnerError(OwnershipError):
"""Indicates an exportable target has more than one owning exported target."""
@dataclass(frozen=True)
class ExportedTarget:
"""A target that explicitly exports a setup.py artifact, using a `provides=` stanza.
The code provided by this artifact can be from this target or from any targets it owns.
"""
target: Target # In practice, a PythonDistribution.
@property
def provides(self) -> PythonArtifact:
return self.target[PythonProvidesField].value
@dataclass(frozen=True)
class DependencyOwner:
"""An ExportedTarget in its role as an owner of other targets.
We need this type to prevent rule ambiguities when computing the list of targets owned by an
ExportedTarget (which involves going from ExportedTarget -> dep -> owner (which is itself an
ExportedTarget) and checking if owner is the original ExportedTarget.
"""
exported_target: ExportedTarget
@dataclass(frozen=True)
class OwnedDependency:
"""A target that is owned by some ExportedTarget.
Code in this target is published in the owner's distribution.
The owner of a target T is T's closest filesystem ancestor among the python_distribution
targets that directly or indirectly depend on it (including T itself).
"""
target: Target
class OwnedDependencies(Collection[OwnedDependency]):
pass
class ExportedTargetRequirements(DeduplicatedCollection[str]):
"""The requirements of an ExportedTarget.
Includes:
- The "normal" 3rdparty requirements of the ExportedTarget and all targets it owns.
- The published versions of any other ExportedTargets it depends on.
"""
sort_input = True
@dataclass(frozen=True)
class PythonDistributionFieldSet(PackageFieldSet):
required_fields = (PythonProvidesField,)
provides: PythonProvidesField
@dataclass(frozen=True)
class SetupPySourcesRequest:
targets: Targets
py2: bool # Whether to use py2 or py3 package semantics.
@dataclass(frozen=True)
class SetupPySources:
"""The sources required by a setup.py command.
Includes some information derived from analyzing the source, namely the packages, namespace
packages and resource files in the source.
"""
digest: Digest
packages: Tuple[str, ...]
namespace_packages: Tuple[str, ...]
package_data: Tuple["PackageDatum", ...]
@dataclass(frozen=True)
class SetupPyChrootRequest:
"""A request to create a chroot containing a setup.py and the sources it operates on."""
exported_target: ExportedTarget
py2: bool # Whether to use py2 or py3 package semantics.
@frozen_after_init
@dataclass(unsafe_hash=True)
class SetupKwargs:
"""The keyword arguments to the `setup()` function in the generated `setup.py`."""
_pickled_bytes: bytes
def __init__(
self, kwargs: Mapping[str, Any], *, address: Address, _allow_banned_keys: bool = False
) -> None:
super().__init__()
if "version" not in kwargs:
raise ValueError(f"Missing a `version` kwarg in the `provides` field for {address}.")
if not _allow_banned_keys:
for arg in {
"data_files",
"namespace_packages",
"package_dir",
"package_data",
"packages",
"install_requires",
}:
if arg in kwargs:
raise ValueError(
f"{arg} cannot be set in the `provides` field for {address}, but it was "
f"set to {kwargs[arg]}. Pants will dynamically set the value for you."
)
        # We serialize with `pickle` so that it is hashable. We don't use `FrozenDict` because it
# would require that all values are immutable, and we may have lists and dictionaries as
# values. It's too difficult/clunky to convert those all, then to convert them back out of
# `FrozenDict`. We don't use JSON because it does not preserve data types like `tuple`.
self._pickled_bytes = pickle.dumps({k: v for k, v in sorted(kwargs.items())}, protocol=4)
@memoized_property
def kwargs(self) -> Dict[str, Any]:
return cast(Dict[str, Any], pickle.loads(self._pickled_bytes))
@property
def name(self) -> str:
return cast(str, self.kwargs["name"])
@property
def version(self) -> str:
return cast(str, self.kwargs["version"])
# Note: This only exists as a hook for additional logic for the `setup()` kwargs, e.g. for plugin
# authors. To resolve `SetupKwargs`, call `await Get(SetupKwargs, ExportedTarget)`, which handles
# running any custom implementations vs. using the default implementation.
@union
@dataclass(frozen=True) # type: ignore[misc]
class SetupKwargsRequest(ABC):
"""A request to allow setting the kwargs passed to the `setup()` function.
By default, Pants will pass the kwargs provided in the BUILD file unchanged. To customize this
behavior, subclass `SetupKwargsRequest`, register the rule `UnionRule(SetupKwargsRequest,
MyCustomSetupKwargsRequest)`, and add a rule that takes your subclass as a parameter and returns
`SetupKwargs`.
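    An illustrative sketch (hypothetical plugin code, not part of this module):
        class MyCustomSetupKwargsRequest(SetupKwargsRequest):
            @classmethod
            def is_applicable(cls, _: Target) -> bool:
                return True
        @rule
        async def my_setup_kwargs(request: MyCustomSetupKwargsRequest) -> SetupKwargs:
            return SetupKwargs(
                {**request.explicit_kwargs, "version": "1.2.3"}, address=request.target.address
            )
        def rules():
            return [*collect_rules(), UnionRule(SetupKwargsRequest, MyCustomSetupKwargsRequest)]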
"""
target: Target
@classmethod
@abstractmethod
def is_applicable(cls, target: Target) -> bool:
"""Whether the kwargs implementation should be used for this target or not."""
@property
def explicit_kwargs(self) -> Dict[str, Any]:
return self.target[PythonProvidesField].value.kwargs
class FinalizedSetupKwargs(SetupKwargs):
"""The final kwargs used for the `setup()` function, after Pants added requirements and sources
information."""
def __init__(self, kwargs: Mapping[str, Any], *, address: Address) -> None:
super().__init__(kwargs, address=address, _allow_banned_keys=True)
@dataclass(frozen=True)
class SetupPyChroot:
"""A chroot containing a generated setup.py and the sources it operates on."""
digest: Digest
setup_kwargs: FinalizedSetupKwargs
@dataclass(frozen=True)
class RunSetupPyRequest:
"""A request to run a setup.py command."""
exported_target: ExportedTarget
interpreter_constraints: PexInterpreterConstraints
chroot: SetupPyChroot
args: Tuple[str, ...]
@dataclass(frozen=True)
class RunSetupPyResult:
"""The result of running a setup.py command."""
output: Digest # The state of the chroot after running setup.py.
@enum.unique
class FirstPartyDependencyVersionScheme(enum.Enum):
EXACT = "exact" # i.e., ==
COMPATIBLE = "compatible" # i.e., ~=
ANY = "any" # i.e., no specifier
class SetupPyGeneration(Subsystem):
options_scope = "setup-py-generation"
help = "Options to control how setup.py is generated from a `python_distribution` target."
@classmethod
def register_options(cls, register):
super().register_options(register)
register(
"--first-party-dependency-version-scheme",
type=FirstPartyDependencyVersionScheme,
default=FirstPartyDependencyVersionScheme.EXACT,
help=(
"What version to set in `install_requires` when a `python_distribution` depends on "
"other `python_distribution`s. If `exact`, will use `==`. If `compatible`, will "
"use `~=`. If `any`, will leave off the version. See "
"https://www.python.org/dev/peps/pep-0440/#version-specifiers."
),
)
def first_party_dependency_version(self, version: str) -> str:
"""Return the version string (e.g. '~=4.0') for a first-party dependency.
If the user specified to use "any" version, then this will return an empty string.
"""
scheme = self.options.first_party_dependency_version_scheme
if scheme == FirstPartyDependencyVersionScheme.ANY:
return ""
specifier = "==" if scheme == FirstPartyDependencyVersionScheme.EXACT else "~="
return f"{specifier}{version}"
def validate_commands(commands: Tuple[str, ...]):
# We rely on the dist dir being the default, so we know where to find the created dists.
if "--dist-dir" in commands or "-d" in commands:
raise InvalidSetupPyArgs(
"Cannot set --dist-dir/-d in setup.py args. To change where dists "
"are written, use the global --pants-distdir option."
)
# We don't allow publishing via setup.py, as we don't want the setup.py running rule,
# which is not a @goal_rule, to side-effect (plus, we'd need to ensure that publishing
# happens in dependency order). Note that `upload` and `register` were removed in
# setuptools 42.0.0, in favor of Twine, but we still check for them in case the user modified
# the default version used by our Setuptools subsystem.
# TODO: A `publish` rule, that can invoke Twine to do the actual uploading.
# See https://github.com/pantsbuild/pants/issues/8935.
if "upload" in commands or "register" in commands:
raise InvalidSetupPyArgs("Cannot use the `upload` or `register` setup.py commands")
@rule
async def package_python_dist(
field_set: PythonDistributionFieldSet,
python_setup: PythonSetup,
) -> BuiltPackage:
transitive_targets = await Get(TransitiveTargets, TransitiveTargetsRequest([field_set.address]))
exported_target = ExportedTarget(transitive_targets.roots[0])
interpreter_constraints = PexInterpreterConstraints.create_from_targets(
transitive_targets.closure, python_setup
)
chroot = await Get(
SetupPyChroot,
SetupPyChrootRequest(exported_target, py2=interpreter_constraints.includes_python2()),
)
# If commands were provided, run setup.py with them; Otherwise just dump chroots.
commands = exported_target.target.get(SetupPyCommandsField).value or ()
if commands:
validate_commands(commands)
setup_py_result = await Get(
RunSetupPyResult,
RunSetupPyRequest(exported_target, interpreter_constraints, chroot, commands),
)
dist_snapshot = await Get(Snapshot, Digest, setup_py_result.output)
return BuiltPackage(
setup_py_result.output,
tuple(BuiltPackageArtifact(path) for path in dist_snapshot.files),
)
else:
dirname = f"{chroot.setup_kwargs.name}-{chroot.setup_kwargs.version}"
rel_chroot = await Get(Digest, AddPrefix(chroot.digest, dirname))
return BuiltPackage(rel_chroot, (BuiltPackageArtifact(dirname),))
# We write .py sources into the chroot under this dir.
CHROOT_SOURCE_ROOT = "src"
SETUP_BOILERPLATE = """
# DO NOT EDIT THIS FILE -- AUTOGENERATED BY PANTS
# Target: {target_address_spec}
from setuptools import setup
setup(**{setup_kwargs_str})
"""
@rule
async def run_setup_py(req: RunSetupPyRequest, setuptools: Setuptools) -> RunSetupPyResult:
"""Run a setup.py command on a single exported target."""
# Note that this pex has no entrypoint. We use it to run our generated setup.py, which
# in turn imports from and invokes setuptools.
setuptools_pex = await Get(
VenvPex,
PexRequest(
output_filename="setuptools.pex",
internal_only=True,
requirements=PexRequirements(setuptools.all_requirements),
interpreter_constraints=(
req.interpreter_constraints
if setuptools.options.is_default("interpreter_constraints")
else PexInterpreterConstraints(setuptools.interpreter_constraints)
),
),
)
# The setuptools dist dir, created by it under the chroot (not to be confused with
# pants's own dist dir, at the buildroot).
dist_dir = "dist/"
result = await Get(
ProcessResult,
VenvPexProcess(
setuptools_pex,
argv=("setup.py", *req.args),
input_digest=req.chroot.digest,
# setuptools commands that create dists write them to the distdir.
# TODO: Could there be other useful files to capture?
output_directories=(dist_dir,),
description=f"Run setuptools for {req.exported_target.target.address}",
level=LogLevel.DEBUG,
),
)
output_digest = await Get(Digest, RemovePrefix(result.output_digest, dist_dir))
return RunSetupPyResult(output_digest)
@rule
async def determine_setup_kwargs(
exported_target: ExportedTarget, union_membership: UnionMembership
) -> SetupKwargs:
target = exported_target.target
setup_kwargs_requests = union_membership.get(SetupKwargsRequest) # type: ignore[misc]
applicable_setup_kwargs_requests = tuple(
request for request in setup_kwargs_requests if request.is_applicable(target)
)
# If no provided implementations, fall back to our default implementation that simply returns
# what the user explicitly specified in the BUILD file.
if not applicable_setup_kwargs_requests:
return SetupKwargs(exported_target.provides.kwargs, address=target.address)
if len(applicable_setup_kwargs_requests) > 1:
possible_requests = sorted(plugin.__name__ for plugin in applicable_setup_kwargs_requests)
raise ValueError(
f"Multiple of the registered `SetupKwargsRequest`s can work on the target "
f"{target.address}, and it's ambiguous which to use: {possible_requests}\n\nPlease "
"activate fewer implementations, or make the classmethod `is_applicable()` more "
"precise so that only one implementation is applicable for this target."
)
setup_kwargs_request = tuple(applicable_setup_kwargs_requests)[0]
return await Get(SetupKwargs, SetupKwargsRequest, setup_kwargs_request(target))
@rule
async def generate_chroot(request: SetupPyChrootRequest) -> SetupPyChroot:
exported_target = request.exported_target
exported_addr = exported_target.target.address
owned_deps, transitive_targets = await MultiGet(
Get(OwnedDependencies, DependencyOwner(exported_target)),
Get(TransitiveTargets, TransitiveTargetsRequest([exported_target.target.address])),
)
# files() targets aren't owned by a single exported target - they aren't code, so
# we allow them to be in multiple dists. This is helpful for, e.g., embedding
# a standard license file in a dist.
files_targets = (tgt for tgt in transitive_targets.closure if tgt.has_field(FilesSources))
targets = Targets(itertools.chain((od.target for od in owned_deps), files_targets))
sources, requirements = await MultiGet(
Get(SetupPySources, SetupPySourcesRequest(targets, py2=request.py2)),
Get(ExportedTargetRequirements, DependencyOwner(exported_target)),
)
# Generate the kwargs for the setup() call. In addition to using the kwargs that are either
# explicitly provided or generated via a user's plugin, we add additional kwargs based on the
# resolved requirements and sources.
target = exported_target.target
resolved_setup_kwargs = await Get(SetupKwargs, ExportedTarget, exported_target)
setup_kwargs = resolved_setup_kwargs.kwargs.copy()
# NB: We are careful to not overwrite these values, but we also don't expect them to have been
    # set. The user must have gone out of their way to use a `SetupKwargs` plugin, and to have
# specified `SetupKwargs(_allow_banned_keys=True)`.
setup_kwargs.update(
{
"package_dir": {"": CHROOT_SOURCE_ROOT, **setup_kwargs.get("package_dir", {})},
"packages": (*sources.packages, *(setup_kwargs.get("packages", []))),
"namespace_packages": (
*sources.namespace_packages,
*setup_kwargs.get("namespace_packages", []),
),
"package_data": {**dict(sources.package_data), **setup_kwargs.get("package_data", {})},
"install_requires": (*requirements, *setup_kwargs.get("install_requires", [])),
}
)
# Add any `pex_binary` targets from `setup_py().with_binaries()` to the dist's entry points.
key_to_binary_spec = exported_target.provides.binaries
binaries = await Get(
Targets, UnparsedAddressInputs(key_to_binary_spec.values(), owning_address=target.address)
)
entry_point_requests = []
for binary in binaries:
if not binary.has_field(PexEntryPointField):
raise InvalidEntryPoint(
"Expected addresses to `pex_binary` targets in `.with_binaries()` for the "
f"`provides` field for {exported_addr}, but found {binary.address} with target "
f"type {binary.alias}."
)
entry_point = binary[PexEntryPointField].value
url = "https://python-packaging.readthedocs.io/en/latest/command-line-scripts.html#the-console-scripts-entry-point"
if not entry_point:
raise InvalidEntryPoint(
"Every `pex_binary` used in `.with_binaries()` for the `provides` field for "
f"{exported_addr} must explicitly set the `entry_point` field, but "
f"{binary.address} left the field off. Set `entry_point` to either "
f"`app.py:func` or the longhand `path.to.app:func`. See {url}."
)
if not entry_point.function:
raise InvalidEntryPoint(
"Every `pex_binary` used in `with_binaries()` for the `provides()` field for "
f"{exported_addr} must end in the format `:my_func` for the `entry_point` field, "
f"but {binary.address} set it to {entry_point.spec!r}. For example, set "
f"`entry_point='{entry_point.module}:main'. See {url}."
)
entry_point_requests.append(ResolvePexEntryPointRequest(binary[PexEntryPointField]))
binary_entry_points = await MultiGet(
Get(ResolvedPexEntryPoint, ResolvePexEntryPointRequest, request)
for request in entry_point_requests
)
for key, binary_entry_point in zip(key_to_binary_spec.keys(), binary_entry_points):
entry_points = setup_kwargs.setdefault("entry_points", {})
console_scripts = entry_points.setdefault("console_scripts", [])
if binary_entry_point.val is not None:
console_scripts.append(f"{key}={binary_entry_point.val.spec}")
# Generate the setup script.
setup_py_content = SETUP_BOILERPLATE.format(
target_address_spec=target.address.spec,
setup_kwargs_str=distutils_repr(setup_kwargs),
).encode()
files_to_create = [
FileContent("setup.py", setup_py_content),
FileContent("MANIFEST.in", "include *.py".encode()),
]
extra_files_digest, src_digest = await MultiGet(
Get(Digest, CreateDigest(files_to_create)),
# Nest the sources under the src/ prefix.
Get(Digest, AddPrefix(sources.digest, CHROOT_SOURCE_ROOT)),
)
chroot_digest = await Get(Digest, MergeDigests((src_digest, extra_files_digest)))
return SetupPyChroot(chroot_digest, FinalizedSetupKwargs(setup_kwargs, address=target.address))
@rule
async def get_sources(request: SetupPySourcesRequest) -> SetupPySources:
python_sources_request = PythonSourceFilesRequest(
targets=request.targets, include_resources=False, include_files=False
)
all_sources_request = PythonSourceFilesRequest(
targets=request.targets, include_resources=True, include_files=True
)
python_sources, all_sources = await MultiGet(
Get(StrippedPythonSourceFiles, PythonSourceFilesRequest, python_sources_request),
Get(StrippedPythonSourceFiles, PythonSourceFilesRequest, all_sources_request),
)
python_files = set(python_sources.stripped_source_files.snapshot.files)
all_files = set(all_sources.stripped_source_files.snapshot.files)
resource_files = all_files - python_files
init_py_digest_contents = await Get(
DigestContents,
DigestSubset(
python_sources.stripped_source_files.snapshot.digest, PathGlobs(["**/__init__.py"])
),
)
packages, namespace_packages, package_data = find_packages(
python_files=python_files,
resource_files=resource_files,
init_py_digest_contents=init_py_digest_contents,
py2=request.py2,
)
return SetupPySources(
digest=all_sources.stripped_source_files.snapshot.digest,
packages=packages,
namespace_packages=namespace_packages,
package_data=package_data,
)
@rule(desc="Compute distribution's 3rd party requirements")
async def get_requirements(
dep_owner: DependencyOwner,
union_membership: UnionMembership,
setup_py_generation: SetupPyGeneration,
) -> ExportedTargetRequirements:
transitive_targets = await Get(
TransitiveTargets, TransitiveTargetsRequest([dep_owner.exported_target.target.address])
)
ownable_tgts = [
tgt for tgt in transitive_targets.closure if is_ownable_target(tgt, union_membership)
]
owners = await MultiGet(Get(ExportedTarget, OwnedDependency(tgt)) for tgt in ownable_tgts)
owned_by_us: Set[Target] = set()
owned_by_others: Set[Target] = set()
for tgt, owner in zip(ownable_tgts, owners):
(owned_by_us if owner == dep_owner.exported_target else owned_by_others).add(tgt)
# Get all 3rdparty deps of our owned deps.
#
# Note that we need only consider requirements that are direct dependencies of our owned deps:
# If T depends on R indirectly, then it must be via some direct deps U1, U2, ... For each such U,
# if U is in the owned deps then we'll pick up R through U. And if U is not in the owned deps
# then it's owned by an exported target ET, and so R will be in the requirements for ET, and we
# will require ET.
direct_deps_tgts = await MultiGet(
Get(Targets, DependenciesRequest(tgt.get(Dependencies))) for tgt in owned_by_us
)
reqs = PexRequirements.create_from_requirement_fields(
tgt[PythonRequirementsField]
for tgt in itertools.chain.from_iterable(direct_deps_tgts)
if tgt.has_field(PythonRequirementsField)
)
req_strs = list(reqs)
# Add the requirements on any exported targets on which we depend.
kwargs_for_exported_targets_we_depend_on = await MultiGet(
Get(SetupKwargs, OwnedDependency(tgt)) for tgt in owned_by_others
)
req_strs.extend(
f"{kwargs.name}{setup_py_generation.first_party_dependency_version(kwargs.version)}"
for kwargs in set(kwargs_for_exported_targets_we_depend_on)
)
return ExportedTargetRequirements(req_strs)
@rule(desc="Find all code to be published in the distribution", level=LogLevel.DEBUG)
async def get_owned_dependencies(
dependency_owner: DependencyOwner, union_membership: UnionMembership
) -> OwnedDependencies:
"""Find the dependencies of dependency_owner that are owned by it.
Includes dependency_owner itself.
"""
transitive_targets = await Get(
TransitiveTargets,
TransitiveTargetsRequest([dependency_owner.exported_target.target.address]),
)
ownable_targets = [
tgt for tgt in transitive_targets.closure if is_ownable_target(tgt, union_membership)
]
owners = await MultiGet(Get(ExportedTarget, OwnedDependency(tgt)) for tgt in ownable_targets)
owned_dependencies = [
tgt
for owner, tgt in zip(owners, ownable_targets)
if owner == dependency_owner.exported_target
]
return OwnedDependencies(OwnedDependency(t) for t in owned_dependencies)
@rule(desc="Get exporting owner for target")
async def get_exporting_owner(owned_dependency: OwnedDependency) -> ExportedTarget:
"""Find the exported target that owns the given target (and therefore exports it).
The owner of T (i.e., the exported target in whose artifact T's code is published) is:
1. An exported target that depends on T (or is T itself).
2. Is T's closest filesystem ancestor among those satisfying 1.
If there are multiple such exported targets at the same degree of ancestry, the ownership
is ambiguous and an error is raised. If there is no exported target that depends on T
and is its ancestor, then there is no owner and an error is raised.
"""
target = owned_dependency.target
ancestor_addrs = AscendantAddresses(target.address.spec_path)
ancestor_tgts = await Get(Targets, AddressSpecs([ancestor_addrs]))
# Note that addresses sort by (spec_path, target_name), and all these targets are
# ancestors of the given target, i.e., their spec_paths are all prefixes. So sorting by
# address will effectively sort by closeness of ancestry to the given target.
exported_ancestor_tgts = sorted(
[t for t in ancestor_tgts if t.has_field(PythonProvidesField)],
key=lambda t: t.address,
reverse=True,
)
exported_ancestor_iter = iter(exported_ancestor_tgts)
for exported_ancestor in exported_ancestor_iter:
transitive_targets = await Get(
TransitiveTargets, TransitiveTargetsRequest([exported_ancestor.address])
)
if target in transitive_targets.closure:
owner = exported_ancestor
# Find any exported siblings of owner that also depend on target. They have the
# same spec_path as it, so they must immediately follow it in ancestor_iter.
sibling_owners = []
sibling = next(exported_ancestor_iter, None)
while sibling and sibling.address.spec_path == owner.address.spec_path:
transitive_targets = await Get(
TransitiveTargets, TransitiveTargetsRequest([sibling.address])
)
if target in transitive_targets.closure:
sibling_owners.append(sibling)
sibling = next(exported_ancestor_iter, None)
if sibling_owners:
all_owners = [exported_ancestor] + sibling_owners
raise AmbiguousOwnerError(
f"Found multiple sibling python_distribution targets that are the closest "
f"ancestor dependees of {target.address} and are therefore candidates to "
f"own it: {', '.join(o.address.spec for o in all_owners)}. Only a "
f"single such owner is allowed, to avoid ambiguity."
)
return ExportedTarget(owner)
raise NoOwnerError(
f"No python_distribution target found to own {target.address}. Note that "
f"the owner must be in or above the owned target's directory, and must "
f"depend on it (directly or indirectly)."
)
def is_ownable_target(tgt: Target, union_membership: UnionMembership) -> bool:
return (
# Note that we check for a PythonProvides field so that a python_distribution
# target can be owned (by itself). This is so that if there are any 3rdparty
# requirements directly on the python_distribution target, we apply them to the dist.
# This isn't particularly useful (3rdparty requirements should be on the python_library
# that consumes them)... but users may expect it to work anyway.
tgt.has_field(PythonProvidesField)
or tgt.has_field(PythonSources)
or tgt.has_field(ResourcesSources)
or tgt.get(Sources).can_generate(PythonSources, union_membership)
)
# Convenient type alias for the pair (package name, data files in the package).
PackageDatum = Tuple[str, Tuple[str, ...]]
# Distutils does not support unicode strings in setup.py, so we must explicitly convert to binary
# strings as pants uses unicode_literals. A natural and prior technique was to use `pprint.pformat`,
# but that embeds u's in the string itself during conversion. For that reason we roll our own
# literal pretty-printer here.
#
# Note that we must still keep this code, even though Pants only runs with Python 3, because
# the created product may still be run by Python 2.
#
# For more information, see http://bugs.python.org/issue13943.
def distutils_repr(obj):
"""Compute a string repr suitable for use in generated setup.py files."""
output = io.StringIO()
linesep = os.linesep
def _write(data):
output.write(ensure_text(data))
def _write_repr(o, indent=False, level=0):
pad = " " * 4 * level
if indent:
_write(pad)
level += 1
if isinstance(o, (bytes, str)):
# The py2 repr of str (unicode) is `u'...'` and we don't want the `u` prefix; likewise,
# the py3 repr of bytes is `b'...'` and we don't want the `b` prefix so we hand-roll a
# repr here.
o_txt = ensure_text(o)
if linesep in o_txt:
_write('"""{}"""'.format(o_txt.replace('"""', r"\"\"\"")))
else:
_write("'{}'".format(o_txt.replace("'", r"\'")))
elif isinstance(o, abc.Mapping):
_write("{" + linesep)
for k, v in o.items():
_write_repr(k, indent=True, level=level)
_write(": ")
_write_repr(v, indent=False, level=level)
_write("," + linesep)
_write(pad + "}")
elif isinstance(o, abc.Iterable):
if isinstance(o, abc.MutableSequence):
open_collection, close_collection = "[]"
elif isinstance(o, abc.Set):
open_collection, close_collection = "{}"
else:
open_collection, close_collection = "()"
_write(open_collection + linesep)
for i in o:
_write_repr(i, indent=True, level=level)
_write("," + linesep)
_write(pad + close_collection)
else:
_write(repr(o)) # Numbers and bools.
_write_repr(obj)
return output.getvalue()
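# For example (illustrative): distutils_repr({"name": "foo"}) returns "{\n    'name': 'foo',\n}".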
def find_packages(
*,
python_files: Set[str],
resource_files: Set[str],
init_py_digest_contents: DigestContents,
py2: bool,
) -> Tuple[Tuple[str, ...], Tuple[str, ...], Tuple[PackageDatum, ...]]:
"""Analyze the package structure for the given sources.
Returns a tuple (packages, namespace_packages, package_data), suitable for use as setup()
kwargs.
"""
# Find all packages implied by the sources.
packages: Set[str] = set()
package_data: Dict[str, List[str]] = defaultdict(list)
for python_file in python_files:
# Python 2: An __init__.py file denotes a package.
# Python 3: Any directory containing python source files is a package.
if not py2 or os.path.basename(python_file) == "__init__.py":
packages.add(os.path.dirname(python_file).replace(os.path.sep, "."))
# Now find all package_data.
for resource_file in resource_files:
# Find the closest enclosing package, if any. Resources will be loaded relative to that.
maybe_package: str = os.path.dirname(resource_file).replace(os.path.sep, ".")
while maybe_package and maybe_package not in packages:
maybe_package = maybe_package.rpartition(".")[0]
# If resource is not in a package, ignore it. There's no principled way to load it anyway.
if maybe_package:
package_data[maybe_package].append(
os.path.relpath(resource_file, maybe_package.replace(".", os.path.sep))
)
# See which packages are pkg_resources-style namespace packages.
# Note that implicit PEP 420 namespace packages and pkgutil-style namespace packages
# should *not* be listed in the setup namespace_packages kwarg. That's for pkg_resources-style
# namespace packages only. See https://github.com/pypa/sample-namespace-packages/.
namespace_packages: Set[str] = set()
init_py_by_path: Dict[str, bytes] = {ipc.path: ipc.content for ipc in init_py_digest_contents}
for pkg in packages:
path = os.path.join(pkg.replace(".", os.path.sep), "__init__.py")
if path in init_py_by_path and declares_pkg_resources_namespace_package(
init_py_by_path[path].decode()
):
namespace_packages.add(pkg)
return (
tuple(sorted(packages)),
tuple(sorted(namespace_packages)),
tuple((pkg, tuple(sorted(files))) for pkg, files in package_data.items()),
)
def declares_pkg_resources_namespace_package(python_src: str) -> bool:
"""Given .py file contents, determine if it declares a pkg_resources-style namespace package.
Detects pkg_resources-style namespaces. See here for details:
https://packaging.python.org/guides/packaging-namespace-packages/.
Note: Accepted namespace package decls are valid Python syntax in all Python versions,
so this code can, e.g., detect namespace packages in Python 2 code while running on Python 3.
"""
import ast
def is_name(node: ast.AST, name: str) -> bool:
return isinstance(node, ast.Name) and node.id == name
def is_call_to(node: ast.AST, func_name: str) -> bool:
if not isinstance(node, ast.Call):
return False
func = node.func
return (isinstance(func, ast.Attribute) and func.attr == func_name) or is_name(
func, func_name
)
def has_args(call_node: ast.Call, required_arg_ids: Tuple[str, ...]) -> bool:
args = call_node.args
if len(args) != len(required_arg_ids):
return False
actual_arg_ids = tuple(arg.id for arg in args if isinstance(arg, ast.Name))
return actual_arg_ids == required_arg_ids
try:
python_src_ast = ast.parse(python_src)
except SyntaxError:
# The namespace package incantations we check for are valid code in all Python versions.
# So if the code isn't parseable we know it isn't a valid namespace package.
return False
# Note that these checks are slightly heuristic. It is possible to construct adversarial code
# that would defeat them. But the only consequence would be an incorrect namespace_packages list
# in setup.py, and we're assuming our users aren't trying to shoot themselves in the foot.
for ast_node in ast.walk(python_src_ast):
# pkg_resources-style namespace, e.g.,
# __import__('pkg_resources').declare_namespace(__name__).
if is_call_to(ast_node, "declare_namespace") and has_args(
cast(ast.Call, ast_node), ("__name__",)
):
return True
return False
def rules():
return [
*python_sources_rules(),
*collect_rules(),
UnionRule(PackageFieldSet, PythonDistributionFieldSet),
]
| apache-2.0 | -2,219,731,043,789,042,400 | 39.710129 | 123 | 0.672675 | false | 4.041399 | false | false | false |
azumimuo/family-xbmc-addon | plugin.video.bubbles/resources/lib/externals/hachoir/hachoir_parser/image/bmp.py | 1 | 6874 | """
Microsoft Bitmap picture parser.
- file extension: ".bmp"
Author: Victor Stinner
Creation: 16 december 2005
"""
from resources.lib.externals.hachoir.hachoir_parser import Parser
from resources.lib.externals.hachoir.hachoir_core.field import (FieldSet,
UInt8, UInt16, UInt32, Bits,
String, RawBytes, Enum,
PaddingBytes, NullBytes, createPaddingField)
from resources.lib.externals.hachoir.hachoir_core.endian import LITTLE_ENDIAN
from resources.lib.externals.hachoir.hachoir_core.text_handler import textHandler, hexadecimal
from resources.lib.externals.hachoir.hachoir_parser.image.common import RGB, PaletteRGBA
from resources.lib.externals.hachoir.hachoir_core.tools import alignValue
class Pixel4bit(Bits):
static_size = 4
def __init__(self, parent, name):
Bits.__init__(self, parent, name, 4)
class ImageLine(FieldSet):
def __init__(self, parent, name, width, pixel_class):
FieldSet.__init__(self, parent, name)
self._pixel = pixel_class
self._width = width
self._size = alignValue(self._width * self._pixel.static_size, 32)
def createFields(self):
for x in xrange(self._width):
yield self._pixel(self, "pixel[]")
size = self.size - self.current_size
if size:
yield createPaddingField(self, size)
class ImagePixels(FieldSet):
def __init__(self, parent, name, width, height, pixel_class, size=None):
FieldSet.__init__(self, parent, name, size=size)
self._width = width
self._height = height
self._pixel = pixel_class
def createFields(self):
for y in xrange(self._height-1, -1, -1):
yield ImageLine(self, "line[%u]" % y, self._width, self._pixel)
size = (self.size - self.current_size) // 8
if size:
yield NullBytes(self, "padding", size)
class CIEXYZ(FieldSet):
def createFields(self):
yield UInt32(self, "x")
yield UInt32(self, "y")
yield UInt32(self, "z")
class BmpHeader(FieldSet):
color_space_name = {
1: "Business (Saturation)",
2: "Graphics (Relative)",
4: "Images (Perceptual)",
8: "Absolute colormetric (Absolute)",
}
def getFormatVersion(self):
if "gamma_blue" in self:
return 4
if "important_color" in self:
return 3
return 2
def createFields(self):
# Version 2 (12 bytes)
yield UInt32(self, "header_size", "Header size")
yield UInt32(self, "width", "Width (pixels)")
yield UInt32(self, "height", "Height (pixels)")
yield UInt16(self, "nb_plan", "Number of plan (=1)")
yield UInt16(self, "bpp", "Bits per pixel") # may be zero for PNG/JPEG picture
# Version 3 (40 bytes)
if self["header_size"].value < 40:
return
yield Enum(UInt32(self, "compression", "Compression method"), BmpFile.COMPRESSION_NAME)
yield UInt32(self, "image_size", "Image size (bytes)")
yield UInt32(self, "horizontal_dpi", "Horizontal DPI")
yield UInt32(self, "vertical_dpi", "Vertical DPI")
yield UInt32(self, "used_colors", "Number of color used")
yield UInt32(self, "important_color", "Number of import colors")
# Version 4 (108 bytes)
if self["header_size"].value < 108:
return
yield textHandler(UInt32(self, "red_mask"), hexadecimal)
yield textHandler(UInt32(self, "green_mask"), hexadecimal)
yield textHandler(UInt32(self, "blue_mask"), hexadecimal)
yield textHandler(UInt32(self, "alpha_mask"), hexadecimal)
yield Enum(UInt32(self, "color_space"), self.color_space_name)
yield CIEXYZ(self, "red_primary")
yield CIEXYZ(self, "green_primary")
yield CIEXYZ(self, "blue_primary")
yield UInt32(self, "gamma_red")
yield UInt32(self, "gamma_green")
yield UInt32(self, "gamma_blue")
def parseImageData(parent, name, size, header):
if ("compression" not in header) or (header["compression"].value in (0, 3)):
width = header["width"].value
height = header["height"].value
bpp = header["bpp"].value
if bpp == 32:
cls = UInt32
elif bpp == 24:
cls = RGB
elif bpp == 8:
cls = UInt8
elif bpp == 4:
cls = Pixel4bit
else:
cls = None
if cls:
return ImagePixels(parent, name, width, height, cls, size=size*8)
return RawBytes(parent, name, size)
class BmpFile(Parser):
PARSER_TAGS = {
"id": "bmp",
"category": "image",
"file_ext": ("bmp",),
"mime": (u"image/x-ms-bmp", u"image/x-bmp"),
"min_size": 30*8,
# "magic": (("BM", 0),),
"magic_regex": ((
# "BM", <filesize>, <reserved>, header_size=(12|40|108)
"BM.{4}.{8}[\x0C\x28\x6C]\0{3}",
0),),
"description": "Microsoft bitmap (BMP) picture"
}
endian = LITTLE_ENDIAN
COMPRESSION_NAME = {
0: u"Uncompressed",
1: u"RLE 8-bit",
2: u"RLE 4-bit",
3: u"Bitfields",
4: u"JPEG",
5: u"PNG",
}
def validate(self):
if self.stream.readBytes(0, 2) != 'BM':
return "Wrong file signature"
if self["header/header_size"].value not in (12, 40, 108):
return "Unknown header size (%s)" % self["header_size"].value
if self["header/nb_plan"].value != 1:
return "Invalid number of planes"
return True
def createFields(self):
yield String(self, "signature", 2, "Header (\"BM\")", charset="ASCII")
yield UInt32(self, "file_size", "File size (bytes)")
yield PaddingBytes(self, "reserved", 4, "Reserved")
yield UInt32(self, "data_start", "Data start position")
yield BmpHeader(self, "header")
# Compute number of color
header = self["header"]
bpp = header["bpp"].value
if 0 < bpp <= 8:
if "used_colors" in header and header["used_colors"].value:
nb_color = header["used_colors"].value
else:
nb_color = (1 << bpp)
else:
nb_color = 0
# Color palette (if any)
if nb_color:
yield PaletteRGBA(self, "palette", nb_color)
# Seek to data start
field = self.seekByte(self["data_start"].value)
if field:
yield field
# Image pixels
size = min(self["file_size"].value-self["data_start"].value, (self.size - self.current_size)//8)
yield parseImageData(self, "pixels", size, header)
def createDescription(self):
return u"Microsoft Bitmap version %s" % self["header"].getFormatVersion()
def createContentSize(self):
return self["file_size"].value * 8
| gpl-2.0 | 6,517,626,528,385,999,000 | 34.251282 | 104 | 0.586267 | false | 3.654439 | false | false | false |
msullivan/advent-of-code | 2020/17a.py | 1 | 1655 | #!/usr/bin/env python3
import copy
from collections import defaultdict
import sys
import re
def extract(s):
return [int(x) for x in re.findall(r'-?\d+', s)]
# Unused leftover helper from an earlier puzzle's template (seat/ray-casting logic); not called below.
def first(grid, x, y, dx, dy):
while True:
x += dx
y += dy
if x < 0 or x >= len(grid[0]) or y < 0 or y >= len(grid):
return ''
if grid[y][x] in ('L', '#'):
return grid[y][x]
nbrs = [(x, y, z) for x in range(-1, 2) for y in range(-1, 2) for z in range(-1, 2) if not x == y == z == 0]
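# The 26 neighbour offsets of a 3-D cell (every -1/0/+1 combination except the zero vector).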
def add(v1, v2):
return tuple(x + y for x, y in zip(v1, v2))
def step(grid):
ngrid = copy.deepcopy(grid)
# ngrid = [x[:] for x in grid]
change = False
for pos in list(grid):
for dx in nbrs + [(0, 0, 0)]:
npos = add(dx, pos)
cnt = 0
for d in nbrs:
if grid[add(npos, d)] == "#":
cnt += 1
print(cnt)
if grid[npos] == '#' and not (cnt == 2 or cnt == 3):
ngrid[npos] = '.'
change = True
elif grid[npos] == '.' and cnt == 3:
ngrid[npos] = '#'
change = True
return ngrid, change
def main(args):
# data = [x.split('\n') for x in sys.stdin.read().split('\n\n')]
data = [list(s.strip()) for s in sys.stdin]
grid = defaultdict(lambda: ".")
for y in range(len(data)):
for x in range(len(data[0])):
grid[x,y,0] = data[y][x]
for i in range(6):
print(i, grid)
grid, _ = step(grid)
print(len([x for x in grid.values() if x == '#']))
if __name__ == '__main__':
sys.exit(main(sys.argv))
| mit | 4,169,982,579,958,940,000 | 24.859375 | 108 | 0.467674 | false | 3.099251 | false | false | false |
asamerh4/mesos | support/push-commits.py | 1 | 4982 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script is typically used by Mesos committers to push a locally applied
review chain to ASF git repo and mark the reviews as submitted on ASF
ReviewBoard.
Example Usage:
> git checkout master
> git pull origin
> ./support/apply-reviews.py -c -r 1234
> ./support/push-commits.py
"""
# TODO(vinod): Also post the commit message to the corresponding ASF JIRA
# tickets and resolve them if necessary.
import argparse
import os
import re
import sys
from subprocess import check_output
REVIEWBOARD_URL = 'https://reviews.apache.org'
def get_reviews(revision_range):
"""Return the list of reviews found in the commits in the revision range."""
reviews = [] # List of (review id, commit log) tuples
rev_list = check_output(['git',
'rev-list',
'--reverse',
revision_range]).strip().split('\n')
for rev in rev_list:
commit_log = check_output(['git',
'--no-pager',
'show',
'--no-color',
'--no-patch',
rev]).strip()
pos = commit_log.find('Review: ')
if pos != -1:
pattern = re.compile('Review: ({url})$'.format(
url=os.path.join(REVIEWBOARD_URL, 'r', '[0-9]+')))
match = pattern.search(commit_log.strip().strip('/'))
if match is None:
print "\nInvalid ReviewBoard URL: '{}'".format(commit_log[pos:])
sys.exit(1)
url = match.group(1)
reviews.append((os.path.basename(url), commit_log))
return reviews
def close_reviews(reviews, options):
"""Mark the given reviews as submitted on ReviewBoard."""
for review_id, commit_log in reviews:
print 'Closing review', review_id
if not options['dry_run']:
check_output(['rbt',
'close',
'--description',
commit_log,
review_id])
def parse_options():
"""Return a dictionary of options parsed from command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument('-n',
'--dry-run',
action='store_true',
help='Perform a dry run.')
args = parser.parse_args()
options = {}
options['dry_run'] = args.dry_run
return options
def main():
"""Main function to push the commits in this branch as review requests."""
options = parse_options()
current_branch_ref = check_output(['git', 'symbolic-ref', 'HEAD']).strip()
current_branch = current_branch_ref.replace('refs/heads/', '', 1)
if current_branch != 'master':
print 'Please run this script from master branch'
sys.exit(1)
remote_tracking_branch = check_output(['git',
'rev-parse',
'--abbrev-ref',
'master@{upstream}']).strip()
merge_base = check_output([
'git',
'merge-base',
remote_tracking_branch,
'master']).strip()
if merge_base == current_branch_ref:
print 'No new commits found to push'
sys.exit(1)
reviews = get_reviews(merge_base + ".." + current_branch_ref)
# Push the current branch to remote master.
remote = check_output(['git',
'config',
'--get',
'branch.master.remote']).strip()
print 'Pushing commits to', remote
if options['dry_run']:
check_output(['git',
'push',
'--dry-run',
remote,
'master:master'])
else:
check_output(['git',
'push',
remote,
'master:master'])
# Now mark the reviews as submitted.
close_reviews(reviews, options)
if __name__ == '__main__':
main()
| apache-2.0 | -8,114,206,134,426,273,000 | 30.732484 | 80 | 0.545163 | false | 4.472172 | false | false | false |
quixey/scrapy-cluster | crawler/tests/tests_online.py | 1 | 3938 | '''
Online link spider test
'''
import unittest
from unittest import TestCase
import time
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
import scrapy
import redis
from redis.exceptions import ConnectionError
import json
import threading, time
from crawling.spiders.link_spider import LinkSpider
from scrapy.utils.project import get_project_settings
from twisted.internet import reactor
from scrapy.crawler import CrawlerRunner
from kafka import KafkaClient, SimpleConsumer
class CustomSpider(LinkSpider):
'''
Overridden link spider for testing
'''
name = "test-spider"
class TestLinkSpider(TestCase):
example_feed = "\x80\x02}q\x00(X\x0f\x00\x00\x00allowed_domainsq\x01NX"\
"\x0b\x00\x00\x00allow_regexq\x02NX\a\x00\x00\x00crawlidq\x03X\x19"\
"\x00\x00\x0001234567890abcdefghijklmnq\x04X\x03\x00\x00\x00urlq\x05X"\
"\x13\x00\x00\x00www.istresearch.comq\x06X\a\x00\x00\x00expiresq\aK"\
"\x00X\b\x00\x00\x00priorityq\bK\x01X\n\x00\x00\x00deny_regexq\tNX\b"\
"\x00\x00\x00spideridq\nX\x0b\x00\x00\x00test-spiderq\x0bX\x05\x00"\
"\x00\x00attrsq\x0cNX\x05\x00\x00\x00appidq\rX\a\x00\x00\x00testappq"\
"\x0eX\x06\x00\x00\x00cookieq\x0fNX\t\x00\x00\x00useragentq\x10NX\x0f"\
"\x00\x00\x00deny_extensionsq\x11NX\b\x00\x00\x00maxdepthq\x12K\x00u."
def setUp(self):
self.settings = get_project_settings()
self.settings.set('KAFKA_TOPIC_PREFIX', "demo_test")
# set up redis
self.redis_conn = redis.Redis(host=self.settings['REDIS_HOST'],
port=self.settings['REDIS_PORT'])
try:
self.redis_conn.info()
except ConnectionError:
print "Could not connect to Redis"
# plugin is essential to functionality
sys.exit(1)
# clear out older test keys if any
keys = self.redis_conn.keys("test-spider:*")
for key in keys:
self.redis_conn.delete(key)
# set up kafka to consumer potential result
self.kafka_conn = KafkaClient(self.settings['KAFKA_HOSTS'])
self.kafka_conn.ensure_topic_exists("demo_test.crawled_firehose")
self.consumer = SimpleConsumer(
self.kafka_conn,
"demo-id",
"demo_test.crawled_firehose",
buffer_size=1024*100,
fetch_size_bytes=1024*100,
max_buffer_size=None
)
# move cursor to end of kafka topic
self.consumer.seek(0, 2)
def test_crawler_process(self):
runner = CrawlerRunner(self.settings)
d = runner.crawl(CustomSpider)
d.addBoth(lambda _: reactor.stop())
# add crawl to redis
key = "test-spider:istresearch.com:queue"
self.redis_conn.zadd(key, self.example_feed, -99)
# run the spider, give 20 seconds to see the url, crawl it,
# and send to kafka. Then we kill the reactor
def thread_func():
time.sleep(20)
reactor.stop()
thread = threading.Thread(target=thread_func)
thread.start()
reactor.run()
# ensure it was sent out to kafka
message_count = 0
for message in self.consumer.get_messages():
if message is None:
break
else:
the_dict = json.loads(message.message.value)
if the_dict is not None and the_dict['appid'] == 'testapp' \
and the_dict['crawlid'] == '01234567890abcdefghijklmn':
message_count += 1
self.assertEquals(message_count, 1)
def tearDown(self):
keys = self.redis_conn.keys('stats:crawler:*:test-spider:*')
keys = keys + self.redis_conn.keys('test-spider:*')
for key in keys:
self.redis_conn.delete(key)
if __name__ == '__main__':
unittest.main()
| mit | 2,532,433,231,757,263,000 | 33.243478 | 79 | 0.623667 | false | 3.284404 | true | false | false |
yfauser/maxwindownotify | setup.py | 1 | 1252 | from setuptools import setup
import io
def read(*filenames, **kwargs):
encoding = kwargs.get('encoding', 'utf-8')
sep = kwargs.get('sep', '\n')
buf = []
for filename in filenames:
with io.open(filename, encoding=encoding) as f:
buf.append(f.read())
return sep.join(buf)
long_description = read('README.rst')
setup(
name='maxwindownotify',
version='1.1.1',
packages=['maxwindownotify'],
package_data={'maxwindownotify':['*'], 'maxwindownotify':['notifier_modules/*']},
url='http://github.com/yfauser/maxwindownotify',
license='MIT',
author='yfauser',
author_email='[email protected]',
description='This little script (daemon) will poll for the status of all window sensors known to a MAX Cube system and check for open windows',
long_description=long_description,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: End Users/Desktop',
'Topic :: Utilities',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7'],
install_requires=['requests>=2.7.0', 'netaddr>=0.7.18'],
entry_points={
'console_scripts': ['maxwindownotify = maxwindownotify.maxwindownotify:main']
}
)
| mit | 2,280,797,815,360,075,000 | 33.777778 | 147 | 0.654153 | false | 3.748503 | false | false | false |
hazybluedot/indie_helper | util.py | 1 | 2278 | import requests
import bleach
import sys
# imported so that TooManyRedirects raised in follow_redirects() below is defined
from requests.exceptions import TooManyRedirects
if sys.version < '3':
from urlparse import urlparse
text_type = unicode
text_types = [ str, unicode ]
binary_type = str
else:
from urllib.parse import urlparse
text_type = str
text_types = [ str ]
binary_type = bytes
def is_url(url):
try:
parts = urlparse(url)
except TypeError:
return False
return parts.scheme in [ 'http', 'https' ]
def flatten(item):
if type(item) in [ list, tuple ] and len(item) == 1:
return item[0]
else:
return item
#bleach.ALLOWED_TAGS + ['p']
ALLOWED_TAGS=bleach.ALLOWED_TAGS + ['p', 'span']
def clean(text):
return bleach.clean(text, tags=ALLOWED_TAGS)
def clean_url(url):
if url.startswith('javascript:'):
return '';
return url
def bleachify(entry, key=None):
## todo for each property
if key == 'url':
bleached = bleachify(entry)
return [ clean_url(u) for u in bleached ]
if hasattr(entry, 'items'):
return dict([ (prop, bleachify(value, prop)) for prop, value in entry.items() ])
elif type(entry) is list:
## to flatten the list-of-one values that mf2py generates
## I have revisited this and decided to keep single element lists as this seems to be part of the mf2 defined format
#if len(entry) == 1:
# return bleachify(entry[0])
#else:
return map(bleachify, entry)
elif type(entry) in text_types:
return clean(entry)
else:
print('unhandled type of entry: {0}'.format(type(entry)))
return None
def follow_redirects(url, max_depth):
"""perform http GET url, following any redirects up to max_depth.
return resolved url.
Raises TooManyRedirects exception if max_depth is exceeded"""
def _wrapped(url, depth, acc):
if depth > max_depth:
raise TooManyRedirects('following redirects on {0} exceeded maximum depth of {1}'.format(url, max_depth))
r = requests.head(url)
acc.append( { 'url': url, 'status_code': r.status_code} )
if r.status_code in [ 301, 302 ]:
return _wrapped(r.headers['Location'], depth+1, acc)
else:
return acc
return _wrapped(url, 0, [])
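# Illustrative usage sketch (added for clarity; the URL and hop list below are
# made up and depend entirely on the server's responses):
#
#   hops = follow_redirects('http://example.com/short', 5)
#   # e.g. [{'url': 'http://example.com/short', 'status_code': 301},
#   #       {'url': 'http://example.com/final', 'status_code': 200}]
#   resolved_url = hops[-1]['url']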
| gpl-3.0 | 7,210,100,983,337,184,000 | 28.205128 | 124 | 0.611501 | false | 3.692058 | false | false | false |
jenshenrik/destiny-trader | destiny.py | 1 | 3112 | import sys
import re
TYPE_BATTLEFIELD = "Battlefield"
def print_usage():
print("""
Star Wars: Destiny trade list builder
Usage:
$>python destiny.py <target-file>
where <target-file> is the text-file to process.
This file should be generated by logging into swdestiny.com, going to 'My collection',
selecting all (Ctrl/Cmd + A), pasting into an empty file, and saving.
""")
# Opens file, and returns it as a list of lines
def open_file(path):
f = open(path, 'r+')
lines = f.readlines()
f.close()
return lines
def write_file(path, haves, wants):
output = open(path, 'w')
output.write("HAVES")
for card in haves:
qty = 0
if card.type == TYPE_BATTLEFIELD:
qty = card.qty - 1
else:
qty = card.qty - 2
output.write("\n%dx %s\t\t(%s)" % (qty, card.name, card.set_string))
output.write("\n\nWANTS")
for card in wants:
qty = 0
if card.type == TYPE_BATTLEFIELD:
qty = 1 #you always only want 1 battlefield
else:
qty = 2 - card.qty
output.write("\n%dx %s\t\t(%s)" % (qty, card.name, card.set_string))
output.close()
def strip_header(lines):
return lines[19:]
def strip_footer(lines):
return lines[:-11]
class Card:
def __init__(self, line):
split = line.split("\t")
self.name = split[0].lstrip().rstrip()
self.qty = self.parse_qty(split[1])
self.type = split[6]
self.rarity = split[7]
self.set = self.parse_set(split[-1].lstrip().rstrip())
self.number = self.parse_number(split[-1])
self.set_string = split[-1].lstrip().rstrip()
# Pulls number from quantity string
def parse_qty(self, qty_string):
found = re.findall(r'\d+', qty_string)
return int(found[0])
# Parse the card's set name.
# Assumes the last word is set number
def parse_set(self, set_string):
return set_string.rsplit(" ", 1)[0]
# Parse the card's number in the set.
# Assumes the last word is set number
def parse_number(self, number_string):
return int(number_string.rsplit(" ", 1)[1])
def check_usage():
num_args = len(sys.argv)
if num_args < 2:
print_usage()
sys.exit()
def extract_filename_and_extension(filename):
split_name = filename.rsplit(".", 1)
return (split_name[0], split_name[1])
# run script
check_usage()
input_file = sys.argv[1]
file_lines = open_file(input_file)
file_lines = strip_header(file_lines)
file_lines = strip_footer(file_lines)
cards = []
for line in file_lines:
cards.append(Card(line))
haves = []
wants = []
for card in cards:
if card.type == TYPE_BATTLEFIELD:
if card.qty < 1:
wants.append(card)
elif card.qty > 1:
haves.append(card)
else:
if card.qty < 2:
wants.append(card)
elif card.qty > 2:
haves.append(card)
(filename, extension) = extract_filename_and_extension(input_file)
write_file(filename+"_trades."+extension, haves, wants)
| gpl-3.0 | 8,064,256,526,302,666,000 | 24.719008 | 90 | 0.593509 | false | 3.314164 | false | false | false |
bfarr/kombine | examples/kepler/correlated_likelihood.py | 1 | 2577 | import numpy as np
import numpy.linalg as nl
import numpy.random as nr
import rv_model as rv
import scipy.linalg as sl
import scipy.stats as ss
def generate_covariance(ts, sigma, tau):
r"""Generates a covariance matrix according to an
squared-exponential autocovariance
.. math::
\left\langle x_i x_j \right\rangle = \sigma_0^2 \delta_{ij} + \sigma^2 \exp\left[ - \frac{\left| t_i - t_j\right|^2}{2 \tau^2} \right]
"""
ndim = ts.shape[0]
tis = ts[:, np.newaxis]
tjs = ts[np.newaxis, :]
return sigma*sigma*np.exp(-np.square(tis-tjs)/(2.0*tau*tau))
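# Quick sanity-check sketch (illustrative only; the time grid and parameter
# values are arbitrary): the returned matrix is symmetric, carries sigma**2 on
# the diagonal, and decays with |t_i - t_j| on the timescale tau.
#
#   ts = np.linspace(0.0, 10.0, 5)
#   cov = generate_covariance(ts, sigma=2.0, tau=3.0)
#   assert cov.shape == (5, 5)
#   assert np.allclose(np.diag(cov), 4.0)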
params_dtype = np.dtype([('mu', np.float),
('K', np.float),
('e', np.float),
('omega', np.float),
('chi', np.float),
('P', np.float),
('nu', np.float),
('sigma', np.float),
('tau', np.float)])
class Log1PPosterior(object):
"""Log of the posterior for a single planet system observed with a
single telescope. """
def __init__(self, ts, vs, dvs):
self.ts = np.sort(ts)
self.vs = vs
self.dvs = dvs
self.T = self.ts[-1] - self.ts[0]
self.dt_min = np.min(np.diff(self.ts))
def to_params(self, p):
p = np.atleast_1d(p)
return p.view(params_dtype)
def log_prior(self, p):
p = self.to_params(p)
# Bounds
if p['K'] < 0.0 or p['e'] < 0.0 or p['e'] > 1.0 or p['omega'] < 0.0 or p['omega'] > 2.0*np.pi or p['P'] < 0.0 or p['nu'] < 0.1 or p['nu'] > 10.0 or p['sigma'] < 0.0 or p['tau'] < 0.0 or p['tau'] > self.T:
return np.NINF
# Otherwise, flat prior on everything.
return 0.0
def log_likelihood(self, p):
p = self.to_params(p)
v = self.rvs(p)
res = self.vs - v - p['mu']
cov = p['nu']*p['nu']*np.diag(self.dvs*self.dvs)
cov += generate_covariance(self.ts, p['sigma'], p['tau'])
cfactor = sl.cho_factor(cov)
cc, lower = cfactor
n = self.ts.shape[0]
return -0.5*n*np.log(2.0*np.pi) - np.sum(np.log(np.diag(cc))) - 0.5*np.dot(res, sl.cho_solve(cfactor, res))
def __call__(self, p):
lp = self.log_prior(p)
if lp == np.NINF:
return np.NINF
else:
return lp + self.log_likelihood(p)
def rvs(self, p):
p = self.to_params(p)
return rv.rv_model(self.ts, p['K'], p['e'], p['omega'], p['chi'], p['P'])
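    # Minimal usage sketch (illustrative only: the data arrays and parameter
    # values are made up, and rv.rv_model must be importable for the
    # likelihood evaluation to run):
    #
    #   ts = np.linspace(0.0, 100.0, 50)
    #   vs = np.random.normal(0.0, 1.0, 50)
    #   dvs = 0.1*np.ones(50)
    #   lnpost = Log1PPosterior(ts, vs, dvs)
    #   p = np.array([(0.0, 1.0, 0.1, 1.0, 0.5, 10.0, 1.0, 0.5, 20.0)],
    #                dtype=params_dtype)
    #   print(lnpost(p))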
| mit | -2,341,818,866,456,086,000 | 27.955056 | 212 | 0.498642 | false | 3.035336 | false | false | false |
FrancescoRizzi/AWSomesauce | articles/BAS4-pws/custauth/custauth.py | 1 | 18186 | #!/usr/bin/env python
import os
import json
import StringIO
from contextlib import closing
import re
import time
import pprint
import boto3
from boto3.session import Session
import botocore
import jwt
from cryptography.x509 import load_pem_x509_certificate
from cryptography.hazmat.backends import default_backend
# Simplest form of logging using the standard logging module:
# ============================================================
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Top-Level Handler:
# ============================================================
def lambda_handler(event, context):
logger.info("CustAuth Triggered.")
authToken = event.get('authorizationToken', '')
methodArn = event.get('methodArn', '')
authHeader = event.get('Authorization', '')
logger.info("Authorization Token : '{0!s}'.".format(authToken))
logger.info("Method ARN : '{0!s}'.".format(methodArn))
logger.info("Authorization Header: '{0!s}'.".format(authHeader))
# Check Configuration before wasting time
# ========================================================
# AUTH_APP_ID: required
auth_app_id = os.environ.get('AUTH_APP_ID', None)
if not auth_app_id:
logger.error("Missing Required 'AUTH_APP_ID' Environmental Variable.")
raise ValueError("Missing/blank 'AUTH_APP_ID'")
logger.info("Auth App ID : '{0!s}'.".format(auth_app_id))
# AUTH_TENANT_ID: required
auth_tenant_id = os.environ.get('AUTH_TENANT_ID', None)
if not auth_tenant_id:
logger.error("Missing Required 'AUTH_TENANT_ID' Environmental Variable.")
raise ValueError("Missing/blank 'AUTH_TENANT_ID'")
logger.info("Auth Tenant ID : '{0!s}'.".format(auth_tenant_id))
# CERTS_BUCKET: required
certs_bucket = os.environ.get('CERTS_BUCKET', None)
if not certs_bucket:
logger.error("Missing Required 'CERTS_BUCKET' Environmental Variable.")
raise ValueError("Missing/blank 'CERTS_BUCKET'")
logger.info("Certificates Bucket : '{0!s}'.".format(certs_bucket))
# ========================================================
# Client credentials expected in the authorizationToken, in the form:
# 'Bearer <id_token>'
# Missing authorizationToken:
# response 401 - Unauthorized (although we don't send back a 'WWW-Authenticate' header as we should)
if not authToken:
logger.warn("Missing Authorization Token: will trigger 401-Unauthorized response.")
raise Exception('Unauthorized')
validator = TokenValidator()
validToken = validator.ValidateToken(authToken, auth_app_id, auth_tenant_id, certs_bucket)
logger.info("Is the Authorization Token valid? {0!s}".format(validToken))
# authorizationToken invalid (format or contents):
# respond with Policy DENYING access, which will trigger API Gateway to respond with
# response 403 - Forbidden
# authorizationToken valid (format and contents):
# respond with Policy ALLOWING access, which will trigger API Gateway to
# proceed with the backend integration configured on the method.
principalId = auth_app_id
arnParts = event['methodArn'].split(':')
apiGatewayArnTmp = arnParts[5].split('/')
awsAccountId = arnParts[4]
policy = AuthPolicy(principalId, awsAccountId)
policy.restApiId = apiGatewayArnTmp[0]
policy.region = arnParts[3]
policy.stage = apiGatewayArnTmp[1]
policyDesc = ''
if validToken:
policy.allowAllMethods()
policyDesc = 'ALLOW'
else:
policy.denyAllMethods()
policyDesc = 'DENY'
authResponse = policy.build()
# Optional: context
    # The response can also include a 'context' mapping of key-value pairs,
    # which will be made available to the configured backend
# (if the policy is such that the request handling continues)
# as $context.authorizer.<key>
# This mapping is part of the cached response.
#
# context = {
# 'key': 'value', # $context.authorizer.key -> value
# 'number' : 1,
# 'bool' : True
# }
# authResponse['context'] = context
#
# INVALID formats:
# context['arr'] = ['foo']
# context['obj'] = {'foo':'bar'}
logger.info("CustAuth completed: returning policy to {0!s} access.".format(policyDesc))
return authResponse
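# For reference, the returned authResponse is an IAM-style policy of roughly
# this shape (illustrative; placeholders in angle brackets are filled in from
# the method ARN and environment):
#
#   {
#     "principalId": "<auth_app_id>",
#     "policyDocument": {
#       "Version": "2012-10-17",
#       "Statement": [
#         {"Action": "execute-api:Invoke",
#          "Effect": "Allow",   # or "Deny"
#          "Resource": ["arn:aws:execute-api:<region>:<account>:<apiId>/<stage>/*/*"]}
#       ]
#     }
#   }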
# TokenValidator
# ============================================================
class TokenValidator(object):
PEMSTART = "-----BEGIN CERTIFICATE-----\n"
PEMEND = "\n-----END CERTIFICATE-----\n"
def __init__(self):
self._session = None
self._client = None
def ValidateToken(self, auth_header, auth_app_id, auth_tenant_id, certs_bucket):
# auth_header expected to be in the form:
# 'Bearer <id_token>'
        # maxsplit=1 so the expected 'Bearer <id_token>' form unpacks into exactly two parts
        (pre, encoded_token) = auth_header.split(' ', 1)
if (not pre) or (pre.upper() != "BEARER"):
logger.warn("Authorization Token did not match expected 'Bearer <id_token>' format.")
return False
expected_issuer = 'https://sts.windows.net/{0!s}/'.format(auth_tenant_id)
unverified_headers = jwt.get_unverified_header(encoded_token)
#unverified_token = jwt.decode(encoded_token, algorithms=['RS256'], audience=auth_app_id, issuer=expected_issuer, options={'verify_signature': False})
#x5t = unverified_token.get('x5t', None)
#kid = unverified_token.get('kid', None)
kid = unverified_headers.get('kid', None)
logger.info("Token 'kid': '{0!s}'.".format(kid))
if not kid:
logger.warn("Could not extract 'kid' property from token.")
return False
cert_pem = self.GetSigningCertificate(certs_bucket, kid)
if cert_pem:
logger.info("Retrieved Signing Certificate.")
#if isinstance(cert_pem, unicode):
# logger.info("Signing Certificate is unicode. Will attempt STRICT conversion.")
# cert_pem = cert_pem.encode('ascii', 'strict')
# logger.info("Signing Certificate unicode encoded to ASCII.")
cert = load_pem_x509_certificate(cert_pem, default_backend())
logger.info("Loaded Signing Certificate.")
public_key = cert.public_key()
logger.info("Extracted Public Key from Signing Certificate.")
decoded = jwt.decode(encoded_token, public_key, algorithms=['RS256'], audience=auth_app_id, issuer=expected_issuer)
# NOTE: the JWT decode method verifies
# - general format of the encoded token
# - signature, using the given public key
# - aud claim (Audience) vs audience value
# - exp claim (Expiration) vs current datetime (UTC)
# - nbf claim (Not Before) vs current datetime (UTC)
# - iss claim (Issuer) vs issuer value
if decoded:
logger.info("Token Decoded and Validated Successfully.")
return True
else:
logger.warn("Failed to Decode Token when verifying signature.")
return False
else:
logger.warn("Could not retrieve signing certificate matching token's 'kid' property ('{0!s}').".format(kid))
return False
def GetSigningCertificate(self, certs_bucket, kid):
self.EnsureClient()
discovery_record_str = None
with closing(StringIO.StringIO()) as dest:
self._client.download_fileobj(
Bucket=certs_bucket,
Key=kid,
Fileobj=dest)
discovery_record_str = dest.getvalue()
if not discovery_record_str:
logger.warn("Could not retrieve Discovery Record from Bucket.")
return None
logger.info("Retrieved Discovery Record Payload from Bucket.")
# discovery_record_str is the payload extracted from
# the bucket, presumed to be the JSON-formatted string
# of the signing certificate discovery record. eg:
# {
# "x5t": "...",
# "use": "...",
# "e": "...",
# "kty": "...",
# "n": "...",
# "x5c": [
# "..."
# ],
# "issuer": "...",
# "kid": "..."
# }
# What we need to extract as 'certificate' is
# the first value in the "x5c" property list
discovery_record = json.loads(discovery_record_str)
logger.info("Parsed Discovery Record JSON.")
x5c = discovery_record.get('x5c', None)
if not x5c:
logger.warn("Could not find 'x5c' property from Discovery Record.")
return None
logger.info("Discovery Record x5c found.")
raw_cert = ""
if isinstance(x5c, list):
raw_cert = x5c[0]
elif isinstance(x5c, basestring):
raw_cert = x5c
else:
logger.warn("Unexpected data type for x5c value from Discovery Record (expected string or list).")
return None
logger.info("Raw Cert:|{0!s}|".format(raw_cert))
if isinstance(raw_cert, unicode):
logger.info("Raw Certificate is unicode. Attempting STRICT conversion to ASCII.")
raw_cert = raw_cert.encode('ascii', 'strict')
logger.info("Raw Certificate encoded to ASCII.")
logger.info("Formatting Raw Certificate according to PEM 64-characters lines.")
raw_cert = self.InsertNewLines(raw_cert)
logger.info("Raw Certificate lines length normalized to PEM.")
pem_cert = self.PEMSTART + raw_cert + self.PEMEND
logger.info("After wrapping Raw certificate in PEM Markers:")
logger.info(pem_cert)
#tmp = "is NOT"
#if isinstance(raw_cert, unicode):
# tmp = "is"
#logger.info("Before Wrapping in PEM delimiters, the raw_cert data type {0!s} unicode.".format(tmp))
#
#pem_cert = self.PEMSTART + raw_cert + self.PEMEND
#logger.info("PEM Cert:|{0!s}|".format(pem_cert))
#
#tmp = "is NOT"
#if isinstance(pem_cert, unicode):
# tmp = "is"
#logger.info("After Wrapping in PEM delimiters, the pem_cert data type {0!s} unicode.".format(tmp))
#
#if isinstance(pem_cert, unicode):
# logger.info("Signing Certificate is unicode. Will attempt STRICT conversion.")
# pem_cert = pem_cert.encode('ascii', 'strict')
# logger.info("Signing Certificate unicode encoded to ASCII.")
#
#logger.info("Splitting according to PEM format (64 characters per line).")
#pem_cert = self.InsertNewLines(pem_cert)
#logger.info("After splitting in 64-character long lines:")
#logger.info(pem_cert)
return pem_cert
def InsertNewLines(self, s, every=64):
lines = []
for i in xrange(0, len(s), every):
lines.append(s[i:i+every])
return '\n'.join(lines)
def EnsureClient(self):
self.EnsureSession()
if not self._client:
self._client = self._session.client('s3')
def EnsureSession(self):
if not self._session:
self._session = boto3.Session()
# HttpVerbs
# ============================================================
class HttpVerb:
GET = "GET"
POST = "POST"
PUT = "PUT"
PATCH = "PATCH"
HEAD = "HEAD"
DELETE = "DELETE"
OPTIONS = "OPTIONS"
ALL = "*"
# AuthPolicy
# ============================================================
class AuthPolicy(object):
awsAccountId = ""
"""The AWS account id the policy will be generated for. This is used to create the method ARNs."""
principalId = ""
"""The principal used for the policy, this should be a unique identifier for the end user."""
version = "2012-10-17"
"""The policy version used for the evaluation. This should always be '2012-10-17'"""
pathRegex = "^[/.a-zA-Z0-9-\*]+$"
"""The regular expression used to validate resource paths for the policy"""
"""these are the internal lists of allowed and denied methods. These are lists
of objects and each object has 2 properties: A resource ARN and a nullable
conditions statement.
    the build method processes these lists and generates the appropriate
statements for the final policy"""
allowMethods = []
denyMethods = []
restApiId = "*"
"""The API Gateway API id. By default this is set to '*'"""
region = "*"
"""The region where the API is deployed. By default this is set to '*'"""
stage = "*"
"""The name of the stage used in the policy. By default this is set to '*'"""
def __init__(self, principal, awsAccountId):
self.awsAccountId = awsAccountId
self.principalId = principal
self.allowMethods = []
self.denyMethods = []
def _addMethod(self, effect, verb, resource, conditions):
"""Adds a method to the internal lists of allowed or denied methods. Each object in
the internal list contains a resource ARN and a condition statement. The condition
statement can be null."""
if verb != "*" and not hasattr(HttpVerb, verb):
raise NameError("Invalid HTTP verb " + verb + ". Allowed verbs in HttpVerb class")
resourcePattern = re.compile(self.pathRegex)
if not resourcePattern.match(resource):
raise NameError("Invalid resource path: " + resource + ". Path should match " + self.pathRegex)
if resource[:1] == "/":
resource = resource[1:]
resourceArn = ("arn:aws:execute-api:" +
self.region + ":" +
self.awsAccountId + ":" +
self.restApiId + "/" +
self.stage + "/" +
verb + "/" +
resource)
if effect.lower() == "allow":
self.allowMethods.append({
'resourceArn' : resourceArn,
'conditions' : conditions
})
elif effect.lower() == "deny":
self.denyMethods.append({
'resourceArn' : resourceArn,
'conditions' : conditions
})
def _getEmptyStatement(self, effect):
"""Returns an empty statement object prepopulated with the correct action and the
desired effect."""
statement = {
'Action': 'execute-api:Invoke',
'Effect': effect[:1].upper() + effect[1:].lower(),
'Resource': []
}
return statement
def _getStatementForEffect(self, effect, methods):
"""This function loops over an array of objects containing a resourceArn and
conditions statement and generates the array of statements for the policy."""
statements = []
if len(methods) > 0:
statement = self._getEmptyStatement(effect)
for curMethod in methods:
if curMethod['conditions'] is None or len(curMethod['conditions']) == 0:
statement['Resource'].append(curMethod['resourceArn'])
else:
conditionalStatement = self._getEmptyStatement(effect)
conditionalStatement['Resource'].append(curMethod['resourceArn'])
conditionalStatement['Condition'] = curMethod['conditions']
statements.append(conditionalStatement)
statements.append(statement)
return statements
def allowAllMethods(self):
"""Adds a '*' allow to the policy to authorize access to all methods of an API"""
self._addMethod("Allow", HttpVerb.ALL, "*", [])
def denyAllMethods(self):
"""Adds a '*' allow to the policy to deny access to all methods of an API"""
self._addMethod("Deny", HttpVerb.ALL, "*", [])
def allowMethod(self, verb, resource):
"""Adds an API Gateway method (Http verb + Resource path) to the list of allowed
methods for the policy"""
self._addMethod("Allow", verb, resource, [])
def denyMethod(self, verb, resource):
"""Adds an API Gateway method (Http verb + Resource path) to the list of denied
methods for the policy"""
self._addMethod("Deny", verb, resource, [])
def allowMethodWithConditions(self, verb, resource, conditions):
"""Adds an API Gateway method (Http verb + Resource path) to the list of allowed
methods and includes a condition for the policy statement. More on AWS policy
conditions here: http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements.html#Condition"""
self._addMethod("Allow", verb, resource, conditions)
def denyMethodWithConditions(self, verb, resource, conditions):
"""Adds an API Gateway method (Http verb + Resource path) to the list of denied
methods and includes a condition for the policy statement. More on AWS policy
conditions here: http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements.html#Condition"""
self._addMethod("Deny", verb, resource, conditions)
def build(self):
"""Generates the policy document based on the internal lists of allowed and denied
conditions. This will generate a policy with two main statements for the effect:
one statement for Allow and one statement for Deny.
Methods that includes conditions will have their own statement in the policy."""
if ((self.allowMethods is None or len(self.allowMethods) == 0) and
(self.denyMethods is None or len(self.denyMethods) == 0)):
raise NameError("No statements defined for the policy")
policy = {
'principalId' : self.principalId,
'policyDocument' : {
'Version' : self.version,
'Statement' : []
}
}
policy['policyDocument']['Statement'].extend(self._getStatementForEffect("Allow", self.allowMethods))
policy['policyDocument']['Statement'].extend(self._getStatementForEffect("Deny", self.denyMethods))
return policy | mit | 3,410,329,240,392,538,600 | 38.027897 | 158 | 0.600352 | false | 4.217532 | false | false | false |