'''
Created on Apr 15, 2015
Description: This will help to plot structural variants (SV) using GenomeDiagram from Biopython
@author: Ronak H Shah
'''
from Bio.SeqFeature import SeqFeature, FeatureLocation
from Bio.Graphics import GenomeDiagram
from reportlab.lib.colors import red, grey, orange, green, brown, blue, lightblue, purple
import sys
import os
import logging
from PIL import Image
import coloredlogs
coloredlogs.install(level='DEBUG')
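# Note: VisualizeSV() expects svDF to carry one structural-variant event per row, with the
# columns read below: chr1/chr2, pos1/pos2, str1/str2, gene1/gene2, site1/site2,
# transcript1/transcript2 and fusion. refDF is assumed to be a UCSC refGene-style transcript
# table (#name, chrom, txStart, txEnd, exonStarts, exonEnds, strand) and upDF a UniProt
# annotation table (#chrom, chromStart, chromEnd, annotationType, name) used for the domain track.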
def VisualizeSV(svDF, refDF, upDF, args):
staticDir = args.outFilePrefix + "_iAnnotateSVplots"
AnalysisDir = os.path.join(args.outDir, staticDir)
try:
os.mkdir(AnalysisDir)
except OSError:
if(args.verbose):
            logging.warning("iAnnotateSV::VisualizeSV: Dir %s exists, so it will not be re-created; existing results will be overwritten", AnalysisDir)
for count, row in svDF.iterrows():
# print row
chr1 = str(row.loc['chr1'])
chr2 = str(row.loc['chr2'])
        if(not chr1.startswith('chr')):
            chr1 = "chr" + chr1
        if(not chr2.startswith('chr')):
            chr2 = "chr" + chr2
pos1 = int(row.loc['pos1'])
pos2 = int(row.loc['pos2'])
str1 = int(row.loc['str1'])
str2 = int(row.loc['str2'])
gene1 = str(row.loc['gene1'])
gene2 = str(row.loc['gene2'])
site1 = str(row.loc['site1'])
site2 = str(row.loc['site2'])
transcript1 = str(row.loc['transcript1'])
transcript2 = str(row.loc['transcript2'])
fusion = str(row.loc['fusion'])
if(fusion != "-"):
(domain1Idx, maxLen1, minLen1) = processData(chr1, transcript1, refDF, upDF)
(domain2Idx, maxLen2, minLen2) = processData(chr2, transcript2, refDF, upDF)
# print fusion, "\n", "1", domain1Idx, "\n", "2", domain2Idx, "\n\n",
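            # Classify the event from the two breakpoint strand codes (assumed convention,
            # matching makeReadFeatures(): 0 = forward/'+', 1 = reverse/'-') and from whether
            # both breakpoints fall on the same chromosome.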
eventtype = None
if(str1 == 0 and str2 == 0 and chr1 == chr2):
eventtype = "Inversion"
elif(str1 == 1 and str2 == 1 and chr1 == chr2):
eventtype = "Inversion"
elif(str1 == 1 and str2 == 0 and chr1 == chr2):
eventtype = "Duplication"
elif(str1 == 0 and str2 == 1 and chr1 == chr2):
eventtype = "Deletion"
else:
eventtype = "Translocation"
#imageMSG = eventtype + " causing " + fusion
if(gene1 != gene2):
outFile1Name = AnalysisDir + "/" + gene1 + "-" + str(chr1) + "_" + str(
pos1) + "_" + gene2 + "-" + str(chr2) + "_" + str(pos2) + "_" + str(eventtype) + "-part1.jpg"
outFile2Name = AnalysisDir + "/" + gene1 + "-" + str(chr1) + "_" + str(
pos1) + "_" + gene2 + "-" + str(chr2) + "_" + str(pos2) + "_" + str(eventtype) + "-part2.jpg"
outFileName = AnalysisDir + "/" + gene1 + "-" + str(chr1) + "_" + str(
pos1) + "_" + gene2 + "-" + str(chr2) + "_" + str(pos2) + "_" + str(eventtype) + ".jpg"
d1Name = eventtype + "-" + gene1
d2Name = eventtype + "-" + gene2
                # Make an instance of the Diagram class
gdd1 = GenomeDiagram.Diagram(d1Name)
gdd2 = GenomeDiagram.Diagram(d2Name)
# Make name of the tracks
feature1Name = "GeneTrack:" + gene1 + ":" + eventtype
feature2Name = "AlignmentTrack:" + gene1 + ":" + eventtype
feature3Name = "DomainTrack:" + gene1 + ":" + eventtype
feature4Name = "GeneTrack:" + gene2 + ":" + eventtype
feature5Name = "AlignmentTrack:" + gene2 + ":" + eventtype
feature6Name = "DomainTrack:" + gene2 + ":" + eventtype
# Make track for each feature
gdt1_features = gdd1.new_track(1, greytrack=True, name=feature1Name)
gdt2_features = gdd1.new_track(1, greytrack=True, name=feature2Name)
gdt3_features = gdd1.new_track(1, greytrack=True, name=feature3Name)
gdt4_features = gdd2.new_track(1, greytrack=True, name=feature4Name)
gdt5_features = gdd2.new_track(1, greytrack=True, name=feature5Name)
gdt6_features = gdd2.new_track(1, greytrack=True, name=feature6Name)
# Write features to a track
gds_features = gdt1_features.new_set()
(gds_features) = makeReferenceFeatures(
transcript1,
site1,
chr1,
pos1,
refDF,
gds_features)
gds_features = gdt2_features.new_set()
(gds_features) = makeReadFeatures(chr1, pos1, str1, gds_features)
gds_features = gdt3_features.new_set()
if(domain1Idx):
(gds_features) = makeUniProtFeatures(domain1Idx, upDF, gds_features)
                gdd1.draw(
                    format='linear',
                    # pagesize='A4',
                    fragments=1,
                    start=minLen1 - 1000,
                    end=maxLen1 + 1000)
gdd1.write(outFile1Name, "JPG", dpi=300)
gds_features = gdt4_features.new_set()
(gds_features) = makeReferenceFeatures(
transcript2,
site2,
chr2,
pos2,
refDF,
gds_features)
gds_features = gdt5_features.new_set()
(gds_features) = makeReadFeatures(chr2, pos2, str2, gds_features)
gds_features = gdt6_features.new_set()
if(domain2Idx):
(gds_features) = makeUniProtFeatures(domain2Idx, upDF, gds_features)
# draw the object and store in memory
                gdd2.draw(
                    format='linear',
                    # pagesize='A4',
                    fragments=1,
                    start=minLen2 - 1000,
                    end=maxLen2 + 1000)
# Write the object to a file
gdd2.write(outFile2Name, "JPG", dpi=300)
                # merge the two per-gene images vertically into one output image
                images = [Image.open(f) for f in (outFile1Name, outFile2Name)]
                w = max(i.size[0] for i in images)
                mh = sum(i.size[1] for i in images)
                result = Image.new("RGB", (w, mh), (255, 255, 255))
                x = 0
                for i in images:
                    result.paste(i, (0, x))
                    x += i.size[1]
result.save(outFileName)
if(os.path.isfile(outFileName)):
os.remove(outFile1Name)
os.remove(outFile2Name)
else:
outFileName = AnalysisDir + "/" + gene1 + "-" + str(chr1) + "_" + str(
pos1) + "_" + gene2 + "-" + str(chr2) + "_" + str(pos2) + "_" + str(eventtype) + ".jpg"
gdd = GenomeDiagram.Diagram('Test Diagram')
feature1Name = "GeneTrack:" + gene1 + ":" + eventtype
feature2Name = "AlignmentTrack:" + gene1 + ":" + eventtype
feature3Name = "ProteinDomainTrack:" + gene1 + ":" + eventtype
feature4Name = "GeneTrack:" + gene2 + ":" + eventtype
feature5Name = "AlignmentTrack:" + gene2 + ":" + eventtype
feature6Name = "ProteinDomainTrack:" + gene2 + ":" + eventtype
gdt1_features = gdd.new_track(1, greytrack=True, name=feature1Name)
gdt2_features = gdd.new_track(1, greytrack=True, name=feature2Name)
gdt3_features = gdd.new_track(1, greytrack=True, name=feature3Name)
gdt4_features = gdd.new_track(1, greytrack=True, name=feature4Name)
gdt5_features = gdd.new_track(1, greytrack=True, name=feature5Name)
gdt6_features = gdd.new_track(1, greytrack=True, name=feature6Name)
gds_features = gdt1_features.new_set()
(gds_features) = makeReferenceFeatures(
transcript1,
site1,
chr1,
pos1,
refDF,
gds_features)
gds_features = gdt2_features.new_set()
(gds_features) = makeReadFeatures(chr1, pos1, str1, gds_features)
gds_features = gdt3_features.new_set()
if(domain1Idx):
(gds_features) = makeUniProtFeatures(domain1Idx, upDF, gds_features)
gds_features = gdt4_features.new_set()
(gds_features) = makeReferenceFeatures(
transcript2,
site2,
chr2,
pos2,
refDF,
gds_features)
gds_features = gdt5_features.new_set()
(gds_features) = makeReadFeatures(chr2, pos2, str2, gds_features)
gds_features = gdt6_features.new_set()
if(domain2Idx):
(gds_features) = makeUniProtFeatures(domain2Idx, upDF, gds_features)
max_len = max(maxLen1, maxLen2)
min_len = min(minLen1, minLen2)
                gdd.draw(
                    format='linear',
                    pagesize='A4',
                    fragments=1,
                    start=min_len - 1000,
                    end=max_len + 1000)
gdd.write(outFileName, "JPG", dpi=300)
#gdd = GenomeDiagram.Diagram("Fusion Image")
#featureName = gene1 + ":" + gene2 + ":" + eventtype
#gdt_features = gdd.new_track(1, greytrack=True, name=featureName)
#gds_features = gdt_features.new_set()
# makePlainImage(refDF,eventtype,transcript1,transcript2,chr1,chr2,pos1,pos2,str1,str2,site1,site2,fusion,gds_features)
def processData(chrom, transcript, refDF, upDF):
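    # Look up the transcript record for this chromosome, then collect every UniProt 'domain'
    # annotation that lies completely inside the transcript; return the domain row indices
    # together with the min/max coordinates used later as the plotting window.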
transcripts = (refDF[refDF['#name'] == transcript])
if(len(transcripts) > 1):
transcriptIdx, = (transcripts[transcripts['chrom'] == chrom].index)
else:
transcriptIdx, = (refDF[refDF['#name'] == transcript].index)
refTxSt = int(refDF.iloc[transcriptIdx]['txStart'])
refTxEn = int(refDF.iloc[transcriptIdx]['txEnd'])
# print "1:",transcriptIdx,"\n",refTxSt,"\n", refTxEn, "\n"
up_idxList = upDF[upDF['#chrom'] == chrom].index.tolist()
# Find all overlapping transcripts
up_recordIndex = []
for index in (up_idxList):
# print upDF.iloc[index],"\n"
chromStart = upDF.iloc[index]['chromStart']
chromEnd = upDF.iloc[index]['chromEnd']
if((chromStart >= refTxSt) and (chromEnd <= refTxEn)):
# print "Chr" , chromStart,chromEnd, refTxSt, refTxEn,"\n"
if(upDF.iloc[index]['annotationType'] == 'domain'):
up_recordIndex.append(index)
allMaxVal = []
allMinVal = []
for index, val in enumerate(up_recordIndex):
chromStart = upDF.iloc[val]['chromStart']
chromEnd = upDF.iloc[val]['chromEnd']
maxVal = max(refTxEn, chromEnd)
allMaxVal.append(maxVal)
minVal = min(refTxSt, chromStart)
allMinVal.append(minVal)
if(allMaxVal):
max_len = max(allMaxVal)
else:
max_len = refTxEn
if(allMinVal):
        min_len = min(allMinVal)  # smallest start coordinate, mirroring max_len above
else:
min_len = refTxSt
return(up_recordIndex, max_len, min_len)
def makeReferenceFeatures(transcript, site, chrom, pos, refDF, gds_features):
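    # Draw one arrow feature per exon of the transcript (numbered in transcription order,
    # so reverse-strand transcripts count from the last exon) plus a small orange feature
    # spanning pos-5..pos+5 that marks the breakpoint, labelled with the 'site' string.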
transcripts = (refDF[refDF['#name'] == transcript])
if(len(transcripts) > 1):
transcriptIdx, = (transcripts[transcripts['chrom'] == chrom].index)
else:
transcriptIdx, = (refDF[refDF['#name'] == transcript].index)
refTxSt = int(refDF.iloc[transcriptIdx]['txStart'])
refTxEn = int(refDF.iloc[transcriptIdx]['txEnd'])
# print "2:",transcriptIdx,"\n",refTxSt,"\n", refTxEn, "\n"
ExonSts = filter(None, refDF.iloc[transcriptIdx]['exonStarts'].split(","))
ExonEnds = filter(None, refDF.iloc[transcriptIdx]['exonEnds'].split(","))
#ExonCounts = int(refDF.iloc[transcriptIdx]['exonCount'])
transcriptStrand = str(refDF.iloc[transcriptIdx]['strand'])
if(transcriptStrand == "+"):
transcriptStrand = +1
if(transcriptStrand == "-"):
transcriptStrand = -1
for idx, val in enumerate(ExonSts):
feature = SeqFeature(
FeatureLocation(
int(val),
int(ExonEnds[idx]),
strand=transcriptStrand))
if(transcriptStrand == -1):
fname = "exon" + str(len(ExonSts)-idx)
gds_features.add_feature(
feature,
sigil="ARROW",
color=brown,
arrowshaft_height=1.0,
name=fname,
label=True, label_position="middle", label_size=5, label_angle=90)
else:
fname = "exon" + str(idx + 1)
gds_features.add_feature(
feature,
sigil="ARROW",
color=brown,
arrowshaft_height=1.0,
name=fname,
label=True, label_position="middle", label_size=5)
feature = SeqFeature(FeatureLocation(pos - 5, pos + 5))
bname = site
gds_features.add_feature(
feature,
color=orange,
name=bname,
label=True,
label_size=6, label_color=orange)
return(gds_features)
def makeUniProtFeatures(domainIdx, upDF, gds_features):
#colors = ["green", "purple", "blue", "brown", "teal", "red", "yellow"]
for index, val in enumerate(domainIdx):
chromStart = upDF.iloc[val]['chromStart']
chromEnd = upDF.iloc[val]['chromEnd']
fname = upDF.iloc[val]['name']
feature = SeqFeature(FeatureLocation(chromStart, chromEnd), strand=None)
gds_features.add_feature(
feature,
name=fname,
label=True,
color=green,
label_position="middle",
label_size=6,
label_color=green)
return(gds_features)
def makeReadFeatures(chrom, pos, strand, gds_features):
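    # Draw a 2 kb arrow centred on the breakpoint showing read orientation: strand code 0 is
    # rendered as a forward (blue) arrow, strand code 1 as a reverse (red) arrow, labelled "chrom:pos".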
start = int(pos) - 1000
end = int(pos) + 1000
bname = str(chrom) + ":" + str(pos)
if(strand == 0):
strandDirection = +1
feature = SeqFeature(FeatureLocation(start, end, strand=strandDirection))
gds_features.add_feature(
feature,
sigil="ARROW",
arrowshaft_height=0.1,
color=blue,
name=bname, label_size=8, label=True, label_angle=0, label_color=purple)
if(strand == 1):
strandDirection = -1
feature = SeqFeature(FeatureLocation(start, end, strand=strandDirection))
gds_features.add_feature(
feature,
sigil="ARROW",
arrowshaft_height=0.1,
color=red,
name=bname,
label_size=8,
label=True,
label_position="middle",
label_angle=-90,
label_color=purple)
return(gds_features)
# Work In Progress
def makePlainImage(
refDF,
eventtype,
transcript1,
transcript2,
chr1,
chr2,
pos1,
pos2,
str1,
str2,
site1,
site2,
fusion,
gds_features):
transcript1Idx, = (refDF[refDF['#name'] == transcript1].index)
ExonSts1 = filter(None, refDF.iloc[transcript1Idx]['exonStarts'].split(","))
ExonEnds1 = filter(None, refDF.iloc[transcript1Idx]['exonEnds'].split(","))
ExonCounts1 = int(refDF.iloc[transcript1Idx]['exonCount'])
transcript1Strand = str(refDF.iloc[transcript1Idx]['strand'])
transcript2Idx, = (refDF[refDF['#name'] == transcript2].index)
Exon1Sts2 = filter(None, refDF.iloc[transcript2Idx]['exonStarts'].split(","))
Exon1Ends2 = filter(None, refDF.iloc[transcript2Idx]['exonEnds'].split(","))
ExonCounts2 = int(refDF.iloc[transcript2Idx]['exonCount'])
transcript2Strand = str(refDF.iloc[transcript2Idx]['strand'])
    # default to None so the checks below do not fail when a keyword is absent from the site string
    before_exonNum1 = after_exonNum1 = before_exonNum2 = after_exonNum2 = None
    if("before" in site1):
        before_exonNum1 = site1[-1:]
    if("after" in site1):
        after_exonNum1 = site1[-1:]
    if("before" in site2):
        before_exonNum2 = site2[-1:]
    if("after" in site2):
        after_exonNum2 = site2[-1:]
beforeExons1 = []
afterExons1 = []
beforeExons2 = []
afterExons2 = []
    for i in range(1, ExonCounts1):
        if(before_exonNum1):
            if(i <= int(before_exonNum1)):
                beforeExons1.append("exon" + str(i))
        if(after_exonNum1):
            if(i >= int(after_exonNum1)):
                afterExons1.append("exon" + str(i))
    for i in range(1, ExonCounts2):
        if(before_exonNum2):
            if(i <= int(before_exonNum2)):
                beforeExons2.append("exon" + str(i))
        if(after_exonNum2):
            if(i >= int(after_exonNum2)):
                afterExons2.append("exon" + str(i))
def get_concat_v_resize(im1, im2, resample=Image.BICUBIC, resize_big_image=True):
if im1.width == im2.width:
_im1 = im1
_im2 = im2
elif (((im1.width > im2.width) and resize_big_image) or
((im1.width < im2.width) and not resize_big_image)):
_im1 = im1.resize((im2.width, int(im1.height * im2.width / im1.width)), resample=resample)
_im2 = im2
else:
_im1 = im1
_im2 = im2.resize((im1.width, int(im2.height * im1.width / im2.width)), resample=resample)
dst = Image.new('RGB', (_im1.width, _im1.height + _im2.height))
dst.paste(_im1, (0, 0))
dst.paste(_im2, (0, _im1.height))
return dst | {
"content_hash": "92818f3f8f20180f9e76b4a6c5b35d4c",
"timestamp": "",
"source": "github",
"line_count": 421,
"max_line_length": 143,
"avg_line_length": 42.612826603325416,
"alnum_prop": 0.5326644370122631,
"repo_name": "rhshah/iAnnotateSV",
"id": "8b831c28daff1ef51edecb406f1d22e1543d9151",
"size": "17940",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "iAnnotateSV/VisualizeSV.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "95"
},
{
"name": "Python",
"bytes": "90085"
}
],
"symlink_target": ""
} |
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Difference'] , ['Lag1Trend'] , ['BestCycle'] , ['SVR'] ); | {
"content_hash": "25c6733bfb1a54b8b703b5494c2e9c4f",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 80,
"avg_line_length": 38.25,
"alnum_prop": 0.7058823529411765,
"repo_name": "antoinecarme/pyaf",
"id": "cd26471a56bb4bb2e3525041b02e59ec6679b7d1",
"size": "153",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_Difference/model_control_one_enabled_Difference_Lag1Trend_BestCycle_SVR.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
import font
def status_wordlist(location, wlist):
return font.color('ciano')+'\nLoaded '+font.color('bold')+location+font.color('end')\
+font.color('ciano')+' wordlist with '+font.color('bold')+str(len(wlist))+font.color('end')\
+font.color('ciano')+' item(s)\n'+font.color('end')
def start_scan(domain):
text = font.color('bold') + 'Getting subdomain for ' + domain + font.color('end') + '\n\n'
text += 'Ip Address'.ljust(18) + 'Domain Name\n'
text += '----------'.ljust(18) + '-----------'
return text
def target(domain):
text = font.color('bold')+'Target information '+domain+font.color('end')+'\n\n'
text += 'Ip Address'.ljust(18) + 'Target Name\n'
text += '----------'.ljust(18) + '-----------'
return text
def response_code():
text = 'Code'.ljust(18) + 'Reason\n'
text += '----------'.ljust(18) + '-----------'
return text
def response_head():
text = 'Field'.ljust(18) + 'Value\n'
text += '----------'.ljust(18) + '-----------'
return text
def stats_summary():
return font.color('bold')+'\nSummary\n'+font.color('end')
def start_scan_zt(domain):
text = font.color('bold') + 'Getting zone transfer for ' + domain + font.color('end') + '\n\n'
text += 'Ip Address'.ljust(18) + 'Domain Name\n'
text += '----------'.ljust(18) + '-----------'
return text
| {
"content_hash": "96b11444388ab8de2a33c7b5ccc80b22",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 95,
"avg_line_length": 34.729729729729726,
"alnum_prop": 0.5782101167315176,
"repo_name": "rajeshmajumdar/TheFatCat",
"id": "076e3d6cdca456087c8eb4f51891fe0b372d3fe1",
"size": "2150",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/headers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "30014"
}
],
"symlink_target": ""
} |
from datetime import datetime
from flask import current_app, g, url_for
from werkzeug.security import check_password_hash, generate_password_hash
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from application import db
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(40), index=True, unique=True)
password_hash = db.Column(db.String(128))
def encrypt_password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def generate_auth_token(self, expires_in=2400):
s = Serializer(current_app.config['SECRET_KEY'], expires_in=expires_in)
return s.dumps({'id': self.id})
@staticmethod
def verify_auth_token(token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return None
return User.query.get(data['id'])
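# Token round-trip sketch (hypothetical usage, not part of the models themselves):
#   token = user.generate_auth_token()    # signed token, expires after `expires_in` seconds
#   User.verify_auth_token(token)         # -> the User instance, or None if invalid/expired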
class Bucketlist(db.Model):
__tablename__ = "bucketlists"
bucketlist_id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(60), index=True, unique=True, nullable=False)
date_created = db.Column(db.DateTime, default=datetime.now())
date_modified = db.Column(db.DateTime, default=datetime.now())
created_by = db.Column(db.Integer, db.ForeignKey(
'users.id', ondelete='CASCADE'))
items = db.relationship(
'Item', backref='bucketlists', passive_deletes=True)
def render_data(self):
return {
"bucketlist_id": self.bucketlist_id,
"name": self.name,
"date_created": self.date_created.strftime("%Y-%m-%d %H:%M:%S"),
"date_modified": self.date_modified.strftime("%Y-%m-%d %H:%M:%S"),
"created_by": self.created_by
}
class Item(db.Model):
__tablename__ = 'items'
item_id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(100), nullable=False)
date_created = db.Column(db.DateTime, default=datetime.now())
date_modified = db.Column(db.DateTime, default=datetime.now())
done = db.Column(db.Boolean, default=False)
bucketlist_id = db.Column(db.Integer, db.ForeignKey(
'bucketlists.bucketlist_id', ondelete='CASCADE'))
def render_data(self):
return {
"item_id": self.item_id,
"name": self.name,
"date_created": self.date_created.strftime("%Y-%m-%d %H:%M:%S"),
"date_modified": self.date_modified.strftime("%Y-%m-%d %H:%M:%S"),
"done": self.done,
"created_by": self.created_by
}
| {
"content_hash": "d65c754576c9433cc458038c8aa0b442",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 79,
"avg_line_length": 37.57534246575342,
"alnum_prop": 0.6325191396281443,
"repo_name": "andela-lolali/bucketlist_api",
"id": "d41d6c924f0a429fd8f3d4dc61577770d7a80634",
"size": "2743",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "application/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30700"
}
],
"symlink_target": ""
} |
"""Models representing Call Center API objects"""
from __future__ import absolute_import
from ..common.models import Item
class Call(Item):
"""An object for retrieving data from and working with an individual call."""
rest_root = '/callcenter/calls/'
@property
def url(self):
return None
| {
"content_hash": "52f41b2ce8ae8a1ee5f0314befd200f7",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 81,
"avg_line_length": 24.307692307692307,
"alnum_prop": 0.6867088607594937,
"repo_name": "PerformLine/python-performline-client",
"id": "ea6bb35f8be027078bf1424153073a94b335c083",
"size": "1858",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "performline/products/callcenter/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "2103"
},
{
"name": "Python",
"bytes": "177148"
},
{
"name": "Shell",
"bytes": "413"
}
],
"symlink_target": ""
} |
from __future__ import division
import numpy as np
from pysd import functions
from pysd import builder
class Components(builder.ComponentClass):
| {
"content_hash": "aae00510f609642d5eb357795eed81dd",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 64,
"avg_line_length": 65,
"alnum_prop": 0.26153846153846155,
"repo_name": "bpowers/pysd",
"id": "0975627af6e3a4cb404244db175905a18db186d9",
"size": "455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/vensim/test_variable_names_with_quotes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "108427"
},
{
"name": "Shell",
"bytes": "63"
}
],
"symlink_target": ""
} |
import pygame
from pygame import *
from button import Button
from player import Player
from scene import Scene
import globals
import glob
import os
class PlayerScene(Scene):
"""Player scene"""
def __init__(self, manager_scene):
super(PlayerScene, self).__init__(manager_scene)
self.bg = Surface(globals.SCREEN_SIZE)
self.bg.fill(Color("#fdf6e3"))
self.font = pygame.font.SysFont('Arial', 64)
self.text = self.font.render("Next", True, (211, 54, 130))
self.buttons = []
self.repeat_button = None
self.is_repeat_on = False
self.folder = None
self.cover_placeholder = "./resources/images/cover_placeholder.jpg"
self.tracks = []
self.covers = []
self.track_count = 0
self.current_track_index = 0
self.current_track = None
self.current_cover = None
self.is_for_music = False
self.repeat_empty_icon = "./resources/images/repeat_empty.png"
self.repeat_full_icon = "./resources/images/repeat_full.png"
self.player = Player()
self.is_paused = True
self.play_pause_button = None
self.play_icon_file = "./resources/images/play.png"
self.pause_icon_file = "./resources/images/pause.png"
self.play_icon = pygame.image.load(self.play_icon_file)
self.pause_icon = pygame.image.load(self.pause_icon_file)
self.has_updates = True
self.init_buttons()
def render(self, screen):
screen.blit(self.bg, (0, 0))
screen.blit(self.text, (2, 5))
self.current_cover = self.covers[self.current_track_index]
cover = pygame.image.load(self.current_cover)
cover = aspect_scale(cover)
black = (0, 0, 0)
screen.fill(black)
rect = cover.get_rect()
rect.x = 40
screen.blit(cover, rect)
for btn in self.buttons:
btn.draw(screen)
if self.is_for_music:
self.repeat_button.draw(screen)
def update(self):
if self.is_for_music:
if self.player.is_playing is False and self.is_paused is False:
print "track", self.tracks[self.current_track_index], "finished: Play next"
# self.is_paused = True
self.has_updates = True
print "ff"
if not self.is_repeat_on:
if self.current_track_index < self.track_count - 1:
self.current_track_index += 1
else:
self.current_track_index = 0
self.player.stop()
self.player.play_track(self.tracks[self.current_track_index])
else:
if self.player.is_playing is False and self.is_paused is False:
print "track", self.tracks[self.current_track_index], "finished"
self.is_paused = True
self.play_pause_button.iconFg = self.play_icon
self.has_updates = True
current_update_stat = self.has_updates
self.has_updates = False
return current_update_stat
def handle_events(self, e):
if e.type is pygame.MOUSEBUTTONUP:
pos = pygame.mouse.get_pos()
for btn in self.buttons:
btn.selected(pos)
if self.is_for_music:
self.repeat_button.selected(pos)
def set_folder(self, folder, is_for_music):
self.player.stop()
self.folder = folder
self.is_for_music = is_for_music
self.update_data()
def update_data(self):
self.has_updates = True
self.current_track_index = 0
self.tracks = glob.glob(self.folder + "*.mp3")
self.tracks.sort()
self.track_count = len(self.tracks)
self.covers = []
for track in self.tracks:
self.covers.append(
track.replace("mp3", "jpg") if os.path.isfile(
track.replace("mp3", "jpg")) else self.cover_placeholder)
def go_to_menu(self):
self.manager.show_main_menu()
def repeat(self):
self.has_updates = True
print("repeat")
print(self.is_repeat_on)
self.is_repeat_on = not self.is_repeat_on
if self.is_repeat_on:
self.repeat_button.iconFg = pygame.image.load(self.repeat_full_icon)
else:
self.repeat_button.iconFg = pygame.image.load(self.repeat_empty_icon)
def fast_forward(self):
self.has_updates = True
print "ff"
if self.current_track_index < self.track_count - 1:
self.current_track_index += 1
else:
self.current_track_index = 0
if not self.is_paused and self.player.is_playing:
self.player.stop()
self.player.play_track(self.tracks[self.current_track_index])
else:
self.player.stop()
def rewind(self):
self.has_updates = True
print "rewind"
if self.current_track_index > 0:
self.current_track_index -= 1
else:
self.current_track_index = self.track_count - 1
if not self.is_paused and self.player.is_playing:
self.player.stop()
self.player.play_track(self.tracks[self.current_track_index])
else:
self.player.stop()
def play_pause(self):
self.has_updates = True
print "play pause"
print self.tracks[self.current_track_index]
if not self.player.is_playing:
self.play_pause_button.iconFg = self.pause_icon
self.is_paused = False
self.player.play_track(self.tracks[self.current_track_index])
else:
if self.is_paused:
self.play_pause_button.iconFg = self.pause_icon
self.is_paused = False
else:
self.play_pause_button.iconFg = self.play_icon
self.is_paused = True
self.player.toggle_play_pause()
def init_buttons(self):
button = Button((0, 0, 42, 42), cb=self.go_to_menu)
button.iconFg = pygame.image.load("./resources/images/chevron.png")
self.buttons.append(button)
fast_forward = Button((256, 150, 64, 64), cb=self.fast_forward)
ff_icon = "./resources/images/next.png"
fast_forward.iconFg = pygame.image.load(ff_icon)
self.buttons.append(fast_forward)
self.play_pause_button = Button((128, 150, 64, 64), cb=self.play_pause)
self.play_pause_button.iconFg = self.play_icon
self.buttons.append(self.play_pause_button)
rewind_button = Button((0, 150, 64, 64), cb=self.rewind)
rewind_icon = "./resources/images/previous.png"
rewind_button.iconFg = pygame.image.load(rewind_icon)
self.buttons.append(rewind_button)
# repeat button for music mode
self.repeat_button = Button((256, 60, 64, 64), cb=self.repeat)
self.repeat_button.iconFg = pygame.image.load(self.repeat_empty_icon)
def aspect_scale(img):
""" Scales 'img' to fit into box bx/by.
This method will retain the original image's aspect ratio """
(bx, by) = globals.SCREEN_SIZE
ix, iy = img.get_size()
if ix > iy:
# fit to width
scale_factor = bx / float(ix)
sy = scale_factor * iy
if sy > by:
scale_factor = by / float(iy)
sx = scale_factor * ix
sy = by
else:
sx = bx
else:
# fit to height
scale_factor = by / float(iy)
sx = scale_factor * ix
if sx > bx:
scale_factor = bx / float(ix)
sx = bx
sy = scale_factor * iy
else:
sy = by
return pygame.transform.scale(img, (int(sx), int(sy)))
| {
"content_hash": "a42a32a607c3faa41817a0b71aa42fa6",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 91,
"avg_line_length": 33.427350427350426,
"alnum_prop": 0.5698031194068013,
"repo_name": "dnlkng/zplayer",
"id": "57e50362e777f508e096a23457cad3f1e7627aa1",
"size": "7822",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "playerscene.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19623"
}
],
"symlink_target": ""
} |
"""
Display song currently playing in moc.
MOC (music on console) is a console audio player for Linux/Unix designed to be
powerful and easy to use. It consists of two parts, a server (moc) and a
player/interface (mocp). It supports OGG, WAV, MP3 and other formats.
Configuration parameters:
button_next: mouse button to skip next track (default None)
button_pause: mouse button to pause/play the playback (default 1)
button_previous: mouse button to skip previous track (default None)
button_stop: mouse button to stop the playback (default 3)
cache_timeout: refresh interval for this module (default 5)
format: display format for this module
(default '\?if=is_started [\?if=is_stopped \[\] moc|
[\?if=is_paused \|\|][\?if=is_playing >] {title}]')
sleep_timeout: when moc is not running, this interval will be used to
allow one to refresh constantly with time placeholders and/or
to refresh once every minute rather than every few seconds
(default 20)
Control placeholders:
{is_paused} a boolean based on moc status
{is_playing} a boolean based on moc status
{is_started} a boolean based on moc status
{is_stopped} a boolean based on moc status
Format placeholders:
{album} album name, eg (new output here)
{artist} artist name, eg (new output here)
{avgbitrate} audio average bitrate, eg 230kbps
{bitrate} audio bitrate, eg 230kbps
{currentsec} elapsed time in seconds, eg 32
{currenttime} elapsed time in [HH:]MM:SS, eg 00:32
{file} file location, eg /home/user/Music...
{rate} audio rate, eg 44kHz
{songtitle} song title, eg (new output here)
{state} playback state, eg PLAY, PAUSE, STOP
{timeleft} time left in [HH:]MM:SS, eg 71:30
{title} track title, eg (new output here)
{totalsec} total time in seconds, eg 4322
{totaltime} total time in seconds, eg 72:02
Placeholders are retrieved directly from `mocp --info` command.
The list was harvested once and should not represent a full list.
Color options:
color_paused: Paused, defaults to color_degraded
color_playing: Playing, defaults to color_good
color_stopped: Stopped, defaults to color_bad
Requires:
moc: a console audio player with simple ncurses interface
Examples:
```
# see 'man mocp' for more buttons
moc {
on_click 9 = 'exec mocp --example'
}
```
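A hypothetical variant (illustrative values, not defaults of this module) that maps the
scroll wheel to track switching and shortens the display format to artist/title:
```
moc {
    button_next = 4
    button_previous = 5
    format = '[\?if=is_playing {artist} - {title}]'
}
```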
@author lasers
SAMPLE OUTPUT
{'color': '#00FF00', 'full_text': '> Music For Programming - Mindaugaszq'}
paused
{'color': '#FFFF00', 'full_text': '|| Music For Programming - Mindaugaszq'}
stopped
{'color': '#FF0000', 'full_text': '[] moc'}
"""
from __future__ import division
STRING_NOT_INSTALLED = "not installed"
class Py3status:
"""
"""
# available configuration parameters
button_next = None
button_pause = 1
button_previous = None
button_stop = 3
cache_timeout = 5
format = '\?if=is_started [\?if=is_stopped \[\] moc|' +\
'[\?if=is_paused \|\|][\?if=is_playing >] {title}]'
sleep_timeout = 20
def post_config_hook(self):
if not self.py3.check_commands('mocp'):
raise Exception(STRING_NOT_INSTALLED)
self.color_stopped = self.py3.COLOR_STOPPED or self.py3.COLOR_BAD
self.color_paused = self.py3.COLOR_PAUSED or self.py3.COLOR_DEGRADED
self.color_playing = self.py3.COLOR_PLAYING or self.py3.COLOR_GOOD
def _get_moc_data(self):
try:
data = self.py3.command_output('mocp --info')
is_started = True
except:
data = {}
is_started = False
return is_started, data
def moc(self):
is_paused = is_playing = is_stopped = None
cached_until = self.sleep_timeout
color = self.py3.COLOR_BAD
data = {}
is_started, moc_data = self._get_moc_data()
if is_started:
cached_until = self.cache_timeout
for line in moc_data.splitlines():
category, value = line.split(': ', 1)
data[category.lower()] = value
self.state = data['state']
if self.state == 'PLAY':
is_playing = True
color = self.color_playing
elif self.state == 'PAUSE':
is_paused = True
color = self.color_paused
elif self.state == 'STOP':
is_stopped = True
color = self.color_stopped
return {
'cached_until': self.py3.time_in(cached_until),
'color': color,
'full_text': self.py3.safe_format(
self.format,
dict(
is_paused=is_paused,
is_playing=is_playing,
is_started=is_started,
is_stopped=is_stopped,
**data
)
)
}
def on_click(self, event):
"""
Control moc with mouse clicks.
"""
button = event['button']
if button == self.button_pause:
if self.state == 'STOP':
self.py3.command_run('mocp --play')
else:
self.py3.command_run('mocp --toggle-pause')
elif button == self.button_stop:
self.py3.command_run('mocp --stop')
elif button == self.button_next:
self.py3.command_run('mocp --next')
elif button == self.button_previous:
self.py3.command_run('mocp --prev')
else:
self.py3.prevent_refresh()
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
| {
"content_hash": "9fe85acc285c8aab84a8f3dac0a5f88e",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 78,
"avg_line_length": 32.259887005649716,
"alnum_prop": 0.5970227670753064,
"repo_name": "alexoneill/py3status",
"id": "48c91f11d6e8bb173d9ba645141652caef9d2edb",
"size": "5734",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "py3status/modules/moc.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "814174"
}
],
"symlink_target": ""
} |
__author__ = 'ar (Alexander Kalinovsky)'
import glob
import os
import sys
import time
import numpy as np
import json
from keras.backend.tensorflow_backend import set_session
import tensorflow as tf
import nibabel as nib
try:
import cPickle as pickle
except:
import pickle
import collections
# from collections import OrderedDict
import skimage.io as skio
import skimage.transform as sktf
import skimage.color as skolor
import pandas as pd
import matplotlib.pyplot as plt
import keras
from keras.engine import Layer, InputSpec
from keras import backend as K
from keras.datasets import mnist
from keras.models import Sequential, Model
from keras.layers import Dense, Convolution2D, Activation, MaxPooling2D,\
Flatten, BatchNormalization, InputLayer, Dropout, Reshape, Permute, Input, UpSampling3D, Lambda
from keras.layers.normalization import BatchNormalization
from keras.utils import np_utils
# from keras.utils.visualize_util import plot as kplot
from keras.utils import plot_model as kplot
######################################################
def resize_images_interpolated(X, height_factor, width_factor, order, dim_ordering):
"""
Simple modification of the original Keras code (by ar)
****
Resizes the images contained in a 4D tensor of shape
- `[batch, channels, height, width]` (for 'th' dim_ordering)
- `[batch, height, width, channels]` (for 'tf' dim_ordering)
by a factor of `(height_factor, width_factor)`. Both factors should be
positive integers.
# Returns
A tensor.
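    # Example
        With `dim_ordering='tf'`, an input of shape `(batch, 64, 64, channels)` and
        factors `(2, 2)` is returned with shape `(batch, 128, 128, channels)`;
        `order` 0/1/2 selects nearest, bilinear or bicubic interpolation below.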
"""
resizeMethod = 0
if order==0:
resizeMethod = tf.image.ResizeMethod.NEAREST_NEIGHBOR
elif order==1:
resizeMethod = tf.image.ResizeMethod.BILINEAR
elif order==2:
resizeMethod = tf.image.ResizeMethod.BICUBIC
else:
        raise Exception('Incorrect interpolation method order [%s]: currently available values are 0, 1, 2' % order)
if dim_ordering == 'th':
original_shape = K.int_shape(X)
new_shape = tf.shape(X)[2:]
new_shape *= tf.constant(np.array([height_factor, width_factor]).astype('int32'))
X = K.permute_dimensions(X, [0, 2, 3, 1])
X = tf.image.resize_images(X, new_shape, method=resizeMethod)
# X = tf.image.resize_nearest_neighbor(X, new_shape)
X = K.permute_dimensions(X, [0, 3, 1, 2])
X.set_shape((None, None, original_shape[2] * height_factor if original_shape[2] is not None else None,
original_shape[3] * width_factor if original_shape[3] is not None else None))
return X
elif dim_ordering == 'tf':
original_shape = K.int_shape(X)
new_shape = tf.shape(X)[1:3]
new_shape *= tf.constant(np.array([height_factor, width_factor]).astype('int32'))
# X = tf.image.resize_nearest_neighbor(X, new_shape)
X = tf.image.resize_images(X, new_shape, method=resizeMethod)
X.set_shape((None, original_shape[1] * height_factor if original_shape[1] is not None else None,
original_shape[2] * width_factor if original_shape[2] is not None else None, None))
return X
else:
raise ValueError('Invalid dim_ordering:', dim_ordering)
######################################################
class UpSamplingInterpolated2D(Layer):
def __init__(self, size=(2, 2), order=2, dim_ordering='default', **kwargs):
if dim_ordering == 'default':
dim_ordering = K.image_dim_ordering()
self.size = tuple(size)
if dim_ordering not in {'tf', 'th'}:
raise ValueError('dim_ordering must be in {tf, th}.')
if dim_ordering != 'tf':
            raise Exception('Layer <UpSamplingInterpolated2D> currently supports only the TensorFlow backend!')
self.dim_ordering = dim_ordering
self.input_spec = [InputSpec(ndim=4)]
self.order = order
super(UpSamplingInterpolated2D, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
if self.dim_ordering == 'th':
width = self.size[0] * input_shape[2] if input_shape[2] is not None else None
height = self.size[1] * input_shape[3] if input_shape[3] is not None else None
return (input_shape[0],
input_shape[1],
width,
height)
elif self.dim_ordering == 'tf':
width = self.size[0] * input_shape[1] if input_shape[1] is not None else None
height = self.size[1] * input_shape[2] if input_shape[2] is not None else None
return (input_shape[0],
width,
height,
input_shape[3])
else:
raise ValueError('Invalid dim_ordering:', self.dim_ordering)
def call(self, x, mask=None):
# return K.resize_images(x, self.size[0], self.size[1], self.dim_ordering)
return resize_images_interpolated(x, self.size[0], self.size[1], self.order, self.dim_ordering)
def get_config(self):
config = {
'size': self.size,
'order': self.order
}
base_config = super(UpSamplingInterpolated2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
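# Usage sketch (assumes the TensorFlow backend and 'tf' dim ordering, matching the checks above):
#   inp = Input(shape=(64, 64, 16))
#   out = UpSamplingInterpolated2D(size=(2, 2), order=1)(inp)   # -> (None, 128, 128, 16), bilinear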
######################################################
def split_list_by_blocks(lst, psiz):
tret = [lst[x:x + psiz] for x in range(0, len(lst), psiz)]
return tret
######################################################
def buildModelSegNet_InterpolatedUpSampling2D(inpShape=(256, 256, 5), numCls=2, kernelSize=3, order=1):
dataInput = Input(shape=inpShape)
# -------- Encoder --------
# Conv #1
x = Convolution2D(nb_filter=16, nb_row=kernelSize, nb_col=kernelSize,
border_mode='same', activation='relu')(dataInput)
x = MaxPooling2D(pool_size=(2,2))(x)
# Conv #2
x = Convolution2D(nb_filter=32, nb_row=kernelSize, nb_col=kernelSize,
border_mode='same', activation='relu')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
# Conv #3
x = Convolution2D(nb_filter=64, nb_row=kernelSize, nb_col=kernelSize,
border_mode='same', activation='relu')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
# Conv #4
x = Convolution2D(nb_filter=128, nb_row=kernelSize, nb_col=kernelSize,
border_mode='same', activation='relu')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
#
# -------- Decoder --------
# UpConv #1
x = Convolution2D(nb_filter=128, nb_row=kernelSize, nb_col=kernelSize,
border_mode='same', activation='relu')(x)
x = UpSamplingInterpolated2D(size=(2, 2), order=order)(x)
# UpConv #2
x = Convolution2D(nb_filter=64, nb_row=kernelSize, nb_col=kernelSize,
border_mode='same', activation='relu')(x)
x = UpSamplingInterpolated2D(size=(2, 2), order=order)(x)
# UpConv #3
x = Convolution2D(nb_filter=32, nb_row=kernelSize, nb_col=kernelSize,
border_mode='same', activation='relu')(x)
x = UpSamplingInterpolated2D(size=(2, 2), order=order)(x)
retModel = Model(dataInput, x)
# UpConv #4
x = Convolution2D(nb_filter=16, nb_row=kernelSize, nb_col=kernelSize,
border_mode='same', activation='relu')(x)
x = UpSamplingInterpolated2D(size=(2, 2), order=order)(x)
#
# 1x1 Convolution: emulation of Dense layer
x = Convolution2D(nb_filter=numCls, nb_row=1, nb_col=1,
border_mode='valid', activation='linear')(x)
tmpModel = Model(dataInput, x)
if K.image_dim_ordering()=='th':
tmpShape = tmpModel.output_shape[-2:]
sizeReshape = np.prod(tmpShape)
x = Reshape([numCls, sizeReshape])(x)
x = Permute((2,1))(x)
else:
tmpShape = tmpModel.output_shape[1:-1]
sizeReshape = np.prod(tmpShape)
x = Reshape([sizeReshape,numCls])(x)
x = Activation('softmax')(x)
retModel = Model(dataInput, x)
return retModel
######################################################
class BatcherCTLung2D:
pathDataIdx=None
pathMeanData=None
meanPrefix='mean.pkl'
arrPathDataImg=None
arrPathDataMsk=None
wdir=None
dataImg = None
dataMsk = None
dataMskCls = None
meanData = None
#
imgScale = 1.
modelPrefix = None
#
isTheanoShape=True
# isRemoveMeanImage=False
isDataInMemory=False
shapeImg = None
shapeImgSlc = None
shapeMsk = None
shapeMskSlc = None
sizeZ = -1
numCh = 1
numImg = -1
numSlices = -1
#
modelPath = None
model = None
def __init__(self, pathDataIdx=None, pathMeanData=None, numSlices=-1, isRecalculateMeanIfExist=False,
isTheanoShape=True,
isLoadIntoMemory=False):
if pathDataIdx is not None:
self.loadDataset(pathDataIdx=pathDataIdx,
pathMeanData=pathMeanData,
numSlices=numSlices,
isRecalculateMeanIfExist=isRecalculateMeanIfExist,
isTheanoShape=isTheanoShape,
isLoadIntoMemory=isLoadIntoMemory)
def loadDataset(self, pathDataIdx, pathMeanData=None, numSlices=-1, isRecalculateMeanIfExist=False,
isTheanoShape=True,
isLoadIntoMemory=False):
self.isTheanoShape=isTheanoShape
# self.isRemoveMeanImage=isRemoveMeanImage
# (1) Check input Image
if not os.path.isfile(pathDataIdx):
raise Exception('Cant find input Image file [%s]' % pathDataIdx)
self.pathDataIdx = os.path.abspath(pathDataIdx)
self.wdir = os.path.dirname(self.pathDataIdx)
tdata = pd.read_csv(self.pathDataIdx)
# (2) Check input Image Mask
# self.pathDataMsk = '%s_msk.png' % os.path.splitext(self.pathDataImg)[0]
self.arrPathDataImg = np.array([os.path.join(self.wdir, xx) for xx in tdata['path']])
self.arrPathDataMsk = np.array([os.path.join(self.wdir, xx) for xx in tdata['pathmsk']])
# (3) Load Image and Mask
tpathImg = self.arrPathDataImg[0]
tpathMsk = self.arrPathDataMsk[0]
if not os.path.isfile(tpathImg):
raise Exception('Cant find CT Image file [%s]' % tpathImg)
if not os.path.isfile(tpathMsk):
raise Exception('Cant find CT Image Mask file [%s]' % tpathMsk)
tdataImg = nib.load(tpathImg).get_data()
tdataMsk = nib.load(tpathMsk).get_data()
tdataImg = self.adjustImage(self.transformImageFromOriginal(tdataImg, isRemoveMean=False))
tdataMsk = self.transformImageFromOriginal(tdataMsk>200, isRemoveMean=False)
self.numCls = len(np.unique(tdataMsk))
tdataMskCls = self.convertMskToOneHot(tdataMsk)
self.shapeImg = tdataImg.shape
self.shapeMsk = tdataMskCls.shape
self.numSlices = numSlices
if numSlices<0:
self.shapeImgSlc = self.shapeImg
self.shapeMskSlc = self.shapeMsk
else:
tnumSlc = 2*self.numSlices+1
if K.image_dim_ordering()=='th':
self.shapeImgSlc = list(self.shapeImg[:-1]) + [tnumSlc]
self.shapeMskSlc = list(self.shapeMsk[:-1]) + [tnumSlc]
self.sizeZ = self.shapeImg[-1]
else:
self.shapeImgSlc = list(self.shapeImg[:2]) + [tnumSlc] #+ [self.shapeImg[-1]]
self.shapeMskSlc = list(self.shapeMsk[:2]) + [self.numCls] #+ [self.shapeMsk[-1]]
self.sizeZ = self.shapeImg[-2]
# (4) Check input Mean Image Data
if pathMeanData is None:
self.pathMeanData = '%s-%s' % (self.pathDataIdx, self.meanPrefix)
self.precalculateAndLoadMean(isRecalculateMean=isRecalculateMeanIfExist)
else:
if not os.path.isfile(pathMeanData):
raise Exception('Cant find MEAN-data file [%s]' % pathMeanData)
self.pathMeanData = pathMeanData
self.precalculateAndLoadMean(isRecalculateMean=isRecalculateMeanIfExist)
# (5) Load data into memory
self.numImg = len(self.arrPathDataImg)
if isLoadIntoMemory:
#FIXME: incorrect code, please, fix this code before using
self.isDataInMemory = True
self.dataImg = np.zeros([self.numImg] + list(self.shapeImg), dtype=np.float)
self.dataMsk = None
# self.dataMsk = np.zeros([self.numImg] + list(self.shapeImg), dtype=np.float)
self.dataMskCls = np.zeros([self.numImg] + list(self.shapeMsk), dtype=np.float)
print (':: Loading data into memory:')
for ii in range(self.numImg):
tpathImg = self.arrPathDataImg[ii]
tpathMsk = self.arrPathDataMsk[ii]
#
tdataImg = self.adjustImage(nib.load(tpathImg).get_data())
tdataMsk = nib.load(tpathMsk).get_data()
tdataImg = self.transformImageFromOriginal(tdataImg, isRemoveMean=True)
tdataMsk = self.transformImageFromOriginal(tdataMsk > 200, isRemoveMean=False)
tdataMskCls = self.convertMskToOneHot(tdataMsk)
self.dataImg[ii] = tdataImg
# self.dataMsk[ii] = tdataMsk
self.dataMskCls[ii] = tdataMskCls
if (ii % 10) == 0:
print ('\t[%d/%d] ...' % (ii, self.numImg))
print ('\t... [done]')
if self.isTheanoShape:
tshp = self.dataMskCls.shape
print (tshp)
else:
self.isDataInMemory = False
self.dataImg = None
self.dataMsk = None
self.dataMskCls = None
def getNumImg(self):
if self.isInitialized():
return self.numImg
else:
return 0
def adjustImage(self, pimg):
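        # Window the CT intensities (assumed to be Hounsfield units) to [-1400, +400]
        # and rescale them linearly to [0, 1] before feeding the network.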
qmin = -1400.
qmax = +400
tret = pimg.copy()
tret[pimg < qmin] = qmin
tret[pimg > qmax] = qmax
tret = (tret - qmin) / (qmax - qmin)
return tret
def convertMskToOneHot(self, msk):
tshape = list(msk.shape)
if self.numCls>2:
tret = np_utils.to_categorical(msk.reshape(-1), self.numCls)
else:
tret = (msk.reshape(-1)>0).astype(np.float)
tret = np.vstack((1.-tret,tret)).transpose()
if self.isTheanoShape:
tmpShape = list(tshape[1:]) + [self.numCls]
# tshape[ 0] = self.numCls
else:
tmpShape = tshape
tmpShape[-1] = self.numCls
tret = tret.reshape(tmpShape)
if self.isTheanoShape:
#FIXME: work only for 3D!!!
tret = tret.transpose((3,0,1,2))
return tret
def isInitialized(self):
return (self.shapeImg is not None) and (self.shapeMsk is not None) and (self.wdir is not None) and (self.numCls>0)
def checkIsInitialized(self):
if not self.isInitialized():
raise Exception('class Batcher() is not correctly initialized')
# def toString(self):
# if self.isInitialized():
# tstr = 'Shape/Slice=%s/%s, #Samples=%d, #Labels=%d, #Slices=%s'\
# % (self.shapeImg, self.shapeImgSlc, self.numImg, self.numCls, self.numSlices)
# if self.meanData is not None:
# tstr = '%s, meanValuePerCh=%s' % (tstr, self.meanData['meanCh'])
# else:
# tstr = '%s, meanValuePerCh= is Not Calculated' % (tstr)
# else:
# tstr = "BatcherOnImageCT3D() is not initialized"
# return tstr
def toString(self):
if self.isInitialized():
tstr = '#Samples=%d' % (self.numImg)
else:
tstr = "BatcherOnImage3D() is not initialized"
# (1) number of classes
if self.numCls is not None:
tstr = '%s, #Cls=%d' % (tstr, self.numCls)
# (2) input/output shapes
tstr = '%s, InpShape=%s, OutShape=%s' % (tstr, self.shapeImg, self.shapeMsk)
#
if self.meanData is not None:
tstr = '%s, meanValuePerCh=%s' % (tstr, self.meanData['meanCh'])
else:
tstr = '%s, meanValuePerCh= is Not Calculated' % (tstr)
if (self.model is not None) and (self.modelPath is not None):
tstr = '%s, model is loaded [%s]' % (tstr, os.path.basename(self.modelPath))
return tstr
def __str__(self):
return self.toString()
def __repr__(self):
return self.toString()
def preprocImageShape(self, img):
if self.isTheanoShape:
return img.reshape([1] + list(img.shape))
else:
return img.reshape(list(img.shape) + [1])
def removeMean(self, img):
ret = img
ret -= self.meanData['meanCh']
# ret -= self.meanData['meanImg']
return ret
def transformImageFromOriginal(self, pimg, isRemoveMean=True):
tmp = self.preprocImageShape(pimg)
tmp = tmp.astype(np.float) / self.imgScale
if isRemoveMean:
tmp = self.removeMean(tmp)
return tmp
def precalculateAndLoadMean(self, isRecalculateMean=False):
if os.path.isfile(self.pathMeanData) and (not isRecalculateMean):
print (':: found mean-value file, try to load from it [%s] ...' % self.pathMeanData)
with open(self.pathMeanData, 'rb') as f:
self.meanData = pickle.load(f, encoding='latin1')
tmpMeanKeys = ('meanImg', 'meanCh', 'meanImgCh')
for ii in tmpMeanKeys:
if ii not in self.meanData.keys():
raise Exception('Mean-file is invalid. Cant find key-value in mean-file [%s]' % self.pathMeanData)
else:
self.meanData = {}
self.meanData['meanImg'] = None
self.meanData['meanImgCh'] = None
maxNumImages = 1000
if len(self.arrPathDataImg)<maxNumImages:
maxNumImages = len(self.arrPathDataImg)
rndIdx = np.random.permutation(range(len(self.arrPathDataImg)))[:maxNumImages]
print ('*** Precalculate mean-info:')
for ii,idx in enumerate(rndIdx):
tpathImg = self.arrPathDataImg[idx]
tdataImg = nib.load(tpathImg).get_data()
tdataImg = self.adjustImage(self.transformImageFromOriginal(tdataImg, isRemoveMean=False))
if self.meanData['meanImg'] is None:
self.meanData['meanImg'] = tdataImg
else:
self.meanData['meanImg'] += tdataImg
if (ii%10)==0:
print ('\t[%d/%d] ...' % (ii, len(rndIdx)))
self.meanData['meanImg'] /= len(rndIdx)
self.meanData['meanCh'] = np.mean(self.meanData['meanImg'])
print (':: mean-image %s mean channels value is [%s], saved to [%s]'
% (self.meanData['meanImg'].shape, self.meanData['meanCh'], self.pathMeanData))
with open(self.pathMeanData, 'wb') as f:
pickle.dump(self.meanData, f)
def getSlice25D(self, idxImg, zidx):
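        # 2.5D sampling: for every requested z-index, stack the 2*numSlices+1 neighbouring
        # axial slices as input channels and use only the central slice's mask as the target.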
if self.isDataInMemory:
tnumSlices = len(zidx)
dataX = np.zeros([tnumSlices] + list(self.shapeImgSlc), dtype=np.float)
# dataM = np.zeros([tnumSlices] + list(self.shapeMskSlc), dtype=np.float)
dataM = None
dataY = None
tnumBrd = self.numSlices + 1
for ii, tidx in enumerate(zidx):
if K.image_dim_ordering() == 'th':
# 3D-version
# dataX[ii] = tdataImg[:, :, :, tidx - tnumBrd + 1:tidx + tnumBrd ]
# dataY[ii] = tdataMskCls[:, :, :, tidx:tidx+1]
# 2D-version
dataX[ii] = self.dataImg[idxImg, 0, :, :, tidx - tnumBrd + 1:tidx + tnumBrd]
# dataM[ii] = self.dataMskCls[idxImg, :, :, :, tidx]
else:
# 3D-version
# dataX[ii] = tdataImg[:, :, tidx - tnumBrd + 1:tidx + tnumBrd, :]
# dataY[ii] = tdataMskCls[:, :, tidx:tidx+1, :]
# 2D-version
timg = self.dataImg[idxImg, :, :, tidx - tnumBrd + 1:tidx + tnumBrd, 0]
tmsk = self.dataMskCls[idxImg, :, :, tidx, :]
tout = tmsk.reshape(-1, self.numCls)
if dataY is None:
dataY = np.zeros([tnumSlices] + list(tout.shape), dtype=np.float)
dataX[ii] = timg
dataY[ii] = tout
# dataM[ii] = tmsk
else:
tpathImg = self.arrPathDataImg[idxImg]
tpathMsk = self.arrPathDataMsk[idxImg]
tdataImg = self.adjustImage(nib.load(tpathImg).get_data())
tdataMsk = nib.load(tpathMsk).get_data()
tdataImg = self.transformImageFromOriginal(tdataImg, isRemoveMean=True)
tdataMsk = self.transformImageFromOriginal(tdataMsk > 200, isRemoveMean=False)
tdataMskCls = self.convertMskToOneHot(tdataMsk)
#
tnumSlices = len(zidx)
dataX = np.zeros([tnumSlices] + list(self.shapeImgSlc), dtype=np.float)
dataM = np.zeros([tnumSlices] + list(self.shapeMskSlc), dtype=np.float)
dataY = None
tnumBrd = self.numSlices+1
for ii, tidx in enumerate(zidx):
if K.image_dim_ordering()=='th':
# 3D-version
# dataX[ii] = tdataImg[:, :, :, tidx - tnumBrd + 1:tidx + tnumBrd ]
# dataY[ii] = tdataMskCls[:, :, :, tidx:tidx+1]
# 2D-version
dataX[ii] = tdataImg[0, :, :, tidx - tnumBrd + 1:tidx + tnumBrd ]
dataM[ii] = tdataMskCls[:, :, :, tidx]
else:
# 3D-version
# dataX[ii] = tdataImg[:, :, tidx - tnumBrd + 1:tidx + tnumBrd, :]
# dataY[ii] = tdataMskCls[:, :, tidx:tidx+1, :]
# 2D-version
timg = tdataImg[:, :, tidx - tnumBrd + 1:tidx + tnumBrd, 0]
tmsk = tdataMskCls[:, :, tidx, :]
tout = tmsk.reshape(-1, self.numCls)
if dataY is None:
dataY = np.zeros([tnumSlices] + list(tout.shape), dtype=np.float)
dataX[ii] = timg
dataY[ii] = tout
dataM[ii] = tmsk
return (dataX, dataY, dataM)
def getSlice25DFromImg(self, pdataImg, zidx):
tnumSlices = len(zidx)
dataX = np.zeros([tnumSlices] + list(self.shapeImgSlc), dtype=np.float)
tnumBrd = self.numSlices + 1
for ii, tidx in enumerate(zidx):
if K.image_dim_ordering() == 'th':
dataX[ii] = pdataImg[0, :, :, tidx - tnumBrd + 1:tidx + tnumBrd]
else:
dataX[ii] = pdataImg[:, :, tidx - tnumBrd + 1:tidx + tnumBrd, 0]
return dataX
def getBatchDataSlicedByIdx(self, dictImg2SliceIdx, isReturnDict=True):
dictDataX = collections.OrderedDict()
dictDataY = collections.OrderedDict()
for ii,(imgIdx, listIdxSlices) in enumerate(dictImg2SliceIdx.items()):
tmpDataX, tmpDataY, _ = self.getSlice25D(imgIdx, listIdxSlices)
dictDataX[imgIdx] = tmpDataX
dictDataY[imgIdx] = tmpDataY
if isReturnDict:
return (dictDataX, dictDataY)
else:
return (np.concatenate(dictDataX.values()), np.concatenate(dictDataY.values()))
def getBatchDataSliced(self, parNumImages=8, parNumSlices=4, isReturnDict=True):
self.checkIsInitialized()
numImg = self.numImg
rndIdx = np.random.permutation(range(numImg))[:parNumImages]
dictImg2Idx={}
for imgIdx in rndIdx:
trndSliceIdx = range(self.numSlices, self.sizeZ-self.numSlices)
trndSliceIdx = np.random.permutation(trndSliceIdx)[:parNumSlices]
dictImg2Idx[imgIdx] = trndSliceIdx
return self.getBatchDataSlicedByIdx(dictImg2SliceIdx=dictImg2Idx, isReturnDict=isReturnDict)
def getBatchDataByIdx(self, parBatchIdx):
rndIdx = parBatchIdx
parBatchSize = len(rndIdx)
dataX = np.zeros([parBatchSize] + list(self.shapeImg), dtype=np.float)
dataY = np.zeros([parBatchSize] + list(self.shapeMsk), dtype=np.float)
for ii, tidx in enumerate(rndIdx):
if self.isDataInMemory:
dataX[ii] = self.dataImg[tidx]
dataY[ii] = self.dataMskCls[tidx]
else:
tpathImg = self.arrPathDataImg[tidx]
tpathMsk = self.arrPathDataMsk[tidx]
tdataImg = self.adjustImage(nib.load(tpathImg).get_data())
tdataMsk = nib.load(tpathMsk).get_data()
tdataImg = self.transformImageFromOriginal(tdataImg, isRemoveMean=True)
tdataMsk = self.transformImageFromOriginal(tdataMsk > 200, isRemoveMean=False)
tdataMskCls = self.convertMskToOneHot(tdataMsk)
dataX[ii] = tdataImg
dataY[ii] = tdataMskCls
if self.isTheanoShape:
tshp = dataY.shape
dataY = dataY.reshape([tshp[0], tshp[1], np.prod(tshp[-3:])]).transpose((0, 2, 1))
# print (tshp)
return (dataX, dataY)
def getBatchData(self, parBatchSize=8):
self.checkIsInitialized()
numImg = self.numImg
rndIdx = np.random.permutation(range(numImg))[:parBatchSize]
return self.getBatchDataByIdx(rndIdx)
def exportModel(self, model, epochId, extInfo=None):
if extInfo is not None:
modelPrefix = extInfo
else:
modelPrefix = ''
foutModel = "%s-e%03d.json" % (modelPrefix, epochId)
foutWeights = "%s-e%03d.h5" % (modelPrefix, epochId)
foutModel = '%s-%s' % (self.pathDataIdx, foutModel)
foutWeights = '%s-%s' % (self.pathDataIdx, foutWeights)
with open(foutModel, 'w') as f:
str = json.dumps(json.loads(model.to_json()), indent=3)
f.write(str)
model.save_weights(foutWeights, overwrite=True)
return foutModel
@staticmethod
def loadModelFromJson(pathModelJson):
if not os.path.isfile(pathModelJson):
raise Exception('Cant find JSON-file [%s]' % pathModelJson)
tpathBase = os.path.splitext(pathModelJson)[0]
tpathModelWeights = '%s.h5' % tpathBase
if not os.path.isfile(tpathModelWeights):
raise Exception('Cant find h5-Weights-file [%s]' % tpathModelWeights)
# with open(pathModelJson, 'r') as f:
# tmpStr = f.read()
# model = keras.models.model_from_json(tmpStr, custom_objects={'UpSamplingInterpolated2D': UpSamplingInterpolated2D})
# model.load_weights(tpathModelWeights)
model = buildModelSegNet_InterpolatedUpSampling2D(inpShape=(256, 256, 5))
model.load_weights(tpathModelWeights)
return model
def loadModelFromDir(self, pathDirWithModels, paramFilter=None):
if paramFilter is None:
lstModels = glob.glob('%s/*.json' % pathDirWithModels)
else:
lstModels = glob.glob('%s/*%s*.json' % (pathDirWithModels, paramFilter))
pathJson = os.path.abspath(sorted(lstModels)[-1])
print (':: found model [%s] in directory [%s]' % (os.path.basename(pathJson), pathDirWithModels))
self.modelPath = pathJson
return BatcherCTLung2D.loadModelFromJson(pathJson)
def loadModelForInference(self, pathModelJson, pathMeanData, paramFilter=None):
if os.path.isdir(pathModelJson):
self.model = self.loadModelFromDir(pathModelJson)
else:
self.model = BatcherCTLung2D.loadModelFromJson(pathModelJson)
if os.path.isdir(pathMeanData):
if paramFilter is None:
lstMean = sorted(glob.glob('%s/*mean.pkl' % pathMeanData))
else:
lstMean = sorted(glob.glob('%s/*%s*mean.pkl' % (pathMeanData, paramFilter)))
if len(lstMean) < 1:
raise Exception('Cant find mean-file in directory [%s]' % pathMeanData)
self.pathMeanData = lstMean[0]
else:
self.pathMeanData = pathMeanData
self.precalculateAndLoadMean(isRecalculateMean=False)
self.numCls = self.model.output_shape[-1]
self.shapeImgSlc = self.model.input_shape[1:]
self.isTheanoShape = (K.image_dim_ordering() == 'th')
if self.isTheanoShape:
self.shapeMskSlc = tuple([self.numCls] + list(self.shapeImgSlc[1:]))
self.numSlices = (self.model.input_shape[1] - 1) // 2
else:
self.shapeMskSlc = tuple(list(self.shapeImgSlc[:-1]) + [self.numCls])
self.numSlices = (self.model.input_shape[-1] - 1) // 2
def inference(self, lstData, batchSize=2, isDebug=False):
if self.model is None:
raise Exception('Model is not loaded... load model before call inferece()')
if len(lstData) > 0:
tmpListOfImg = []
# (1) load into memory
if isinstance(lstData[0], str):# or isinstance(lstData[0], unicode):
for ii in lstData:
tmpListOfImg.append(nib.load(ii).get_data())
else:
for ii in lstData:
tmpListOfImg.append(ii.get_data())
# (2) check shapes
tsetShapes = set()
for ii in tmpListOfImg:
tsetShapes.add(ii.shape[:-1])
if len(tsetShapes) > 1:
raise Exception('Shapes of images must be equal sized')
tmpShape = self.shapeImgSlc[:-1]
if tmpShape not in tsetShapes:
raise Exception('Model input shape and shapes of input images is not equal!')
# (3) inference
# self.isDataInMemory = True
numImg = len(tmpListOfImg)
# self.dataImg = np.zeros([numImg] + list(self.shapeImg), dtype=np.float)
ret = []
for ii in range(numImg):
# (3.1) convert data
if isDebug:
print (':: process: [%s]' % tmpListOfImg[ii])
tdataImg = self.adjustImage(tmpListOfImg[ii])
tdataImg = self.transformImageFromOriginal(tdataImg, isRemoveMean=True)
if K.image_dim_ordering() == 'th':
                    tsegm3D = np.zeros(tdataImg.shape[1:], np.float)
numSlicesZ = tdataImg.shape[-1]
else:
tsegm3D = np.zeros(tdataImg.shape[:-1], np.float)
numSlicesZ = tdataImg.shape[-2]
# (3.2) inference slide-by-slide
lstIdxScl = range(self.numSlices, numSlicesZ - self.numSlices)
lstIdxScl = split_list_by_blocks(lstIdxScl, batchSize)
for ss, sslst in enumerate(lstIdxScl):
dataX = self.getSlice25DFromImg(tdataImg, sslst)
tret = self.model.predict_on_batch(dataX)
if K.image_dim_ordering() == 'th':
# raise NotImplementedError
tret = tret.transpose((1, 0, 2))
sizXY = self.shapeImgSlc[+1:]
else:
sizXY = self.shapeImgSlc[:-1]
tret = tret.transpose((1, 0, 2))
tret = tret.reshape(list(sizXY) + list(tret.shape[1:]))
# tmskSlc = (tret[:, :, :, 1] > 0.5).astype(np.float)
tsegm3D[:, :, sslst] = tret[:,:,:,1]
ret.append(tsegm3D)
return ret
else:
return []
#########################################
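# A minimal, hedged usage sketch (not part of the original module). The
# batcher's constructor arguments are not shown here, so its creation is
# omitted, and the paths below are hypothetical placeholders:
#
#   batcher = ...  # a configured BatcherCTLung2D instance
#   batcher.loadModelForInference('/path/to/models_dir', '/path/to/mean_dir')
#   masks = batcher.inference(['/path/to/ct_volume.nii.gz'], batchSize=2)
#   # `masks` is a list of per-voxel probability volumes (class index 1)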
if __name__ == '__main__':
pass | {
"content_hash": "c3e3dac89290dcc92fe97770f41f062f",
"timestamp": "",
"source": "github",
"line_count": 688,
"max_line_length": 129,
"avg_line_length": 46.60029069767442,
"alnum_prop": 0.5700071738248963,
"repo_name": "gakarak/BTBDB_ImageAnalysisSubPortal",
"id": "0157575e58a02b5fec2688a29dbd4635a9dc1273",
"size": "32103",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/core/segmct/fcnn_lung2d.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "161"
},
{
"name": "HTML",
"bytes": "15991"
},
{
"name": "JavaScript",
"bytes": "6689"
},
{
"name": "Python",
"bytes": "458280"
},
{
"name": "Shell",
"bytes": "3378"
}
],
"symlink_target": ""
} |
import unittest
import copy
from scrapy.http import Headers
class HeadersTest(unittest.TestCase):
def assertSortedEqual(self, first, second, msg=None):
return self.assertEqual(sorted(first), sorted(second), msg)
def test_basics(self):
h = Headers({'Content-Type': 'text/html', 'Content-Length': 1234})
assert h['Content-Type']
assert h['Content-Length']
self.assertRaises(KeyError, h.__getitem__, 'Accept')
self.assertEqual(h.get('Accept'), None)
self.assertEqual(h.getlist('Accept'), [])
self.assertEqual(h.get('Accept', '*/*'), b'*/*')
self.assertEqual(h.getlist('Accept', '*/*'), [b'*/*'])
self.assertEqual(h.getlist('Accept', ['text/html', 'images/jpeg']),
[b'text/html', b'images/jpeg'])
def test_single_value(self):
h = Headers()
h['Content-Type'] = 'text/html'
self.assertEqual(h['Content-Type'], b'text/html')
self.assertEqual(h.get('Content-Type'), b'text/html')
self.assertEqual(h.getlist('Content-Type'), [b'text/html'])
def test_multivalue(self):
h = Headers()
h['X-Forwarded-For'] = hlist = ['ip1', 'ip2']
self.assertEqual(h['X-Forwarded-For'], b'ip2')
self.assertEqual(h.get('X-Forwarded-For'), b'ip2')
self.assertEqual(h.getlist('X-Forwarded-For'), [b'ip1', b'ip2'])
assert h.getlist('X-Forwarded-For') is not hlist
def test_encode_utf8(self):
h = Headers({'key': '\xa3'}, encoding='utf-8')
key, val = dict(h).popitem()
assert isinstance(key, bytes), key
assert isinstance(val[0], bytes), val[0]
self.assertEqual(val[0], b'\xc2\xa3')
def test_encode_latin1(self):
h = Headers({'key': '\xa3'}, encoding='latin1')
key, val = dict(h).popitem()
self.assertEqual(val[0], b'\xa3')
def test_encode_multiple(self):
h = Headers({'key': ['\xa3']}, encoding='utf-8')
key, val = dict(h).popitem()
self.assertEqual(val[0], b'\xc2\xa3')
def test_delete_and_contains(self):
h = Headers()
h['Content-Type'] = 'text/html'
assert 'Content-Type' in h
del h['Content-Type']
assert 'Content-Type' not in h
def test_setdefault(self):
h = Headers()
hlist = ['ip1', 'ip2']
olist = h.setdefault('X-Forwarded-For', hlist)
assert h.getlist('X-Forwarded-For') is not hlist
assert h.getlist('X-Forwarded-For') is olist
h = Headers()
olist = h.setdefault('X-Forwarded-For', 'ip1')
self.assertEqual(h.getlist('X-Forwarded-For'), [b'ip1'])
assert h.getlist('X-Forwarded-For') is olist
def test_iterables(self):
idict = {'Content-Type': 'text/html', 'X-Forwarded-For': ['ip1', 'ip2']}
h = Headers(idict)
self.assertDictEqual(dict(h),
{b'Content-Type': [b'text/html'],
b'X-Forwarded-For': [b'ip1', b'ip2']})
self.assertSortedEqual(h.keys(),
[b'X-Forwarded-For', b'Content-Type'])
self.assertSortedEqual(h.items(),
[(b'X-Forwarded-For', [b'ip1', b'ip2']),
(b'Content-Type', [b'text/html'])])
self.assertSortedEqual(h.values(), [b'ip2', b'text/html'])
def test_update(self):
h = Headers()
h.update({'Content-Type': 'text/html',
'X-Forwarded-For': ['ip1', 'ip2']})
self.assertEqual(h.getlist('Content-Type'), [b'text/html'])
self.assertEqual(h.getlist('X-Forwarded-For'), [b'ip1', b'ip2'])
def test_copy(self):
h1 = Headers({'header1': ['value1', 'value2']})
h2 = copy.copy(h1)
self.assertEqual(h1, h2)
self.assertEqual(h1.getlist('header1'), h2.getlist('header1'))
assert h1.getlist('header1') is not h2.getlist('header1')
assert isinstance(h2, Headers)
def test_appendlist(self):
h1 = Headers({'header1': 'value1'})
h1.appendlist('header1', 'value3')
self.assertEqual(h1.getlist('header1'), [b'value1', b'value3'])
h1 = Headers()
h1.appendlist('header1', 'value1')
h1.appendlist('header1', 'value3')
self.assertEqual(h1.getlist('header1'), [b'value1', b'value3'])
def test_setlist(self):
h1 = Headers({'header1': 'value1'})
self.assertEqual(h1.getlist('header1'), [b'value1'])
h1.setlist('header1', [b'value2', b'value3'])
self.assertEqual(h1.getlist('header1'), [b'value2', b'value3'])
def test_setlistdefault(self):
h1 = Headers({'header1': 'value1'})
h1.setlistdefault('header1', ['value2', 'value3'])
h1.setlistdefault('header2', ['value2', 'value3'])
self.assertEqual(h1.getlist('header1'), [b'value1'])
self.assertEqual(h1.getlist('header2'), [b'value2', b'value3'])
def test_none_value(self):
h1 = Headers()
h1['foo'] = 'bar'
h1['foo'] = None
h1.setdefault('foo', 'bar')
self.assertEqual(h1.get('foo'), None)
self.assertEqual(h1.getlist('foo'), [])
def test_int_value(self):
h1 = Headers({'hey': 5})
h1['foo'] = 1
h1.setdefault('bar', 2)
h1.setlist('buz', [1, 'dos', 3])
self.assertEqual(h1.getlist('foo'), [b'1'])
self.assertEqual(h1.getlist('bar'), [b'2'])
self.assertEqual(h1.getlist('buz'), [b'1', b'dos', b'3'])
self.assertEqual(h1.getlist('hey'), [b'5'])
def test_invalid_value(self):
self.assertRaisesRegex(TypeError, 'Unsupported value type',
Headers, {'foo': object()})
self.assertRaisesRegex(TypeError, 'Unsupported value type',
Headers().__setitem__, 'foo', object())
self.assertRaisesRegex(TypeError, 'Unsupported value type',
Headers().setdefault, 'foo', object())
self.assertRaisesRegex(TypeError, 'Unsupported value type',
Headers().setlist, 'foo', [object()])
| {
"content_hash": "cedf853b496e15bf906865b9184384d6",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 80,
"avg_line_length": 39.670967741935485,
"alnum_prop": 0.5550496015612295,
"repo_name": "pawelmhm/scrapy",
"id": "64ff7a73dbfee6a2f84b6380e24fdd637ba69966",
"size": "6149",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/test_http_headers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2790"
},
{
"name": "Python",
"bytes": "1889675"
},
{
"name": "Roff",
"bytes": "2010"
},
{
"name": "Shell",
"bytes": "259"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
import sys
# Note throughout the distinction between the artifact_root (which is where the artifacts are
# originally built and where the cache restores them to) and the cache root path/URL (which is
# where the artifacts are cached).
logger = logging.getLogger(__name__)
class ArtifactCacheError(Exception):
pass
class NonfatalArtifactCacheError(Exception):
pass
class UnreadableArtifact(object):
"""A False-y value to indicate a read-failure (vs a normal cache-miss)
See docstring on `ArtifactCache.use_cached_files` for details.
"""
def __init__(self, key, err=None):
"""
:param CacheKey key: The key of the artifact that encountered an error
:param err: Any additional information on the nature of the read error.
"""
self.key = key
self.err = err
# For python 3
def __bool__(self):
return False
# For python 2
def __nonzero__(self):
return self.__bool__()
def __str__(self):
return "key={} err={}".format(self.key, self.err)
class ArtifactCache(object):
"""A map from cache key to a set of build artifacts.
The cache key must uniquely identify the inputs (sources, compiler flags etc.) needed to
build the artifacts. Cache keys are typically obtained from a CacheKeyGenerator.
Subclasses implement the methods below to provide this functionality.
"""
def __init__(self, artifact_root):
"""Create an ArtifactCache.
All artifacts must be under artifact_root.
"""
self.artifact_root = artifact_root
def prune(self):
"""Prune stale cache files
Remove old unused cache files
:return:
"""
pass
def insert(self, cache_key, paths, overwrite=False):
"""Cache the output of a build.
By default, checks cache.has(key) first, only proceeding to create and insert an artifact
if it is not already in the cache (though `overwrite` can be used to skip the check and
unconditionally insert).
:param CacheKey cache_key: A CacheKey object.
:param list<str> paths: List of absolute paths to generated dirs/files.
These must be under the artifact_root.
:param bool overwrite: Skip check for existing, insert even if already in cache.
"""
    missing_files = [f for f in paths if not os.path.exists(f)]
if missing_files:
raise ArtifactCacheError('Tried to cache nonexistent files {0}'.format(missing_files))
if not overwrite:
if self.has(cache_key):
logger.debug('Skipping insert of existing artifact: {0}'.format(cache_key))
return False
try:
self.try_insert(cache_key, paths)
return True
except NonfatalArtifactCacheError as e:
logger.error('Error while writing to artifact cache: {0}'.format(e))
return False
def try_insert(self, cache_key, paths):
"""Attempt to cache the output of a build, without error-handling.
:param CacheKey cache_key: A CacheKey object.
:param list<str> paths: List of absolute paths to generated dirs/files. These must be under the artifact_root.
"""
pass
def has(self, cache_key):
pass
def use_cached_files(self, cache_key, hit_callback=None):
"""Use the files cached for the given key.
Returned result indicates whether or not an artifact was successfully found
and decompressed to the `artifact_root`:
`True` if artifact was found and successfully decompressed
`False` if not in the cache
Implementations may choose to return an UnreadableArtifact instance instead
of `False` to indicate an artifact was in the cache but could not be read,
due to an error or corruption. UnreadableArtifact evaluates as False-y, so
callers can treat the result as a boolean if they are only concerned with
whether or not an artifact was read.
Callers may also choose to attempt to repair or report corrupted artifacts
differently, as these are unexpected, unlike normal cache misses.
:param CacheKey cache_key: A CacheKey object.
"""
pass
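  # A hedged sketch (not in the original file) of how a caller might interpret
  # the documented return values; `cache` and `key` are hypothetical:
  #
  #   res = cache.use_cached_files(key)
  #   if res:
  #     pass  # cache hit, artifact restored under artifact_root
  #   elif isinstance(res, UnreadableArtifact):
  #     pass  # artifact was present but could not be read
  #   else:
  #     pass  # normal cache miss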
def delete(self, cache_key):
"""Delete the artifacts for the specified key.
Deleting non-existent artifacts is a no-op.
:param CacheKey cache_key: A CacheKey object.
"""
pass
def call_use_cached_files(tup):
"""Importable helper for multi-proc calling of ArtifactCache.use_cached_files on a cache instance.
Multiprocessing map/apply/etc require functions which can be imported, not bound methods.
To call a bound method, instead call a helper like this and pass tuple of the instance and args.
The helper can then call the original method on the deserialized instance.
:param tup: A tuple of an ArtifactCache and args (eg CacheKey) for ArtifactCache.use_cached_files.
"""
try:
cache, key, callback = tup
res = cache.use_cached_files(key, callback)
if res:
sys.stderr.write('.')
else:
sys.stderr.write(' ')
return res
except NonfatalArtifactCacheError as e:
logger.warn('Error calling use_cached_files in artifact cache: {0}'.format(e))
return False
def call_insert(tup):
"""Importable helper for multi-proc calling of ArtifactCache.insert on an ArtifactCache instance.
See docstring on call_use_cached_files explaining why this is useful.
:param tup: A 4-tuple of an ArtifactCache and the 3 args passed to ArtifactCache.insert:
eg (some_cache_instance, cache_key, [some_file, another_file], False)
"""
try:
cache, key, files, overwrite = tup
return cache.insert(key, files, overwrite)
except NonfatalArtifactCacheError as e:
logger.warn('Error while inserting into artifact cache: {0}'.format(e))
return False
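# A minimal sketch (not from the original module) of driving the importable
# helpers above from a multiprocessing pool. `cache`, `cache_keys` and
# `paths_by_key` are hypothetical: a concrete ArtifactCache subclass instance,
# a list of CacheKey objects, and a mapping from key to generated paths.
#
#   from multiprocessing import Pool
#
#   pool = Pool(processes=4)
#   write_results = pool.map(call_insert,
#                            [(cache, key, paths_by_key[key], False)
#                             for key in cache_keys])
#   read_results = pool.map(call_use_cached_files,
#                           [(cache, key, None) for key in cache_keys])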
| {
"content_hash": "401d9481ab3acacc51add883079bcbac",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 114,
"avg_line_length": 32.37222222222222,
"alnum_prop": 0.7025913849322121,
"repo_name": "sameerparekh/pants",
"id": "1f7ddc2d8fda2e85adc8e6be2e1f9dc3f438f86f",
"size": "5974",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/python/pants/cache/artifact_cache.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "767"
},
{
"name": "CSS",
"bytes": "11442"
},
{
"name": "GAP",
"bytes": "2459"
},
{
"name": "Go",
"bytes": "1437"
},
{
"name": "HTML",
"bytes": "70150"
},
{
"name": "Java",
"bytes": "308102"
},
{
"name": "JavaScript",
"bytes": "25075"
},
{
"name": "Protocol Buffer",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "3862954"
},
{
"name": "Scala",
"bytes": "85437"
},
{
"name": "Shell",
"bytes": "49265"
},
{
"name": "Thrift",
"bytes": "2858"
}
],
"symlink_target": ""
} |
from __future__ import division, print_function, unicode_literals
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
testinfo = "s, t 0.49, s, t 0.51, s, t 2.49, s, t 2.51, s, t 2.99, s, t 3.1, s, q"
tags = "WavesTiles3D, grid_effects"
import cocos
from cocos.director import director
import cocos.actions as ac
from cocos.layer import *
import pyglet
class BackgroundLayer(cocos.layer.Layer):
def __init__(self):
super(BackgroundLayer, self).__init__()
self.img = pyglet.resource.image('background_image.png')
def draw( self ):
glColor4ub(255, 255, 255, 255)
glPushMatrix()
self.transform()
self.img.blit(0,0)
glPopMatrix()
def main():
director.init( resizable=True )
director.set_depth_test()
main_scene = cocos.scene.Scene()
main_scene.add( BackgroundLayer(), z=0 )
# In real code after a sequence of grid actions the StopGrid() action
    # should be called. Omitted here to stay in the last grid action render
action1 = ac.WavesTiles3D( waves=2, amplitude=70, grid=(16,16), duration=3)
main_scene.do( action1 )
director.run (main_scene)
if __name__ == '__main__':
main()
| {
"content_hash": "da6f35c884e72a2cdc25727bb32b8304",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 82,
"avg_line_length": 28.8,
"alnum_prop": 0.6566358024691358,
"repo_name": "vyscond/cocos",
"id": "d89060ed1834cbf4eb46b135b92e6afde2d1866b",
"size": "1296",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "test/test_wavestiles3d.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "7097"
},
{
"name": "Makefile",
"bytes": "6766"
},
{
"name": "Python",
"bytes": "1330883"
}
],
"symlink_target": ""
} |
import os
import math
from director.consoleapp import ConsoleApp
from director import ioUtils
from director import segmentation
from director import segmentationroutines
from director import applogic
from director import visualization as vis
from director import roboturdf
app = ConsoleApp()
# create a view
view = app.createView()
segmentation._defaultSegmentationView = view
segmentation.initAffordanceManager(view)
robotStateModel, robotStateJointController = roboturdf.loadRobotModel('robot state model', view, parent='sensors', color=roboturdf.getRobotGrayColor(), visible=True)
segmentationroutines.SegmentationContext.initWithRobot(robotStateModel)
# Move robot to near to valve wall:
# 0degrees
#robotStateJointController.q[5] = math.radians(120)
#robotStateJointController.q[0] = 0
#robotStateJointController.q[1] = 0
# 30,60,90
robotStateJointController.q [5] = math.radians(-120)
robotStateJointController.q [0] = 1
robotStateJointController.q [1] = 1
robotStateJointController.q[2] = 0.85
robotStateJointController.push()
# load poly data
dataDir = app.getTestingDataDirectory()
#polyData = ioUtils.readPolyData(os.path.join(dataDir, 'valve/valve-lever-scene.vtp'))
#polyData = ioUtils.readPolyData(os.path.join(dataDir, 'valve/valve-lever-scene-30.vtp'))
#polyData = ioUtils.readPolyData(os.path.join(dataDir, 'valve/valve-lever-scene-60.vtp'))
polyData = ioUtils.readPolyData(os.path.join(dataDir, 'valve/valve-lever-scene-90.vtp'))
vis.showPolyData(polyData, 'pointcloud snapshot')
segmentation.segmentValveWallAuto(.2, mode='both', removeGroundMethod=segmentation.removeGround )
if app.getTestingInteractiveEnabled():
view.show()
app.showObjectModel()
app.start()
| {
"content_hash": "585d4751dd704833d738a6c344270349",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 165,
"avg_line_length": 35.4375,
"alnum_prop": 0.800705467372134,
"repo_name": "patmarion/director",
"id": "cb146a65bb33c2ba784a2e860825ab3914d0c5a9",
"size": "1701",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "src/python/tests/testValveFit.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "121912"
},
{
"name": "C++",
"bytes": "565385"
},
{
"name": "CMake",
"bytes": "82478"
},
{
"name": "Dockerfile",
"bytes": "2510"
},
{
"name": "GLSL",
"bytes": "15443"
},
{
"name": "MATLAB",
"bytes": "161948"
},
{
"name": "Makefile",
"bytes": "5014"
},
{
"name": "Python",
"bytes": "2282093"
},
{
"name": "Shell",
"bytes": "14291"
}
],
"symlink_target": ""
} |
from __future__ import division
import gc
import os.path
import sys
import timeit
pythoscope_path = os.path.join(os.path.dirname(__file__), os.pardir)
sys.path.insert(0, os.path.abspath(pythoscope_path))
from pythoscope.cmdline import init_project
from pythoscope.store import get_pickle_path
from test.helper import putfile, rmtree, tmpdir
def make_class(name, methods_count=20):
code = ["class %s(object):\n" % name]
for i in range(methods_count):
code.append(" def method_%d(self):\n pass\n" % i)
return ''.join(code)
def make_function(name):
return "def %s():\n pass\n" % name
def make_module(classes_count=10, functions_count=10):
code = []
for i in range(classes_count):
code.append(make_class("Class%d" % i))
for i in range(functions_count):
code.append(make_function("function_%d" % i))
return ''.join(code)
# Run the setup once, stmt n times and report the minimum running time.
#
# Based on timeit module. I had to modify it, because:
# - timer.timeit(n) returns time of running stmt n times (the sum, not the minimum),
# - min(timer.repeat(n, 1)) runs the setup n times.
timer_template = """
def inner(_n, _timer):
_results = []
%(setup)s
for _i in range(_n):
_t0 = _timer()
%(stmt)s
_t1 = _timer()
_results.append(_t1 - _t0)
return min(_results)
"""
def run_timer(stmt, setup, n=3, timer=timeit.default_timer):
src = timer_template % {'stmt': stmt, 'setup': setup}
code = compile(src, '', "exec")
ns = {}
exec code in globals(), ns
inner = ns["inner"]
gcold = gc.isenabled()
gc.disable()
timing = inner(n, timer)
if gcold:
gc.enable()
return timing
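# A hedged usage sketch (not in the original script): time a throwaway
# statement with an empty setup; run_timer returns the minimum elapsed time
# over `n` runs of the statement.
#
#   best = run_timer("x = [i ** 2 for i in range(1000)]", "pass", n=5)
#   print "best of 5 runs: %f seconds" % best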
def human_size(bytes, prefixes=['', 'K', 'M', 'G']):
if bytes > 1024:
return human_size(bytes/1024, prefixes[1:])
return "%.2f%sb" % (bytes, prefixes[0])
def benchmark_project_load_performance(modules_count=25):
print "==> Creating project with %d modules..." % modules_count
project_path = tmpdir()
module = make_module()
for i in range(modules_count):
putfile(project_path, "module%s.py" % i, module)
init_project(project_path, skip_inspection=True)
print "==> Inspecting project.."
elapsed = run_timer("inspect_project(Project('%s'))" % project_path,
"from pythoscope.inspector import inspect_project; from pythoscope.store import Project")
print "It took %f seconds to inspect." % elapsed
print "==> Saving project information"
elapsed = run_timer("project.save()",
"""from pythoscope.inspector import inspect_project ;\
from pythoscope.store import Project ;\
project = Project('%s') ;\
inspect_project(project)""" % project_path)
print "It took %f seconds to save the project information." % elapsed
print "==> Reading project information"
elapsed = run_timer("Project.from_directory('%s')" % project_path,
"from pythoscope.store import Project")
print "It took %f seconds to read project information from %s pickle." % \
(elapsed, human_size(os.path.getsize(get_pickle_path(project_path))))
rmtree(project_path)
if __name__ == "__main__":
benchmark_project_load_performance()
| {
"content_hash": "68a2874ebe8fd4f4a1f2990e865079ab",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 113,
"avg_line_length": 34.111111111111114,
"alnum_prop": 0.6188925081433225,
"repo_name": "adamhaapala/pythoscope",
"id": "688d810ba4414e9a9cfe6e8fb1e7205fce5a40d3",
"size": "3377",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/speed_benchmark.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from openturns import *
TESTPREAMBLE()
try:
    # Create an instance
myFunc = NumericalMathFunction("x", "x^2")
mySpatialFunc = SpatialFunction(myFunc)
print "mySpatialFunc=", mySpatialFunc
# Get the input and output description
print "mySpatialFunc input description=", mySpatialFunc.getInputDescription()
print "mySpatialFunc output description=", mySpatialFunc.getOutputDescription()
# Get the input and output dimension, based on description
print "mySpatialFunc input dimension=", mySpatialFunc.getInputDimension()
print "mySpatialFunc output dimension=", mySpatialFunc.getOutputDimension()
# Create a TimeSeries
tg = RegularGrid(0.0, 0.2, 6)
data = NumericalSample(tg.getN(), myFunc.getInputDimension())
for i in range(data.getSize()):
for j in range(data.getDimension()):
data[i, j] = i * data.getDimension() + j
ts = TimeSeries(tg, data)
print "input time series=", ts
print "output time series=", mySpatialFunc(ts)
# Get the number of calls
print "called ", mySpatialFunc.getCallsNumber(), " times"
except:
import sys
print "t_SpatialFunction_std.py", sys.exc_type, sys.exc_value
| {
"content_hash": "b816bd1b392fe639b36d974b9c080928",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 83,
"avg_line_length": 38.45161290322581,
"alnum_prop": 0.7030201342281879,
"repo_name": "sofianehaddad/ot-svn",
"id": "4d9854a1f50d7f9e9b59033bc092f79bfa4bba09",
"size": "1216",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/test/t_SpatialFunction_std.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6498"
},
{
"name": "C",
"bytes": "455749"
},
{
"name": "C++",
"bytes": "10021345"
},
{
"name": "CMake",
"bytes": "240050"
},
{
"name": "FORTRAN",
"bytes": "299"
},
{
"name": "Makefile",
"bytes": "12372"
},
{
"name": "NSIS",
"bytes": "26263"
},
{
"name": "Python",
"bytes": "1221927"
},
{
"name": "R",
"bytes": "11141"
},
{
"name": "Scilab",
"bytes": "2612"
},
{
"name": "Shell",
"bytes": "20403"
},
{
"name": "TeX",
"bytes": "4250"
},
{
"name": "Visual Basic",
"bytes": "3294"
}
],
"symlink_target": ""
} |
"""The Normal (Gaussian) distribution class.
@@Gaussian
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util # pylint: disable=line-too-long
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
# TODO(ebrevdo): Use asserts contrib module when ready
def _assert_all_positive(x):
return logging_ops.Assert(
math_ops.reduce_all(x > 0),
["Tensor %s should contain only positive values: " % x.name, x])
class Gaussian(object):
"""The scalar Gaussian distribution with mean and stddev parameters mu, sigma.
The PDF of this distribution is:
```f(x) = sqrt(1/(2*pi*sigma^2)) exp(-(x-mu)^2/(2*sigma^2))```
"""
def __init__(self, mu, sigma, name=None):
"""Construct Gaussian distributions with mean and stddev `mu` and `sigma`.
The parameters `mu` and `sigma` must be shaped in a way that supports
broadcasting (e.g. `mu + sigma` is a valid operation).
Args:
mu: `float` or `double` tensor, the means of the distribution(s).
sigma: `float` or `double` tensor, the stddevs of the distribution(s).
sigma must contain only positive values.
name: The name to give Ops created by the initializer.
Raises:
TypeError: if mu and sigma are different dtypes.
"""
with ops.op_scope([mu, sigma], name, "Gaussian"):
mu = ops.convert_to_tensor(mu)
sigma = ops.convert_to_tensor(sigma)
with ops.control_dependencies([_assert_all_positive(sigma)]):
self._mu = mu
self._sigma = sigma
contrib_tensor_util.assert_same_float_dtype((mu, sigma))
@property
def dtype(self):
return self._mu.dtype
@property
def mu(self):
return self._mu
@property
def sigma(self):
return self._sigma
@property
def mean(self):
return self._mu * array_ops.ones_like(self._sigma)
def log_pdf(self, x, name=None):
"""Log pdf of observations in `x` under these Gaussian distribution(s).
Args:
x: tensor of dtype `dtype`, must be broadcastable with `mu` and `sigma`.
name: The name to give this op.
Returns:
log_pdf: tensor of dtype `dtype`, the log-PDFs of `x`.
"""
with ops.op_scope([self._mu, self._sigma, x], name, "GaussianLogPdf"):
x = ops.convert_to_tensor(x)
if x.dtype != self.dtype:
raise TypeError("Input x dtype does not match dtype: %s vs. %s"
% (x.dtype, self.dtype))
log_2_pi = constant_op.constant(math.log(2 * math.pi), dtype=self.dtype)
return (-0.5*log_2_pi - math_ops.log(self._sigma)
-0.5*math_ops.square((x - self._mu) / self._sigma))
def cdf(self, x, name=None):
"""CDF of observations in `x` under these Gaussian distribution(s).
Args:
x: tensor of dtype `dtype`, must be broadcastable with `mu` and `sigma`.
name: The name to give this op.
Returns:
cdf: tensor of dtype `dtype`, the CDFs of `x`.
"""
with ops.op_scope([self._mu, self._sigma, x], name, "GaussianCdf"):
x = ops.convert_to_tensor(x)
if x.dtype != self.dtype:
raise TypeError("Input x dtype does not match dtype: %s vs. %s"
% (x.dtype, self.dtype))
return (0.5 + 0.5*math_ops.erf(
1.0/(math.sqrt(2.0) * self._sigma)*(x - self._mu)))
def log_cdf(self, x, name=None):
"""Log CDF of observations `x` under these Gaussian distribution(s).
Args:
x: tensor of dtype `dtype`, must be broadcastable with `mu` and `sigma`.
name: The name to give this op.
Returns:
log_cdf: tensor of dtype `dtype`, the log-CDFs of `x`.
"""
with ops.op_scope([self._mu, self._sigma, x], name, "GaussianLogCdf"):
return math_ops.log(self.cdf(x))
def pdf(self, x, name=None):
"""The PDF of observations in `x` under these Gaussian distribution(s).
Args:
x: tensor of dtype `dtype`, must be broadcastable with `mu` and `sigma`.
name: The name to give this op.
Returns:
pdf: tensor of dtype `dtype`, the pdf values of `x`.
"""
with ops.op_scope([self._mu, self._sigma, x], name, "GaussianPdf"):
return math_ops.exp(self.log_pdf(x))
def entropy(self, name=None):
"""The entropy of Gaussian distribution(s).
Args:
name: The name to give this op.
Returns:
entropy: tensor of dtype `dtype`, the entropy.
"""
with ops.op_scope([self._mu, self._sigma], name, "GaussianEntropy"):
two_pi_e1 = constant_op.constant(
2 * math.pi * math.exp(1), dtype=self.dtype)
# Use broadcasting rules to calculate the full broadcast sigma.
sigma = self._sigma * array_ops.ones_like(self._mu)
return 0.5 * math_ops.log(two_pi_e1 * math_ops.square(sigma))
def sample(self, n, seed=None, name=None):
"""Sample `n` observations from the Gaussian Distributions.
Args:
n: `Scalar`, type int32, the number of observations to sample.
seed: Python integer, the random seed.
name: The name to give this op.
Returns:
samples: `[n, ...]`, a `Tensor` of `n` samples for each
of the distributions determined by broadcasting the hyperparameters.
"""
with ops.op_scope([self._mu, self._sigma, n], name, "GaussianSample"):
broadcast_shape = (self._mu + self._sigma).get_shape()
n = ops.convert_to_tensor(n)
shape = array_ops.concat(
0, [array_ops.pack([n]), array_ops.shape(self.mean)])
sampled = random_ops.random_normal(
shape=shape, mean=0, stddev=1, dtype=self._mu.dtype, seed=seed)
# Provide some hints to shape inference
n_val = tensor_util.constant_value(n)
final_shape = tensor_shape.vector(n_val).concatenate(broadcast_shape)
sampled.set_shape(final_shape)
return sampled * self._sigma + self._mu
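# A hedged usage sketch (not part of the original file), assuming the
# graph-and-session style of the TensorFlow release this contrib module
# targets; `tf` is not imported above, so the session lines are illustrative:
#
#   dist = Gaussian(mu=0.0, sigma=1.0)
#   log_p = dist.log_pdf(0.5)
#   samples = dist.sample(10, seed=42)
#   # with tf.Session() as sess:
#   #     print(sess.run([log_p, samples]))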
| {
"content_hash": "31ac1a3b29086d92fec57c5971748333",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 125,
"avg_line_length": 34.114754098360656,
"alnum_prop": 0.6423193977254525,
"repo_name": "peterbraden/tensorflow",
"id": "cbb98624d97f639b4b0c47bcfd40335a042c0cf6",
"size": "6920",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/distributions/python/ops/gaussian.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "154152"
},
{
"name": "C++",
"bytes": "8654768"
},
{
"name": "CMake",
"bytes": "29372"
},
{
"name": "CSS",
"bytes": "1297"
},
{
"name": "HTML",
"bytes": "737101"
},
{
"name": "Java",
"bytes": "50361"
},
{
"name": "JavaScript",
"bytes": "11651"
},
{
"name": "Jupyter Notebook",
"bytes": "1771939"
},
{
"name": "Objective-C",
"bytes": "1288"
},
{
"name": "Protocol Buffer",
"bytes": "108842"
},
{
"name": "Python",
"bytes": "5710163"
},
{
"name": "Shell",
"bytes": "164294"
},
{
"name": "TypeScript",
"bytes": "394470"
}
],
"symlink_target": ""
} |
"""Certificate Authority package certificate request helper module
"""
__author__ = "P J Kershaw"
__date__ = "19/09/12"
__copyright__ = "(C) 2012 Science and Technology Facilities Council"
__license__ = "BSD - see LICENSE file in top-level directory"
__contact__ = "[email protected]"
__revision__ = "$Id$"
from OpenSSL import crypto
class CertReqUtils(object):
"""Utility class containing helper functions for use with Certificate
Authority"""
PRIKEY_NBITS_DEFAULT = 1024
MESSAGE_DIGEST_TYPE_DEFAULT = "md5"
@staticmethod
def create_key_pair(nbits_for_key=PRIKEY_NBITS_DEFAULT):
"""Generate key pair and return as PEM encoded string
@type nbits_for_key: int
        @param nbits_for_key: number of bits for private key generation -
        default is 1024 (PRIKEY_NBITS_DEFAULT)
@rtype: OpenSSL.crypto.PKey
@return: public/private key pair
"""
key_pair = crypto.PKey()
key_pair.generate_key(crypto.TYPE_RSA, nbits_for_key)
return key_pair
@staticmethod
def create_cert_req(dn,
key_pair,
message_digest=MESSAGE_DIGEST_TYPE_DEFAULT):
"""Create a certificate request.
@param dn: The distinguished name of the subject of the request,
possible arguments are:
C - Country name
SP - State or province name
L - Locality name
O - Organization name
OU - Organizational unit name
CN - Common name
email - E-mail address
@type dn: dict
@type key_pair: string/None
@param key_pair: public/private key pair
@type message_digest: basestring
@param message_digest: message digest type - default is MD5
        @return: certificate request object
        @rtype: OpenSSL.crypto.X509Req
"""
# Check all required certificate request DN parameters are set
# Create certificate request
cert_req = crypto.X509Req()
subj = cert_req.get_subject()
for k, v in dn.items():
setattr(subj, k, v)
# Create public key object
cert_req.set_pubkey(key_pair)
# Add the public key to the request
cert_req.sign(key_pair, message_digest)
return cert_req
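# A minimal usage sketch (not part of the original module); the subject DN
# below is a hypothetical example and PEM serialisation uses pyOpenSSL's
# crypto.dump_certificate_request:
#
#   key_pair = CertReqUtils.create_key_pair(nbits_for_key=2048)
#   cert_req = CertReqUtils.create_cert_req({'CN': 'host.example.org',
#                                            'O': 'Example Org'},
#                                           key_pair)
#   pem = crypto.dump_certificate_request(crypto.FILETYPE_PEM, cert_req)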
| {
"content_hash": "672f5786d696991ca0d39fbc7306dbfb",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 86,
"avg_line_length": 34.55714285714286,
"alnum_prop": 0.5890863993385697,
"repo_name": "philipkershaw/CertificateAuthority",
"id": "879aff245e64340949635ec99275cab8d7949636",
"size": "2419",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "contrail/security/ca/cert_req.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "49019"
}
],
"symlink_target": ""
} |
from django.shortcuts import redirect
def index(request):
return redirect('/panel')
| {
"content_hash": "a4f6884607653a10ced8d0be0bf14388",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 37,
"avg_line_length": 22.25,
"alnum_prop": 0.7528089887640449,
"repo_name": "bilgorajskim/soman",
"id": "0ee7e53bac4d2a3af00ba4ec81e9db5a1c773a59",
"size": "89",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/config/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "2811"
},
{
"name": "Batchfile",
"bytes": "5094"
},
{
"name": "CSS",
"bytes": "6968"
},
{
"name": "HTML",
"bytes": "20408"
},
{
"name": "JavaScript",
"bytes": "73516"
},
{
"name": "Makefile",
"bytes": "5560"
},
{
"name": "Nginx",
"bytes": "1275"
},
{
"name": "Python",
"bytes": "74190"
},
{
"name": "Shell",
"bytes": "9973"
}
],
"symlink_target": ""
} |
import numpy as np
import dill
from pathlib import Path
from pySDC.helpers.stats_helper import get_sorted
from pySDC.core.Collocation import CollBase as Collocation
from pySDC.implementations.problem_classes.Battery_2Condensators import battery_2condensators
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.transfer_classes.TransferMesh import mesh_to_mesh
from pySDC.projects.PinTSimE.piline_model import setup_mpl
import pySDC.helpers.plot_helper as plt_helper
from pySDC.core.Hooks import hooks
from pySDC.projects.PinTSimE.switch_estimator import SwitchEstimator
class log_data(hooks):
def post_step(self, step, level_number):
super(log_data, self).post_step(step, level_number)
# some abbreviations
L = step.levels[level_number]
L.sweep.compute_end_point()
self.add_to_stats(
process=step.status.slot,
time=L.time + L.dt,
level=L.level_index,
iter=0,
sweep=L.status.sweep,
type='current L',
value=L.uend[0],
)
self.add_to_stats(
process=step.status.slot,
time=L.time + L.dt,
level=L.level_index,
iter=0,
sweep=L.status.sweep,
type='voltage C1',
value=L.uend[1],
)
self.add_to_stats(
process=step.status.slot,
time=L.time + L.dt,
level=L.level_index,
iter=0,
sweep=L.status.sweep,
type='voltage C2',
value=L.uend[2],
)
self.increment_stats(
process=step.status.slot,
time=L.time,
level=L.level_index,
iter=0,
sweep=L.status.sweep,
type='restart',
value=1,
initialize=0,
)
def main(use_switch_estimator=True):
"""
A simple test program to do SDC/PFASST runs for the battery drain model using 2 condensators
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-13
level_params['dt'] = 1e-2
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'LOBATTO'
sweeper_params['num_nodes'] = 5
sweeper_params['QI'] = 'LU' # For the IMEX sweeper, the LU-trick can be activated for the implicit part
sweeper_params['initial_guess'] = 'zero'
# initialize problem parameters
problem_params = dict()
problem_params['Vs'] = 5.0
problem_params['Rs'] = 0.5
problem_params['C1'] = 1.0
problem_params['C2'] = 1.0
problem_params['R'] = 1.0
problem_params['L'] = 1.0
problem_params['alpha'] = 5.0
problem_params['V_ref'] = np.array([1.0, 1.0]) # [V_ref1, V_ref2]
problem_params['set_switch'] = np.array([False, False], dtype=bool)
problem_params['t_switch'] = np.zeros(np.shape(problem_params['V_ref'])[0])
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 20
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 20
controller_params['hook_class'] = log_data
# convergence controllers
convergence_controllers = dict()
if use_switch_estimator:
switch_estimator_params = {}
convergence_controllers[SwitchEstimator] = switch_estimator_params
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = battery_2condensators # pass problem class
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = imex_1st_order # pass sweeper
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params
description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class
if use_switch_estimator:
description['convergence_controllers'] = convergence_controllers
proof_assertions_description(description, problem_params)
# set time parameters
t0 = 0.0
Tend = 3.5
# instantiate controller
controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
Path("data").mkdir(parents=True, exist_ok=True)
fname = 'data/battery_2condensators.dat'
f = open(fname, 'wb')
dill.dump(stats, f)
f.close()
# filter statistics by number of iterations
iter_counts = get_sorted(stats, type='niter', sortby='time')
# compute and print statistics
min_iter = 20
max_iter = 0
f = open('battery_2condensators_out.txt', 'w')
niters = np.array([item[1] for item in iter_counts])
out = ' Mean number of iterations: %4.2f' % np.mean(niters)
f.write(out + '\n')
print(out)
for item in iter_counts:
out = 'Number of iterations for time %4.2f: %1i' % item
f.write(out + '\n')
# print(out)
min_iter = min(min_iter, item[1])
max_iter = max(max_iter, item[1])
restarts = np.array(get_sorted(stats, type='restart', recomputed=False))[:, 1]
print("Restarts for dt: ", level_params['dt'], " -- ", np.sum(restarts))
assert np.mean(niters) <= 10, "Mean number of iterations is too high, got %s" % np.mean(niters)
f.close()
plot_voltages(description, use_switch_estimator)
return np.mean(niters)
def plot_voltages(description, use_switch_estimator, cwd='./'):
"""
Routine to plot the numerical solution of the model
"""
f = open(cwd + 'data/battery_2condensators.dat', 'rb')
stats = dill.load(f)
f.close()
# convert filtered statistics to list of iterations count, sorted by process
cL = get_sorted(stats, type='current L', sortby='time')
vC1 = get_sorted(stats, type='voltage C1', sortby='time')
vC2 = get_sorted(stats, type='voltage C2', sortby='time')
times = [v[0] for v in cL]
setup_mpl()
fig, ax = plt_helper.plt.subplots(1, 1, figsize=(4.5, 3))
ax.plot(times, [v[1] for v in cL], label='$i_L$')
ax.plot(times, [v[1] for v in vC1], label='$v_{C_1}$')
ax.plot(times, [v[1] for v in vC2], label='$v_{C_2}$')
if use_switch_estimator:
t_switch_plot = np.zeros(np.shape(description['problem_params']['t_switch'])[0])
for i in range(np.shape(description['problem_params']['t_switch'])[0]):
t_switch_plot[i] = description['problem_params']['t_switch'][i]
ax.axvline(x=t_switch_plot[i], linestyle='--', color='k', label='Switch {}'.format(i + 1))
ax.legend(frameon=False, fontsize=12, loc='upper right')
ax.set_xlabel('Time')
ax.set_ylabel('Energy')
fig.savefig('data/battery_2condensators_model_solution.png', dpi=300, bbox_inches='tight')
plt_helper.plt.close(fig)
def proof_assertions_description(description, problem_params):
"""
    Function to prove the assertions (helper function to keep the code cleaner)
"""
assert problem_params['alpha'] > problem_params['V_ref'][0], 'Please set "alpha" greater than "V_ref1"'
assert problem_params['alpha'] > problem_params['V_ref'][1], 'Please set "alpha" greater than "V_ref2"'
assert problem_params['V_ref'][0] > 0, 'Please set "V_ref1" greater than 0'
assert problem_params['V_ref'][1] > 0, 'Please set "V_ref2" greater than 0'
assert type(problem_params['V_ref']) == np.ndarray, '"V_ref" needs to be an array (of type float)'
assert not problem_params['set_switch'][0], 'First entry of "set_switch" needs to be False'
assert not problem_params['set_switch'][1], 'Second entry of "set_switch" needs to be False'
assert not type(problem_params['t_switch']) == float, '"t_switch" has to be an array with entry zero'
assert problem_params['t_switch'][0] == 0, 'First entry of "t_switch" needs to be zero'
assert problem_params['t_switch'][1] == 0, 'Second entry of "t_switch" needs to be zero'
assert 'errtol' not in description['step_params'].keys(), 'No exact solution known to compute error'
assert 'alpha' in description['problem_params'].keys(), 'Please supply "alpha" in the problem parameters'
assert 'V_ref' in description['problem_params'].keys(), 'Please supply "V_ref" in the problem parameters'
if __name__ == "__main__":
main()
| {
"content_hash": "022959d64c8d45a6f81e7aa0d7d7c223",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 109,
"avg_line_length": 36.22406639004149,
"alnum_prop": 0.6406643757159222,
"repo_name": "Parallel-in-Time/pySDC",
"id": "c41cb8271236f26c8c7c6f827a0a2853e1154c91",
"size": "8730",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pySDC/projects/PinTSimE/battery_2condensators_model.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "4264000"
},
{
"name": "Python",
"bytes": "2450453"
},
{
"name": "Shell",
"bytes": "18105"
}
],
"symlink_target": ""
} |
from lib.graphics import *
from src.interface.IDrawable import *
from src.interface.Color import *
class Fence(IDrawable):
class DIRECTION(Enum):
HORIZONTAL = 0
VERTICAL = 1
def __init__(self, board, player):
self.board = board
self.player = player
def draw(self, color = None):
if not INTERFACE:
return
square = self.getSquare()
rectangleLength = 2*self.board.squareSize + self.board.innerSize
rectangleWidth = self.board.innerSize
if (self.direction == Fence.DIRECTION.HORIZONTAL):
rectangle = Rectangle(Point(square.left, square.top - rectangleWidth), Point(square.left + rectangleLength, square.top))
else:
rectangle = Rectangle(Point(square.left - rectangleWidth, square.top), Point(square.left, square.top + rectangleLength))
rectangle.setFill(self.player.color.value if color is None else color)
rectangle.setWidth(0)
rectangle.draw(self.board.window)
def place(self, coord, direction):
self.coord = coord
self.direction = direction
self.board.fences.append(self)
self.board.updateStoredValidActionsAfterFencePlacing(coord, direction)
self.draw()
def getSquare(self):
return self.board.getSquareAt(self.coord)
def __str__(self):
vertical = (self.direction == Fence.DIRECTION.VERTICAL)
return "%s-fence at %s" % ("V" if vertical else "H", self.coord)
| {
"content_hash": "84b1e5795808b75d7e89c65a3a7cd25c",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 132,
"avg_line_length": 34,
"alnum_prop": 0.6214833759590793,
"repo_name": "alainrinder/quoridor.py",
"id": "08c758ca17efcf5fe3c32dbaac017454d09afda6",
"size": "1656",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/interface/Fence.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "95359"
}
],
"symlink_target": ""
} |
"""
Proxied API URLs.
Served from the same domain the docs are served from,
so they can make use of features that require access to the docs' cookies.
"""
from django.conf.urls import url
from readthedocs.analytics.proxied_api import AnalyticsView
from readthedocs.api.v2.views.proxied import ProxiedEmbedAPI, ProxiedFooterHTML
from readthedocs.search.proxied_api import ProxiedPageSearchAPIView
api_footer_urls = [
url(r'footer_html/', ProxiedFooterHTML.as_view(), name='footer_html'),
url(r'search/$', ProxiedPageSearchAPIView.as_view(), name='search_api'),
url(r'embed/', ProxiedEmbedAPI.as_view(), name='embed_api'),
url(r'analytics/$', AnalyticsView.as_view(), name='analytics_api'),
]
urlpatterns = api_footer_urls
| {
"content_hash": "bf2a1232e5159202df321f1bed74c451",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 79,
"avg_line_length": 35.095238095238095,
"alnum_prop": 0.7530529172320217,
"repo_name": "rtfd/readthedocs.org",
"id": "2f5346a465dabfa56b5338d9c5cfd3b84cb99463",
"size": "737",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "readthedocs/api/v2/proxied_urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4515"
},
{
"name": "CSS",
"bytes": "66552"
},
{
"name": "Dockerfile",
"bytes": "205"
},
{
"name": "HTML",
"bytes": "196998"
},
{
"name": "JavaScript",
"bytes": "431128"
},
{
"name": "Makefile",
"bytes": "4594"
},
{
"name": "Python",
"bytes": "1821332"
},
{
"name": "Shell",
"bytes": "682"
}
],
"symlink_target": ""
} |
"""C4 dataset based on Common Crawl."""
import os
import gzip
import requests
from tqdm import tqdm
BASE_DOWNLOAD_PATH = "/tmp"
_VARIANTS = ["en", "realnewslike", "en.noblocklist", "en.noclean"]
_N_SHARDS_PER_SPLIT = {
"en": {
"train": 1024,
"validation": 8
},
"realnewslike": {
"train": 512,
"validation": 1
},
"en.noblocklist": {
"train": 1024,
"validation": 8
},
"en.noclean": {
"train": 7168,
"validation": 64
},
}
_DATA_URL = "https://huggingface.co/datasets/allenai/c4/resolve/607bd4c8450a42878aa9ddc051a65a055450ef87/{name}/c4-{split}.{index:05d}-of-{n_shards:05d}.json.gz"
def download(url: str, fname: str):
resp = requests.get(url, stream=True)
total = int(resp.headers.get('content-length', 0))
    # Can also replace 'file' with an io.BytesIO object
with open(fname, 'wb') as file, tqdm(
desc=fname,
total=total,
unit='iB',
unit_scale=True,
unit_divisor=1024,
) as bar:
for data in resp.iter_content(chunk_size=1024):
size = file.write(data)
bar.update(size)
def decompress(infile, tofile):
with open(infile, 'rb') as inf, open(tofile, 'w', encoding='utf8') as tof:
decom_str = gzip.decompress(inf.read()).decode('utf-8')
tof.write(decom_str)
if __name__ == "__main__":
for variant in _VARIANTS:
print('\n=============================================================')
print(f'Processing Variant: {variant}')
variant_dir = os.path.join(BASE_DOWNLOAD_PATH, variant)
try:
os.makedirs(variant_dir)
except FileExistsError:
pass
for split in ["train", "validation"]:
if split == "train":
continue
num_shards = _N_SHARDS_PER_SPLIT[variant][split]
print(f"Split: {split}, Shards: {num_shards}")
for index in range(num_shards):
url = _DATA_URL.format(
name=variant, split=split, index=index, n_shards=num_shards
)
filename = os.path.join(variant_dir, url.split("/")[-1])
# Downloading the file in GZIP format
if not os.path.isfile(filename):
print(f"Downloading: {url}...")
download(url, fname=filename)
else:
print(f"Already exists: {filename}...")
# Processing the file from GZIP to JSON
target_file = filename.replace(".gz", "")
if not os.path.isfile(target_file):
print(f"Decompressing: {filename}...")
decompress(filename, target_file)
else:
print(f"Decompressed file already exists: {target_file}")
| {
"content_hash": "0fb8ad22a7585c57a5bd78777ffde330",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 161,
"avg_line_length": 28.96,
"alnum_prop": 0.523135359116022,
"repo_name": "tensorflow/tensorrt",
"id": "5dcf3b23a75cbd430242c389ebad74b396d720b8",
"size": "2896",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tftrt/benchmarking-python/huggingface/t5/download_c4.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "46221"
},
{
"name": "CMake",
"bytes": "4340"
},
{
"name": "Jupyter Notebook",
"bytes": "2748791"
},
{
"name": "Python",
"bytes": "588488"
},
{
"name": "Shell",
"bytes": "103938"
},
{
"name": "Starlark",
"bytes": "1478"
}
],
"symlink_target": ""
} |
"""Tools for animation. Animation sources are GIFs from disk, which
have been made into a PygAnimation [1]_ object. Stateful animations
which represent objects, e.g., :class:`Walkabout` represents an
:class:`actor.Actor`.
Examples of "tools":
* functions for creating an animation from a single suface
* loading animations from disk
* adding frame-dependent positional data
* contextually-aware sprites
References:
.. [1] PygAnim:
http://inventwithpython.com/pyganim/
Warning:
Sometimes an "animation" can consist of one frame.
Note:
I wanna add support for loading character animations
from sprite sheets.
See Also:
* :mod:`util`
* :mod:`actor`
* :class:`Walkabout`
"""
import os
import copy
import glob
import itertools
import collections
try:
import ConfigParser as configparser
except ImportError:
import configparser
import pygame
import pyganim
from PIL import Image
from hypatia import util
from hypatia import render
from hypatia import constants
class BadWalkabout(Exception):
"""The supplied directory has no files which match ``*.gif.`` The
walkabout resource specified does not contain any GIFs.
See Also:
:meth:`Walkabout.__init__`
"""
def __init__(self, supplied_archive):
"""
Args:
supplied_archive (str): :class:`Walkabout` resource archive
which *should* have contained files of pattern
``*.gif,`` but didn't.
"""
super(BadWalkabout, self).__init__(supplied_archive)
class AnimAnchors(object):
"""The anchors per frame of a :class:`pyganim.PygAnimation`. Anchors
are coordinates belonging to a :class:`pygame.Surface`, which can be
translated to coordinates belonging to another surface.
With :class:`AnimAnchors` you can keep one animation "pinned" or
"glued" to another. This can help with adding decals to a
    :class:`Walkabout` animation (like a hat!).
Attributes:
anchor_points (dict): key is anchor label/group, value is a list
of :class:`AnchorPoint` instances whose index corresponds to
respective :class:`pyganim.PygAnimation` frame index.
anchor_groups (list): the names/labels of the anchor groups,
e.g., *head_anchor*.
Example:
>>> resource = util.Resource('walkabouts', 'debug')
>>> anchors = AnimAnchors.from_config(resource['walk_north.ini'])
>>> anchors.anchor_points['head_anchor']
[<hypatia.animations.AnchorPoint object at 0x...>, ...]
>>> anchors.anchor_groups
['head_anchor']
Note:
You can modify anchors--there's no reason they have to be
immutable. You can even build them yourself. Remember the
tall cacti from Mario? How about a spinning mace?
See Also:
* :class:`AnchorPoint`
* :meth:`Walkabout.blit`
"""
def __init__(self, anchor_points, anchor_groups):
self.anchor_points = anchor_points
self.anchor_groups = anchor_groups
@classmethod
def from_config(cls, anchor_ini):
"""Instantiate AnimAnchors using the anchor_ini config.
The anchor_ini derives from an INI like this:
[head_anchor]
0=0,2
1=1,3
2=2,2
`[head_anchor]` is the anchor label. The stuff below it is
`head_anchor`'s position for frames 0, 1, and 2.
In the above example, `head_anchor` has a coordinate (anchor)
for three different frames:
* frame 0 at (0, 2)
* frame 1 at (1, 3)
* frame 2 at (2, 2)
Note:
`anchor_ini` should be provided from a
:class:`util.Resource`. See example below.
Args:
anchor_ini (configparser): configparser object.
Example:
>>> resource = util.Resource('walkabouts', 'debug')
>>> AnimAnchors.from_config(resource['walk_north.ini'])
<hypatia.animations.AnimAnchors object at 0x...>
Returns:
AnimAnchors: anchor points and groups collected from an INI
"""
anchor_point_groups = anchor_ini.sections()
# key is group, value is list of frame coord positions
anchors = {name: [] for name in anchor_point_groups}
for anchor_point_group in anchor_point_groups:
for __, frame_anchor in anchor_ini.items(anchor_point_group):
x, y = frame_anchor.split(',')
anchor_point = AnchorPoint(int(x), int(y))
anchors[anchor_point_group].append(anchor_point)
return AnimAnchors(anchors, anchor_point_groups)
def get_anchor_point(self, anchor_point_group, frame_index):
"""Return an :class:`AnchorPoint` corresponding to group name
and frame index.
Args:
anchor_point_group (str): name of the anchor point group
frame_index (int): which frame for group's anchor
Returns:
AnchorPoint: --
Note:
Will simply return last anchor point for group if an anchor
isn't defined for frame.
Example:
>>> resource = util.Resource('walkabouts', 'debug')
>>> config = resource['walk_north.ini']
>>> animation_anchors = AnimAnchors.from_config(config)
>>> animation_anchors.get_anchor_point('head_anchor', 0)
<hypatia.animations.AnchorPoint object at 0x...>
"""
try:
return self.anchor_points[anchor_point_group][frame_index]
except IndexError:
return self.anchor_points[anchor_point_group][-1]
class AnchorPoint(object):
"""A coordinate on a surface which is used for pinning to another
surface AnchorPoint. Used when attempting to afix one surface to
another, lining up their corresponding anchorpoints.
Attributes:
x (int): x-axis coordinate on a surface to place anchor at
y (int): x-axis coordinate on a surface to place anchor at
"""
def __init__(self, x, y):
"""Create an AnchorPoint at coordinate (x, y).
Args:
x (int): the x-axis pixel position
y (int): the y-axis pixel position
Example:
>>> anchor_point = AnchorPoint(5, 3)
>>> anchor_point.x
5
>>> anchor_point.y
3
"""
self.x = x
self.y = y
def __add__(self, other_anchor_point):
"""Adds the x, y values of this and another anchor point.
Args:
other_anchor_point (AnchorPoint): the AnchorPoint
coordinates to add to this AnchorPoint's coordinates.
Returns:
(x, y) tuple: the new x, y coordinate
Example:
>>> anchor_point_a = AnchorPoint(4, 1)
>>> anchor_point_b = AnchorPoint(2, 0)
>>> anchor_point_a + anchor_point_b
(6, 1)
"""
return (self.x + other_anchor_point.x,
self.y + other_anchor_point.y)
def __sub__(self, other_anchor_point):
"""Find the difference between this anchor and another.
Args:
other_anchor_point (AnchorPoint): the AnchorPoint
coordinates to subtract from this
AnchorPoint's coordinates.
Returns:
tuple: the (x, y) difference between this
anchor point and the other supplied.
Example:
>>> anchor_point_a = AnchorPoint(4, 1)
>>> anchor_point_b = AnchorPoint(2, 0)
>>> anchor_point_a - anchor_point_b
(2, 1)
"""
return (self.x - other_anchor_point.x,
self.y - other_anchor_point.y)
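# A small illustrative sketch (hypothetical coordinates, not from the original
# module): the difference between a parent frame's anchor and a child frame's
# anchor gives the offset at which the child surface can be pinned.
#
#   parent_anchor = AnchorPoint(10, 4)  # e.g. head_anchor on a body frame
#   child_anchor = AnchorPoint(3, 1)    # e.g. head_anchor on a hat frame
#   child_topleft = parent_anchor - child_anchor  # (7, 3)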
class Walkabout(object):
"""Sprite animations for a character which walks around.
Contextually-aware graphical representation.
    The walkabout sprites are loaded from the supplied
    walkabout_directory; they are files following an
    action_direction.gif filename convention.
Blits its children relative to its own anchor.
Attributes:
resource (Resource): --
animations (dict): 2D dictionary [action][direction] whose
values are PygAnimations.
animation_anchors (dict): 2D dictionary [action][direction]
whose values are AnimAnchors.
rect (pygame.Rect): position on tilemap
size (tuple): the size of the animation in pixels.
action (constants.Action): --
        direction (constants.Direction): --
topleft_float (x,y tuple): --
position_rect
"""
def __init__(self, directory, position=None, children=None):
"""
Args:
directory (str): directory containing (animated)
walkabout GIFs. Assumed parent is data/walkabouts/
position (tuple): (x, y) coordinates (integers)
referring to absolute pixel coordinate.
children (list|None): Walkabout objects drawn relative to
this Walkabout instance.
Example:
>>> hat = Walkabout('hat')
>>> Walkabout('debug', position=(44, 55), children=[hat])
<hypatia.animations.Walkabout object at 0x...>
"""
# the attributes we're generating
self.animations = {}
self.animation_anchors = {}
self.actions = []
self.directions = []
self.size = None # will be removed in future?
if not position:
position = (0, 0)
topleft_float = (float(position[0]), float(position[1]))
# specify the files to load
# how will i glob a resource
resource = util.Resource('walkabouts', directory)
sprite_files = resource.get_type('.gif')
# no sprites matching pattern!
if not sprite_files:
raise BadWalkabout(directory)
for sprite_path in sprite_files.keys():
file_name, file_ext = os.path.splitext(sprite_path)
file_name = os.path.split(file_name)[1]
if file_name == 'only':
action = constants.Action.stand
direction = constants.Direction.south
else:
action, direction = file_name.split('_', 1)
direction = getattr(constants.Direction, direction)
action = getattr(constants.Action, action)
self.actions.append(action)
self.directions.append(direction)
# load pyganim from gif file
animation = sprite_files[sprite_path]
try:
self.animations[action][direction] = animation
except KeyError:
self.animations[action] = {direction: animation}
# load anchor points
            # error here: not loading all the time
            # maybe make the ini explicit? this caused problems
associated_ini_name = file_name + '.ini'
if associated_ini_name in resource:
anchors_ini = resource[associated_ini_name]
anim_anchors = AnimAnchors.from_config(anchors_ini)
try:
self.animation_anchors[action][direction] = anim_anchors
except KeyError:
self.animation_anchors[action] = {direction: anim_anchors}
else:
self.animation_anchors = None
# ... set the rest of the attribs
self.resource = resource
self.size = animation.getMaxSize()
self.rect = pygame.Rect(position, self.size)
self.topleft_float = topleft_float
self.action = constants.Action.stand
self.direction = constants.Direction.south
self.child_walkabouts = children or []
def __getitem__(self, key):
"""Fetch sprites associated with action (key).
Args:
key (constants.Action): return dictionary of
sprites for this action (key).
Returns:
dict: sprites associated with action supplied (key)
Examples:
>>> walkabout = Walkabout('debug')
>>> walkabout[constants.Action.walk][constants.Direction.south]
<pyganim.PygAnimation object at 0x...>
"""
return self.animations[key]
def current_animation(self):
"""Returns the animation selected by the current action
and direction.
Returns:
PygAnim: the animation associated with this Walkabout's
current action and direction.
Example:
>>> walkabout = Walkabout('debug')
>>> walkabout.current_animation()
<pyganim.PygAnimation object at 0x...>
"""
return self.animations[self.action][self.direction]
def get_anchors(self):
"""Get anchors per frame in a GIF by identifying th ecoordinate
of a specific color.
Warning:
This is an old, but still useful way of loading anchors for
an animation.
"""
anchors = {a: {d: [] for d in self.directions} for a in self.actions}
for action, directions in self.animations.items():
for direction, animation in directions.items():
for surface_frame in animation._images:
anchor = self.get_anchor(surface_frame)
anchors[action][direction].append(anchor)
return anchors
def get_anchor(self, surface):
"""Locate the anchor coordinate by identifying which pixel
coordinate matches color.
Args:
surface (pygame.Surface): surface to scan for color and
return the coord which color appears
Returns:
tuple: (x, y) pixel coordinate where color shows up.
Warning:
Old way of defining anchor points, but still handy!
"""
x, y = surface.get_size()
debug_color = pygame.Color(255, 136, 255)
for coord in itertools.product(range(0, x), range(0, y)):
if surface.get_at(coord) == debug_color:
return coord
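    # Illustrative sketch (added for this write-up, not part of the original
    # module): a tiny surface with the debug color set at (1, 2) would make
    # get_anchor return that coordinate.
    #
    #     surface = pygame.Surface((3, 3))
    #     surface.fill(pygame.Color(0, 0, 0))
    #     surface.set_at((1, 2), pygame.Color(255, 136, 255))
    #     walkabout.get_anchor(surface)  # -> (1, 2)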
def blit(self, screen, offset):
"""Draw the appropriate/active animation to screen.
Note:
Should go to render module?
Args:
screen (pygame.Surface): the primary display/screen.
offset (x, y tuple): the x, y coords of the absolute
starting top left corner for the current screen/viewport
position.
"""
x, y = self.topleft_float
x -= offset[0]
y -= offset[1]
position_on_screen = (x, y)
pyganim_gif = self.current_animation()
pyganim_gif.blit(screen, position_on_screen)
# the rest of this is for children/anchors
if self.animation_anchors is None:
return None
pyganim_frame_index = pyganim.findStartTime(pyganim_gif._startTimes,
pyganim_gif.elapsed)
current_frame_surface = pyganim_gif.getFrame(pyganim_frame_index)
# anchors are all completely wrong
animation_anchors = self.animation_anchors[self.action][self.direction]
frame_anchor = animation_anchors.get_anchor_point('head_anchor',
pyganim_frame_index)
parent_anchor = AnchorPoint(position_on_screen[0] + frame_anchor.x,
position_on_screen[1] + frame_anchor.y)
for child_walkabout in self.child_walkabouts:
# draw at position + difference in child anchor
child_anim_anchor = (child_walkabout
.animation_anchors[self.action]
[self.direction])
child_frame_anchor = (child_anim_anchor
.get_anchor_point('head_anchor',
pyganim_frame_index))
child_position = parent_anchor - child_frame_anchor
child_anim = child_walkabout.current_animation()
child_anim.blit(screen, child_position)
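    # Worked example of the child-anchor math above (illustrative numbers): if
    # the parent draws at screen position (10, 20) and its 'head_anchor' for the
    # current frame is AnchorPoint(3, 4), parent_anchor is AnchorPoint(13, 24).
    # A child whose own 'head_anchor' is AnchorPoint(1, 1) is then blitted at
    # parent_anchor - child_frame_anchor == (12, 23), via AnchorPoint.__sub__.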
def runtime_setup(self):
"""Perform actions to setup the walkabout. Actions performed
once pygame is running and walkabout has been initialized.
Convert and play all the animations, run init for children.
Note:
It MAY be bad to leave the sprites in play mode in startup
by default.
"""
if len(self.animations) == 1:
actions = (constants.Action.stand,)
directions = (constants.Direction.south,)
else:
actions = (constants.Action.walk, constants.Action.stand)
directions = (constants.Direction.north, constants.Direction.south,
constants.Direction.east, constants.Direction.west)
for action in actions:
for direction in directions:
animated_sprite = self.animations[action][direction]
animated_sprite.convert_alpha()
animated_sprite.play()
for walkabout_child in self.child_walkabouts:
walkabout_child.runtime_setup()
def palette_cycle(surface):
"""get_palette is not sufficient; it generates superflous colors.
Note:
Need to see if I can convert 32bit alpha to 8 bit temporarily,
to be converted back at end of palette/color manipulations.
"""
original_surface = surface.copy() # don't touch! used for later calc
width, height = surface.get_size()
ordered_color_list = []
seen_colors = set()
for coordinate in itertools.product(range(0, width), range(0, height)):
color = surface.get_at(coordinate)
color = tuple(color)
if color in seen_colors:
continue
ordered_color_list.append(color)
seen_colors.add(color)
# reverse the color list but not the pixel arrays, then replace!
old_color_list = collections.deque(ordered_color_list)
new_surface = surface.copy()
frames = []
for rotation_i in range(len(ordered_color_list)):
new_surface = new_surface.copy()
new_color_list = copy.copy(old_color_list)
new_color_list.rotate(1)
color_translations = dict(zip(old_color_list, new_color_list))
        # replace each former color with the color from new_color_list
for coordinate in itertools.product(range(0, width), range(0, height)):
color = new_surface.get_at(coordinate)
color = tuple(color)
new_color = color_translations[color]
new_surface.set_at(coordinate, new_color)
frame = new_surface.copy()
frames.append((frame, 0.2))
old_color_list = copy.copy(new_color_list)
return pyganim.PygAnimation(frames)
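# Worked example of the rotation step above (illustrative): with an ordered
# color list [A, B, C], deque.rotate(1) yields [C, A, B], so the first frame
# maps {A -> C, B -> A, C -> B}; the rotated list then becomes old_color_list
# for the next frame, cycling the whole palette once per len(colors) frames.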
| {
"content_hash": "8558f7f78fffc3b0a328341726211bcc",
"timestamp": "",
"source": "github",
"line_count": 605,
"max_line_length": 79,
"avg_line_length": 32.236363636363635,
"alnum_prop": 0.5722196585140747,
"repo_name": "brechin/hypatia",
"id": "8ab4294e4f6e6bf3134ba4e17c2ea9b1df44f594",
"size": "19614",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hypatia/animations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "25"
},
{
"name": "Python",
"bytes": "101038"
},
{
"name": "Shell",
"bytes": "6424"
}
],
"symlink_target": ""
} |
from shop.models.ordermodel import OrderExtraInfo, Order
from shop.util.order import get_order_from_request
class ShopAPI(object):
"""
A base-baseclass for shop APIs.
Both payment and shipping backends need some common functions from the shop
interface (for example get_order() is useful in both cases). To reduce code
    duplication, these common methods are defined here and inherited by shop
    interfaces (DRY).
    Another approach would be to stuff everything here, but I think that would
    risk over-bloating this one class.
This is debatable and relatively easy to change later anyway.
Define all functions common to both the shipping and the payment shop APIs
here
    PLEASE: When adding functions here, please write a short description of
    them in BaseShippingBackend and BasePaymentBackend; future implementers
    will thank you :)
"""
def get_order(self, request):
"""
Returns the order object for the current shopper.
This is called from the backend's views as:
        >>> order = self.shop.get_order(request)
"""
# it might seem a bit strange to simply forward the call to a helper,
# but this avoids exposing the shop's internal workings to module
# writers
return get_order_from_request(request)
def add_extra_info(self, order, text):
"""
Add an extra info text field to the order
"""
OrderExtraInfo.objects.create(text=text, order=order)
def is_order_paid(self, order):
"""Whether the passed order is fully paid or not."""
return order.is_paid()
    is_order_payed = is_order_paid  # Backward compatibility, deprecated spelling
    def is_order_completed(self, order):
        """Whether the passed order is completed or not."""
        return order.is_completed()
def get_order_total(self, order):
"""The total amount to be charged for passed order"""
return order.order_total
def get_order_subtotal(self, order):
"""The total amount to be charged for passed order"""
return order.order_subtotal
def get_order_short_name(self, order):
"""
A short name for the order, to be displayed on the payment processor's
        website. It should be as human-readable as possible.
"""
return "%s-%s" % (order.pk, order.order_total)
def get_order_unique_id(self, order):
"""
A unique identifier for this order. This should be our shop's reference
number. This is sent back by the payment processor when confirming
payment, for example.
"""
return order.pk
def get_order_for_id(self, id):
"""
Get an order for a given ID. Typically, this would be used when the
        backend receives a notification from the transaction processor (e.g. a
        PayPal IPN), with an attached "invoice ID" or "order ID", which should
then be used to get the shop's order with this method.
"""
return Order.objects.get(pk=id)
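# Illustrative sketch (added for this write-up, not part of django-shop): a
# minimal payment backend holding a ShopAPI instance and using its helpers.
# The class name and the charge() method are hypothetical.
class ExamplePaymentBackend(object):
    backend_name = "example"

    def __init__(self, shop=None):
        # Real backends receive the shop API object from the backend pool; a
        # fresh ShopAPI is used here purely for illustration.
        self.shop = shop or ShopAPI()

    def charge(self, request):
        order = self.shop.get_order(request)
        return self.shop.get_order_total(order)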
| {
"content_hash": "38c5edff89c0f82b134c47eb6f7b64ae",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 79,
"avg_line_length": 37.111111111111114,
"alnum_prop": 0.6636726546906188,
"repo_name": "bmihelac/django-shop",
"id": "f300f8875af5c0d89ce3e26c6bad47b6d2d46ce1",
"size": "3029",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "shop/shop_api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "386859"
},
{
"name": "Shell",
"bytes": "5030"
}
],
"symlink_target": ""
} |
import sys as s
import subprocess as sb
from time import time
from parsingInfo import parseInfo
from actions import userNodeSelectionAct,randomSubSamplingAct,parseList
from featuresVector import featuresCreate
from misc import mergeList
from preformat import process
#/!\ The list of sample IDs is supposed to be the same as the list of .match files! Each .match file must correspond to one single sample!
def main():
iMatrix = raw_input("Write down the CSV file name of the data matrix in the folder \"meta\" [ without the extension .csv ]\n")
if (iMatrix == ""):
iMatrix = "Info"
fastaFileName = raw_input("Write down the FASTA file names in the folder \"meta\" [ without the extension .fasta ]\n")
if (fastaFileName == ""):
fastaFileName = "GREENGENES_gg16S_unaligned_10022015"
print "/!\ Data getting parsed..."
try:
samplesInfoList,infoList = parseInfo(iMatrix)
filenames = [sample[0] for sample in samplesInfoList]
except IOError:
print "\nERROR: Maybe the filename",iMatrix,".csv does not exist in \"meta\" folder.\n"
s.exit(0)
print "-- End of parsing\n"
sb.call("ls ./meta/match > sampleidlist",shell=True)
sampleidlist = sb.check_output("sed 's/.match//g' sampleidlist | sed 's/testfiles//g' | sed '/^$/d'",shell=True).split()
sb.call("rm -f sampleidlist",shell=True)
result = sb.check_output("ls ./meta/match/testfiles",shell=True)
if not result:
print "/!\ Pre-processing files for parsing..."
process(sampleidlist)
print "/!\ Pre-processing done."
print "/!\ Constructing the features vectors..."
sampleList = mergeList(sampleidlist,filenames)
try:
matchingNodes,idSequences,_,_ = featuresCreate(sampleList,fastaFileName)
except ValueError:
print "/!\ ERROR: Please look at the line above."
print "/!\ ERROR: If the line above is blank, it may be an uncatched ValueError.\n"
s.exit(0)
print "-- End of construction\n"
dataArray = [samplesInfoList,infoList,idSequences,sampleList,matchingNodes]
answer = ""
while not ((answer == "exit") or (answer == "exit()") or (answer == "quit")):
try:
print "What do you want to do?"
print "[Write down the number matching with the action required. Details are in README file]"
print " 1: User node selection"
print " 2: Random sub-sampling"
print "[To quit, write down exit]"
answer = raw_input("Your answer?\n")
if (answer =="1"):
userNodeSelectionAct(dataArray)
print "-- End \n"
elif (answer == "2"):
randomSubSamplingAct(dataArray)
print "-- End \n"
elif not ((answer == "exit") or (answer == "exit()") or (answer == "quit")):
print "\n/!\ ERROR: Please enter a number between 1 and 2 included, or 'exit' if you want to quit."
raise ValueError
except ValueError:
print "/!\ ERROR: Please look at the line above."
print "/!\ ERROR: If the line above is blank, it may be an uncatched ValueError.\n"
| {
"content_hash": "f90b61d5c086f13276f129a65c1fe4f3",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 138,
"avg_line_length": 48.36363636363637,
"alnum_prop": 0.6284461152882206,
"repo_name": "kuredatan/taxoclassifier",
"id": "9256919fad98cb7ba745f4a099d09723c4802381",
"size": "3192",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "52677"
},
{
"name": "Shell",
"bytes": "922"
}
],
"symlink_target": ""
} |
import sys
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy
from .._version import VERSION
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class ContainerServiceClientConfiguration(Configuration): # pylint: disable=too-many-instance-attributes
"""Configuration for ContainerServiceClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: Subscription credentials which uniquely identify Microsoft Azure
subscription. The subscription ID forms part of the URI for every service call. Required.
:type subscription_id: str
:keyword api_version: Api Version. Default value is "2018-08-01-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(self, credential: "AsyncTokenCredential", subscription_id: str, **kwargs: Any) -> None:
super(ContainerServiceClientConfiguration, self).__init__(**kwargs)
api_version = kwargs.pop("api_version", "2018-08-01-preview") # type: Literal["2018-08-01-preview"]
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
self.credential = credential
self.subscription_id = subscription_id
self.api_version = api_version
self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"])
kwargs.setdefault("sdk_moniker", "mgmt-containerservice/{}".format(VERSION))
self._configure(**kwargs)
def _configure(self, **kwargs: Any) -> None:
self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get("authentication_policy")
if self.credential and not self.authentication_policy:
self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(
self.credential, *self.credential_scopes, **kwargs
)
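# Hedged usage sketch (added for illustration, not part of the generated SDK):
# this configuration is normally built by ContainerServiceClient itself, but it
# can be constructed directly; the subscription id below is a placeholder.
#
#     from azure.identity.aio import DefaultAzureCredential
#     config = ContainerServiceClientConfiguration(
#         credential=DefaultAzureCredential(),
#         subscription_id="00000000-0000-0000-0000-000000000000",
#     )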
| {
"content_hash": "128556ca68c5947dc4d18dfec6920f0f",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 108,
"avg_line_length": 53.4,
"alnum_prop": 0.7251512532411409,
"repo_name": "Azure/azure-sdk-for-python",
"id": "5d4d41c05cac63d16919a896dec93f58ed5e5fec",
"size": "3939",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2018_08_01_preview/aio/_configuration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from tastypie.test import ResourceTestCase
from mozdns.tests.utils import create_fake_zone, random_label
from core.registration.static.models import StaticReg
from core.hwadapter.models import HWAdapter
from core.group.models import Group
from systems.models import (
System, SystemRack, SystemStatus, OperatingSystem, Allocation,
ServerModel, SystemType
)
from systems.tests.utils import create_fake_host
import simplejson as json
from datetime import datetime
API_VERSION = '1'
class TestCaseUtils(object):
def localize_url(self, url):
if 'en-US' not in url:
url = url.replace('core/api', 'en-US/core/api')
return url
class CoreAPITests(TestCaseUtils):
object_list_url = "/en-US/core/api/v{0}_core/{1}/"
object_url = "/en-US/core/api/v{0}_core/{1}/{2}/"
def setUp(self):
super(CoreAPITests, self).setUp()
def test_create(self):
resp, post_data = self.generic_create(self.post_data())
new_object_url = resp['Location']
new_resp = self.api_client.get(
new_object_url, format='json', follow=True
)
self.assertValidJSONResponse(new_resp)
new_obj_data = json.loads(new_resp.content)
self.compare_data(post_data, new_obj_data)
def compare_data(self, old_data, new_obj_data):
for key in old_data.keys():
self.assertEqual(old_data[key], new_obj_data[key])
def test_update(self):
        # Are these tests failing? See this bug.
# https://github.com/toastdriven/django-tastypie/issues/597
# Please monkey patch tastypie
# tastypie/test.py Line
# diff --git a/tastypie/test.py b/tastypie/test.py
# index e395158..27f813f 100644
# --- a/tastypie/test.py
# +++ b/tastypie/test.py
# @@ -161,6 +161,8 @@ class TestApiClient(object):
#
# if data is not None:
# kwargs['data'] = self.serializer.serialize(data, format=content_type)
# + if content_type == 'application/json':
# + kwargs['data'] = str(kwargs['data'])
#
# if authentication is not None:
# kwargs['HTTP_AUTHORIZATION'] = authentication
post_data = self.post_data()
resp, post_data = self.generic_create(post_data)
new_object_url = resp['Location']
patch_data = self.post_data()
update_resp, patch_data = self.generic_update(new_object_url,
patch_data)
# Now make sure the data used to patch is sticking to the model.
patch_resp = self.api_client.get(
new_object_url, format='json', follow=True
)
self.assertValidJSONResponse(patch_resp)
patch_obj_data = json.loads(patch_resp.content)
self.compare_data(patch_data, patch_obj_data)
def test_delete(self):
obj_count = self.test_type.objects.count()
resp, post_data = self.generic_create(self.post_data())
new_object_url = self.localize_url(resp['Location'])
self.assertEqual(self.test_type.objects.count(), obj_count + 1)
resp = self.api_client.delete(new_object_url, format='json')
self.assertHttpAccepted(resp)
self.assertEqual(self.test_type.objects.count(), obj_count)
def generic_update(self, patch_url, patch_data, assertResponse=None):
patch_url = self.localize_url(patch_url)
obj_count = self.test_type.objects.count()
resp = self.api_client.patch(patch_url, format='json',
data=patch_data)
if not assertResponse:
self.assertHttpAccepted(resp)
else:
assertResponse(resp)
        # Verify no new object has been added.
self.assertEqual(self.test_type.objects.count(), obj_count)
return resp, patch_data
def generic_create(self, post_data, assertResponse=None, fail=False):
# Check how many are there first.
obj_count = self.test_type.objects.count()
create_url = self.object_list_url.format(
API_VERSION, str(self.test_name).lower())
resp = self.api_client.post(create_url, format='json', data=post_data)
if assertResponse:
assertResponse(resp)
else:
self.assertHttpCreated(resp)
# Verify a new one has been added.
if not fail:
self.assertEqual(self.test_type.objects.count(), obj_count + 1)
return resp, post_data
def test_changing_only_one_field(self):
resp, post_data = self.generic_create(self.post_data())
new_object_url = self.localize_url(resp['Location'])
change_post_data = {}
change_post_data['description'] = "==DIFFERENT=="
post_data['description'] = "==DIFFERENT=="
resp, patch_data = self.generic_update(
new_object_url, change_post_data
)
new_resp = self.api_client.get(
new_object_url, format='json', follow=True
)
updated_obj_data = json.loads(new_resp.content)
self.compare_data(post_data, updated_obj_data)
class HWAdapterTest(CoreAPITests, ResourceTestCase):
test_type = HWAdapter
test_name = 'hwadapter'
def setUp(self):
self.num = 0
create_fake_zone('2.ip6.arpa', suffix="")
self.domain = create_fake_zone('foo.mozilla.com', suffix='')
self.s = create_fake_host(hostname='foo.mozilla.com')
self.sreg = StaticReg.objects.create(
label='', domain=self.domain, ip_str='2222:123::', ip_type='6',
system=self.s
)
self.s = create_fake_host(hostname='foobar.mozilla.com')
self.g = Group.objects.create(name='foobar')
super(HWAdapterTest, self).setUp()
def compare_data(self, old_data, new_obj_data):
for key in old_data.keys():
if key in ('sreg', 'group'):
continue
self.assertEqual(old_data[key], new_obj_data[key])
def post_data(self):
return {
'description': random_label(),
'sreg': self.sreg.pk,
'mac': '11:22:33:44:55:66'
}
class SystemTest(CoreAPITests, ResourceTestCase):
test_type = System
test_name = 'system'
def setUp(self):
self.operating_system = OperatingSystem.objects.create()
self.server_model = ServerModel.objects.create()
self.allocation = Allocation.objects.create(name='foo')
self.system_rack = SystemRack.objects.create()
self.system_type = SystemType.objects.create()
self.system_status = SystemStatus.objects.create()
self.change_password_date = datetime(2002, 12, 25)
super(SystemTest, self).setUp()
def compare_data(self, old_data, new_obj_data):
for key in old_data.keys():
if key in ('operating_system', 'operating_system', 'server_model',
'allocation', 'system_rack', 'system_type',
'system_status'):
self.assertEqual(
getattr(self, key).pk, new_obj_data[key]['pk']
)
elif key == 'change_password':
self.assertTrue(new_obj_data[key].startswith(old_data[key]))
else:
self.assertEqual(old_data[key], new_obj_data[key])
def post_data(self):
return {
'operating_system': self.operating_system.pk,
'server_model': self.server_model.pk,
'allocation': self.allocation.pk,
'system_rack': self.system_rack.pk,
'system_type': self.system_type.pk,
'system_status': self.system_status.pk,
'hostname': '{0}.{1}.mozilla.com'.format(
random_label(), random_label()
),
'serial': '1234',
'oob_ip': '10.2.3.4',
'asset_tag': 'foobar',
'notes': 'foobar notes',
'rack_order': '1.21',
'switch_ports': 'hihi',
'patch_panel_port': 'derpdaderp',
'oob_switch_port': 'derpdaderp',
'purchase_date': '2012-08-01',
'purchase_price': '$3000',
'change_password': self.change_password_date.isoformat(),
'warranty_start': '2012-08-01',
'warranty_end': '2012-08-04'
}
def test_changing_only_one_field(self):
# Systems don't have descriptions, they have notes
resp, post_data = self.generic_create(self.post_data())
new_object_url = self.localize_url(resp['Location'])
change_post_data = {}
change_post_data['notes'] = "==DIFFERENT=="
post_data['notes'] = "==DIFFERENT=="
resp, patch_data = self.generic_update(
new_object_url, change_post_data
)
new_resp = self.api_client.get(
new_object_url, format='json', follow=True
)
updated_obj_data = json.loads(new_resp.content)
self.compare_data(post_data, updated_obj_data)
| {
"content_hash": "f97e77d624a9f8976c33316101be2a7b",
"timestamp": "",
"source": "github",
"line_count": 233,
"max_line_length": 79,
"avg_line_length": 38.836909871244636,
"alnum_prop": 0.5883523041220025,
"repo_name": "rtucker-mozilla/inventory",
"id": "fffc68aa14105166000de53b68697f4b781db547",
"size": "9049",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "core/api/tests/basic.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5104"
},
{
"name": "CSS",
"bytes": "362837"
},
{
"name": "CoffeeScript",
"bytes": "9538"
},
{
"name": "HTML",
"bytes": "1195738"
},
{
"name": "JavaScript",
"bytes": "1530665"
},
{
"name": "Makefile",
"bytes": "14421"
},
{
"name": "PHP",
"bytes": "27273"
},
{
"name": "Python",
"bytes": "3642241"
},
{
"name": "Shell",
"bytes": "1783"
}
],
"symlink_target": ""
} |
import ConfigParser
import json
import logging
import os
import subprocess
import re
import time
import unittest
from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import UnexpectedAlertPresentException
from autothreadharness import settings
from autothreadharness.exceptions import FailError, FatalError, GoldenDeviceNotEnoughError
from autothreadharness.harness_controller import HarnessController
from autothreadharness.helpers import HistoryHelper
from autothreadharness.open_thread_controller import OpenThreadController
from autothreadharness.pdu_controller_factory import PduControllerFactory
from autothreadharness.rf_shield_controller import get_rf_shield_controller
logger = logging.getLogger(__name__)
THREAD_CHANNEL_MAX = 26
"""Maximum channel number of thread protocol"""
THREAD_CHANNEL_MIN = 11
"""Minimum channel number of thread protocol"""
DEFAULT_TIMEOUT = 2700
"""Timeout for each test case in seconds"""
def wait_until(what, times=-1):
"""Wait until `what` return True
Args:
        what (Callable[bool]): Call `what()` again and again until it returns True
times (int): Maximum times of trials before giving up
Returns:
True if success, False if times threshold reached
"""
while times:
logger.info('Waiting times left %d', times)
try:
if what() is True:
return True
except:
logger.exception('Wait failed')
else:
logger.warning('Trial[%d] failed', times)
times -= 1
time.sleep(1)
return False
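# Hedged usage sketch (added for illustration; not part of the original
# harness): wait_until is called with a predicate and a bounded retry budget.
# The flag-file name below is a placeholder, not something the harness uses.
def _example_wait_for_file(path='ready.flag', retries=10):
    """Poll for a flag file to appear, giving up after `retries` attempts."""
    return wait_until(lambda: os.path.exists(path), times=retries)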
class HarnessCase(unittest.TestCase):
"""This is the case class of all automation test cases.
All test case classes MUST define properties `role`, `case` and `golden_devices_required`
"""
channel = settings.THREAD_CHANNEL
"""int: Thread channel.
Thread channel ranges from 11 to 26.
"""
ROLE_LEADER = 1
ROLE_ROUTER = 2
ROLE_SED = 4
ROLE_BORDER = 8
ROLE_REED = 16
ROLE_ED = 32
ROLE_COMMISSIONER = 64
ROLE_JOINER = 128
ROLE_FED = 512
ROLE_MED = 1024
role = None
"""int: role id.
1
Leader
2
Router
4
Sleepy end device
16
Router eligible end device
32
End device
64
Commissioner
128
Joiner
512
Full end device
1024
Minimal end device
"""
case = None
"""str: Case id, e.g. '6 5 1'.
"""
golden_devices_required = 0
"""int: Golden devices needed to finish the test
"""
child_timeout = settings.THREAD_CHILD_TIMEOUT
"""int: Child timeout in seconds
"""
sed_polling_interval = settings.THREAD_SED_POLLING_INTERVAL
"""int: SED polling interval in seconds
"""
auto_dut = settings.AUTO_DUT
"""bool: whether use harness auto dut feature"""
timeout = hasattr(settings, 'TIMEOUT') and settings.TIMEOUT or DEFAULT_TIMEOUT
"""number: timeout in seconds to stop running this test case"""
started = 0
"""number: test case started timestamp"""
def __init__(self, *args, **kwargs):
self.dut = None
self._browser = None
self._hc = None
self.result_dir = '%s\\%s' % (settings.OUTPUT_PATH, self.__class__.__name__)
self.history = HistoryHelper()
self.add_all_devices = False
super(HarnessCase, self).__init__(*args, **kwargs)
def _init_devices(self):
"""Reboot all usb devices.
Note:
If PDU_CONTROLLER_TYPE is not valid, usb devices is not rebooted.
"""
if not settings.PDU_CONTROLLER_TYPE:
if settings.AUTO_DUT:
return
for device in settings.GOLDEN_DEVICES:
port, _ = device
try:
with OpenThreadController(port) as otc:
logger.info('Resetting %s', port)
otc.reset()
except:
logger.exception('Failed to reset device %s', port)
self.history.mark_bad_golden_device(device)
return
tries = 3
pdu_factory = PduControllerFactory()
while True:
try:
pdu = pdu_factory.create_pdu_controller(settings.PDU_CONTROLLER_TYPE)
pdu.open(**settings.PDU_CONTROLLER_OPEN_PARAMS)
except EOFError:
logger.warning('Failed to connect to telnet')
tries = tries - 1
if tries:
time.sleep(10)
continue
else:
logger.error('Fatal error: cannot connect to apc')
raise
else:
pdu.reboot(**settings.PDU_CONTROLLER_REBOOT_PARAMS)
pdu.close()
break
time.sleep(len(settings.GOLDEN_DEVICES))
def _init_harness(self):
"""Restart harness backend service.
        Please start the harness controller before running the cases; otherwise, nothing happens.
"""
self._hc = HarnessController(self.result_dir)
self._hc.stop()
time.sleep(1)
self._hc.start()
time.sleep(2)
harness_config = ConfigParser.ConfigParser()
harness_config.read('%s\\Config\\Configuration.ini' % settings.HARNESS_HOME)
if harness_config.has_option('THREAD_HARNESS_CONFIG', 'BrowserAutoNavigate') and \
harness_config.getboolean('THREAD_HARNESS_CONFIG', 'BrowserAutoNavigate'):
os.system('taskkill /t /f /im chrome.exe')
def _destroy_harness(self):
"""Stop harness backend service
Stop harness service.
"""
self._hc.stop()
time.sleep(2)
def _init_dut(self):
"""Initialize the DUT.
        DUT will be restarted and openthread will be started.
"""
if self.auto_dut:
self.dut = None
return
dut_port = settings.DUT_DEVICE[0]
dut = OpenThreadController(dut_port)
self.dut = dut
def _destroy_dut(self):
self.dut = None
def _init_browser(self):
"""Open harness web page.
Open a quiet chrome which:
1. disables extensions,
        2. ignores certificate errors, and
        3. always allows notifications.
"""
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--disable-extensions')
chrome_options.add_argument('--ignore-certificate-errors')
chrome_options.add_experimental_option('prefs', {
'profile.managed_default_content_settings.notifications': 1
})
browser = webdriver.Chrome(chrome_options=chrome_options)
browser.set_page_load_timeout(10)
browser.implicitly_wait(1)
browser.maximize_window()
browser.get(settings.HARNESS_URL)
self._browser = browser
if not wait_until(lambda: 'Thread' in browser.title, 30):
self.assertIn('Thread', browser.title)
def _destroy_browser(self):
"""Close the browser.
"""
self._browser.close()
self._browser = None
def _init_rf_shield(self):
if getattr(settings, 'SHIELD_CONTROLLER_TYPE', None) and getattr(settings, 'SHIELD_CONTROLLER_PARAMS', None):
self.rf_shield = get_rf_shield_controller(
shield_type=settings.SHIELD_CONTROLLER_TYPE,
params=settings.SHIELD_CONTROLLER_PARAMS
)
else:
self.rf_shield = None
def _destroy_rf_shield(self):
self.rf_shield = None
def setUp(self):
"""Prepare to run test case.
Start harness service, init golden devices, reset DUT and open browser.
"""
if self.__class__ is HarnessCase:
return
logger.info('Setting up')
# clear files
logger.info('Deleting all .pdf')
os.system('del /q "%HOMEDRIVE%%HOMEPATH%\\Downloads\\NewPdf_*.pdf"')
logger.info('Deleting all .xlsx')
os.system('del /q "%HOMEDRIVE%%HOMEPATH%\\Downloads\\ExcelReport*.xlsx"')
logger.info('Deleting all .pcapng')
os.system('del /q "%s\\Captures\\*.pcapng"' % settings.HARNESS_HOME)
# using temp files to fix excel downloading fail
logger.info('Empty files in temps')
os.system('del /q "%s\\Thread_Harness\\temp\\*.*"' % settings.HARNESS_HOME)
# create directory
os.system('mkdir %s' % self.result_dir)
self._init_harness()
self._init_devices()
self._init_dut()
self._init_rf_shield()
def tearDown(self):
"""Clean up after each case.
Stop harness service, close browser and close DUT.
"""
if self.__class__ is HarnessCase:
return
logger.info('Tearing down')
self._destroy_harness()
self._destroy_browser()
self._destroy_dut()
self._destroy_rf_shield()
def _setup_page(self):
"""Do sniffer settings and general settings
"""
if not self.started:
self.started = time.time()
if time.time() - self.started > 30:
self._browser.refresh()
return
# Detect Sniffer
try:
dialog = self._browser.find_element_by_id('capture-Setup-modal')
except:
logger.exception('Failed to get dialog.')
else:
if dialog and dialog.get_attribute('aria-hidden') == 'false':
times = 60
while times:
status = dialog.find_element_by_class_name('status-notify').text
if 'Searching' in status:
logger.info('Still detecting..')
elif 'Not' in status:
logger.warning('Sniffer device not verified!')
button = dialog.find_element_by_id('snifferAutoDetectBtn')
button.click()
elif 'Verified' in status:
logger.info('Verified!')
button = dialog.find_element_by_id('saveCaptureSettings')
button.click()
break
else:
logger.warning('Unexpected sniffer verification status')
times = times - 1
time.sleep(1)
if not times:
raise Exception('Unable to detect sniffer device')
time.sleep(1)
try:
skip_button = self._browser.find_element_by_id('SkipPrepareDevice')
if skip_button.is_enabled():
skip_button.click()
time.sleep(1)
except:
logger.info('Still detecting sniffers')
try:
next_button = self._browser.find_element_by_id('nextButton')
except:
logger.exception('Failed to finish setup')
return
if not next_button.is_enabled():
logger.info('Harness is still not ready')
return
# General Setup
try:
if self.child_timeout or self.sed_polling_interval:
button = self._browser.find_element_by_id('general-Setup')
button.click()
time.sleep(2)
dialog = self._browser.find_element_by_id('general-Setup-modal')
if dialog.get_attribute('aria-hidden') != 'false':
raise Exception('Missing General Setup dialog')
field = dialog.find_element_by_id('inp_general_child_update_wait_time')
field.clear()
if self.child_timeout:
field.send_keys(str(self.child_timeout))
field = dialog.find_element_by_id('inp_general_sed_polling_rate')
field.clear()
if self.sed_polling_interval:
field.send_keys(str(self.sed_polling_interval))
button = dialog.find_element_by_id('saveGeneralSettings')
button.click()
time.sleep(1)
except:
logger.exception('Failed to do general setup')
return
# Finish this page
next_button.click()
time.sleep(1)
def _connect_devices(self):
connect_all = self._browser.find_element_by_link_text('Connect All')
connect_all.click()
def _add_device(self, port, device_type_id):
browser = self._browser
test_bed = browser.find_element_by_id('test-bed')
device = browser.find_element_by_id(device_type_id)
# drag
action_chains = ActionChains(browser)
action_chains.click_and_hold(device)
action_chains.move_to_element(test_bed).perform()
time.sleep(1)
# drop
drop_hw = browser.find_element_by_class_name('drop-hw')
action_chains = ActionChains(browser)
action_chains.move_to_element(drop_hw)
action_chains.release(drop_hw).perform()
time.sleep(0.5)
selected_hw = browser.find_element_by_class_name('selected-hw')
form_inputs = selected_hw.find_elements_by_tag_name('input')
form_port = form_inputs[0]
form_port.clear()
form_port.send_keys(port)
def _test_bed(self):
"""Set up the test bed.
Connect number of golden devices required by each case.
"""
browser = self._browser
test_bed = browser.find_element_by_id('test-bed')
time.sleep(3)
selected_hw_set = test_bed.find_elements_by_class_name('selected-hw')
selected_hw_num = len(selected_hw_set)
while selected_hw_num:
remove_button = selected_hw_set[selected_hw_num - 1].find_element_by_class_name(
'removeSelectedDevice')
remove_button.click()
selected_hw_num = selected_hw_num - 1
devices = [device for device in settings.GOLDEN_DEVICES
if not self.history.is_bad_golden_device(device[0]) and \
not (settings.DUT_DEVICE and device[0] == settings.DUT_DEVICE[0])]
logger.info('Available golden devices: %s', json.dumps(devices, indent=2))
golden_devices_required = self.golden_devices_required
if self.auto_dut and not settings.DUT_DEVICE:
golden_devices_required += 1
if len(devices) < golden_devices_required:
raise GoldenDeviceNotEnoughError()
# add golden devices
number_of_devices_to_add = len(devices) if self.add_all_devices else golden_devices_required
for i in range(number_of_devices_to_add):
self._add_device(*devices.pop())
# add DUT
if settings.DUT_DEVICE:
self._add_device(*settings.DUT_DEVICE)
# enable AUTO DUT
if self.auto_dut:
checkbox_auto_dut = browser.find_element_by_id('EnableAutoDutSelection')
if not checkbox_auto_dut.is_selected():
checkbox_auto_dut.click()
time.sleep(1)
if settings.DUT_DEVICE:
radio_auto_dut = browser.find_element_by_class_name('AutoDUT_RadBtns')
if not radio_auto_dut.is_selected():
radio_auto_dut.click()
while True:
try:
self._connect_devices()
button_next = browser.find_element_by_id('nextBtn')
if not wait_until(lambda: 'disabled' not in button_next.get_attribute('class'),
times=(30 + 4 * number_of_devices_to_add)):
bad_ones = []
selected_hw_set = test_bed.find_elements_by_class_name('selected-hw')
for selected_hw in selected_hw_set:
form_inputs = selected_hw.find_elements_by_tag_name('input')
form_port = form_inputs[0]
if form_port.is_enabled():
bad_ones.append(selected_hw)
for selected_hw in bad_ones:
form_inputs = selected_hw.find_elements_by_tag_name('input')
form_port = form_inputs[0]
port = form_port.get_attribute('value').encode('utf8')
if settings.DUT_DEVICE and port == settings.DUT_DEVICE[0]:
if settings.PDU_CONTROLLER_TYPE is None:
# connection error cannot recover without power cycling
raise FatalError('Failed to connect to DUT')
else:
raise FailError('Failed to connect to DUT')
if settings.PDU_CONTROLLER_TYPE is None:
# port cannot recover without power cycling
self.history.mark_bad_golden_device(port)
# remove the bad one
selected_hw.find_element_by_class_name('removeSelectedDevice').click()
time.sleep(0.1)
if len(devices):
self._add_device(*devices.pop())
else:
devices = None
if devices is None:
logger.warning('Golden devices not enough')
raise GoldenDeviceNotEnoughError()
else:
logger.info('Try again with new golden devices')
continue
if self.auto_dut and not settings.DUT_DEVICE:
radio_auto_dut = browser.find_element_by_class_name('AutoDUT_RadBtns')
if not radio_auto_dut.is_selected():
radio_auto_dut.click()
time.sleep(5)
button_next.click()
if not wait_until(lambda: self._browser.current_url.endswith('TestExecution.html'), 20):
raise Exception('Failed to load TestExecution page')
except FailError:
raise
except:
logger.exception('Unexpected error')
else:
break
def _select_case(self, role, case):
"""Select the test case.
"""
# select the case
elem = Select(self._browser.find_element_by_id('select-dut'))
elem.select_by_value(str(role))
time.sleep(1)
checkbox = None
wait_until(lambda: self._browser.find_elements_by_css_selector('.tree-node .tree-title') and True)
elems = self._browser.find_elements_by_css_selector('.tree-node .tree-title')
finder = re.compile(r'.*\b' + case + r'\b')
finder_dotted = re.compile(r'.*\b' + case.replace(' ', r'\.') + r'\b')
for elem in elems:
action_chains = ActionChains(self._browser)
action_chains.move_to_element(elem)
action_chains.perform()
logger.debug(elem.text)
if finder.match(elem.text) or finder_dotted.match(elem.text):
parent = elem.find_element_by_xpath('..')
checkbox = parent.find_element_by_class_name('tree-checkbox')
break
if not checkbox:
time.sleep(5)
raise Exception('Failed to find the case')
self._browser.execute_script("$('.overview').css('left', '0')")
checkbox.click()
time.sleep(1)
elem = self._browser.find_element_by_id('runTest')
elem.click()
if not wait_until(lambda: self._browser.find_element_by_id('stopTest') and True, 10):
raise Exception('Failed to start test case')
def _collect_result(self):
"""Collect test result.
Generate PDF, excel and pcap file
"""
# generate pdf
self._browser.find_element_by_class_name('save-pdf').click()
time.sleep(1)
try:
dialog = self._browser.find_element_by_id('Testinfo')
except:
logger.exception('Failed to get test info dialog.')
else:
if dialog.get_attribute('aria-hidden') != 'false':
raise Exception('Test information dialog not ready')
version = self.auto_dut and settings.DUT_VERSION or self.dut.version
dialog.find_element_by_id('inp_dut_manufacturer').send_keys(settings.DUT_MANUFACTURER)
dialog.find_element_by_id('inp_dut_firmware_version').send_keys(version)
dialog.find_element_by_id('inp_tester_name').send_keys(settings.TESTER_NAME)
dialog.find_element_by_id('inp_remarks').send_keys(settings.TESTER_REMARKS)
dialog.find_element_by_id('generatePdf').click()
time.sleep(1)
main_window = self._browser.current_window_handle
# generate excel
self._browser.find_element_by_class_name('save-excel').click()
time.sleep(1)
for window_handle in self._browser.window_handles:
if window_handle != main_window:
self._browser.switch_to.window(window_handle)
self._browser.close()
self._browser.switch_to.window(main_window)
# save pcap
self._browser.find_element_by_class_name('save-wireshark').click()
time.sleep(1)
for window_handle in self._browser.window_handles:
if window_handle != main_window:
self._browser.switch_to.window(window_handle)
self._browser.close()
self._browser.switch_to.window(main_window)
os.system('copy "%%HOMEPATH%%\\Downloads\\NewPdf_*.pdf" %s\\'
% self.result_dir)
os.system('copy "%%HOMEPATH%%\\Downloads\\ExcelReport_*.xlsx" %s\\'
% self.result_dir)
os.system('copy "%s\\Captures\\*.pcapng" %s\\'
% (settings.HARNESS_HOME, self.result_dir))
os.system('copy "%s\\Thread_Harness\\temp\\*.*" "%s"'
% (settings.HARNESS_HOME, self.result_dir))
def _wait_dialog(self):
"""Wait for dialogs and handle them until done.
"""
logger.debug('waiting for dialog')
done = False
error = False
while not done and self.timeout:
try:
dialog = self._browser.find_element_by_id('RemoteConfirm')
except:
logger.exception('Failed to get dialog.')
else:
if dialog and dialog.get_attribute('aria-hidden') == 'false':
title = dialog.find_element_by_class_name('modal-title').text
time.sleep(1)
logger.info('Handling dialog[%s]', title)
try:
done = self._handle_dialog(dialog, title)
except:
logger.exception('Error handling dialog: %s', title)
error = True
if done is None:
raise FailError('Unexpected dialog occurred')
dialog.find_element_by_id('ConfirmOk').click()
time.sleep(1)
try:
stop_button = self._browser.find_element_by_id('stopTest')
if done:
stop_button.click()
# wait for stop procedure end
time.sleep(10)
except:
logger.exception('Test stopped')
time.sleep(5)
done = True
self.timeout -= 1
# check if already ended capture
if self.timeout % 10 == 0:
lines = self._hc.tail()
if 'SUCCESS: The process "dumpcap.exe" with PID ' in lines:
logger.info('Tshark should be ended now, lets wait at most 30 seconds.')
if not wait_until(lambda: 'tshark.exe' not in subprocess.check_output('tasklist'), 30):
res = subprocess.check_output('taskkill /t /f /im tshark.exe',
stderr=subprocess.STDOUT, shell=True)
logger.info(res)
# Wait until case really stopped
wait_until(lambda: self._browser.find_element_by_id('runTest') and True, 30)
if error:
raise FailError('Fail for previous exceptions')
def _handle_dialog(self, dialog, title):
"""Handle a dialog.
Returns:
bool True if no more dialogs expected,
False if more dialogs needed, and
None if not handled
"""
done = self.on_dialog(dialog, title)
if isinstance(done, bool):
return done
if title.startswith('Start DUT'):
body = dialog.find_element_by_id('cnfrmMsg').text
if 'Sleepy End Device' in body:
self.dut.mode = 's'
self.dut.child_timeout = self.child_timeout
elif 'End Device' in body:
self.dut.mode = 'rsn'
self.dut.child_timeout = self.child_timeout
else:
self.dut.mode = 'rsdn'
if 'at channel' in body:
self.channel = int(body.split(':')[1])
self.dut.channel = self.channel
self.dut.panid = settings.THREAD_PANID
self.dut.networkname = settings.THREAD_NETWORKNAME
self.dut.extpanid = settings.THREAD_EXTPANID
self.dut.start()
elif (title.startswith('MAC Address Required')
or title.startswith('DUT Random Extended MAC Address Required')):
mac = self.dut.mac
inp = dialog.find_element_by_id('cnfrmInpText')
inp.clear()
inp.send_keys('0x%s' % mac)
elif title.startswith('LL64 Address'):
ll64 = None
for addr in self.dut.addrs:
addr = addr.lower()
if addr.startswith('fe80') and not re.match('.+ff:fe00:[0-9a-f]{0,4}$', addr):
ll64 = addr
break
if not ll64:
raise FailError('No link local address found')
logger.info('Link local address is %s', ll64)
inp = dialog.find_element_by_id('cnfrmInpText')
inp.clear()
inp.send_keys(ll64)
elif title.startswith('Enter Channel'):
self.dut.channel = self.channel
inp = dialog.find_element_by_id('cnfrmInpText')
inp.clear()
inp.send_keys(str(self.dut.channel))
elif title.startswith('User Action Needed'):
body = dialog.find_element_by_id('cnfrmMsg').text
if body.startswith('Power Down the DUT'):
self.dut.stop()
return True
elif title.startswith('Short Address'):
short_addr = '0x%s' % self.dut.short_addr
inp = dialog.find_element_by_id('cnfrmInpText')
inp.clear()
inp.send_keys(short_addr)
elif title.startswith('ML64 Address'):
ml64 = None
for addr in self.dut.addrs:
if addr.startswith('fd') and not re.match('.+ff:fe00:[0-9a-f]{0,4}$', addr):
ml64 = addr
break
if not ml64:
raise Exception('No mesh local address found')
logger.info('Mesh local address is %s', ml64)
inp = dialog.find_element_by_id('cnfrmInpText')
inp.clear()
inp.send_keys(ml64)
elif title.startswith('Shield Devices') or title.startswith('Sheild DUT'):
if self.rf_shield:
logger.info('Shielding devices')
with self.rf_shield:
self.rf_shield.shield()
elif self.dut and settings.SHIELD_SIMULATION:
self.dut.channel = (self.channel == THREAD_CHANNEL_MAX
and THREAD_CHANNEL_MIN) or (self.channel + 1)
else:
raw_input('Shield DUT and press enter to continue..')
elif title.startswith('Unshield Devices') or title.startswith('Bring DUT Back to network'):
if self.rf_shield:
logger.info('Unshielding devices')
with self.rf_shield:
self.rf_shield.unshield()
elif self.dut and settings.SHIELD_SIMULATION:
self.dut.channel = self.channel
else:
                raw_input('Bring DUT back to network and press enter to continue..')
elif title.startswith('Configure Prefix on DUT'):
body = dialog.find_element_by_id('cnfrmMsg').text
body = body.split(': ')[1]
params = reduce(lambda params, param: params.update(((param[0].strip(' '), param[1]),)) or params,
[it.split('=') for it in body.split(', ')], {})
prefix = params['P_Prefix'].strip('\0\r\n\t ')
flags = []
if params.get('P_slaac_preferred', 0) == '1':
flags.append('p')
flags.append('ao')
if params.get('P_stable', 0) == '1':
flags.append('s')
if params.get('P_default', 0) == '1':
flags.append('r')
prf = 'high'
self.dut.add_prefix(prefix, ''.join(flags), prf)
return False
def test(self):
"""This method will only start test case in child class"""
if self.__class__ is HarnessCase:
logger.warning('Skip this harness itself')
return
logger.info('Testing role[%d] case[%s]', self.role, self.case)
try:
self._init_browser()
# prepare test case
while True:
url = self._browser.current_url
if url.endswith('SetupPage.html'):
self._setup_page()
elif url.endswith('TestBed.html'):
self._test_bed()
elif url.endswith('TestExecution.html'):
logger.info('Ready to handle dialogs')
break
time.sleep(2)
except UnexpectedAlertPresentException:
logger.exception('Failed to connect to harness server')
raise SystemExit()
except FatalError:
logger.exception('Test stopped for fatal error')
raise SystemExit()
except FailError:
logger.exception('Test failed')
raise
except:
logger.exception('Something wrong')
self._select_case(self.role, self.case)
self._wait_dialog()
try:
self._collect_result()
except:
logger.exception('Failed to collect results')
raise
# get case result
status = self._browser.find_element_by_class_name('title-test').text
logger.info(status)
success = 'Pass' in status
self.assertTrue(success)
| {
"content_hash": "b6f9419ec1c9616c4c6c07fe8016f947",
"timestamp": "",
"source": "github",
"line_count": 867,
"max_line_length": 117,
"avg_line_length": 36.08073817762399,
"alnum_prop": 0.5465123713317562,
"repo_name": "xiaom-GitHub/openthread",
"id": "33384cd77cc747bcd3dee795be003d671c475a18",
"size": "32863",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/harness-automation/autothreadharness/harness_case.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "10128"
},
{
"name": "C",
"bytes": "572698"
},
{
"name": "C#",
"bytes": "18077"
},
{
"name": "C++",
"bytes": "3486464"
},
{
"name": "M4",
"bytes": "48751"
},
{
"name": "Makefile",
"bytes": "82554"
},
{
"name": "Python",
"bytes": "1060869"
},
{
"name": "Ruby",
"bytes": "3397"
},
{
"name": "Shell",
"bytes": "28070"
}
],
"symlink_target": ""
} |
"""Agent-environment training loop based on acme."""
import collections
import csv
import operator
import os
import pickle
from absl import flags
from acme import core
from acme.utils import counting
from acme.utils import loggers
import dm_env
from mime.agent import ScriptAgent
import numpy as np
import tensorflow as tf
from tensorflow.io import gfile
from rrlfd.bc import pickle_dataset
FLAGS = flags.FLAGS
class ActionLogger:
"""Logger to save actions and their base and residual components to pickle."""
def __init__(self, spec):
self._spec = spec
self._reset()
def _reset(self):
"""Reset episode buffers."""
if isinstance(self._spec, dict):
self.actions = collections.defaultdict(list)
self.base_actions = collections.defaultdict(list)
self.residual_actions = collections.defaultdict(list)
else:
self.actions = []
self.base_actions = []
self.residual_actions = []
def add(self, action, base_action, residual_action):
"""Append an action to the current episode."""
if isinstance(self._spec, dict):
for k, v in action.items():
self.actions[k].append(np.copy(v))
for k, v in base_action.items():
self.base_actions[k].append(np.copy(v))
for k, v in residual_action.items():
self.residual_actions[k].append(np.copy(v))
else:
self.actions.append(np.copy(action))
self.base_actions.append(np.copy(base_action))
self.residual_actions.append(np.copy(residual_action))
def append_to_pickle(self, path):
if not gfile.exists(os.path.dirname(path)):
gfile.makedirs(os.path.dirname(path))
with gfile.GFile(path, 'ab') as f:
pickle.dump((self.actions, self.base_actions, self.residual_actions), f)
self._reset()
def equal_observations(obs1, obs2, obs_type):
# Account for noise (in linear velocity).
if obs_type in ['visible_state', 'linear_velocity']:
return np.all(np.isclose(obs1, obs2, rtol=0., atol=1e-09))
elif obs_type == 'failure_message':
return np.all(obs1 == obs2)
else:
return np.all(np.equal(obs1, obs2))
def equal_dicts(d1, d2):
"""Test whether the dicts d1 and d2 have equal items."""
equal_keys = sorted(d1.keys()) == sorted(d2.keys())
equal_values = np.all(
[equal_observations(d1[k], d2[k], k) for k in d1.keys()])
return equal_keys and equal_values
def loop_is_stuck(base_obs1, base_obs2, acme_obs1, acme_obs2, a1, a2):
"""Detect if observations and actions are stuck."""
# If first time step.
if acme_obs1 is None or acme_obs2 is None or a1 is None or a2 is None:
return False, False, False, False
act_stuck = np.all(np.equal(a1, a2))
base_obs_stuck = True
# Not used in the case of RL-only agent.
if base_obs1 is not None and base_obs2 is not None:
# Base observation is a list of frames (and possibly other features).
for obs1, obs2 in zip(base_obs1, base_obs2):
base_obs_stuck = base_obs_stuck and equal_dicts(obs1, obs2)
acme_obs_stuck = equal_dicts(acme_obs1, acme_obs2)
obs_stuck = base_obs_stuck and acme_obs_stuck
stuck = act_stuck and obs_stuck
return stuck, act_stuck, base_obs_stuck, acme_obs_stuck
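# Worked example (added for illustration): two observation dicts that differ
# only by numerical noise below the tolerance used for 'visible_state' compare
# as equal, so a run of identical transitions can be flagged as stuck.
#
#     d1 = {'visible_state': np.array([0.10, 0.20])}
#     d2 = {'visible_state': np.array([0.10, 0.20 + 1e-10])}
#     equal_dicts(d1, d2)  # -> True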
class EnvironmentLoop(core.Worker):
"""A custom RL environment loop."""
def __init__(
self,
environment,
eval_environment,
cam_environment,
cam_eval_environment,
actor,
counter=None,
logger=None,
label='environment_loop',
summary_writer=None,
):
# Internalize agent and environment.
self._environment = environment
self._eval_environment = eval_environment
self._cam_environment = cam_environment
self._cam_eval_environment = cam_eval_environment
self._actor = actor
self._counter = counter or counting.Counter()
self._logger = logger or loggers.make_default_logger(label)
self._summary_writer = summary_writer
self.min_discounted = np.inf
self.max_discounted = -np.inf
@property
def actor(self):
return self._actor
@actor.setter
def actor(self, actor):
self._actor = actor
def _eval_policy(self, num_episodes, out_dir, num_episodes_to_log):
# Write a single file with num_episodes_to_log episodes.
start_log_episode = num_episodes - num_episodes_to_log
self.run(num_episodes, out_dir, ckpt_freq=None,
log_frames_freq=start_log_episode, eval_freq=None,
num_episodes_to_log=num_episodes_to_log)
def _log_action(self, action_logger, action_components):
action_dicts = []
for action in action_components:
action_dicts.append(self._actor.flat_action_to_dict(action))
action_logger.add(*action_dicts)
def env_timestep_to_acme_timestep(self, env_timestep, rl_obs):
return dm_env.TimeStep(
env_timestep.step_type,
env_timestep.reward,
env_timestep.discount,
rl_obs)
def keep_shortest_trajectories(
self, demos_file, num_to_keep, episode_lengths=None):
# Keep num_to_keep shortest trajectories in the dataset at demos_file.
if episode_lengths is None:
episode_lengths = []
with gfile.GFile(demos_file, 'rb') as f:
while True:
try:
demo = pickle.load(f)
episode_lengths.append(len(demo['actions']))
except EOFError:
break
sorted_lengths = sorted(
enumerate(episode_lengths), key=operator.itemgetter(1))
included_trajectories = set([e[0] for e in sorted_lengths[:num_to_keep]])
print('Keeping', len(included_trajectories), 'trajectories')
all_demos_file = (
demos_file.replace(f'e{num_to_keep}', '').replace('.pkl', 'all.pkl'))
gfile.Rename(demos_file, all_demos_file)
new_demo_writer = pickle_dataset.DemoWriter(demos_file)
i = 0
with gfile.GFile(all_demos_file, 'rb') as f:
while True:
try:
demo = pickle.load(f)
if i in included_trajectories:
new_demo_writer.write_episode(demo['observations'], demo['actions'])
i += 1
except EOFError:
break
def keep_latest_trajectories(self, demos_file, num_to_keep):
    # Keep the num_to_keep latest trajectories in the dataset at demos_file.
print(demos_file)
all_demos_file = (
demos_file.replace(f'e{num_to_keep}', '').replace('.pkl', 'all.pkl'))
print(all_demos_file)
gfile.Rename(demos_file, all_demos_file)
last_demos = []
with gfile.GFile(all_demos_file, 'rb') as f:
while True:
try:
demo = pickle.load(f)
last_demos.append(demo)
          last_demos = last_demos[-num_to_keep:]  # keep only the latest demos
except EOFError:
break
new_demo_writer = pickle_dataset.DemoWriter(demos_file)
for demo in last_demos:
new_demo_writer.write_episode(demo['observations'], demo['actions'])
def run(self,
num_episodes=None,
num_successes=None,
success_writer=None,
out_dir=None,
ckpt_freq=50_000,
log_frames_freq=1000,
eval_freq=100_000,
start_with_eval=False,
num_eval_episodes=100,
num_episodes_to_log=0,
collapse_in_eval=True,
eval_seed=None,
increment_eval_seed=False,
stop_if_stuck=False,
trajectory_filter='latest',
summary_writer=None):
"""Run the env for num_episodes episodes or num_successes successes."""
# If both are set, use num_episodes as the limit but write num_successes
# trajectories with success_writer.
# TODO(minttu): OR train until both are satisfied.
np.set_printoptions(precision=4, suppress=True)
action_logger = ActionLogger(self._environment.action_spec())
num_successes_written = 0
success_lengths = []
counts = self._counter.get_counts()
print('Starting env loop with counters', counts)
total_steps = counts['env_steps'] if 'env_steps' in counts else 0
prev_total_steps = total_steps
summary_writer = summary_writer or self._summary_writer
i = counts['episodes'] if 'episodes' in counts else 0
# How many episodes have been written in latest log.
record_count = i % log_frames_freq
while num_episodes is None or i < num_episodes:
rewards = []
episode_steps = 0
episode_return = 0
prev_raw_residual = None
prev_residual_exploration = False
# For envs with non-Markovian success criteria, track required fields.
if i % log_frames_freq == 0:
record_count = 0
first_to_record = i
last_to_record = i + num_episodes_to_log - 1
if out_dir is not None:
demo_writer = pickle_dataset.DemoWriter(
os.path.join(out_dir,
'episodes',
f'episodes_{first_to_record}-{last_to_record}.pkl'))
if record_count < num_episodes_to_log: # Log frames for current episode.
if self._cam_environment is None:
environment = self._environment
else:
environment = self._cam_environment
self._environment.reset() # Keep both environments in same state.
print(f'episode {i}: using cam env')
else: # Do not log frames for current episode.
environment = self._environment
if self._cam_environment is not None:
self._cam_environment.reset() # Keep both environments in same state.
print(f'episode {i}: using non-cam env')
timestep = environment.reset()
if FLAGS.base_controller is not None:
# Reset script for each episode.
# TODO(minttu): Recompute at each time step.
self._actor.base_controller = ScriptAgent(
environment.env, FLAGS.base_controller)
if record_count < num_episodes_to_log:
observations = []
actions = []
# Call base agent here (if applicable) in order to pre-shape observation
# for acme's observe first.
acme_obs, _, norm_base_act = self._actor.get_acme_observation(
timestep.observation)
acme_timestep = self.env_timestep_to_acme_timestep(
timestep, acme_obs)
self._actor.observe_first(acme_timestep)
while not timestep.last():
(action, base_action, residual_action, raw_residual,
residual_exploration, policy_mean, policy_std) = (
self._actor.select_action(
acme_obs, norm_base_act, timestep.observation,
prev_raw_residual, prev_residual_exploration))
prev_raw_residual = raw_residual
prev_residual_exploration = residual_exploration
self._log_action(action_logger, (action, base_action, residual_action))
if (i % 100 == 0 and episode_steps == 0
and summary_writer is not None):
if policy_std is not None:
with summary_writer.as_default():
tf.summary.scalar('grip_policy_std', policy_std[0], step=i)
tf.summary.scalar(
'linear_policy_std', np.mean(policy_std[1:]), step=i)
if policy_mean is None:
if residual_exploration:
# Get deterministic action.
_, _, policy_mean, _, _, _, _ = (
self._actor.select_action(
acme_obs, norm_base_act, timestep.observation,
add_exploration=False))
else:
policy_mean = residual_action
with summary_writer.as_default():
tf.summary.scalar(
'grip_policy_magn', np.abs(policy_mean[0]), step=i)
tf.summary.scalar(
'linear_policy_magn', np.mean(np.abs(policy_mean[1:])),
step=i)
next_timestep = environment.step(action)
info = environment.info_from_observation(next_timestep.observation)
if record_count < num_episodes_to_log:
observations.append(timestep.observation)
actions.append(self._actor.flat_action_to_dict(action))
next_acme_obs, _, next_norm_base_act = (
self._actor.get_acme_observation(next_timestep.observation))
acme_timestep = self.env_timestep_to_acme_timestep(
next_timestep, next_acme_obs)
self._actor.observe(raw_residual, acme_timestep)
self._actor.update()
timestep = next_timestep
rewards.append(timestep.reward)
acme_obs = next_acme_obs
norm_base_act = next_norm_base_act
episode_return += timestep.reward
episode_steps += 1
if ((eval_freq is not None # and total_steps > 0
and total_steps % eval_freq == 0)
or (total_steps == prev_total_steps and start_with_eval)):
eval_path = None
eval_task = self._environment.task
if out_dir is not None:
increment_str = 'i' if increment_eval_seed else ''
eval_path = os.path.join(
out_dir,
'eval',
f'eval{eval_task}_s{eval_seed}{increment_str}_'
f'e{num_eval_episodes}')
finished_eval = False
while not finished_eval:
print(f'Evaluating policy after {total_steps} frames')
success_rate, finished_eval = self.eval_policy(
num_episodes=num_eval_episodes,
trained_steps=total_steps,
collapse_policy=collapse_in_eval,
eval_path=eval_path,
num_videos_to_save=100,
seed=eval_seed,
increment_seed=increment_eval_seed,
stop_if_stuck=stop_if_stuck)
if summary_writer is not None:
collapse_str = 'c' if collapse_in_eval else ''
with summary_writer.as_default():
tf.summary.scalar(
f'{eval_task}_s{eval_seed}_e{num_eval_episodes}{collapse_str}'
f'_success_rate',
success_rate, step=i)
tf.summary.scalar(
f'{eval_task}_s{eval_seed}_e{num_eval_episodes}{collapse_str}'
'_success_rate_env_steps',
success_rate, step=total_steps)
if (ckpt_freq is not None and total_steps % ckpt_freq == 0
and out_dir is not None):
# TODO(minttu): Add global step to checkpoint.
ckpt_path = os.path.join(out_dir, 'policy', f'policy_{total_steps}')
print('Saving policy weights to', ckpt_path)
self._actor.save_policy_weights(ckpt_path)
if self._actor.rl_observation_network_type is not None:
ckpt_path = os.path.join(
out_dir, 'observation_net', f'observation_{total_steps}')
print('Saving observation weights to', ckpt_path)
checkpoint = tf.train.Checkpoint(
module=self._actor.rl_agent._learner._observation_network) # pylint: disable=protected-access
checkpoint.save(ckpt_path)
total_steps += 1
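      # Backward recursion for discounted returns: G_t = r_t + discount * G_{t+1},
      # starting from the final reward, so the list runs from the last step back
      # to the first.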
discounted_returns = [rewards[-1]]
for r in reversed(rewards[:-1]):
discounted_returns.append(r + FLAGS.discount * discounted_returns[-1])
self.min_discounted = min(self.min_discounted, np.min(discounted_returns))
self.max_discounted = max(self.max_discounted, np.max(discounted_returns))
print('discounted episode return range:'
f'[{self.min_discounted}, {self.max_discounted}]')
# Record counts.
counts = self._counter.increment(episodes=1, env_steps=episode_steps)
print(self._counter.get_counts(), 'success' if info['success'] else '')
# Collect the results and combine with counts.
result = {
'episode_length': episode_steps,
'episode_return': episode_return}
result.update(counts)
self._logger.write(result)
if out_dir is not None:
if record_count < num_episodes_to_log:
print('Saving episode', i)
demo_writer.write_episode(observations, actions)
actions_path = (
os.path.join(
out_dir,
'actions',
f'actions_{first_to_record}-{last_to_record}.pkl'))
action_logger.append_to_pickle(actions_path)
record_count += 1
if success_writer is not None and info['success']:
num_successes_written += 1
print(f'Saving episode {num_successes_written} / {num_successes}')
success_writer.write_episode(observations, actions)
success_lengths.append(episode_steps)
if num_successes_written >= num_successes and num_episodes is None:
break
i += 1
print('Ending env loop with counters', self._counter.get_counts())
if success_writer is not None and num_successes_written > num_successes:
if trajectory_filter == 'shortest':
self.keep_shortest_trajectories(
success_writer.path, num_successes, success_lengths)
else:
self.keep_latest_trajectories(success_writer.path, num_successes)
def eval_policy(
self,
num_episodes,
trained_steps=None,
collapse_policy=True,
eval_path=None,
num_videos_to_save=0,
max_num_steps=None,
seed=None,
increment_seed=False,
stop_if_stuck=False):
"""Evaluate policy on env for num_episodes episodes."""
if FLAGS.domain == 'mime':
self._eval_environment.create_env()
if not increment_seed and seed is not None:
self._eval_environment.env.seed(seed)
if self._cam_eval_environment is not None:
self._cam_eval_environment.env.seed(seed)
num_successes = 0
action_logger = ActionLogger(self._environment.action_spec())
if max_num_steps is None:
max_num_steps = self._eval_environment.default_max_episode_steps
if eval_path is None:
log_f = None
success_f = None
episode_length_f = None
eval_writer = None
else:
if not gfile.exists(os.path.dirname(eval_path)):
gfile.makedirs(os.path.dirname(eval_path))
collapse_str = 'c' if collapse_policy else ''
stuck_str = 's' if stop_if_stuck else ''
eval_summary_path = eval_path + f'_all{collapse_str}{stuck_str}'
eval_path = eval_path + f'_{trained_steps}{collapse_str}{stuck_str}'
log_f = gfile.GFile(eval_path + '_log.txt', 'w')
success_f = gfile.GFile(eval_path + '_success.txt', 'w')
episode_length_f = gfile.GFile(eval_path + '_lengths.txt', 'w')
eval_writer = pickle_dataset.DemoWriter(eval_path + '.pkl')
actions_path = eval_path + '_actions.pkl'
if gfile.exists(actions_path):
gfile.Remove(actions_path)
for e in range(num_episodes):
rewards = []
if increment_seed and seed is not None:
self._eval_environment.env.seed(seed + e)
if self._cam_eval_environment is not None:
self._cam_eval_environment.env.seed(seed + e)
if e % 10 == 0 and e > 0:
success_rate = num_successes / e * 100
print(f'Episode {e} / {num_episodes}; Success rate {num_successes} / '
f'{e} ({success_rate:.4f}%)')
if (e < num_videos_to_save and eval_writer is not None
and self._cam_eval_environment is not None):
environment = self._cam_eval_environment
self._eval_environment.reset() # Keep both environments in same state.
print(f'eval episode {e}: using cam env')
else:
environment = self._eval_environment
if self._cam_eval_environment is not None:
# Keep both environments in same state.
self._cam_eval_environment.reset()
print(f'eval episode {e}: using non-cam env')
timestep = environment.reset()
observations = []
actions = []
step_count = 0
if FLAGS.base_controller is not None:
# Reset script for each episode.
self._actor.base_controller = ScriptAgent(
environment.env, FLAGS.base_controller)
while not timestep.last():
acme_obs, _, norm_base_act = self._actor.get_acme_observation(
timestep.observation)
action, base_action, residual_action, _, _, _, _ = (
self._actor.select_action(
acme_obs, norm_base_act, timestep.observation,
add_exploration=False, collapse=collapse_policy))
observations.append(timestep.observation)
actions.append(self._actor.flat_action_to_dict(action))
self._log_action(action_logger, (action, base_action, residual_action))
next_timestep = environment.step(action)
info = environment.info_from_observation(next_timestep.observation)
timestep = next_timestep
rewards.append(timestep.reward)
step_count += 1
discounted_returns = [rewards[-1]]
for r in reversed(rewards[:-1]):
discounted_returns.append(r + FLAGS.discount * discounted_returns[-1])
self.min_discounted = min(self.min_discounted, np.min(discounted_returns))
self.max_discounted = max(self.max_discounted, np.max(discounted_returns))
print('discounted episode return range:'
f'[{self.min_discounted}, {self.max_discounted}]')
if info['success']:
print(f'{e}: success')
if log_f is not None:
log_f.write(f'{e}: success\n')
log_f.flush()
if success_f is not None:
success_f.write('success\n')
success_f.flush()
num_successes += 1
else:
if 'failure_message' in info:
print(f'{e}: failure:', info['failure_message'])
elif step_count >= max_num_steps or timestep.last():
print(f'{e}: failure: time limit')
else:
print(f'{e}: failure')
if log_f is not None:
if 'failure_message' in info:
log_f.write(f'{e}: failure:' + info['failure_message'] + '\n')
elif step_count >= max_num_steps or timestep.last():
log_f.write(f'{e}: failure: time limit \n')
else:
log_f.write(f'{e}: failure\n')
log_f.flush()
if success_f is not None:
success_f.write('failure\n')
success_f.flush()
if episode_length_f is not None:
episode_length_f.write(str(step_count) + '\n')
episode_length_f.flush()
if e < num_videos_to_save and eval_writer is not None:
eval_writer.write_episode(observations, actions)
action_logger.append_to_pickle(actions_path)
success_rate = num_successes / num_episodes * 100
print(
f'Done; Success rate {num_successes} / {num_episodes} '
f'({success_rate:.4f}%)')
if log_f is not None:
log_f.write(
f'Done; Success rate {num_successes} / {num_episodes} '
f'({success_rate:.4f}%)\n')
log_f.close()
csv_writer = csv.writer(
gfile.GFile(eval_summary_path + '_success_rates.csv', 'a'))
csv_writer.writerow([trained_steps, num_successes / num_episodes])
return num_successes / num_episodes, True
| {
"content_hash": "7895d304c0c53f55f6bdf2eaf267004e",
"timestamp": "",
"source": "github",
"line_count": 575,
"max_line_length": 110,
"avg_line_length": 39.850434782608694,
"alnum_prop": 0.6140350877192983,
"repo_name": "google-research/google-research",
"id": "5ececab16486438bc9f1dfb7d1a3eb181cadf26b",
"size": "23522",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rrlfd/residual/environment_loop.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
} |
import rospy
import tf
from geometry_msgs.msg import PoseStamped
from sensor_msgs.msg import Joy
from math import fabs
lastData = None
def joyChanged(data):
global lastData
lastData = data
# print(data)
if __name__ == '__main__':
rospy.init_node('publish_pose', anonymous=True)
name = rospy.get_param("~name")
r = rospy.get_param("~rate")
joy_topic = rospy.get_param("~joy_topic", "joy")
x = rospy.get_param("~x")
y = rospy.get_param("~y")
z = rospy.get_param("~z")
rate = rospy.Rate(r)
msg = PoseStamped()
msg.header.seq = 0
msg.header.stamp = rospy.Time.now()
msg.header.frame_id = "world"
msg.pose.position.x = x
msg.pose.position.y = y
msg.pose.position.z = z
yaw = 0
quaternion = tf.transformations.quaternion_from_euler(0, 0, yaw)
msg.pose.orientation.x = quaternion[0]
msg.pose.orientation.y = quaternion[1]
msg.pose.orientation.z = quaternion[2]
msg.pose.orientation.w = quaternion[3]
pub = rospy.Publisher(name, PoseStamped, queue_size=1)
rospy.Subscriber(joy_topic, Joy, joyChanged)
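    # The loop below integrates joystick axes into the published pose at rate r.
    # The axis indices used here (1 -> z, 4 -> x, 3 -> y, 0 -> yaw) and the 0.1
    # dead-zone look like a common gamepad layout, but the actual mapping depends
    # on the joystick driver configuration.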
while not rospy.is_shutdown():
        if lastData is not None:
if fabs(lastData.axes[1]) > 0.1:
msg.pose.position.z += lastData.axes[1] / r / 2
if fabs(lastData.axes[4]) > 0.1:
msg.pose.position.x += lastData.axes[4] / r * 1
if fabs(lastData.axes[3]) > 0.1:
msg.pose.position.y += lastData.axes[3] / r * 1
if fabs(lastData.axes[0]) > 0.1:
yaw += lastData.axes[0] / r * 2
quaternion = tf.transformations.quaternion_from_euler(0, 0, yaw)
msg.pose.orientation.x = quaternion[0]
msg.pose.orientation.y = quaternion[1]
msg.pose.orientation.z = quaternion[2]
msg.pose.orientation.w = quaternion[3]
# print(pose)
msg.header.seq += 1
msg.header.stamp = rospy.Time.now()
pub.publish(msg)
rate.sleep()
| {
"content_hash": "15ea1b3d61f53b111bfa00988b6c198e",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 76,
"avg_line_length": 32.854838709677416,
"alnum_prop": 0.5915562101129112,
"repo_name": "marktsai0316/crazyflie_ros",
"id": "96f64d66c9eef231aac6e4cf67ee64a7deedd354",
"size": "2060",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "crazyflie_demo/scripts/publish_pose_teleop.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "53809"
},
{
"name": "CMake",
"bytes": "21553"
},
{
"name": "Python",
"bytes": "21713"
}
],
"symlink_target": ""
} |
from setuptools import setup
import codecs
import os
here = os.path.abspath(os.path.dirname(__file__))
def read_file(names, encoding='utf-8'):
file_path = os.path.join(here, *names)
if encoding:
with codecs.open(file_path, encoding=encoding) as f:
return f.read()
else:
with open(file_path, 'rb') as f:
return f.read()
def exec_file(names):
code = read_file(names, encoding=None)
result = {}
exec(code, result)
return result
setup(
name='matrix_client',
version=exec_file(('matrix_client', '__init__.py',))['__version__'],
description='Client-Server SDK for Matrix',
long_description=read_file(('README.rst',)),
author='The Matrix.org Team',
author_email='[email protected]',
url='https://github.com/matrix-org/matrix-python-sdk',
packages=['matrix_client'],
license='Apache License, Version 2.0',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Communications :: Chat',
'Topic :: Communications :: Conferencing',
],
keywords='chat sdk matrix matrix.org',
install_requires=[
'requests'
],
extras_require={
'test': ['tox', 'pytest', 'flake8']
}
)
| {
"content_hash": "ba8bfc9583d1e30906b6381a3c4e7996",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 72,
"avg_line_length": 28.098039215686274,
"alnum_prop": 0.6022330774598744,
"repo_name": "dovf/matrix-python-sdk",
"id": "dc7c8d717db8fa7e06971c0fb04c13088062bcb0",
"size": "1455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "36602"
}
],
"symlink_target": ""
} |
"""Maintain moving averages of parameters."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import distribution_strategy_context
from tensorflow.python.training import slot_creator
from tensorflow.python.util.tf_export import tf_export
# TODO(touts): switch to variables.Variable.
def assign_moving_average(variable, value, decay, zero_debias=True, name=None):
"""Compute the moving average of a variable.
The moving average of 'variable' updated with 'value' is:
variable * decay + value * (1 - decay)
The returned Operation sets 'variable' to the newly computed moving average,
by performing this subtraction:
variable -= (1 - decay) * (variable - value)
Since variables that are initialized to a `0` value will be `0` biased,
`zero_debias` optionally enables scaling by the mathematically correct
debiasing factor of
1 - decay ** num_updates
See `ADAM: A Method for Stochastic Optimization` Section 3 for more details
(https://arxiv.org/abs/1412.6980).
The names of the debias shadow variables, by default, include both the scope
they were created in and the scope of the variables they debias. They are also
given a uniquifying-suffix.
E.g.:
```
with tf.variable_scope('scope1'):
with tf.variable_scope('scope2'):
var = tf.get_variable('foo')
update_1 = tf.assign_moving_average(var, 0.0, 1.0)
update_2 = tf.assign_moving_average(var, 0.0, 0.9)
# var.name: 'scope1/scope2/foo'
# shadow var names: 'scope1/scope2/scope1/scope2/foo/biased'
# 'scope1/scope2/scope1/scope2/foo/biased_1'
```
Args:
variable: A Variable.
value: A tensor with the same shape as 'variable'.
decay: A float Tensor or float value. The moving average decay.
zero_debias: A python bool. If true, assume the variable is 0-initialized
and unbias it, as in https://arxiv.org/abs/1412.6980. See docstring in
`_zero_debias` for more details.
name: Optional name of the returned operation.
Returns:
A tensor which if evaluated will compute and return the new moving average.
"""
def update_fn(v, value, decay=decay):
decay = ops.convert_to_tensor(1.0 - decay, name="decay")
if decay.dtype != v.dtype.base_dtype:
decay = math_ops.cast(decay, v.dtype.base_dtype)
if zero_debias:
update_delta = _zero_debias(v, value, decay)
else:
update_delta = (v - value) * decay
return state_ops.assign_sub(v, update_delta, name=scope)
with ops.name_scope(name, "AssignMovingAvg",
[variable, value, decay]) as scope:
replica_context = distribution_strategy_context.get_replica_context()
if replica_context:
# In a replica context, we update variable using the mean of value across
# replicas.
def merge_fn(strategy, v, value):
value = strategy.reduce(
variable_scope.VariableAggregation.MEAN, value, v)
return strategy.update(v, update_fn, value)
return replica_context.merge_call(merge_fn, variable, value)
else:
strategy = distribution_strategy_context.get_cross_replica_context()
return strategy.update(variable, update_fn, value)
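# Illustrative sketch (not part of the original module): with decay = 0.9 and a
# variable initialized to 0.0, repeatedly averaging in the constant value 1.0 gives
#   after 1 update: 0.0 - 0.1 * (0.0 - 1.0) = 0.1
#   after 2 updates: 0.1 - 0.1 * (0.1 - 1.0) = 0.19
#   after 3 updates: 0.19 - 0.1 * (0.19 - 1.0) = 0.271
# i.e. 1 - 0.9**t, biased towards the zero initialization. With zero_debias=True
# the biased estimate is rescaled by 1 / (1 - 0.9**t), recovering 1.0.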
def weighted_moving_average(value,
decay,
weight,
truediv=True,
collections=None,
name=None):
"""Compute the weighted moving average of `value`.
Conceptually, the weighted moving average is:
`moving_average(value * weight) / moving_average(weight)`,
where a moving average updates by the rule
`new_value = decay * old_value + (1 - decay) * update`
Internally, this Op keeps moving average variables of both `value * weight`
and `weight`.
Args:
value: A numeric `Tensor`.
decay: A float `Tensor` or float value. The moving average decay.
weight: `Tensor` that keeps the current value of a weight.
Shape should be able to multiply `value`.
truediv: Boolean, if `True`, dividing by `moving_average(weight)` is
floating point division. If `False`, use division implied by dtypes.
collections: List of graph collections keys to add the internal variables
`value * weight` and `weight` to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
name: Optional name of the returned operation.
Defaults to "WeightedMovingAvg".
Returns:
An Operation that updates and returns the weighted moving average.
"""
# Unlike assign_moving_average, the weighted moving average doesn't modify
# user-visible variables. It is the ratio of two internal variables, which are
# moving averages of the updates. Thus, the signature of this function is
# quite different than assign_moving_average.
if collections is None:
collections = [ops.GraphKeys.GLOBAL_VARIABLES]
with variable_scope.variable_scope(name, "WeightedMovingAvg",
[value, weight, decay]) as scope:
value_x_weight_var = variable_scope.get_variable(
"value_x_weight",
shape=value.get_shape(),
dtype=value.dtype,
initializer=init_ops.zeros_initializer(),
trainable=False,
collections=collections)
weight_var = variable_scope.get_variable(
"weight",
shape=weight.get_shape(),
dtype=weight.dtype,
initializer=init_ops.zeros_initializer(),
trainable=False,
collections=collections)
numerator = assign_moving_average(
value_x_weight_var, value * weight, decay, zero_debias=False)
denominator = assign_moving_average(
weight_var, weight, decay, zero_debias=False)
if truediv:
return math_ops.truediv(numerator, denominator, name=scope.name)
else:
return math_ops.div(numerator, denominator, name=scope.name)
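# Illustrative sketch (not part of the original module), with decay = 0.5 and
# weight = 1 on every step, feeding in value = 4 and then value = 2:
#   EMA of value * weight: 0 -> 2.0 -> 2.0    (0.5 * 0 + 0.5 * 4, then 0.5 * 2.0 + 0.5 * 2)
#   EMA of weight:         0 -> 0.5 -> 0.75
#   returned ratio:             4.0 -> 8 / 3
# Dividing by the weight EMA compensates for the zero initialization of both
# internal variables, which is consistent with both averages using
# zero_debias=False.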
def _zero_debias(unbiased_var, value, decay):
"""Compute the delta required for a debiased Variable.
All exponential moving averages initialized with Tensors are initialized to 0,
and therefore are biased to 0. Variables initialized to 0 and used as EMAs are
  similarly biased. This function computes the debiased update amount according
  to a scale factor, as in https://arxiv.org/abs/1412.6980.
  To demonstrate the bias that results from 0-initialization, take an EMA that
  was initialized to `0` with decay `b`. After `t` timesteps of seeing the
  constant `c`, the variable will have the following value:
```
EMA = 0*b^(t) + c*(1 - b)*b^(t-1) + c*(1 - b)*b^(t-2) + ...
= c*(1 - b^t)
```
To have the true value `c`, we would divide by the scale factor `1 - b^t`.
In order to perform debiasing, we use two shadow variables. One keeps track of
the biased estimate, and the other keeps track of the number of updates that
have occurred.
Args:
unbiased_var: A Variable representing the current value of the unbiased EMA.
value: A Tensor representing the most recent value.
decay: A Tensor representing `1-decay` for the EMA.
Returns:
The amount that the unbiased variable should be updated. Computing this
tensor will also update the shadow variables appropriately.
"""
with variable_scope.variable_scope(
unbiased_var.op.name, values=[unbiased_var, value, decay]) as scope:
with ops.colocate_with(unbiased_var):
with ops.init_scope():
biased_initializer = init_ops.zeros_initializer(
dtype=unbiased_var.dtype)(unbiased_var.get_shape())
local_step_initializer = init_ops.zeros_initializer()
def _maybe_get_unique(name):
"""Get name for a unique variable, if not `reuse=True`."""
if variable_scope.get_variable_scope().reuse:
return name
vs_vars = [x.op.name for x in
variable_scope.get_variable_scope().global_variables()]
full_name = variable_scope.get_variable_scope().name + "/" + name
if full_name not in vs_vars: return name
idx = 1
while full_name + ("_%d" % idx) in vs_vars:
idx += 1
return name + ("_%d" % idx)
biased_var = variable_scope.get_variable(
_maybe_get_unique("biased"), initializer=biased_initializer,
trainable=False)
local_step = variable_scope.get_variable(
_maybe_get_unique("local_step"),
shape=[],
dtype=unbiased_var.dtype,
initializer=local_step_initializer,
trainable=False)
# Get an update ops for both shadow variables.
update_biased = state_ops.assign_sub(biased_var,
(biased_var - value) * decay,
name=scope.name)
update_local_step = local_step.assign_add(1)
# Compute the value of the delta to update the unbiased EMA. Make sure to
# use the new values of the biased variable and the local step.
with ops.control_dependencies([update_biased, update_local_step]):
# This function gets `1 - decay`, so use `1.0 - decay` in the exponent.
unbiased_ema_delta = (unbiased_var - biased_var.read_value() /
(1 - math_ops.pow(
1.0 - decay, local_step.read_value())))
return unbiased_ema_delta
@tf_export("train.ExponentialMovingAverage")
class ExponentialMovingAverage(object):
"""Maintains moving averages of variables by employing an exponential decay.
When training a model, it is often beneficial to maintain moving averages of
the trained parameters. Evaluations that use averaged parameters sometimes
produce significantly better results than the final trained values.
The `apply()` method adds shadow copies of trained variables and add ops that
maintain a moving average of the trained variables in their shadow copies.
It is used when building the training model. The ops that maintain moving
averages are typically run after each training step.
The `average()` and `average_name()` methods give access to the shadow
variables and their names. They are useful when building an evaluation
model, or when restoring a model from a checkpoint file. They help use the
moving averages in place of the last trained values for evaluations.
The moving averages are computed using exponential decay. You specify the
decay value when creating the `ExponentialMovingAverage` object. The shadow
variables are initialized with the same initial values as the trained
variables. When you run the ops to maintain the moving averages, each
shadow variable is updated with the formula:
`shadow_variable -= (1 - decay) * (shadow_variable - variable)`
This is mathematically equivalent to the classic formula below, but the use
of an `assign_sub` op (the `"-="` in the formula) allows concurrent lockless
updates to the variables:
`shadow_variable = decay * shadow_variable + (1 - decay) * variable`
Reasonable values for `decay` are close to 1.0, typically in the
multiple-nines range: 0.999, 0.9999, etc.
Example usage when creating a training model:
```python
# Create variables.
var0 = tf.Variable(...)
var1 = tf.Variable(...)
# ... use the variables to build a training model...
...
# Create an op that applies the optimizer. This is what we usually
# would use as a training op.
opt_op = opt.minimize(my_loss, [var0, var1])
# Create an ExponentialMovingAverage object
ema = tf.train.ExponentialMovingAverage(decay=0.9999)
with tf.control_dependencies([opt_op]):
# Create the shadow variables, and add ops to maintain moving averages
# of var0 and var1. This also creates an op that will update the moving
# averages after each training step. This is what we will use in place
# of the usual training op.
training_op = ema.apply([var0, var1])
...train the model by running training_op...
```
There are two ways to use the moving averages for evaluations:
* Build a model that uses the shadow variables instead of the variables.
For this, use the `average()` method which returns the shadow variable
for a given variable.
* Build a model normally but load the checkpoint files to evaluate by using
the shadow variable names. For this use the `average_name()` method. See
the `tf.train.Saver` for more
information on restoring saved variables.
Example of restoring the shadow variable values:
```python
# Create a Saver that loads variables from their saved shadow values.
shadow_var0_name = ema.average_name(var0)
shadow_var1_name = ema.average_name(var1)
saver = tf.train.Saver({shadow_var0_name: var0, shadow_var1_name: var1})
saver.restore(...checkpoint filename...)
# var0 and var1 now hold the moving average values
```
"""
def __init__(self, decay, num_updates=None, zero_debias=False,
name="ExponentialMovingAverage"):
"""Creates a new ExponentialMovingAverage object.
The `apply()` method has to be called to create shadow variables and add
ops to maintain moving averages.
The optional `num_updates` parameter allows one to tweak the decay rate
dynamically. It is typical to pass the count of training steps, usually
kept in a variable that is incremented at each step, in which case the
decay rate is lower at the start of training. This makes moving averages
move faster. If passed, the actual decay rate used is:
`min(decay, (1 + num_updates) / (10 + num_updates))`
Args:
decay: Float. The decay to use.
num_updates: Optional count of number of updates applied to variables.
zero_debias: If `True`, zero debias moving-averages that are initialized
with tensors.
name: String. Optional prefix name to use for the name of ops added in
`apply()`.
"""
self._decay = decay
self._num_updates = num_updates
self._zero_debias = zero_debias
self._name = name
self._averages = {}
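  # Illustrative sketch of the num_updates schedule described above (not part of
  # the original class): with decay = 0.999 the effective decay
  # min(decay, (1 + num_updates) / (10 + num_updates)) is
  #   num_updates = 0     -> 0.1
  #   num_updates = 100   -> 101 / 110 ~= 0.918
  #   num_updates = 10000 -> capped at 0.999
  # so the averages track the variables quickly early in training and more
  # slowly later on.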
@property
def name(self):
"""The name of this ExponentialMovingAverage object."""
return self._name
def apply(self, var_list=None):
"""Maintains moving averages of variables.
`var_list` must be a list of `Variable` or `Tensor` objects. This method
creates shadow variables for all elements of `var_list`. Shadow variables
for `Variable` objects are initialized to the variable's initial value.
They will be added to the `GraphKeys.MOVING_AVERAGE_VARIABLES` collection.
For `Tensor` objects, the shadow variables are initialized to 0 and zero
debiased (see docstring in `assign_moving_average` for more details).
    Shadow variables are created with `trainable=False` and added to the
`GraphKeys.ALL_VARIABLES` collection. They will be returned by calls to
`tf.global_variables()`.
Returns an op that updates all shadow variables from the current value of
their associated variables.
Note that `apply()` can be called multiple times. When eager execution is
enabled each call to apply will update the variables once, so this needs to
be called in a loop.
Args:
var_list: A list of Variable or Tensor objects. The variables
and Tensors must be of types bfloat16, float16, float32, or float64.
Returns:
An Operation that updates the moving averages.
Raises:
TypeError: If the arguments are not an allowed type.
"""
# TODO(touts): op_scope
if var_list is None:
var_list = variables.trainable_variables()
zero_debias_true = set() # set of vars to set `zero_debias=True`
for var in var_list:
if var.dtype.base_dtype not in [
dtypes.bfloat16, dtypes.float16, dtypes.float32, dtypes.float64
]:
raise TypeError("The variables must be half, float, or double: %s" %
var.name)
if var not in self._averages:
# For variables: to lower communication bandwidth across devices we keep
# the moving averages on the same device as the variables. For other
# tensors, we rely on the existing device allocation mechanism.
with ops.init_scope():
if isinstance(var, variables.Variable):
avg = slot_creator.create_slot(var,
var.initialized_value(),
self.name,
colocate_with_primary=True)
# NOTE(mrry): We only add `tf.Variable` objects to the
# `MOVING_AVERAGE_VARIABLES` collection.
ops.add_to_collection(ops.GraphKeys.MOVING_AVERAGE_VARIABLES, var)
else:
avg = slot_creator.create_zeros_slot(
var,
self.name,
colocate_with_primary=(var.op.type in ["Variable",
"VariableV2",
"VarHandleOp"]))
if self._zero_debias:
zero_debias_true.add(avg)
self._averages[var] = avg
with ops.name_scope(self.name) as scope:
decay = ops.convert_to_tensor(self._decay, name="decay")
if self._num_updates is not None:
num_updates = math_ops.cast(self._num_updates,
dtypes.float32,
name="num_updates")
decay = math_ops.minimum(decay,
(1.0 + num_updates) / (10.0 + num_updates))
updates = []
for var in var_list:
zero_debias = self._averages[var] in zero_debias_true
updates.append(assign_moving_average(
self._averages[var], var, decay, zero_debias=zero_debias))
return control_flow_ops.group(*updates, name=scope)
def average(self, var):
"""Returns the `Variable` holding the average of `var`.
Args:
var: A `Variable` object.
Returns:
A `Variable` object or `None` if the moving average of `var`
is not maintained.
"""
return self._averages.get(var, None)
def average_name(self, var):
"""Returns the name of the `Variable` holding the average for `var`.
The typical scenario for `ExponentialMovingAverage` is to compute moving
averages of variables during training, and restore the variables from the
computed moving averages during evaluations.
To restore variables, you have to know the name of the shadow variables.
That name and the original variable can then be passed to a `Saver()` object
to restore the variable from the moving average value with:
`saver = tf.train.Saver({ema.average_name(var): var})`
`average_name()` can be called whether or not `apply()` has been called.
Args:
var: A `Variable` object.
Returns:
A string: The name of the variable that will be used or was used
by the `ExponentialMovingAverage class` to hold the moving average of
`var`.
"""
if var in self._averages:
return self._averages[var].op.name
return ops.get_default_graph().unique_name(
var.op.name + "/" + self.name, mark_as_used=False)
def variables_to_restore(self, moving_avg_variables=None):
"""Returns a map of names to `Variables` to restore.
If a variable has a moving average, use the moving average variable name as
the restore name; otherwise, use the variable name.
For example,
```python
variables_to_restore = ema.variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
```
Below is an example of such mapping:
```
conv/batchnorm/gamma/ExponentialMovingAverage: conv/batchnorm/gamma,
conv_4/conv2d_params/ExponentialMovingAverage: conv_4/conv2d_params,
global_step: global_step
```
Args:
      moving_avg_variables: a list of variables that require the use of the
moving variable name to be restored. If None, it will default to
variables.moving_average_variables() + variables.trainable_variables()
Returns:
A map from restore_names to variables. The restore_name can be the
      moving_average version of the variable name if it exists, or the original
variable name.
"""
name_map = {}
if moving_avg_variables is None:
# Include trainable variables and variables which have been explicitly
# added to the moving_average_variables collection.
moving_avg_variables = variables.trainable_variables()
moving_avg_variables += variables.moving_average_variables()
# Remove duplicates
moving_avg_variables = set(moving_avg_variables)
    # Collect all the variables with a moving average.
for v in moving_avg_variables:
name_map[self.average_name(v)] = v
# Make sure we restore variables without moving averages as well.
moving_avg_variable_names = set([v.name for v in moving_avg_variables])
for v in list(set(variables.global_variables())):
if v.name not in moving_avg_variable_names and v.op.name not in name_map:
name_map[v.op.name] = v
return name_map
| {
"content_hash": "9126bb91abcc95ea3e20ac2fae1dca29",
"timestamp": "",
"source": "github",
"line_count": 517,
"max_line_length": 80,
"avg_line_length": 41.65570599613153,
"alnum_prop": 0.6680906389301634,
"repo_name": "dongjoon-hyun/tensorflow",
"id": "fc9eb479cc3a0c3fd3dba4de7269b7894d3ec84c",
"size": "22225",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/training/moving_averages.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3301"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "446293"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "50950243"
},
{
"name": "CMake",
"bytes": "198845"
},
{
"name": "Dockerfile",
"bytes": "36908"
},
{
"name": "Go",
"bytes": "1285854"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "869263"
},
{
"name": "Jupyter Notebook",
"bytes": "2611125"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "62216"
},
{
"name": "Objective-C",
"bytes": "15634"
},
{
"name": "Objective-C++",
"bytes": "101475"
},
{
"name": "PHP",
"bytes": "5191"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "40335927"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "487251"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
import logging
from django import template
from django.utils.html import mark_safe
import cio.conf
from cio.pipeline import pipeline
from djedi.auth import has_permission
from djedi.utils.templates import render_embed
register = template.Library()
logger = logging.getLogger(__name__)
@register.simple_tag(takes_context=True)
def djedi_admin(context):
output = ""
if has_permission(context.get("request")):
defaults = {
node.uri.clone(version=None): node.initial
for node in pipeline.history.list("get")
}
output = render_embed(nodes=defaults)
# Clear pipeline
pipeline.clear()
return output
@register.simple_tag
def djedi_xss_domain():
domain = cio.conf.settings.get("XSS_DOMAIN")
if domain:
return mark_safe(f'<script>document.domain = "{domain}";</script>')
return ""
| {
"content_hash": "2fbaabdaed5620b2efb1284621d4c4c2",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 75,
"avg_line_length": 22.894736842105264,
"alnum_prop": 0.6816091954022988,
"repo_name": "5monkeys/djedi-cms",
"id": "8798839b1eaed7fe6c19fe6e200724e8adfcdd78",
"size": "870",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djedi/templatetags/djedi_admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "330222"
},
{
"name": "CoffeeScript",
"bytes": "38279"
},
{
"name": "Dockerfile",
"bytes": "535"
},
{
"name": "HTML",
"bytes": "14605"
},
{
"name": "JavaScript",
"bytes": "141445"
},
{
"name": "Less",
"bytes": "73752"
},
{
"name": "Makefile",
"bytes": "1489"
},
{
"name": "Python",
"bytes": "84579"
}
],
"symlink_target": ""
} |
from django.conf.urls.defaults import url, patterns
urlpatterns = patterns("symposion.boxes.views",
url(r"^([-\w]+)/edit/$", "box_edit", name="box_edit"),
) | {
"content_hash": "a6493064498cfc9a771e3b2037039bb6",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 58,
"avg_line_length": 27,
"alnum_prop": 0.6666666666666666,
"repo_name": "djangocon/symposion-2014",
"id": "dc57fe6bd7fe5147a39210669f116f9086cd5cc2",
"size": "162",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "symposion/boxes/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from tempest.lib.services.identity.v3 import groups_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestGroupsClient(base.BaseServiceTest):
FAKE_CREATE_GROUP = {
'group': {
'description': 'Tempest Group Description',
'domain_id': 'TempestDomain',
'name': 'Tempest Group',
}
}
FAKE_GROUP_INFO = {
'group': {
'description': 'Tempest Group Description',
'domain_id': 'TempestDomain',
'id': '6e13e2068cf9466e98950595baf6bb35',
'links': {
'self': 'http://example.com/identity/v3/groups/' +
'6e13e2068cf9466e98950595baf6bb35'
},
'name': 'Tempest Group',
}
}
FAKE_GROUP_LIST = {
'links': {
'self': 'http://example.com/identity/v3/groups',
'previous': None,
'next': None,
},
'groups': [
{
'description': 'Tempest Group One Description',
'domain_id': 'TempestDomain',
'id': '1c92f3453ed34291a074b87493455b8f',
'links': {
'self': 'http://example.com/identity/v3/groups/' +
'1c92f3453ed34291a074b87493455b8f'
},
'name': 'Tempest Group One',
},
{
'description': 'Tempest Group Two Description',
'domain_id': 'TempestDomain',
'id': 'ce9e7dafed3b4877a7d4466ed730a9ee',
'links': {
'self': 'http://example.com/identity/v3/groups/' +
'ce9e7dafed3b4877a7d4466ed730a9ee'
},
'name': 'Tempest Group Two',
},
]
}
FAKE_USER_LIST = {
'links': {
'self': 'http://example.com/identity/v3/groups/' +
'6e13e2068cf9466e98950595baf6bb35/users',
'previous': None,
'next': None,
},
'users': [
{
'domain_id': 'TempestDomain',
'description': 'Tempest Test User One Description',
'enabled': True,
'id': '642688fa65a84217b86cef3c063de2b9',
'name': 'TempestUserOne',
'links': {
'self': 'http://example.com/identity/v3/users/' +
'642688fa65a84217b86cef3c063de2b9'
}
},
{
'domain_id': 'TempestDomain',
'description': 'Tempest Test User Two Description',
'enabled': True,
'id': '1048ead6f8ef4a859b44ffbce3ac0b52',
'name': 'TempestUserTwo',
'links': {
'self': 'http://example.com/identity/v3/users/' +
'1048ead6f8ef4a859b44ffbce3ac0b52'
}
},
]
}
def setUp(self):
super(TestGroupsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = groups_client.GroupsClient(fake_auth, 'identity',
'regionOne')
def _test_create_group(self, bytes_body=False):
self.check_service_client_function(
self.client.create_group,
'tempest.lib.common.rest_client.RestClient.post',
self.FAKE_CREATE_GROUP,
bytes_body,
status=201,
)
def _test_show_group(self, bytes_body=False):
self.check_service_client_function(
self.client.show_group,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_GROUP_INFO,
bytes_body,
group_id='6e13e2068cf9466e98950595baf6bb35',
)
def _test_list_groups(self, bytes_body=False):
self.check_service_client_function(
self.client.list_groups,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_GROUP_LIST,
bytes_body,
)
def _test_update_group(self, bytes_body=False):
self.check_service_client_function(
self.client.update_group,
'tempest.lib.common.rest_client.RestClient.patch',
self.FAKE_GROUP_INFO,
bytes_body,
group_id='6e13e2068cf9466e98950595baf6bb35',
name='NewName',
)
def _test_list_users_in_group(self, bytes_body=False):
self.check_service_client_function(
self.client.list_group_users,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_USER_LIST,
bytes_body,
group_id='6e13e2068cf9466e98950595baf6bb35',
)
def test_create_group_with_string_body(self):
self._test_create_group()
def test_create_group_with_bytes_body(self):
self._test_create_group(bytes_body=True)
def test_show_group_with_string_body(self):
self._test_show_group()
def test_show_group_with_bytes_body(self):
self._test_show_group(bytes_body=True)
def test_list_groups_with_string_body(self):
self._test_list_groups()
def test_list_groups_with_bytes_body(self):
self._test_list_groups(bytes_body=True)
def test_update_group_with_string_body(self):
self._test_update_group()
def test_update_group_with_bytes_body(self):
self._test_update_group(bytes_body=True)
def test_list_users_in_group_with_string_body(self):
self._test_list_users_in_group()
def test_list_users_in_group_with_bytes_body(self):
self._test_list_users_in_group(bytes_body=True)
def test_delete_group(self):
self.check_service_client_function(
self.client.delete_group,
'tempest.lib.common.rest_client.RestClient.delete',
{},
group_id='6e13e2068cf9466e98950595baf6bb35',
status=204,
)
def test_add_user_to_group(self):
self.check_service_client_function(
self.client.add_group_user,
'tempest.lib.common.rest_client.RestClient.put',
{},
status=204,
group_id='6e13e2068cf9466e98950595baf6bb35',
user_id='642688fa65a84217b86cef3c063de2b9',
)
def test_check_user_in_group(self):
self.check_service_client_function(
self.client.check_group_user_existence,
'tempest.lib.common.rest_client.RestClient.head',
{},
status=204,
group_id='6e13e2068cf9466e98950595baf6bb35',
user_id='642688fa65a84217b86cef3c063de2b9',
)
| {
"content_hash": "97758a9c480fb7c78dbed0705e923150",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 71,
"avg_line_length": 34.085427135678394,
"alnum_prop": 0.5364882795223352,
"repo_name": "Tesora/tesora-tempest",
"id": "38cf3ae1c271d666a15aac37be7dd3c846d5b5b7",
"size": "7359",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tempest/tests/lib/services/identity/v3/test_groups_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3633718"
},
{
"name": "Shell",
"bytes": "9310"
}
],
"symlink_target": ""
} |
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
setup(
cmdclass = {'build_ext': build_ext},
ext_modules = [Extension("decoder", ["decoder.pyx"]),]
)
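# A typical way to build this kind of Cython extension in place (an assumption
# about usage, not stated in the repository) is:
#   python setup.py build_ext --inplace
# which compiles decoder.pyx into an importable module named "decoder".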
| {
"content_hash": "1753375b652c7a945735165e69f5a849",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 56,
"avg_line_length": 27.5,
"alnum_prop": 0.740909090909091,
"repo_name": "xiamike/stanford-ctc",
"id": "19c47c9790dd97f3f6364a707cf9f3c1f0e2af31",
"size": "221",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ctc_fast/new_decoder/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "19111"
},
{
"name": "CMake",
"bytes": "5218"
},
{
"name": "CSS",
"bytes": "1125"
},
{
"name": "HTML",
"bytes": "616"
},
{
"name": "JavaScript",
"bytes": "1699"
},
{
"name": "Makefile",
"bytes": "244"
},
{
"name": "Matlab",
"bytes": "38086"
},
{
"name": "Python",
"bytes": "242535"
},
{
"name": "Shell",
"bytes": "15591"
}
],
"symlink_target": ""
} |
from tweepy.mixins import DataMapping, HashableID
from tweepy.utils import parse_datetime
class List(HashableID, DataMapping):
"""The list object contains `Twitter Lists`_ metadata describing the
referenced List. The List object is the primary object returned in the List
lookup endpoint. When requesting additional List fields on this endpoint,
simply use the fields parameter ``list.fields``.
At the moment, the List object cannot be found as a child object from any
other data object. However, user objects can be found and expanded in the
user resource. These objects are available for expansion by adding
``owner_id`` to the ``expansions`` query parameter. Use the expansion with
the field parameter: ``list.fields`` when requesting additional fields to
complete the primary List object and ``user.fields`` to complete the
expansion object.
.. versionadded:: 4.4
Attributes
----------
data : dict
The JSON data representing the List.
id : str
The unique identifier of this List.
name : str
The name of the List, as defined when creating the List.
created_at : datetime.datetime | None
The UTC datetime that the List was created on Twitter.
description : str | None
A brief description to let users know about the List.
follower_count : int | None
        Shows how many users follow this List.
member_count : int | None
Shows how many members are part of this List.
private : bool | None
Indicates if the List is private.
owner_id : str | None
Unique identifier of this List's owner.
References
----------
https://developer.twitter.com/en/docs/twitter-api/data-dictionary/object-model/lists
.. _Twitter Lists: https://help.twitter.com/en/using-twitter/twitter-lists
"""
__slots__ = (
"data", "id", "name", "created_at", "description", "follower_count",
"member_count", "private", "owner_id"
)
def __init__(self, data):
self.data = data
self.id = data["id"]
self.name = data["name"]
self.created_at = data.get("created_at")
if self.created_at is not None:
self.created_at = parse_datetime(self.created_at)
self.description = data.get("description")
self.follower_count = data.get("follower_count")
self.member_count = data.get("member_count")
self.private = data.get("private")
self.owner_id = data.get("owner_id")
def __repr__(self):
return f"<List id={self.id} name={self.name}>"
def __str__(self):
return self.name
| {
"content_hash": "6cfb8b3d71522023f7998eb86f58815b",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 88,
"avg_line_length": 36.36986301369863,
"alnum_prop": 0.6508474576271186,
"repo_name": "tweepy/tweepy",
"id": "b2a5044246f2b6210fb07e5302defe2a08bf1907",
"size": "2731",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tweepy/list.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "656249"
}
],
"symlink_target": ""
} |
"""
Views for CAVE Language
"""
from cavelanguage.utils import template
from cavelanguage.models import Symbol, Collection, Category, Diagram, Contributor
@template('home.html')
def home(request):
"""
Home page.
"""
return {'location':'home'}
@template('symbol_library.html')
def symbol_library(request):
"""
The main symbol library.
"""
connector_symbols = Symbol.objects.active().filter(collection__name='Core',categories__name='Connectors')
data_symbols = Symbol.objects.active().filter(collection__name='Core',categories__name='Data')
context_symbols = Symbol.objects.active().filter(collection__name='Core',categories__name='Context')
conditional_symbols = Symbol.objects.active().filter(collection__name='Core',categories__name='Conditionals')
mode_symbols = Symbol.objects.active().filter(collection__name='Core',categories__name='Modes')
container_symbols = Symbol.objects.active().filter(collection__name='Core',categories__name='Containers')
extensions = Collection.objects.exclude(name='Core')
return {'location':'symbol_library',
'connector_symbols':connector_symbols,
'data_symbols':data_symbols,
'context_symbols':context_symbols,
'conditional_symbols':conditional_symbols,
'mode_symbols':mode_symbols,
'container_symbols':container_symbols,
'extensions':extensions}
@template('symbol.html')
def symbol(request,slug):
"""
Shows the symbol.
"""
sym = Symbol.objects.get(slug=slug)
return {'symbol':sym,'location':'symbol_library'}
@template('collection.html')
def collection(request,slug):
"""
Gets the collection.
"""
col = Collection.objects.get(slug=slug)
return {'collection':col,'location':'symbol_library'}
@template('category.html')
def category(request,collection_slug,slug):
"""
Gets the category.
"""
cat = Category.objects.get(slug=slug)
return {'category':cat,'location':'symbol_library'}
@template('diagram.html')
def diagram(request,diagram_id,diagram_slug):
"""
Gets the diagram.
"""
diagram = Diagram.objects.get(pk=diagram_id)
return {'diagram':diagram,'location':'diagrams'}
@template('diagrams.html')
def diagrams(request):
"""
Main diagrams page.
"""
return {'location':'diagrams','diagrams':Diagram.objects.all()}
@template('contributors.html')
def contributors(request):
"""
Contributors to CAVE.
"""
return {'location':'contributors','contributors':Contributor.objects.all()}
| {
"content_hash": "022e8911968b58b62a8e5ca8f8637ee2",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 113,
"avg_line_length": 31.621951219512194,
"alnum_prop": 0.6702661010412649,
"repo_name": "Axilent/cave-language",
"id": "84ae2bc2d1c05b49fda643294a7d948cebebf4ca",
"size": "2593",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cavelanguage/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "245"
},
{
"name": "HTML",
"bytes": "23813"
},
{
"name": "Python",
"bytes": "24840"
}
],
"symlink_target": ""
} |
import json
import urllib
import urlparse
import logging
import cookielib
import dateutil.tz
from datetime import datetime
from StringIO import StringIO
from collections import OrderedDict
from twisted.web.client import Agent, CookieAgent, FileBodyProducer
from twisted.web.http_headers import Headers
from twisted.internet.protocol import Protocol
from twisted.internet import reactor, defer
from twisted.web._newclient import _WrapperException
from requests.cookies import create_cookie
class Response(object):
def __init__(self, status_code, headers, cookiejar, content):
self.status_code = status_code
self.headers = headers
self.cookiejar = cookiejar
self.content = content
@property
def json(self):
return json.loads(self.content)
@property
def cookies(self):
return {cookie.name: cookie.value for cookie in self.cookiejar}
class BodyReceiver(Protocol):
def __init__(self, status_code, headers, cookiejar, finished):
self.status_code = status_code
self.headers = headers
self.cookiejar = cookiejar
self.finished = finished
self.data = ''
def dataReceived(self, bytes):
self.data += bytes
def connectionLost(self, reason):
self.finished.callback(Response(self.status_code,
self.headers,
self.cookiejar,
self.data))
def http_request(method, url, params=None, data=None, headers=None, cookies=None, timeout=30, ignore_errors=True):
    # Use fresh dicts so caller-supplied (or default) params/headers are never mutated.
    params = dict(params) if params else {}
    headers = dict(headers) if headers else {}
    # Urlencode does not accept unicode, so convert to str first
    url = url.encode('utf-8') if isinstance(url, unicode) else url
for k, v in params.items():
params[k] = v.encode('utf-8') if isinstance(v, unicode) else v
for k, v in headers.items():
headers[k] = v.encode('utf-8') if isinstance(v, unicode) else v
# Add any additional params to the url
url_parts = list(urlparse.urlparse(url))
query = dict(urlparse.parse_qsl(url_parts[4]))
query.update(params)
url_parts[4] = urllib.urlencode(query, doseq=True)
url = urlparse.urlunparse(url_parts)
# Handle cookies
if isinstance(cookies, cookielib.CookieJar):
cookiejar = cookies
else:
cookiejar = cookielib.CookieJar()
for name, value in (cookies or {}).iteritems():
cookiejar.set_cookie(create_cookie(name=name, value=value))
# Urlencode the data, if needed
if isinstance(data, dict):
data = urllib.urlencode(data)
headers['Content-Type'] = 'application/x-www-form-urlencoded'
agent = Agent(reactor, connectTimeout=timeout)
cookie_agent = CookieAgent(agent, cookiejar)
body = FileBodyProducer(StringIO(data)) if data else None
d = cookie_agent.request(method, url, Headers({k: [v] for k, v in headers.iteritems()}), body)
def handle_response(response, cookiejar):
if 'audio/mpeg' in response.headers.getRawHeaders('content-type')[-1]:
# Don't download any multimedia files
            raise Exception('response contains a multimedia file')
d = defer.Deferred()
response.deliverBody(BodyReceiver(response.code,
dict(response.headers.getAllRawHeaders()),
cookiejar,
d))
return d
def handle_error(error):
if isinstance(error, _WrapperException):
reason = ', '.join(error.reasons)
else:
reason = error.getErrorMessage()
logger = logging.getLogger(__name__)
logger.error('Failed to GET %s (reason: %s)', url, reason)
return Response(0, {}, cookielib.CookieJar(), '')
d.addCallback(handle_response, cookiejar)
if ignore_errors:
d.addErrback(handle_error)
return d
def get_request(url, **kwargs):
return http_request('GET', url, **kwargs)
def post_request(url, **kwargs):
return http_request('POST', url, **kwargs)
def put_request(url, **kwargs):
return http_request('PUT', url, **kwargs)
def delete_request(url, **kwargs):
return http_request('DELETE', url, **kwargs)
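# Minimal usage sketch (hypothetical URL, not part of the original module): each
# helper returns a twisted Deferred, so results are consumed via callbacks, e.g.
#   d = get_request('http://example.com/api', params={'q': 'term'}, timeout=10)
#   d.addCallback(lambda response: response.json if response.status_code == 200 else None)
# With ignore_errors=True (the default) failures resolve to an empty Response
# with status_code 0 instead of firing the errback chain.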
# From: http://stackoverflow.com/questions/2437617/limiting-the-size-of-a-python-dictionary
class LimitedSizeDict(OrderedDict):
def __init__(self, *args, **kwds):
self.size_limit = kwds.pop("size_limit", None)
OrderedDict.__init__(self, *args, **kwds)
self._check_size_limit()
def __setitem__(self, key, value):
OrderedDict.__setitem__(self, key, value)
self._check_size_limit()
def _check_size_limit(self):
if self.size_limit is not None:
while len(self) > self.size_limit:
self.popitem(last=False)
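# Usage sketch (not part of the original module): the oldest entries are evicted
# once size_limit is exceeded.
#   cache = LimitedSizeDict(size_limit=2)
#   cache['a'] = 1
#   cache['b'] = 2
#   cache['c'] = 3   # 'a' is evicted; remaining keys are ['b', 'c']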
def parse_title(title):
# Try to split the title into artist and name components
if title.count(' - ') == 1:
artist_name, track_name = title.split(' - ', 1)
return artist_name, track_name
return None
def ts_to_rfc3339(ts):
dt = datetime.utcfromtimestamp(ts)
return dt.isoformat("T") + "Z"
def datetime_to_ts(dt):
epoch_dt = datetime(1970, 1, 1, tzinfo=dateutil.tz.tzoffset(None, 0))
return int((dt - epoch_dt).total_seconds())
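# Example values (not part of the original module): ts_to_rfc3339(0) returns
# '1970-01-01T00:00:00Z', and datetime_to_ts maps a timezone-aware
# datetime(1970, 1, 1, tzinfo=dateutil.tz.tzutc()) back to 0.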
def build_bool_query(bool_type, match_dict, nested_path=None):
query = {'query': {'bool': {bool_type: []}}}
for key, value in match_dict.iteritems():
query['query']['bool'][bool_type].append({'match': {key: value}})
if nested_path:
query = {'nested': query}
query['nested']['path'] = nested_path
return query
def build_simple_query(query, fields=['_all']):
return {'query': {'simple_query_string': {'query': query, 'fields': fields}}}
def build_query(query, field='_all'):
return {'query': {'query_string': {'query': query, 'default_field': field}}}
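# Usage sketch (field names are hypothetical): build_bool_query('must',
# {'artist': 'Nirvana'}, nested_path='tracks') produces
#   {'nested': {'query': {'bool': {'must': [{'match': {'artist': 'Nirvana'}}]}},
#               'path': 'tracks'}}
# while build_simple_query('nirvana', fields=['title']) wraps the text in a
# simple_query_string clause. With several keys in match_dict, the clause order
# follows dict iteration order.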
| {
"content_hash": "a6a5cf793b846b44c62423a72f046e4a",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 110,
"avg_line_length": 31.972826086956523,
"alnum_prop": 0.6328403875573687,
"repo_name": "mmc-tudelft/billy",
"id": "dd2c00185ee5e1e0cb824dbf2ed3618a281b8b06",
"size": "5883",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "server/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4321"
},
{
"name": "HTML",
"bytes": "17285"
},
{
"name": "JavaScript",
"bytes": "34479"
},
{
"name": "Python",
"bytes": "71926"
}
],
"symlink_target": ""
} |
import argparse
from matplotlib import pyplot as plt
class Result():
params = {}
iterations = []
def __init__(self, lines):
info = lines[0].split()
self.params = {
'population_size': int(info[0]),
'number_of_tournaments': int(info[1]),
'tournament_size': int(info[2]),
'mutation_probability': float(info[3]),
'board_fill': int(info[4])
}
self.iterations = [line.split() for line in lines[1:]]
def read_files(file_list):
results = []
for filename in file_list:
with open(filename) as f:
lines = f.readlines()
results.append(Result(lines))
return results
def plot_by_fill_ratio(results, y_column):
params = results[0].params
figure_label = "Population: {0}, mutation probability: {1}".format(params['population_size'],
params['mutation_probability'])
figure = plt.figure()
figure.suptitle(figure_label)
fills = set(result.params['board_fill'] for result in results)
for fill in sorted(fills):
selected = next(result.iterations for result in results if result.params['board_fill'] == fill)
        x = [float(l[0]) for l in selected]
        values = [float(l[y_column]) for l in selected]
        label = "fill ratio {0:.2f}".format(fill / 81.0)
        plt.plot(x, values, '-', label=label)
plt.ylabel("error value")
plt.xlabel("iteration number")
plt.legend()
plt.show()
def plot_by_mutation_probability(results, y_column):
params = results[0].params
figure_label = "Population: {0}, fill ratio: {1}".format(params['population_size'],
params['board_fill'])
figure = plt.figure()
figure.suptitle(figure_label)
probabilities = set(result.params['mutation_probability'] for result in results)
for p in sorted(probabilities):
selected = next(result.iterations for result in results if result.params['mutation_probability'] == p)
        x = [float(l[0]) for l in selected]
        values = [float(l[y_column]) for l in selected]
label = "mutation prob. {0}".format(p)
plt.plot(x, values, '-', label=label)
plt.ylabel("error value")
plt.xlabel("iteration number")
plt.legend()
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--type', required=True, help="type of the plot: mutation, fill")
parser.add_argument('-v', '--value', help="values to print: min, mean", default="min")
parser.add_argument('files', nargs='+', help="files with results")
args = parser.parse_args()
results = read_files(args.files)
if args.value == "mean":
y_column = 2
else:
y_column = 1
if args.type == 'mutation':
plot_by_mutation_probability(results, y_column)
elif args.type == 'fill':
plot_by_fill_ratio(results, y_column)
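# Editor's note (not part of the original script): an example invocation, with
# illustrative result file names:
#
#   python plot.py --type mutation --value mean results_a.txt results_b.txt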
| {
"content_hash": "a94736bec3421c0a8a27de264ac2e46d",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 110,
"avg_line_length": 30.363636363636363,
"alnum_prop": 0.583166999334664,
"repo_name": "konradstrack/stochastic-sudoku-solver",
"id": "6f44dbeaa8e8820c700b78da780aecdcb07f65dc",
"size": "3006",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/plot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "49713"
}
],
"symlink_target": ""
} |
"""Resize images in a folder using imagemagick command line tools.
http://hakanu.net
"""
import glob
import os
def main():
    print 'Started'
    images = glob.glob("/home/h/Desktop/all_karikatur_resized/*.jpg")
    counter = 0
    for image in images:
        print 'Processing: ', image
        # File name without its directory or '.jpg' extension.
        index = image[image.rfind('/') + 1:image.rfind('.jpg')]
        print 'index: ', index
        # Use the full source path so the command works regardless of the
        # current working directory; the resized copy is written next to it.
        output = os.path.join(os.path.dirname(image), "resize_128_" + index + ".jpg")
        os.system("convert " + image + " -resize 128x128 " + output)
        counter += 1
        if counter % 100 == 0:
            print 'Completed: ', counter
            print '\n'
main()
| {
"content_hash": "4f343e1fe2cd019d9a7f8b87a074ebd0",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 88,
"avg_line_length": 24.304347826086957,
"alnum_prop": 0.6243291592128801,
"repo_name": "hakanu/scripts",
"id": "a9836284221d7bd240a52447bd76b631ea410043",
"size": "559",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "batch_image_resizer2/batch_image_resizer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "559"
}
],
"symlink_target": ""
} |
from flask import request
from weblab.core.wl import weblab_api
@weblab_api.route_web('/language/')
def language():
accept_language = request.headers.get('Accept-Language')
if accept_language is None:
return 'var acceptLanguageHeader = null;'
else:
return 'var acceptLanguageHeader = "%s";' % accept_language
| {
"content_hash": "9a2b3889a843574bce7d9ef6accf0d39",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 67,
"avg_line_length": 30.818181818181817,
"alnum_prop": 0.6991150442477876,
"repo_name": "zstars/weblabdeusto",
"id": "800db39b8e955667dbd9626031c036a2f30538aa",
"size": "783",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/src/weblab/core/web/language.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "ASP",
"bytes": "4785"
},
{
"name": "ActionScript",
"bytes": "8508"
},
{
"name": "ApacheConf",
"bytes": "122186"
},
{
"name": "Batchfile",
"bytes": "7753"
},
{
"name": "C",
"bytes": "19456"
},
{
"name": "C#",
"bytes": "315160"
},
{
"name": "C++",
"bytes": "9547"
},
{
"name": "CSS",
"bytes": "150709"
},
{
"name": "CoffeeScript",
"bytes": "30909"
},
{
"name": "Go",
"bytes": "7076"
},
{
"name": "HTML",
"bytes": "452001"
},
{
"name": "Java",
"bytes": "1234794"
},
{
"name": "JavaScript",
"bytes": "1656027"
},
{
"name": "Makefile",
"bytes": "1571"
},
{
"name": "Mako",
"bytes": "824"
},
{
"name": "PHP",
"bytes": "155137"
},
{
"name": "Python",
"bytes": "3435335"
},
{
"name": "Shell",
"bytes": "2596"
},
{
"name": "Smarty",
"bytes": "20160"
},
{
"name": "VHDL",
"bytes": "5874"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
version = '1.0'
setup(
name='tn.plonehtmlimagecache',
version=version,
description='',
classifiers=[
"Framework :: Plone",
"Programming Language :: Python",
],
keywords='',
author='TN Tecnologia e Negocios',
author_email='[email protected]',
url='http://www.tecnologiaenegocios.com.br',
packages=find_packages('src'),
package_dir={'': 'src'},
namespace_packages=['tn'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
'five.grok',
'Plone',
'plone.directives.form',
'plone.behavior',
'plone.app.intid',
'plone.namedfile[blobs]',
'lxml',
],
extras_require={
'test': [
'stubydoo',
'zope.app.testing',
]
},
entry_points="""
[z3c.autoinclude.plugin]
target = plone
""",
)
| {
"content_hash": "307359e80e6efd8bc8a264ec381b2041",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 49,
"avg_line_length": 22.88095238095238,
"alnum_prop": 0.554630593132154,
"repo_name": "tecnologiaenegocios/tn.plonehtmlimagecache",
"id": "f12e623665fe15791b0d552bf26d1a9ea9847bbe",
"size": "961",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "65298"
}
],
"symlink_target": ""
} |
__author__ = 'innopath'
| {
"content_hash": "fcea760b40a8f9fe5f38fe6f6938dc46",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 23,
"avg_line_length": 24,
"alnum_prop": 0.5833333333333334,
"repo_name": "xinquanking/AndJoin",
"id": "03d774b81c4755ff04d6fd338a72d456e10398f9",
"size": "24",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tools/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "369281"
},
{
"name": "Shell",
"bytes": "682"
},
{
"name": "TypeScript",
"bytes": "67"
}
],
"symlink_target": ""
} |
"""A specfile interface.
Used mainly by transformer and tests to represent
a conventional specfile as a structure of sections.
"""
try:
from functools import reduce
except ImportError:
pass # we're on Python 2 => ok
import re
from spec2scl import settings
class Specfile(object):
    """A specfile interface."""
def __init__(self, specfile):
if not isinstance(specfile, str):
specfile = ''.join(specfile)
self.specfile = specfile
self.sections = self.split_sections()
def split_sections(self):
"""Split a specfile into sections.
Return:
list of (section name, section text)
"""
headers_re = [re.compile('^' + x + '\\b', re.M) for x in settings.SPECFILE_SECTIONS]
section_starts = []
for header in headers_re:
for match in header.finditer(self.specfile):
section_starts.append(match.start())
section_starts.sort()
# this is mainly for tests - if the header is the only section
header_end = section_starts[0] if section_starts else len(self.specfile)
sections = [('%header', self.specfile[:header_end])]
for i in range(len(section_starts)):
if len(section_starts) > i + 1:
curr_section = self.specfile[section_starts[i]: section_starts[i + 1]]
else:
curr_section = self.specfile[section_starts[i]:]
for header in headers_re:
if header.match(curr_section):
sections.append((header.pattern[1:-2], curr_section))
return sections
def __contains__(self, what):
return reduce(lambda x, y: x or (what in y[1]), self.sections, False)
def __str__(self):
# in tests (maybe in reality, too), we may have an empty header, which will result in
# putting unnecessary newlines on top => leave out empty sections from joining
return '\n\n'.join([section for section in list(zip(*self.sections))[1] if section])
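# Editor's sketch (not part of the original module): the section structure for
# a tiny spec; the actual split depends on settings.SPECFILE_SECTIONS, so the
# '%build' section here is only illustrative.
def _specfile_example():
    spec = Specfile("Name: foo\nVersion: 1.0\n\n%build\nmake\n")
    # spec.sections -> [('%header', 'Name: foo\nVersion: 1.0\n\n'),
    #                   ('%build', '%build\nmake\n')]
    return '%build' in spec  # True, via Specfile.__contains__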
| {
"content_hash": "ed603780743af8c6532ede539b111075",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 93,
"avg_line_length": 34.016666666666666,
"alnum_prop": 0.6080352768250857,
"repo_name": "sclorg/spec2scl",
"id": "5fe5e7324fe391fcb879e7d9647cc4da58ef6e2c",
"size": "2041",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "spec2scl/specfile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41446"
},
{
"name": "Roff",
"bytes": "2162"
}
],
"symlink_target": ""
} |
from fuel_agent import errors
from fuel_agent.openstack.common import log as logging
from fuel_agent.utils import utils
LOG = logging.getLogger(__name__)
def pvdisplay():
# unit m means MiB (power of 2)
output = utils.execute(
'pvdisplay',
'-C',
'--noheading',
'--units', 'm',
'--options', 'pv_name,vg_name,pv_size,dev_size,pv_uuid',
'--separator', ';',
check_exit_code=[0])[0]
return pvdisplay_parse(output)
def pvdisplay_parse(output):
pvs = []
for line in output.split('\n'):
line = line.strip()
if not line:
continue
pv_params = line.split(';')
pvs.append({
'name': pv_params[0],
'vg': pv_params[1] or None,
'psize': utils.parse_unit(pv_params[2], 'm'),
'devsize': utils.parse_unit(pv_params[3], 'm'),
'uuid': pv_params[4]
})
LOG.debug('Found physical volumes: {0}'.format(pvs))
return pvs
def pvcreate(pvname, metadatasize=64, metadatacopies=2):
# check if pv already exists
if filter(lambda x: x['name'] == pvname, pvdisplay()):
raise errors.PVAlreadyExistsError(
'Error while creating pv: pv %s already exists' % pvname)
utils.execute('pvcreate',
'--metadatacopies', str(metadatacopies),
'--metadatasize', str(metadatasize) + 'm',
pvname, check_exit_code=[0])
def pvremove(pvname):
pv = filter(lambda x: x['name'] == pvname, pvdisplay())
# check if pv exists
if not pv:
raise errors.PVNotFoundError(
'Error while removing pv: pv %s not found' % pvname)
# check if pv is attached to some vg
if pv[0]['vg'] is not None:
raise errors.PVBelongsToVGError('Error while removing pv: '
'pv belongs to vg %s' % pv[0]['vg'])
utils.execute('pvremove', '-ff', '-y', pvname, check_exit_code=[0])
def vgdisplay():
output = utils.execute(
'vgdisplay',
'-C',
'--noheading',
'--units', 'm',
'--options', 'vg_name,vg_uuid,vg_size,vg_free',
'--separator', ';',
check_exit_code=[0])[0]
return vgdisplay_parse(output)
def vgdisplay_parse(output):
vgs = []
for line in output.split('\n'):
line = line.strip()
if not line:
continue
vg_params = line.split(';')
vgs.append({
'name': vg_params[0],
'uuid': vg_params[1],
'size': utils.parse_unit(vg_params[2], 'm'),
'free': utils.parse_unit(vg_params[3], 'm', ceil=False)
})
LOG.debug('Found volume groups: {0}'.format(vgs))
return vgs
def _vg_attach_validate(pvnames):
pvs = pvdisplay()
# check if all necessary pv exist
if not set(pvnames).issubset(set([pv['name'] for pv in pvs])):
raise errors.PVNotFoundError(
'Error while creating vg: at least one of pv is not found')
# check if all necessary pv are not already attached to some vg
if not set(pvnames).issubset(
set([pv['name'] for pv in pvs if pv['vg'] is None])):
raise errors.PVBelongsToVGError(
'Error while creating vg: at least one of pvs is '
'already attached to some vg')
def vgcreate(vgname, pvname, *args):
# check if vg already exists
if filter(lambda x: x['name'] == vgname, vgdisplay()):
raise errors.VGAlreadyExistsError(
'Error while creating vg: vg %s already exists' % vgname)
pvnames = [pvname] + list(args)
_vg_attach_validate(pvnames)
utils.execute('vgcreate', vgname, *pvnames, check_exit_code=[0])
def vgextend(vgname, pvname, *args):
# check if vg exists
if not filter(lambda x: x['name'] == vgname, vgdisplay()):
raise errors.VGNotFoundError(
'Error while extending vg: vg %s not found' % vgname)
pvnames = [pvname] + list(args)
_vg_attach_validate(pvnames)
utils.execute('vgextend', vgname, *pvnames, check_exit_code=[0])
def vgreduce(vgname, pvname, *args):
# check if vg exists
if not filter(lambda x: x['name'] == vgname, vgdisplay()):
raise errors.VGNotFoundError(
'Error while reducing vg: vg %s not found' % vgname)
pvnames = [pvname] + list(args)
# check if all necessary pv are attached to vg
if not set(pvnames).issubset(
set([pv['name'] for pv in pvdisplay() if pv['vg'] == vgname])):
raise errors.PVNotFoundError(
'Error while reducing vg: at least one of pv is '
'not attached to vg')
utils.execute('vgreduce', '-f', vgname, *pvnames, check_exit_code=[0])
def vgremove(vgname):
# check if vg exists
if not filter(lambda x: x['name'] == vgname, vgdisplay()):
raise errors.VGNotFoundError(
'Error while removing vg: vg %s not found' % vgname)
utils.execute('vgremove', '-f', vgname, check_exit_code=[0])
def lvdisplay():
output = utils.execute(
'lvdisplay',
'-C',
'--noheading',
'--units', 'm',
#NOTE(agordeev): lv_path had been removed from options
# since versions of lvdisplay prior 2.02.68 don't have it.
'--options', 'lv_name,lv_size,vg_name,lv_uuid',
'--separator', ';',
check_exit_code=[0])[0]
return lvdisplay_parse(output)
def lvdisplay_parse(output):
lvs = []
for line in output.split('\n'):
line = line.strip()
if not line:
continue
lv_params = line.split(';')
lvs.append({
'name': lv_params[0],
'size': utils.parse_unit(lv_params[1], 'm'),
'vg': lv_params[2],
'uuid': lv_params[3],
#NOTE(agordeev): simulate lv_path with '/dev/$vg_name/$lv_name'
'path': '/dev/%s/%s' % (lv_params[2], lv_params[0])
})
LOG.debug('Found logical volumes: {0}'.format(lvs))
return lvs
def lvcreate(vgname, lvname, size):
vg = filter(lambda x: x['name'] == vgname, vgdisplay())
# check if vg exists
if not vg:
raise errors.VGNotFoundError(
'Error while creating vg: vg %s not found' % vgname)
# check if enough space is available
if vg[0]['free'] < size:
raise errors.NotEnoughSpaceError(
'Error while creating lv: vg %s has only %s m of free space, '
'but at least %s m is needed' % (vgname, vg[0]['free'], size))
# check if lv already exists
if filter(lambda x: x['name'] == lvname and x['vg'] == vgname,
lvdisplay()):
raise errors.LVAlreadyExistsError(
'Error while creating lv: lv %s already exists' % lvname)
utils.execute('lvcreate', '-L', '%sm' % size, '-n', lvname,
vgname, check_exit_code=[0])
def lvremove(lvpath):
# check if lv exists
if not filter(lambda x: x['path'] == lvpath, lvdisplay()):
raise errors.LVNotFoundError(
'Error while removing lv: lv %s not found' % lvpath)
utils.execute('lvremove', '-f', lvpath, check_exit_code=[0])
def lvremove_all():
for lv in lvdisplay():
lvremove(lv['path'])
def vgremove_all():
for vg in vgdisplay():
vgremove(vg['name'])
def pvremove_all():
for pv in pvdisplay():
pvremove(pv['name'])
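# Editor's sketch (not part of the original module): the order in which these
# helpers are typically combined. Device and volume names are illustrative and
# every call shells out to the LVM tools, so this is only an outline.
def _lvm_usage_example():
    pvcreate('/dev/sdb1')
    vgcreate('vg0', '/dev/sdb1')
    lvcreate('vg0', 'lv_root', 1024)  # size is given in MiB
    lvremove('/dev/vg0/lv_root')
    vgremove('vg0')
    pvremove('/dev/sdb1')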
| {
"content_hash": "4c0bcf53c331d047de6f7538698690b8",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 76,
"avg_line_length": 32.693333333333335,
"alnum_prop": 0.5710984230560087,
"repo_name": "zhaochao/fuel-web",
"id": "8dc2a2911311c48e3ddb9c9f639561048a5ecb69",
"size": "7934",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fuel_agent/fuel_agent/utils/lvm_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "109800"
},
{
"name": "HTML",
"bytes": "16017"
},
{
"name": "JavaScript",
"bytes": "705662"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Puppet",
"bytes": "282"
},
{
"name": "Python",
"bytes": "3493678"
},
{
"name": "Ruby",
"bytes": "33590"
},
{
"name": "Shell",
"bytes": "26585"
}
],
"symlink_target": ""
} |
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import types
from collections import namedtuple
from typing import Any, ClassVar, Dict, List, Optional, TYPE_CHECKING, Tuple, Type, TypeVar, Iterator, Mapping
__all__ = (
'Enum',
'ChannelType',
'MessageType',
'SpeakingState',
'VerificationLevel',
'ContentFilter',
'Status',
'DefaultAvatar',
'AuditLogAction',
'AuditLogActionCategory',
'UserFlags',
'ActivityType',
'NotificationLevel',
'TeamMembershipState',
'WebhookType',
'ExpireBehaviour',
'ExpireBehavior',
'StickerType',
'StickerFormatType',
'InviteTarget',
'VideoQualityMode',
'ComponentType',
'ButtonStyle',
'TextStyle',
'PrivacyLevel',
'InteractionType',
'InteractionResponseType',
'NSFWLevel',
'MFALevel',
'Locale',
'EntityType',
'EventStatus',
'AppCommandType',
'AppCommandOptionType',
'AppCommandPermissionType',
'AutoModRuleTriggerType',
'AutoModRuleEventType',
'AutoModRuleActionType',
)
if TYPE_CHECKING:
from typing_extensions import Self
def _create_value_cls(name: str, comparable: bool):
    # All the type ignores here are due to the type checker being unable to
    # recognise runtime type creation without exploding.
cls = namedtuple('_EnumValue_' + name, 'name value')
cls.__repr__ = lambda self: f'<{name}.{self.name}: {self.value!r}>' # type: ignore
cls.__str__ = lambda self: f'{name}.{self.name}' # type: ignore
if comparable:
cls.__le__ = lambda self, other: isinstance(other, self.__class__) and self.value <= other.value # type: ignore
cls.__ge__ = lambda self, other: isinstance(other, self.__class__) and self.value >= other.value # type: ignore
cls.__lt__ = lambda self, other: isinstance(other, self.__class__) and self.value < other.value # type: ignore
cls.__gt__ = lambda self, other: isinstance(other, self.__class__) and self.value > other.value # type: ignore
return cls
def _is_descriptor(obj):
return hasattr(obj, '__get__') or hasattr(obj, '__set__') or hasattr(obj, '__delete__')
class EnumMeta(type):
if TYPE_CHECKING:
__name__: ClassVar[str]
_enum_member_names_: ClassVar[List[str]]
_enum_member_map_: ClassVar[Dict[str, Any]]
_enum_value_map_: ClassVar[Dict[Any, Any]]
def __new__(cls, name: str, bases: Tuple[type, ...], attrs: Dict[str, Any], *, comparable: bool = False) -> Self:
value_mapping = {}
member_mapping = {}
member_names = []
value_cls = _create_value_cls(name, comparable)
for key, value in list(attrs.items()):
is_descriptor = _is_descriptor(value)
if key[0] == '_' and not is_descriptor:
continue
# Special case classmethod to just pass through
if isinstance(value, classmethod):
continue
if is_descriptor:
setattr(value_cls, key, value)
del attrs[key]
continue
try:
new_value = value_mapping[value]
except KeyError:
new_value = value_cls(name=key, value=value)
value_mapping[value] = new_value
member_names.append(key)
member_mapping[key] = new_value
attrs[key] = new_value
attrs['_enum_value_map_'] = value_mapping
attrs['_enum_member_map_'] = member_mapping
attrs['_enum_member_names_'] = member_names
attrs['_enum_value_cls_'] = value_cls
actual_cls = super().__new__(cls, name, bases, attrs)
value_cls._actual_enum_cls_ = actual_cls # type: ignore # Runtime attribute isn't understood
return actual_cls
def __iter__(cls) -> Iterator[Any]:
return (cls._enum_member_map_[name] for name in cls._enum_member_names_)
def __reversed__(cls) -> Iterator[Any]:
return (cls._enum_member_map_[name] for name in reversed(cls._enum_member_names_))
def __len__(cls) -> int:
return len(cls._enum_member_names_)
def __repr__(cls) -> str:
return f'<enum {cls.__name__}>'
@property
def __members__(cls) -> Mapping[str, Any]:
return types.MappingProxyType(cls._enum_member_map_)
def __call__(cls, value: str) -> Any:
try:
return cls._enum_value_map_[value]
except (KeyError, TypeError):
raise ValueError(f"{value!r} is not a valid {cls.__name__}")
def __getitem__(cls, key: str) -> Any:
return cls._enum_member_map_[key]
def __setattr__(cls, name: str, value: Any) -> None:
raise TypeError('Enums are immutable.')
def __delattr__(cls, attr: str) -> None:
raise TypeError('Enums are immutable')
def __instancecheck__(self, instance: Any) -> bool:
# isinstance(x, Y)
# -> __instancecheck__(Y, x)
try:
return instance._actual_enum_cls_ is self
except AttributeError:
return False
if TYPE_CHECKING:
from enum import Enum
else:
class Enum(metaclass=EnumMeta):
@classmethod
def try_value(cls, value):
try:
return cls._enum_value_map_[value]
except (KeyError, TypeError):
return value
class ChannelType(Enum):
text = 0
private = 1
voice = 2
group = 3
category = 4
news = 5
news_thread = 10
public_thread = 11
private_thread = 12
stage_voice = 13
forum = 15
def __str__(self) -> str:
return self.name
class MessageType(Enum):
default = 0
recipient_add = 1
recipient_remove = 2
call = 3
channel_name_change = 4
channel_icon_change = 5
pins_add = 6
new_member = 7
premium_guild_subscription = 8
premium_guild_tier_1 = 9
premium_guild_tier_2 = 10
premium_guild_tier_3 = 11
channel_follow_add = 12
guild_stream = 13
guild_discovery_disqualified = 14
guild_discovery_requalified = 15
guild_discovery_grace_period_initial_warning = 16
guild_discovery_grace_period_final_warning = 17
thread_created = 18
reply = 19
chat_input_command = 20
thread_starter_message = 21
guild_invite_reminder = 22
context_menu_command = 23
auto_moderation_action = 24
class SpeakingState(Enum):
none = 0
voice = 1
soundshare = 2
priority = 4
def __str__(self) -> str:
return self.name
def __int__(self) -> int:
return self.value
class VerificationLevel(Enum, comparable=True):
none = 0
low = 1
medium = 2
high = 3
highest = 4
def __str__(self) -> str:
return self.name
class ContentFilter(Enum, comparable=True):
disabled = 0
no_role = 1
all_members = 2
def __str__(self) -> str:
return self.name
class Status(Enum):
online = 'online'
offline = 'offline'
idle = 'idle'
dnd = 'dnd'
do_not_disturb = 'dnd'
invisible = 'invisible'
def __str__(self) -> str:
return self.value
class DefaultAvatar(Enum):
blurple = 0
grey = 1
gray = 1
green = 2
orange = 3
red = 4
def __str__(self) -> str:
return self.name
class NotificationLevel(Enum, comparable=True):
all_messages = 0
only_mentions = 1
class AuditLogActionCategory(Enum):
create = 1
delete = 2
update = 3
class AuditLogAction(Enum):
# fmt: off
guild_update = 1
channel_create = 10
channel_update = 11
channel_delete = 12
overwrite_create = 13
overwrite_update = 14
overwrite_delete = 15
kick = 20
member_prune = 21
ban = 22
unban = 23
member_update = 24
member_role_update = 25
member_move = 26
member_disconnect = 27
bot_add = 28
role_create = 30
role_update = 31
role_delete = 32
invite_create = 40
invite_update = 41
invite_delete = 42
webhook_create = 50
webhook_update = 51
webhook_delete = 52
emoji_create = 60
emoji_update = 61
emoji_delete = 62
message_delete = 72
message_bulk_delete = 73
message_pin = 74
message_unpin = 75
integration_create = 80
integration_update = 81
integration_delete = 82
stage_instance_create = 83
stage_instance_update = 84
stage_instance_delete = 85
sticker_create = 90
sticker_update = 91
sticker_delete = 92
scheduled_event_create = 100
scheduled_event_update = 101
scheduled_event_delete = 102
thread_create = 110
thread_update = 111
thread_delete = 112
app_command_permission_update = 121
automod_rule_create = 140
automod_rule_update = 141
automod_rule_delete = 142
automod_block_message = 143
automod_flag_message = 144
automod_timeout_member = 145
# fmt: on
@property
def category(self) -> Optional[AuditLogActionCategory]:
# fmt: off
lookup: Dict[AuditLogAction, Optional[AuditLogActionCategory]] = {
AuditLogAction.guild_update: AuditLogActionCategory.update,
AuditLogAction.channel_create: AuditLogActionCategory.create,
AuditLogAction.channel_update: AuditLogActionCategory.update,
AuditLogAction.channel_delete: AuditLogActionCategory.delete,
AuditLogAction.overwrite_create: AuditLogActionCategory.create,
AuditLogAction.overwrite_update: AuditLogActionCategory.update,
AuditLogAction.overwrite_delete: AuditLogActionCategory.delete,
AuditLogAction.kick: None,
AuditLogAction.member_prune: None,
AuditLogAction.ban: None,
AuditLogAction.unban: None,
AuditLogAction.member_update: AuditLogActionCategory.update,
AuditLogAction.member_role_update: AuditLogActionCategory.update,
AuditLogAction.member_move: None,
AuditLogAction.member_disconnect: None,
AuditLogAction.bot_add: None,
AuditLogAction.role_create: AuditLogActionCategory.create,
AuditLogAction.role_update: AuditLogActionCategory.update,
AuditLogAction.role_delete: AuditLogActionCategory.delete,
AuditLogAction.invite_create: AuditLogActionCategory.create,
AuditLogAction.invite_update: AuditLogActionCategory.update,
AuditLogAction.invite_delete: AuditLogActionCategory.delete,
AuditLogAction.webhook_create: AuditLogActionCategory.create,
AuditLogAction.webhook_update: AuditLogActionCategory.update,
AuditLogAction.webhook_delete: AuditLogActionCategory.delete,
AuditLogAction.emoji_create: AuditLogActionCategory.create,
AuditLogAction.emoji_update: AuditLogActionCategory.update,
AuditLogAction.emoji_delete: AuditLogActionCategory.delete,
AuditLogAction.message_delete: AuditLogActionCategory.delete,
AuditLogAction.message_bulk_delete: AuditLogActionCategory.delete,
AuditLogAction.message_pin: None,
AuditLogAction.message_unpin: None,
AuditLogAction.integration_create: AuditLogActionCategory.create,
AuditLogAction.integration_update: AuditLogActionCategory.update,
AuditLogAction.integration_delete: AuditLogActionCategory.delete,
AuditLogAction.stage_instance_create: AuditLogActionCategory.create,
AuditLogAction.stage_instance_update: AuditLogActionCategory.update,
AuditLogAction.stage_instance_delete: AuditLogActionCategory.delete,
AuditLogAction.sticker_create: AuditLogActionCategory.create,
AuditLogAction.sticker_update: AuditLogActionCategory.update,
AuditLogAction.sticker_delete: AuditLogActionCategory.delete,
AuditLogAction.scheduled_event_create: AuditLogActionCategory.create,
AuditLogAction.scheduled_event_update: AuditLogActionCategory.update,
AuditLogAction.scheduled_event_delete: AuditLogActionCategory.delete,
AuditLogAction.thread_create: AuditLogActionCategory.create,
AuditLogAction.thread_delete: AuditLogActionCategory.delete,
AuditLogAction.thread_update: AuditLogActionCategory.update,
AuditLogAction.app_command_permission_update: AuditLogActionCategory.update,
AuditLogAction.automod_rule_create: AuditLogActionCategory.create,
AuditLogAction.automod_rule_update: AuditLogActionCategory.update,
AuditLogAction.automod_rule_delete: AuditLogActionCategory.delete,
AuditLogAction.automod_block_message: None,
AuditLogAction.automod_flag_message: None,
AuditLogAction.automod_timeout_member: None,
}
# fmt: on
return lookup[self]
@property
def target_type(self) -> Optional[str]:
v = self.value
if v == -1:
return 'all'
elif v < 10:
return 'guild'
elif v < 20:
return 'channel'
elif v < 30:
return 'user'
elif v < 40:
return 'role'
elif v < 50:
return 'invite'
elif v < 60:
return 'webhook'
elif v < 70:
return 'emoji'
elif v == 73:
return 'channel'
elif v < 80:
return 'message'
elif v < 83:
return 'integration'
elif v < 90:
return 'stage_instance'
elif v < 93:
return 'sticker'
elif v < 103:
return 'guild_scheduled_event'
elif v < 113:
return 'thread'
elif v < 122:
return 'integration_or_app_command'
elif v < 143:
return 'auto_moderation'
elif v < 146:
return 'user'
class UserFlags(Enum):
staff = 1
partner = 2
hypesquad = 4
bug_hunter = 8
mfa_sms = 16
premium_promo_dismissed = 32
hypesquad_bravery = 64
hypesquad_brilliance = 128
hypesquad_balance = 256
early_supporter = 512
team_user = 1024
system = 4096
has_unread_urgent_messages = 8192
bug_hunter_level_2 = 16384
verified_bot = 65536
verified_bot_developer = 131072
discord_certified_moderator = 262144
bot_http_interactions = 524288
spammer = 1048576
class ActivityType(Enum):
unknown = -1
playing = 0
streaming = 1
listening = 2
watching = 3
custom = 4
competing = 5
def __int__(self) -> int:
return self.value
class TeamMembershipState(Enum):
invited = 1
accepted = 2
class WebhookType(Enum):
incoming = 1
channel_follower = 2
application = 3
class ExpireBehaviour(Enum):
remove_role = 0
kick = 1
ExpireBehavior = ExpireBehaviour
class StickerType(Enum):
standard = 1
guild = 2
class StickerFormatType(Enum):
png = 1
apng = 2
lottie = 3
@property
def file_extension(self) -> str:
# fmt: off
lookup: Dict[StickerFormatType, str] = {
StickerFormatType.png: 'png',
StickerFormatType.apng: 'png',
StickerFormatType.lottie: 'json',
}
# fmt: on
return lookup[self]
class InviteTarget(Enum):
unknown = 0
stream = 1
embedded_application = 2
class InteractionType(Enum):
ping = 1
application_command = 2
component = 3
autocomplete = 4
modal_submit = 5
class InteractionResponseType(Enum):
pong = 1
# ack = 2 (deprecated)
# channel_message = 3 (deprecated)
channel_message = 4 # (with source)
deferred_channel_message = 5 # (with source)
deferred_message_update = 6 # for components
message_update = 7 # for components
autocomplete_result = 8
modal = 9 # for modals
class VideoQualityMode(Enum):
auto = 1
full = 2
def __int__(self) -> int:
return self.value
class ComponentType(Enum):
action_row = 1
button = 2
select = 3
text_input = 4
def __int__(self) -> int:
return self.value
class ButtonStyle(Enum):
primary = 1
secondary = 2
success = 3
danger = 4
link = 5
# Aliases
blurple = 1
grey = 2
gray = 2
green = 3
red = 4
url = 5
def __int__(self) -> int:
return self.value
class TextStyle(Enum):
short = 1
paragraph = 2
# Aliases
long = 2
def __int__(self) -> int:
return self.value
class PrivacyLevel(Enum):
guild_only = 2
class NSFWLevel(Enum, comparable=True):
default = 0
explicit = 1
safe = 2
age_restricted = 3
class MFALevel(Enum, comparable=True):
disabled = 0
require_2fa = 1
class Locale(Enum):
american_english = 'en-US'
british_english = 'en-GB'
bulgarian = 'bg'
chinese = 'zh-CN'
taiwan_chinese = 'zh-TW'
croatian = 'hr'
czech = 'cs'
danish = 'da'
dutch = 'nl'
finnish = 'fi'
french = 'fr'
german = 'de'
greek = 'el'
hindi = 'hi'
hungarian = 'hu'
italian = 'it'
japanese = 'ja'
korean = 'ko'
lithuanian = 'lt'
norwegian = 'no'
polish = 'pl'
brazil_portuguese = 'pt-BR'
romanian = 'ro'
russian = 'ru'
spain_spanish = 'es-ES'
swedish = 'sv-SE'
thai = 'th'
turkish = 'tr'
ukrainian = 'uk'
vietnamese = 'vi'
def __str__(self) -> str:
return self.value
E = TypeVar('E', bound='Enum')
class EntityType(Enum):
stage_instance = 1
voice = 2
external = 3
class EventStatus(Enum):
scheduled = 1
active = 2
completed = 3
canceled = 4
ended = 3
cancelled = 4
class AppCommandOptionType(Enum):
subcommand = 1
subcommand_group = 2
string = 3
integer = 4
boolean = 5
user = 6
channel = 7
role = 8
mentionable = 9
number = 10
attachment = 11
class AppCommandType(Enum):
chat_input = 1
user = 2
message = 3
class AppCommandPermissionType(Enum):
role = 1
user = 2
channel = 3
class AutoModRuleTriggerType(Enum):
keyword = 1
harmful_link = 2
spam = 3
keyword_preset = 4
mention_spam = 5
class AutoModRuleEventType(Enum):
message_send = 1
class AutoModRuleActionType(Enum):
block_message = 1
send_alert_message = 2
timeout = 3
def create_unknown_value(cls: Type[E], val: Any) -> E:
value_cls = cls._enum_value_cls_ # type: ignore # This is narrowed below
name = f'unknown_{val}'
return value_cls(name=name, value=val)
def try_enum(cls: Type[E], val: Any) -> E:
"""A function that tries to turn the value into enum ``cls``.
If it fails it returns a proxy invalid value instead.
"""
try:
return cls._enum_value_map_[val] # type: ignore # All errors are caught below
except (KeyError, TypeError, AttributeError):
return create_unknown_value(cls, val)
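# Editor's sketch (not part of the original module): try_enum returns the real
# member when the value is known and a proxy "unknown" value otherwise, which
# lets callers tolerate values added to the API after this file was written.
def _try_enum_example():
    assert try_enum(ChannelType, 0) is ChannelType.text
    unknown = try_enum(ChannelType, 999)  # not a known channel type
    assert unknown.name == 'unknown_999' and unknown.value == 999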
| {
"content_hash": "6d3862ea09b384986574a0c9fe03612b",
"timestamp": "",
"source": "github",
"line_count": 745,
"max_line_length": 120,
"avg_line_length": 28.94228187919463,
"alnum_prop": 0.5781003617475188,
"repo_name": "rapptz/discord.py",
"id": "3465250a1a09d37d1b6d3049cce8cbea5eecb0c0",
"size": "21562",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "discord/enums.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2453404"
}
],
"symlink_target": ""
} |
"""This example updates the given client user's status."""
import argparse
import pprint
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
from googleapiclient.errors import HttpError
import samples_util
DEFAULT_ACCOUNT_ID = 'ENTER_ACCOUNT_ID_HERE'
DEFAULT_CLIENT_BUYER_ID = 'ENTER_CLIENT_BUYER_ID_HERE'
DEFAULT_USER_ID = 'ENTER_USER_ID_HERE'
VALID_STATUS = ('ACTIVE', 'DISABLED')
def main(ad_exchange_buyer, account_id, client_account_id, user_id, body):
try:
user = ad_exchange_buyer.accounts().clients().users().update(
accountId=account_id, clientAccountId=client_account_id,
userId=user_id, body=body).execute()
print(f'User for Account ID "{account_id}" and Client Account Id: '
f'"{client_account_id}" has been updated.')
pprint.pprint(user)
except HttpError as e:
print(e)
if __name__ == '__main__':
def status(s):
if s not in VALID_STATUS:
raise argparse.ArgumentTypeError('Invalid value "%s".' % s)
return s
# Optional arguments; overrides default values if set.
parser = argparse.ArgumentParser(description='Updates the status of a '
'client user.')
parser.add_argument(
'-a', '--account_id', default=DEFAULT_ACCOUNT_ID, type=int,
help='The integer id of the Authorized Buyers account.')
parser.add_argument(
'-c', '--client_buyer_id', default=DEFAULT_CLIENT_BUYER_ID, type=int,
help='The integer id of the client buyer.')
parser.add_argument(
'-u', '--user_id', default=DEFAULT_USER_ID, type=int,
help='The integer id of the client user.')
parser.add_argument(
'-s', '--status', default='DISABLED', type=status,
help=('The desired update to the client user\'s status. This can be '
'set to any of the following: %s' % str(VALID_STATUS)))
args = parser.parse_args()
# Create a body containing the fields to be updated.
# For this resource, only the "status" field can be modified.
BODY = {'status': args.status}
try:
service = samples_util.GetService('v2beta1')
except IOError as ex:
print(f'Unable to create adexchangebuyer service - {ex}')
print('Did you specify the key file in samples_util.py?')
sys.exit(1)
main(service, args.account_id, args.client_buyer_id, args.user_id, BODY)
| {
"content_hash": "64e787498729aa38f251444f3419a282",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 75,
"avg_line_length": 33.84057971014493,
"alnum_prop": 0.6625267665952891,
"repo_name": "googleads/googleads-adxbuyer-examples",
"id": "4bc45bd3bb393e2c2028a8a01ad3becc3bd42d45",
"size": "2953",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "python/samples/v2_x/update_client_user.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "95365"
},
{
"name": "CSS",
"bytes": "637"
},
{
"name": "Java",
"bytes": "94253"
},
{
"name": "PHP",
"bytes": "57301"
},
{
"name": "Python",
"bytes": "71471"
},
{
"name": "Ruby",
"bytes": "87018"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, absolute_import
from os.path import abspath, join, dirname
from preggy import expect
from thumbor.context import ServerParameters, RequestParameters
from thumbor.config import Config
from thumbor.engines.gif import Engine
from thumbor.utils import which
from thumbor.importer import Importer
from tests.base import TestCase
STORAGE_PATH = abspath(join(dirname(__file__), '../fixtures/images/'))
class GifEngineTestCase(TestCase):
def get_config(self):
return Config(
SECURITY_KEY='ACME-SEC',
ENGINE='thumbor.engines.gif',
IMAGE_METADATA_READ_FORMATS='exif,xmp',
LOADER="thumbor.loaders.file_loader",
FILE_LOADER_ROOT_PATH=STORAGE_PATH,
STORAGE='thumbor.storages.no_storage',
USE_GIFSICLE_ENGINE=True,
RESPECT_ORIENTATION=True,
)
def get_importer(self):
return Importer(self.config)
def get_server(self):
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
server.gifsicle_path = which('gifsicle')
return server
def get_context(self, *args, **kwargs):
        context = super(GifEngineTestCase, self).get_context(*args, **kwargs)
req = RequestParameters(url='/foo/bar.gif')
context.request = req
return context
def test_create_engine(self):
engine = Engine(self.context)
expect(engine).to_be_instance_of(Engine)
def test_load_image(self):
engine = Engine(self.context)
with open(join(STORAGE_PATH, 'animated.gif'), 'r') as im:
buffer = im.read()
image = engine.create_image(buffer)
expect(image.format).to_equal('GIF')
def test_errors_on_gifsicle_should_not_raises_errors_when_output(self):
engine = Engine(self.context)
with open(join(STORAGE_PATH, 'SmallFullColourGIF.gif'), 'r') as im:
buffer = im.read()
engine.load(buffer, '.gif')
result = engine.run_gifsicle('--some-invalid-opt')
expect(result).Not.to_be_null()
def test_is_multiple_should_returns_true_if_gif_has_many_frames(self):
engine = Engine(self.context)
with open(join(STORAGE_PATH, 'animated.gif'), 'r') as im:
buffer = im.read()
engine.load(buffer, '.gif')
expect(engine.is_multiple()).to_be_true()
def test_is_multiple_should_returns_false_if_gif_has_one_frame(self):
engine = Engine(self.context)
with open(join(STORAGE_PATH, 'animated-one-frame.gif'), 'r') as im:
buffer = im.read()
engine.load(buffer, '.gif')
expect(engine.is_multiple()).to_be_false()
def test_convert_to_grayscale_should_update_image(self):
engine = Engine(self.context)
with open(join(STORAGE_PATH, 'animated.gif'), 'r') as im:
buffer = im.read()
engine.load(buffer, '.gif')
buffer = engine.read()
engine.convert_to_grayscale()
expect(buffer).not_to_equal(engine.read())
def test_convert_to_grayscale_should_not_update_image(self):
engine = Engine(self.context)
with open(join(STORAGE_PATH, 'animated.gif'), 'r') as im:
buffer = im.read()
engine.load(buffer, '.gif')
buffer = engine.read()
engine.convert_to_grayscale(False)
expect(buffer).to_equal(engine.read())
| {
"content_hash": "85f8e9a914d3fb5b738361b48c93e896",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 88,
"avg_line_length": 34.03921568627451,
"alnum_prop": 0.6287442396313364,
"repo_name": "okor/thumbor",
"id": "c0c8430439afbb0b9655cb6cf3b44b36f2b2be27",
"size": "3724",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/engines/test_gif.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "62441"
},
{
"name": "JavaScript",
"bytes": "409"
},
{
"name": "Makefile",
"bytes": "10255"
},
{
"name": "Python",
"bytes": "594069"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
from .common import BaseTest, TestConfig as Config
class Kinesis(BaseTest):
def test_stream_query(self):
factory = self.replay_flight_data("test_kinesis_stream_query")
p = self.load_policy(
{
"name": "kstream",
"resource": "kinesis",
"filters": [
{"type": "value", "value_type": "size", "value": 3, "key": "Shards"}
],
},
config=Config.empty(),
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["Tags"], [{"Key": "Origin", "Value": "home"}])
self.assertEqual(resources[0]["StreamStatus"], "ACTIVE")
def test_stream_delete(self):
factory = self.replay_flight_data("test_kinesis_stream_delete")
p = self.load_policy(
{
"name": "kstream",
"resource": "kinesis",
"filters": [{"StreamName": "sock-drawer"}],
"actions": ["delete"],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
stream = factory().client("kinesis").describe_stream(StreamName="sock-drawer")[
"StreamDescription"
]
self.assertEqual(stream["StreamStatus"], "DELETING")
def test_stream_encrypt(self):
factory = self.replay_flight_data("test_kinesis_encrypt")
p = self.load_policy(
{
"name": "kstream",
"resource": "kinesis",
"filters": [{"StreamName": "sock-drawer"}],
"actions": [{"type": "encrypt", "key": "aws/kinesis"}],
},
session_factory=factory,
)
p.run()
stream = factory().client("kinesis").describe_stream(StreamName="sock-drawer")[
"StreamDescription"
]
self.assertEqual(stream["EncryptionType"], "KMS")
def test_hose_query(self):
factory = self.replay_flight_data("test_kinesis_hose_query")
p = self.load_policy(
{
"name": "khole",
"resource": "firehose",
"filters": [{"DeliveryStreamName": "sock-index-hose"}],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["DeliveryStreamStatus"], "ACTIVE")
def test_firehose_delete(self):
factory = self.replay_flight_data("test_kinesis_hose_delete")
p = self.load_policy(
{
"name": "khole",
"resource": "firehose",
"filters": [{"DeliveryStreamName": "sock-index-hose"}],
"actions": ["delete"]
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(
factory().client("firehose").describe_delivery_stream(
DeliveryStreamName="sock-index-hose"
)[
"DeliveryStreamDescription"
][
"DeliveryStreamStatus"
],
"DELETING",
)
def test_firehose_extended_s3_encrypt_s3_destination(self):
factory = self.replay_flight_data("test_firehose_ext_s3_encrypt_s3_destination")
p = self.load_policy(
{
"name": "khole",
"resource": "firehose",
"filters": [{"type": "value",
"key": "Destinations[0].S3DestinationDescription.EncryptionConfiguration.NoEncryptionConfig", # noqa: E501
"value": "present"}],
"actions": [{"type": "encrypt-s3-destination",
"key_arn": "arn:aws:kms:us-east-1:123456789:alias/aws/s3"}],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
s = factory().client("firehose").describe_delivery_stream(
DeliveryStreamName="firehose-s3"
)['DeliveryStreamDescription']['Destinations'][0]
assert 'KMSEncryptionConfig' in s['S3DestinationDescription']['EncryptionConfiguration'].keys() # noqa: E501
def test_firehose_splunk_encrypt_s3_destination(self):
factory = self.replay_flight_data("test_firehose_splunk_encrypt_s3_destination")
p = self.load_policy(
{
"name": "khole",
"resource": "firehose",
"filters": [{"type": "value",
"key": "Destinations[0].SplunkDestinationDescription.S3DestinationDescription.EncryptionConfiguration.NoEncryptionConfig", # noqa: E501
"value": "present"}],
"actions": [{"type": "encrypt-s3-destination",
"key_arn": "arn:aws:kms:us-east-1:123456789:alias/aws/s3"}],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
s = factory().client("firehose").describe_delivery_stream(
DeliveryStreamName="firehose-splunk"
)['DeliveryStreamDescription']['Destinations'][0]['SplunkDestinationDescription']
assert 'KMSEncryptionConfig' in \
s['S3DestinationDescription']['EncryptionConfiguration'].keys()
def test_firehose_elasticsearch_encrypt_s3_destination(self):
factory = self.replay_flight_data("test_firehose_elasticsearch_encrypt_s3_destination")
p = self.load_policy(
{
"name": "khole",
"resource": "firehose",
"filters": [{"type": "value",
"key": "Destinations[0].ElasticsearchDestinationDescription.S3DestinationDescription.EncryptionConfiguration.NoEncryptionConfig", # noqa: E501
"value": "present"}],
"actions": [{"type": "encrypt-s3-destination",
"key_arn": "arn:aws:kms:us-east-1:123456789:alias/aws/s3"}],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
s = factory().client("firehose").describe_delivery_stream(
DeliveryStreamName="firehose-splunk"
)['DeliveryStreamDescription']['Destinations'][0]['ElasticsearchDestinationDescription']
assert 'KMSEncryptionConfig' in \
s['S3DestinationDescription']['EncryptionConfiguration'].keys()
def test_firehose_redshift_encrypt_s3_destination(self):
factory = self.replay_flight_data("test_firehose_redshift_encrypt_s3_destination")
p = self.load_policy(
{
"name": "khole",
"resource": "firehose",
"filters": [{"type": "value",
"key": "Destinations[0].RedshiftDestinationDescription.S3DestinationDescription.EncryptionConfiguration.NoEncryptionConfig", # noqa: E501
"value": "present"}],
"actions": [{"type": "encrypt-s3-destination",
"key_arn": "arn:aws:kms:us-east-1:123456789:alias/aws/s3"}],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
s = factory().client("firehose").describe_delivery_stream(
DeliveryStreamName="firehose-redshift"
)['DeliveryStreamDescription']['Destinations'][0]['RedshiftDestinationDescription']
assert 'KMSEncryptionConfig' in \
s['S3DestinationDescription']['EncryptionConfiguration'].keys()
def test_app_query(self):
factory = self.replay_flight_data("test_kinesis_analytics_query")
p = self.load_policy(
{
"name": "kapp",
"resource": "kinesis-analytics",
"filters": [{"ApplicationStatus": "RUNNING"}],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["ApplicationName"], "sock-app")
def test_app_delete(self):
factory = self.replay_flight_data("test_kinesis_analytics_delete")
p = self.load_policy(
{
"name": "kapp",
"resource": "kinesis-analytics",
"filters": [{"ApplicationName": "sock-app"}],
"actions": ["delete"],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(
factory().client("kinesisanalytics").describe_application(
ApplicationName="sock-app"
)[
"ApplicationDetail"
][
"ApplicationStatus"
],
"DELETING",
)
| {
"content_hash": "2a9514fabaa4b05708058ea2d0e33f57",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 163,
"avg_line_length": 41.16216216216216,
"alnum_prop": 0.5383015977237907,
"repo_name": "Sutto/cloud-custodian",
"id": "e8259895a72382b8cef2e84d889b91dde6479b7c",
"size": "9728",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/test_kinesis.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "7986"
},
{
"name": "Go",
"bytes": "146630"
},
{
"name": "HTML",
"bytes": "31"
},
{
"name": "Makefile",
"bytes": "9971"
},
{
"name": "PowerShell",
"bytes": "1804"
},
{
"name": "Python",
"bytes": "5283859"
},
{
"name": "Shell",
"bytes": "12627"
},
{
"name": "Smarty",
"bytes": "359"
}
],
"symlink_target": ""
} |
import struct
from collections import OrderedDict
import numpy as np
import scipy.stats
REVISION_STATEPOINT = 12
filter_types = {1: 'universe', 2: 'material', 3: 'cell', 4: 'cellborn',
5: 'surface', 6: 'mesh', 7: 'energyin', 8: 'energyout'}
score_types = {-1: 'flux',
-2: 'total',
-3: 'scatter',
-4: 'nu-scatter',
-5: 'scatter-n',
-6: 'scatter-pn',
-7: 'nu-scatter-n',
-8: 'nu-scatter-pn',
-9: 'transport',
-10: 'n1n',
-11: 'absorption',
-12: 'fission',
-13: 'nu-fission',
-14: 'kappa-fission',
-15: 'current',
-16: 'flux-yn',
-17: 'total-yn',
-18: 'scatter-yn',
-19: 'nu-scatter-yn',
-20: 'events',
1: '(n,total)',
2: '(n,elastic)',
4: '(n,level)',
11: '(n,2nd)',
16: '(n,2n)',
17: '(n,3n)',
18: '(n,fission)',
19: '(n,f)',
20: '(n,nf)',
21: '(n,2nf)',
22: '(n,na)',
23: '(n,n3a)',
24: '(n,2na)',
25: '(n,3na)',
28: '(n,np)',
29: '(n,n2a)',
30: '(n,2n2a)',
32: '(n,nd)',
33: '(n,nt)',
34: '(n,nHe-3)',
35: '(n,nd2a)',
36: '(n,nt2a)',
37: '(n,4n)',
38: '(n,3nf)',
41: '(n,2np)',
42: '(n,3np)',
44: '(n,n2p)',
45: '(n,npa)',
91: '(n,nc)',
101: '(n,disappear)',
102: '(n,gamma)',
103: '(n,p)',
104: '(n,d)',
105: '(n,t)',
106: '(n,3He)',
107: '(n,a)',
108: '(n,2a)',
109: '(n,3a)',
111: '(n,2p)',
112: '(n,pa)',
113: '(n,t2a)',
114: '(n,d2a)',
115: '(n,pd)',
116: '(n,pt)',
117: '(n,da)',
201: '(n,Xn)',
202: '(n,Xgamma)',
203: '(n,Xp)',
204: '(n,Xd)',
205: '(n,Xt)',
206: '(n,X3He)',
207: '(n,Xa)',
444: '(damage)',
649: '(n,pc)',
699: '(n,dc)',
749: '(n,tc)',
799: '(n,3Hec)',
               849: '(n,ac)'}
score_types.update({MT: '(n,n' + str(MT-50) + ')' for MT in range(51,91)})
score_types.update({MT: '(n,p' + str(MT-600) + ')' for MT in range(600,649)})
score_types.update({MT: '(n,d' + str(MT-650) + ')' for MT in range(650,699)})
score_types.update({MT: '(n,t' + str(MT-700) + ')' for MT in range(700,749)})
score_types.update({MT: '(n,3He' + str(MT-750) + ')' for MT in range(750,799)})
score_types.update({MT: '(n,a' + str(MT-800) + ')' for MT in range(800,849)})
class Mesh(object):
def __init__(self):
pass
def __repr__(self):
if hasattr(self, "dimension"):
return "<Mesh: {0}>".format(tuple(self.dimension))
else:
return "<Mesh>"
class Filter(object):
def __init__(self):
self.type = 0
self.bins = []
def __repr__(self):
return "<Filter: {0}>".format(self.type)
class Tally(object):
def __init__(self):
self.filters = OrderedDict()
class SourceSite(object):
def __init__(self):
self.weight = None
self.xyz = None
self.uvw = None
self.E = None
def __repr__(self):
return "<SourceSite: xyz={0} at E={1}>".format(self.xyz, self.E)
class StatePoint(object):
def __init__(self, filename):
if filename.endswith('.h5'):
import h5py
self._f = h5py.File(filename, 'r')
self._hdf5 = True
else:
self._f = open(filename, 'rb')
self._hdf5 = False
# Set flags for what data was read
self._metadata = False
self._results = False
self._source = False
# Initialize arrays for meshes and tallies
self.meshes = []
self.tallies = []
self.source = []
# Read all metadata
self._read_metadata()
def _read_metadata(self):
# Read filetype
self.filetype = self._get_int(path='filetype')[0]
# Read statepoint revision
self.revision = self._get_int(path='revision')[0]
if self.revision != REVISION_STATEPOINT:
raise Exception('Statepoint Revision is not consistent.')
# Read OpenMC version
if self._hdf5:
self.version = [self._get_int(path='version_major')[0],
self._get_int(path='version_minor')[0],
self._get_int(path='version_release')[0]]
else:
self.version = self._get_int(3)
# Read date and time
self.date_and_time = self._get_string(19, path='date_and_time')
# Read path
self.path = self._get_string(255, path='path').strip()
# Read random number seed
self.seed = self._get_long(path='seed')[0]
# Read run information
self.run_mode = self._get_int(path='run_mode')[0]
self.n_particles = self._get_long(path='n_particles')[0]
self.n_batches = self._get_int(path='n_batches')[0]
# Read current batch
self.current_batch = self._get_int(path='current_batch')[0]
# Read criticality information
if self.run_mode == 2:
self.n_inactive = self._get_int(path='n_inactive')[0]
self.gen_per_batch = self._get_int(path='gen_per_batch')[0]
self.k_batch = self._get_double(
self.current_batch*self.gen_per_batch, path='k_generation')
self.entropy = self._get_double(
self.current_batch*self.gen_per_batch, path='entropy')
self.k_col_abs = self._get_double(path='k_col_abs')[0]
self.k_col_tra = self._get_double(path='k_col_tra')[0]
self.k_abs_tra = self._get_double(path='k_abs_tra')[0]
self.k_combined = self._get_double(2, path='k_combined')
# Read CMFD information
cmfd_present = self._get_int(path='cmfd_on')[0]
if cmfd_present == 1:
self.cmfd_indices = self._get_int(4, path='cmfd/indices')
self.k_cmfd = self._get_double(self.current_batch,
path='cmfd/k_cmfd')
self.cmfd_src = self._get_double_array(np.product(self.cmfd_indices),
path='cmfd/cmfd_src')
self.cmfd_src = np.reshape(self.cmfd_src,
tuple(self.cmfd_indices), order='F')
self.cmfd_entropy = self._get_double(self.current_batch,
path='cmfd/cmfd_entropy')
self.cmfd_balance = self._get_double(self.current_batch,
path='cmfd/cmfd_balance')
self.cmfd_dominance = self._get_double(self.current_batch,
path='cmfd/cmfd_dominance')
self.cmfd_srccmp = self._get_double(self.current_batch,
path='cmfd/cmfd_srccmp')
# Read number of meshes
n_meshes = self._get_int(path='tallies/n_meshes')[0]
# Read meshes
for i in range(n_meshes):
m = Mesh()
self.meshes.append(m)
base = 'tallies/mesh' + str(i+1) + '/'
# Read id, mesh type, and number of dimensions
m.id = self._get_int(path=base+'id')[0]
m.type = self._get_int(path=base+'type')[0]
n = self._get_int(path=base+'n_dimension')[0]
# Read mesh size, lower-left coordinates, upper-right coordinates,
# and width of each mesh cell
m.dimension = self._get_int(n, path=base+'dimension')
m.lower_left = self._get_double(n, path=base+'lower_left')
m.upper_right = self._get_double(n, path=base+'upper_right')
m.width = self._get_double(n, path=base+'width')
# Read number of tallies
n_tallies = self._get_int(path='tallies/n_tallies')[0]
for i in range(n_tallies):
# Create Tally object and add to list of tallies
t = Tally()
self.tallies.append(t)
base = 'tallies/tally' + str(i+1) + '/'
# Read id and number of realizations
t.id = self._get_int(path=base+'id')[0]
t.n_realizations = self._get_int(path=base+'n_realizations')[0]
# Read sizes of tallies
t.total_score_bins = self._get_int(path=base+'total_score_bins')[0]
t.total_filter_bins = self._get_int(path=base+'total_filter_bins')[0]
# Read number of filters
n_filters = self._get_int(path=base+'n_filters')[0]
for j in range(n_filters):
# Create Filter object
f = Filter()
base = 'tallies/tally{0}/filter{1}/'.format(i+1, j+1)
# Get type of filter
f.type = filter_types[self._get_int(path=base+'type')[0]]
# Add to filter dictionary
t.filters[f.type] = f
# Determine how many bins are in this filter
f.length = self._get_int(path=base+'n_bins')[0]
assert f.length > 0
if f.type == 'energyin' or f.type == 'energyout':
f.bins = self._get_double(f.length + 1, path=base+'bins')
elif f.type == 'mesh':
f.bins = self._get_int(path=base+'bins')
else:
f.bins = self._get_int(f.length, path=base+'bins')
base = 'tallies/tally' + str(i+1) + '/'
# Read nuclide bins
n_nuclides = self._get_int(path=base+'n_nuclide_bins')[0]
t.n_nuclides = n_nuclides
t.nuclides = self._get_int(n_nuclides, path=base+'nuclide_bins')
# Read score bins and scattering order
t.n_scores = self._get_int(path=base+'n_score_bins')[0]
t.scores = [score_types[j] for j in self._get_int(
t.n_scores, path=base+'score_bins')]
t.moment_order = self._get_int(t.n_scores, path=base+'moment_order')
# Read number of user score bins
t.n_user_scores = self._get_int(path=base+'n_user_score_bins')[0]
# Set up stride
stride = 1
for f in list(t.filters.values())[::-1]:
f.stride = stride
stride *= f.length
# Source bank present
source_present = self._get_int(path='source_present')[0]
if source_present == 1:
self.source_present = True
else:
self.source_present = False
# Set flag indicating metadata has already been read
self._metadata = True
def read_results(self):
# Check whether metadata has been read
if not self._metadata:
self._read_metadata()
# Number of realizations for global tallies
self.n_realizations = self._get_int(path='n_realizations')[0]
# Read global tallies
n_global_tallies = self._get_int(path='n_global_tallies')[0]
if self._hdf5:
data = self._f['global_tallies'].value
self.global_tallies = np.column_stack((data['sum'], data['sum_sq']))
else:
self.global_tallies = np.array(self._get_double(2*n_global_tallies))
self.global_tallies.shape = (n_global_tallies, 2)
# Flag indicating if tallies are present
tallies_present = self._get_int(path='tallies/tallies_present')[0]
# Read tally results
if tallies_present:
for i, t in enumerate(self.tallies):
n = t.total_score_bins * t.total_filter_bins
if self._hdf5:
path = 'tallies/tally{0}/results'.format(i+1)
data = self._f[path].value
t.results = np.column_stack((data['sum'], data['sum_sq']))
t.results.shape = (t.total_filter_bins, t.total_score_bins, 2)
else:
t.results = np.array(self._get_double(2*n))
t.results.shape = (t.total_filter_bins, t.total_score_bins, 2)
# Indicate that tally results have been read
self._results = True
def read_source(self):
# Check whether tally results have been read
if not self._results:
self.read_results()
# Check if source bank is in statepoint
if not self.source_present:
print('Source not in statepoint file.')
return
# For HDF5 state points, copy entire bank
if self._hdf5:
source_sites = self._f['source_bank'].value
for i in range(self.n_particles):
s = SourceSite()
self.source.append(s)
# Read position, angle, and energy
if self._hdf5:
s.weight, s.xyz, s.uvw, s.E = source_sites[i]
else:
s.weight = self._get_double()[0]
s.xyz = self._get_double(3)
s.uvw = self._get_double(3)
s.E = self._get_double()[0]
def generate_ci(self, confidence=0.95):
"""Calculates confidence intervals for each tally bin."""
# Determine number of realizations
n = self.n_realizations
# Determine significance level and percentile for two-sided CI
alpha = 1 - confidence
percentile = 1 - alpha/2
# Calculate t-value
t_value = scipy.stats.t.ppf(percentile, n - 1)
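        # e.g. confidence=0.95 gives percentile 0.975; for large n this t-value
        # approaches ~1.96, the usual two-sided 95% interval (illustrative note)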
self.generate_stdev(t_value)
def generate_stdev(self, t_value=1.0):
"""
Calculates the sample mean and standard deviation of the mean for each
tally bin.
"""
# Determine number of realizations
n = self.n_realizations
# Global tallies
for i in range(len(self.global_tallies)):
# Get sum and sum of squares
s, s2 = self.global_tallies[i]
# Calculate sample mean and replace value
s /= n
self.global_tallies[i,0] = s
# Calculate standard deviation
if s != 0.0:
self.global_tallies[i,1] = t_value*np.sqrt((s2/n - s*s)/(n-1))
# Regular tallies
for t in self.tallies:
for i in range(t.results.shape[0]):
for j in range(t.results.shape[1]):
# Get sum and sum of squares
s, s2 = t.results[i,j]
# Calculate sample mean and replace value
s /= n
t.results[i,j,0] = s
# Calculate standard deviation
if s != 0.0:
t.results[i,j,1] = t_value*np.sqrt((s2/n - s*s)/(n-1))
def get_value(self, tally_index, spec_list, score_index):
"""Returns a tally score given a list of filters to satisfy.
Parameters
----------
tally_index : int
Index for tally in StatePoint.tallies list
spec_list : list
A list of tuples where the first value in each tuple is the filter
type, e.g. 'cell', and the second value is the desired index. If the
first value in the tuple is 'mesh', the second value should be a
tuple with three integers specifying the mesh indices.
Example: [('cell', 1), ('mesh', (14,17,20)), ('energyin', 2)]
score_index : int
Index corresponding to score for tally, i.e. the second index in
Tally.results[:,:,:].
"""
# Get Tally object given the index
t = self.tallies[tally_index]
# Initialize index for filter in Tally.results[:,:,:]
filter_index = 0
# Loop over specified filters in spec_list
for f_type, f_index in spec_list:
# Treat mesh filter separately
if f_type == 'mesh':
# Get index in StatePoint.meshes
mesh_index = t.filters['mesh'].bins[0] - 1
# Get dimensions of corresponding mesh
nx, ny, nz = self.meshes[mesh_index].dimension
# Convert (x,y,z) to a single bin -- this is similar to
# subroutine mesh_indices_to_bin in openmc/src/mesh.F90.
value = ((f_index[0] - 1)*ny*nz +
(f_index[1] - 1)*nz +
(f_index[2] - 1))
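                # e.g. for a hypothetical 3x3x3 mesh (ny = nz = 3), (x,y,z) = (2,1,3)
                # gives (2-1)*9 + (1-1)*3 + (3-1) = 11 as the zero-based mesh bin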
filter_index += value*t.filters[f_type].stride
else:
filter_index += f_index*t.filters[f_type].stride
# Return the desired result from Tally.results. This could be the sum and
# sum of squares, or it could be mean and stdev if self.generate_stdev()
# has been called already.
return t.results[filter_index, score_index]
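    # Illustrative call on a StatePoint instance sp (indices are hypothetical):
    # score column 0 of the first tally, for cell bin 1, mesh element (14,17,20)
    # and energy-in bin 2:
    #   sp.get_value(0, [('cell', 1), ('mesh', (14, 17, 20)), ('energyin', 2)], 0)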
def extract_results(self, tally_id, score_str):
"""Returns a tally results dictionary given a tally_id and score string.
Parameters
----------
tally_id : int
Index for the tally in StatePoint.tallies list
score_str : string
Corresponds to the string entered for a score in tallies.xml.
            For a flux score extraction it would be 'flux'
"""
# get tally
try:
tally = self.tallies[tally_id-1]
except:
print('Tally does not exist')
return
# get the score index if it is present
try:
idx = tally.scores.index(score_str)
except ValueError:
print('Score does not exist')
print(tally.scores)
return
# create numpy array for mean and 95% CI
n_bins = len(tally.results)
n_filters = len(tally.filters)
n_scores = len(tally.scores)
meanv = np.zeros(n_bins)
unctv = np.zeros(n_bins)
filters = np.zeros((n_bins,n_filters))
filtmax = np.zeros(n_filters+1)
meshmax = np.zeros(4)
filtmax[0] = 1
meshmax[0] = 1
# get number of realizations
n = tally.n_realizations
# get t-value
t_value = scipy.stats.t.ppf(0.975, n - 1)
# calculate mean
meanv = tally.results[:,idx,0]
meanv = meanv / n
# calculate 95% two-sided CI
unctv = tally.results[:,idx,1]
unctv = t_value*np.sqrt((unctv/n - meanv*meanv)/(n-1))/meanv
# create output dictionary
data = {'mean':meanv,'CI95':unctv}
# get bounds of filter bins
for akey in tally.filters.keys():
idx = list(tally.filters.keys()).index(akey)
filtmax[n_filters - idx] = tally.filters[akey].length
# compute bin info
for i in range(n_filters):
# compute indices for filter combination
filters[:,n_filters - i - 1] = np.floor((np.arange(n_bins) %
np.prod(filtmax[0:i+2]))/(np.prod(filtmax[0:i+1]))) + 1
# append in dictionary bin with filter
data.update({list(tally.filters.keys())[n_filters - i - 1]:
filters[:,n_filters - i - 1]})
# check for mesh
if list(tally.filters.keys())[n_filters - i - 1] == 'mesh':
dims = list(self.meshes[tally.filters['mesh'].bins[0] - 1].dimension)
dims.reverse()
dims = np.asarray(dims)
if score_str == 'current':
dims += 1
meshmax[1:4] = dims
mesh_bins = np.zeros((n_bins,3))
mesh_bins[:,2] = np.floor(((filters[:,n_filters - i - 1] - 1) %
np.prod(meshmax[0:2]))/(np.prod(meshmax[0:1]))) + 1
mesh_bins[:,1] = np.floor(((filters[:,n_filters - i - 1] - 1) %
np.prod(meshmax[0:3]))/(np.prod(meshmax[0:2]))) + 1
mesh_bins[:,0] = np.floor(((filters[:,n_filters - i - 1] - 1) %
np.prod(meshmax[0:4]))/(np.prod(meshmax[0:3]))) + 1
data.update({'mesh': list(zip(mesh_bins[:,0], mesh_bins[:,1],
mesh_bins[:,2]))})
i += 1
# add in maximum bin filters and order
b = list(tally.filters.keys())
b.reverse()
filtmax = list(filtmax[1:])
try:
idx = b.index('mesh')
filtmax[idx] = np.max(mesh_bins[:,2])
filtmax.insert(idx,np.max(mesh_bins[:,1]))
filtmax.insert(idx,np.max(mesh_bins[:,0]))
except ValueError:
pass
data.update({'bin_order':b,'bin_max':filtmax})
return data
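    # Sketch of typical use (tally id and score string are illustrative):
    #   data = sp.extract_results(1, 'flux')
    #   data['mean'], data['CI95']    # flat arrays with one entry per filter bin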
def _get_data(self, n, typeCode, size):
return list(struct.unpack('={0}{1}'.format(n,typeCode),
self._f.read(n*size)))
def _get_int(self, n=1, path=None):
if self._hdf5:
return [int(v) for v in self._f[path].value]
else:
return [int(v) for v in self._get_data(n, 'i', 4)]
def _get_long(self, n=1, path=None):
if self._hdf5:
return [int(v) for v in self._f[path].value]
else:
return [int(v) for v in self._get_data(n, 'q', 8)]
def _get_float(self, n=1, path=None):
if self._hdf5:
return [float(v) for v in self._f[path].value]
else:
return [float(v) for v in self._get_data(n, 'f', 4)]
def _get_double(self, n=1, path=None):
if self._hdf5:
return [float(v) for v in self._f[path].value]
else:
return [float(v) for v in self._get_data(n, 'd', 8)]
def _get_double_array(self, n=1, path=None):
if self._hdf5:
return self._f[path].value
else:
return self._get_data(n, 'd', 8)
def _get_string(self, n=1, path=None):
if self._hdf5:
return str(self._f[path].value)
else:
return str(self._get_data(n, 's', 1)[0])
| {
"content_hash": "bb50cf274376ad33f8a0c328fb53ecb8",
"timestamp": "",
"source": "github",
"line_count": 631,
"max_line_length": 85,
"avg_line_length": 35.78605388272583,
"alnum_prop": 0.49112085381515436,
"repo_name": "shenqicang/openmc",
"id": "5a9764d92b78cc70cda36cb6d6d31ca48e99f2aa",
"size": "22605",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "src/utils/statepoint.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "FORTRAN",
"bytes": "1261313"
},
{
"name": "Python",
"bytes": "374164"
},
{
"name": "Shell",
"bytes": "374"
}
],
"symlink_target": ""
} |
__author__ = "Peter Shipley"
__license__ = "BSD"
# Simple example
#
# This script listens for Upnp advertisements from any local ISY
# unit and prints results
#
from ISY.IsyDiscover import isy_discover
def list_units():
fmt = "%-25s %-25s %s"
print(fmt % ("Device Name", "Device Number", "URL Address" ))
print(fmt % ("-" * 20, "-" * 20, "-" * 20 ))
    # wait up to 30 seconds, or stop once two units have been discovered
r = isy_discover(timeout=30, count=2, debug=0)
for key, unit in r.items():
print(fmt % ( unit['friendlyName'], unit['UDN'], unit['URLBase'] ))
if __name__ == '__main__' :
list_units()
exit(0)
| {
"content_hash": "3b75c93e6e84f022a59245a670efdded",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 76,
"avg_line_length": 23.464285714285715,
"alnum_prop": 0.5920852359208524,
"repo_name": "fxstein/ISYlib-python",
"id": "16e9b2003261b116411fb8d3d46f27dd9d15e123",
"size": "684",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/isy_find.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "1521"
},
{
"name": "Python",
"bytes": "293702"
}
],
"symlink_target": ""
} |
__author__ = 'ar'
import os
import sys
import glob
import fnmatch
import numpy as np
import json
import lmdb
import matplotlib.pyplot as plt
from datetime import datetime
import copy
import shutil
import skimage.io as skio
from dbhelpers import DBImageImportReader, DBImageImportReaderFromDir, DBImageImportReaderFromCSV, checkFilePath
from dbconfig import DBImage2DConfig
from imgproc2d import ImageTransformer2D
#################################################
class Progressor:
counterMax=0
counter=0
strState=None
isForceStop=False
def __init__(self, parCounterMax=100, parCounter=0, strState=None):
self.counter = parCounter
self.counterMax = parCounterMax
self.strState = strState
def getPercent(self):
tdiv = self.counterMax
if tdiv<1:
tdiv = 1
return (100*self.counter)/tdiv
def inc(self):
self.counter+=1
def update(self):
self.inc()
def toString(self):
tret = '[%d/%d] : %0.1f%% (%s)' % (self.counter, self.counterMax, self.getPercent(), self.strState)
return tret
def __str__(self):
return self.toString()
def __repr__(self):
return self.toString()
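# Quick illustration (values are arbitrary): track progress over 200 items
#   p = Progressor(parCounterMax=200, strState='train-db')
#   p.update()       # counter becomes 1
#   p.toString()     # -> '[1/200] : ...% (train-db)'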
#################################################
class DBImage2DBuilder:
lmdbTrain = 'train_db'
lmdbVal = 'val_db'
fmeanData = 'mean.binaryproto'
fmeanImage = 'mean.jpg'
flabels = 'labels.txt'
fconfig = 'cfg.json'
#
wdir = None
pathCfg = None
#
cfg2D = None
imgReader2D = None
#
def getPathLmdbTrain(self):
return os.path.join(self.wdir, self.lmdbTrain)
def getPathLmdbVal(self):
return os.path.join(self.wdir, self.lmdbVal)
def getPathMeanBlob(self):
return os.path.join(self.wdir, self.fmeanData)
def getPathMeanImage(self):
return os.path.join(self.wdir, self.fmeanImage)
def getParhLabels(self):
return os.path.join(self.wdir, self.flabels)
def getPathDbConfig(self):
return os.path.join(self.wdir, self.fconfig)
#
def isInitialized(self):
return (self.cfg2D is not None) and (self.imgReader2D is not None)
def __init__(self, pathCfgInp=None, pathDirOut=None):
if pathCfgInp is not None:
checkFilePath(pathCfgInp)
self.pathCfg = pathCfgInp
if pathDirOut is not None:
checkFilePath(pathDirOut, isDirectory=True)
self.wdir = pathDirOut
if (self.wdir is not None) and (self.pathCfg is not None):
pass
def initializeInfo(self):
self.cfg2D = DBImage2DConfig(self.pathCfg)
tdatasetType = self.cfg2D.getImportDatasetType()
if tdatasetType=='dir':
self.imgReader2D = DBImageImportReaderFromDir(self.cfg2D)
elif tdatasetType=='txt':
self.imgReader2D = DBImageImportReaderFromCSV(self.cfg2D)
else:
raise Exception('Unknown dataset-import type: [%s]' % tdatasetType)
self.imgReader2D.precalculateInfo()
def toString(self):
if self.isInitialized():
tret = '[DBImage2DBuilder] : %s' % self.imgReader2D.toString()
return tret
else:
return "DBImage2DBuilder() is not initialized yet!"
def __str__(self):
return self.toString()
def __repr__(self):
return self.toString()
def _buildLBDMForLists(self, pathLMDB, imageTransformer2D, listLabels, mapImgPaths, imageEncoding, progressor):
tsizInBytes = 4 * (1024 ** 3)
        # (1) create LMDB object
with lmdb.open(pathLMDB, map_size=tsizInBytes) as env:
with env.begin(write=True) as txn:
for lidx, ll in enumerate(listLabels):
tlstPathImg = mapImgPaths[ll]
for pp in tlstPathImg:
timg = imageTransformer2D.processImageFromFile(pp, isReshapeFinal=True, isAccumulateMean=True)
datum = ImageTransformer2D.cvtImage2Datum(timg, imageEncoding, lidx)
str_id = '%6d%12d' % (lidx, progressor.counter)
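                        # LMDB key layout: label index left-padded to 6 chars plus a
                        # running counter padded to 12, e.g. '     0           3'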
txn.put(str_id.encode('ascii'), datum.SerializeToString())
progressor.update()
print (progressor)
def buildDataset(self, parProgressor=None):
if self.isInitialized():
timgEncoding = self.cfg2D.getImageEncoding()
cntProgressMax = self.imgReader2D.numTrainImages+self.imgReader2D.numValImages
if parProgressor is None:
progressor = Progressor(parCounterMax=cntProgressMax)
else:
progressor = parProgressor
progressor.counter = 0
progressor.counterMax = cntProgressMax
imageTransformer2D = ImageTransformer2D(self.cfg2D)
tpathLmdbTrain = self.getPathLmdbTrain()
tpathLmdbVal = self.getPathLmdbVal()
if os.path.isdir(tpathLmdbTrain):
print ('remove existing LMDB [%s] dir...' % tpathLmdbTrain)
shutil.rmtree(tpathLmdbTrain)
if os.path.isdir(tpathLmdbVal):
print ('remove existing LMDB [%s] dir...' % tpathLmdbVal)
shutil.rmtree(tpathLmdbVal)
# (1) build Training DB
progressor.strState = 'train-db'
self._buildLBDMForLists(tpathLmdbTrain,
imageTransformer2D,
self.imgReader2D.listLabels,
self.imgReader2D.listTrainPath,
timgEncoding,
progressor)
# (2) build Validation DB
progressor.strState = 'val-db'
self._buildLBDMForLists(tpathLmdbVal,
imageTransformer2D,
self.imgReader2D.listLabels,
self.imgReader2D.listValPath,
timgEncoding,
progressor)
timgMeanBlob = imageTransformer2D.getMeanImage(outType=np.float)
timgMeanImage = imageTransformer2D.getMeanImage()
tpathMeanBlob = self.getPathMeanBlob()
tpathMeanImage = self.getPathMeanImage()
# (3) save mean binary-proto
ImageTransformer2D.saveImage2BinaryBlob(timgMeanBlob, tpathMeanBlob)
if timgMeanImage.shape[0]==1:
timgMeanImage = timgMeanImage.reshape(tuple(timgMeanImage.shape[1:]))
else:
timgMeanImage = timgMeanImage.transpose((1,2,0))
# (4) save mean preview image
skio.imsave(tpathMeanImage, timgMeanImage)
tpathLabels = self.getParhLabels()
# (5) save labels to file
with open(tpathLabels, 'w') as f:
tmp = self.imgReader2D.listLabels
for ll in tmp:
f.write('%s\n' % ll)
# (6) save DB-config
tpathCfg = self.getPathDbConfig()
newCfg = copy.copy(self.cfg2D.cfg)
newCfg['dbType']='image2d'
tdateTime=datetime.now()
strDate=tdateTime.strftime('%Y.%m.%d')
strTime=tdateTime.strftime('%H:%M:%S')
# prepare histograms
tretLabels=self.imgReader2D.listLabels
tretLabelHistTrain = [(ll, len(self.imgReader2D.listTrainPath[ll])) for ll in tretLabels]
tretLabelHistVal = [(ll, len(self.imgReader2D.listValPath[ll] )) for ll in tretLabels]
# prepare date & time
tretDate={
'str': strDate,
'year': tdateTime.strftime('%Y'),
'month': tdateTime.strftime('%m'),
                'day': tdateTime.strftime('%d')
}
tretTime={
'str': strTime,
'hour': tdateTime.strftime('%H'),
'min': tdateTime.strftime('%M'),
'sec': tdateTime.strftime('%S'),
}
dbStats={
'numLabels' : self.imgReader2D.numLabels,
'numTrain' : self.imgReader2D.numTrainImages,
'numVal' : self.imgReader2D.numValImages,
'numTotal' : (self.imgReader2D.numTrainImages + self.imgReader2D.numValImages),
'date' : tretDate,
'time' : tretTime
}
dbHists={
'labels': tretLabels,
'histTrain': tretLabelHistTrain,
'histVal': tretLabelHistVal
}
newCfg['dbinfo'] = dbStats
newCfg['dbhist'] = dbHists
with open(tpathCfg,'w') as f:
f.write(json.dumps(newCfg, indent=4))
#generate preview
ImageTransformer2D.generateImagePreview(tpathLmdbTrain,nr=3,nc=5,fdirOut=self.wdir)
else:
            raise Exception("Can't build dataset, DBImage2DBuilder() is not initialized yet!")
#################################################
if __name__ == '__main__':
pass | {
"content_hash": "a2d9aa237a24a48134ce0913e54f8f10",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 118,
"avg_line_length": 41.08482142857143,
"alnum_prop": 0.5598174508312507,
"repo_name": "SummaLabs/DLS",
"id": "64e7a98f1729663d5d0ea644f1a76aa747664588",
"size": "9245",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/backend/core/datasets/dbbuilder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "28477"
},
{
"name": "HTML",
"bytes": "146817"
},
{
"name": "JavaScript",
"bytes": "491364"
},
{
"name": "Jupyter Notebook",
"bytes": "10111"
},
{
"name": "Protocol Buffer",
"bytes": "115393"
},
{
"name": "Python",
"bytes": "877535"
},
{
"name": "Shell",
"bytes": "7969"
}
],
"symlink_target": ""
} |
"""
Simulated annealing fitting routine.
Complete rebuild of the original SA fitting module written by Mark A. Zentile, now using lmfit
Last updated 2018-07-12 MAZ
"""
# py 2.7 compatibility
from __future__ import (division, print_function, absolute_import)
import numpy as np
import matplotlib.pyplot as plt
import warnings
import sys
import copy
import time
import pickle as pickle
import psutil
from multiprocessing import Pool
from . import MLFittingRoutine as ML
import lmfit as lm
from .spectra import get_spectra
p_dict_bounds_default = {'lcell':1e-3,'Bfield':100., 'T':20.,
'GammaBuf':20., 'shift':100.,
# Polarisation of light
'theta0':10., 'E_x':0.05, 'E_y':0.05, 'E_phase':0.01,
# B-field angle w.r.t. light k-vector
'Btheta':10*3.14/180, 'Bphi':10*3.14/180,
'DoppTemp':20.,
'rb85frac':1, 'K40frac':1, 'K41frac':1,
}
def chisq(yo,ye,err=None):
"""
Evaluate the chi-squared value of a given data/theory combination
Inputs:
yo : observed data
ye : expected (theory) data
err : Optional, error bars - array of length(x)
Returns:
float of chi-squared value
"""
if err is None:
err = np.ones_like(yo)
return (((yo-ye)/err)**2).sum()
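# Tiny illustration (hypothetical numbers): identical arrays give 0, and each point
# contributes ((yo-ye)/err)**2, e.g.
#   chisq(np.array([1., 2.]), np.array([1., 1.]))                          # -> 1.0
#   chisq(np.array([1., 2.]), np.array([1., 1.]), err=np.array([1., 2.]))  # -> 0.25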
def evaluate(args):
""" Evaluate chi-squared value for a given set of parameters """
warnings.simplefilter("ignore")
data = args[0]
p_dict = args[2]
E_in = np.array([p_dict['E_x'],p_dict['E_y']*np.exp(1.j*p_dict['E_phase']),0.])
p_dict_bools = args[3]
data_type = args[4]
theory_vals = get_spectra(data[0],E_in,p_dict,outputs=[data_type])[0].real
chisq_val = chisq(data[1],theory_vals)
return chisq_val, p_dict
def SA_fit(data,E_in,p_dict,p_dict_bools,p_dict_bounds=None,no_evals=None,data_type='S0',verbose=False):
"""
Simulated annealing fitting method.
Before simulated annealing starts, the parameter space is randomly sampled to find good starting conditions for the SA fit.
data: an Nx2 iterable for the x and y data to be fitted
E_in: the initial electric field input. See docstring for the spectra.py module for details.
    no_evals: The number of randomly-selected start points for downhill fitting. Defaults to 2**(8+2*nFitParams) where nFitParams is
the number of varying fit parameters
p_dict: dictionary containing all the calculation (initial) parameters
p_dict_bools: dictionary with the same keys as p_dict, with Boolean values representing each parameter that is to be varied in the fitting
p_dict_bounds: dictionary with the same keys as p_dict, with values that represent the deviation each parameter can take in the initial parameter search
NOTE: this works slightly differently to p_dict_bounds in the ML fitting methods. In RR and SA fitting, the bounds
select the range in parameter space that is randomly explored to find good starting parameters for the
SA routine, rather than being strict bounds on the fit parameters.
data_type: Data type to fit experimental data to. Can be one of:
'S0', 'S1', 'S2', 'S3', 'Ix', 'Iy', ...
verbose: Boolean - more print statements provided as the program progresses
"""
if p_dict_bounds is None:
p_dict_bounds = p_dict_bounds_default
print('Starting Simulated Annealing Fitting Routine')
x = np.array(data[0])
y = np.array(data[1])
p_dict['E_x'] = E_in[0]
p_dict['E_y'] = E_in[1][0]
p_dict['E_phase'] = E_in[1][1]
E_in_vector = np.array([p_dict['E_x'],p_dict['E_y']*np.exp(1.j*p_dict['E_phase']),0.])
# count number of fit parameters
nFitParams = 0
for key in p_dict_bools:
if p_dict_bools[key]: nFitParams += 1
# default number of iterations based on number of fit parameters
if no_evals == None:
no_evals = 2**(8+2*nFitParams)
# Create random array of starting parameters based on parameter ranges given in p_dict_bounds dictionary
# Scattered uniformly over the parameter space
#clone the parameter dictionary
p_dict_list = []
for i in range(no_evals):
p_dict_list.append(copy.deepcopy(p_dict))
for key in p_dict_bools:
if p_dict_bools[key]==True:
start_vals = p_dict[key]
#print start_vals
for i in range(len(p_dict_list)):
p_dict_list[i][key] = start_vals + np.random.uniform(-1,1) * p_dict_bounds[key]
if verbose:
print('List of initial parameter dictionaries:')
for pd in p_dict_list:
print(pd)
#print p_dict_list
print('\n\n')
#Do parallel ML fitting by utilising multiple cores
po = Pool() # Pool() uses all cores, Pool(3) uses 3 cores for example.
## use lower process priority so computer is still responsive while calculating!!
parent = psutil.Process()
parent.nice(psutil.BELOW_NORMAL_PRIORITY_CLASS)
for child in parent.children():
child.nice(psutil.IDLE_PRIORITY_CLASS)
args_list = [(data, E_in, p_dict_list[k], p_dict_bools, data_type) for k in range(no_evals)]
Res = po.map_async(evaluate,args_list)
result = Res.get()
po.close()
po.join()
result = np.array(result)
lineMin = np.argmin(result[:,0]) # position of lowest chi-squared value from initial guesses
SA_params = result[lineMin][1] # parameter dictionary associated with lowest chi-squared
best_chi_sq = result[lineMin][0] # lowest chi-squared value
current_chi_sq = best_chi_sq
current_params = copy.deepcopy(SA_params)
print('\n\n Initial parameter space evaluations completed...')
# Measure of how much the chi-squared values change over the parameter space
spread = np.std(result[:,0])
# Initial 'temperature' for the annealing
T = 1500.
# Cold temperature extreme where the algorithm will stop searching
minimum_T = 50.
# sets the effective range of a given temperature
range_scaling = 3
k = abs(spread)/T / range_scaling
# Cooling rate - how much the temperature decreases on each run.
cooling_rate = 7e-6
# Number of times a jump uphill is rejected before switching to ML fit.
uphill_escape_chance = 0
uphill_escape_threshold = 300
# Number of iterations to investigate a plateau in parameter space (i.e. chi-squared does not change)
plateau_escape_chance = 0
plateau_escape_threshold = 300
trial_params = copy.deepcopy(current_params)
best_params = copy.deepcopy(current_params)
best_chi_sq = current_chi_sq
#print trial_params
## testing - log algorithm path
chi_sq_log = [current_chi_sq]
T_log = [trial_params['T']]
B_log = [trial_params['Bfield']]
hot_iterations = 250
iteration = 0
while T > minimum_T:
# Generate a new set of parameters that are close to the last evaluated parameters
for key in p_dict_bools:
if p_dict_bools[key]==True: # (extra check)
## parameters varied over 10% of range specified before...
trial_params[key] = copy.deepcopy(current_params[key]) + np.random.normal(0,p_dict_bounds[key]) # * p_dict_bounds[key] #* 0.1
print('Before calculation:')
print(('Trial: ', trial_params['T'], trial_params['Bfield']))
print(('Current: ', current_params['T'], current_params['Bfield']))
trial_theory = get_spectra(x,E_in_vector,trial_params,outputs=[data_type])[0].real
trial_chi_sq = chisq(data[1],trial_theory)
# log best chi_squared values and parameters
if trial_chi_sq < best_chi_sq:
best_chi_sq = trial_chi_sq
best_params = copy.deepcopy(trial_params)
# Calculate energy difference
delta_chi_sq = trial_chi_sq - current_chi_sq
# and convert to probability of acceptance
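        # (Metropolis-style rule: a negative delta gives prob > 1 and is always accepted
        # below; uphill moves survive with probability exp(-delta/(k*T)), which shrinks
        # as the temperature T cools or as the chi-squared penalty grows)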
if delta_chi_sq < 1e-5:
# if there's no change in chi-squared (i.e. parameter space is locally flat), accept with small (5%) probability
print('WARNING - no change in chi-squared - probably due to plateau in parameter space!')
prob = 0.05
plateau_escape_chance += 1
else:
prob = np.exp(-delta_chi_sq/(k*T))
if verbose:
print(('\n\tBest chi-squared so far:', best_chi_sq))
print(('\tBest Parameters so far (T, B): ', best_params['T'], best_params['Bfield']))
print(('\tCurrent Parameters (T, B): ', current_params['T'], current_params['Bfield']))
print(('\tTrial Parameters (T, B): ', trial_params['T'], trial_params['Bfield']))
print(('\n\tCurrent chi-squared:', current_chi_sq))
print(('\tTrial chi-squared:', trial_chi_sq))
print(('\tChange in chi-squared from previous:', delta_chi_sq))
print(('\tTemperature parameter: ', T))
print(('\tProbability that new parameters will be accepted (>1 == 1):', prob))
if (delta_chi_sq < 0) or (prob > np.random.random()):
# accept downhill movement, or uphill movement with probability prob - update chi_squared and parameters
current_chi_sq = trial_chi_sq
current_params = copy.deepcopy(trial_params)
            chi_sq_log.append(trial_chi_sq) ## keep log of chi-squared values (on successful iterations only)
T_log.append(trial_params['T'])
B_log.append(trial_params['Bfield'])
print('\t...Values accepted. Current parameters updated.')
print('\n')
# reset escape chance
uphill_escape_chance = 0
else:
print(('\t...Values rejected. Escape threshold:', uphill_escape_chance, ' / ', uphill_escape_threshold))
print('\n')
uphill_escape_chance += 1
# Cool system
# Hold T constant for first N iterations
if iteration > hot_iterations:
T = T/(1 + (cooling_rate*T)) #Lundy's Method (http://link.springer.com/article/10.1007/BF01582166)
iteration += 1
# Exit annealing loop if conditions are correct
if (uphill_escape_chance > uphill_escape_threshold):
print(('Simulated annealing completed ( No improvement found after {:d} iterations)'.format(uphill_escape_threshold)))
print('Switching to downhill fit using best found parameters...\n')
break
if (T < minimum_T): #No jumps up hill for a while, or minimum temperature reached
print('Simulated annealing completed ( Temperature reached minimum threshold )')
print('Switching to downhill fit using best found parameters...\n')
break
if (plateau_escape_chance > plateau_escape_threshold):
print('!!!\n\tCAUTION :: Annealing has not converged.')
print('\tAnnealing algorithm found plateau in parameter space')
print('\tSwitching to downhill fit using best found parameters...\n!!!')
break
#### Marquardt-Levenberg fit #####
try:
print(('Downhill fit with initial parameters: (T,B) ', best_params['T'], best_params['Bfield']))
ML_best_params, result = ML.ML_fit(data, E_in, best_params, p_dict_bools, data_type)
MLchi_sq = result.chisqr
if MLchi_sq <= best_chi_sq:
best_params = ML_best_params
final_result = result
success = True
print('Downhill fit converged successfully.\n')
else:
print('Downhill fit did not find further improvement. Continuing with simulated annealing result.\n')
success = False
final_result = 1
except:
print('Downhill fit failed to converge. Continuing with simulated annealing result.\n')
success = False
final_result = 1
return best_params, final_result #, chi_sq_log, T_log, B_log, success
def test_fit(calc=False):
p_dict = {'elem':'Rb','Dline':'D2','T':80.,'lcell':2e-3,'Bfield':600.,'Btheta':0.,
'Bphi':0.,'GammaBuf':0.,'shift':0.}
# only need to specify parameters that are varied
p_dict_bools = {'T':True,'Bfield':True}
p_dict_bounds = {'T':20,'Bfield':200}
E_in = np.array([0.7,0.7,0])
E_in_angle = [E_in[0].real,[abs(E_in[1]),np.angle(E_in[1])]]
print(E_in_angle)
x = np.linspace(-10000,10000,200)
if calc:
[y] = get_spectra(x,E_in,p_dict,outputs=['S1']) + np.random.randn(len(x))*0.01
pickle.dump([x,y],open('pickle_xydata.pkl','wb'))
else:
x,y = pickle.load(open('pickle_xydata.pkl','rb'))
data = [x,y.real]
# Map chi-squared surface:
Tvals = np.linspace(60,100,200)
Bvals = np.linspace(400,800,220)
T2D, B2D = np.meshgrid(Tvals,Bvals)
CSQ_map = np.zeros((len(Tvals),len(Bvals)))
if calc:
for i,TT in enumerate(Tvals):
print((i, len(Tvals)))
for j, BB in enumerate(Bvals):
p_dict['T'] = TT
p_dict['Bfield'] = BB
[ye] = get_spectra(x,E_in,p_dict,outputs=['S1'])
CSQ_map[i,j] = chisq(y,ye)
pickle.dump([T2D,B2D,CSQ_map],open('pickle_CSQmap.pkl','wb'))
else:
T2D, B2D, CSQ_map = pickle.load(open('pickle_CSQmap.pkl','rb'))
fig_map = plt.figure()
ax = fig_map.add_subplot(111)
ax.imshow(CSQ_map.T,origin='lower',aspect='auto',
extent=(Tvals[0],Tvals[-1],Bvals[0],Bvals[-1]),
cmap=plt.cm.jet,alpha=0.7)
ax.contour(T2D, B2D, CSQ_map.T,7,lw=2,color='k')
#plt.show()
## Do SA fitting
best_params, result, chi_sq_log, T_log, B_log = SA_fit(data, E_in_angle, p_dict, p_dict_bools,
p_dict_bounds, no_evals = 4, data_type='S1')
report = result.fit_report()
fit = result.best_fit
fig_data = plt.figure()
print(report)
plt.plot(x,y,'ko')
plt.plot(x,fit,'r-',lw=2)
# Chi-squared log with iteration number
fig_chisqlog = plt.figure()
plt.plot(chi_sq_log)
plt.ylabel('Chi_squared value')
plt.xlabel('Iteration')
#plt.show()
ax.plot(T_log, B_log, 'ko', ms=1)
plt.show()
def test_fit2(calc=False):
""" Alternate data set to test """
p_dict = {'elem':'Rb','Dline':'D2','T':80.,'lcell':2e-3,'Bfield':250.,'Btheta':0.,
'Bphi':0.,'GammaBuf':0.,'shift':0.}
# only need to specify parameters that are varied
p_dict_bools = {'T':True,'Bfield':True}
p_dict_bounds = {'T':15,'Bfield':100}
E_in = np.array([1.0,0.0,0.0])
E_in_angle = [E_in[0].real,[abs(E_in[1]),np.angle(E_in[1])]]
print(E_in_angle)
x = np.linspace(-7000,8000,200)
if calc:
[y] = get_spectra(x,E_in,p_dict,outputs=['S0']) + np.random.randn(len(x))*0.02
pickle.dump([x,y],open('pickle_xydata.pkl','wb'))
else:
x,y = pickle.load(open('pickle_xydata.pkl','rb'))
data = [x,y.real]
# Map chi-squared surface:
Tvals = np.linspace(40,120,300)
Bvals = np.linspace(0,500,250)
T2D, B2D = np.meshgrid(Tvals,Bvals)
CSQ_map = np.zeros((len(Tvals),len(Bvals)))
if calc:
for i,TT in enumerate(Tvals):
print((i, len(Tvals)))
for j, BB in enumerate(Bvals):
p_dict['T'] = TT
p_dict['Bfield'] = BB
[ye] = get_spectra(x,E_in,p_dict,outputs=['S0'])
CSQ_map[i,j] = chisq(y,ye)
pickle.dump([T2D,B2D,CSQ_map],open('pickle_CSQmap.pkl','wb'))
else:
T2D, B2D, CSQ_map = pickle.load(open('pickle_CSQmap.pkl','rb'))
fig_map = plt.figure()
ax = plt.subplot2grid((1,14),(0,0),colspan=13)
ax_cb = plt.subplot2grid((1,14),(0,13),colspan=1)
im = ax.imshow(CSQ_map.T,origin='lower',aspect='auto',
extent=(Tvals[0],Tvals[-1],Bvals[0],Bvals[-1]),
cmap=plt.cm.jet,alpha=0.7)
ax.contour(T2D, B2D, CSQ_map.T,7,lw=2,color='k')
cb = fig_map.colorbar(im,cax=ax_cb)
#plt.show()
## Do SA fitting
best_params, result, chi_sq_log, T_log, B_log = SA_fit(data, E_in_angle, p_dict, p_dict_bools,
p_dict_bounds, no_evals = 2, data_type='S0')
report = result.fit_report()
fit = result.best_fit
fig_data = plt.figure()
print(report)
plt.plot(x,y,'ko')
plt.plot(x,fit,'r-',lw=2)
# Chi-squared log with iteration number
fig_chisqlog = plt.figure()
plt.plot(chi_sq_log)
plt.ylabel('Chi_squared value')
plt.xlabel('Iteration')
#plt.show()
ax.plot(T_log, B_log, 'ko', ms=1)
ax.plot(T_log[0], B_log[0], 'rs', ms=6)
ax.plot(T_log[-1], B_log[-1], 'bs', ms=6)
plt.show()
def test_fit3(calc=False):
""" Alternate data set to test """
p_dict = {'elem':'Rb','Dline':'D2','T':155.,'lcell':5e-3,'Bfield':1100.,'Btheta':0.,
'Bphi':0.,'GammaBuf':0.,'shift':0.}
# only need to specify parameters that are varied
p_dict_bools = {'T':True,'Bfield':True}
p_dict_bounds = {'T':25,'Bfield':300}
E_in = np.array([1.0,0.0,0.0])
E_in_angle = [E_in[0].real,[abs(E_in[1]),np.angle(E_in[1])]]
print(E_in_angle)
x = np.linspace(-13000,-6000,150)
if calc:
[y] = get_spectra(x,E_in,p_dict,outputs=['S1']) + np.random.randn(len(x))*0.02
pickle.dump([x,y],open('pickle_xydata.pkl','wb'))
else:
x,y = pickle.load(open('pickle_xydata.pkl','rb'))
data = [x,y.real]
# Map chi-squared surface:
#Tvals = np.linspace(p_dict['T']-p_dict_bounds['T'],p_dict['T']+p_dict_bounds['T'],150)
#Bvals = np.linspace(p_dict['Bfield']-p_dict_bounds['Bfield'],p_dict['Bfield']+p_dict_bounds['Bfield'],125)
Tvals = np.linspace(80,250,450)
Bvals = np.linspace(0,2500,300)
T2D, B2D = np.meshgrid(Tvals,Bvals)
CSQ_map = np.zeros((len(Tvals),len(Bvals)))
if calc:
for i,TT in enumerate(Tvals):
print((i, len(Tvals)))
for j, BB in enumerate(Bvals):
p_dict['T'] = TT
p_dict['Bfield'] = BB
[ye] = get_spectra(x,E_in,p_dict,outputs=['S1'])
CSQ_map[i,j] = chisq(y,ye)
pickle.dump([T2D,B2D,CSQ_map],open('pickle_CSQmap.pkl','wb'))
else:
T2D, B2D, CSQ_map = pickle.load(open('pickle_CSQmap.pkl','rb'))
print(T2D)
print(B2D)
fig_map = plt.figure()
ax = plt.subplot2grid((1,14),(0,0),colspan=13)
ax_cb = plt.subplot2grid((1,14),(0,13),colspan=1)
im = ax.imshow(CSQ_map.T,origin='lower',aspect='auto',
extent=(T2D[0][0],T2D[-1][-1],B2D[0][0],B2D[-1][-1]),
cmap=plt.cm.jet,alpha=0.7)
ax.contour(T2D, B2D, CSQ_map.T,9,lw=2,color='k')
cb = fig_map.colorbar(im,cax=ax_cb)
#plt.show()
## Do SA fitting
best_params, result, chi_sq_log, T_log, B_log, success = SA_fit(data, E_in_angle, p_dict, p_dict_bools,
p_dict_bounds, no_evals = 32, data_type='S1')
if success:
report = result.fit_report()
fit = result.best_fit
print(report)
fig_data = plt.figure()
plt.plot(x,y,'ko')
if success:
plt.plot(x,fit,'r-',lw=2)
# Chi-squared log with iteration number
fig_chisqlog = plt.figure()
plt.plot(chi_sq_log)
plt.ylabel('Chi_squared value')
plt.xlabel('Iteration')
#plt.show()
ax.plot(T_log, B_log, 'ko', ms=1)
ax.plot(T_log, B_log, 'k--', lw=1.5)
ax.plot(T_log[0], B_log[0], 'rs', ms=6)
ax.plot(T_log[-1], B_log[-1], 'bs', ms=6)
plt.show()
def test_fit4(calc=False):
""" Alternate data set to test """
#actual params 110 / 7000
p_dict = {'elem':'Rb','Dline':'D2','T':100.,'lcell':5e-3,'Bfield':6100.,'Btheta':0.,
'Bphi':0.,'GammaBuf':0.,'shift':0.,'rb85frac':1.0}
# only need to specify parameters that are varied
p_dict_bools = {'T':True,'Bfield':True}
p_dict_bounds = {'T':5,'Bfield':400}
E_in = np.array([1.0,0.0,0.0])
E_in_angle = [E_in[0].real,[abs(E_in[1]),np.angle(E_in[1])]]
print(E_in_angle)
x = np.linspace(-21000,-13000,150)
if calc:
[y] = get_spectra(x,E_in,p_dict,outputs=['S0']) + np.random.randn(len(x))*0.02
pickle.dump([x,y],open('pickle_xydata.pkl','wb'))
else:
x,y = pickle.load(open('pickle_xydata.pkl','rb'))
data = [x,y.real]
# Map chi-squared surface:
#Tvals = np.linspace(p_dict['T']-p_dict_bounds['T'],p_dict['T']+p_dict_bounds['T'],150)
#Bvals = np.linspace(p_dict['Bfield']-p_dict_bounds['Bfield'],p_dict['Bfield']+p_dict_bounds['Bfield'],125)
Tvals = np.linspace(70,150,350)
Bvals = np.linspace(5000,10000,300)
T2D, B2D = np.meshgrid(Tvals,Bvals)
CSQ_map = np.zeros((len(Tvals),len(Bvals)))
if calc:
for i,TT in enumerate(Tvals):
print((i, len(Tvals)))
for j, BB in enumerate(Bvals):
p_dict['T'] = TT
p_dict['Bfield'] = BB
[ye] = get_spectra(x,E_in,p_dict,outputs=['S0'])
CSQ_map[i,j] = chisq(y,ye)
pickle.dump([T2D,B2D,CSQ_map],open('pickle_CSQmap.pkl','wb'))
else:
T2D, B2D, CSQ_map = pickle.load(open('pickle_CSQmap.pkl','rb'))
print(T2D)
print(B2D)
fig_map = plt.figure()
ax = plt.subplot2grid((1,14),(0,0),colspan=13)
ax_cb = plt.subplot2grid((1,14),(0,13),colspan=1)
im = ax.imshow(CSQ_map.T,origin='lower',aspect='auto',
extent=(T2D[0][0],T2D[-1][-1],B2D[0][0],B2D[-1][-1]),
cmap=plt.cm.jet,alpha=0.7)
ax.contour(T2D, B2D, CSQ_map.T,9,lw=2,color='k')
cb = fig_map.colorbar(im,cax=ax_cb)
#plt.show()
## Do SA fitting
best_params, result, chi_sq_log, T_log, B_log, success = SA_fit(data, E_in_angle, p_dict, p_dict_bools,
p_dict_bounds, no_evals = 32, data_type='S0')
fig_data = plt.figure()
plt.plot(x,y,'ko')
if success:
report = result.fit_report()
fit = result.best_fit
print(report)
else:
print(best_params)
fit = get_spectra(x,E_in,best_params,outputs=['S0'])[0].real
plt.plot(x,fit,'r-',lw=2)
# Chi-squared log with iteration number
fig_chisqlog = plt.figure()
plt.plot(chi_sq_log)
plt.ylabel('Chi_squared value')
plt.xlabel('Iteration')
#plt.show()
ax.plot(T_log, B_log, 'ko', ms=1)
ax.plot(T_log, B_log, 'k--', lw=1.5)
ax.plot(T_log[0], B_log[0], 'rs', ms=6)
ax.plot(T_log[-1], B_log[-1], 'bs', ms=6)
plt.show()
if __name__ == '__main__':
test_fit4(False)
| {
"content_hash": "6a5b8c63f396b184fcd23fc8bfed0a7d",
"timestamp": "",
"source": "github",
"line_count": 650,
"max_line_length": 153,
"avg_line_length": 31.435384615384617,
"alnum_prop": 0.653746390642588,
"repo_name": "jameskeaveney/ElecSus",
"id": "8bed4c5d7a0017b31430e73a782c9be7c365770a",
"size": "21074",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "elecsus/libs/SAFittingRoutine.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "371112"
}
],
"symlink_target": ""
} |
from oslo.concurrency import lockutils
from oslo.config import cfg
import stevedore
from ironic.common import exception
dhcp_provider_opts = [
cfg.StrOpt('dhcp_provider',
default='neutron',
help='DHCP provider to use. "neutron" uses Neutron, and '
'"none" uses a no-op provider.'
),
]
CONF = cfg.CONF
CONF.register_opts(dhcp_provider_opts, group='dhcp')
_dhcp_provider = None
EM_SEMAPHORE = 'dhcp_provider'
class DHCPFactory(object):
# NOTE(lucasagomes): Instantiate a stevedore.driver.DriverManager
# only once, the first time DHCPFactory.__init__
# is called.
_dhcp_provider = None
def __init__(self, **kwargs):
if not DHCPFactory._dhcp_provider:
DHCPFactory._set_dhcp_provider(**kwargs)
# NOTE(lucasagomes): Use lockutils to avoid a potential race in eventlet
# that might try to create two dhcp factories.
@classmethod
@lockutils.synchronized(EM_SEMAPHORE, 'ironic-')
def _set_dhcp_provider(cls, **kwargs):
"""Initialize the dhcp provider
:raises: DHCPNotFound if the dhcp_provider cannot be loaded.
"""
# NOTE(lucasagomes): In case multiple greenthreads queue up on
# this lock before _dhcp_provider is initialized,
# prevent creation of multiple DriverManager.
if cls._dhcp_provider:
return
dhcp_provider_name = CONF.dhcp.dhcp_provider
try:
_extension_manager = stevedore.driver.DriverManager(
'ironic.dhcp',
dhcp_provider_name,
invoke_kwds=kwargs,
invoke_on_load=True)
except RuntimeError:
raise exception.DHCPNotFound(dhcp_provider_name=dhcp_provider_name)
cls._dhcp_provider = _extension_manager.driver
def update_dhcp(self, task, dhcp_opts):
"""Send or update the DHCP BOOT options for this node.
:param task: A TaskManager instance.
:param dhcp_opts: this will be a list of dicts, e.g.
::
[{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.456'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'}]
"""
self.provider.update_dhcp_opts(task, dhcp_opts)
@property
def provider(self):
return self._dhcp_provider
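# Rough usage sketch (the task object comes from elsewhere; option values illustrative):
#   api = dhcp_factory.DHCPFactory()
#   api.update_dhcp(task, [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}])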
| {
"content_hash": "fad1fd515624fad00b4eb89d6c631b54",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 79,
"avg_line_length": 32.670731707317074,
"alnum_prop": 0.561030235162374,
"repo_name": "Tehsmash/ironic",
"id": "c6848c6d01dbc78e0df7ed675fa8c6d7e31777b1",
"size": "3287",
"binary": false,
"copies": "1",
"ref": "refs/heads/staging/kiloplus",
"path": "ironic/common/dhcp_factory.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2250030"
}
],
"symlink_target": ""
} |
from examples_common import *
from connectors import *
# default configuration
#
DEFAULT_JDBC_CONFIG = JdbcConnectorConfig(host='localhost',
port='5555',
db_name='db_samples',
user_name='test',
user_pass='test')
DEFAULT_ES_CONFIG = ElasticConnectorConfig(host='localhost',
port=9200)
# individual examples
#
class TestExample1(TestSingleExampleDb2Es):
"""
Example1 test case description:
- ingest records from a single database source and store them in ElasticSearch
"""
def __init__(self, examples_path, *args, **kwargs):
super(TestExample1, self).__init__(source_conn_conf=DEFAULT_JDBC_CONFIG,
source_table_name='observations_view',
es_conn_conf=DEFAULT_ES_CONFIG,
es_index_name='sample_observations_view',
example_path=os.path.join(examples_path, "example1"),
*args, **kwargs)
class TestExample2(TestSingleExampleDb2Es):
"""
Example2 test case description:
- ingest records from a single database source and store them in ElasticSearch
- used in Quickstart
"""
def __init__(self, examples_path, *args, **kwargs):
super(TestExample2, self).__init__(source_conn_conf=DEFAULT_JDBC_CONFIG,
source_table_name='observations_view',
es_conn_conf=DEFAULT_ES_CONFIG,
es_index_name='sample_observations_view',
example_path=os.path.join(examples_path, "example2"),
*args, **kwargs)
class TestExample3(TestSingleExampleDb2Es):
"""
Example3 test case description:
    - ingest records from a single database source and store them in ElasticSearch (synthetic)
- ingest records from a single database source and store them in ElasticSearch (mtsamples)
"""
def __init__(self, examples_path, *args, **kwargs):
super(TestExample3, self).__init__(source_conn_conf=DEFAULT_JDBC_CONFIG,
source_table_name='observations_view',
es_conn_conf=DEFAULT_ES_CONFIG,
es_index_name='sample_observations_view',
example_path=os.path.join(examples_path, "example3"),
*args, **kwargs)
self.source_conn_conf_mt = JdbcConnectorConfig(host='localhost',
port='5556',
db_name='db_samples',
user_name='test',
user_pass='test')
self.source_table_name_mt = 'mtsamples'
self.es_index_name_mt = 'sample_mt'
def test_source_target_mapping_mt(self):
# test the MTSamples mapping
self.log.info("Waiting for source/sink to become ready ...")
time.sleep(self.wait_for_souce_ready_s)
source_conn_mt = JdbcConnector(self.source_conn_conf_mt)
es_conn = ElasticConnector(self.es_conn_conf)
self.log.info("Waiting for cogstack pipeline to process records ...")
#time.sleep(self.wait_for_sink_ready_s)
self.waitForTargetEsReady(es_conn.connector, self.es_index_name, self.wait_for_sink_ready_max_s)
recs_in = self.getRecordsCountFromTargetDb(source_conn_mt.connector, self.source_table_name_mt)
recs_out = self.getRecordsCountFromTargetEs(es_conn.connector, self.es_index_name_mt)
self.assertEqual(recs_out, recs_in,
"There are less MTSamples records stored in sink (%s) than in source (%s)." % (recs_out, recs_in))
class TestExample4(TestSingleExampleDb2Es):
"""
Example4 test case description:
- ingest records from a single database source, run through Tika and store them in ElasticSearch
- using only 'docx' sub-case testing raw text extraction using Tika
"""
def __init__(self, examples_path, *args, **kwargs):
super(TestExample4, self).__init__(source_conn_conf=DEFAULT_JDBC_CONFIG,
source_table_name='observations_view',
es_conn_conf=DEFAULT_ES_CONFIG,
es_index_name='sample_observations_view',
example_path=os.path.join(examples_path, "example4"),
sub_case='docx',
*args, **kwargs)
class TestExample5s1(TestSingleExampleDb2Db):
"""
Example5-stage1 test case description:
- ingest records from a single database source, run through Tika and store them in output database
- using only 'docx' sub-case testing raw text extraction using Tika
"""
def __init__(self, examples_path, *args, **kwargs):
super(TestExample5s1, self).__init__(source_conn_conf=DEFAULT_JDBC_CONFIG,
source_table_name='medical_reports',
sink_conn_conf=DEFAULT_JDBC_CONFIG,
sink_table_name='medical_reports_processed',
example_path=os.path.join(examples_path, "example5"),
sub_case='docx',
*args, **kwargs)
class TestExample5s2(TestSingleExampleDb2Es):
"""
Example5-stage2 test case description:
- ingest records from a single database source and store them in ElasticSearch
"""
def __init__(self, examples_path, *args, **kwargs):
super(TestExample5s2, self).__init__(source_conn_conf=DEFAULT_JDBC_CONFIG,
source_table_name='observations_view',
es_conn_conf=DEFAULT_ES_CONFIG,
es_index_name='sample_observations_view',
example_path=os.path.join(examples_path, "example5"),
sub_case='docx',
*args, **kwargs)
class TestExample6(TestSingleExampleDb2Es):
def __init__(self, examples_path, *args, **kwargs):
"""
Example6 test case description:
- ingest records from a single database source and store them in ElasticSearch
- using NGINX as a proxy
"""
super(TestExample6, self).__init__(source_conn_conf=DEFAULT_JDBC_CONFIG,
source_table_name='observations_view',
es_conn_conf=ElasticConnectorConfig(host='localhost',
port='9200',
http_auth_pass='test',
http_auth_user='test'),
es_index_name='sample_observations_view',
example_path=os.path.join(examples_path, "example6"),
*args, **kwargs)
class TestExample7(TestSingleExampleDb2Es):
def __init__(self, examples_path, *args, **kwargs):
"""
Example7 test case description:
- ingest records from a single database source and store them in ElasticSearch
- using Fluentd logging driver
- using NGINX as a proxy
"""
super(TestExample7, self).__init__(source_conn_conf=DEFAULT_JDBC_CONFIG,
source_table_name='observations_view',
es_conn_conf=ElasticConnectorConfig(host='localhost',
port='9200',
http_auth_pass='test',
http_auth_user='test'),
es_index_name='sample_observations_view',
example_path=os.path.join(examples_path, "example7"),
*args, **kwargs)
class TestExample8(TestSingleExampleDb2Es):
def __init__(self, examples_path, *args, **kwargs):
"""
Example8 test case description:
- ingest records from a single database source, run NLP and store them in ElasticSearch
- running a custom GATE NLP application to extract annotations
"""
super(TestExample8, self).__init__(source_conn_conf=DEFAULT_JDBC_CONFIG,
source_table_name='observations_view',
es_conn_conf=DEFAULT_ES_CONFIG,
es_index_name='sample_observations_view',
example_path=os.path.join(examples_path, "example8"),
image_build_rel_dir="../../../dockerfiles/gate",
*args, **kwargs)
class TestExample9(TestSingleExampleDb2Es):
def __init__(self, examples_path, *args, **kwargs):
"""
Example9 test case description:
- ingest records from a single database source, run Tika+NLP and store them in ElasticSearch
- running Tika on the binary documents
- running a custom GATE NLP application to extract annotations
"""
super(TestExample9, self).__init__(source_conn_conf=DEFAULT_JDBC_CONFIG,
source_table_name='observations_view',
es_conn_conf=DEFAULT_ES_CONFIG,
es_index_name='sample_observations_view',
example_path=os.path.join(examples_path, "example9"),
image_build_rel_dir="../../../dockerfiles/gate",
*args, **kwargs)
| {
"content_hash": "162923f8084a51add4e3a8d922007401",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 123,
"avg_line_length": 54.065,
"alnum_prop": 0.4893184130213632,
"repo_name": "CogStack/cogstack",
"id": "504ece59bf755c9d6c654962fde8db37e5273867",
"size": "10832",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/examples/examples_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "2090"
},
{
"name": "HTML",
"bytes": "2995818"
},
{
"name": "Java",
"bytes": "460672"
},
{
"name": "Shell",
"bytes": "8261"
}
],
"symlink_target": ""
} |
version_info = (2, 4, 0, 'final', 0)
def _get_version():
" Returns a PEP 386-compliant version number from version_info. "
assert len(version_info) == 5
assert version_info[3] in ('alpha', 'beta', 'rc', 'final')
parts = 2 if version_info[2] == 0 else 3
main = '.'.join(map(str, version_info[:parts]))
sub = ''
if version_info[3] == 'alpha' and version_info[4] == 0:
# TODO: maybe append some sort of git info here??
sub = '.dev'
elif version_info[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}
sub = mapping[version_info[3]] + str(version_info[4])
return str(main + sub)
version = _get_version()
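# For the version_info above, parts == 2 (the micro version is 0) and the release is
# 'final', so this evaluates to "2.4".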
| {
"content_hash": "ef4689a678838e4cfbac6eae5303dcb7",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 69,
"avg_line_length": 33.666666666666664,
"alnum_prop": 0.5502121640735502,
"repo_name": "brian-yang/mozillians",
"id": "65edeeddbe2024b9e9a9513b5ec0deaec794cfdb",
"size": "907",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "vendor-local/lib/python/markdown/__version__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "1986"
},
{
"name": "CSS",
"bytes": "210671"
},
{
"name": "HTML",
"bytes": "184994"
},
{
"name": "JavaScript",
"bytes": "154038"
},
{
"name": "Makefile",
"bytes": "478"
},
{
"name": "Python",
"bytes": "9184371"
},
{
"name": "Shell",
"bytes": "7758"
}
],
"symlink_target": ""
} |
from osp_pipeline.core.models import FilesModel
from pyspark.sql import types as T
from .core import MARCModel, MARCDatafield
from ..sources import marc
from ...core.counters import Counters
class RawMARCRecord(FilesModel):
schema = T.StructType([
T.StructField('marc_id', T.StringType(), nullable=False),
T.StructField('marc_source', T.StringType(), nullable=False),
T.StructField('timestamp', T.StringType(), nullable=False),
T.StructField('datafields',
T.ArrayType(MARCDatafield),
nullable=False),
])
@classmethod
def iterfile(cls, fh):
"""yield `cls` from a compressed LOC XML file."""
fh = fh() if callable(fh) else fh
for source in marc.SourceMARCRecord.iterfile(fh):
yield cls(
marc_id=source.marc_id,
marc_source=source.marc_source,
timestamp=source.timestamp,
datafields=source.datafields
)
# Work around ModelMeta's broken subclass handling
class MARCRecordMixin(object):
MARC_SOURCE = '' # default control org if not set in source
FREE_TO_READ = None # default free_to_read if not set in source
schema = T.StructType([
T.StructField('id', T.StringType()),
T.StructField('source', T.StringType()),
T.StructField('marc_id', T.StringType()),
T.StructField('marc_source', T.StringType()),
T.StructField('timestamp', T.StringType(), nullable=False),
T.StructField('loc_control_number', T.StringType()),
T.StructField('title', T.StringType()),
T.StructField('subtitle', T.StringType()),
T.StructField('person_name', T.ArrayType(T.StringType())),
T.StructField('organization_name', T.ArrayType(T.StringType())),
T.StructField('medium', T.StringType()),
T.StructField('publisher', T.StringType()),
T.StructField('publication_date', T.StringType()),
T.StructField('series', T.StringType()),
T.StructField('isbn', T.ArrayType(T.StringType())),
T.StructField('issn', T.ArrayType(T.StringType())),
T.StructField('subject', T.ArrayType(T.StringType())),
T.StructField('free_to_read', T.BooleanType()),
])
@classmethod
def from_raw(cls, raw):
Counters.inc('MARCRecordMixin.raw_count')
marc_source = raw.marc_source or cls.MARC_SOURCE
return cls.from_marc(raw.datafields,
source=cls.SOURCE,
marc_id=raw.marc_id,
marc_source=marc_source,
free_to_read=cls.FREE_TO_READ,
timestamp=raw.timestamp,
# generate a unique ID for the record
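                             # (e.g. '123|LOC|2015-01-01' for illustrative values; the
                             # join keeps records from different dumps/sources distinct)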
id="|".join((raw.marc_id,
marc_source,
raw.timestamp)),
)
@classmethod
def from_files(cls, url):
return cls.build_df(
RawMARCRecord
.from_files(url)
.rdd
.map(cls.from_raw))
# Subclass b/c output destination is hardwired into Model's metaclass. Ugh.
class LOCRecord(MARCRecordMixin, MARCModel):
s3_key = 'loc_catalog'
rows_per_output_partition = 20000
MARC_SOURCE = 'LOC'
SOURCE = 'loc'
class OTLRecord(MARCRecordMixin, MARCModel):
s3_key = 'otl_catalog'
rows_per_output_partition = 1000
MARC_SOURCE = 'OTL'
FREE_TO_READ = True
SOURCE = 'otl'
| {
"content_hash": "d27b8bea271a497ba901ce20b9791e57",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 75,
"avg_line_length": 37.46875,
"alnum_prop": 0.5757575757575758,
"repo_name": "opensyllabus/osp-pipeline",
"id": "94185d639c832caf9b74c521b21de6e603d4619c",
"size": "3597",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "osp_pipeline/catalog/models/marc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "72698"
},
{
"name": "Python",
"bytes": "283679"
},
{
"name": "Shell",
"bytes": "10441"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from __future__ import absolute_import
from builtins import str
from builtins import next
from builtins import object
import os, codecs, re
from .helpers import save_json, load_json
from .content import Content
class Position(object):
"""
more than just a number of an item
or index of a list
we just want to hold a position and length
from this we can determine the number for previous, next
and provide increment and decrement options
loop is tracked here
error checking and representing positions
"""
def __init__(self, length=0, position=0, loop=True):
self._index = position
self._length = length
self.loop = loop
def __int__(self):
return self.position
def __str__(self):
return str(self.position)
def __repr__(self):
        return str(self.position)
def _get_length(self):
return self._length
def _set_length(self, l):
self.change_length(l)
length = property(_get_length, _set_length)
def _get_index(self):
return self._index
def _set_index(self, p):
self._index = self.check(p)
position = property(_get_index, _set_index)
def end(self):
"""
the value for the last object
"""
return self.length-1
def at_end(self):
"""
        return a boolean indicating whether our position is equal to the end
"""
return self.position == self.end()
def change_length(self, length):
"""
position needs to know how long the list is
we can change that later if we don't know the length
"""
self._length = length
#go ahead one just to make sure we weren't beyond the new length
self.decrement()
self.increment()
def check(self, position):
"""
        accept a value for a position
check to make sure it falls within the range of acceptable values
if greater, go to the end
if less than 0, go to the beginning
could consider doing a mod operation and taking the remainder as
the new position.
"""
if position < 0:
return 0
elif position >= 0 and position <= self.end():
return int(position)
else:
return self.end()
def next(self, value=1):
"""
gives the position for the next item
but does not actually increment the index
"""
if self.position+value >= self.length:
if self.loop:
return 0
else:
#staying at the end
#return self.position
#return self.length-1
return self.end()
else:
return self.position+value
def previous(self, value=1):
"""
gives the position for the next item
but does not actually increment the index
"""
if self.position-value < 0:
if self.loop:
#return self.length-1
return self.end()
else:
#staying at the beginning
#(should be 0 already)
return 0
else:
return self.position-value
def increment(self, value=1):
"""
changes the actual index variable
"""
self.position = self.next(value)
return self.position
def decrement(self, value=1):
"""
changes the actual index variable
"""
self.position = self.previous(value)
return self.position
def debug(self):
"""
return a string representation of current state
"""
result = ''
result += "Position index: %s\n" % self._index
result += "Length: %s\n" % self._length
result += "Loop: %s\n" % self.loop
return result
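# Small usage sketch (illustrative values): a Position over three items that loops
#   p = Position(length=3, position=2)
#   p.next()       # -> 0, wraps past the end because loop defaults to True
#   p.increment()  # -> 0 and the stored index is now 0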
#previously: (too generic)
#class Items(list):
class PositionList(list):
"""
generic list with a position associated with it
position will get updated with call to update()
otherwise...
changing the position is left to the caller
"""
def __init__(self, items=[], position=0):
list.__init__(self)
self.extend(items)
self._position = Position(len(items), position)
#quick way to access the current item directly
#rather than having get return the value
#if items:
# self.current = self.get()
#else:
# #if nothing was sent, be sure to initialize current later!
# self.current = None
#special case for get_next...
#if we're new, return 0
#otherwise... all other rules apply
self.new = True
#wrap position object, so that we can assign a new position to the list
#as though it were an attribute.
#this simplifies the interface to the list of items.
def _get_position(self):
return self._position
def _set_position(self, p):
self.go(p)
position = property(_get_position, _set_position)
#aka get_current?
#def get(self, position=None):
def current(self):
"""
shortcut for get() without a specific position passed in
"""
return self.get()
def get(self, position=None):
"""
get calls will not change our position
"""
#make sure position's length is always current:
self.update_length()
#print "Received position: %s" % position
#print "Current position: %s" % self._position
#print "Length: %s" % len(self)
#should we update current here? or use current?
if position is None:
#use our current position
return self[int(self._position)]
else:
#checking if position is out of range here:
return self[self._position.check(position)]
#changing the interface to be the same as it is with Position object:
#def get_previous(self):
def previous(self):
"""
get the previous item in the list without changing position
"""
return self.get(self._position.previous())
#def get_next(self):
def __next__(self):
"""
get the next item in the list without changing position
"""
if self.new:
self.new = False
return self.get()
else:
            return self.get(self._position.next())
def go(self, position=None):
"""
go calls will update the local position object
"""
item = self.get(position)
if not position is None:
#whew! this is a tricky line...
#setting the position object's internal position:
self._position.position = position
#self.current = item
print(self._position.debug())
print("passed position: %s" % position)
return item
#changing the interface to be the same as it is with Position object:
#def go_next(self):
def increment(self):
"""
go to the next item in the list (and change our position accordingly)
"""
        return self.go(self._position.next())
#def go_previous(self):
def decrement(self):
"""
go to the previous item in the list
(and change our position accordingly)
"""
return self.go(self._position.previous())
#maybe rename to update_length to avoid confusion with replace functionality
#def update(self):
def update_length(self):
"""
update the position so it knows our new length
should be called any time items are added or removed to the list
"""
self._position.change_length(len(self))
def replace(self, item):
"""
replace the item in the current position
with the item passed in
"""
self[int(self._position)] = item
def clear(self):
del self[:]
self.update_length()
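# Hedged usage sketch (added for illustration; the helper below is not part
# of the original module, and the expected values are assumptions based on
# the methods defined above):
def _position_list_example():
    """Show the difference between get() (peek) and go() (move)."""
    pl = PositionList(['a', 'b', 'c'])
    first = pl.get()     # 'a'; get() never changes the stored position
    last = pl.get(2)     # 'c'; an explicit position is range-checked first
    pl.replace('A')      # overwrite the item at the current position
    moved = pl.go(2)     # 'c'; go() also updates the stored position
    return first, last, moved, pl.current()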
#aka
## class Sources(Items):
class Playlist(PositionList):
"""
Similar to a collection in that it holds a group of Content objects,
but not geared toward a single source of content.
Also, not specific to any single playlist format (e.g. M3U).
Because it holds Content objects,
there is much more meta data available than a typical playlist
very similar concepts to old mindstream sources module:
/c/medley/medley/sources.py
A generic Playlist object
These may help:
http://docs.python.org/2/library/collections.html
Previously:
A collection of Source objects
and a destination path for the logs generated
aka Playlist, Medialist
consider the best way to handle Segments in a Content object
for Playlist use:
Separate copies of Content in the Playlist for each Segment?
-- be careful not to save that Content object back and overwrite all
previous segments
Playlist reorders list of Segments associated with Content
-- more difficult to split segments of one piece of content in between
segments of another piece of content, within a list
also:
when editing a segment, save changes to main json parent Content
"""
def __init__(self, items=[], log_path=None, debug=False):
PositionList.__init__(self, items)
if log_path is None:
self.log_path = '/c/logs/transfer'
else:
self.log_path = log_path
#whether or not to update a content's source json file
#or just make the changes to the list locally
#
#generally with a playlist you don't want to update the content source
#e.g. subtractively limiting content segments to only favorites...
# wouldn't want to remove those segments from the content source
# just from the current playlist
#
#this should not matter if a content object is edited directly
self.sync_contents = False
self.debug = debug
#save and load:
#use helpers
#save_json(destination, self[:])
#and
#Playlist(load_json(source)) #assuming json contains a list of Contents
#any other format should be used
def set_current(self, item):
"""
if we have item
set the position to be that item
(useful when selecting next item externally)
"""
self.go(self.index(item))
#no need to return anything... already have the item
def add_if_new(self, source):
if not self.has_path(source.path):
self.append(source)
return True
else:
print("Already have: %s" % source.path)
return False
def has_path(self, path):
"""
go through all of our items and see if we have the path
"""
found = False
for i in self:
#print "m3u path: %s" % i.path
#print "chk path: %s" % path
if str(i.path) == str(path):
found = True
break
return found
def save(self, destination):
"""
consider using ContentPointer object here.. (is it useful?)
"""
items = []
for content in self:
json_path = os.path.join(content.path, content.json_source)
items.append( [json_path, content.segment_id] )
save_json(destination, items)
#def load_playlist(fname):
    def load(self, fname, all_contents=None):
"""
if you want to keep track of all contents loaded,
pass in a dictionary of all_contents...
load will update that with any new Content objects,
and reuse any existing objects from there
originally from medley.player.list_tree.load_playlist(fname)
expects the playlist to hold:
- the content source path
- the segment id
then loads the content from the source, and selects the correct segment
"""
        if all_contents is None:
            # use a fresh dict per call; a shared mutable default would
            # silently cache Content objects across unrelated loads
            all_contents = {}
        self.clear()
items = load_json(fname)
#print items
contents = []
for item in items:
if self.debug:
print(item)
print("")
(json_source, segment_id) = item
if json_source in all_contents:
if self.debug:
print("Matched existing Content object with path: %s" % json_source)
content = all_contents[json_source]
else:
try:
if self.debug:
print("loading: %s" % json_source)
content = Content(json_source)
all_contents[json_source] = content
                except:
                    print("removing item. could not load: %s" % json_source)
                    # skip this entry; otherwise 'content' may be unbound or
                    # left over from the previous iteration
                    continue
#print json_source
try:
segment = content.get_segment(segment_id)
except:
raise ValueError("Could not locate content... is it still available locally?")
#print segment.to_dict()
#print ""
#print ""
contents.append(segment)
#return Playlist(contents)
self.extend(contents)
#update position_list so it knows
self.update_length()
def sort_path(self):
#self.sort(key=lambda source: str(source.path))
self.sort(key=sorter)
def log_current(self, add_tags=[]):
"""
log that a play was just completed
this is very similar to osbrowser.node log_action?
could move into moments.journal
would need the log path, the file being logged (or file parent path)
and the entry to use
"""
entry = self.now_playing()
        # union() returns a new set, so assign it back to keep the added tags
        entry.tags = entry.tags.union(add_tags)
#log in default log directory
j = Journal()
now = Timestamp(now=True)
log_name = os.path.join(self.log_path , now.filename())
j.from_file(log_name)
j.update_entry(entry)
j.to_file()
# log in action.txt for current media's directory
cur_item = self.current()
parent_path = os.path.dirname(str(cur_item.path))
action = os.path.join(parent_path, 'action.txt')
j2 = Journal()
j2.from_file(action)
j2.update_entry(entry)
j2.to_file()
def now_playing(self):
"""
return an entry for what is playing
"""
cur_item = self.get()
return cur_item.as_moment(new_entry=True)
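# Hedged illustration of the on-disk playlist format that save() writes and
# load() expects: a JSON list of [path to the content's json source,
# segment id] pairs (the paths below are made up):
#
#     [
#         ["/c/media/show-a/content.json", 3],
#         ["/c/media/show-b/content.json", 0]
#     ]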
class ListTree(object):
"""
hold a hierarchy of playlists and collections
"""
def __init__(self):
#a place to keep track of all playlists and collections
#associated with the grouping
#can be hierarchical
self.playlists = []
#the goal is to store these for easy loading later
| {
"content_hash": "2c0dc8c62c2ccf03d20e28eca69ac856",
"timestamp": "",
"source": "github",
"line_count": 499,
"max_line_length": 94,
"avg_line_length": 30.064128256513026,
"alnum_prop": 0.5736568457538995,
"repo_name": "charlesbrandt/medley",
"id": "93abd7e0dae7327195c3bfabd4492e3106ebfbc6",
"size": "15002",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "medley/playlist.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "19670"
},
{
"name": "JavaScript",
"bytes": "356728"
},
{
"name": "Python",
"bytes": "1017664"
},
{
"name": "Shell",
"bytes": "1569"
},
{
"name": "Smarty",
"bytes": "40284"
}
],
"symlink_target": ""
} |
import datetime
class TimeBucket:
def __init__(self,start_time,stop_time, datetime_format = None):
if datetime_format is not None:
self.datetime_format = datetime_format
else:
self.datetime_format = "%Y%m%d%H%M%S"
if isinstance(start_time,datetime.datetime):
self.start_time = start_time
else:
self.start_time = datetime.datetime.strptime(start_time,self.datetime_format)
if isinstance(stop_time,datetime.datetime):
self.stop_time = stop_time
else:
self.stop_time = datetime.datetime.strptime(stop_time,self.datetime_format)
# sanity check
assert self.stop_time > self.start_time
def size(self):
return self.stop_time - self.start_time
def is_in_bucket(self,this_datetime):
return this_datetime > self.start_time and this_datetime < self.stop_time
def __repr__(self):
return_str = "'{} - {}'".format(self.start_time.strftime(self.datetime_format),self.stop_time.strftime(self.datetime_format))
return return_str
def __gt__(self, obj):
if isinstance(obj,TimeBucket):
return obj.stop_time < self.start_time
else:
            return NotImplemented
def __lt__(self, obj):
if isinstance(obj,TimeBucket):
return self.stop_time < obj.start_time
else:
            return NotImplemented
def __eq__(self, obj):
if isinstance(obj,TimeBucket):
return obj.start_time == self.start_time and obj.stop_time == self.stop_time
else:
            return NotImplemented
def __ne__(self, obj):
if isinstance(obj,TimeBucket):
return obj.start_time != self.start_time or obj.stop_time != self.stop_time
else:
            return NotImplemented
def __ge__(self, obj):
if isinstance(obj,TimeBucket):
return self.__gt__(obj) or self.__eq__(obj)
else:
            return NotImplemented
def __le__(self, obj):
if isinstance(obj,TimeBucket):
return self.__lt__(obj) or self.__eq__(obj)
else:
            return NotImplemented
def __contains__(self, obj):
if isinstance(obj,TimeBucket):
return obj.start_time >= self.start_time and obj.stop_time <= self.stop_time
else:
            raise NotImplementedError
def lowerlaps(self,obj):
if isinstance(obj,TimeBucket):
cond1 = self.stop_time > obj.start_time and self.stop_time <= obj.stop_time
cond2 = self.start_time < obj.start_time
return cond1 and cond2
else:
            raise NotImplementedError
def upperlaps(self,obj):
if isinstance(obj,TimeBucket):
cond1 = self.start_time >= obj.start_time and self.start_time < obj.stop_time
cond2 = self.stop_time > obj.stop_time
return cond1 and cond2
else:
            raise NotImplementedError
def is_upperlapped_by(self, obj):
if isinstance(obj,TimeBucket):
cond1 = obj.start_time >= self.start_time and obj.start_time <= self.stop_time
cond2 = obj.stop_time > self.stop_time
return cond1 and cond2
else:
            raise NotImplementedError
def is_lowerlapped_by(self, obj):
if isinstance(obj,TimeBucket):
cond1 = obj.stop_time >= self.start_time and obj.stop_time <= self.stop_time
cond2 = obj.start_time < self.start_time
return cond1 and cond2
else:
            raise NotImplementedError
def intersects(self, obj):
if isinstance(obj,TimeBucket):
return self.lowerlaps(obj) or self.upperlaps(obj) or obj.lowerlaps(self) or obj.upperlaps(self) or self in obj or obj in self
#cond1 = self.start_time >= obj.start_time and self.start_time <= obj.stop_time
#cond2 = self.stop_time >= obj.start_time and self.stop_time <= obj.stop_time
#cond3 = obj.start_time >= self.start_time and obj.start_time <= self.stop_time
#cond4 = obj.stop_time >= self.start_time and obj.stop_time <= self.stop_time
#return cond1 or cond2 or cond3 or cond4
else:
            raise NotImplementedError
def get_fraction_overlapped_by(self, obj):
if self.is_lowerlapped_by(obj):
overlap = obj.stop_time - self.start_time
fraction = overlap.total_seconds() / self.size().total_seconds()
return float(fraction)
elif self.is_upperlapped_by(obj):
overlap = self.stop_time - obj.start_time
fraction = overlap.total_seconds() / self.size().total_seconds()
return float(fraction)
elif obj in self:
return float(obj.size().total_seconds() / self.size().total_seconds())
else:
return float(0)
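# Hedged usage sketch (illustration only): two hour-long buckets where the
# second starts halfway through the first, so the first bucket is
# upperlapped by the second and half of it is covered.
if __name__ == "__main__":
    first = TimeBucket("20230101000000", "20230101010000")
    second = TimeBucket("20230101003000", "20230101013000")
    print(first.intersects(second))                  # True
    print(first.get_fraction_overlapped_by(second))  # 0.5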
| {
"content_hash": "3a3b8b5fa65104be8aabbae1903cbf48",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 137,
"avg_line_length": 37.715384615384615,
"alnum_prop": 0.5873954721599021,
"repo_name": "compston/TAP-Workshop",
"id": "3615b2abaded980bedcd7450550499645b025bd4",
"size": "4903",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utilities/Gnip-Trend-Detection/gnip_trend_detection/time_bucket.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "346588"
},
{
"name": "Shell",
"bytes": "6926"
},
{
"name": "TeX",
"bytes": "29717"
}
],
"symlink_target": ""
} |
"""
Pushover platform for notify component.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.pushover/
"""
import logging
from homeassistant.components.notify import (
ATTR_TITLE, ATTR_TARGET, ATTR_DATA, DOMAIN, BaseNotificationService)
from homeassistant.const import CONF_API_KEY
from homeassistant.helpers import validate_config
_LOGGER = logging.getLogger(__name__)
# pylint: disable=unused-variable
def get_service(hass, config):
"""Get the Pushover notification service."""
if not validate_config({DOMAIN: config},
{DOMAIN: ['user_key', CONF_API_KEY]},
_LOGGER):
return None
from pushover import InitError
try:
return PushoverNotificationService(config['user_key'],
config[CONF_API_KEY])
except InitError:
_LOGGER.error(
"Wrong API key supplied. "
"Get it at https://pushover.net")
return None
# pylint: disable=too-few-public-methods
class PushoverNotificationService(BaseNotificationService):
"""Implement the notification service for Pushover."""
def __init__(self, user_key, api_token):
"""Initialize the service."""
from pushover import Client
self._user_key = user_key
self._api_token = api_token
self.pushover = Client(
self._user_key, api_token=self._api_token)
def send_message(self, message="", **kwargs):
"""Send a message to a user."""
from pushover import RequestError
# Make a copy and use empty dict if necessary
data = dict(kwargs.get(ATTR_DATA) or {})
data['title'] = kwargs.get(ATTR_TITLE)
target = kwargs.get(ATTR_TARGET)
if target is not None:
data['device'] = target
try:
self.pushover.send_message(message, **data)
except ValueError as val_err:
_LOGGER.error(str(val_err))
except RequestError:
_LOGGER.exception("Could not send pushover notification")
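# Hedged illustration (the values are made up): a notify call such as
#
#     send_message("Door opened", title="Alarm",
#                  target="my-phone", data={"priority": 1})
#
# ends up as pushover.send_message("Door opened", title="Alarm",
# device="my-phone", priority=1), since ATTR_DATA is copied into the
# keyword arguments and ATTR_TARGET is mapped to the 'device' key.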
| {
"content_hash": "60d301dbb60836b3159dfc1214c28bd1",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 74,
"avg_line_length": 31.850746268656717,
"alnum_prop": 0.626991565135895,
"repo_name": "Julian/home-assistant",
"id": "45a331c35857a7125fd1c6be8146e7d68064506f",
"size": "2134",
"binary": false,
"copies": "1",
"ref": "refs/heads/py2",
"path": "homeassistant/components/notify/pushover.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1354942"
},
{
"name": "Python",
"bytes": "2755966"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "6430"
}
],
"symlink_target": ""
} |
from io import BytesIO
from glob import glob
from os.path import join, exists, dirname, abspath
from subprocess import call
from urllib.request import urlopen
from zipfile import ZipFile
import os
import shutil
JASMINE_REPORTER_URL = 'https://github.com/larrymyers/jasmine-reporters/zipball/0.2.1'
BASE = abspath(dirname(__file__))
REPORT_DIR = join(BASE, 'jasmine-results')
EXT_LIB = join(BASE, '..', 'ext-lib')
JARDIR = join(EXT_LIB, 'jasmine-reporters', 'ext')
def run_tests():
workdir = os.getcwd()
os.chdir(BASE)
download_jasmine_reporters()
clear_reports()
run()
os.chdir(workdir)
def run():
cmd = ['java', '-cp', '%s%s%s' % (join(JARDIR, 'js.jar'), os.pathsep, join(JARDIR, 'jline.jar')),
'org.mozilla.javascript.tools.shell.Main', '-opt', '-1', 'envjs.bootstrap.js',
join(BASE, 'webcontent', 'SpecRunner.html')]
call(cmd)
def clear_reports():
if exists(REPORT_DIR):
shutil.rmtree(REPORT_DIR)
os.mkdir(REPORT_DIR)
def download_jasmine_reporters():
if exists(join(EXT_LIB, 'jasmine-reporters')):
return
if not exists(EXT_LIB):
os.mkdir(EXT_LIB)
reporter = urlopen(JASMINE_REPORTER_URL)
z = ZipFile(BytesIO(reporter.read()))
z.extractall(EXT_LIB)
extraction_dir = glob(join(EXT_LIB, 'larrymyers-jasmine-reporters*'))[0]
print('Extracting Jasmine-Reporters to', extraction_dir)
shutil.move(extraction_dir, join(EXT_LIB, 'jasmine-reporters'))
if __name__ == '__main__':
run_tests()
| {
"content_hash": "0469e558577b03e29cb1744e03636d64",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 101,
"avg_line_length": 28.11111111111111,
"alnum_prop": 0.6613965744400527,
"repo_name": "HelioGuilherme66/robotframework",
"id": "a470cd84483a6e98e22973c26f642d54a0da45d4",
"size": "1541",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "utest/run_jasmine.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "44706"
},
{
"name": "HTML",
"bytes": "86409"
},
{
"name": "JavaScript",
"bytes": "162950"
},
{
"name": "Python",
"bytes": "2671114"
},
{
"name": "RobotFramework",
"bytes": "1231105"
}
],
"symlink_target": ""
} |
from __future__ import division
import numpy as np
from chainercv.utils.mask.mask_to_bbox import mask_to_bbox
from chainercv.visualizations.colormap import voc_colormap
from chainercv.visualizations import vis_image
def vis_instance_segmentation(
img, mask, label=None, score=None, label_names=None,
instance_colors=None, alpha=0.7, sort_by_score=True, ax=None):
"""Visualize instance segmentation.
Example:
This example visualizes an image and an instance segmentation.
>>> from chainercv.datasets import SBDInstanceSegmentationDataset
>>> from chainercv.datasets \
... import sbd_instance_segmentation_label_names
>>> from chainercv.visualizations import vis_instance_segmentation
>>> import matplotlib.pyplot as plt
>>> dataset = SBDInstanceSegmentationDataset()
>>> img, mask, label = dataset[0]
>>> vis_instance_segmentation(
... img, mask, label,
... label_names=sbd_instance_segmentation_label_names)
>>> plt.show()
This example visualizes an image, an instance segmentation and
bounding boxes.
>>> from chainercv.datasets import SBDInstanceSegmentationDataset
>>> from chainercv.datasets \
... import sbd_instance_segmentation_label_names
>>> from chainercv.visualizations import vis_bbox
>>> from chainercv.visualizations import vis_instance_segmentation
>>> from chainercv.visualizations.colormap import voc_colormap
>>> from chainercv.utils import mask_to_bbox
>>> import matplotlib.pyplot as plt
>>> dataset = SBDInstanceSegmentationDataset()
>>> img, mask, label = dataset[0]
>>> bbox = mask_to_bbox(mask)
>>> colors = voc_colormap(list(range(1, len(mask) + 1)))
>>> ax = vis_bbox(img, bbox, label,
... label_names=sbd_instance_segmentation_label_names,
... instance_colors=colors, alpha=0.7, linewidth=0.5)
>>> vis_instance_segmentation(
... None, mask, instance_colors=colors, alpha=0.7, ax=ax)
>>> plt.show()
Args:
img (~numpy.ndarray): See the table below. If this is :obj:`None`,
no image is displayed.
mask (~numpy.ndarray): See the table below.
label (~numpy.ndarray): See the table below. This is optional.
score (~numpy.ndarray): See the table below. This is optional.
label_names (iterable of strings): Name of labels ordered according
to label ids.
instance_colors (iterable of tuple): List of colors.
Each color is RGB format and the range of its values is
:math:`[0, 255]`. The :obj:`i`-th element is the color used
to visualize the :obj:`i`-th instance.
If :obj:`instance_colors` is :obj:`None`, the default color map
is used.
alpha (float): The value which determines transparency of the figure.
The range of this value is :math:`[0, 1]`. If this
value is :obj:`0`, the figure will be completely transparent.
The default value is :obj:`0.7`. This option is useful for
overlaying the label on the source image.
sort_by_score (bool): When :obj:`True`, instances with high scores
are always visualized in front of instances with low scores.
ax (matplotlib.axes.Axis): The visualization is displayed on this
axis. If this is :obj:`None` (default), a new axis is created.
.. csv-table::
:header: name, shape, dtype, format
:obj:`img`, ":math:`(3, H, W)`", :obj:`float32`, \
"RGB, :math:`[0, 255]`"
:obj:`mask`, ":math:`(R, H, W)`", :obj:`bool`, --
:obj:`label`, ":math:`(R,)`", :obj:`int32`, \
":math:`[0, \#fg\_class - 1]`"
:obj:`score`, ":math:`(R,)`", :obj:`float32`, --
Returns:
matploblib.axes.Axes: Returns :obj:`ax`.
:obj:`ax` is an :class:`matploblib.axes.Axes` with the plot.
"""
# Returns newly instantiated matplotlib.axes.Axes object if ax is None
ax = vis_image(img, ax=ax)
if score is not None and len(mask) != len(score):
raise ValueError('The length of score must be same as that of mask')
if label is not None and len(mask) != len(label):
raise ValueError('The length of label must be same as that of mask')
if sort_by_score and score is not None:
order = np.argsort(score)
mask = mask[order]
score = score[order]
if label is not None:
label = label[order]
if instance_colors is not None:
instance_colors = np.array(instance_colors)[order]
bbox = mask_to_bbox(mask)
n_inst = len(bbox)
if instance_colors is None:
instance_colors = voc_colormap(list(range(1, n_inst + 1)))
instance_colors = np.array(instance_colors)
_, H, W = mask.shape
canvas_img = np.zeros((H, W, 4), dtype=np.uint8)
for i, (bb, msk) in enumerate(zip(bbox, mask)):
# The length of `colors` can be smaller than the number of instances
# if a non-default `colors` is used.
color = instance_colors[i % len(instance_colors)]
rgba = np.append(color, alpha * 255)
bb = np.round(bb).astype(np.int32)
y_min, x_min, y_max, x_max = bb
if y_max > y_min and x_max > x_min:
canvas_img[msk] = rgba
caption = []
if label is not None and label_names is not None:
lb = label[i]
if not (0 <= lb < len(label_names)):
raise ValueError('No corresponding name is given')
caption.append(label_names[lb])
if score is not None:
sc = score[i]
caption.append('{:.2f}'.format(sc))
if len(caption) > 0:
ax.text((x_max + x_min) / 2, y_min,
': '.join(caption),
style='italic',
bbox={'facecolor': color / 255, 'alpha': alpha},
fontsize=8, color='white')
ax.imshow(canvas_img)
return ax
| {
"content_hash": "35a94d39bd2d80d5a6e4ae56274e10c2",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 77,
"avg_line_length": 41.92465753424658,
"alnum_prop": 0.596144420846267,
"repo_name": "pfnet/chainercv",
"id": "c4ed265ed06cf2ff112105288e9a6a15d392b78e",
"size": "6121",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "chainercv/visualizations/vis_instance_segmentation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "317937"
}
],
"symlink_target": ""
} |
import codecs
import ConfigParser
import os
import sys
import RezzMe.config.builder
onMacOSX = sys.platform == 'darwin'
onLinux = sys.platform == 'linux2'
onWindows = sys.platform == 'win32'
# check prereqs
if not os.path.exists('rezzme.cfg'):
print '''
oops...you need to create rezzme.cfg first!
copy rezzme.cfg.example to rezzme.cfg and adapt it to your needs,
and run me again.
'''
sys.exit(1)
if len(sys.argv) != 3:
print 'usage: expand.py in-file out-file'
sys.exit(2)
# read in configuration
cfg = RezzMe.config.builder.buildCfg('rezzme')
infile = codecs.open(sys.argv[1], 'r', 'utf8')
inContent = infile.read()
infile.close()
outfile = codecs.open(sys.argv[2], 'w', 'utf8')
outfile.write(inContent % cfg['package'])
outfile.close()
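# Hedged illustration of the expansion step above (the key name is made up):
# since inContent is expanded with classic %-formatting against the
# cfg['package'] dictionary, a template line such as
#
#     version = %(version)s
#
# would be written out as "version = 0.1" if cfg['package'] contained
# {'version': '0.1'}.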
| {
"content_hash": "c263d43c4a89784d66349c67913dc344",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 65,
"avg_line_length": 21.25,
"alnum_prop": 0.6980392156862745,
"repo_name": "dirkhusemann/rezzme",
"id": "7a04bda4e1dbb16b1e38e3d0801160a596ad9bb2",
"size": "810",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "expand.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Perl",
"bytes": "2714"
},
{
"name": "Python",
"bytes": "220677"
}
],
"symlink_target": ""
} |
from libfcg.fcg import FCG
from oslo.config import cfg
from virtman.utils import singleton
from virtman.utils import rootwrap
fcg_opts = [
cfg.StrOpt('fcg_name',
default='fcg',
help='The name of the Flashcache Group'),
cfg.ListOpt('fcg_ssds',
default=['/dev/loop0'],
help='The devices of SSDs to use to create the FCG, '
'the parameter of \'ssds\' can fill in one '
'or more, splited by \',\''),
cfg.StrOpt('fcg_blocksize',
default='4k',
help='The block size of the FCG'),
cfg.StrOpt('fcg_pattern',
default='back',
help='The cache mode for the FCG'),
]
CONF = cfg.CONF
CONF.register_opts(fcg_opts)
class FcgExecutor(FCG):
def __init__(self):
FCG.__init__(self, CONF.fcg_name, root_helper=rootwrap.root_helper())
fcg_executor = FcgExecutor()
def is_valid():
return fcg_executor.is_valid()
def create_group():
return fcg_executor.create_group(CONF.fcg_ssds, CONF.fcg_blocksize,
CONF.fcg_pattern)
def add_disk(disk):
return fcg_executor.add_disk(disk)
def rm_disk(disk):
return fcg_executor.rm_disk(disk)
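# Hedged usage sketch (assumed call order, not taken from the original
# project; the device path is made up): a caller would typically make sure
# the flashcache group exists before attaching backing disks to it.
#
#     if not is_valid():
#         create_group()
#     add_disk('/dev/sdb1')
#     ...
#     rm_disk('/dev/sdb1')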
| {
"content_hash": "785ff86aa272b81201e94261d67aeb8e",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 77,
"avg_line_length": 25.24,
"alnum_prop": 0.5784469096671949,
"repo_name": "vmthunder/virtman",
"id": "efafea50f5a2545eeecb882af8d798d796765bd3",
"size": "1262",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "virtman/drivers/fcg.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "266703"
}
],
"symlink_target": ""
} |
"""
SLA (Service-level agreement) is set of details for determining compliance
with contracted values such as maximum error rate or minimum response time.
"""
from rally.common.i18n import _
from rally.common import streaming_algorithms
from rally import consts
from rally.task import sla
@sla.configure(name="outliers")
class Outliers(sla.SLA):
"""Limit the number of outliers (iterations that take too much time).
The outliers are detected automatically using the computation of the mean
and standard deviation (std) of the data.
"""
CONFIG_SCHEMA = {
"type": "object",
"$schema": consts.JSON_SCHEMA,
"properties": {
"max": {"type": "integer", "minimum": 0},
"min_iterations": {"type": "integer", "minimum": 3},
"sigmas": {"type": "number", "minimum": 0.0,
"exclusiveMinimum": True}
}
}
def __init__(self, criterion_value):
super(Outliers, self).__init__(criterion_value)
self.max_outliers = self.criterion_value.get("max", 0)
# NOTE(msdubov): Having 3 as default is reasonable (need enough data).
self.min_iterations = self.criterion_value.get("min_iterations", 3)
self.sigmas = self.criterion_value.get("sigmas", 3.0)
self.iterations = 0
self.outliers = 0
self.threshold = None
self.mean_comp = streaming_algorithms.MeanComputation()
self.std_comp = streaming_algorithms.StdDevComputation()
def add_iteration(self, iteration):
# NOTE(ikhudoshyn): This method can not be implemented properly.
# After adding a new iteration, both mean and standard deviation
# may change. Hence threshold will change as well. In this case we
# should again compare durations of all accounted iterations
# to the threshold. Unfortunately we can not do it since
# we do not store durations.
# Implementation provided here only gives rough approximation
# of outliers number.
if not iteration.get("error"):
duration = iteration["duration"]
self.iterations += 1
# NOTE(msdubov): First check if the current iteration is an outlier
if ((self.iterations >= self.min_iterations and self.threshold and
duration > self.threshold)):
self.outliers += 1
# NOTE(msdubov): Then update the threshold value
self.mean_comp.add(duration)
self.std_comp.add(duration)
if self.iterations >= 2:
mean = self.mean_comp.result()
std = self.std_comp.result()
self.threshold = mean + self.sigmas * std
self.success = self.outliers <= self.max_outliers
return self.success
def merge(self, other):
# NOTE(ikhudoshyn): This method can not be implemented properly.
# After merge, both mean and standard deviation may change.
# Hence threshold will change as well. In this case we
# should again compare durations of all accounted iterations
# to the threshold. Unfortunately we can not do it since
# we do not store durations.
# Implementation provided here only gives rough approximation
# of outliers number.
self.iterations += other.iterations
self.outliers += other.outliers
self.mean_comp.merge(other.mean_comp)
self.std_comp.merge(other.std_comp)
if self.iterations >= 2:
mean = self.mean_comp.result()
std = self.std_comp.result()
self.threshold = mean + self.sigmas * std
self.success = self.outliers <= self.max_outliers
return self.success
def details(self):
return (_("Maximum number of outliers %i <= %i - %s") %
(self.outliers, self.max_outliers, self.status()))
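# Hedged standalone sketch of the same detection rule using the stdlib
# instead of Rally's streaming helpers (sample standard deviation is an
# assumption; the real StdDevComputation may differ slightly):
def _count_outliers(durations, sigmas=3.0, min_iterations=3):
    import statistics
    count, outliers, threshold = 0, 0, None
    seen = []
    for duration in durations:
        count += 1
        # flag the iteration first, against the threshold computed so far
        if count >= min_iterations and threshold and duration > threshold:
            outliers += 1
        # then fold it into the running mean/std and refresh the threshold
        seen.append(duration)
        if count >= 2:
            threshold = (statistics.mean(seen)
                         + sigmas * statistics.stdev(seen))
    return outliers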
| {
"content_hash": "a3d3afc47c8bb0ecc75a65b1cfbbc8f1",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 79,
"avg_line_length": 41.04210526315789,
"alnum_prop": 0.6245191074634522,
"repo_name": "vganapath/rally",
"id": "40ae37dfa93fc13ced46a070b526742be05a5a52",
"size": "4530",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "rally/plugins/common/sla/outliers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "52008"
},
{
"name": "JavaScript",
"bytes": "8550"
},
{
"name": "Mako",
"bytes": "18645"
},
{
"name": "Python",
"bytes": "3621510"
},
{
"name": "Shell",
"bytes": "43808"
}
],
"symlink_target": ""
} |
extensions_list = {"application/andrew-inset":["ez"],
"application/applixware":["aw"],
"application/atom+xml":["atom"],
"application/atomcat+xml":["atomcat"],
"application/atomsvc+xml":["atomsvc"],
"application/ccxml+xml":["ccxml"],
"application/cdmi-capability":["cdmia"],
"application/cdmi-container":["cdmic"],
"application/cdmi-domain":["cdmid"],
"application/cdmi-object":["cdmio"],
"application/cdmi-queue":["cdmiq"],
"application/cu-seeme":["cu"],
"application/davmount+xml":["davmount"],
"application/docbook+xml":["dbk"],
"application/dssc+der":["dssc"],
"application/dssc+xml":["xdssc"],
"application/ecmascript":["ecma"],
"application/emma+xml":["emma"],
"application/epub+zip":["epub"],
"application/exi":["exi"],
"application/font-tdpfr":["pfr"],
"application/font-woff":["woff"],
"application/gml+xml":["gml"],
"application/gpx+xml":["gpx"],
"application/gxf":["gxf"],
"application/hyperstudio":["stk"],
"application/inkml+xml":["ink","inkml"],
"application/ipfix":["ipfix"],
"application/java-archive":["jar"],
"application/java-serialized-object":["ser"],
"application/java-vm":["class"],
"application/javascript":["js"],
"application/json":["json"],
"application/jsonml+json":["jsonml"],
"application/lost+xml":["lostxml"],
"application/mac-binhex40":["hqx"],
"application/mac-compactpro":["cpt"],
"application/mads+xml":["mads"],
"application/marc":["mrc"],
"application/marcxml+xml":["mrcx"],
"application/mathematica":["ma","nb","mb"],
"application/mathml+xml":["mathml"],
"application/mbox":["mbox"],
"application/mediaservercontrol+xml":["mscml"],
"application/metalink+xml":["metalink"],
"application/metalink4+xml":["meta4"],
"application/mets+xml":["mets"],
"application/mods+xml":["mods"],
"application/mp21":["m21","mp21"],
"application/mp4":["mp4s"],
"application/msword":["doc","dot"],
"application/mxf":["mxf"],
"application/octet-stream":["bin","dms","lrf","mar","so","dist","distz","pkg","bpk","dump","elc","deploy"],
"application/oda":["oda"],
"application/oebps-package+xml":["opf"],
"application/ogg":["ogx"],
"application/omdoc+xml":["omdoc"],
"application/onenote":["onetoc","onetoc2","onetmp","onepkg"],
"application/oxps":["oxps"],
"application/patch-ops-error+xml":["xer"],
"application/pdf":["pdf"],
"application/pgp-encrypted":["pgp"],
"application/pgp-signature":["asc","sig"],
"application/pics-rules":["prf"],
"application/pkcs10":["p10"],
"application/pkcs7-mime":["p7m","p7c"],
"application/pkcs7-signature":["p7s"],
"application/pkcs8":["p8"],
"application/pkix-attr-cert":["ac"],
"application/pkix-cert":["cer"],
"application/pkix-crl":["crl"],
"application/pkix-pkipath":["pkipath"],
"application/pkixcmp":["pki"],
"application/pls+xml":["pls"],
"application/postscript":["ai","eps","ps"],
"application/prs.cww":["cww"],
"application/pskc+xml":["pskcxml"],
"application/rdf+xml":["rdf"],
"application/reginfo+xml":["rif"],
"application/relax-ng-compact-syntax":["rnc"],
"application/resource-lists+xml":["rl"],
"application/resource-lists-diff+xml":["rld"],
"application/rls-services+xml":["rs"],
"application/rpki-ghostbusters":["gbr"],
"application/rpki-manifest":["mft"],
"application/rpki-roa":["roa"],
"application/rsd+xml":["rsd"],
"application/rss+xml":["rss"],
"application/rtf":["rtf"],
"application/sbml+xml":["sbml"],
"application/scvp-cv-request":["scq"],
"application/scvp-cv-response":["scs"],
"application/scvp-vp-request":["spq"],
"application/scvp-vp-response":["spp"],
"application/sdp":["sdp"],
"application/set-payment-initiation":["setpay"],
"application/set-registration-initiation":["setreg"],
"application/shf+xml":["shf"],
"application/smil+xml":["smi","smil"],
"application/sparql-query":["rq"],
"application/sparql-results+xml":["srx"],
"application/srgs":["gram"],
"application/srgs+xml":["grxml"],
"application/sru+xml":["sru"],
"application/ssdl+xml":["ssdl"],
"application/ssml+xml":["ssml"],
"application/tei+xml":["tei","teicorpus"],
"application/thraud+xml":["tfi"],
"application/timestamped-data":["tsd"],
"application/vnd.3gpp.pic-bw-large":["plb"],
"application/vnd.3gpp.pic-bw-small":["psb"],
"application/vnd.3gpp.pic-bw-var":["pvb"],
"application/vnd.3gpp2.tcap":["tcap"],
"application/vnd.3m.post-it-notes":["pwn"],
"application/vnd.accpac.simply.aso":["aso"],
"application/vnd.accpac.simply.imp":["imp"],
"application/vnd.acucobol":["acu"],
"application/vnd.acucorp":["atc","acutc"],
"application/vnd.adobe.air-application-installer-package+zip":["air"],
"application/vnd.adobe.formscentral.fcdt":["fcdt"],
"application/vnd.adobe.fxp":["fxp","fxpl"],
"application/vnd.adobe.xdp+xml":["xdp"],
"application/vnd.adobe.xfdf":["xfdf"],
"application/vnd.ahead.space":["ahead"],
"application/vnd.airzip.filesecure.azf":["azf"],
"application/vnd.airzip.filesecure.azs":["azs"],
"application/vnd.amazon.ebook":["azw"],
"application/vnd.americandynamics.acc":["acc"],
"application/vnd.amiga.ami":["ami"],
"application/vnd.android.package-archive":["apk"],
"application/vnd.anser-web-certificate-issue-initiation":["cii"],
"application/vnd.anser-web-funds-transfer-initiation":["fti"],
"application/vnd.antix.game-component":["atx"],
"application/vnd.apple.installer+xml":["mpkg"],
"application/vnd.apple.mpegurl":["m3u8"],
"application/vnd.aristanetworks.swi":["swi"],
"application/vnd.astraea-software.iota":["iota"],
"application/vnd.audiograph":["aep"],
"application/vnd.blueice.multipass":["mpm"],
"application/vnd.bmi":["bmi"],
"application/vnd.businessobjects":["rep"],
"application/vnd.chemdraw+xml":["cdxml"],
"application/vnd.chipnuts.karaoke-mmd":["mmd"],
"application/vnd.cinderella":["cdy"],
"application/vnd.claymore":["cla"],
"application/vnd.cloanto.rp9":["rp9"],
"application/vnd.clonk.c4group":["c4g","c4d","c4f","c4p","c4u"],
"application/vnd.cluetrust.cartomobile-config":["c11amc"],
"application/vnd.cluetrust.cartomobile-config-pkg":["c11amz"],
"application/vnd.commonspace":["csp"],
"application/vnd.contact.cmsg":["cdbcmsg"],
"application/vnd.cosmocaller":["cmc"],
"application/vnd.crick.clicker":["clkx"],
"application/vnd.crick.clicker.keyboard":["clkk"],
"application/vnd.crick.clicker.palette":["clkp"],
"application/vnd.crick.clicker.template":["clkt"],
"application/vnd.crick.clicker.wordbank":["clkw"],
"application/vnd.criticaltools.wbs+xml":["wbs"],
"application/vnd.ctc-posml":["pml"],
"application/vnd.cups-ppd":["ppd"],
"application/vnd.curl.car":["car"],
"application/vnd.curl.pcurl":["pcurl"],
"application/vnd.dart":["dart"],
"application/vnd.data-vision.rdz":["rdz"],
"application/vnd.dece.data":["uvf","uvvf","uvd","uvvd"],
"application/vnd.dece.ttml+xml":["uvt","uvvt"],
"application/vnd.dece.unspecified":["uvx","uvvx"],
"application/vnd.dece.zip":["uvz","uvvz"],
"application/vnd.denovo.fcselayout-link":["fe_launch"],
"application/vnd.dna":["dna"],
"application/vnd.dolby.mlp":["mlp"],
"application/vnd.dpgraph":["dpg"],
"application/vnd.dreamfactory":["dfac"],
"application/vnd.ds-keypoint":["kpxx"],
"application/vnd.dvb.ait":["ait"],
"application/vnd.dvb.service":["svc"],
"application/vnd.dynageo":["geo"],
"application/vnd.ecowin.chart":["mag"],
"application/vnd.enliven":["nml"],
"application/vnd.epson.esf":["esf"],
"application/vnd.epson.msf":["msf"],
"application/vnd.epson.quickanime":["qam"],
"application/vnd.epson.salt":["slt"],
"application/vnd.epson.ssf":["ssf"],
"application/vnd.eszigno3+xml":["es3","et3"],
"application/vnd.ezpix-album":["ez2"],
"application/vnd.ezpix-package":["ez3"],
"application/vnd.fdf":["fdf"],
"application/vnd.fdsn.mseed":["mseed"],
"application/vnd.fdsn.seed":["seed","dataless"],
"application/vnd.flographit":["gph"],
"application/vnd.fluxtime.clip":["ftc"],
"application/vnd.framemaker":["fm","frame","maker","book"],
"application/vnd.frogans.fnc":["fnc"],
"application/vnd.frogans.ltf":["ltf"],
"application/vnd.fsc.weblaunch":["fsc"],
"application/vnd.fujitsu.oasys":["oas"],
"application/vnd.fujitsu.oasys2":["oa2"],
"application/vnd.fujitsu.oasys3":["oa3"],
"application/vnd.fujitsu.oasysgp":["fg5"],
"application/vnd.fujitsu.oasysprs":["bh2"],
"application/vnd.fujixerox.ddd":["ddd"],
"application/vnd.fujixerox.docuworks":["xdw"],
"application/vnd.fujixerox.docuworks.binder":["xbd"],
"application/vnd.fuzzysheet":["fzs"],
"application/vnd.genomatix.tuxedo":["txd"],
"application/vnd.geogebra.file":["ggb"],
"application/vnd.geogebra.tool":["ggt"],
"application/vnd.geometry-explorer":["gex","gre"],
"application/vnd.geonext":["gxt"],
"application/vnd.geoplan":["g2w"],
"application/vnd.geospace":["g3w"],
"application/vnd.gmx":["gmx"],
"application/vnd.google-earth.kml+xml":["kml"],
"application/vnd.google-earth.kmz":["kmz"],
"application/vnd.grafeq":["gqf","gqs"],
"application/vnd.groove-account":["gac"],
"application/vnd.groove-help":["ghf"],
"application/vnd.groove-identity-message":["gim"],
"application/vnd.groove-injector":["grv"],
"application/vnd.groove-tool-message":["gtm"],
"application/vnd.groove-tool-template":["tpl"],
"application/vnd.groove-vcard":["vcg"],
"application/vnd.hal+xml":["hal"],
"application/vnd.handheld-entertainment+xml":["zmm"],
"application/vnd.hbci":["hbci"],
"application/vnd.hhe.lesson-player":["les"],
"application/vnd.hp-hpgl":["hpgl"],
"application/vnd.hp-hpid":["hpid"],
"application/vnd.hp-hps":["hps"],
"application/vnd.hp-jlyt":["jlt"],
"application/vnd.hp-pcl":["pcl"],
"application/vnd.hp-pclxl":["pclxl"],
"application/vnd.hydrostatix.sof-data":["sfd-hdstx"],
"application/vnd.ibm.minipay":["mpy"],
"application/vnd.ibm.modcap":["afp","listafp","list3820"],
"application/vnd.ibm.rights-management":["irm"],
"application/vnd.ibm.secure-container":["sc"],
"application/vnd.iccprofile":["icc","icm"],
"application/vnd.igloader":["igl"],
"application/vnd.immervision-ivp":["ivp"],
"application/vnd.immervision-ivu":["ivu"],
"application/vnd.insors.igm":["igm"],
"application/vnd.intercon.formnet":["xpw","xpx"],
"application/vnd.intergeo":["i2g"],
"application/vnd.intu.qbo":["qbo"],
"application/vnd.intu.qfx":["qfx"],
"application/vnd.ipunplugged.rcprofile":["rcprofile"],
"application/vnd.irepository.package+xml":["irp"],
"application/vnd.is-xpr":["xpr"],
"application/vnd.isac.fcs":["fcs"],
"application/vnd.jam":["jam"],
"application/vnd.jcp.javame.midlet-rms":["rms"],
"application/vnd.jisp":["jisp"],
"application/vnd.joost.joda-archive":["joda"],
"application/vnd.kahootz":["ktz","ktr"],
"application/vnd.kde.karbon":["karbon"],
"application/vnd.kde.kchart":["chrt"],
"application/vnd.kde.kformula":["kfo"],
"application/vnd.kde.kivio":["flw"],
"application/vnd.kde.kontour":["kon"],
"application/vnd.kde.kpresenter":["kpr","kpt"],
"application/vnd.kde.kspread":["ksp"],
"application/vnd.kde.kword":["kwd","kwt"],
"application/vnd.kenameaapp":["htke"],
"application/vnd.kidspiration":["kia"],
"application/vnd.kinar":["kne","knp"],
"application/vnd.koan":["skp","skd","skt","skm"],
"application/vnd.kodak-descriptor":["sse"],
"application/vnd.las.las+xml":["lasxml"],
"application/vnd.llamagraphics.life-balance.desktop":["lbd"],
"application/vnd.llamagraphics.life-balance.exchange+xml":["lbe"],
"application/vnd.lotus-1-2-3":["123"],
"application/vnd.lotus-approach":["apr"],
"application/vnd.lotus-freelance":["pre"],
"application/vnd.lotus-notes":["nsf"],
"application/vnd.lotus-organizer":["org"],
"application/vnd.lotus-screencam":["scm"],
"application/vnd.lotus-wordpro":["lwp"],
"application/vnd.macports.portpkg":["portpkg"],
"application/vnd.mcd":["mcd"],
"application/vnd.medcalcdata":["mc1"],
"application/vnd.mediastation.cdkey":["cdkey"],
"application/vnd.mfer":["mwf"],
"application/vnd.mfmp":["mfm"],
"application/vnd.micrografx.flo":["flo"],
"application/vnd.micrografx.igx":["igx"],
"application/vnd.mif":["mif"],
"application/vnd.mobius.daf":["daf"],
"application/vnd.mobius.dis":["dis"],
"application/vnd.mobius.mbk":["mbk"],
"application/vnd.mobius.mqy":["mqy"],
"application/vnd.mobius.msl":["msl"],
"application/vnd.mobius.plc":["plc"],
"application/vnd.mobius.txf":["txf"],
"application/vnd.mophun.application":["mpn"],
"application/vnd.mophun.certificate":["mpc"],
"application/vnd.mozilla.xul+xml":["xul"],
"application/vnd.ms-artgalry":["cil"],
"application/vnd.ms-cab-compressed":["cab"],
"application/vnd.ms-excel":["xls","xlm","xla","xlc","xlt","xlw"],
"application/vnd.ms-excel.addin.macroenabled.12":["xlam"],
"application/vnd.ms-excel.sheet.binary.macroenabled.12":["xlsb"],
"application/vnd.ms-excel.sheet.macroenabled.12":["xlsm"],
"application/vnd.ms-excel.template.macroenabled.12":["xltm"],
"application/vnd.ms-fontobject":["eot"],
"application/vnd.ms-htmlhelp":["chm"],
"application/vnd.ms-ims":["ims"],
"application/vnd.ms-lrm":["lrm"],
"application/vnd.ms-officetheme":["thmx"],
"application/vnd.ms-pki.seccat":["cat"],
"application/vnd.ms-pki.stl":["stl"],
"application/vnd.ms-powerpoint":["ppt","pps","pot"],
"application/vnd.ms-powerpoint.addin.macroenabled.12":["ppam"],
"application/vnd.ms-powerpoint.presentation.macroenabled.12":["pptm"],
"application/vnd.ms-powerpoint.slide.macroenabled.12":["sldm"],
"application/vnd.ms-powerpoint.slideshow.macroenabled.12":["ppsm"],
"application/vnd.ms-powerpoint.template.macroenabled.12":["potm"],
"application/vnd.ms-project":["mpp","mpt"],
"application/vnd.ms-word.document.macroenabled.12":["docm"],
"application/vnd.ms-word.template.macroenabled.12":["dotm"],
"application/vnd.ms-works":["wps","wks","wcm","wdb"],
"application/vnd.ms-wpl":["wpl"],
"application/vnd.ms-xpsdocument":["xps"],
"application/vnd.mseq":["mseq"],
"application/vnd.musician":["mus"],
"application/vnd.muvee.style":["msty"],
"application/vnd.mynfc":["taglet"],
"application/vnd.neurolanguage.nlu":["nlu"],
"application/vnd.nitf":["ntf","nitf"],
"application/vnd.noblenet-directory":["nnd"],
"application/vnd.noblenet-sealer":["nns"],
"application/vnd.noblenet-web":["nnw"],
"application/vnd.nokia.n-gage.data":["ngdat"],
"application/vnd.nokia.n-gage.symbian.install":["n-gage"],
"application/vnd.nokia.radio-preset":["rpst"],
"application/vnd.nokia.radio-presets":["rpss"],
"application/vnd.novadigm.edm":["edm"],
"application/vnd.novadigm.edx":["edx"],
"application/vnd.novadigm.ext":["ext"],
"application/vnd.oasis.opendocument.chart":["odc"],
"application/vnd.oasis.opendocument.chart-template":["otc"],
"application/vnd.oasis.opendocument.database":["odb"],
"application/vnd.oasis.opendocument.formula":["odf"],
"application/vnd.oasis.opendocument.formula-template":["odft"],
"application/vnd.oasis.opendocument.graphics":["odg"],
"application/vnd.oasis.opendocument.graphics-template":["otg"],
"application/vnd.oasis.opendocument.image":["odi"],
"application/vnd.oasis.opendocument.image-template":["oti"],
"application/vnd.oasis.opendocument.presentation":["odp"],
"application/vnd.oasis.opendocument.presentation-template":["otp"],
"application/vnd.oasis.opendocument.spreadsheet":["ods"],
"application/vnd.oasis.opendocument.spreadsheet-template":["ots"],
"application/vnd.oasis.opendocument.text":["odt"],
"application/vnd.oasis.opendocument.text-master":["odm"],
"application/vnd.oasis.opendocument.text-template":["ott"],
"application/vnd.oasis.opendocument.text-web":["oth"],
"application/vnd.olpc-sugar":["xo"],
"application/vnd.oma.dd2+xml":["dd2"],
"application/vnd.openofficeorg.extension":["oxt"],
"application/vnd.openxmlformats-officedocument.presentationml.presentation":["pptx"],
"application/vnd.openxmlformats-officedocument.presentationml.slide":["sldx"],
"application/vnd.openxmlformats-officedocument.presentationml.slideshow":["ppsx"],
"application/vnd.openxmlformats-officedocument.presentationml.template":["potx"],
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet":["xlsx"],
"application/vnd.openxmlformats-officedocument.spreadsheetml.template":["xltx"],
"application/vnd.openxmlformats-officedocument.wordprocessingml.document":["docx"],
"application/vnd.openxmlformats-officedocument.wordprocessingml.template":["dotx"],
"application/vnd.osgeo.mapguide.package":["mgp"],
"application/vnd.osgi.dp":["dp"],
"application/vnd.osgi.subsystem":["esa"],
"application/vnd.palm":["pdb","pqa","oprc"],
"application/vnd.pawaafile":["paw"],
"application/vnd.pg.format":["str"],
"application/vnd.pg.osasli":["ei6"],
"application/vnd.picsel":["efif"],
"application/vnd.pmi.widget":["wg"],
"application/vnd.pocketlearn":["plf"],
"application/vnd.powerbuilder6":["pbd"],
"application/vnd.previewsystems.box":["box"],
"application/vnd.proteus.magazine":["mgz"],
"application/vnd.publishare-delta-tree":["qps"],
"application/vnd.pvi.ptid1":["ptid"],
"application/vnd.quark.quarkxpress":["qxd","qxt","qwd","qwt","qxl","qxb"],
"application/vnd.realvnc.bed":["bed"],
"application/vnd.recordare.musicxml":["mxl"],
"application/vnd.recordare.musicxml+xml":["musicxml"],
"application/vnd.rig.cryptonote":["cryptonote"],
"application/vnd.rim.cod":["cod"],
"application/vnd.rn-realmedia":["rm"],
"application/vnd.rn-realmedia-vbr":["rmvb"],
"application/vnd.route66.link66+xml":["link66"],
"application/vnd.sailingtracker.track":["st"],
"application/vnd.seemail":["see"],
"application/vnd.sema":["sema"],
"application/vnd.semd":["semd"],
"application/vnd.semf":["semf"],
"application/vnd.shana.informed.formdata":["ifm"],
"application/vnd.shana.informed.formtemplate":["itp"],
"application/vnd.shana.informed.interchange":["iif"],
"application/vnd.shana.informed.package":["ipk"],
"application/vnd.simtech-mindmapper":["twd","twds"],
"application/vnd.smaf":["mmf"],
"application/vnd.smart.teacher":["teacher"],
"application/vnd.solent.sdkm+xml":["sdkm","sdkd"],
"application/vnd.spotfire.dxp":["dxp"],
"application/vnd.spotfire.sfs":["sfs"],
"application/vnd.stardivision.calc":["sdc"],
"application/vnd.stardivision.draw":["sda"],
"application/vnd.stardivision.impress":["sdd"],
"application/vnd.stardivision.math":["smf"],
"application/vnd.stardivision.writer":["sdw","vor"],
"application/vnd.stardivision.writer-global":["sgl"],
"application/vnd.stepmania.package":["smzip"],
"application/vnd.stepmania.stepchart":["sm"],
"application/vnd.sun.xml.calc":["sxc"],
"application/vnd.sun.xml.calc.template":["stc"],
"application/vnd.sun.xml.draw":["sxd"],
"application/vnd.sun.xml.draw.template":["std"],
"application/vnd.sun.xml.impress":["sxi"],
"application/vnd.sun.xml.impress.template":["sti"],
"application/vnd.sun.xml.math":["sxm"],
"application/vnd.sun.xml.writer":["sxw"],
"application/vnd.sun.xml.writer.global":["sxg"],
"application/vnd.sun.xml.writer.template":["stw"],
"application/vnd.sus-calendar":["sus","susp"],
"application/vnd.svd":["svd"],
"application/vnd.symbian.install":["sis","sisx"],
"application/vnd.syncml+xml":["xsm"],
"application/vnd.syncml.dm+wbxml":["bdm"],
"application/vnd.syncml.dm+xml":["xdm"],
"application/vnd.tao.intent-module-archive":["tao"],
"application/vnd.tcpdump.pcap":["pcap","cap","dmp"],
"application/vnd.tmobile-livetv":["tmo"],
"application/vnd.trid.tpt":["tpt"],
"application/vnd.triscape.mxs":["mxs"],
"application/vnd.trueapp":["tra"],
"application/vnd.ufdl":["ufd","ufdl"],
"application/vnd.uiq.theme":["utz"],
"application/vnd.umajin":["umj"],
"application/vnd.unity":["unityweb"],
"application/vnd.uoml+xml":["uoml"],
"application/vnd.vcx":["vcx"],
"application/vnd.visio":["vsd","vst","vss","vsw"],
"application/vnd.visionary":["vis"],
"application/vnd.vsf":["vsf"],
"application/vnd.wap.wbxml":["wbxml"],
"application/vnd.wap.wmlc":["wmlc"],
"application/vnd.wap.wmlscriptc":["wmlsc"],
"application/vnd.webturbo":["wtb"],
"application/vnd.wolfram.player":["nbp"],
"application/vnd.wordperfect":["wpd"],
"application/vnd.wqd":["wqd"],
"application/vnd.wt.stf":["stf"],
"application/vnd.xara":["xar"],
"application/vnd.xfdl":["xfdl"],
"application/vnd.yamaha.hv-dic":["hvd"],
"application/vnd.yamaha.hv-script":["hvs"],
"application/vnd.yamaha.hv-voice":["hvp"],
"application/vnd.yamaha.openscoreformat":["osf"],
"application/vnd.yamaha.openscoreformat.osfpvg+xml":["osfpvg"],
"application/vnd.yamaha.smaf-audio":["saf"],
"application/vnd.yamaha.smaf-phrase":["spf"],
"application/vnd.yellowriver-custom-menu":["cmp"],
"application/vnd.zul":["zir","zirz"],
"application/vnd.zzazz.deck+xml":["zaz"],
"application/voicexml+xml":["vxml"],
"application/widget":["wgt"],
"application/winhlp":["hlp"],
"application/wsdl+xml":["wsdl"],
"application/wspolicy+xml":["wspolicy"],
"application/x-7z-compressed":["7z"],
"application/x-abiword":["abw"],
"application/x-ace-compressed":["ace"],
"application/x-apple-diskimage":["dmg"],
"application/x-authorware-bin":["aab","x32","u32","vox"],
"application/x-authorware-map":["aam"],
"application/x-authorware-seg":["aas"],
"application/x-bcpio":["bcpio"],
"application/x-bittorrent":["torrent"],
"application/x-blorb":["blb","blorb"],
"application/x-bzip":["bz"],
"application/x-bzip2":["bz2","boz"],
"application/x-cbr":["cbr","cba","cbt","cbz","cb7"],
"application/x-cdlink":["vcd"],
"application/x-cfs-compressed":["cfs"],
"application/x-chat":["chat"],
"application/x-chess-pgn":["pgn"],
"application/x-conference":["nsc"],
"application/x-cpio":["cpio"],
"application/x-csh":["csh"],
"application/x-debian-package":["deb","udeb"],
"application/x-dgc-compressed":["dgc"],
"application/x-director":["dir","dcr","dxr","cst","cct","cxt","w3d","fgd","swa"],
"application/x-doom":["wad"],
"application/x-dtbncx+xml":["ncx"],
"application/x-dtbook+xml":["dtb"],
"application/x-dtbresource+xml":["res"],
"application/x-dvi":["dvi"],
"application/x-envoy":["evy"],
"application/x-eva":["eva"],
"application/x-font-bdf":["bdf"],
"application/x-font-ghostscript":["gsf"],
"application/x-font-linux-psf":["psf"],
"application/x-font-otf":["otf"],
"application/x-font-pcf":["pcf"],
"application/x-font-snf":["snf"],
"application/x-font-ttf":["ttf","ttc"],
"application/x-font-type1":["pfa","pfb","pfm","afm"],
"application/x-freearc":["arc"],
"application/x-futuresplash":["spl"],
"application/x-gca-compressed":["gca"],
"application/x-glulx":["ulx"],
"application/x-gnumeric":["gnumeric"],
"application/x-gramps-xml":["gramps"],
"application/x-gtar":["gtar"],
"application/x-hdf":["hdf"],
"application/x-install-instructions":["install"],
"application/x-iso9660-image":["iso"],
"application/x-java-jnlp-file":["jnlp"],
"application/x-latex":["latex"],
"application/x-lzh-compressed":["lzh","lha"],
"application/x-mie":["mie"],
"application/x-mobipocket-ebook":["prc","mobi"],
"application/x-ms-application":["application"],
"application/x-ms-shortcut":["lnk"],
"application/x-ms-wmd":["wmd"],
"application/x-ms-wmz":["wmz"],
"application/x-ms-xbap":["xbap"],
"application/x-msaccess":["mdb"],
"application/x-msbinder":["obd"],
"application/x-mscardfile":["crd"],
"application/x-msclip":["clp"],
"application/x-msdownload":["exe","dll","com","bat","msi"],
"application/x-msmediaview":["mvb","m13","m14"],
"application/x-msmetafile":["wmf","wmz","emf","emz"],
"application/x-msmoney":["mny"],
"application/x-mspublisher":["pub"],
"application/x-msschedule":["scd"],
"application/x-msterminal":["trm"],
"application/x-mswrite":["wri"],
"application/x-netcdf":["nc","cdf"],
"application/x-nzb":["nzb"],
"application/x-pkcs12":["p12","pfx"],
"application/x-pkcs7-certificates":["p7b","spc"],
"application/x-pkcs7-certreqresp":["p7r"],
"application/x-rar-compressed":["rar"],
"application/x-research-info-systems":["ris"],
"application/x-sh":["sh"],
"application/x-shar":["shar"],
"application/x-shockwave-flash":["swf"],
"application/x-silverlight-app":["xap"],
"application/x-sql":["sql"],
"application/x-stuffit":["sit"],
"application/x-stuffitx":["sitx"],
"application/x-subrip":["srt"],
"application/x-sv4cpio":["sv4cpio"],
"application/x-sv4crc":["sv4crc"],
"application/x-t3vm-image":["t3"],
"application/x-tads":["gam"],
"application/x-tar":["tar"],
"application/x-tcl":["tcl"],
"application/x-tex":["tex"],
"application/x-tex-tfm":["tfm"],
"application/x-texinfo":["texinfo","texi"],
"application/x-tgif":["obj"],
"application/x-ustar":["ustar"],
"application/x-wais-source":["src"],
"application/x-x509-ca-cert":["der","crt"],
"application/x-xfig":["fig"],
"application/x-xliff+xml":["xlf"],
"application/x-xpinstall":["xpi"],
"application/x-xz":["xz"],
"application/x-zmachine":["z1","z2","z3","z4","z5","z6","z7","z8"],
"application/xaml+xml":["xaml"],
"application/xcap-diff+xml":["xdf"],
"application/xenc+xml":["xenc"],
"application/xhtml+xml":["xhtml","xht"],
"application/xml":["xml","xsl"],
"application/xml-dtd":["dtd"],
"application/xop+xml":["xop"],
"application/xproc+xml":["xpl"],
"application/xslt+xml":["xslt"],
"application/xspf+xml":["xspf"],
"application/xv+xml":["mxml","xhvml","xvml","xvm"],
"application/yang":["yang"],
"application/yin+xml":["yin"],
"application/zip":["zip"],
"audio/adpcm":["adp"],
"audio/basic":["au","snd"],
"audio/midi":["mid","midi","kar","rmi"],
"audio/mp4":["m4a","mp4a"],
"audio/mpeg":["mpga","mp2","mp2a","mp3","m2a","m3a"],
"audio/ogg":["oga","ogg","spx"],
"audio/s3m":["s3m"],
"audio/silk":["sil"],
"audio/vnd.dece.audio":["uva","uvva"],
"audio/vnd.digital-winds":["eol"],
"audio/vnd.dra":["dra"],
"audio/vnd.dts":["dts"],
"audio/vnd.dts.hd":["dtshd"],
"audio/vnd.lucent.voice":["lvp"],
"audio/vnd.ms-playready.media.pya":["pya"],
"audio/vnd.nuera.ecelp4800":["ecelp4800"],
"audio/vnd.nuera.ecelp7470":["ecelp7470"],
"audio/vnd.nuera.ecelp9600":["ecelp9600"],
"audio/vnd.rip":["rip"],
"audio/webm":["weba"],
"audio/x-aac":["aac"],
"audio/x-aiff":["aif","aiff","aifc"],
"audio/x-caf":["caf"],
"audio/x-flac":["flac"],
"audio/x-matroska":["mka"],
"audio/x-mpegurl":["m3u"],
"audio/x-ms-wax":["wax"],
"audio/x-ms-wma":["wma"],
"audio/x-pn-realaudio":["ram","ra"],
"audio/x-pn-realaudio-plugin":["rmp"],
"audio/x-wav":["wav"],
"audio/xm":["xm"],
"chemical/x-cdx":["cdx"],
"chemical/x-cif":["cif"],
"chemical/x-cmdf":["cmdf"],
"chemical/x-cml":["cml"],
"chemical/x-csml":["csml"],
"chemical/x-xyz":["xyz"],
"image/bmp":["bmp"],
"image/cgm":["cgm"],
"image/g3fax":["g3"],
"image/gif":["gif"],
"image/ief":["ief"],
"image/jpeg":["jpeg","jpg","jpe"],
"image/ktx":["ktx"],
"image/png":["png"],
"image/prs.btif":["btif"],
"image/sgi":["sgi"],
"image/svg+xml":["svg","svgz"],
"image/tiff":["tiff","tif"],
"image/vnd.adobe.photoshop":["psd"],
"image/vnd.dece.graphic":["uvi","uvvi","uvg","uvvg"],
"image/vnd.djvu":["djvu","djv"],
"image/vnd.dvb.subtitle":["sub"],
"image/vnd.dwg":["dwg"],
"image/vnd.dxf":["dxf"],
"image/vnd.fastbidsheet":["fbs"],
"image/vnd.fpx":["fpx"],
"image/vnd.fst":["fst"],
"image/vnd.fujixerox.edmics-mmr":["mmr"],
"image/vnd.fujixerox.edmics-rlc":["rlc"],
"image/vnd.ms-modi":["mdi"],
"image/vnd.ms-photo":["wdp"],
"image/vnd.net-fpx":["npx"],
"image/vnd.wap.wbmp":["wbmp"],
"image/vnd.xiff":["xif"],
"image/webp":["webp"],
"image/x-3ds":["3ds"],
"image/x-cmu-raster":["ras"],
"image/x-cmx":["cmx"],
"image/x-freehand":["fh","fhc","fh4","fh5","fh7"],
"image/x-icon":["ico"],
"image/x-mrsid-image":["sid"],
"image/x-pcx":["pcx"],
"image/x-pict":["pic","pct"],
"image/x-portable-anymap":["pnm"],
"image/x-portable-bitmap":["pbm"],
"image/x-portable-graymap":["pgm"],
"image/x-portable-pixmap":["ppm"],
"image/x-rgb":["rgb"],
"image/x-tga":["tga"],
"image/x-xbitmap":["xbm"],
"image/x-xpixmap":["xpm"],
"image/x-xwindowdump":["xwd"],
"message/rfc822":["eml","mime"],
"model/iges":["igs","iges"],
"model/mesh":["msh","mesh","silo"],
"model/vnd.collada+xml":["dae"],
"model/vnd.dwf":["dwf"],
"model/vnd.gdl":["gdl"],
"model/vnd.gtw":["gtw"],
"model/vnd.mts":["mts"],
"model/vnd.vtu":["vtu"],
"model/vrml":["wrl","vrml"],
"model/x3d+binary":["x3db","x3dbz"],
"model/x3d+vrml":["x3dv","x3dvz"],
"model/x3d+xml":["x3d","x3dz"],
"text/cache-manifest":["appcache"],
"text/calendar":["ics","ifb"],
"text/css":["css"],
"text/csv":["csv"],
"text/html":["html","htm"],
"text/n3":["n3"],
"text/plain":["txt","text","conf","def","list","log","in"],
"text/prs.lines.tag":["dsc"],
"text/richtext":["rtx"],
"text/sgml":["sgml","sgm"],
"text/tab-separated-values":["tsv"],
"text/troff":["t","tr","roff","man","me","ms"],
"text/turtle":["ttl"],
"text/uri-list":["uri","uris","urls"],
"text/vcard":["vcard"],
"text/vnd.curl":["curl"],
"text/vnd.curl.dcurl":["dcurl"],
"text/vnd.curl.mcurl":["mcurl"],
"text/vnd.curl.scurl":["scurl"],
"text/vnd.dvb.subtitle":["sub"],
"text/vnd.fly":["fly"],
"text/vnd.fmi.flexstor":["flx"],
"text/vnd.graphviz":["gv"],
"text/vnd.in3d.3dml":["3dml"],
"text/vnd.in3d.spot":["spot"],
"text/vnd.sun.j2me.app-descriptor":["jad"],
"text/vnd.wap.wml":["wml"],
"text/vnd.wap.wmlscript":["wmls"],
"text/x-asm":["s","asm"],
"text/x-c":["c","cc","cxx","cpp","h","hh","dic"],
"text/x-fortran":["f","for","f77","f90"],
"text/x-java-source":["java"],
"text/x-nfo":["nfo"],
"text/x-opml":["opml"],
"text/x-pascal":["p","pas"],
"text/x-setext":["etx"],
"text/x-sfv":["sfv"],
"text/x-uuencode":["uu"],
"text/x-vcalendar":["vcs"],
"text/x-vcard":["vcf"],
"video/3gpp":["3gp"],
"video/3gpp2":["3g2"],
"video/h261":["h261"],
"video/h263":["h263"],
"video/h264":["h264"],
"video/jpeg":["jpgv"],
"video/jpm":["jpm","jpgm"],
"video/mj2":["mj2","mjp2"],
"video/mp4":["mp4","mp4v","mpg4"],
"video/mpeg":["mpeg","mpg","mpe","m1v","m2v"],
"video/ogg":["ogv"],
"video/quicktime":["qt","mov"],
"video/vnd.dece.hd":["uvh","uvvh"],
"video/vnd.dece.mobile":["uvm","uvvm"],
"video/vnd.dece.pd":["uvp","uvvp"],
"video/vnd.dece.sd":["uvs","uvvs"],
"video/vnd.dece.video":["uvv","uvvv"],
"video/vnd.dvb.file":["dvb"],
"video/vnd.fvt":["fvt"],
"video/vnd.mpegurl":["mxu","m4u"],
"video/vnd.ms-playready.media.pyv":["pyv"],
"video/vnd.uvvu.mp4":["uvu","uvvu"],
"video/vnd.vivo":["viv"],
"video/webm":["webm"],
"video/x-f4v":["f4v"],
"video/x-fli":["fli"],
"video/x-flv":["flv"],
"video/x-m4v":["m4v"],
"video/x-matroska":["mkv","mk3d","mks"],
"video/x-mng":["mng"],
"video/x-ms-asf":["asf","asx"],
"video/x-ms-vob":["vob"],
"video/x-ms-wm":["wm"],
"video/x-ms-wmv":["wmv"],
"video/x-ms-wmx":["wmx"],
"video/x-ms-wvx":["wvx"],
"video/x-msvideo":["avi"],
"video/x-sgi-movie":["movie"],
"video/x-smv":["smv"],
"x-conference/x-cooltalk":["ice"]} | {
"content_hash": "176c3bb77cf16c319b369d6acb6f9133",
"timestamp": "",
"source": "github",
"line_count": 765,
"max_line_length": 107,
"avg_line_length": 39.07843137254902,
"alnum_prop": 0.6785415621341362,
"repo_name": "aidatorajiro/Wiki2Epub",
"id": "917704fd689c6fcf6ac8e0ed410b3d44a04a2410",
"size": "29895",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "extensions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1159"
},
{
"name": "Python",
"bytes": "52248"
}
],
"symlink_target": ""
} |
'''
RPC Dispatcher Tests
--------------------
'''
import base64
import json
import unittest
from xml.dom.minidom import parseString
from django.core.exceptions import ImproperlyConfigured
try:
from rpc4django.rpcdispatcher import rpcmethod, RPCMethod, RPCDispatcher
except ImproperlyConfigured:
# Configure Django if not already configured
from django.conf import settings
settings.configure(DEBUG=True)
from rpc4django.rpcdispatcher import rpcmethod, RPCMethod, RPCDispatcher
try:
from xmlrpclib import Fault, Binary
except ImportError:
from xmlrpc.client import Fault, Binary
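# Arbitrary raw bytes used to exercise Binary/base64 round-tripping in the
# dispatcher tests below.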
BINARY_STRING = b'\x97\xd2\xab\xc8\xfc\x98\xad'
# tests both the class and the decorator
class TestRPCMethod(unittest.TestCase):
def setUp(self):
@rpcmethod(name='my.add', signature=['int', 'int', 'int'])
def add(a, b):
return a + b
self.add = RPCMethod(add)
@rpcmethod()
def test1(arg1):
return 4
self.test1 = RPCMethod(test1)
def test_verify_creation(self):
self.assertEqual(self.add.name, 'my.add')
self.assertEqual(self.add.signature, ['int', 'int', 'int'])
self.assertEqual(self.add.args, ['a', 'b'])
self.assertEqual(self.test1.name, 'test1')
self.assertEqual(self.test1.signature, ['object', 'object'])
self.assertEqual(self.test1.args, ['arg1'])
    def test_get_returnvalue(self):
self.assertEqual(self.add.get_returnvalue(), 'int')
self.assertEqual(self.test1.get_returnvalue(), 'object')
def test_get_params(self):
self.assertEqual(self.add.get_params(), [{'name': 'a', 'rpctype': 'int'}, {'name': 'b', 'rpctype': 'int'}])
self.assertEqual(self.test1.get_params(), [{'name': 'arg1', 'rpctype': 'object'}])
class TestRPCDispatcher(unittest.TestCase):
def setUp(self):
self.d = RPCDispatcher()
def add(a, b):
return a + b
self.add = add
def kwargstest(a, b, **kwargs):
if kwargs.get('c', None) is not None:
return True
return False
self.kwargstest = kwargstest
def testBin():
return Binary(BINARY_STRING)
self.testBin = testBin
def test_rpcfault(self):
try:
self.d.system_methodhelp('method.DoesNotExist.AtAll')
            self.fail('expected Fault for nonexistent method!')
except Fault:
pass
try:
self.d.system_methodsignature('method.DoesNotExist.AtAll')
            self.fail('expected Fault for nonexistent method!')
except Fault:
pass
def test_listmethods(self):
resp = self.d.system_listmethods()
self.assertEqual(resp, ['system.describe', 'system.listMethods', 'system.methodHelp', 'system.methodSignature'])
self.d.register_method(self.add)
resp = self.d.system_listmethods()
self.assertEqual(resp, ['add', 'system.describe', 'system.listMethods', 'system.methodHelp', 'system.methodSignature'])
def test_methodhelp(self):
resp = self.d.system_methodhelp('system.methodHelp')
self.assertEqual(resp, 'Returns documentation for a specified method')
def test_methodsignature(self):
resp = self.d.system_methodsignature('system.listMethods')
self.assertEqual(resp, ['array'])
resp = self.d.system_methodsignature('system.methodSignature')
self.assertEqual(resp, ['array', 'string'])
def test_xmlrpc_call(self):
xml = '<?xml version="1.0"?><methodCall><methodName>system.listMethods</methodName><params></params></methodCall>'
expresp = "<?xml version='1.0'?><methodResponse><params><param><value><array><data><value><string>system.describe</string></value><value><string>system.listMethods</string></value><value><string>system.methodHelp</string></value><value><string>system.methodSignature</string></value></data></array></value></param></params></methodResponse>"
resp = self.d.xmldispatch(xml.encode('utf-8'))
self.assertEqual(resp.replace('\n', ''), expresp)
def test_unicode_call(self):
self.d.register_method(self.add)
s1 = u'は'
s2 = u'じめまして'
xml = u'<?xml version="1.0"?><methodCall><methodName>add</methodName><params><param><value><string>%s</string></value></param><param><value><string>%s</string></value></param></params></methodCall>' % (s1, s2)
resp = self.d.xmldispatch(xml.encode('utf-8'))
dom = parseString(resp)
retval = dom.getElementsByTagName('string')[0].firstChild.data
self.assertEqual(retval, u'はじめまして')
def test_base64_call(self):
self.d.register_method(self.testBin)
xml = '<?xml version="1.0"?><methodCall><methodName>testBin</methodName><params></params></methodCall>'
resp = self.d.xmldispatch(xml.encode('utf-8'))
dom = parseString(resp)
retval = dom.getElementsByTagName('base64')[0].firstChild.data
self.assertEqual(base64.b64decode(retval), BINARY_STRING)
def test_jsonrpc_call(self):
jsontxt = '{"params":[],"method":"system.listMethods","id":1}'
resp = self.d.jsondispatch(jsontxt.encode('utf-8'))
jsondict = json.loads(resp)
self.assertTrue(jsondict['error'] is None)
self.assertEqual(jsondict['id'], 1)
self.assertTrue(isinstance(jsondict['result'], list))
def test_register_method(self):
self.d.register_method(self.add)
jsontxt = '{"params":[1,2],"method":"add","id":1}'
resp = self.d.jsondispatch(jsontxt.encode('utf-8'))
jsondict = json.loads(resp)
self.assertTrue(jsondict['error'] is None)
self.assertEqual(jsondict['id'], 1)
self.assertEqual(jsondict['result'], 3)
def test_kwargs(self):
self.d.register_method(self.kwargstest)
jsontxt = '{"params":[1,2],"method":"kwargstest","id":1}'
resp = self.d.jsondispatch(jsontxt.encode('utf-8'))
jsondict = json.loads(resp)
self.assertFalse(jsondict['result'])
resp = self.d.jsondispatch(jsontxt.encode('utf-8'), c=1)
jsondict = json.loads(resp)
self.assertTrue(jsondict['result'])
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "853e029d70879a76cf9dbbae9a484e60",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 349,
"avg_line_length": 37.1301775147929,
"alnum_prop": 0.6328286852589642,
"repo_name": "angvp/rpc4django",
"id": "ae854766c6b2fbe9f04b5554ca0dee68b7d1dec8",
"size": "6324",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_rpcdispatcher.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "3518"
},
{
"name": "Python",
"bytes": "62436"
},
{
"name": "Shell",
"bytes": "341"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('questions', '0013_auto_20160505_0905'),
]
operations = [
migrations.AlterField(
model_name='question',
name='count',
field=jsonfield.fields.JSONField(blank=True),
),
]
| {
"content_hash": "e09964e2b253b24705b4a5a3c8e4f630",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 57,
"avg_line_length": 21.263157894736842,
"alnum_prop": 0.6138613861386139,
"repo_name": "watchdogpolska/bliski_publikator",
"id": "9bb70e2b40b4506c5be210bf3285bc7b311127df",
"size": "476",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bliski_publikator/questions/migrations/0014_auto_20160505_0941.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "38778"
},
{
"name": "HTML",
"bytes": "109977"
},
{
"name": "JavaScript",
"bytes": "14459"
},
{
"name": "Python",
"bytes": "184036"
},
{
"name": "TypeScript",
"bytes": "38566"
}
],
"symlink_target": ""
} |
import ReadConfig
import Utils
from FTModuleClass import *
from SIL.LCModel import *
from SIL.LCModel.Core.KernelInterfaces import ITsString, ITsStrBldr
#----------------------------------------------------------------
# Documentation that the user sees:
docs = {FTM_Name : "End Testbed",
FTM_Version : "3.4.1",
FTM_ModifiesDB : False,
FTM_Synopsis : "Conclude a testbed log result.",
FTM_Help : "",
FTM_Description:
"""
Conclude a testbed log result.
""" }
#----------------------------------------------------------------
# The main processing function
def MainFunction(DB, report, modifyAllowed):
# Read the configuration file which we assume is in the current directory.
configMap = ReadConfig.readConfig(report)
if not configMap:
return
# Get the synthesis file name
outFileVal = ReadConfig.getConfigVal(configMap, ReadConfig.TARGET_SYNTHESIS_FILE, report)
if not outFileVal:
return
# Open the synthesis file
try:
f_out = open(outFileVal, encoding='utf-8')
except IOError:
report.Error('There is a problem with the Synthesis Output File path: '+outFileVal+'. Please check the configuration file setting.')
return
# Create an object for the testbed results file and get the associated
# XML object
resultsFileObj = Utils.FlexTransTestbedResultsFile(report)
resultsXMLObj = resultsFileObj.getResultsXMLObj()
# Extract the results from the myText.syn file
count = resultsXMLObj.extractResults(f_out)
f_out.close()
# If we were successful write the end date-time and save the file
if count > 0:
resultsXMLObj.endTest()
resultsFileObj.write()
    # Let the user know how many valid/invalid tests were dumped
report.Info(str(count) + ' results extracted.')
#----------------------------------------------------------------
# The name 'FlexToolsModule' must be defined like this:
FlexToolsModule = FlexToolsModuleClass(runFunction = MainFunction,
docs = docs)
#---------------------------------
if __name__ == '__main__':
FlexToolsModule.Help()
| {
"content_hash": "07aab958ccdb13e5c2231bed3ed7bcba",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 140,
"avg_line_length": 34.588235294117645,
"alnum_prop": 0.5625,
"repo_name": "rmlockwood/FLExTrans",
"id": "02c80407247a87284e44da87c16c0556a5f97bbf",
"size": "3250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Install2.0/FlexTools2.0-FLExTrans/FlexTools/Modules/FLExTrans/EndTestbed.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "12781"
},
{
"name": "CSS",
"bytes": "45823"
},
{
"name": "HTML",
"bytes": "827329"
},
{
"name": "Makefile",
"bytes": "7353"
},
{
"name": "NSIS",
"bytes": "12387"
},
{
"name": "Python",
"bytes": "4442196"
},
{
"name": "VBScript",
"bytes": "2068"
}
],
"symlink_target": ""
} |
import httplib as http
import itertools
from flask import request
from modularodm import Q
from modularodm.exceptions import NoResultsFound
from framework import status
from framework.exceptions import HTTPError
from framework.flask import redirect # VOL-aware redirect
from framework.auth.decorators import must_be_signed
from website.archiver import ARCHIVER_SUCCESS, ARCHIVER_FAILURE
from website import settings
from website.exceptions import NodeStateError
from website.project.decorators import (
must_be_valid_project, must_be_contributor_or_public,
must_have_permission,
must_not_be_registration, must_be_registration,
)
from website.identifiers.model import Identifier
from website.identifiers.metadata import datacite_metadata_for_node
from website.project.utils import serialize_node
from website.util.permissions import ADMIN
from website.models import MetaSchema, NodeLog
from website import language
from website.project import signals as project_signals
from website.project.metadata.schemas import _id_to_name
from website import util
from website.project.metadata.utils import serialize_meta_schema
from website.archiver.decorators import fail_archive_on_error
from website.identifiers.client import EzidClient
from .node import _view_project
@must_be_valid_project
@must_be_contributor_or_public
def node_register_page(auth, node, **kwargs):
"""Display the registration metadata for a registration.
:return: serialized Node
"""
if node.is_registration:
return serialize_node(node, auth)
else:
status.push_status_message(
'You have been redirected to the project\'s registrations page. From here you can initiate a new Draft Registration to complete the registration process',
trust=False)
return redirect(node.web_url_for('node_registrations', view='draft'))
@must_be_valid_project
@must_have_permission(ADMIN)
def node_registration_retraction_redirect(auth, node, **kwargs):
return redirect(node.web_url_for('node_registration_retraction_get', _guid=True))
@must_be_valid_project
@must_have_permission(ADMIN)
def node_registration_retraction_get(auth, node, **kwargs):
"""Prepares node object for registration retraction page.
:return: serialized Node to be retracted
:raises: 400: BAD_REQUEST if registration already pending retraction
"""
if not node.is_registration:
raise HTTPError(http.BAD_REQUEST, data={
'message_short': 'Invalid Request',
'message_long': 'Withdrawal of non-registrations is not permitted.'
})
if node.is_pending_retraction:
raise HTTPError(http.BAD_REQUEST, data={
'message_short': 'Invalid Request',
'message_long': 'This registration is already pending withdrawal.'
})
return serialize_node(node, auth, primary=True)
@must_be_valid_project
@must_have_permission(ADMIN)
def node_registration_retraction_post(auth, node, **kwargs):
"""Handles retraction of public registrations
:param auth: Authentication object for User
:return: Redirect URL for successful POST
"""
if node.is_pending_retraction:
raise HTTPError(http.BAD_REQUEST, data={
'message_short': 'Invalid Request',
'message_long': 'This registration is already pending withdrawal'
})
if not node.is_registration:
raise HTTPError(http.BAD_REQUEST, data={
'message_short': 'Invalid Request',
'message_long': 'Withdrawal of non-registrations is not permitted.'
})
if node.root is not node:
raise HTTPError(http.BAD_REQUEST, data={
'message_short': 'Invalid Request',
'message_long': 'Withdrawal of non-parent registrations is not permitted.'
})
data = request.get_json()
try:
node.retract_registration(auth.user, data.get('justification', None))
node.save()
node.retraction.ask(node.get_active_contributors_recursive(unique_users=True))
except NodeStateError as err:
raise HTTPError(http.FORBIDDEN, data=dict(message_long=err.message))
return {'redirectUrl': node.web_url_for('view_project')}
@must_be_valid_project
@must_be_contributor_or_public
def node_register_template_page(auth, node, metaschema_id, **kwargs):
if node.is_registration and bool(node.registered_schema):
try:
meta_schema = MetaSchema.find_one(
Q('_id', 'eq', metaschema_id)
)
except NoResultsFound:
            # backwards compatibility for old urls, lookup by name
try:
meta_schema = MetaSchema.find(
Q('name', 'eq', _id_to_name(metaschema_id))
).sort('-schema_version')[0]
except IndexError:
raise HTTPError(http.NOT_FOUND, data={
'message_short': 'Invalid schema name',
'message_long': 'No registration schema with that name could be found.'
})
if meta_schema not in node.registered_schema:
raise HTTPError(http.BAD_REQUEST, data={
'message_short': 'Invalid schema',
                'message_long': 'This registration has no registration supplement with that name.'
})
ret = _view_project(node, auth, primary=True)
ret['node']['registered_schema'] = serialize_meta_schema(meta_schema)
return ret
else:
status.push_status_message(
'You have been redirected to the project\'s registrations page. From here you can initiate a new Draft Registration to complete the registration process',
trust=False
)
return redirect(node.web_url_for('node_registrations', view=kwargs.get('template')))
@must_be_valid_project # returns project
@must_have_permission(ADMIN)
@must_not_be_registration
def project_before_register(auth, node, **kwargs):
"""Returns prompt informing user that addons, if any, won't be registered."""
# TODO: Avoid generating HTML code in Python; all HTML should be in display layer
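    # Addons are bucketed by how completely their content can be archived
    # ('full', 'partial', 'none'); each bucket collects the affected addon
    # names and the user-facing message shown before registering.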
messages = {
'full': {
'addons': set(),
'message': 'The content and version history of <strong>{0}</strong> will be copied to the registration.',
},
'partial': {
'addons': set(),
'message': 'The current version of the content in <strong>{0}</strong> will be copied to the registration, but version history will be lost.'
},
'none': {
'addons': set(),
'message': 'The contents of <strong>{0}</strong> cannot be registered at this time, and will not be included as part of this registration.',
},
}
errors = {}
addon_set = [n.get_addons() for n in itertools.chain([node], node.get_descendants_recursive(lambda n: n.primary))]
for addon in itertools.chain(*addon_set):
if not addon.complete:
continue
archive_errors = getattr(addon, 'archive_errors', None)
error = None
if archive_errors:
error = archive_errors()
if error:
errors[addon.config.short_name] = error
continue
name = addon.config.short_name
if name in settings.ADDONS_ARCHIVABLE:
messages[settings.ADDONS_ARCHIVABLE[name]]['addons'].add(addon.config.full_name)
else:
messages['none']['addons'].add(addon.config.full_name)
error_messages = errors.values()
prompts = [
m['message'].format(util.conjunct(m['addons']))
for m in messages.values() if m['addons']
]
if node.has_pointers_recursive:
prompts.append(
language.BEFORE_REGISTER_HAS_POINTERS.format(
category=node.project_or_component
)
)
return {
'prompts': prompts,
'errors': error_messages
}
def _build_ezid_metadata(node):
"""Build metadata for submission to EZID using the DataCite profile. See
http://ezid.cdlib.org/doc/apidoc.html for details.
"""
doi = settings.EZID_FORMAT.format(namespace=settings.DOI_NAMESPACE, guid=node._id)
metadata = {
'_target': node.absolute_url,
'datacite': datacite_metadata_for_node(node=node, doi=doi)
}
return doi, metadata
def _get_or_create_identifiers(node):
"""
Note: ARKs include a leading slash. This is stripped here to avoid multiple
consecutive slashes in internal URLs (e.g. /ids/ark/<ark>/). Frontend code
    that builds ARK URLs is responsible for adding the leading slash.
"""
doi, metadata = _build_ezid_metadata(node)
client = EzidClient(settings.EZID_USERNAME, settings.EZID_PASSWORD)
try:
resp = client.create_identifier(doi, metadata)
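        # The success payload is a '|'-separated list of 'scheme:value' pairs;
        # build a {scheme: value} dict, stripping leading slashes (see the ARK
        # note in the docstring above).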
return dict(
[each.strip('/') for each in pair.strip().split(':')]
for pair in resp['success'].split('|')
)
except HTTPError as error:
if 'identifier already exists' not in error.message.lower():
raise
resp = client.get_identifier(doi)
doi = resp['success']
suffix = doi.strip(settings.DOI_NAMESPACE)
return {
'doi': doi.replace('doi:', ''),
'ark': '{0}{1}'.format(settings.ARK_NAMESPACE.replace('ark:', ''), suffix),
}
@must_be_valid_project
@must_be_contributor_or_public
def node_identifiers_get(node, **kwargs):
"""Retrieve identifiers for a node. Node must be a public registration.
"""
if not node.is_registration or not node.is_public:
raise HTTPError(http.BAD_REQUEST)
return {
'doi': node.get_identifier_value('doi'),
'ark': node.get_identifier_value('ark'),
}
@must_be_valid_project
@must_have_permission(ADMIN)
def node_identifiers_post(auth, node, **kwargs):
"""Create identifier pair for a node. Node must be a public registration.
"""
# TODO: Fail if `node` is retracted
if not node.is_registration or not node.is_public: # or node.is_retracted:
raise HTTPError(http.BAD_REQUEST)
if node.get_identifier('doi') or node.get_identifier('ark'):
raise HTTPError(http.BAD_REQUEST)
try:
identifiers = _get_or_create_identifiers(node)
except HTTPError:
raise HTTPError(http.BAD_REQUEST)
for category, value in identifiers.iteritems():
node.set_identifier_value(category, value)
node.add_log(
NodeLog.EXTERNAL_IDS_ADDED,
params={
'parent_node': node.parent_id,
'node': node._id,
'identifiers': identifiers,
},
auth=auth,
)
return identifiers, http.CREATED
def get_referent_by_identifier(category, value):
"""Look up identifier by `category` and `value` and redirect to its referent
if found.
"""
try:
identifier = Identifier.find_one(
Q('category', 'eq', category) &
Q('value', 'eq', value)
)
except NoResultsFound:
raise HTTPError(http.NOT_FOUND)
if identifier.referent.url:
return redirect(identifier.referent.url)
raise HTTPError(http.NOT_FOUND)
@fail_archive_on_error
@must_be_signed
@must_be_registration
def registration_callbacks(node, payload, *args, **kwargs):
errors = payload.get('errors')
src_provider = payload['source']['provider']
if errors:
node.archive_job.update_target(
src_provider,
ARCHIVER_FAILURE,
errors=errors,
)
else:
        # Dataverse requires two separate targets, one
# for draft files and one for published files
if src_provider == 'dataverse':
src_provider += '-' + (payload['destination']['name'].split(' ')[-1].lstrip('(').rstrip(')').strip())
node.archive_job.update_target(
src_provider,
ARCHIVER_SUCCESS,
)
project_signals.archive_callback.send(node)
| {
"content_hash": "988a72acf1d8c346d49d02aebb99e272",
"timestamp": "",
"source": "github",
"line_count": 324,
"max_line_length": 166,
"avg_line_length": 36.91975308641975,
"alnum_prop": 0.6472161845845177,
"repo_name": "TomHeatwole/osf.io",
"id": "55efe7e91e3a3e224e5bc243eebca81e0c1d7d89",
"size": "11986",
"binary": false,
"copies": "13",
"ref": "refs/heads/develop",
"path": "website/project/views/register.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "140360"
},
{
"name": "HTML",
"bytes": "94857"
},
{
"name": "JavaScript",
"bytes": "1561313"
},
{
"name": "Mako",
"bytes": "659751"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "5250038"
}
],
"symlink_target": ""
} |
import os
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup
readme = open(os.path.join(os.path.dirname(__file__), 'README'), 'r').read()
setup(
name='ACIS-pandas',
author='Bill Noon',
author_email='[email protected]',
version='0.1.0',
url='http://github.com/bnoon/acis-pandas',
py_modules=['ACISLoader'],
description='Access the ACIS data via a pandas Panel.',
long_description=readme,
zip_safe=True,
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python'
]
)
| {
"content_hash": "cd2ff1211c1451d45056870a4e7356d0",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 76,
"avg_line_length": 26.347826086956523,
"alnum_prop": 0.636963696369637,
"repo_name": "bnoon/acis-pandas",
"id": "9d2652645e2ce9b606d7135d02efc4bb7caab48a",
"size": "606",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7435"
}
],
"symlink_target": ""
} |
"""
This template script should accept two arguments: the source input filename
(which is expected to exist) and the target output filename (expected to be
created by this script).
"""
import sys
source = sys.argv[1]
target = sys.argv[2]
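# A minimal sketch of the kind of processing a concrete script might add here
# (assumption: a plain copy stands in for a real transform):
#
#     with open(source) as src, open(target, 'w') as dst:
#         dst.write(src.read())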
| {
"content_hash": "90e17e8869dc9e0f2e8165b3e42f90ed",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 75,
"avg_line_length": 26.555555555555557,
"alnum_prop": 0.7531380753138075,
"repo_name": "daler/hubward",
"id": "65960ce1271dc468feb456d1893fac1c8c49e12e",
"size": "262",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "resources/process_template.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1855"
},
{
"name": "Python",
"bytes": "59761"
},
{
"name": "Shell",
"bytes": "3434"
}
],
"symlink_target": ""
} |
from oslo_log import log
from oslo_utils import excutils
from manila.i18n import _LE
from manila.share.drivers.netapp.dataontap.client import api as netapp_api
from manila.share.drivers.netapp import utils as na_utils
LOG = log.getLogger(__name__)
class NetAppBaseClient(object):
def __init__(self, **kwargs):
self.connection = netapp_api.NaServer(
host=kwargs['hostname'],
transport_type=kwargs['transport_type'],
port=kwargs['port'],
username=kwargs['username'],
password=kwargs['password'],
trace=kwargs.get('trace', False))
def get_ontapi_version(self, cached=True):
"""Gets the supported ontapi version."""
if cached:
return self.connection.get_api_version()
ontapi_version = netapp_api.NaElement('system-get-ontapi-version')
res = self.connection.invoke_successfully(ontapi_version, False)
major = res.get_child_content('major-version')
minor = res.get_child_content('minor-version')
return major, minor
def _init_features(self):
"""Set up the repository of available Data ONTAP features."""
self.features = Features()
def check_is_naelement(self, elem):
"""Checks if object is instance of NaElement."""
if not isinstance(elem, netapp_api.NaElement):
raise ValueError('Expects NaElement')
def send_request(self, api_name, api_args=None, enable_tunneling=True):
"""Sends request to Ontapi."""
request = netapp_api.NaElement(api_name)
if api_args:
request.translate_struct(api_args)
return self.connection.invoke_successfully(request, enable_tunneling)
@na_utils.trace
def get_licenses(self):
try:
result = self.send_request('license-v2-list-info')
except netapp_api.NaApiError as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Could not get licenses list. %s."), e)
return sorted(
[l.get_child_content('package').lower()
for l in result.get_child_by_name('licenses').get_children()])
def send_ems_log_message(self, message_dict):
"""Sends a message to the Data ONTAP EMS log."""
raise NotImplementedError()
class Features(object):
def __init__(self):
self.defined_features = set()
def add_feature(self, name, supported=True):
if not isinstance(supported, bool):
raise TypeError("Feature value must be a bool type.")
self.defined_features.add(name)
setattr(self, name, supported)
def __getattr__(self, name):
# NOTE(cknight): Needed to keep pylint happy.
raise AttributeError
| {
"content_hash": "88a8ce3fc04f4cdc1c3bd8fb590ff164",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 77,
"avg_line_length": 34.074074074074076,
"alnum_prop": 0.6315217391304347,
"repo_name": "weiting-chen/manila",
"id": "45e1148aead94dace71c00aad947813f81cf6d82",
"size": "3449",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "manila/share/drivers/netapp/dataontap/client/client_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "953"
},
{
"name": "Python",
"bytes": "4010933"
},
{
"name": "Shell",
"bytes": "40516"
}
],
"symlink_target": ""
} |
import logging
class BrieLogging:
logger = None
@staticmethod
def initializeLog():
        # create the 'Brie' logger
BrieLogging.logger = logging.getLogger('Brie')
BrieLogging.logger.setLevel(logging.DEBUG)
        # create file handler which logs info-level and above messages
fh1 = logging.FileHandler('brie.log')
fh1.setLevel(logging.INFO)
# create file handler which logs even debug messages
fh2 = logging.FileHandler('brie_debug.log')
fh2.setLevel(logging.DEBUG)
# create console handler with a higher log level
#ch = logging.StreamHandler()
#ch.setLevel(logging.ERROR)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
fh1.setFormatter(formatter)
fh2.setFormatter(formatter)
# add the handlers to the logger
BrieLogging.logger.addHandler(fh1)
BrieLogging.logger.addHandler(fh2)
#end def
@staticmethod
def get():
if BrieLogging.logger is None:
BrieLogging.initializeLog()
#end if
return BrieLogging.logger
#end def
#end class
| {
"content_hash": "1858b157e790e808ee35b834b816a0da",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 82,
"avg_line_length": 32.86842105263158,
"alnum_prop": 0.6261008807045636,
"repo_name": "Rbeuque74/brie-aurore",
"id": "38deb91c857ab0fb81fb889dfe36ab37ec7ddc31",
"size": "1265",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Brie/brie/lib/log_helper.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "7815"
},
{
"name": "HTML",
"bytes": "91166"
},
{
"name": "JavaScript",
"bytes": "3085"
},
{
"name": "Python",
"bytes": "210870"
}
],
"symlink_target": ""
} |
from conjureup import controllers, juju
from conjureup.app_config import app
from conjureup.models.provider import load_schema
class BaseRegionsController:
def __init__(self):
# can't just determine these during __init__ because the controller
# instance is cached and the BACK button means we can get called
# multiple times with different clouds selected
self._regions = {}
self._default_regions = {}
@property
def default_region(self):
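        # Resolve (and cache) a default region per cloud, falling back in
        # order: the provider's only region (when there is exactly one), the
        # cloud credentials' 'default-region', then the provider schema's
        # default_region.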
if app.provider.cloud not in self._default_regions:
app.provider.region = None
if len(app.provider.regions) == 1:
app.provider.region = list(app.provider.regions)[0]
if not app.provider.region:
creds = juju.get_credentials().get(app.provider.cloud, {})
app.provider.region = creds.get('default-region', None)
if not app.provider.region:
try:
schema = load_schema(app.provider.cloud)
app.provider.region = schema.default_region
except Exception:
# if we can't find a schema for this cloud,
# just assume no default
pass
self._default_regions[app.provider.cloud] = app.provider.region
return self._default_regions[app.provider.cloud]
@property
def regions(self):
if app.provider.cloud not in self._regions:
if app.provider.cloud_type in ['maas', 'localhost']:
# No regions for these providers
regions = []
elif len(app.provider.regions) > 0:
regions = app.provider.regions
else:
regions = sorted(juju.get_regions(app.provider.cloud).keys())
self._regions[app.provider.cloud] = regions
return self._regions[app.provider.cloud]
def finish(self, region):
app.provider.region = region
controllers.use('providersetup').render()
| {
"content_hash": "0c99ed690ad36623e79944c47cf3aaf8",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 77,
"avg_line_length": 41.40816326530612,
"alnum_prop": 0.5914243469689502,
"repo_name": "ubuntu/conjure-up",
"id": "8672e12589300ead206576fd3f7ebbff771d06ca",
"size": "2029",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "conjureup/controllers/juju/regions/common.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2552"
},
{
"name": "Python",
"bytes": "470520"
},
{
"name": "Shell",
"bytes": "4588"
}
],
"symlink_target": ""
} |
import subprocess
import json
import math
import re
import lizard
from jinja2 import Environment, FileSystemLoader
from xml.dom import minidom
from decimal import Decimal
from analyzr.settings import CONFIG_PATH, PROJECT_PATH, LAMBDA
XML_ILLEGAL = u'([\u0000-\u0008\u000b-\u000c\u000e-\u001f\ufffe-\uffff])|([%s-%s][^%s-%s])|([^%s-%s][%s-%s])|([%s-%s]$)|(^[%s-%s])'
RE_XML_ILLEGAL = XML_ILLEGAL % (
unichr(0xd800),
unichr(0xdbff),
unichr(0xdc00),
unichr(0xdfff),
unichr(0xd800),
unichr(0xdbff),
unichr(0xdc00),
unichr(0xdfff),
unichr(0xd800),
unichr(0xdbff),
unichr(0xdc00),
unichr(0xdfff)
)
class CheckerException(Exception):
def __init__(self, checker, cmd, stdout="", stderr=""):
self.checker = checker
self.cmd = cmd
self.stdout = stdout
self.stderr = stderr
super(CheckerException, self).__init__()
def __str__(self):
value = "STDOUT:\n%s\n\nSTDERR:\n%s" % (self.stdout, self.stderr)
return "%s raised an error while running command:\n\n%s\n\n%s" % (
self.checker,
" ".join(self.cmd),
value
)
def __unicode__(self):
return self.__str__()
def __repr__(self):
return self.__unicode__()
class Checker(object):
def __init__(self, config_path, result_path):
self.measures = {}
self.env = Environment(loader=FileSystemLoader(CONFIG_PATH))
self.config_path = config_path
self.result_path = result_path
self.files = []
def __str__(self):
return self.__unicode__()
def includes(self, filename):
for f in self.files:
if f.endswith(filename):
return True
return False
def get_decimal(self, value):
return Decimal("%s" % round(float(value), 2))
def execute(self, cmd):
# close_fds must be true as python would otherwise reuse created
# file handles. this would cause a serious memory leak.
        # btw: the file handles are created because we pipe stdout and
# stderr to them.
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
stdout, stderr = proc.communicate()
if not proc.returncode == 0:
raise CheckerException(self, cmd, stdout=stdout, stderr=stderr)
return stdout
def stub(self):
return {
"cyclomatic_complexity": 0,
"halstead_volume": 0,
"halstead_difficulty": 0,
"fan_in": 0,
"fan_out": 0,
"sloc_absolute": 0,
"sloc": 0
}
def set(self, filename, key, value):
if not filename in self.measures:
self.measures[filename] = self.stub()
self.measures[filename][key] = self.get_decimal(value)
def get_value_in_range(self, value, low, high):
high = high * 1.0
low = low * 1.0
if value <= low:
return 3.0
if value >= high:
return 0.0
return 3.0 - 3.0 * (value / high)
def squale(self, marks):
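        # Squale-style hard mean: average the marks on an exponential LAMBDA
        # scale and map the result back, so that (for LAMBDA > 1) a few bad
        # marks pull the aggregate down much more than a plain average would.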
sum_marks = math.fsum([math.pow(LAMBDA, -1.0 * mark) for mark in marks])
return -1.0 * math.log(sum_marks / (1.0 * len(marks)), LAMBDA)
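    # The get_*_mark helpers below map raw metric values onto marks in the
    # range [0.0, 3.0] (3.0 = best) using fixed per-metric thresholds; the
    # per-file scores are then squale() aggregates of the per-method marks.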
def get_hv_mark(self, value):
return self.get_value_in_range(value, 20, 1000)
def get_hd_mark(self, value):
return self.get_value_in_range(value, 10, 50)
def get_cc_mark(self, value):
if value <= 2:
return 3.0
if value >= 20:
return 0.0
return math.pow(2, (7 - value) / 3.5)
def get_sloc_mark(self, value):
if value <= 37:
return 3.0
if value >= 162:
return 0.0
return math.pow(2, (70 - value) / 21.0)
def get_fan_in_mark(self, value):
if value <= 19:
return 3.0
if value >= 60:
return 0.0
return math.pow(2, (30 - value) / 7.0)
def get_fan_out_mark(self, value):
if value <= 6:
return 3.0
if value >= 19:
return 0.0
return math.pow(2, (10 - value) / 2.0)
def configure(self, files, revision, connector):
raise NotImplementedError
def run(self):
raise NotImplementedError
def parse(self, connector):
raise NotImplementedError
class JHawk(Checker):
# determines how many files are analyzed at once
# this is important as for revisions with a lot of files the
# generated report might not fit into main memory or can't
# be parsed.
FILE_BATCH_SIZE = 50
def __init__(self, config_path, result_path):
super(JHawk, self).__init__(config_path, result_path)
self.name = "jhawk"
self.files = []
self.configurations = []
self.results = []
def config_file(self, revision, part):
return "%s/%s_%d.xml" % (self.config_path, revision.identifier, part)
def result_file(self, revision, part):
return "%s/%s_%d" % (self.result_path, revision.identifier, part)
def configure(self, files, revision, connector):
for f in files:
self.files.append(f.full_path())
self.measures = {}
self.configurations = []
self.results = []
template = self.env.get_template("%s.xml" % self.name)
file_count = len(files)
chunks = int(math.ceil(file_count / self.FILE_BATCH_SIZE))
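        # file_count / FILE_BATCH_SIZE is integer division here (Python 2),
        # so the ceil() is a no-op and the remainder chunk is added explicitly
        # below.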
if not file_count % self.FILE_BATCH_SIZE == 0:
chunks = chunks + 1
for i in range(chunks):
start = i * self.FILE_BATCH_SIZE
end = min((i + 1) * self.FILE_BATCH_SIZE, file_count)
chunk = files[start:end]
filename = self.config_file(revision, i)
result_file = self.result_file(revision, i)
options = {
"checker": self.name,
"project_path": PROJECT_PATH,
"base_path": connector.get_repo_path(),
"target": result_file,
"filepattern": "|".join([".*/%s" % f.name for f in chunk])
}
with open(filename, "wb") as f:
f.write(template.render(options))
self.configurations.append(filename)
self.results.append(result_file)
self.revision = revision
def run(self):
for configuration in self.configurations:
cmd = [
"ant",
"-lib", "%s/lib/%s/JHawkCommandLine.jar" % (PROJECT_PATH, self.name),
"-f", configuration
]
self.execute(cmd)
# Don't allow multiple runs with the same configuration
self.configurations = []
return True
def get_metrics(self, parent):
for node in parent.childNodes:
if node.localName == "Metrics":
return node
def get_node_value(self, parent, node_name):
for node in parent.childNodes:
if node.localName == node_name:
return node.firstChild.nodeValue
def get_number(self, parent, node_name):
return float(self.get_node_value(parent, node_name))
def get_name(self, parent):
return self.get_node_value(parent, "Name")
def get_sloc_squale(self, methods):
marks = []
for method in methods:
metrics = self.get_metrics(method)
marks.append(self.get_sloc_mark(self.get_number(metrics, "loc")))
return self.squale(marks)
def get_hv_squale(self, methods):
marks = []
for method in methods:
metrics = self.get_metrics(method)
marks.append(self.get_hv_mark(self.get_number(metrics, "halsteadVolume")))
return self.squale(marks)
def add_halstead_metrics(self, filename, methods):
marks = []
for method in methods:
metrics = self.get_metrics(method)
volume = self.get_number(metrics, "halsteadVolume")
effort = self.get_number(metrics, "halsteadEffort")
difficulty = effort / volume
marks.append(self.get_hd_mark(difficulty))
self.set(filename, "halstead_difficulty", self.squale(marks))
self.set(filename, "halstead_volume", self.get_hv_squale(methods))
def get_cc_squale(self, methods):
marks = []
for method in methods:
metrics = self.get_metrics(method)
marks.append(self.get_cc_mark(self.get_number(metrics, "cyclomaticComplexity")))
return self.squale(marks)
def mark_faults(self, processed):
for f in self.files:
found = False
for p in processed:
if found:
continue
if f.endswith(p):
found = True
continue
if not found:
                # All files should have been processed; any that were not
                # must contain some kind of error
error = self.revision.get_file(f)
error.faulty = True
error.save()
def parse(self, connector):
processed = []
for result in self.results:
with open("%s.xml" % result, "r") as f:
content = f.read()
content = re.sub(RE_XML_ILLEGAL, "?", content)
xml_doc = minidom.parseString(content.encode("utf-8"))
packages = xml_doc.getElementsByTagName("Package")
for package in packages:
name = self.get_name(package)
classes = package.getElementsByTagName("Class")
path = name.replace(".", "/")
for cls in classes:
class_metrics = self.get_metrics(cls)
class_name = self.get_node_value(cls, "ClassName")
if "$" in class_name:
# private class inside of class
# ignore!
continue
filename = "%s/%s.java" % (path, class_name)
if not self.includes(filename):
continue
processed.append(filename)
methods = cls.getElementsByTagName("Method")
if len(methods) == 0:
continue
self.add_halstead_metrics(filename, methods)
self.set(filename, "cyclomatic_complexity", self.get_cc_squale(methods))
self.set(filename, "sloc", self.get_sloc_squale(methods))
self.set(filename, "sloc_absolute", self.get_node_value(class_metrics, "loc"))
fan_in = self.get_number(class_metrics, "fanIn")
fan_out = self.get_number(class_metrics, "fanOut")
self.set(filename, "fan_in", self.get_fan_in_mark(fan_in))
self.set(filename, "fan_out", self.get_fan_out_mark(fan_out))
self.mark_faults(processed)
return self.measures
def __unicode__(self):
return "JHawk Java Checker"
class ComplexityReport(Checker):
def __init__(self, config_path, result_path):
super(ComplexityReport, self).__init__(config_path, result_path)
self.files = []
def __unicode__(self):
return "Complexity Report JavaScript Checker"
def result_file(self, revision):
return "%s/%s" % (self.result_path, revision.identifier)
def configure(self, files, revision, connector):
self.result = self.result_file(revision)
self.files = files
self.base_path = connector.get_repo_path()
def get_file_path(self, f):
return "%s/%s" % (self.base_path, f.full_path())
def run(self):
self.failed = []
for f in self.files:
path = self.get_file_path(f)
result = "%s_%s.json" % (self.result, f.get_identifier())
cmd = ["cr", "-f", "json", "-o", result, path]
try:
self.execute(cmd)
            except CheckerException as e:
if not e.stdout.startswith("Fatal error") and not e.stderr.startswith("Fatal error"):
raise e
# Ignore syntax errors in checked files
self.failed.append(f.get_identifier())
# mark file as faulty
f.faulty = True
f.save()
return True
def get_cc_squale(self, functions):
marks = []
for function in functions:
marks.append(self.get_cc_mark(function["cyclomatic"]))
return self.squale(marks)
def get_hv_squale(self, functions):
marks = []
for function in functions:
marks.append(self.get_hv_mark(function["halstead"]["volume"]))
return self.squale(marks)
def get_hd_squale(self, functions):
marks = []
for function in functions:
marks.append(self.get_hd_mark(function["halstead"]["difficulty"]))
return self.squale(marks)
def get_sloc_squale(self, functions):
marks = []
for function in functions:
marks.append(self.get_sloc_mark(function["sloc"]["logical"]))
return self.squale(marks)
def parse(self, connector):
for f in self.files:
identifier = f.get_identifier()
if identifier in self.failed:
continue
path = "%s_%s.json" % (self.result, identifier)
with open(path) as result:
contents = json.load(result)
if not contents or not "reports" in contents or not contents["reports"]:
continue
data = contents["reports"][0]
if len(data["functions"]) == 0:
continue
filename = f.full_path()
functions = data["functions"]
self.set(filename, "cyclomatic_complexity", self.get_cc_squale(functions))
self.set(filename, "halstead_volume", self.get_hv_squale(functions))
self.set(filename, "halstead_difficulty", self.get_hd_squale(functions))
self.set(filename, "sloc", self.get_sloc_squale(functions))
self.set(filename, "sloc_absolute", data["aggregate"]["sloc"]["logical"])
return self.measures
class Lizard(Checker):
def __init__(self, config_path, result_path):
self.files = []
super(Lizard, self).__init__(config_path, result_path)
def configure(self, files, revision, connector):
self.files = files
self.path = connector.get_repo_path()
def run(self):
for f in self.files:
result = lizard.analyze_file("%s/%s" % (self.path, f.full_path()))
self.set(f.full_path(), "sloc", result.nloc)
self.set(f.full_path(), "cyclomatic_complexity", result.average_CCN)
def average(self, functions):
if len(functions) == 0:
return 0
return sum([function.cyclomatic_complexity for function in functions]) / len(functions)
def parse(self, connector):
return self.measures
| {
"content_hash": "f7e34afa183c053360ea5efbf46c9624",
"timestamp": "",
"source": "github",
"line_count": 535,
"max_line_length": 131,
"avg_line_length": 28.485981308411215,
"alnum_prop": 0.5496062992125984,
"repo_name": "frontendphil/analyzr",
"id": "fb2baf6b6ff7eba70417004065cdde34e6f8c22c",
"size": "15240",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parsr/checkers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "28706"
},
{
"name": "HTML",
"bytes": "21250"
},
{
"name": "JavaScript",
"bytes": "177454"
},
{
"name": "Python",
"bytes": "535628"
}
],
"symlink_target": ""
} |
import json
import requests
import sys
import time
import datetime
def indent(level):
retval = ""
for i in range(0, level):
retval += " "
return retval
def get_most_recent_test(series):
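    # Query roughly the last 250000 seconds (~3 days) of the series and return
    # the id of its most recent test, or None if nothing was found.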
res = requests.get("http://cstar.datastax.com/api/series/{series}/{ts_start}/{ts_end}".format(
series=series, ts_start=int(time.time() - 250000), ts_end=int(time.time())))
data = json.loads(res.text)
if data and data['series'] and len(data['series']) > 0:
return data['series'][-1]
return None
def generate_graphs(cached=False):
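    # Build the dashboard HTML: one section per regression series, each with a
    # link to its most recent test run and thumbnail graphs for the
    # operation/metric pairs listed below; cached=True points the <img> tags
    # at the "cached" graph endpoints.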
compaction_operation = [
{'name': 'Initial write throughput', 'op': '1_write', 'metric': 'op_rate'},
{'name': 'Initial read throughput', 'op': '4_read', 'metric': 'op_rate'},
{'name': 'Second read throughput', 'op': '5_read', 'metric': 'op_rate'},
{'name': 'Initial write P99.9', 'op': '1_write', 'metric': '99.9th_latency'},
{'name': 'Initial read P99.9', 'op': '4_read', 'metric': '99.9th_latency'},
{'name': 'Second read P99.9', 'op': '5_read', 'metric': '99.9th_latency'},
# {'name': 'Compaction elapsed time', 'op': '3_nodetool', 'metric': 'elapsed_time'}
]
simple_operation = [
{'name': 'Initial write throughput', 'op': '1_write', 'metric': 'op_rate'},
{'name': 'Initial read throughput', 'op': '2_read', 'metric': 'op_rate'},
{'name': 'Second read throughput', 'op': '3_read', 'metric': 'op_rate'},
{'name': 'Initial write P99.9', 'op': '1_write', 'metric': '99.9th_latency'},
{'name': 'Initial read P99.9', 'op': '2_read', 'metric': '99.9th_latency'},
{'name': 'Second read P99.9', 'op': '3_read', 'metric': '99.9th_latency'}
]
repair_operation = [
{'name': 'Initial read throughput', 'op': '4_read', 'metric': 'op_rate'},
{'name': 'Second read throughput', 'op': '5_read', 'metric': 'op_rate'},
{'name': 'Initial read P99.9', 'op': '4_read', 'metric': '99.9th_latency'},
{'name': 'Second read P99.9', 'op': '5_read', 'metric': '99.9th_latency'},
# {'name': 'Repair elapsed time', 'op': '3_nodetool', 'metric': 'elapsed_time'}
]
rolling_upgrade = [
{'name': 'Initial write throughput', 'op': '4_write', 'metric': 'op_rate'},
{'name': 'Initial read throughput', 'op': '5_read', 'metric': 'op_rate'},
{'name': 'Initial write P99.9', 'op': '4_write', 'metric': '99.9th_latency'},
{'name': 'Initial read P99.9', 'op': '5_read', 'metric': '99.9th_latency'}
]
mv_operation = [
{'name': 'write', 'op': '1_user', 'metric': 'op_rate'}
]
series_list = {
'daily_regressions_trunk-compaction': compaction_operation,
'daily_regressions_trunk-commitlog_sync': simple_operation,
'daily_regressions_trunk-read_write': simple_operation,
'daily_regressions_trunk-repair_10M': repair_operation,
'daily_regressions_trunk-compaction_lcs': compaction_operation,
'daily_regressions_trunk-compaction_stcs': compaction_operation,
'daily_regressions_trunk-compaction_dtcs': compaction_operation,
'daily_regressions_trunk-rolling_upgrade': rolling_upgrade,
'daily_regressions_trunk-materialized_views_write_3_mv': mv_operation,
'daily_regressions_trunk-materialized_views_write_1_mv': mv_operation
}
retval = ""
retval += "<html>\n"
retval += indent(1) + "<head>\n"
retval += indent(2) + '<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/css/bootstrap.min.css" integrity="sha384-1q8mTJOASx8j1Au+a5WDVnPi2lkFfwwEAa8hDDdjZlpLegxhjVME1fgjWPGmkzs7" crossorigin="anonymous">\n'
retval += indent(2) + '<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.3/jquery.min.js"></script>\n'
retval += indent(2) + '<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/js/bootstrap.min.js" integrity="sha384-0mSbJDEHialfmuBBQP6A4Qrprq5OVfW37PRR3j5ELqxss1yVqOtnepnHVP9aJ7xS" crossorigin="anonymous"></script>\n'
retval += indent(2) + '<style>body { margin: 15px; }</style>\n'
retval += indent(1) + "</head>\n"
retval += indent(1) + "<body>\n"
retval += indent(1) + "<h2>Daily C*Perf Regression Dashboard\n"
if cached:
retval += indent(1) + '<span style="color: red; font-size: 0.5em;">cached {} <a href="#" onclick="confirm(\'Loading/Updating the cached images is extremely expensive. Are you sure?\') == true ? window.location = \'dashboard_uncached.html\' : false; ">non-cached version</a></span>'.format(datetime.datetime.now().isoformat(' '))
retval += indent(1) + "</h2>\n"
for series, operations in series_list.iteritems():
retval += indent(2) + "<h3>" + series + "</h3>\n"
retval += indent(2) + "<h4>Most Recent Test Run: \n"
id = get_most_recent_test(series)
if id:
retval += indent(3) + '<a href="http://cstar.datastax.com/tests/id/{id}">Details</a> \n'.format(id=id)
retval += indent(3) + '<a href="http://cstar.datastax.com/tests/artifacts/{id}/graph">Graph</a></br>\n'.format(id=id)
else:
retval += indent(3) + ' (unavailable)</br>\n'
retval += indent(2) + "</h4>\n"
retval += indent(2) + '<div class="row">\n'
for operation in operations:
retval += indent(3) + '<div class="col-sm-6 col-md-4">\n'
retval += indent(4) + '<div class="thumbnail">\n'
retval += indent(5) + '<a href="#" class="popimage">\n'.format(
series=series, op=operation['op'], metric=operation['metric']
)
retval += indent(6) + "<img src='http://cstar.datastax.com/api/series/" + series + "/2538000/graph/"
if cached:
retval += "cached/"
retval += operation['op'] + "/" + operation['metric'] + ".png'/>\n"
retval += indent(5) + '</a>\n'
retval += indent(5) + '<div class="caption">\n'
retval += indent(6) + "{}\n".format(operation['name'])
retval += indent(5) + '</div>\n'
retval += indent(4) + "</div>\n"
retval += indent(3) + "</div>\n"
retval += indent(2) + "</div>\n"
retval += indent(2) + "<br/>\n"
retval += indent(2) + """
<!-- Creates the bootstrap modal where the image will appear -->
<div class="modal fade" id="imagemodal" tabindex="-1" role="dialog" aria-labelledby="myModalLabel" aria-hidden="true">
<div class="modal-dialog">
<div class="modal-content">
<div class="modal-header">
<button type="button" class="close" data-dismiss="modal"><span aria-hidden="true">×</span><span class="sr-only">Close</span></button>
<h4 class="modal-title" id="myModalLabel">Full Size</h4>
</div>
<div class="modal-body">
<img src="" id="imagepreview">
</div>
<div class="modal-footer">
<button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
</div>
</div>
</div>
</div>
<script>
$(document).ready(function() {
$(".popimage").on("click", function(e) {
e.preventDefault();
$('#imagepreview').attr('src', $(this).children(":first").attr('src'));
$('#imagemodal').modal('show');
});
$('#imagemodal').on('shown.bs.modal', function () {
$(this).find('.modal-dialog').css({width:'945px',
height:'auto',
'max-height':'100%'});
});
});
</script>
"""
retval += indent(1) + "</body>\n"
retval += "</html>\n"
return retval
if __name__ == "__main__":
print generate_graphs(len(sys.argv) == 2 and sys.argv[1] == 'cached')
# print generate_graphs(True)
| {
"content_hash": "efee413d49dfac222570e292049730e9",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 337,
"avg_line_length": 46.883720930232556,
"alnum_prop": 0.5525793650793651,
"repo_name": "mambocab/cstar_perf",
"id": "97a32979060a02fcae7dd89f390598388ff94ac7",
"size": "8082",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "regression_suites/regression_html_generator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6321"
},
{
"name": "HTML",
"bytes": "29680"
},
{
"name": "JavaScript",
"bytes": "75081"
},
{
"name": "Nginx",
"bytes": "5066"
},
{
"name": "Python",
"bytes": "632311"
},
{
"name": "Ruby",
"bytes": "2417"
}
],
"symlink_target": ""
} |
import json
import numpy
import time
import pyspark
from pyspark.ml.classification import LogisticRegression as MLLogisticRegression
from pyspark.ml.regression import LinearRegression as MLLinearRegression
from pyspark.mllib.classification import *
from pyspark.mllib.clustering import *
from pyspark.mllib.regression import *
from pyspark.mllib.recommendation import *
from pyspark.mllib.stat import *
from mllib_data import *
class PerfTest:
def __init__(self, sc):
self.sc = sc
def initialize(self, options):
self.options = options
def createInputData(self):
raise NotImplementedError
def run(self):
"""
:return: List of [trainingTime, testTime, trainingMetric, testMetric] tuples,
or list of [time] tuples
"""
raise NotImplementedError
class NonPredictionTest(PerfTest):
def __init__(self, sc):
PerfTest.__init__(self, sc)
def runTest(self):
raise NotImplementedError
def run(self):
"""
:return: List of [time] 1-element tuples
"""
options = self.options
results = []
for i in range(options.num_trials):
start = time.time()
self.runTest()
runtime = time.time() - start
results.append([runtime])
time.sleep(options.inter_trial_wait)
return results
class PredictionTest(PerfTest):
def __init__(self, sc):
PerfTest.__init__(self, sc)
def train(self, rdd):
"""
:return: Trained model to be passed to test.
"""
raise NotImplementedError
def evaluate(self, model, rdd):
"""
:return: Evaluation metric for model on the given data.
"""
raise NotImplementedError
def run(self):
options = self.options
self.trainRDD.cache() # match Scala tests for caching before computing testTime
self.trainRDD.count()
results = []
for i in range(options.num_trials):
# Train
start = time.time()
model = self.train(self.trainRDD)
trainingTime = time.time() - start
# Measure test time on training set since it is probably larger.
start = time.time()
print 'computing trainingMetric...'
trainingMetric = self.evaluate(model, self.trainRDD)
print ' done computing trainingMetric'
testTime = time.time() - start
# Test
print 'computing testMetric...'
testMetric = self.evaluate(model, self.testRDD)
print ' done computing testMetric'
results.append([trainingTime, testTime, trainingMetric, testMetric])
time.sleep(options.inter_trial_wait)
return results
@classmethod
def _evaluateAccuracy(cls, model, rdd):
"""
:return: 0/1 classification accuracy as percentage for model on the given data.
"""
acc = rdd.map(lambda lp: 1.0 if lp.label == model.predict(lp.features) else 0.0).mean()
return 100.0 * acc
@classmethod
def _evaluateRMSE(cls, model, rdd):
"""
:return: root mean squared error (RMSE) for model on the given data.
"""
squaredError =\
rdd.map(lambda lp: numpy.square(lp.label - model.predict(lp.features))).mean()
return numpy.sqrt(squaredError)
class GLMTest(PredictionTest):
def __init__(self, sc):
PredictionTest.__init__(self, sc)
def createInputData(self):
options = self.options
numTrain = options.num_examples
numTest = int(options.num_examples * 0.2)
self.trainRDD = LabeledDataGenerator.generateGLMData(
self.sc, numTrain, options.num_features,
options.num_partitions, options.random_seed, labelType=2)
self.testRDD = LabeledDataGenerator.generateGLMData(
self.sc, numTest, options.num_features,
options.num_partitions, options.random_seed + 1, labelType=2)
class GLMClassificationTest(GLMTest):
def __init__(self, sc):
GLMTest.__init__(self, sc)
def train(self, rdd):
"""
:return: Trained model to be passed to test.
"""
options = self.options
if options.reg_type == "elastic-net": # use spark.ml
lr = MLLogisticRegression(maxIter=options.num_iterations, regParam=options.reg_param,
elasticNetParam=options.elastic_net_param)
# TODO: Do not include time for conversion to DataFrame (but this currently matches
# the Scala tests)
df = rdd.toDF()
lrModel = lr.fit(df)
numFeatures = len(lrModel.weights)
numClasses = 2
return LogisticRegressionModel(lrModel.weights, lrModel.intercept,
numFeatures, numClasses)
else:
if options.loss == "logistic":
if options.optimizer == "sgd":
return LogisticRegressionWithSGD.train(data=rdd,
iterations=options.num_iterations,
step=options.step_size,
miniBatchFraction=1.0,
regParam=options.reg_param,
regType=options.reg_type)
elif options.optimizer == "l-bfgs":
return LogisticRegressionWithLBFGS.train(data=rdd,
iterations=options.num_iterations,
regParam=options.reg_param,
regType=options.reg_type,
tolerance=0.0)
else:
raise Exception("GLMClassificationTest cannot run with loss = %s,"
" optimizer = %s" % (options.loss, options.optimizer))
elif options.loss == "hinge":
if options.optimizer == "sgd":
return SVMWithSGD.train(data=rdd, iterations=options.num_iterations,
step=options.step_size, regParam=options.reg_param,
miniBatchFraction=1.0, regType=options.reg_type)
else:
raise Exception("GLMClassificationTest does not recognize loss: %s" % options.loss)
def evaluate(self, model, rdd):
return PredictionTest._evaluateAccuracy(model, rdd)
class GLMRegressionTest(GLMTest):
def __init__(self, sc):
GLMTest.__init__(self, sc)
def train(self, rdd):
"""
This ignores the optimizer parameter since it makes config difficult for Linear Regression.
:return: Trained model to be passed to test.
"""
options = self.options
if options.loss == "l2":
if options.reg_type in ["none", "l1", "l2"]:
return LinearRegressionWithSGD.train(data=rdd,
iterations=options.num_iterations,
step=options.step_size,
miniBatchFraction=1.0,
regParam=options.reg_param,
regType=options.reg_type)
elif options.reg_type == "elastic-net": # use spark.ml
lr = MLLinearRegression(maxIter=options.num_iterations, regParam=options.reg_param,
elasticNetParam=options.elastic_net_param)
# TODO: Do not include time for conversion to DataFrame (but this currently matches
# the Scala tests)
df = rdd.toDF()
lrModel = lr.fit(df)
return LinearRegressionModel(lrModel.weights, lrModel.intercept)
else:
raise Exception("GLMRegressionTest cannot run with loss = %s, reg_type = %s" \
% (options.loss, options.reg_type))
else:
raise Exception("GLMRegressionTest does not recognize loss: %s" % options.loss)
def evaluate(self, model, rdd):
return PredictionTest._evaluateRMSE(model, rdd)
class NaiveBayesTest(PredictionTest):
def __init__(self, sc):
PredictionTest.__init__(self, sc)
def createInputData(self):
options = self.options
numTrain = options.num_points
numTest = int(options.num_points * 0.2)
self.trainRDD = LabeledDataGenerator.generateGLMData(
self.sc, numTrain, options.num_features,
options.num_partitions, options.random_seed, labelType=2)
self.testRDD = LabeledDataGenerator.generateGLMData(
self.sc, numTest, options.num_features,
options.num_partitions, options.random_seed + 1, labelType=2)
def evaluate(self, model, rdd):
return PredictionTest._evaluateAccuracy(model, rdd)
def train(self, rdd):
        return NaiveBayes.train(rdd, lambda_=self.options.nb_lambda)
class KMeansTest(NonPredictionTest):
def __init__(self, sc):
NonPredictionTest.__init__(self, sc)
def createInputData(self):
options = self.options
self.data = FeaturesGenerator.generateContinuousData(
self.sc, options.num_points, options.num_columns,
options.num_partitions, options.random_seed)
def runTest(self):
        options = self.options
        model = KMeans.train(self.data, k=options.num_centers,
                             maxIterations=options.num_iterations)
class ALSTest(PredictionTest):
def __init__(self, sc):
PredictionTest.__init__(self, sc)
def createInputData(self):
options = self.options
numTrain = options.num_ratings
numTest = int(options.num_ratings * 0.2)
self.trainRDD = RatingGenerator.generateRatingData(
self.sc, options.num_users, options.num_products, numTrain,
options.implicit_prefs, options.num_partitions, options.random_seed)
self.testRDD = RatingGenerator.generateRatingData(
self.sc, options.num_users, options.num_products, numTest,
options.implicit_prefs, options.num_partitions, options.random_seed + 1)
def evaluate(self, model, rdd):
"""
:return: root mean squared error (RMSE) for model on the given ratings.
"""
implicit_prefs = self.options.implicit_prefs
predictions = model.predictAll(rdd.map(lambda r: (r[0], r[1])))
        sparkVersion = float(str(self.sc.version)[:3])
def mapPrediction(r):
if sparkVersion <= 1.1:
(user, product, rating) = (r[0], r[1], r[2])
else:
(user, product, rating) = (r.user, r.product, r.rating)
mappedRating = max(min(rating, 1.0), 0.0) if implicit_prefs else rating
return ((user, product), mappedRating)
predictionsAndRatings = \
predictions.map(mapPrediction).join(rdd.map(lambda r: ((r[0], r[1]), r[2]))).values()
return numpy.sqrt(predictionsAndRatings.map(lambda ab: numpy.square(ab[0] - ab[1])).mean())
def train(self, rdd):
        options = self.options
        if options.implicit_prefs:
            model = ALS.trainImplicit(rdd, rank=options.rank,
                                      iterations=options.num_iterations,
                                      lambda_=options.reg_param, blocks=options.num_partitions)
        else:
            model = ALS.train(rdd, rank=options.rank,
                              iterations=options.num_iterations,
                              lambda_=options.reg_param, blocks=options.num_partitions)
return model
class CorrelationTest(NonPredictionTest):
def __init__(self, sc):
NonPredictionTest.__init__(self, sc)
def createInputData(self):
options = self.options
self.data = FeaturesGenerator.generateContinuousData(
self.sc, options.num_rows, options.num_cols,
options.num_partitions, options.random_seed)
class PearsonCorrelationTest(CorrelationTest):
def __init__(self, sc):
CorrelationTest.__init__(self, sc)
def runTest(self):
corr = Statistics.corr(self.data, method="pearson")
class SpearmanCorrelationTest(CorrelationTest):
def __init__(self, sc):
CorrelationTest.__init__(self, sc)
def runTest(self):
corr = Statistics.corr(self.data, method="spearman")
if __name__ == "__main__":
import optparse
parser = optparse.OptionParser(usage="Usage: %prog [options] test_names")
# COMMON_OPTS
parser.add_option("--num-trials", type="int", default=1)
parser.add_option("--inter-trial-wait", type="int", default=3)
# MLLIB_COMMON_OPTS
parser.add_option("--num-partitions", type="int", default=10)
parser.add_option("--random-seed", type="int", default=5)
parser.add_option("--num-iterations", type="int", default=20)
parser.add_option("--reg-param", type="float", default=0.1)
parser.add_option("--rank", type="int", default=2)
# MLLIB_REGRESSION_CLASSIFICATION_TEST_OPTS
parser.add_option("--num-examples", type="int", default=1024)
parser.add_option("--num-features", type="int", default=50)
# MLLIB_GLM_TEST_OPTS
parser.add_option("--step-size", type="float", default=0.1)
parser.add_option("--reg-type", type="string", default="none")
parser.add_option("--loss", type="string", default="L2")
parser.add_option("--optimizer", type="string", default="sgd")
parser.add_option("--elastic-net-param", type="float", default=0.0)
# MLLIB_GLM_REGRESSION_TEST_OPTS
parser.add_option("--intercept", type="float", default=0.0)
parser.add_option("--label-noise", type="float", default=0.1)
# MLLIB_CLASSIFICATION_TEST_OPTS
parser.add_option("--feature-noise", type="float", default=1.0)
# NAIVE_BAYES_TEST_OPTS
parser.add_option("--per-negative", type="float", default=0.3)
parser.add_option("--nb-lambda", type="float", default=1.0)
parser.add_option("--model-type", type="string", default="multinomial")
# MLLIB_DECISION_TREE_TEST_OPTS
parser.add_option("--label-type", type="int", default=2)
parser.add_option("--frac-categorical-features", type="float", default=0.5)
parser.add_option("--frac-binary-features", type="float", default=0.5)
parser.add_option("--tree-depth", type="int", default=5)
parser.add_option("--max-bins", type="int", default=32)
# (for Spark 1.2+ only:)
parser.add_option("--ensemble-type", type="string", default="RandomForest")
parser.add_option("--num-trees", type="int", default=1)
parser.add_option("--feature-subset-strategy", type="string", default="auto")
# MLLIB_RECOMMENDATION_TEST_OPTS
parser.add_option("--num-users", type="int", default=60)
parser.add_option("--num-products", type="int", default=50)
parser.add_option("--num-ratings", type="int", default=500)
parser.add_option("--implicit-prefs", type="int", default=0)
# MLLIB_CLUSTERING_TEST_OPTS
parser.add_option("--num-points", type="int", default=1000)
parser.add_option("--num-columns", type="int", default=10)
parser.add_option("--num-centers", type="int", default=5)
# MLLIB_LINALG_TEST_OPTS + MLLIB_STATS_TEST_OPTS
parser.add_option("--num-rows", type="int", default=1000)
parser.add_option("--num-cols", type="int", default=10)
options, cases = parser.parse_args()
sc = pyspark.SparkContext(appName="MLlibTestRunner")
for name in cases:
test = globals()[name](sc)
test.initialize(options)
test.createInputData()
javaSystemProperties = sc._jvm.System.getProperties()
systemProperties = {}
for k in javaSystemProperties.keys():
if type(javaSystemProperties[k]) != unicode:
print "type(javaSystemProperties[k]) != unicode"
print "\t type(javaSystemProperties[k]) = %r" % type(javaSystemProperties[k])
systemProperties[k] = javaSystemProperties[k]
ts = test.run()
if len(ts) != test.options.num_trials:
raise Exception("mllib_tests.py FAILED (got %d results instead of %d)" %
(len(ts), test.options.num_trials))
results = []
if len(ts[0]) == 1:
# results include: time
print "Results from each trial:"
print "trial\ttime"
for trial in range(test.options.num_trials):
t = ts[trial]
print "%d\t%.3f" % (trial, t[0])
results.append({"time": t[0]})
else:
# results include: trainingTime, testTime, trainingMetric, testMetric
print "Results from each trial:"
print "trial\ttrainingTime\ttestTime\ttrainingMetric\ttestMetric"
for trial in range(test.options.num_trials):
t = ts[trial]
print "%d\t%.3f\t%.3f\t%.3f\t%.3f" % (trial, t[0], t[1], t[2], t[3])
results.append({"trainingTime": t[0], "testTime": t[1],
"trainingMetric": t[2], "testMetric": t[3]})
# JSON results
sparkConfInfo = {} # convert to dict to match Scala JSON
for (a,b) in sc._conf.getAll():
sparkConfInfo[a] = b
jsonResults = json.dumps({"testName": name,
"options": vars(options),
"sparkConf": sparkConfInfo,
"sparkVersion": sc.version,
"systemProperties": systemProperties,
"results": results},
separators=(',', ':')) # use separators for compact encoding
print "results: " + jsonResults
| {
"content_hash": "b6e7b0a17ff1a17a4130e948c7315886",
"timestamp": "",
"source": "github",
"line_count": 425,
"max_line_length": 99,
"avg_line_length": 42.80235294117647,
"alnum_prop": 0.5754493980539828,
"repo_name": "zsxwing/spark-perf",
"id": "133d7518e7002eb376ab103e535b9c21657b27fe",
"size": "18191",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyspark-tests/mllib_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "79856"
},
{
"name": "Scala",
"bytes": "233190"
},
{
"name": "Shell",
"bytes": "30207"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from .models import Lottery, LotteryParticipant, LotteryTranslation
class LotteryTranslationInlineAdmin(admin.StackedInline):
verbose_name = _(u'Translation')
verbose_name_plural = _(u'Translations')
model = LotteryTranslation
max_num = len(settings.LANGUAGES)
extra = 1
class LotteryAdmin(admin.ModelAdmin):
inlines = [LotteryTranslationInlineAdmin]
list_display = ['__unicode__', 'lan']
class LotteryParticipantAdmin(admin.ModelAdmin):
model = LotteryParticipant
admin.site.register(LotteryParticipant, LotteryParticipantAdmin)
admin.site.register(Lottery, LotteryAdmin)
| {
"content_hash": "9f96cd7fa1b8fc7dea5a602d75a18c62",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 67,
"avg_line_length": 28.307692307692307,
"alnum_prop": 0.7744565217391305,
"repo_name": "CasualGaming/studlan",
"id": "42a3963d65a8337bd43097470b54fcd318a9e614",
"size": "761",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "apps/lottery/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9222"
},
{
"name": "Dockerfile",
"bytes": "899"
},
{
"name": "HTML",
"bytes": "144147"
},
{
"name": "JavaScript",
"bytes": "18344"
},
{
"name": "Python",
"bytes": "342146"
},
{
"name": "Shell",
"bytes": "19209"
}
],
"symlink_target": ""
} |
import os
from paraview import simple
# -----------------------------------------------------------------------------
MODULE_PATH = os.path.dirname(os.path.abspath(__file__))
PLUGINS = [
'parflow.py'
]
FULL_PATHS = [
'/Applications/ParaView-5.6.0-1626-g52acf2f741.app/Contents/Plugins/ParFlow.so',
]
# -----------------------------------------------------------------------------
# Load the plugins
# -----------------------------------------------------------------------------
for plugin in PLUGINS:
simple.LoadPlugin(os.path.join(MODULE_PATH, plugin))
for plugin in FULL_PATHS:
simple.LoadPlugin(plugin)
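# Illustrative note on the two lists above: PLUGINS entries are resolved relative
# to this module's directory, while FULL_PATHS entries must be absolute paths to
# compiled plugin binaries; the ParFlow.so path is machine-specific and will
# usually need adjusting for other installations.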
| {
"content_hash": "bd65570651d6239f822819dea85f0329",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 84,
"avg_line_length": 26.083333333333332,
"alnum_prop": 0.4440894568690096,
"repo_name": "Kitware/HPCCloud",
"id": "1b8e3c325a97cccaf1c8c5f4654e6b07c027e9f1",
"size": "626",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pvw-dependencies/pv-flow/flow/plugins/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "412"
},
{
"name": "HTML",
"bytes": "4433"
},
{
"name": "JavaScript",
"bytes": "688975"
},
{
"name": "Python",
"bytes": "344597"
},
{
"name": "Shell",
"bytes": "1993"
}
],
"symlink_target": ""
} |
__author__ = 'Sergei F. Kliver'
import sys
import argparse
from BCBio import GFF
from MACE.Parsers.VCF import CollectionVCF, ReferenceGenome
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input_file", action="store", dest="input", required=True,
help="Input vcf file with variants.")
parser.add_argument("-o", "--output_file_prefix", action="store", dest="output_prefix", required=True,
help="Prefix of output files")
"""
parser.add_argument("-d", "--dpi", action="store", dest="dpi", type=int, default=300,
help="Dpi of figure")
parser.add_argument("-f", "--size_of_figure", action="store", dest="size_of_figure", type=lambda s: s.split(","),
default=(40, 40),
help="Size of figure in inches. X and Y values should be separated by comma. Default: 40,40")
"""
parser.add_argument("-e", "--output_formats", action="store", dest="output_formats", type=lambda s: s.split(","),
default=("svg", "png"),
help="Comma-separated list of formats (supported by matlotlib) of "
"output figure.Default: svg,png")
parser.add_argument("-l", "--suptitle", action="store", dest="suptitle", default="",
help="Suptitle of figure. Default: ''")
parser.add_argument("-g", "--max_gaps_and_masked_per_window_fraction", action="store",
dest="max_gaps_and_masked_per_window_fraction",
default=0.4,
type=float,
help="Maximum number of gaped and masked positions per window. "
"Windows with higher fraction will be shown as having 0 variants."
"Default: 0.4")
parser.add_argument("-r", "--reference_genome", action="store", dest="reference",
help="Fasta file with reference genome, required to draw gaps and chromosomes")
parser.add_argument("-m", "--masked_regions", action="store", dest="masked_regions",
type=lambda s: s.split(","),
help="Comma-separated list of GFF files with masked regions")
#parser.add_argument("-n", "--normalize", action="store_true", dest="normalize",
# help="Normalize counts by effective window size, i.e. "
# "divide counts by (window_size - gaps - masking. Default: False)")
parser.add_argument("-u", "--per_sample_plot", action="store_true", dest="per_sample_plot",
help="Separated plot per sample. Default: False")
parser.add_argument("-w", "--window_size", action="store", dest="window_size", default=100000, type=int,
help="Size of the windows Default: 100000")
parser.add_argument("-s", "--window_step", action="store", dest="window_step", default=None, type=int,
help="Step of the sliding windows. Default: window size, i.e windows are staking")
parser.add_argument("-p", "--parsing_mode", action="store", dest="parsing_mode", default="parse",
help="Parsing mode for input sequence file. "
"Possible variants: 'index_db', 'index', 'parse'(default)")
"""
parser.add_argument("-a", "--scaffold_white_list", action="store", dest="scaffold_white_list", default=[],
type=lambda s: s.split(","),
help="Comma-separated list of the only scaffolds to draw. Default: all")
parser.add_argument("-b", "--scaffold_black_list", action="store", dest="scaffold_black_list", default=[],
type=lambda s: s.split(","),
help="Comma-separated list of scaffolds to skip at drawing. Default: not set")
parser.add_argument("-y", "--sort_scaffolds", action="store_true", dest="sort_scaffolds", default=False,
help="Order scaffolds according to their names. Default: False")
parser.add_argument("-z", "--scaffold_ordered_list", action="store", dest="scaffold_ordered_list", default=[],
type=lambda s: s.split(","),
help="Comma-separated list of scaffolds to draw first and exactly in same order. "
"Scaffolds absent in this list are drawn last and in order according to vcf file . "
"Default: not set")
parser.add_argument("-q", "--figure_width", action="store", dest="figure_width", default=12, type=int,
help="Width of figure in inches. Default: 12")
parser.add_argument("-u", "--figure_height_scale_factor", action="store", dest="figure_height_scale_factor",
default=0.5, type=float,
help="Figure height scale factor. Figure height is calculated in inches as "
"int(figure_scale_factor * scaffold_number * sample_number). Default: 0.5")
"""
args = parser.parse_args()
variants = CollectionVCF(from_file=True, in_file=args.input, parse_only_coordinates=False)
variants.draw_heterozygous_snps_histogram(args.window_size,
args.window_step,
args.output_prefix,
args.reference,
gaps_and_masked_positions_max_fraction=args.max_gaps_and_masked_per_window_fraction,
masking_gff=args.masked_regions,
parsing_mode=args.parsing_mode,
per_sample_output=args.per_sample_plot,
plot_type="concatenated",
xlabel="Position in genome",
ylabel="Number of SNPs",
title="SNP counts in windows",
suptitle=args.suptitle,
extensions=args.output_formats,
masked_or_gaped_region_mark=0,
multiplier=1000)
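# Illustrative command line (file names are hypothetical; flags as defined above):
#
#   python draw_heterozygous_snps.py -i variants.vcf -o sample_hets \
#       -r reference.fasta -w 100000 -s 50000 -e svg,png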
| {
"content_hash": "fde14e2310fa108e8e56ee589119da89",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 126,
"avg_line_length": 61.84848484848485,
"alnum_prop": 0.5539768087538788,
"repo_name": "mahajrod/MACE",
"id": "b2c0752f255a3a59bae44e2d3c02c27d19b4183d",
"size": "6145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/old/draw_heterozygous_snps.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "924706"
},
{
"name": "Python",
"bytes": "492761"
},
{
"name": "Shell",
"bytes": "1699"
},
{
"name": "Terra",
"bytes": "4344300"
}
],
"symlink_target": ""
} |
print "I could have code like this." # and a passive agressive little comment
# like this...
# you can also use comments to disable stuff. AKA "Comment it out"
# you did this stuff a million times already man.
print "This will run ."
| {
"content_hash": "da5ed56c65e7c4ac214e2663c3375b85",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 78,
"avg_line_length": 29.75,
"alnum_prop": 0.7226890756302521,
"repo_name": "jaredmanning/learning",
"id": "0fc1c0e9597543dff2ae9f7b54325c71e93b8f1f",
"size": "292",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lpthw/ex2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "285"
},
{
"name": "Python",
"bytes": "34489"
},
{
"name": "Scheme",
"bytes": "8913"
}
],
"symlink_target": ""
} |
import euphoria as eu
import part
import json
class Board(eu.execgroup.ExecGroup):
"""
Manages all the parts of the board from one location.
"""
def __init__(self, *boards):
super().__init__(autoclean=False)
for b in boards:
self.add(b)
def get(self, nameshort):
for i in self.execs:
if i.name == nameshort or i.shortcut == nameshort:
return i
def get_all(self):
return self.execs
def names(self):
return [x.name for x in self.execs]
def shortcuts(self):
return [x.shortcut for x in self.execs]
def write(self, board, text):
towrite = self.get(board)
if towrite is not None:
towrite.write(text)
def update(self):
return "".join([c.update() for c in self.execs if c.update() is not None])
def get_dump(self):
parts = []
for b in self.execs:
parts.append({"roomname": b.roomname, "name": b.name, "shortcut": b.shortcut, "content": b.content, "delay": b.delay, "titles": b.titles, "separator": (b.separator, b.separator_multi)})
return json.dumps({"parts": parts})
def load_dump(self, dump):
parts = json.loads(dump)
#Load each section from the json
for b in parts["parts"]:
p = part.Part(b["roomname"], b["name"], b["shortcut"], b["delay"], b["separator"][0], b["separator"][1])
p.content = b["content"]
p.titles = b["titles"]
self.add(p)
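# Minimal round-trip sketch (untested, illustrative; assumes part.Part takes the
# same positional arguments used in load_dump() above and exposes write()):
#
#   b = Board(part.Part("room", "Notices", "n", 30, "\n", "\n\n"))
#   b.write("n", "hello world")
#   dump = b.get_dump()
#   restored = Board()
#   restored.load_dump(dump)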
| {
"content_hash": "6c4c0a11b0546725a28883320fcc0928",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 197,
"avg_line_length": 27.375,
"alnum_prop": 0.5609915198956295,
"repo_name": "jedevc/MusicNotice",
"id": "aee2ece7274ccb7309f626307326480d969f3376",
"size": "1533",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "source/board.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7170"
},
{
"name": "Shell",
"bytes": "73"
}
],
"symlink_target": ""
} |
from ..java import opcodes as JavaOpcodes, Classref
##########################################################################
# Pseudo instructions used to flag the offset position of other
# attributes of the code, especially those that depend on opcode offset.
##########################################################################
# A sentinel value to mark jumps that need to be resolved.
# This is used to differntiate from the "none" value indicating
# a currently open IF/TRY block.
RESOLVE = object()
class START_LOOP:
"""A pseudo instruction that can be used to mark the
start of a loop (either a FOR or a WHILE)
"""
def __init__(self):
# The opcode that is the first in the formal LOOP block
self.start_op = None
# The last operation in the LOOP block
self.end_op = None
        # If this LOOP can be identified as the start of a new
# line of source code, track that line.
self.starts_line = None
def process(self, context):
# Record the start of the if block
context.next_resolve_list.append((self, 'start_op'))
# Record this loop.
context.blocks.append(self)
# This opcode isn't for the final output.
return False
class END_LOOP:
"""A no-op pseudo instruction that can be used to mark the
end of a loop
"""
def __init__(self):
        # The opcode that is the first in the formal LOOP block
self.start_op = None
        # The last operation in the LOOP block - the target for
        # all loop-exiting jumps.
self.end_op = None
        # If this LOOP can be identified as the start of a new
# line of source code, track that line.
self.starts_line = None
def process(self, context):
        # Find the most recent loop block on the stack that hasn't been
        # ended. That's the loop this END_LOOP applies to.
for loop in context.blocks[::-1]:
if loop.end_op is None:
loop.end_op = RESOLVE
break
# A jump back to the start of the loop for another iteration
context.add_opcodes(jump(JavaOpcodes.GOTO(0), context, loop, Opcode.START))
# The next opcode is outside the LOOP block.
context.next_resolve_list.append((loop, 'next_op'))
context.next_resolve_list.append((loop, 'end_op'))
# The end operation is virtual, so all markers point
# to the next operation
context.next_resolve_list.append((self, 'start_op'))
context.next_resolve_list.append((self, 'end_op'))
context.next_resolve_list.append((self, 'next_op'))
# This opcode isn't for the final output.
return False
class IF:
"""Mark the start of an if-endif block.
This forms the first of at least two pseudo-operations. It might be
as simple as:
IF([], IFOPCODE)
...
END_IF()
or as complex as
IF([
... # commands to prepare stack for IF opcode
],
IFOPCODE
)
...
ELIF([
... # commands to prepare stack for next IF opcode
],
IFOPCODE
)
...
ELIF([
... # commands to prepare stack for next IF opcode
],
IFOPCODE
)
...
ELSE()
...
END_IF()
END_IF comes *after* the last operation in the relevant block;
but IF, ELIF and ELSE come *before* the first instruction. A little
special handling is required to account for this.
1. When the IF is created, it is empty, and has no start_op.
The empty pseudo-instruction is added to the blocks list.
2. When the instruction *after* the IF, ELIF or ELSE is processed,
it is post-processed to fill in the operation marking the
start of the relevant section.
3. When the first ELIF/ELSE is found, that marks the end of the IF; a
new elif block is registered. A GOTO is added, with no offset,
to the end of the IF block. The offset will be updated
when the END_IF is found.
    4. When a second or later ELIF/ELSE is found, that marks the end of the previous
ELIF; a new elif block is registered. A GOTO is added, with no offset,
to the end of the previous block. The offset will be updated
when the END_IF is found.
    5. When an END_IF is found, that marks the end of the current
IF-ELIF-ELSE block. An end_op is recorded; that means this if block
is no longer current.
IF-ELSE blocks can be nested, so when an ELIF, ELSE or END_IF is found,
it is recorded against the last IF in the blocks list
that has no end_op recorded.
"""
def __init__(self, commands, opcode):
# The commands to prepare the stack for the IF comparison
self.commands = commands if commands is not None else []
# print("CREATE IF", id(self), opcode)
# The opcode class to instantiate
self.opcode = opcode
# The opcode that is the first in the formal IF block
self.start_op = None
# The instantiated opcode for the comparison
self.if_op = None
        # The list of all ELIF/ELSE blocks associated with this IF
self.elifs = []
# The jump operation at the end of the IF, jumping to the
# end of the IF-ELSE block.
self.jump_op = None
# The last operation in the IF-ELSE block - the target for
# all if-exiting jumps.
self.end_op = None
# If this IF can be identified as the start of a new
# line of source code, track that line.
self.starts_line = None
def process(self, context):
# Record the start of the if block
context.next_resolve_list.append((self, 'start_op'))
        # Add the stack preparation commands to the code list
context.add_opcodes(*self.commands)
# Create an instance of the opcode and put it on the code list
self.if_op = self.opcode(0)
context.add_opcodes(self.if_op)
# Record this IF.
context.blocks.append(self)
# This opcode isn't for the final output.
return False
class ELIF:
def __init__(self, commands, opcode):
# The master IF block
self.if_block = None
# The commands to prepare the stack for the IF comparison
self.commands = commands if commands is not None else []
# The opcode class to instantiate
self.opcode = opcode
# The instantiated opcode for the comparison
        self.if_op = None
# The jump operation at the end of the ELIF, jumping to the
# end of the IF-ELSE block.
self.jump_op = None
# If this ELIF can be identified as the start of a new
# line of source code, track that line.
self.starts_line = None
def process(self, context):
# Find the most recent if block on the stack that hasn't been
# ended. That's the block that the elif applies to.
for if_block in context.blocks[::-1]:
if if_block.end_op is None:
if_block.end_op = RESOLVE
break
# If this is the first elif, add a GOTO and use it as the
# jump operation at the end of the IF block. If there are
# ELIFs, add the GOTO as the jump operation on the most
# recent ELIF.
jump_op = JavaOpcodes.GOTO(0)
context.add_opcodes(jump_op)
if len(if_block.elifs) == 0:
if_block.jump_op = jump_op
jump(if_block.if_op, context, self, Opcode.START)
else:
# print(" already got an endif")
            if_block.elifs[-1].jump_op = jump_op
            jump(if_block.elifs[-1].if_op, context, self, Opcode.START)
# Record the start of the elif block
context.next_resolve_list.append((self, 'start_op'))
if self.opcode:
# print(" this is an elif")
            # Add the stack preparation commands to the code list
context.add_opcodes(*self.commands)
# Create an instance of the opcode and put it on the code list
self.if_op = self.opcode(0)
context.add_opcodes(self.if_op)
# else:
# print(" this is an else")
if_block.elifs.append(self)
# print("IF BLOCKS: ", [(id(b), b.end_op) for b in context.blocks])
# This opcode isn't for the final output.
return False
class ELSE(ELIF):
def __init__(self):
# ELSE if an ELIF with no preparation and no comparison opcode
super().__init__(None, None)
class END_IF:
def __init__(self):
# If this IF can be identified as the start of a new
# line of source code, track that line.
self.starts_line = None
def process(self, context):
        # Find the most recent if block on the stack that hasn't been
        # ended. That's the block this END_IF applies to.
for if_block in context.blocks[::-1]:
if if_block.end_op is None:
if_block.end_op = RESOLVE
break
# If there aren't any ELIF/ELSE definitions, then the
# main if block jumps straight to the end.
if len(if_block.elifs) == 0:
jump(if_block.if_op, context, if_block, Opcode.NEXT)
        # Each of the 'end of block' jumps goes to the end as well.
if if_block.jump_op:
jump(if_block.jump_op, context, if_block, Opcode.NEXT)
for block in if_block.elifs:
if block.jump_op:
jump(block.jump_op, context, if_block, Opcode.NEXT)
# The next opcode is outside the IF/ELIF/ELSE block.
context.next_resolve_list.append((if_block, 'next_op'))
# This opcode isn't for the final output.
return False
class TRY:
"""Mark the start of a try-catch block.
This forms the first of at least three pseudo-operations:
TRY()
...
CATCH(['your/exception/descriptor1', 'your/exception/descriptor2'])
...
CATCH('your/other/exception')
...
CATCH()
...
END_TRY()
    END_TRY comes *after* the last operation in the relevant block;
but TRY and CATCH come *before* the first instruction. A little
special handling is required to account for this.
1. When the TRY is created, it is empty, and has no start_op.
The empty pseudo-instruction is added to the exception list.
2. When the instruction *after* the TRY or CATCH is processed,
it is post-processed to fill in the operation marking the
start of the relevant section.
3. When a CATCH is found, that marks the end of the TRY; a
new handler is registered in the current exception.
If it's the first CATCH, a GOTO is added, with no offset,
and recorded as the jump_op. The offset will be updated
when the END is found.
4. When an END is found, that marks the end of the current
exception. An end_op is recorded; that means this try block
is no longer current.
Exception blocks can be nested, so when a CATCH or END is found,
it is recorded against the last exception in the exception list
that has no end_op recorded.
"""
def __init__(self, else_block=None, finally_block=None):
# The first command covered by the try block
self.start_op = None
# The last command covered by the try
self.try_end_op = None
# The jump at the end of the try block, after any
# else and finally processing
self.jump_op = None
# The last command in the try/catch/else sequence
self.end_op = None
self.handlers = []
# The commands for the "else" block
self.else_block = else_block
# The commands for the "finally" block
self.finally_block = finally_block
# A handler for the finally block
self.finally_handler = None
# If this TRY can be identified as the start of a new
# line of source code, track that line.
self.starts_line = None
def process(self, context):
context.try_catches.append(self)
# print(" try-catches", [(id(t), t.end_op) for t in context.try_catches])
# The next opcode is the start of the TRY block.
context.next_resolve_list.append((self, 'start_op'))
# This opcode isn't for the final output.
return False
class CATCH:
def __init__(self, descriptors=None):
if descriptors is None:
self.descriptors = []
elif isinstance(descriptors, str):
self.descriptors = [descriptors]
else:
self.descriptors = descriptors
# If this CATCH can be identified as the start of a new
# line of source code, track that line.
self.starts_line = None
# The last command covered by the catch
self.catch_end_op = None
# The jump at the end of the catch block, after any
# finally processing
self.jump_op = None
def __len__(self):
# The CATCH needs to be able to pass as an opcode under initial
# post processing
return 3
def process(self, context):
# Find the most recent exception on the stack that hasn't been
# ended. That's the block that the catch applies to.
for try_catch in context.try_catches[::-1]:
if try_catch.end_op is None:
break
# print(" current try_catch", try_catch)
# print(" try-catches", [(id(t), t.end_op) for t in context.try_catches])
# If this is the first catch, insert a GOTO operation.
# The jump distance will be updated when all the CATCH blocks
# have been processed and the try_catch is converted.
# If it isn't the first catch, then this catch concludes the
# previous one. Add a goto to the end of the block, and
# record the end of the block for framing purposes.
end_jump = JavaOpcodes.GOTO(0)
if len(try_catch.handlers) == 0:
if try_catch.else_block or try_catch.finally_block:
context.next_resolve_list.append((try_catch, 'try_end_op'))
else:
try_catch.try_end_op = end_jump
if try_catch.else_block:
for command in try_catch.else_block.commands:
command.transpile(context)
if try_catch.finally_block:
for command in try_catch.finally_block.commands:
command.transpile(context)
try_catch.jump_op = end_jump
else:
if try_catch.else_block or try_catch.finally_block:
context.next_resolve_list.append((try_catch.handlers[-1], 'catch_end_op'))
else:
try_catch.handlers[-1].catch_end_op = end_jump
if try_catch.finally_block:
for command in try_catch.finally_block.commands:
command.transpile(context)
try_catch.handlers[-1].jump_op = end_jump
try_catch.handlers[-1].end_op = context.code[-1]
context.add_opcodes(end_jump)
jump(end_jump, context, try_catch, Opcode.NEXT)
# Add this catch block as a handler
try_catch.handlers.append(self)
# The next opcode is the start of the catch block.
context.next_resolve_list.append((self, 'start_op'))
# This opcode isn't for the final output.
return False
class FINALLY:
def __init__(self):
# If this FINALLY can be identified as the start of a new
# line of source code, track that line.
self.starts_line = None
# The last command covered by the finally
self.end_op = None
def __len__(self):
# The FINALLY needs to be able to pass as an opcode under initial
# post processing
return 3
def process(self, context):
# Find the most recent exception on the stack that hasn't been
# ended. That's the block that the catch applies to.
for try_catch in context.try_catches[::-1]:
if try_catch.end_op is None:
break
# print(" current try_catch", try_catch)
# print(" try-catches", [(id(t), t.end_op) for t in context.try_catches])
# If this is the first catch, insert a GOTO operation.
# The jump distance will be updated when all the CATCH blocks
# have been processed and the try_catch is converted.
# If it isn't the first catch, then this catch concludes the
# previous one. Add a goto to the end of the block, and
# record the end of the block for framing purposes.
end_jump = JavaOpcodes.GOTO(0)
if len(try_catch.handlers) == 0:
if try_catch.else_block or try_catch.finally_block:
context.next_resolve_list.append((try_catch, 'try_end_op'))
else:
try_catch.try_end_op = end_jump
if try_catch.else_block:
for command in try_catch.else_block.commands:
command.transpile(context)
if try_catch.finally_block:
for command in try_catch.finally_block.commands:
command.transpile(context)
try_catch.jump_op = end_jump
else:
if try_catch.else_block or try_catch.finally_block:
context.next_resolve_list.append((try_catch.handlers[-1], 'catch_end_op'))
else:
try_catch.handlers[-1].catch_end_op = end_jump
if try_catch.finally_block:
for command in try_catch.finally_block.commands:
command.transpile(context)
try_catch.handlers[-1].jump_op = end_jump
try_catch.handlers[-1].end_op = context.code[-1]
context.add_opcodes(end_jump)
jump(end_jump, context, try_catch, Opcode.NEXT)
# Add this catch block as a handler
try_catch.finally_handler = self
# The next opcode is the start of the finally block.
context.next_resolve_list.append((self, 'start_op'))
# This opcode isn't for the final output.
return False
class END_TRY:
def __init__(self):
# If this END TRY can be identified as the start of a new
# line of source code, track that line.
self.starts_line = None
def process(self, context):
# Find the most recent exception on the stack that hasn't been
# ended. That's the block we're ending.
for try_catch in context.try_catches[::-1]:
if try_catch.end_op is None:
try_catch.end_op = RESOLVE
break
# print(" current try_catch", try_catch)
# print(" try-catches", [(id(t), t.end_op) for t in context.try_catches])
if try_catch.finally_handler:
try_catch.finally_handler.end_op = context.code[-1]
elif len(try_catch.handlers) > 0:
try_catch.handlers[-1].end_op = context.code[-1]
try_catch.end_op = context.code[-1]
# The next opcode is the end of the try-catch block.
context.next_resolve_list.append((try_catch, 'next_op'))
# This opcode isn't for the final output.
return False
##########################################################################
# Local variables are stored in a dictionary, keyed by name,
# and with the value of the local variable register they are stored in.
#
# When a variable is deleted, a value of None is put in as the
# value.
##########################################################################
# A marker for deleted variable names.
DELETED = object()
def ALOAD_name(context, name):
"""Generate the opcode to load a variable with the given name onto the stack.
This looks up the local variable dictionary to find which
register is being used for that variable, using the optimized
register operations for the first 4 local variables.
"""
i = context.localvars[name]
if i == DELETED:
raise KeyError(name)
if i == 0:
return JavaOpcodes.ALOAD_0()
elif i == 1:
return JavaOpcodes.ALOAD_1()
elif i == 2:
return JavaOpcodes.ALOAD_2()
elif i == 3:
return JavaOpcodes.ALOAD_3()
else:
return JavaOpcodes.ALOAD(i)
def ASTORE_name(context, name):
"""Generate the opcode to store a variable with the given name.
This looks up the local variable dictionary to find which
register is being used for that variable, using the optimized
register operations for the first 4 local variables.
"""
    try:
        i = context.localvars[name]
        if i == DELETED:
            # The old register index was discarded when the name was deleted,
            # so allocate a fresh register instead of storing the sentinel.
            i = 1 + max((v for v in context.localvars.values() if v is not DELETED), default=-1)
            context.localvars[name] = i
    except KeyError:
        i = 1 + max((v for v in context.localvars.values() if v is not DELETED), default=-1)
        context.localvars[name] = i
if i == 0:
return JavaOpcodes.ASTORE_0()
elif i == 1:
return JavaOpcodes.ASTORE_1()
elif i == 2:
return JavaOpcodes.ASTORE_2()
elif i == 3:
return JavaOpcodes.ASTORE_3()
else:
return JavaOpcodes.ASTORE(i)
def ADELETE_name(context, name):
"""Remove a name from the localvar pool
"""
    value = context.localvars[name]
    if value == DELETED:
        raise KeyError("Local variable '%s' already deleted" % name)
    context.localvars[name] = DELETED
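# Illustrative walk-through of the name-to-register mapping (hypothetical names):
#
#   ASTORE_name(context, 'x')    # localvars == {'x': 0}           -> ASTORE_0
#   ASTORE_name(context, 'y')    # localvars == {'x': 0, 'y': 1}   -> ASTORE_1
#   ALOAD_name(context, 'x')     # -> ALOAD_0
#   ADELETE_name(context, 'x')   # localvars == {'x': DELETED, 'y': 1}
#   ALOAD_name(context, 'x')     # raises KeyError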
def ICONST_val(value):
"""Write an integer constant onto the stack.
There are a couple of opcodes that can be used to optimize the
loading of small integers; use them if possible.
"""
if isinstance(value, bool):
if value:
return JavaOpcodes.ICONST_1()
else:
return JavaOpcodes.ICONST_0()
elif isinstance(value, int):
if value == 0:
return JavaOpcodes.ICONST_0()
elif value == 1:
return JavaOpcodes.ICONST_1()
elif value == 2:
return JavaOpcodes.ICONST_2()
elif value == 3:
return JavaOpcodes.ICONST_3()
elif value == 4:
return JavaOpcodes.ICONST_4()
elif value == 5:
return JavaOpcodes.ICONST_5()
elif value == -1:
return JavaOpcodes.ICONST_M1()
else:
return JavaOpcodes.SIPUSH(value)
else:
raise RuntimeError("%s is not an integer constant" % value)
class Ref:
"""A reference to an opcode by target offset"""
def __init__(self, context, target):
self.context = context
self.target = target
def __repr__(self):
try:
return repr(self.context.jump_targets[self.target])
except KeyError:
return '<Ref: offset %s>' % self.target
@property
def start_op(self):
return self.context.jump_targets[self.target].start_op
@property
def end_op(self):
return self.context.jump_targets[self.target].end_op
@property
def next_op(self):
return self.context.jump_targets[self.target].next_op
def jump(opcode, context, target, position):
"""Define a jump operation.
The specific offset will be resolved once all the
Java opcodes have been instantiated
"""
# print(" add jump to reference %s %s %s %s..." % (opcode, id(opcode), target, position))
context.unknown_jump_targets.setdefault(target, []).append((opcode, position))
return opcode
def resolve_jump(opcode, context, target, position):
"""Resolve a jump target in an opcode.
target is the Python opcode (or a Ref instance).
When Python code is converted to Java, it will turn into
0-N opcodes. We need to specify which one will be used
as the Java offset:
* START - the first Java opcode generated from this Python opcode
* END - the last Java opcode generated from the Python opcode
* NEXT - the next Java opcode added after this block.
"""
# print("RESOLVE %s %s to %s %s" % (opcode, id(opcode), target, position))
if position == Opcode.START:
opcode.jump_op = target.start_op
elif position == Opcode.END:
opcode.jump_op = target.end_op
elif position == Opcode.NEXT:
opcode.jump_op = target.next_op
else:
raise Exception("Unknown opcode position")
context.jumps.append(opcode)
opcode.jump_op.references.append(opcode)
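# Illustrative two-phase use of the jump machinery (this mirrors JUMP_ABSOLUTE and
# POP_JUMP_IF_FALSE below; 'python_target' is a placeholder offset):
#
#   jump(JavaOpcodes.GOTO(0), context, Ref(context, python_target), Opcode.START)
#
# registers the GOTO as an unresolved jump; once the target Python opcode has been
# transpiled, resolve_jump() points the GOTO at the first Java opcode generated for
# that target and appends it to context.jumps for final offset patching.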
##########################################################################
# Base classes for defining opcodes.
##########################################################################
class Opcode:
START = 10
END = 20
NEXT = 30
def __init__(self, python_offset, starts_line, is_jump_target):
self.python_offset = python_offset
self.starts_line = starts_line
self.is_jump_target = is_jump_target
@property
def opname(self):
return self.__class__.__name__
def __repr__(self):
return self.opname + ': ' + self.__arg_repr__()
def __arg_repr__(self):
return ''
def transpile(self, context, arguments):
# If the Python opcode marks the start of a line of code,
# transfer that relationship to the first opcode in the
# generated Java code.
if self.starts_line:
context.next_opcode_starts_line = self.starts_line
context.next_resolve_list.append((self, 'start_op'))
n_ops = len(context.code)
# Actually convert the opcode. This is recursive down the Command sequence.
self.convert(context, arguments)
if len(context.code) == n_ops:
context.next_resolve_list.append((self, 'end_op'))
else:
self.end_op = context.code[-1]
context.next_resolve_list.append((self, 'next_op'))
# Save the code offset for the jump operation.
context.jump_targets[self.python_offset] = self
class UnaryOpcode(Opcode):
@property
def consume_count(self):
return 1
@property
def product_count(self):
return 1
def convert(self, context, arguments):
for argument in arguments:
argument.operation.transpile(context, argument.arguments)
context.add_opcodes(
JavaOpcodes.INVOKEINTERFACE(
'org/python/Object',
self.__method__,
'()Lorg/python/Object;'
)
)
class BinaryOpcode(Opcode):
@property
def consume_count(self):
return 2
@property
def product_count(self):
return 1
def convert(self, context, arguments):
for argument in arguments:
argument.operation.transpile(context, argument.arguments)
context.add_opcodes(
JavaOpcodes.INVOKEINTERFACE(
'org/python/Object',
self.__method__,
'(Lorg/python/Object;)Lorg/python/Object;'
)
)
class InplaceOpcode(Opcode):
@property
def consume_count(self):
return 2
@property
def product_count(self):
return 1
def convert(self, context, arguments):
arguments[0].operation.transpile(context, arguments[0].arguments)
context.add_opcodes(JavaOpcodes.DUP())
for argument in arguments[1:]:
argument.operation.transpile(context, argument.arguments)
context.add_opcodes(
JavaOpcodes.INVOKEINTERFACE(
'org/python/Object',
self.__method__,
'(Lorg/python/Object;)V'
)
)
##########################################################################
# The actual Python opcodes
##########################################################################
class POP_TOP(Opcode):
@property
def consume_count(self):
return 1
@property
def product_count(self):
return 0
def convert(self, context, arguments):
for argument in arguments:
argument.operation.transpile(context, argument.arguments)
# Ignore the top of the stack.
context.add_opcodes(JavaOpcodes.POP())
class ROT_TWO(Opcode):
@property
def consume_count(self):
return 2
@property
def product_count(self):
return 2
def convert(self, context, arguments):
context.add_opcodes(JavaOpcodes.SWAP())
# class ROT_THREE(Opcode):
class DUP_TOP(Opcode):
@property
def consume_count(self):
return 1
@property
def product_count(self):
return 2
def convert(self, context, arguments):
context.add_opcodes(JavaOpcodes.DUP())
class DUP_TOP_TWO(Opcode):
@property
def consume_count(self):
return 2
@property
def product_count(self):
return 4
def convert(self, context, arguments):
context.add_opcodes(JavaOpcodes.DUP2())
class NOP(Opcode):
@property
def consume_count(self):
return 0
@property
def product_count(self):
return 0
def convert(self, context, arguments):
context.add_opcodes(JavaOpcodes.NOP())
class UNARY_POSITIVE(UnaryOpcode):
__method__ = '__pos__'
class UNARY_NEGATIVE(UnaryOpcode):
__method__ = '__neg__'
class UNARY_NOT(UnaryOpcode):
__method__ = '__not__'
class UNARY_INVERT(UnaryOpcode):
__method__ = '__invert__'
class BINARY_POWER(BinaryOpcode):
__method__ = '__pow__'
class BINARY_MULTIPLY(BinaryOpcode):
__method__ = '__mul__'
class BINARY_MODULO(BinaryOpcode):
__method__ = '__mod__'
class BINARY_ADD(BinaryOpcode):
__method__ = '__add__'
class BINARY_SUBTRACT(BinaryOpcode):
__method__ = '__sub__'
class BINARY_SUBSCR(BinaryOpcode):
__method__ = '__getitem__'
class BINARY_FLOOR_DIVIDE(BinaryOpcode):
__method__ = '__floordiv__'
class BINARY_TRUE_DIVIDE(BinaryOpcode):
__method__ = '__truediv__'
class INPLACE_FLOOR_DIVIDE(InplaceOpcode):
__method__ = '__ifloordiv__'
class INPLACE_TRUE_DIVIDE(InplaceOpcode):
__method__ = '__itruediv__'
# class STORE_MAP(Opcode):
class INPLACE_ADD(InplaceOpcode):
__method__ = '__iadd__'
class INPLACE_SUBTRACT(InplaceOpcode):
__method__ = '__isub__'
class INPLACE_MULTIPLY(InplaceOpcode):
__method__ = '__imul__'
class INPLACE_MODULO(InplaceOpcode):
__method__ = '__imod__'
class STORE_SUBSCR(Opcode):
@property
def consume_count(self):
return 1
@property
def product_count(self):
return 1
def convert(self, context, arguments):
pass # FIXME
class DELETE_SUBSCR(Opcode):
@property
def consume_count(self):
return 1
@property
def product_count(self):
return 1
def convert(self, context, arguments):
pass # FIXME
class BINARY_LSHIFT(BinaryOpcode):
__method__ = '__lshift__'
class BINARY_RSHIFT(BinaryOpcode):
__method__ = '__rshift__'
class BINARY_AND(BinaryOpcode):
__method__ = '__and__'
class BINARY_XOR(BinaryOpcode):
__method__ = '__xor__'
class BINARY_OR(BinaryOpcode):
__method__ = '__or__'
class INPLACE_POWER(InplaceOpcode):
__method__ = '__ipow__'
class GET_ITER(Opcode):
@property
def consume_count(self):
return 1
@property
def product_count(self):
return 1
def convert(self, context, arguments):
for argument in arguments:
argument.operation.transpile(context, argument.arguments)
context.add_opcodes(
ASTORE_name(context, '##ITERABLE##'),
JavaOpcodes.ICONST_1(),
JavaOpcodes.ANEWARRAY('org/python/Object'),
JavaOpcodes.DUP(),
JavaOpcodes.ICONST_0(),
ALOAD_name(context, '##ITERABLE##'),
JavaOpcodes.AASTORE(),
JavaOpcodes.NEW('java/util/Hashtable'),
JavaOpcodes.DUP(),
JavaOpcodes.INVOKESPECIAL('java/util/Hashtable', '<init>', '()V'),
JavaOpcodes.INVOKESTATIC(
'org/Python',
'iter',
'([Lorg/python/Object;Ljava/util/Hashtable;)Lorg/python/Iterable;'
),
JavaOpcodes.CHECKCAST('org/python/Iterable'),
)
class PRINT_EXPR(Opcode):
@property
def consume_count(self):
return 1
@property
def product_count(self):
return 0
class LOAD_BUILD_CLASS(Opcode):
@property
def consume_count(self):
return 0
@property
def product_count(self):
return 1
# class YIELD_FROM(Opcode):
class INPLACE_LSHIFT(InplaceOpcode):
__method__ = '__ilshift__'
class INPLACE_RSHIFT(InplaceOpcode):
__method__ = '__irshift__'
class INPLACE_AND(InplaceOpcode):
__method__ = '__iand__'
class INPLACE_XOR(InplaceOpcode):
__method__ = '__ixor__'
class INPLACE_OR(InplaceOpcode):
__method__ = '__ior__'
# class BREAK_LOOP(Opcode):
# class WITH_CLEANUP(Opcode):
class RETURN_VALUE(Opcode):
@property
def consume_count(self):
return 1
@property
def product_count(self):
return 0
def convert(self, context, arguments):
for argument in arguments:
argument.operation.transpile(context, argument.arguments)
context.add_opcodes(JavaOpcodes.ARETURN())
# class IMPORT_STAR(Opcode):
# class YIELD_VALUE(Opcode):
class POP_BLOCK(Opcode):
@property
def consume_count(self):
return 0
@property
def product_count(self):
return 0
class END_FINALLY(Opcode):
@property
def consume_count(self):
return 2
@property
def product_count(self):
return 0
class POP_EXCEPT(Opcode):
@property
def consume_count(self):
return 0
@property
def product_count(self):
return 0
def convert(self, context, arguments):
# print("convert POP_EXCEPT", len(arguments))
for argument in arguments:
argument.operation.transpile(context, argument.arguments)
class STORE_NAME(Opcode):
def __init__(self, name, python_offset, starts_line, is_jump_target):
super().__init__(python_offset, starts_line, is_jump_target)
self.name = name
def __arg_repr__(self):
return str(self.name)
@property
def consume_count(self):
return 1
@property
def product_count(self):
return 0
def convert(self, context, arguments):
for argument in arguments:
argument.operation.transpile(context, argument.arguments)
# Depending on context, this might mean writing to local
# variables, class attributes, or to the global context.
context.store_name(self.name, arguments)
class DELETE_NAME(Opcode):
def __init__(self, name, python_offset, starts_line, is_jump_target):
super().__init__(python_offset, starts_line, is_jump_target)
self.name = name
def __arg_repr__(self):
return str(self.name)
@property
def consume_count(self):
return 0
@property
def product_count(self):
return 0
def convert(self, context, arguments):
for argument in arguments:
argument.operation.transpile(context, argument.arguments)
# Depending on context, this might mean deleting from local
# variables, class attributes, or to the global context.
context.delete_name(self.name, arguments)
class UNPACK_SEQUENCE(Opcode):
def __init__(self, count, python_offset, starts_line, is_jump_target):
super().__init__(python_offset, starts_line, is_jump_target)
self.count = count
def __arg_repr__(self):
return str(self.count)
@property
def consume_count(self):
return 1
@property
def product_count(self):
return self.count
def convert(self, context, arguments):
for argument in arguments:
argument.operation.transpile(context, argument.arguments)
context.add_opcodes(
ASTORE_name(context, '##TEMP_%d##' % id(self))
)
for i in range(self.count, 0, -1):
context.add_opcodes(
ALOAD_name(context, '##TEMP_%d##' % id(self)),
ICONST_val(i - 1),
JavaOpcodes.INVOKEINTERFACE('org/python/Object', '__getitem__', '(I)Lorg/python/Object;'),
)
class FOR_ITER(Opcode):
def __init__(self, target, python_offset, starts_line, is_jump_target):
super().__init__(python_offset, starts_line, is_jump_target)
self.target = target
def __arg_repr__(self):
return str(self.target)
@property
def consume_count(self):
return 1
@property
def product_count(self):
return 2
# class UNPACK_EX(Opcode):
class STORE_ATTR(Opcode):
def __init__(self, name, python_offset, starts_line, is_jump_target):
super().__init__(python_offset, starts_line, is_jump_target)
self.name = name
def __arg_repr__(self):
return str(self.name)
@property
def consume_count(self):
return 2
@property
def product_count(self):
return 0
def convert(self, context, arguments):
arguments[1].operation.transpile(context, arguments[1].arguments)
context.add_opcodes(JavaOpcodes.LDC(self.name))
arguments[0].operation.transpile(context, arguments[0].arguments)
context.add_opcodes(
JavaOpcodes.INVOKEINTERFACE('org/python/Object', '__setattr__', '(Ljava/lang/String;Lorg/python/Object;)V'),
)
# class DELETE_ATTR(Opcode):
class STORE_GLOBAL(Opcode):
def __init__(self, name, python_offset, starts_line, is_jump_target):
super().__init__(python_offset, starts_line, is_jump_target)
self.name = name
def __arg_repr__(self):
return str(self.name)
@property
def consume_count(self):
return 1
@property
def product_count(self):
return 0
def convert(self, context, arguments):
for argument in arguments:
argument.operation.transpile(context, argument.arguments)
# Depending on context, this might mean writing to local
# variables, class attributes, or to the global context.
context.store_name(self.name, arguments, allow_locals=False)
# class DELETE_GLOBAL(Opcode):
class LOAD_CONST(Opcode):
def __init__(self, const, python_offset, starts_line, is_jump_target):
super().__init__(python_offset, starts_line, is_jump_target)
self.const = const
def __arg_repr__(self):
return str(self.const)
@property
def consume_count(self):
return 0
@property
def product_count(self):
return 1
def _convert(self, context, arguments, const):
            # A None value has its own opcode.
            # If the constant is a small integer, we can use one of the
            # single-byte opcodes instead of a constant pool entry.
if const is None:
context.add_opcodes(JavaOpcodes.ACONST_NULL())
return
else:
if isinstance(const, bool):
context.add_opcodes(
JavaOpcodes.NEW('org/python/types/Bool'),
JavaOpcodes.DUP(),
ICONST_val(const),
JavaOpcodes.INVOKESPECIAL('org/python/types/Bool', '<init>', '(Z)V'),
)
elif isinstance(const, int):
context.add_opcodes(
JavaOpcodes.NEW('org/python/types/Int'),
JavaOpcodes.DUP(),
ICONST_val(const),
JavaOpcodes.INVOKESPECIAL('org/python/types/Int', '<init>', '(I)V'),
)
elif isinstance(const, float):
context.add_opcodes(
JavaOpcodes.NEW('org/python/types/Float'),
JavaOpcodes.DUP(),
JavaOpcodes.LDC2_W(const),
JavaOpcodes.INVOKESPECIAL('org/python/types/Float', '<init>', '(D)V'),
)
elif isinstance(const, str):
context.add_opcodes(
JavaOpcodes.NEW('org/python/types/Str'),
JavaOpcodes.DUP(),
JavaOpcodes.LDC(const),
JavaOpcodes.INVOKESPECIAL('org/python/types/Str', '<init>', '(Ljava/lang/String;)V'),
)
# elif isinstance(const, bytes):
# context.add_opcodes(
# JavaOpcodes.NEW('org/python/types/Bytes'),
# JavaOpcodes.DUP(),
# JavaOpcodes.LDC(const),
# JavaOpcodes.INVOKESPECIAL('org/python/types/Bytes', '<init>', '(Ljava/lang/String;)V'),
# )
elif isinstance(const, tuple):
context.add_opcodes(
JavaOpcodes.NEW('org/python/types/Tuple'),
JavaOpcodes.DUP(),
JavaOpcodes.NEW('java/util/ArrayList'),
JavaOpcodes.DUP(),
ICONST_val(len(const)),
JavaOpcodes.INVOKESPECIAL('java/util/ArrayList', '<init>', '(I)V')
)
for val in const:
context.add_opcodes(
JavaOpcodes.DUP(),
)
self._convert(context, arguments, val)
context.add_opcodes(
JavaOpcodes.INVOKEVIRTUAL('java/util/ArrayList', 'add', '(Ljava/lang/Object;)Z'),
JavaOpcodes.POP()
)
context.add_opcodes(
JavaOpcodes.INVOKESPECIAL('org/python/types/Tuple', '<init>', '(Ljava/util/ArrayList;)V'),
)
else:
raise RuntimeError("Unknown constant type %s" % type(const))
def convert(self, context, arguments):
self._convert(context, arguments, self.const)
class LOAD_NAME(Opcode):
def __init__(self, name, python_offset, starts_line, is_jump_target):
super().__init__(python_offset, starts_line, is_jump_target)
self.name = name
def __arg_repr__(self):
return str(self.name)
@property
def consume_count(self):
return 0
@property
def product_count(self):
return 1
def convert(self, context, arguments):
context.load_name(self.name)
class BUILD_TUPLE(Opcode):
def __init__(self, count, python_offset, starts_line, is_jump_target):
super().__init__(python_offset, starts_line, is_jump_target)
self.count = count
def __arg_repr__(self):
return str(self.count)
@property
def consume_count(self):
return self.count
@property
def product_count(self):
return 1
def convert(self, context, arguments):
context.add_opcodes(
JavaOpcodes.NEW('org/python/types/Tuple'),
JavaOpcodes.DUP(),
JavaOpcodes.NEW('java/util/ArrayList'),
JavaOpcodes.DUP(),
ICONST_val(self.count),
JavaOpcodes.INVOKESPECIAL('java/util/ArrayList', '<init>', '(I)V')
)
for argument in arguments:
context.add_opcodes(
JavaOpcodes.DUP(),
)
argument.operation.transpile(context, argument.arguments)
context.add_opcodes(
JavaOpcodes.INVOKEVIRTUAL('java/util/ArrayList', 'add', '(Ljava/lang/Object;)Z'),
JavaOpcodes.POP(),
)
context.add_opcodes(
JavaOpcodes.INVOKESPECIAL('org/python/types/Tuple', '<init>', '(Ljava/util/ArrayList;)V')
)
class BUILD_LIST(Opcode):
def __init__(self, count, python_offset, starts_line, is_jump_target):
super().__init__(python_offset, starts_line, is_jump_target)
self.count = count
def __arg_repr__(self):
return str(self.count)
@property
def consume_count(self):
return self.count
@property
def product_count(self):
return 1
def convert(self, context, arguments):
context.add_opcodes(
JavaOpcodes.NEW('org/python/types/List'),
JavaOpcodes.DUP(),
JavaOpcodes.NEW('java/util/ArrayList'),
JavaOpcodes.DUP(),
ICONST_val(self.count),
JavaOpcodes.INVOKESPECIAL('java/util/ArrayList', '<init>', '(I)V')
)
for argument in arguments:
context.add_opcodes(
JavaOpcodes.DUP(),
)
argument.operation.transpile(context, argument.arguments)
context.add_opcodes(
JavaOpcodes.INVOKEVIRTUAL('java/util/ArrayList', 'add', '(Ljava/lang/Object;)Z'),
JavaOpcodes.POP(),
)
context.add_opcodes(
JavaOpcodes.INVOKESPECIAL('org/python/types/List', '<init>', '(Ljava/util/ArrayList;)V')
)
class BUILD_SET(Opcode):
def __init__(self, count, python_offset, starts_line, is_jump_target):
super().__init__(python_offset, starts_line, is_jump_target)
self.count = count
def __arg_repr__(self):
return str(self.count)
@property
def consume_count(self):
return self.count
@property
def product_count(self):
return 1
# def convert(self, context, arguments):
# code = []
# return code
class BUILD_MAP(Opcode):
def __init__(self, count, python_offset, starts_line, is_jump_target):
super().__init__(python_offset, starts_line, is_jump_target)
self.count = count
def __arg_repr__(self):
return str(self.count)
@property
def consume_count(self):
return self.count
@property
def product_count(self):
return 1
# def convert(self, context, arguments):
# code = []
# return code
class LOAD_ATTR(Opcode):
def __init__(self, name, python_offset, starts_line, is_jump_target):
super().__init__(python_offset, starts_line, is_jump_target)
self.name = name
def __arg_repr__(self):
return str(self.name)
@property
def consume_count(self):
return 1
@property
def product_count(self):
return 1
def convert(self, context, arguments):
# print("LOAD_ATTR", context, arguments)
arguments[0].operation.transpile(context, arguments[0].arguments)
context.add_opcodes(JavaOpcodes.LDC(self.name))
context.add_opcodes(
JavaOpcodes.INVOKEINTERFACE('org/python/Object', '__getattr__', '(Ljava/lang/String;)Lorg/python/Object;'),
)
class COMPARE_OP(Opcode):
def __init__(self, comparison, python_offset, starts_line, is_jump_target):
super().__init__(python_offset, starts_line, is_jump_target)
self.comparison = comparison
def __arg_repr__(self):
return self.comparison
@property
def consume_count(self):
return 2
@property
def product_count(self):
return 1
def convert(self, context, arguments):
# Add the operand which will be the left side, and thus the
# target of the comparator operator.
for argument in arguments:
argument.operation.transpile(context, argument.arguments)
comparator = {
'<': '__lt__',
'<=': '__lte__',
'>': '__gt__',
'>=': '__gte__',
'==': '__eq__',
'exception match': '__eq__',
}[self.comparison]
context.add_opcodes(
JavaOpcodes.INVOKEINTERFACE('org/python/Object', comparator, '(Lorg/python/Object;)Lorg/python/Object;')
)
class IMPORT_NAME(Opcode):
def __init__(self, target, python_offset, starts_line, is_jump_target):
super().__init__(python_offset, starts_line, is_jump_target)
self.target = target
def __arg_repr__(self):
return str(self.target)
@property
def consume_count(self):
return 2
@property
def product_count(self):
return 1
def convert(self, context, arguments):
pass # FIXME
class IMPORT_FROM(Opcode):
def __init__(self, target, python_offset, starts_line, is_jump_target):
super().__init__(python_offset, starts_line, is_jump_target)
self.target = target
def __arg_repr__(self):
return str(self.target)
@property
def consume_count(self):
return 0
@property
def product_count(self):
return 1
def convert(self, context, arguments):
pass
class JUMP_FORWARD(Opcode):
def __init__(self, target, python_offset, starts_line, is_jump_target):
super().__init__(python_offset, starts_line, is_jump_target)
self.target = target
def __arg_repr__(self):
return str(self.target)
@property
def consume_count(self):
return 0
@property
def product_count(self):
return 0
def convert(self, context, arguments):
context.add_opcodes(
jump(JavaOpcodes.GOTO(0), context, Ref(context, self.target), Opcode.START)
)
# class JUMP_IF_FALSE_OR_POP(Opcode):
# class JUMP_IF_TRUE_OR_POP(Opcode):
class JUMP_ABSOLUTE(Opcode):
def __init__(self, target, python_offset, starts_line, is_jump_target):
super().__init__(python_offset, starts_line, is_jump_target)
self.target = target
def __arg_repr__(self):
return str(self.target)
@property
def consume_count(self):
return 0
@property
def product_count(self):
return 0
def convert(self, context, arguments):
context.add_opcodes(
jump(JavaOpcodes.GOTO(0), context, Ref(context, self.target), Opcode.START)
)
class POP_JUMP_IF_FALSE(Opcode):
def __init__(self, target, python_offset, starts_line, is_jump_target):
super().__init__(python_offset, starts_line, is_jump_target)
self.target = target
def __arg_repr__(self):
return str(self.target)
@property
def consume_count(self):
return 1
@property
def product_count(self):
return 0
def convert(self, context, arguments):
for argument in arguments:
argument.operation.transpile(context, argument.arguments)
context.add_opcodes(
# (bool) TOS.value
JavaOpcodes.INVOKEINTERFACE('org/python/Object', '__bool__', '()Lorg/python/types/Bool;'),
JavaOpcodes.GETFIELD('org/python/types/Bool', 'value', 'Z'),
# Jump if false
jump(JavaOpcodes.IFEQ(0), context, Ref(context, self.target), Opcode.START)
)
class POP_JUMP_IF_TRUE(Opcode):
def __init__(self, target, python_offset, starts_line, is_jump_target):
super().__init__(python_offset, starts_line, is_jump_target)
self.target = target
@property
def consume_count(self):
return 1
@property
def product_count(self):
return 0
def convert(self, context, arguments):
for argument in arguments:
argument.operation.transpile(context, argument.arguments)
context.add_opcodes(
# (bool) TOS.value
JavaOpcodes.INVOKEINTERFACE('org/python/Object', '__bool__', '()Lorg/python/types/Bool;'),
JavaOpcodes.GETFIELD('org/python/types/Bool', 'value', 'Z'),
# Jump if not false
jump(JavaOpcodes.IFNE(0), context, Ref(context, self.target), Opcode.START)
)
class LOAD_GLOBAL(Opcode):
def __init__(self, name, python_offset, starts_line, is_jump_target):
super().__init__(python_offset, starts_line, is_jump_target)
self.name = name
def __arg_repr__(self):
return str(self.name)
@property
def consume_count(self):
return 0
@property
def product_count(self):
return 1
def convert(self, context, arguments):
context.load_name(self.name, allow_locals=False)
# class CONTINUE_LOOP(Opcode):
class SETUP_LOOP(Opcode):
def __init__(self, delta, python_offset, starts_line, is_jump_target):
super().__init__(python_offset, starts_line, is_jump_target)
self.delta = delta
@property
def consume_count(self):
return 0
@property
def product_count(self):
return 0
def convert(self, context, arguments):
pass
class SETUP_EXCEPT(Opcode):
def __init__(self, delta, python_offset, starts_line, is_jump_target):
super().__init__(python_offset, starts_line, is_jump_target)
self.delta = delta
def __arg_repr__(self):
return ' %s' % self.delta
@property
def consume_count(self):
return 0
@property
def product_count(self):
return 0
class SETUP_FINALLY(Opcode):
def __init__(self, delta, python_offset, starts_line, is_jump_target):
super().__init__(python_offset, starts_line, is_jump_target)
self.delta = delta
def __arg_repr__(self):
return ' %s' % self.delta
@property
def consume_count(self):
return 0
@property
def product_count(self):
return 0
class LOAD_FAST(Opcode):
def __init__(self, name, python_offset, starts_line, is_jump_target):
super().__init__(python_offset, starts_line, is_jump_target)
self.name = name
def __arg_repr__(self):
return str(self.name)
@property
def consume_count(self):
return 0
@property
def product_count(self):
return 1
def convert(self, context, arguments):
context.add_opcodes(ALOAD_name(context, self.name))
class STORE_FAST(Opcode):
def __init__(self, name, python_offset, starts_line, is_jump_target):
super().__init__(python_offset, starts_line, is_jump_target)
self.name = name
def __arg_repr__(self):
return str(self.name)
@property
def consume_count(self):
return 1
@property
def product_count(self):
return 0
def convert(self, context, arguments):
for argument in arguments:
argument.operation.transpile(context, argument.arguments)
context.add_opcodes(ASTORE_name(context, self.name))
# class DELETE_FAST(Opcode):
# class RAISE_VARARGS(Opcode):
class CALL_FUNCTION(Opcode):
def __init__(self, argc, python_offset, starts_line, is_jump_target):
super().__init__(python_offset, starts_line, is_jump_target)
self.args = argc & 0xff
self.kwargs = ((argc >> 8) & 0xFF)
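        # Worked example: CPython packs the call shape into one oparg, so
        # argc == 0x0102 decodes to self.args == 2 positional arguments and
        # self.kwargs == 1 keyword argument (one name/value pair on the stack).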
def __arg_repr__(self):
return '%s args, %s kwargs' % (
self.args,
self.kwargs,
)
@property
def consume_count(self):
return 1 + self.args + 2 * self.kwargs
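        # (The callable itself accounts for the leading 1; each keyword
        # argument occupies two stack entries: its name and its value.)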
@property
def product_count(self):
return 1
def convert(self, context, arguments):
code = []
if arguments[0].operation.opname == 'LOAD_BUILD_CLASS':
# Construct a class.
from .klass import Class
code = arguments[1].arguments[0].operation.const
class_name = arguments[1].arguments[1].operation.const
if len(arguments) == 4:
super_name = arguments[2].operation.const
else:
super_name = None
klass = Class(context.parent, class_name, super_name=super_name)
klass.extract(code)
context.parent.classes.append(klass.transpile())
# print("DESCRIPTOR", klass.descriptor)
# Push a callable onto the stack so that it can be stored
# in globals and subsequently retrieved and run.
context.add_opcodes(
# Get a Method representing the new function
TRY(),
JavaOpcodes.LDC(Classref(klass.descriptor)),
JavaOpcodes.ICONST_2(),
JavaOpcodes.ANEWARRAY('java/lang/Class'),
JavaOpcodes.DUP(),
JavaOpcodes.ICONST_0(),
JavaOpcodes.LDC(Classref('[Lorg/python/Object;')),
JavaOpcodes.AASTORE(),
JavaOpcodes.DUP(),
JavaOpcodes.ICONST_1(),
JavaOpcodes.LDC(Classref('java/util/Hashtable')),
JavaOpcodes.AASTORE(),
JavaOpcodes.INVOKEVIRTUAL(
'java/lang/Class',
'getConstructor',
'([Ljava/lang/Class;)Ljava/lang/reflect/Constructor;'
),
ASTORE_name(context, '#CONSTRUCTOR#'),
# # Then wrap that Constructor into a Callable.
JavaOpcodes.NEW('org/python/types/Constructor'),
JavaOpcodes.DUP(),
ALOAD_name(context, '#CONSTRUCTOR#'),
JavaOpcodes.INVOKESPECIAL('org/python/types/Constructor', '<init>', '(Ljava/lang/reflect/Constructor;)V'),
CATCH('java/lang/NoSuchMethodError'),
ASTORE_name(context, '#EXCEPTION#'),
JavaOpcodes.NEW('org/python/exceptions/RuntimeError'),
JavaOpcodes.DUP(),
JavaOpcodes.LDC('Unable to find class %s' % (klass.descriptor)),
JavaOpcodes.INVOKESPECIAL('org/python/exceptions/RuntimeError', '<init>', '(Ljava/lang/String;)V'),
JavaOpcodes.ATHROW(),
END_TRY()
)
else:
# print("CALL_FUNCTION", context, arguments)
            # Retrieve the function
arguments[0].operation.transpile(context, arguments[0].arguments)
context.add_opcodes(
JavaOpcodes.CHECKCAST('org/python/Callable'),
)
final_args = self.args
first_arg = 0
            # If the function has been retrieved using LOAD_ATTR, that means
# it's an instance method. We need to pass the instance itself
# as the first argument, so make space for that.
if arguments[0].operation.opname == 'LOAD_ATTR':
final_args += 1
first_arg = 1
context.add_opcodes(
# Create an array to pass in arguments to invoke()
ICONST_val(final_args),
JavaOpcodes.ANEWARRAY('org/python/Object'),
)
# If it's an instance method, put the instance at the start of
# the argument list.
if arguments[0].operation.opname == 'LOAD_ATTR':
context.add_opcodes(
JavaOpcodes.DUP(),
ICONST_val(0),
)
arguments[0].arguments[0].operation.transpile(context, arguments[0].arguments[0].arguments)
context.add_opcodes(JavaOpcodes.AASTORE())
# Push all the arguments into an array
for i, argument in enumerate(arguments[1:self.args+1]):
context.add_opcodes(
JavaOpcodes.DUP(),
ICONST_val(first_arg + i),
)
argument.operation.transpile(context, argument.arguments)
context.add_opcodes(JavaOpcodes.AASTORE())
# Create a Hashtable, and push all the kwargs into it.
context.add_opcodes(
JavaOpcodes.NEW('java/util/Hashtable'),
JavaOpcodes.DUP(),
JavaOpcodes.INVOKESPECIAL('java/util/Hashtable', '<init>', '()V')
)
for name, argument in zip(arguments[self.args+1::2], arguments[self.args+2::2]):
context.add_opcodes(
JavaOpcodes.DUP(),
JavaOpcodes.LDC(name),
)
argument.operation.transpile(context, argument.arguments)
context.add_opcodes(
JavaOpcodes.INVOKEVIRTUAL('java/util/Hashtable', 'put', '(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;'),
JavaOpcodes.POP()
)
context.add_opcodes(
JavaOpcodes.INVOKEINTERFACE('org/python/Callable', 'invoke', '([Lorg/python/Object;Ljava/util/Hashtable;)Lorg/python/Object;'),
)
return code
class MAKE_FUNCTION(Opcode):
def __init__(self, argc, python_offset, starts_line, is_jump_target):
super().__init__(python_offset, starts_line, is_jump_target)
self.argc = argc
self.default_args = argc & 0xff
self.default_kwargs = ((argc >> 8) & 0xFF)
self.annotations = (argc >> 16) & 0x7FFF
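        # Worked example: argc == 0x00010002 decodes to 2 default positional
        # arguments, 0 default keyword arguments and 1 annotation entry.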
def __arg_repr__(self):
return '%d %s default args, %s default kwargs, %s annotations' % (
self.argc,
self.default_args,
self.default_kwargs,
self.annotations,
)
@property
def consume_count(self):
if self.annotations:
return 2 + self.annotations
else:
return 2 + self.default_args + (2 * self.default_kwargs)
@property
def product_count(self):
return 1
def convert(self, context, arguments):
# Add a new method definition to the context class/module
code = arguments[-2].operation.const
full_method_name = arguments[-1].operation.const
method = context.add_method(full_method_name, code)
if method.is_constructor:
context.add_opcodes(
JavaOpcodes.ACONST_NULL()
)
elif method.is_closuremethod:
context.add_opcodes(
JavaOpcodes.NEW(method.callable),
JavaOpcodes.DUP(),
JavaOpcodes.INVOKESPECIAL(method.callable, '<init>', '()V'),
)
else:
# Push a callable onto the stack so that it can be stored
# in globals and subsequently retrieved and run.
context.add_opcodes(
# Get a Method representing the new function
TRY(),
JavaOpcodes.LDC(Classref(context.descriptor)),
JavaOpcodes.LDC(method.name),
JavaOpcodes.ICONST_2(),
JavaOpcodes.ANEWARRAY('java/lang/Class'),
JavaOpcodes.DUP(),
JavaOpcodes.ICONST_0(),
JavaOpcodes.LDC(Classref('[Lorg/python/Object;')),
JavaOpcodes.AASTORE(),
JavaOpcodes.DUP(),
JavaOpcodes.ICONST_1(),
JavaOpcodes.LDC(Classref('java/util/Hashtable')),
JavaOpcodes.AASTORE(),
JavaOpcodes.INVOKEVIRTUAL(
'java/lang/Class',
'getMethod',
'(Ljava/lang/String;[Ljava/lang/Class;)Ljava/lang/reflect/Method;'
),
ASTORE_name(context, '#METHOD#'),
# Then wrap that Method into a Callable.
JavaOpcodes.NEW(method.callable),
JavaOpcodes.DUP(),
ALOAD_name(context, '#METHOD#'),
ICONST_val(method.static),
JavaOpcodes.INVOKESPECIAL(method.callable, '<init>', '(Ljava/lang/reflect/Method;Z)V'),
CATCH('java/lang/NoSuchMethodError'),
ASTORE_name(context, '#EXCEPTION#'),
JavaOpcodes.NEW('org/python/exceptions/RuntimeError'),
JavaOpcodes.DUP(),
JavaOpcodes.LDC('Unable to find MAKE_FUNCTION output %s.%s' % (context.module.descriptor, full_method_name)),
JavaOpcodes.INVOKESPECIAL('org/python/exceptions/RuntimeError', '<init>', '(Ljava/lang/String;)V'),
JavaOpcodes.ATHROW(),
END_TRY()
)
class BUILD_SLICE(Opcode):
def __init__(self, argc, python_offset, starts_line, is_jump_target):
super().__init__(python_offset, starts_line, is_jump_target)
self.argc = argc
def __arg_repr__(self):
return '%s' % (self.argc)
@property
def consume_count(self):
return self.argc
@property
def product_count(self):
return 1
def convert(self, context, arguments):
for argument in arguments:
argument.operation.transpile(context, argument.arguments)
class MAKE_CLOSURE(Opcode):
def __init__(self, argc, python_offset, starts_line, is_jump_target):
super().__init__(python_offset, starts_line, is_jump_target)
self.argc = argc
def __arg_repr__(self):
return '%s' % (self.argc)
@property
def consume_count(self):
return 3 + self.argc
@property
def product_count(self):
return 1
class LOAD_CLOSURE(Opcode):
def __init__(self, i, python_offset, starts_line, is_jump_target):
super().__init__(python_offset, starts_line, is_jump_target)
self.i = i
def __arg_repr__(self):
return '%s' % (self.i)
@property
def consume_count(self):
return 0
@property
def product_count(self):
return 1
# def convert(self, context, arguments):
# return []
# class LOAD_DEREF(Opcode):
# class STORE_DEREF(Opcode):
# class DELETE_DEREF(Opcode):
# class CALL_FUNCTION_KW(Opcode):
# class CALL_FUNCTION_VAR_KW(Opcode):
# class SETUP_WITH(Opcode):
class LIST_APPEND(Opcode):
def __init__(self, index, python_offset, starts_line, is_jump_target):
super().__init__(python_offset, starts_line, is_jump_target)
self.index = index
def __arg_repr__(self):
return str(self.index)
@property
def consume_count(self):
return 1
@property
def product_count(self):
return 0
def convert(self, context, arguments):
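        # LIST_APPEND(i) appends the transpiled value to the list sitting
        # self.index entries down the Java stack: the temporaries below stash
        # and later restore the entries in between so the target list can be
        # DUPed and add() invoked on it.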
for i in range(1, self.index):
context.add_opcodes(
ASTORE_name(context, '##temp-%s-%s##' % (id(self), i))
)
context.add_opcodes(
JavaOpcodes.DUP(),
)
for argument in arguments:
argument.operation.transpile(context, argument.arguments)
context.add_opcodes(
            JavaOpcodes.INVOKEVIRTUAL('java/util/ArrayList', 'add', '(Ljava/lang/Object;)Z'),
JavaOpcodes.POP(),
)
for i in range(self.index, 1, -1):
context.add_opcodes(
ALOAD_name(context, '##temp-%s-%s##' % (id(self), i - 1))
)
# class SET_ADD(Opcode):
# class MAP_ADD(Opcode):
# class LOAD_CLASSDEREF(Opcode):
| {
"content_hash": "2d52c0b2dbb3bdc56b3ddce0fb3af894",
"timestamp": "",
"source": "github",
"line_count": 2254,
"max_line_length": 143,
"avg_line_length": 29.6885536823425,
"alnum_prop": 0.576347171164709,
"repo_name": "chubbymaggie/voc",
"id": "aa0e7504a6436576760014bcc67e9c3eabf852ff",
"size": "66918",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "voc/python/opcodes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Java",
"bytes": "227208"
},
{
"name": "Makefile",
"bytes": "4720"
},
{
"name": "Python",
"bytes": "640728"
}
],
"symlink_target": ""
} |
from parsetron import String, Grammar, Regex, Or, Optional, ZeroOrMore, \
OneOrMore, RobustParser
__author__ = 'Xuchen Yao'
def replaced_string(s, v):
return String(s).replace_result_with(v)
def result_sum(r):
# result could either be a list or a single item
try:
r.set(sum(r.get()))
except TypeError: # not a list
r.set(r.get())
def result_mul(r):
try:
m = 1
for i in r.get():
if type(i) is list:
i = i[0]
m *= i
except TypeError: # not a list
m = r.get()
r.set(m)
class NumbersGrammar(Grammar):
single_maps = [
('zero', 0), ('o', 0), ('oh', 0), ('nada', 0), ('one', 1),
('a', 1), ('two', 2), ('three', 3), ('four', 4), ('five', 5),
('six', 6), ('seven', 7), ('eight', 8), ('nine', 9), ('ten', 10),
('eleven', 11), ('twelve', 12), ('thirteen', 13), ('fourteen', 14),
('forteen', 14), ('fifteen', 15), ('sixteen', 16), ('seventeen', 17),
('eighteen', 18), ('nineteen', 19)]
ten_maps = [
('ten', 10), ('twenty', 20), ('thirty', 30), ('forty', 40),
('fourty', 40), ('fifty', 50), ('sixty', 60), ('seventy', 70),
('eighty', 80), ('ninety', 90)]
digits = Regex(r"\d+").set_result_action(lambda r: r.set(int(r.get())))
single = Or([replaced_string(s, v) for s, v in single_maps])
ten = Or([replaced_string(s, v) for s, v in ten_maps])
double = (Optional(ten) + Optional(single)).set_result_action(result_sum) \
| digits
a_hundred = replaced_string('hundred', 100)
zero_maps = [
('hundred', 100), ('thousand', 1000), ('million', int(1e6)),
('billion', int(1e9)), ('trillion', int(1e12))]
zero = Or([replaced_string(s, v) for s, v in zero_maps])
zeros = ZeroOrMore(zero).set_result_action(result_mul)
hundred = ((double + a_hundred).set_result_action(result_mul) +
Optional(double)).set_result_action(result_sum)
unit = ((double | hundred) + zeros).set_result_action(result_mul)
GOAL = OneOrMore(unit).set_result_action(result_sum)
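    # Illustrative composition: "seven hundred eighty nine thousand twenty one"
    # parses as two units, (7*100 + 89) * 1000 and 21, which GOAL sums to
    # 789021 (see the test sentences below).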
sents = [
('zero', 0),
('twelve', 12),
('twenty', 20),
('twenty three', 23),
('23', 23),
('eight hundred fifty eight', 858),
('one hundred twenty five', 125),
('seventy three', 73),
('twelve hundred thirty five', 1235),
('twenty two hundred thirty five', 2235),
('two thousand', 2000),
('two thousand two hundred thirty five', 2235),
('seventy eight thousand nine hundred twenty one', 78921),
('seven hundred eighty nine thousand twenty one', 789021),
('one million sixty one', 1000061),
('1 million sixty one', 1000061),
('1 million 61', 1000061),
('twenty three million seven hundred eighty nine thousand', 23789000),
('one hundred thousand sixty one', 100061),
('one hundred thousand five hundred sixty one', 100561),
('1 hundred thousand 5 hundred 61', 100561),
]
@staticmethod
def test():
parser = RobustParser(NumbersGrammar())
for sent, expect in NumbersGrammar.sents:
t, r = parser.parse(sent)
# print(t)
# print(r)
assert r.get() == expect, "%s <- %s" % (str(r.get()), sent)
def test():
"""
Simple test method to be called by pytest
"""
NumbersGrammar.test()
if __name__ == "__main__":
test()
| {
"content_hash": "208f6047fe67264f06bac247b835329a",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 79,
"avg_line_length": 31.78181818181818,
"alnum_prop": 0.5371853546910755,
"repo_name": "Kitt-AI/parsetron",
"id": "f2ad8c88bc3541137e431097f082563dddfdc80a",
"size": "3496",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parsetron/grammars/numbers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1944"
},
{
"name": "Python",
"bytes": "145033"
},
{
"name": "Shell",
"bytes": "493"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Building()
result.template = "object/building/poi/shared_naboo_tradefederationloyalist_large2.iff"
result.attribute_template_id = -1
result.stfName("poi_n","base_poi_building")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "a5526f7328e6a73a661e39319d515c17",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 88,
"avg_line_length": 24.76923076923077,
"alnum_prop": 0.7080745341614907,
"repo_name": "obi-two/Rebelion",
"id": "a1468b2ff5678df876b0e9dd26ad9b392d2b3fb5",
"size": "467",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/building/poi/shared_naboo_tradefederationloyalist_large2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
from typing import Any, Dict
from django.http import HttpRequest, HttpResponse
from django.utils.translation import gettext as _
from zerver.decorator import webhook_view
from zerver.lib.exceptions import JsonableError
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.models import UserProfile
@webhook_view("IFTTT")
@has_request_variables
def api_iftt_app_webhook(
request: HttpRequest,
user_profile: UserProfile,
payload: Dict[str, Any] = REQ(argument_type="body"),
) -> HttpResponse:
topic = payload.get("topic")
content = payload.get("content")
if topic is None:
topic = payload.get("subject") # Backwards-compatibility
if topic is None:
raise JsonableError(_("Topic can't be empty"))
if content is None:
raise JsonableError(_("Content can't be empty"))
if not isinstance(topic, str):
raise JsonableError(_("Topic must be a string"))
if not isinstance(content, str):
raise JsonableError(_("Content must be a string"))
check_send_webhook_message(request, user_profile, topic, content)
return json_success()
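# Example payload this view accepts (illustrative values):
#   {"topic": "Server alerts", "content": "Disk usage is above 90%"}
# The legacy "subject" key is still honored as a fallback for "topic".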
| {
"content_hash": "7cccde6b779d286d2e24cb725356c4bb",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 69,
"avg_line_length": 32,
"alnum_prop": 0.7163461538461539,
"repo_name": "eeshangarg/zulip",
"id": "45203545d043194ad8938c3695010515529f5f56",
"size": "1248",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "zerver/webhooks/ifttt/view.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "484233"
},
{
"name": "Dockerfile",
"bytes": "5056"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "713408"
},
{
"name": "Handlebars",
"bytes": "343958"
},
{
"name": "JavaScript",
"bytes": "3738321"
},
{
"name": "Perl",
"bytes": "9884"
},
{
"name": "Puppet",
"bytes": "106355"
},
{
"name": "Python",
"bytes": "9442083"
},
{
"name": "Ruby",
"bytes": "3250"
},
{
"name": "Shell",
"bytes": "135667"
},
{
"name": "TypeScript",
"bytes": "275302"
}
],
"symlink_target": ""
} |
"""Layout to monitor flight status."""
from makani.control import system_params
from makani.gs.monitor2.apps.layout import base
from makani.gs.monitor2.apps.plugins import common
from makani.gs.monitor2.apps.plugins.indicators import aio_comms
from makani.gs.monitor2.apps.plugins.indicators import avionics
from makani.gs.monitor2.apps.plugins.indicators import batt
from makani.gs.monitor2.apps.plugins.indicators import control
from makani.gs.monitor2.apps.plugins.indicators import estimator
from makani.gs.monitor2.apps.plugins.indicators import gps
from makani.gs.monitor2.apps.plugins.indicators import ground_power
from makani.gs.monitor2.apps.plugins.indicators import ground_station
from makani.gs.monitor2.apps.plugins.indicators import motor
from makani.gs.monitor2.apps.plugins.indicators import mvlv
from makani.gs.monitor2.apps.plugins.indicators import network
from makani.gs.monitor2.apps.plugins.indicators import node_status
from makani.gs.monitor2.apps.plugins.indicators import servo
from makani.gs.monitor2.apps.plugins.indicators import short_stack
_SYSTEM_PARAMS = system_params.GetSystemParams().contents
class FlightLayout(base.BaseLayout):
"""The flight layout."""
_NAME = 'Flight'
_DESIRED_VIEW_COLS = 12
_ORDER_HORIZONTALLY = False
_TOP_MOTORS = ['Pto', 'Pti', 'Sti', 'Sto']
_BOTTOM_MOTORS = ['Pbo', 'Pbi', 'Sbi', 'Sbo']
# Derived class should set the _MODE.
_MODE = '<unset>'
def Initialize(self):
self._AddIndicators('Comms Status', [
network.TetherCommsStatusPoFIndicator('PoF', show_label=True),
network.TetherCommsStatusEoPIndicator('EoP', show_label=False),
network.TetherCommsStatusWifiIndicator('Wifi', show_label=False),
network.JoystickRadioStatusIndicator('Joystick Radio'),
network.TetherLongRangeRadioStatusIndicator('Long range'),
], properties={'cols': 3})
self._AddIndicators('AIO Update', [
aio_comms.CoreSwitchAioUpdateIndicator(),
aio_comms.ControllerAioUpdateIndicator(),
aio_comms.FlightComputerAioUpdateIndicator(),
aio_comms.JoystickAioUpdateIndicator(),
], properties={'cols': 3})
self._AddIndicators('Control', [
control.ControllerInitStateIndicator(),
control.VersionIndicator(),
control.FlightPlanIndicator(),
control.FlightModeIndicator(self._MODE),
control.FlightModeGatesIndicator(self._MODE),
control.ControlTimeIndicator(self._MODE),
control.ControllerTimingIndicator(self._MODE),
control.ExperimentIndicator(self._MODE),
control.JoystickIndicator(),
control.HoverGainRampScaleIndicator(self._MODE),
control.TetherReleaseIndicator(),
control.TetherReleaseReadinessIndicator(
['LoadcellPortA', 'LoadcellPortB',
'LoadcellStarboardA', 'LoadcellStarboardB']),
], properties={'cols': 3})
self._AddIndicators('Weather', [
ground_station.WindIndicator(),
ground_station.WeatherSensorIndicator(),
], properties={'cols': 3})
self._AddIndicators('Ground Station', [
ground_station.PerchAzimuthIndicator(),
ground_station.DetwistErrorIndicator(),
ground_station.EStopStatusIndicator(),
ground_station.GsCommsIndicator(),
ground_station.GsArmingStatusIndicator(),
ground_power.SummaryIndicator('Ground Power'),
ground_power.FaultIsolationIndicator('Power Fault Isolation [V]'),
], properties={'cols': 3})
self._AddIndicators('Faults', [
control.FdAllActiveIndicator(),
short_stack.ShortStackFlightIndicator(),
], properties={'cols': 3})
self._AddBreak()
self._AddIndicators('Avionics', [
servo.StatusIndicator(self._MODE),
motor.StackBusPowerIndicator(self._MODE, 'Wing Power (Generated)'),
avionics.FpvIndicator(self._MODE, ['A', 'B', 'C'], 'Fpv Enabled'),
avionics.PitotCoverIndicator(common.FULL_COMMS_MODE, ['A', 'B', 'C'],
'Pitot Cover'),
], properties={'cols': 3})
self._AddIndicators('Power System',
self._GetPowerSystemIndicators(),
properties={'cols': 3})
self._AddIndicators(
'Top Motors',
self._GetMotorIndicators(self._TOP_MOTORS), properties={'cols': 3})
self._AddIndicators(
'Bottom Motors',
self._GetMotorIndicators(self._BOTTOM_MOTORS) +
[control.FlutterWarningIndicator(self._MODE)], properties={'cols': 3})
self._AddBreak()
self._AddIndicators('Tail Servos', [
servo.ArmedTailIndicator(self._MODE),
servo.R22TemperatureTailIndicator(self._MODE),
servo.ElePosChart(self._MODE, ylim=[-30, 30]),
servo.RudPosChart(self._MODE, ylim=[-90, 90]),
], properties={'cols': 3})
self._AddIndicators('Port Servos', [
servo.ArmedPortIndicator(self._MODE),
servo.R22TemperaturePortIndicator(self._MODE),
servo.PortPosChart(self._MODE, ylim=[-40, 20]),
], properties={'cols': 3})
self._AddIndicators('Starboard Servos', [
servo.ArmedStarboardIndicator(self._MODE),
servo.R22TemperatureStarboardIndicator(self._MODE),
servo.StarboardPosChart(self._MODE, ylim=[-40, 20]),
], properties={'cols': 3})
self._AddIndicators('Wing GPS', [
], properties={'cols': 3})
self._AddIndicators('Estimator', [
estimator.EstimatorGsgBias(common.FULL_COMMS_MODE),
estimator.EstimatorGpsDiff(common.FULL_COMMS_MODE),
estimator.EstimatorGyroDiffIndicator(common.FULL_COMMS_MODE),
estimator.EstimatorMagnetometerDiffIndicator(common.FULL_COMMS_MODE),
estimator.EstimatorAttitudeDiffIndicator(common.FULL_COMMS_MODE),
estimator.EstimatorGyroBiasIndicator(common.FULL_COMMS_MODE),
estimator.EstimatorGyroBiasDriftIndicator(common.FULL_COMMS_MODE),
estimator.EstimatorGsgDiff(common.FULL_COMMS_MODE),
], properties={'cols': 3})
self._AddIndicators('Vessel', [
gps.TetherUpGpsIndicator(name='GPS BaseStation'),
control.VesselPositionIndicator(common.FULL_COMMS_MODE),
control.VesselAttitudeIndicator(common.FULL_COMMS_MODE),
], properties={'cols': 3})
self._AddBreak()
self._AddIndicators('Wing', [
control.HoverAnglesChart(self._MODE, ylim=[-90, 90]),
control.TetherAnglesChart(ylim=[-100, 100]),
control.WingPosChart(self._MODE, ylim=[-600, 500]),
control.AirSpeedChart(self._MODE, ylim=[0, 80]),
control.TensionChart(self._MODE),
control.TensionPilotOffsetIndicator(),
avionics.BridleJunctionLoadcellIndicator('Bridle Junc'),
control.ImpactZoneChart(self._MODE),
], properties={'cols': 3})
def _GetPowerSystemIndicators(self):
indicators = [
motor.MotorLVInputIndicator(chars_per_line=15),
node_status.TetherNodePowerSummary('Power'),
batt.TetherDownLvBusVoltageIndicator(),
batt.StateOfChargeIndicator(['A', 'B'], 'State of Charge'),
# Show both MvLv and battery warnings and errors. There can be more than
# 6 warnings/errors in the worst case, but set it to 6 to save space.
# This is the only place showing Mvlv errors in SN01/Gin since Mvlv
# errors are excluded from TetherNodeErrorSummary for noise issue in
# SN1/Gin
mvlv.LvSummaryIndicator(num_lines=6),
]
if _SYSTEM_PARAMS.wing_serial != system_params.kWingSerial01:
indicators.insert(0, motor.MotorVoltageIndicator(chars_per_line=15))
return indicators
def _GetMotorIndicators(self, motor_labels):
indicators = [
motor.ArmedIndicator(
self._MODE, motor_labels=motor_labels, show_label=True),
motor.ErrorIndicator(
self._MODE, motor_labels=motor_labels, show_label=False),
motor.WarningIndicator(
self._MODE, motor_labels=motor_labels, show_label=False),
motor.MotorBusVoltageIndicator(
self._MODE, motor_labels=motor_labels, show_label=False),
motor.BoardTemperatureIndicator(
self._MODE, motor_labels=motor_labels, show_label=False),
motor.CapacitorTemperatureIndicator(
self._MODE, motor_labels=motor_labels, show_label=False),
]
if _SYSTEM_PARAMS.wing_serial in (system_params.kWingSerial04Hover,
system_params.kWingSerial04Crosswind):
indicators += [
motor.ModuleTemperatureIndicator(
motor_labels=motor_labels, show_label=False),
]
else:
indicators += [
motor.HeatPlateTemperatureIndicator(
self._MODE, motor_labels=motor_labels, show_label=False),
]
indicators += [
motor.SpeedChart(
self._MODE, 'Speed [rad/s]', motor_labels,
ylim=[-250, 250], show_cmd=True),
]
return indicators
| {
"content_hash": "f3a9d8fdc7e04552c1a9a8f248b871f1",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 80,
"avg_line_length": 41.324074074074076,
"alnum_prop": 0.6757786242437822,
"repo_name": "google/makani",
"id": "db279945d5ae97aa17ba4829dbd9aa6a0c56eade",
"size": "9515",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gs/monitor2/apps/plugins/layouts/flight_template.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "119408"
},
{
"name": "C",
"bytes": "20174258"
},
{
"name": "C++",
"bytes": "30512322"
},
{
"name": "CSS",
"bytes": "8921"
},
{
"name": "Dockerfile",
"bytes": "1381"
},
{
"name": "Emacs Lisp",
"bytes": "1134"
},
{
"name": "HTML",
"bytes": "65745"
},
{
"name": "Java",
"bytes": "1558475"
},
{
"name": "JavaScript",
"bytes": "130727"
},
{
"name": "Jupyter Notebook",
"bytes": "1154728"
},
{
"name": "MATLAB",
"bytes": "1026162"
},
{
"name": "Makefile",
"bytes": "2798"
},
{
"name": "Objective-C",
"bytes": "62972"
},
{
"name": "Perl",
"bytes": "870724"
},
{
"name": "Python",
"bytes": "5552781"
},
{
"name": "RPC",
"bytes": "195736"
},
{
"name": "Roff",
"bytes": "2567875"
},
{
"name": "SWIG",
"bytes": "8663"
},
{
"name": "Shell",
"bytes": "297941"
},
{
"name": "Starlark",
"bytes": "462998"
},
{
"name": "Vim Script",
"bytes": "2281"
},
{
"name": "XC",
"bytes": "50398"
},
{
"name": "XS",
"bytes": "49289"
}
],
"symlink_target": ""
} |
import unittest
from datetime import datetime
from unittest import mock
from django.test import SimpleTestCase, ignore_warnings
from django.utils.datastructures import MultiValueDict
from django.utils.deprecation import RemovedInDjango40Warning
from django.utils.http import (
base36_to_int, escape_leading_slashes, http_date, int_to_base36,
is_safe_url, is_same_domain, parse_etags, parse_http_date, quote_etag,
url_has_allowed_host_and_scheme, urlencode, urlquote, urlquote_plus,
urlsafe_base64_decode, urlsafe_base64_encode, urlunquote, urlunquote_plus,
)
class URLEncodeTests(SimpleTestCase):
cannot_encode_none_msg = (
"Cannot encode None for key 'a' in a query string. Did you mean to "
"pass an empty string or omit the value?"
)
def test_tuples(self):
self.assertEqual(urlencode((('a', 1), ('b', 2), ('c', 3))), 'a=1&b=2&c=3')
def test_dict(self):
result = urlencode({'a': 1, 'b': 2, 'c': 3})
# Dictionaries are treated as unordered.
self.assertIn(result, [
'a=1&b=2&c=3',
'a=1&c=3&b=2',
'b=2&a=1&c=3',
'b=2&c=3&a=1',
'c=3&a=1&b=2',
'c=3&b=2&a=1',
])
def test_dict_containing_sequence_not_doseq(self):
self.assertEqual(urlencode({'a': [1, 2]}, doseq=False), 'a=%5B1%2C+2%5D')
def test_dict_containing_tuple_not_doseq(self):
self.assertEqual(urlencode({'a': (1, 2)}, doseq=False), 'a=%281%2C+2%29')
def test_custom_iterable_not_doseq(self):
class IterableWithStr:
def __str__(self):
return 'custom'
def __iter__(self):
yield from range(0, 3)
self.assertEqual(urlencode({'a': IterableWithStr()}, doseq=False), 'a=custom')
def test_dict_containing_sequence_doseq(self):
self.assertEqual(urlencode({'a': [1, 2]}, doseq=True), 'a=1&a=2')
def test_dict_containing_empty_sequence_doseq(self):
self.assertEqual(urlencode({'a': []}, doseq=True), '')
def test_multivaluedict(self):
result = urlencode(MultiValueDict({
'name': ['Adrian', 'Simon'],
'position': ['Developer'],
}), doseq=True)
# MultiValueDicts are similarly unordered.
self.assertIn(result, [
'name=Adrian&name=Simon&position=Developer',
'position=Developer&name=Adrian&name=Simon',
])
def test_dict_with_bytes_values(self):
self.assertEqual(urlencode({'a': b'abc'}, doseq=True), 'a=abc')
def test_dict_with_sequence_of_bytes(self):
self.assertEqual(urlencode({'a': [b'spam', b'eggs', b'bacon']}, doseq=True), 'a=spam&a=eggs&a=bacon')
def test_dict_with_bytearray(self):
self.assertEqual(urlencode({'a': bytearray(range(2))}, doseq=True), 'a=0&a=1')
def test_generator(self):
self.assertEqual(urlencode({'a': range(2)}, doseq=True), 'a=0&a=1')
self.assertEqual(urlencode({'a': range(2)}, doseq=False), 'a=range%280%2C+2%29')
def test_none(self):
with self.assertRaisesMessage(TypeError, self.cannot_encode_none_msg):
urlencode({'a': None})
def test_none_in_sequence(self):
with self.assertRaisesMessage(TypeError, self.cannot_encode_none_msg):
urlencode({'a': [None]}, doseq=True)
def test_none_in_generator(self):
def gen():
yield None
with self.assertRaisesMessage(TypeError, self.cannot_encode_none_msg):
urlencode({'a': gen()}, doseq=True)
class Base36IntTests(SimpleTestCase):
def test_roundtrip(self):
for n in [0, 1, 1000, 1000000]:
self.assertEqual(n, base36_to_int(int_to_base36(n)))
def test_negative_input(self):
with self.assertRaisesMessage(ValueError, 'Negative base36 conversion input.'):
int_to_base36(-1)
def test_to_base36_errors(self):
for n in ['1', 'foo', {1: 2}, (1, 2, 3), 3.141]:
with self.assertRaises(TypeError):
int_to_base36(n)
def test_invalid_literal(self):
for n in ['#', ' ']:
with self.assertRaisesMessage(ValueError, "invalid literal for int() with base 36: '%s'" % n):
base36_to_int(n)
def test_input_too_large(self):
with self.assertRaisesMessage(ValueError, 'Base36 input too large'):
base36_to_int('1' * 14)
def test_to_int_errors(self):
for n in [123, {1: 2}, (1, 2, 3), 3.141]:
with self.assertRaises(TypeError):
base36_to_int(n)
def test_values(self):
for n, b36 in [(0, '0'), (1, '1'), (42, '16'), (818469960, 'django')]:
self.assertEqual(int_to_base36(n), b36)
self.assertEqual(base36_to_int(b36), n)
class IsSafeURLTests(SimpleTestCase):
def test_bad_urls(self):
bad_urls = (
'http://example.com',
'http:///example.com',
'https://example.com',
'ftp://example.com',
r'\\example.com',
r'\\\example.com',
r'/\\/example.com',
r'\\\example.com',
r'\\example.com',
r'\\//example.com',
r'/\/example.com',
r'\/example.com',
r'/\example.com',
'http:///example.com',
r'http:/\//example.com',
r'http:\/example.com',
r'http:/\example.com',
'javascript:alert("XSS")',
'\njavascript:alert(x)',
'\x08//example.com',
r'http://otherserver\@example.com',
r'http:\\testserver\@example.com',
            r'http://testserver\me:pass@example.com',
r'http://testserver\@example.com',
            r'http:\\testserver\confirm\me@example.com',
'http:999999999',
'ftp:9999999999',
'\n',
'http://[2001:cdba:0000:0000:0000:0000:3257:9652/',
'http://2001:cdba:0000:0000:0000:0000:3257:9652]/',
)
for bad_url in bad_urls:
with self.subTest(url=bad_url):
self.assertIs(
url_has_allowed_host_and_scheme(bad_url, allowed_hosts={'testserver', 'testserver2'}),
False,
)
def test_good_urls(self):
good_urls = (
'/view/?param=http://example.com',
'/view/?param=https://example.com',
'/view?param=ftp://example.com',
'view/?param=//example.com',
'https://testserver/',
'HTTPS://testserver/',
'//testserver/',
            'http://testserver/confirm?email=me@example.com',
'/url%20with%20spaces/',
'path/http:2222222222',
)
for good_url in good_urls:
with self.subTest(url=good_url):
self.assertIs(
url_has_allowed_host_and_scheme(good_url, allowed_hosts={'otherserver', 'testserver'}),
True,
)
def test_basic_auth(self):
# Valid basic auth credentials are allowed.
self.assertIs(
url_has_allowed_host_and_scheme(r'http://user:pass@testserver/', allowed_hosts={'user:pass@testserver'}),
True,
)
def test_no_allowed_hosts(self):
# A path without host is allowed.
        self.assertIs(url_has_allowed_host_and_scheme('/confirm/me@example.com', allowed_hosts=None), True)
# Basic auth without host is not allowed.
self.assertIs(url_has_allowed_host_and_scheme(r'http://testserver\@example.com', allowed_hosts=None), False)
def test_allowed_hosts_str(self):
self.assertIs(url_has_allowed_host_and_scheme('http://good.com/good', allowed_hosts='good.com'), True)
self.assertIs(url_has_allowed_host_and_scheme('http://good.co/evil', allowed_hosts='good.com'), False)
def test_secure_param_https_urls(self):
secure_urls = (
'https://example.com/p',
'HTTPS://example.com/p',
'/view/?param=http://example.com',
)
for url in secure_urls:
with self.subTest(url=url):
self.assertIs(
url_has_allowed_host_and_scheme(url, allowed_hosts={'example.com'}, require_https=True),
True,
)
def test_secure_param_non_https_urls(self):
insecure_urls = (
'http://example.com/p',
'ftp://example.com/p',
'//example.com/p',
)
for url in insecure_urls:
with self.subTest(url=url):
self.assertIs(
url_has_allowed_host_and_scheme(url, allowed_hosts={'example.com'}, require_https=True),
False,
)
def test_is_safe_url_deprecated(self):
msg = (
'django.utils.http.is_safe_url() is deprecated in favor of '
'url_has_allowed_host_and_scheme().'
)
with self.assertWarnsMessage(RemovedInDjango40Warning, msg):
is_safe_url('https://example.com', allowed_hosts={'example.com'})
class URLSafeBase64Tests(unittest.TestCase):
def test_roundtrip(self):
bytestring = b'foo'
encoded = urlsafe_base64_encode(bytestring)
decoded = urlsafe_base64_decode(encoded)
self.assertEqual(bytestring, decoded)
@ignore_warnings(category=RemovedInDjango40Warning)
class URLQuoteTests(unittest.TestCase):
def test_quote(self):
self.assertEqual(urlquote('Paris & Orl\xe9ans'), 'Paris%20%26%20Orl%C3%A9ans')
self.assertEqual(urlquote('Paris & Orl\xe9ans', safe="&"), 'Paris%20&%20Orl%C3%A9ans')
def test_unquote(self):
self.assertEqual(urlunquote('Paris%20%26%20Orl%C3%A9ans'), 'Paris & Orl\xe9ans')
self.assertEqual(urlunquote('Paris%20&%20Orl%C3%A9ans'), 'Paris & Orl\xe9ans')
def test_quote_plus(self):
self.assertEqual(urlquote_plus('Paris & Orl\xe9ans'), 'Paris+%26+Orl%C3%A9ans')
self.assertEqual(urlquote_plus('Paris & Orl\xe9ans', safe="&"), 'Paris+&+Orl%C3%A9ans')
def test_unquote_plus(self):
self.assertEqual(urlunquote_plus('Paris+%26+Orl%C3%A9ans'), 'Paris & Orl\xe9ans')
self.assertEqual(urlunquote_plus('Paris+&+Orl%C3%A9ans'), 'Paris & Orl\xe9ans')
class IsSameDomainTests(unittest.TestCase):
def test_good(self):
for pair in (
('example.com', 'example.com'),
('example.com', '.example.com'),
('foo.example.com', '.example.com'),
('example.com:8888', 'example.com:8888'),
('example.com:8888', '.example.com:8888'),
('foo.example.com:8888', '.example.com:8888'),
):
self.assertIs(is_same_domain(*pair), True)
def test_bad(self):
for pair in (
('example2.com', 'example.com'),
('foo.example.com', 'example.com'),
('example.com:9999', 'example.com:8888'),
('foo.example.com:8888', ''),
):
self.assertIs(is_same_domain(*pair), False)
class ETagProcessingTests(unittest.TestCase):
def test_parsing(self):
self.assertEqual(
parse_etags(r'"" , "etag", "e\\tag", W/"weak"'),
['""', '"etag"', r'"e\\tag"', 'W/"weak"']
)
self.assertEqual(parse_etags('*'), ['*'])
# Ignore RFC 2616 ETags that are invalid according to RFC 7232.
self.assertEqual(parse_etags(r'"etag", "e\"t\"ag"'), ['"etag"'])
def test_quoting(self):
self.assertEqual(quote_etag('etag'), '"etag"') # unquoted
self.assertEqual(quote_etag('"etag"'), '"etag"') # quoted
self.assertEqual(quote_etag('W/"etag"'), 'W/"etag"') # quoted, weak
class HttpDateProcessingTests(unittest.TestCase):
def test_http_date(self):
t = 1167616461.0
self.assertEqual(http_date(t), 'Mon, 01 Jan 2007 01:54:21 GMT')
def test_parsing_rfc1123(self):
parsed = parse_http_date('Sun, 06 Nov 1994 08:49:37 GMT')
self.assertEqual(datetime.utcfromtimestamp(parsed), datetime(1994, 11, 6, 8, 49, 37))
@mock.patch('django.utils.http.datetime.datetime')
def test_parsing_rfc850(self, mocked_datetime):
mocked_datetime.side_effect = datetime
mocked_datetime.utcnow = mock.Mock()
utcnow_1 = datetime(2019, 11, 6, 8, 49, 37)
utcnow_2 = datetime(2020, 11, 6, 8, 49, 37)
utcnow_3 = datetime(2048, 11, 6, 8, 49, 37)
tests = (
(utcnow_1, 'Tuesday, 31-Dec-69 08:49:37 GMT', datetime(2069, 12, 31, 8, 49, 37)),
(utcnow_1, 'Tuesday, 10-Nov-70 08:49:37 GMT', datetime(1970, 11, 10, 8, 49, 37)),
(utcnow_1, 'Sunday, 06-Nov-94 08:49:37 GMT', datetime(1994, 11, 6, 8, 49, 37)),
(utcnow_2, 'Wednesday, 31-Dec-70 08:49:37 GMT', datetime(2070, 12, 31, 8, 49, 37)),
(utcnow_2, 'Friday, 31-Dec-71 08:49:37 GMT', datetime(1971, 12, 31, 8, 49, 37)),
(utcnow_3, 'Sunday, 31-Dec-00 08:49:37 GMT', datetime(2000, 12, 31, 8, 49, 37)),
(utcnow_3, 'Friday, 31-Dec-99 08:49:37 GMT', datetime(1999, 12, 31, 8, 49, 37)),
)
for utcnow, rfc850str, expected_date in tests:
with self.subTest(rfc850str=rfc850str):
mocked_datetime.utcnow.return_value = utcnow
parsed = parse_http_date(rfc850str)
self.assertEqual(datetime.utcfromtimestamp(parsed), expected_date)
def test_parsing_asctime(self):
parsed = parse_http_date('Sun Nov 6 08:49:37 1994')
self.assertEqual(datetime.utcfromtimestamp(parsed), datetime(1994, 11, 6, 8, 49, 37))
def test_parsing_year_less_than_70(self):
parsed = parse_http_date('Sun Nov 6 08:49:37 0037')
self.assertEqual(datetime.utcfromtimestamp(parsed), datetime(2037, 11, 6, 8, 49, 37))
class EscapeLeadingSlashesTests(unittest.TestCase):
def test(self):
tests = (
('//example.com', '/%2Fexample.com'),
('//', '/%2F'),
)
for url, expected in tests:
with self.subTest(url=url):
self.assertEqual(escape_leading_slashes(url), expected)
| {
"content_hash": "f935dda6d78d3918bae5786eabb9569a",
"timestamp": "",
"source": "github",
"line_count": 359,
"max_line_length": 117,
"avg_line_length": 39.49860724233983,
"alnum_prop": 0.5717207334273625,
"repo_name": "simonw/django",
"id": "ed6824429d51a5a56dc639b56c73910d5432c982",
"size": "14180",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "tests/utils_tests/test_http.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "85351"
},
{
"name": "HTML",
"bytes": "227641"
},
{
"name": "JavaScript",
"bytes": "258434"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "13501540"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "142"
}
],
"symlink_target": ""
} |
import os
from PIL import Image
def calcOptiFactorString(orig, new, asprocent=False,reverse=False):
origlen = len(orig)
newlen = len(new)
factor = newlen/origlen
if reverse:
factor = 1 - factor
if asprocent:
factor *= 100
factor = round(factor)
return factor
def calcOptiFactorBinary(fpatho, fpathn, asprocent=False,reverse=False):
byteso = os.path.getsize(fpatho)
bytesn = os.path.getsize(fpathn)
factor = bytesn/byteso
if reverse:
factor = 1 - factor
if asprocent:
factor *= 100
factor = round(factor)
return factor
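    # Example (illustrative): for a 200 kB original and a 150 kB optimized
    # copy, asprocent=True reports 75 (the new file is 75% of the original)
    # and asprocent=True with reverse=True reports 25 (a 25% saving).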
def needsCompress(fpath,data):
#find entry
for i in range(0, len(data)):
entry = data[i]
if entry["fpath"] == fpath:
# found it! now check exists && filemtime..
if not os.path.exists(fpath):
return True
if entry["ftime"] < os.path.getmtime(fpath):
return True # found but file is out of date, yes compress it
return False # found and hasn't changed, no and continue
return True # not found so yes do compress it
def openImageOpti(fpath,fext):
im = Image.open(fpath)
return im
"""
if fext==".png":
return im # png is getting BIGGER when I create palette index..??!!! ignore it
pass
try:
nrOfColors = getNumberOfColors(im)
if nrOfColors>1024:
return im # too many colors to just reduce to 256 or less..
elif nrOfColors > 256:
print(" -> Convert to 256 colors..")
cim = im.convert('P', palette=Image.ADAPTIVE, colors=256)
else:
print(" -> Convert to '"+ str(round(nrOfColors/2)) +"' colors..")
cim = im.convert('P', palette=Image.ADAPTIVE, colors=round(nrOfColors/2))
if fext==".jpg" or fext==".jpeg":
return cim.convert("RGB") # jpeg: needs mode RGB image
return cim # return mode P image
except ValueError:
print(" -> Could not convert colors :(")
return im # if all fails..
"""
def getNumberOfColors(im):
# prep color list to keep track
colors = []
# get size
imw, imh = im.size
# convert to rgb (so we have all the colors even if we import indexed)
rgb_im = im.convert('RGBA')
# get pixeldata..
for x in range(0, imw):
for y in range(0,imh):
# get r g b
r, g, b, a = rgb_im.getpixel((x, y))
hex = '#{:02x}{:02x}{:02x}{:02x}'.format(a, r, g, b)
if not hex in colors:
colors.append(hex)
if len(colors)>1024: return 2056 # too many colors :S
return len(colors)
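    # Illustrative usage, assuming a tiny solid-color test image:
    #   im = Image.new("RGBA", (2, 2), (255, 0, 0, 255))
    #   getNumberOfColors(im)  # -> 1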
| {
"content_hash": "31660c2aac8568d8c03f0fcf020bb4a5",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 80,
"avg_line_length": 24.25,
"alnum_prop": 0.665807560137457,
"repo_name": "rejhgadellaa/icerrr",
"id": "5bc7df98de471ead6b5f47b45d9fb30fbbfda53b",
"size": "2371",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "androidapp/py-web-optimizer/helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "8661"
},
{
"name": "CSS",
"bytes": "53054"
},
{
"name": "HTML",
"bytes": "69690"
},
{
"name": "Java",
"bytes": "297316"
},
{
"name": "JavaScript",
"bytes": "461691"
},
{
"name": "PHP",
"bytes": "83269"
},
{
"name": "Python",
"bytes": "9369"
},
{
"name": "Shell",
"bytes": "5781"
}
],
"symlink_target": ""
} |
from .claripy import *
from .light import *
from .heavy import *
from .lifter import VEXLifter
| {
"content_hash": "c6c2f30621c8e9a3cf184aa8beab73e1",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 29,
"avg_line_length": 23.75,
"alnum_prop": 0.7578947368421053,
"repo_name": "schieb/angr",
"id": "45b364fd2d1de876b0ad19cced5f3a960d63326c",
"size": "95",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "angr/engines/vex/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "6375"
},
{
"name": "C++",
"bytes": "39522"
},
{
"name": "Dockerfile",
"bytes": "493"
},
{
"name": "Makefile",
"bytes": "739"
},
{
"name": "Python",
"bytes": "4987778"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import sys
sys.path.insert(0, "..")
import webbrowser
from qqweibo import OAuthHandler, API
API_KEY = 'your key'
API_SECRET = 'your secret'
if API_KEY.startswith('your'):
print ('You must fill API_KEY and API_SECRET!')
webbrowser.open("http://open.t.qq.com/apps_index.php")
raise RuntimeError('You must set API_KEY and API_SECRET')
auth = OAuthHandler(API_KEY, API_SECRET)
token = 'YOUR TOKEN HERE'  # the so-called consumer
tokenSecret = 'YOUR TOKEN_SECRET HERE'  # the so-called token
auth.setToken(token, tokenSecret)
api = API(auth)
"""
Available API:
Refer to api.doc.rst
api.timeline.broadcast
api.timeline.home
api.timeline.mentions
api.timeline.public
api.timeline.special
api.timeline.topic
api.timeline.user
"""
def dumpTweet(t):
try:
print ("{0.nick}({0.name}) => {0.origtext} [{0.from_}]".format(t))
if t.source:
print ("!Orig: {0.source.origtext}".format(t))
except UnicodeEncodeError:
# NOTE: this is a very common error under win32
print ("Error: Some tweets or usernames may be outside "
"your system encoding")
for t in api.timeline.home():
dumpTweet(t)
for retid in api.timeline.homeids():
t = api.tweet.show(retid.id)
# or the magic t = retid.as_tweet()
dumpTweet(t)
print ("Warning: it may use up your request quota.")
break
for t in api.timeline.users(names=['andelf', 'karenmo']):
dumpTweet(t)
| {
"content_hash": "37866ebe79627660942edc4fcbbd326e",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 74,
"avg_line_length": 24.677419354838708,
"alnum_prop": 0.64640522875817,
"repo_name": "hufei/qqweibo",
"id": "11d42d5ee10044183ad4a796eb66ceb072b09d09",
"size": "1883",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "examples/example-timeline.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import unittest
import vsmlib
import logging
import vsmlib.benchmarks.similarity.similarity
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
path = "test/data/embeddings/text/plain_with_file_header"
class Tests(unittest.TestCase):
def test_similar(self):
model = vsmlib.model.load_from_dir(path)
sims = model.get_most_similar_words("apple", cnt=12)
for w, s in sims:
print(w, s)
self.assertIsInstance(model, object)
sims = model.get_most_similar_words("apple", cnt=12)
model.normalize()
logger.info("after normalization:")
sims = model.get_most_similar_words("apple", cnt=12)
for w, s in sims:
print(w, s)
logger.info("after normalized copy:")
model = vsmlib.model.load_from_dir(path)
model.cache_normalized_copy()
sims = model.get_most_similar_words("apple", cnt=12)
for w, s in sims:
print(w, s)
def test_similarity(self):
path_model = "./test/data/embeddings/text/plain_with_file_header"
model = vsmlib.model.load_from_dir(path_model)
options = {}
options["path_dataset"] = "./test/data/benchmarks/similarity/"
vsmlib.benchmarks.similarity.similarity.run(model, options) | {
"content_hash": "7474f349616bd7a636bbcad738eb3180",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 73,
"avg_line_length": 35.21621621621622,
"alnum_prop": 0.6415963161933999,
"repo_name": "undertherain/vsmlib",
"id": "12703ea56643eed14e1e905db50275967548062c",
"size": "1303",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_similar.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "123"
},
{
"name": "Python",
"bytes": "201669"
}
],
"symlink_target": ""
} |
from .repeat import Repeat
| {
"content_hash": "e6b64a3ebf6ce0cc4d49cd2fedc05ff8",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 26,
"avg_line_length": 27,
"alnum_prop": 0.8148148148148148,
"repo_name": "interuss/dss",
"id": "c738568938e87a3c66314ba65e6950db2895c0c8",
"size": "27",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "monitoring/uss_qualifier/action_generators/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "518"
},
{
"name": "Dockerfile",
"bytes": "6594"
},
{
"name": "Go",
"bytes": "583387"
},
{
"name": "HTML",
"bytes": "20494"
},
{
"name": "Jsonnet",
"bytes": "601530"
},
{
"name": "Makefile",
"bytes": "10609"
},
{
"name": "PLpgSQL",
"bytes": "4759"
},
{
"name": "Python",
"bytes": "948652"
},
{
"name": "Shell",
"bytes": "76140"
}
],
"symlink_target": ""
} |
import smarter
from .models import Page, PageFile
class PageViews(smarter.GenericViews):
model = Page
options = {
'add': {
'redirect': lambda view, request, **kwargs: view.get_url('index')
},
'edit': {
'exclude': ('owner',),
'redirect': lambda view, request, **kwargs: kwargs['obj'].get_absolute_url()
}
}
class PageFileViews(smarter.GenericViews):
model = PageFile
options = {
'edit': None,
'details': None,
'add': {
'redirect': lambda view, request, **kwargs: view.get_url('index')
}
} | {
"content_hash": "fe54186fe6c774522260c1d7873048c4",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 88,
"avg_line_length": 24.115384615384617,
"alnum_prop": 0.532695374800638,
"repo_name": "fabiosantoscode/django-smarter",
"id": "96309ae268b0c1bda538226449794e26916b670f",
"size": "627",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "example/pages/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "41211"
},
{
"name": "Shell",
"bytes": "4527"
}
],
"symlink_target": ""
} |
from .stage01_isotopomer_spectrumAccuracy_io import stage01_isotopomer_spectrumAccuracy_io
#Remove after refactor
from .stage01_isotopomer_spectrumAccuracy_postgresql_models import *
class stage01_isotopomer_spectrumAccuracy_execute(stage01_isotopomer_spectrumAccuracy_io):
def execute_analyzeSpectrumAccuracy(self,experiment_id_I, sample_names_I = None, sample_name_abbreviations_I = None, met_ids_I = None, scan_types_I = None):
'''calculate the average spectrum accuracy'''
mids = mass_isotopomer_distributions();
print('execute_analyzeSpectrumAccuracy...')
# get time points
time_points = self.get_timePoint_experimentID_dataStage01Normalized(experiment_id_I);
for tp in time_points:
print('Calculating spectrum accuracy from isotopomer normalized for time-point ' + str(tp));
if sample_names_I:
sample_abbreviations = [];
sample_types = ['Unknown','QC'];
sample_types_lst = [];
for sn in sample_names_I:
for st in sample_types:
sample_abbreviations_tmp = [];
sample_abbreviations_tmp = self.get_sampleNameAbbreviations_experimentIDAndSampleTypeAndTimePointAndSampleName_dataStage01Normalized(experiment_id_I,st,tp,sn);
sample_abbreviations.extend(sample_abbreviations_tmp);
                        sample_types_lst.extend([st for i in range(len(sample_abbreviations_tmp))]);
elif sample_name_abbreviations_I:
sample_abbreviations = sample_name_abbreviations_I;
# query sample types from sample name abbreviations and time-point from data_stage01_isotopomer_normalized
else:
# get sample names and sample name abbreviations
sample_abbreviations = [];
sample_types = ['Unknown','QC'];
sample_types_lst = [];
for st in sample_types:
sample_abbreviations_tmp = [];
sample_abbreviations_tmp = self.get_sampleNameAbbreviations_experimentIDAndSampleTypeAndTimePoint_dataStage01Normalized(experiment_id_I,st,tp);
sample_abbreviations.extend(sample_abbreviations_tmp);
sample_types_lst.extend([st for i in range(len(sample_abbreviations_tmp))]);
for sna_cnt,sna in enumerate(sample_abbreviations):
print('Calculating spectrum accuracy from isotopomer normalized for sample name abbreviation ' + sna);
# get the scan_types
if scan_types_I:
scan_types = [];
scan_types_tmp = [];
scan_types_tmp = self.get_scanTypes_experimentIDAndTimePointAndSampleAbbreviationsAndSampleType_dataStage01Normalized(experiment_id_I,tp,sna,sample_types_lst[sna_cnt]);
scan_types = [st for st in scan_types_tmp if st in scan_types_I];
else:
scan_types = [];
scan_types = self.get_scanTypes_experimentIDAndTimePointAndSampleAbbreviationsAndSampleType_dataStage01Normalized(experiment_id_I,tp,sna,sample_types_lst[sna_cnt]);
for scan_type in scan_types:
print('Calculating spectrum accuracy for scan type ' + scan_type)
# met_ids
if not met_ids_I:
met_ids = [];
met_ids = self.get_metIDs_experimentIDAndSampleAbbreviationAndTimePointAndSampleTypeAndScanType_dataStage01Normalized( \
experiment_id_I,sna,tp,sample_types_lst[sna_cnt],scan_type);
else:
met_ids = met_ids_I;
if not(met_ids): continue #no component information was found
for met in met_ids:
print('Calculating spectrum accuracy for metabolite ' + met);
replicate_numbers = [];
replicate_numbers = self.get_replicateNumbers_experimentIDAndSampleAbbreviationAndTimePointAndScanTypeAndMetID_dataStage01Normalized( \
experiment_id_I,sna,tp,scan_type,met);
peakSpectrum_normalized_lst = [];
for rep in replicate_numbers:
print('Calculating spectrum accuracy for replicate_number ' + str(rep));
#get data
peakData_I = {};
peakData_I = self.get_dataNormalized_experimentIDAndSampleAbbreviationAndTimePointAndScanTypeAndMetIDAndReplicateNumber_dataStage01Normalized( \
experiment_id_I,sna,tp,scan_type,met,rep);
fragment_formulas = list(peakData_I.keys());
peakSpectrum_corrected, peakSpectrum_normalized = mids.extract_peakList_normMax(\
peakData_I, fragment_formulas, True);
peakSpectrum_normalized_lst.append(peakSpectrum_normalized);
peakSpectrum_accuracy = mids.calculate_fragmentSpectrumAccuracy(peakSpectrum_normalized_lst);
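                        # peakSpectrum_accuracy maps each fragment formula to the accuracy
                        # computed across the replicate spectra collected above; only
                        # fragments with a non-null accuracy are written to the database below.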
# update data_stage01_isotopomer_spectrumAccuracy
for frag,accuracy in peakSpectrum_accuracy.items():
if accuracy:
row = [];
row = data_stage01_isotopomer_spectrumAccuracy(experiment_id_I, sna, sample_types_lst[sna_cnt], tp, met,frag, accuracy, scan_type, True);
self.session.add(row);
self.session.commit();
def execute_analyzeSpectrumAccuracyNormSum(self,experiment_id_I, sample_names_I = None, sample_name_abbreviations_I = None, met_ids_I = None, scan_types_I = None):
'''calculate the average spectrum accuracy'''
mids = mass_isotopomer_distributions();
print('execute_analyzeSpectrumAccuracy...')
# get time points
time_points = self.get_timePoint_experimentID_dataStage01Normalized(experiment_id_I);
for tp in time_points:
print('Calculating spectrum accuracy from isotopomer normalized for time-point ' + str(tp));
if sample_names_I:
sample_abbreviations = [];
sample_types = ['Unknown','QC'];
sample_types_lst = [];
for sn in sample_names_I:
for st in sample_types:
sample_abbreviations_tmp = [];
sample_abbreviations_tmp = self.get_sampleNameAbbreviations_experimentIDAndSampleTypeAndTimePointAndSampleName_dataStage01Normalized(experiment_id_I,st,tp,sn);
sample_abbreviations.extend(sample_abbreviations_tmp);
                        sample_types_lst.extend([st for i in range(len(sample_abbreviations_tmp))]);
elif sample_name_abbreviations_I:
sample_abbreviations = sample_name_abbreviations_I;
                # TODO: query sample types from sample name abbreviations and time-point from data_stage01_isotopomer_normalized (sample_types_lst is left empty in this branch)
else:
# get sample names and sample name abbreviations
sample_abbreviations = [];
sample_types = ['Unknown','QC'];
sample_types_lst = [];
for st in sample_types:
sample_abbreviations_tmp = [];
sample_abbreviations_tmp = self.get_sampleNameAbbreviations_experimentIDAndSampleTypeAndTimePoint_dataStage01Normalized(experiment_id_I,st,tp);
sample_abbreviations.extend(sample_abbreviations_tmp);
sample_types_lst.extend([st for i in range(len(sample_abbreviations_tmp))]);
for sna_cnt,sna in enumerate(sample_abbreviations):
print('Calculating spectrum accuracy from isotopomer normalized for sample name abbreviation ' + sna);
# get the scan_types
if scan_types_I:
scan_types = [];
scan_types_tmp = [];
scan_types_tmp = self.get_scanTypes_experimentIDAndTimePointAndSampleAbbreviationsAndSampleType_dataStage01Normalized(experiment_id_I,tp,sna,sample_types_lst[sna_cnt]);
scan_types = [st for st in scan_types_tmp if st in scan_types_I];
else:
scan_types = [];
scan_types = self.get_scanTypes_experimentIDAndTimePointAndSampleAbbreviationsAndSampleType_dataStage01Normalized(experiment_id_I,tp,sna,sample_types_lst[sna_cnt]);
for scan_type in scan_types:
print('Calculating spectrum accuracy for scan type ' + scan_type)
# met_ids
if not met_ids_I:
met_ids = [];
met_ids = self.get_metIDs_experimentIDAndSampleAbbreviationAndTimePointAndSampleTypeAndScanType_dataStage01Normalized( \
experiment_id_I,sna,tp,sample_types_lst[sna_cnt],scan_type);
else:
met_ids = met_ids_I;
if not(met_ids): continue #no component information was found
for met in met_ids:
print('Calculating spectrum accuracy for metabolite ' + met);
replicate_numbers = [];
replicate_numbers = self.get_replicateNumbers_experimentIDAndSampleAbbreviationAndTimePointAndScanTypeAndMetID_dataStage01Normalized( \
experiment_id_I,sna,tp,scan_type,met);
peakSpectrum_normalized_lst = [];
for rep in replicate_numbers:
print('Calculating spectrum accuracy for replicate_number ' + str(rep));
#get data
peakData_I = {};
peakData_I = self.get_dataNormalized_experimentIDAndSampleAbbreviationAndTimePointAndScanTypeAndMetIDAndReplicateNumber_dataStage01Normalized( \
experiment_id_I,sna,tp,scan_type,met,rep);
fragment_formulas = list(peakData_I.keys());
peakSpectrum_corrected, peakSpectrum_normalized = mids.extract_peakList_normSum(\
peakData_I, fragment_formulas, True);
peakSpectrum_normalized_lst.append(peakSpectrum_normalized);
peakSpectrum_accuracy = mids.calculate_fragmentSpectrumAccuracy_normSum(peakSpectrum_normalized_lst);
# update data_stage01_isotopomer_spectrumAccuracy
for frag,accuracy in peakSpectrum_accuracy.items():
if accuracy:
row = [];
row = data_stage01_isotopomer_spectrumAccuracyNormSum(experiment_id_I, sna, sample_types_lst[sna_cnt], tp, met,frag, accuracy, scan_type, True);
self.session.add(row);
self.session.commit();
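
# --------------------------------------------------------------------------
# Illustrative usage (editor's sketch, not part of the original module).
# The enclosing class, its constructor and the SQLAlchemy session handling
# are defined elsewhere in this package; the class name, experiment id and
# filter values below are placeholders rather than real project identifiers.
#
#   exe = stage01_isotopomer_spectrumAccuracy_execute(session)   # hypothetical constructor
#   exe.execute_analyzeSpectrumAccuracyNormSum(
#       'my_experiment_01',                        # placeholder experiment id
#       sample_name_abbreviations_I=['strain01'],  # placeholder abbreviation
#       scan_types_I=['EPI'],                      # placeholder scan type
#       )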
| {
"content_hash": "03cfc0052846d8aee1dbce2813826b09",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 188,
"avg_line_length": 69.31288343558282,
"alnum_prop": 0.580545229244114,
"repo_name": "dmccloskey/SBaaS_isotopomer",
"id": "ebe1355402facffb24ba4d5d0c08f0ced172a0d2",
"size": "11305",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SBaaS_isotopomer/stage01_isotopomer_spectrumAccuracy_execute.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "544550"
}
],
"symlink_target": ""
} |
{
'name': 'Webkit Report Engine',
'description': """
This module adds a new Report Engine based on the WebKit library (wkhtmltopdf) to support reports designed in HTML + CSS.
=====================================================================================================================
The module structure and some code is inspired by the report_openoffice module.
The module allows:
------------------
- HTML report definition
- Multi header support
- Multi logo
- Multi company support
 - HTML and CSS-3 support (within the limits of the current WebKit version)
- JavaScript support
- Raw HTML debugger
- Book printing capabilities
- Margins definition
- Paper size definition
Multiple headers and logos can be defined per company. CSS style, header and
footer body are defined per company.
For a sample report see also the webkit_report_sample module, and this video:
http://files.me.com/nbessi/06n92k.mov
Requirements and Installation:
------------------------------
This module requires the ``wkhtmltopdf`` library to render HTML documents as
PDF. Version 0.9.9 or later is necessary, and can be found at
http://code.google.com/p/wkhtmltopdf/ for Linux, Mac OS X (i386) and Windows (32bits).
After installing the library on the OpenERP Server machine, you may need to set
the path to the ``wkhtmltopdf`` executable file in a system parameter named
``webkit_path`` in Settings -> Customization -> Parameters -> System Parameters.
If you are experiencing missing header/footer problems on Linux, be sure to
install a 'static' version of the library. The default ``wkhtmltopdf`` on
Ubuntu is known to have this issue.
TODO:
-----
* JavaScript support activation deactivation
* Collated and book format support
* Zip return for separated PDF
* Web client WYSIWYG
""",
'version': '0.9',
'depends': ['base','report'],
'author': 'Camptocamp',
    'category': 'Reporting', # i.e. a technical module, not shown in the Application install menu
    'url': 'http://www.camptocamp.com/',
'data': [ 'security/ir.model.access.csv',
'data.xml',
'wizard/report_webkit_actions_view.xml',
'company_view.xml',
'header_view.xml',
'ir_report_view.xml',
],
'demo': [
"report/webkit_report_demo.xml",
],
'test': [
"test/print.yml",
],
'installable': True,
'auto_install': False,
'images': ['images/companies_webkit.jpeg','images/header_html.jpeg','images/header_img.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
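# Editor's note (illustrative sketch, not part of the original manifest): the
# ``webkit_path`` system parameter described above can also be set from server
# side code through the ``ir.config_parameter`` model rather than through the
# Settings menu. A minimal sketch, assuming an OpenERP cursor ``cr`` and user
# id ``uid`` are in scope and that wkhtmltopdf is installed under /usr/local/bin
# (both are assumptions for the example):
#
#   param_obj = self.pool.get('ir.config_parameter')
#   param_obj.set_param(cr, uid, 'webkit_path', '/usr/local/bin/wkhtmltopdf')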
| {
"content_hash": "4619491e7d4b545521eb2ddfdcd93cef",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 117,
"avg_line_length": 36.0958904109589,
"alnum_prop": 0.6402277039848198,
"repo_name": "diogocs1/comps",
"id": "b8557e7747ef39ba943898ae6a3b2ec03966ebd9",
"size": "4029",
"binary": false,
"copies": "89",
"ref": "refs/heads/master",
"path": "web/addons/report_webkit/__openerp__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "701"
},
{
"name": "CSS",
"bytes": "856533"
},
{
"name": "HTML",
"bytes": "299671"
},
{
"name": "Java",
"bytes": "620166"
},
{
"name": "JavaScript",
"bytes": "5844302"
},
{
"name": "Makefile",
"bytes": "21002"
},
{
"name": "PHP",
"bytes": "14259"
},
{
"name": "Python",
"bytes": "10647376"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "17746"
},
{
"name": "XSLT",
"bytes": "120278"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from models import Article,TagInfo,AboutMe,Toys
from common import sync_es,trans_localdate_format
# Register your models here.
def make_published(self, request, queryset):
rows_updated = queryset.update(status='p')
if rows_updated == 1:
message_bit = "1 article was"
else:
message_bit = "%s articles were" % rows_updated
self.message_user(request, "%s successfully marked as published." % message_bit)
#add manual sync to es
def sync_to_elasticsearch(self, request, queryset):
for i in queryset:
try:
esinsert = {}
esinsert['title'] = i.title
esinsert['content'] = i.content
esinsert['status'] = i.status
esinsert['createtime'] = trans_localdate_format(i.timestamp)
#print esinsert
sync_es(esinsert,i.id)
self.message_user(request, "sync to elasticsearch successfully.")
except:
self.message_user(request, "sync to elasticsearch happen wrong.")
class ArticleAdmin(admin.ModelAdmin):
list_display = ['title', 'status']
#ordering = ['title']
actions = [make_published,sync_to_elasticsearch]
readonly_fields = ('last_modified','timestamp',)
admin.site.register(Article,ArticleAdmin)
admin.site.register(TagInfo)
admin.site.register(Toys)
admin.site.register(AboutMe)
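# Editor's sketch (optional addition, not in the original file): Django admin
# actions can carry a human-readable label via the ``short_description``
# attribute; without it the action drop-down shows the raw function name.
#
#   make_published.short_description = "Mark selected articles as published"
#   sync_to_elasticsearch.short_description = "Sync selected articles to Elasticsearch"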
| {
"content_hash": "401486024752de0b0c42d396ef881188",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 84,
"avg_line_length": 34.5,
"alnum_prop": 0.663768115942029,
"repo_name": "mnpiozhang/myblog",
"id": "61dd2d4ba0e592bb95659a084b5022688e3d566f",
"size": "1380",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blogweb/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "47232"
},
{
"name": "HTML",
"bytes": "41104"
},
{
"name": "Python",
"bytes": "53295"
}
],
"symlink_target": ""
} |
""" Google BigQuery support """
def _try_import():
# since pandas is a dependency of pandas-gbq
# we need to import on first use
try:
import pandas_gbq
except ImportError:
# give a nice error message
raise ImportError("Load data from Google BigQuery\n"
"\n"
"the pandas-gbq package is not installed\n"
"see the docs: https://pandas-gbq.readthedocs.io\n"
"\n"
"you can install via pip or conda:\n"
"pip install pandas-gbq\n"
"conda install pandas-gbq -c conda-forge\n")
return pandas_gbq
def read_gbq(query, project_id=None, index_col=None, col_order=None,
reauth=False, verbose=True, private_key=None, dialect='legacy',
**kwargs):
r"""Load data from Google BigQuery.
The main method a user calls to execute a Query in Google BigQuery
and read results into a pandas DataFrame.
This function requires the `pandas-gbq package
<https://pandas-gbq.readthedocs.io>`__.
Authentication to the Google BigQuery service is via OAuth 2.0.
- If "private_key" is not provided:
By default "application default credentials" are used.
If default application credentials are not found or are restrictive,
user account credentials are used. In this case, you will be asked to
grant permissions for product name 'pandas GBQ'.
- If "private_key" is provided:
Service account credentials will be used to authenticate.
Parameters
----------
query : str
SQL-Like Query to return data values
project_id : str
Google BigQuery Account project ID.
index_col : str (optional)
Name of result column to use for index in results DataFrame
col_order : list(str) (optional)
List of BigQuery column names in the desired order for results
DataFrame
reauth : boolean (default False)
Force Google BigQuery to reauthenticate the user. This is useful
if multiple accounts are used.
verbose : boolean (default True)
Verbose output
private_key : str (optional)
Service account private key in JSON format. Can be file path
or string contents. This is useful for remote server
        authentication (e.g. Jupyter/IPython notebook on remote host)
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL, which is
compliant with the SQL 2011 standard. For more information
see `BigQuery SQL Reference
<https://cloud.google.com/bigquery/sql-reference/>`__
`**kwargs` : Arbitrary keyword arguments
configuration (dict): query config parameters for job processing.
For example:
configuration = {'query': {'useQueryCache': False}}
For more information see `BigQuery SQL Reference
<https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query>`__
Returns
-------
df: DataFrame
DataFrame representing results of query
"""
pandas_gbq = _try_import()
return pandas_gbq.read_gbq(
query, project_id=project_id,
index_col=index_col, col_order=col_order,
reauth=reauth, verbose=verbose,
private_key=private_key,
dialect=dialect,
**kwargs)
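
# Example (editor's illustration, not part of the pandas source). The query,
# project id and table are placeholders; the 'standard' dialect is used so
# that the backtick table syntax below is valid:
#
#   df = read_gbq("SELECT name, SUM(number) AS total "
#                 "FROM `bigquery-public-data.usa_names.usa_1910_2013` "
#                 "GROUP BY name",
#                 project_id='my-project-id', dialect='standard')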
def to_gbq(dataframe, destination_table, project_id, chunksize=10000,
verbose=True, reauth=False, if_exists='fail', private_key=None):
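    r"""Write a DataFrame to a Google BigQuery table.

    This function requires the `pandas-gbq package
    <https://pandas-gbq.readthedocs.io>`__; all parameters are passed through
    to :func:`pandas_gbq.to_gbq` unchanged.
    """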
pandas_gbq = _try_import()
pandas_gbq.to_gbq(dataframe, destination_table, project_id,
chunksize=chunksize,
verbose=verbose, reauth=reauth,
if_exists=if_exists, private_key=private_key)
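
# Example (editor's illustration; the DataFrame, destination table and project
# id are placeholders):
#
#   to_gbq(df, 'my_dataset.my_table', project_id='my-project-id', if_exists='append')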
| {
"content_hash": "0b9780c1778e859122718003f038a771",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 94,
"avg_line_length": 36.25,
"alnum_prop": 0.6286079182630907,
"repo_name": "winklerand/pandas",
"id": "b452b0cf5ddd4efe956c8f1ea8383ca09e28c186",
"size": "3915",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pandas/io/gbq.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4071"
},
{
"name": "C",
"bytes": "493226"
},
{
"name": "C++",
"bytes": "17353"
},
{
"name": "HTML",
"bytes": "551706"
},
{
"name": "Makefile",
"bytes": "907"
},
{
"name": "PowerShell",
"bytes": "2972"
},
{
"name": "Python",
"bytes": "12249109"
},
{
"name": "R",
"bytes": "1177"
},
{
"name": "Shell",
"bytes": "23114"
},
{
"name": "Smarty",
"bytes": "2045"
}
],
"symlink_target": ""
} |