repo_name (string, len 5-92) | path (string, len 4-221) | copies (string, 19 classes) | size (string, len 4-6) | content (string, len 766-896k) | license (string, 15 classes) | hash (int64) | line_mean (float64, 6.51-99.9) | line_max (int64, 32-997) | alpha_frac (float64, 0.25-0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5-13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fiam/blangoblog | blango/forms.py | 1 | 1444 | from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
from django.contrib.auth.models import User
from django import forms
from blango.models import Comment
from blango.jsforms import JSModelForm
# This violates the DRY principle, but it's the only
# way I found for editing staff comments from
# the Django admin application
class CommentForm(JSModelForm):
author = forms.CharField(label=_('Name'), max_length=256)
author_uri = forms.CharField(label=_('Website'), max_length=256, required=False)
author_email = forms.EmailField(label=_('Email'), help_text=mark_safe('<span class="small">%s</span>' % _('(Won\'t be published)')))
class Meta:
model = Comment
fields = ('author', 'author_uri', 'author_email', 'body')
def save(self, entry):
self.instance.entry = entry
super(CommentForm, self).save()
def clean_author(self):
author = self.cleaned_data['author']
try:
User.objects.get(username=author)
raise forms.ValidationError(_('This username belongs to a registered user'))
except User.DoesNotExist:
return author
class UserCommentForm(forms.ModelForm):
class Meta:
model = Comment
fields = ('body', )
def save(self, entry):
self.instance.user = self.user
self.instance.entry = entry
super(UserCommentForm, self).save()
| bsd-3-clause | -577,320,069,726,871,300 | 31.088889 | 136 | 0.668975 | false | 4.03352 | false | false | false |
stryder199/RyarkAssignments | Assignment2/web2py/applications/cqg/question/hamming.py | 1 | 4192 | import os
import file_util
import html_util
import hamming_util
# table properties
BORDER = 1
MIN_CELL_WIDTH = 36
MIN_CELL_HEIGHT = 16
# superclass for our two question types
class hamming:
def __init__(self,question_library_path,question_path):
self.question_library_path = question_library_path
self.question_path = question_path
config = file_util.dynamic_import(os.path.join(
question_library_path,question_path,'cqg_config.py'))
self.parity = config.parity
self.code_word = list(
hamming_util.generate_code_word(config.message,config.parity))
self.code_word_indexes = config.code_word_indexes
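# Illustrative sketch only (the real cqg_config.py ships with each question in
# the library and is not shown here): judging from the attribute accesses above
# and in the subclasses, the dynamically imported config is assumed to provide
#
#     parity = 0                     # 0 = even parity, anything else = odd
#     message = '1011'               # data bits passed to hamming_util.generate_code_word()
#     code_word_indexes = [3, 5, 7]  # one-relative hotspot positions for the 'fill'
#                                    # question; a single int for 'find_error'
#
# The field names are real; the example values are made up.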
def get_question_library_path(self):
return self.question_library_path
def get_question_path(self):
return self.question_path
def get_css(self,answer):
return ('#hamming_table td { '
'text-align:center;'
'width:%i; height:%i; }'%(MIN_CELL_WIDTH,MIN_CELL_HEIGHT)
+ html_util.make_css_borders(BORDER,'hamming_table'))
class fill(hamming):
def __init__(self,question_library_path,question_path):
hamming.__init__(self,question_library_path,question_path)
# replace code_word hotspots with None
for i in self.code_word_indexes:
self.code_word[i-1] = None
def get_html(self,answer):
# generate question description
if self.parity == 0:
parity_string = 'even'
else:
parity_string = 'odd'
html = "<p>Fill in the bits for a valid Hamming code " + \
'using <b>' + parity_string + '</b> parity:</p>'
# generate a list of selects with bits specified by answer
# bits = list of len(code_word) items where bits[i]:
# code_word[i] if code_word[i] in [0,1]
# a select box if code_word[i] is None
indexes = range(1,len(self.code_word)+1) # one-relative
bits = []
for i,bit in enumerate(self.code_word,1):
if bit == None:
name = 'bit_' + str(i)
bit = html_util.get_select(
name,['','0','1'],answer[name])
bits.append(bit)
# generate table containing select lists
html += '<center>'
html += html_util.get_table([indexes,bits],'id="hamming_table"')
html += '</center>'
return html
def get_input_element_ids(self):
ids = []
for i,code in enumerate(self.code_word,1):
if code == None:
ids.append('bit_' + str(i))
return ids
def check_answer(self,answer):
# fill code_word with bits specified by answer
new_code_word = ''
for i,bit in enumerate(self.code_word,1):
if bit == None:
bit = answer['bit_' + str(i)]
# every input must be binary
if bit not in ['0','1']:
return False
new_code_word += bit
# check correctness of new_code_word
return hamming_util.check_code_word(
new_code_word,self.parity) == 0
class find_error(hamming):
def __init__(self,question_library_path,question_path):
hamming.__init__(self,question_library_path,question_path)
# flip bit specified by code_word_indexes
if self.code_word[self.code_word_indexes-1] == '0':
self.code_word[self.code_word_indexes-1] = '1'
else:
self.code_word[self.code_word_indexes-1] = '0'
def get_html(self,answer):
# generate question description
if self.parity == 0:
parity_string = 'even'
else:
parity_string = 'odd'
html = '<p>Assume exactly one bit is incorrect.</p>' + \
'Indicate the incorrect bit ' + \
'using <b>' + parity_string + '</b> parity:'
# generate list of radio buttons with
# the bit specified by answer set
indexes = range(1,len(self.code_word)+1) # one-relative
radio_buttons = []
for i in indexes:
is_set = answer['incorrect_bit'] == str(i)
radio_buttons.append(html_util.get_radio_button(
'incorrect_bit',str(i),is_set))
# generate table containing radio buttons
html += '<center>'
html += html_util.get_table(
[indexes,self.code_word,radio_buttons],'id="hamming_table"')
html += '</center>'
return html
def get_input_element_ids(self):
return ['incorrect_bit']
def check_answer(self,answer):
if not (answer['incorrect_bit'] != None and
answer['incorrect_bit'].isdigit()):
return False
code_word_string = ''
for code in self.code_word:
code_word_string += code
return int(answer['incorrect_bit']) == \
hamming_util.check_code_word(code_word_string,self.parity)
| mit | 5,480,421,653,543,299,000 | 28.111111 | 66 | 0.671756 | false | 2.952113 | true | false | false |
gwct/core | core2/isofilter.py | 1 | 9979 | #!/usr/bin/python
########################################################################################
#Script to filter out isoforms from peptide files in FASTA ENSEMBL or NCBI format. This
#script can also add an easier to read species label to each sequence within the file.
#
#Sample ENSEMBL usage: python isoform_filter.py -i [input_fasta_file] -t ens -l [species_label] -o [output_filename]
#
#Sample NCBI usage: python isoform_filter.py -i [input_fasta_file] -t ncbi -g [toplevel_gff_file] -l [species_label] -o [output_filename]
#
#To just do the relabeling, set -f 0. You shouldn't need a gff file for the NCBI file in
#this case. For NCBI relabeling, the gene ID is also moved to the front of the title line.
#
#Written by: Gregg Thomas, Summer 2014
#
#NCBI filter command kindly provided by the folks at NCBI.
#
########################################################################################
import sys, re, os, argparse
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/corelib/")
import core
############################################
#Function definitions.
def optParse():
#This function handles the command line options.
parser = argparse.ArgumentParser()
parser.add_argument("-i", dest="input_file", help="An input file containing peptides from a species in FASTA format");
parser.add_argument("-t", dest="file_type", help="Currently supported file types are ENSEMBL and NCBI peptide files. Enter as 'ens' or 'ncbi' here. Note: If file type is NCBI you will also need to specify the top level gff file with -g")
parser.add_argument("-g", dest="gff_file", help="If file type is NCBI, the top level gff file is also needed and should be specified here.");
parser.add_argument("-l", dest="spec_label", help="A species label to add to the gene ID of each sequence.", default="");
parser.add_argument("-o", dest="output_file", help="The desired name of the output file. If none is specified the default is [input_filename]_isofiltered.fa or [input_filename]_isofiltered_relabel.fa");
args = parser.parse_args();
if None in [args.input_file, args.file_type, args.output_file]:
sys.exit(core.errorOut(1, "An input file (-i), input file type (-t), and an output file (-o) must all be specified"));
if args.file_type not in ['ens', 'ncbi']:
sys.exit(core.errorOut(2, "File type (-t) must be one of either 'ens' (Ensembl) or 'ncbi'"));
if args.file_type == "ens" and args.gff_file != None:
sys.exit(core.errorOut(3, "A gff file (-g) should not be specified with file type ens"));
if args.file_type == "ncbi" and args.gff_file == None:
sys.exit(core.errorOut(4, "A gff file (-g) must be specified with file type ncbi"));
return args.input_file, args.file_type, args.gff_file, args.spec_label, args.output_file;
############################################
def ensFilter(inseqs, spec_label, outfilename):
print "Indexing", len(inseqs), "sequences to be filtered.";
print "Parsing identifiers...";
for title in inseqs:
geneid = title[title.index("gene:") + 5:title.index("gene:") + 23];
if geneid in identDict:
identDict[geneid].append((title, inseqs[title]));
else:
identDict[geneid] = [];
identDict[geneid].append((title, inseqs[title]));
sys.stderr.write('\b');
print "Filtering and writing sequences...";
numbars, donepercent, i = 0,[],0;
for key in identDict:
numbars, donepercent = core.loadingBar(i, len(identDict), donepercent, numbars);
if len(identDict[key]) == 1:
long_title, long_seq = identDict[key][0];
else:
titlelist = [];
seqlist = [];
for tup in identDict[key]:
cur_itle, cur_seq = tup;
titlelist.append(cur_itle);
seqlist.append(cur_seq);
long_seq = max(seqlist, key=len)
long_title = titlelist[seqlist.index(long_seq)];
new_title = ">" + spec_label + "_" + long_title[1:];
core.writeSeq(outfilename, long_seq, new_title);
i += 1;
pstring = "100.0% complete.";
sys.stderr.write('\b' * len(pstring) + pstring);
print "\nDone!";
print i, "sequences written.";
print len(inseqs) - i, "sequences filtered.";
############################################
def ncbiFilter(inseqs, gff_file, spec_label, outfilename):
numbars, donepercent, i = 0, [], 0;
print "Obtaining longest isoforms from .gff file...";
cmd = "zcat " + gff_file + " | awk \'BEGIN{FS=\" \";OFS=\"|\"}$3==\"CDS\"{if($4<$5){print $5-$4+1,$9}else{print $4-$5+1,$9}}\' | grep \"[NX]P[_]\" | sed \'s/\([0-9]*\).*GeneID:\([0-9]*\).*\([NX]P[_][0-9]*\.[0-9]*\).*/\\1|\\2|\\3/\' | awk \'BEGIN{FS=\"|\";OFS=\"\t\";gene=\"\";acc=\"\";len=0}{if(acc!=$3){print gene,acc,len/3-1;gene=$2;acc=$3;len=$1}else{len=len+$1}}END{print gene,acc,len/3-1}\' | sort -k1,1n -k3,3nr -k2,2 | awk \'BEGIN{FS=\" \";OFS=\" \";gene=\"\";acc=\"\";len=0}{if(gene!=$1){print $1,$2,$3};gene=$1;acc=$2;len=$3}\' > ncbi_isoform_filter_tmp11567.txt"
os.system(cmd);
tmpFile = open("ncbi_isoform_filter_tmp11567.txt", "r");
tmpLines = tmpFile.readlines();
tmpFile.close();
os.system("rm ncbi_isoform_filter_tmp11567.txt");
longest_isos = [];
for each in tmpLines:
longest_isos.append(each.split("\t")[1]);
longest_isos = filter(None, longest_isos);
print "Writing longest isoforms to output file...";
count = 0;
for title in inseqs:
numbars, donepercent = core.loadingBar(i, len(inseqs), donepercent, numbars);
i += 1;
found = 0;
for gid in longest_isos:
if gid in title:
gid = title[title.index("P_")-1:title.index("|",title.index("P_"))]
new_title = ">" + spec_label + "_" + gid + " |" + title[1:title.index("P_")-1] + title[title.index("|",title.index("P_"))+1:];
core.writeSeq(outfilename, inseqs[title], new_title);
count += 1;
break;
pstring = "100.0% complete.";
sys.stderr.write('\b' * len(pstring) + pstring);
print "\nDone!";
print count, "sequences written.";
print len(inseqs) - count, "sequences filtered.";
############################################
#Main Block
############################################
infilename, in_type, gff_file, label, outfilename = optParse();
pad = 50;
print "=======================================================================";
print "\t\t\t" + core.getDateTime();
print core.spacedOut("Filtering isoforms from:", pad), infilename;
if in_type == "ens":
print core.spacedOut("File type:", pad), "Ensembl";
if in_type == "ncbi":
print core.spacedOut("File type:", pad), "NCBI";
print core.spacedOut("Using GFF file:", pad), gff_file;
if in_type == "crow":
print core.spacedOut("File type:", pad), "Crow";
if label != "":
print core.spacedOut("Adding label to beginning of FASTA headers:", pad), label;
print core.spacedOut("Writing output to:", pad), outfilename;
core.filePrep(outfilename);
print "--------------------------";
identDict = {};
ins, skip_flag = core.fastaReader(infilename);
if in_type == "ens":
ensFilter(ins, label, outfilename);
elif in_type == "ncbi":
ncbiFilter(ins, gff_file, label, outfilename);
print "=======================================================================";
## DEFUNCT FILTER FOR THE CROW FILES
# elif in_type == "crow":
# crowFilter(ins, label, outfilename);
# def crowFilter(inSeqs, filterflag, speclabel, outFilename):
# rotator = 0;
# numbars = 0;
# donepercent = [];
# i = 0;
# if filterflag == 1:
# print "Indexing", len(inSeqs), "sequences to be filtered.";
# print "Parsing identifiers...";
# for each in inSeqs:
# rotator = core.loadingRotator(i, rotator, 100)
# curTitle, curSeq = core.getFastafromInd(inFilename, each[0], each[1], each[2], each[3]);
# if "gene=" not in curTitle:
# print curTitle;
# continue;
# geneid = curTitle[curTitle.index("gene=") + 5:].strip();
# if geneid in identDict:
# identDict[geneid].append(each);
# else:
# identDict[geneid] = [];
# identDict[geneid].append(each);
# i = i + 1;
# sys.stderr.write('\b');
# print "Filtering and writing sequences...";
# i = 0;
# count = 0;
# for key in identDict:
# numbars, donepercent = core.loadingBar(i, len(identDict), donepercent, numbars);
# if len(identDict[key]) == 1:
# curTitle, curSeq = core.getFastafromInd(inFilename, identDict[key][0][0], identDict[key][0][1], identDict[key][0][2], identDict[key][0][3]);
# if speclabel != "":
# newTitle = ">" + speclabel + "_" + curTitle[1:];
# core.writeSeq(outFilename, curSeq, newTitle);
# else:
# core.writeSeq(outFilename, curSeq, curTitle);
# count = count + 1;
# else:
# titlelist = [];
# seqlist = [];
# for inds in identDict[key]:
# aTitle, aSeq = core.getFastafromInd(inFilename, inds[0], inds[1], inds[2], inds[3]);
# titlelist.append(aTitle);
# seqlist.append(aSeq);
# longseq = max(seqlist, key=len)
# for inds in identDict[key]:
# aTitle, aSeq = core.getFastafromInd(inFilename, inds[0], inds[1], inds[2], inds[3]);
# if aSeq == longseq:
# curTitle, curSeq = core.getFastafromInd(inFilename, inds[0], inds[1], inds[2], inds[3]);
# if speclabel != "":
# newTitle = ">" + speclabel + "_" + curTitle[1:];
# core.writeSeq(outFilename, curSeq, newTitle);
# else:
# core.writeSeq(outFilename, curSeq, curTitle);
# count = count + 1;
# break;
# i = i + 1;
# pstring = "100.0% complete.";
# sys.stderr.write('\b' * len(pstring) + pstring);
# print "\nDone!";
# print count, "out of", len(identDict), "identifiers written.";
# print len(inSeqs) - count, "sequences filtered.";
# else:
# print "Relabeling...";
# for seq in inSeqs:
# numbars, donepercent = core.loadingBar(i, len(inSeqs), donepercent, numbars);
# i = i + 1;
# curTitle, curSeq = core.getFastafromInd(inFilename, seq[0], seq[1], seq[2], seq[3]);
# newTitle = ">" + speclabel + "_" + curTitle[1:];
# core.writeSeq(outFilename, curSeq, newTitle);
# pstring = "100.0% complete.";
# sys.stderr.write('\b' * len(pstring) + pstring);
# print "\nDone!";
| gpl-3.0 | 8,496,657,265,609,689,000 | 32.712838 | 573 | 0.605772 | false | 2.78044 | false | false | false |
dodger487/MIST | data/makeSnippets.py | 1 | 5346 | #!/usr/bin/env python
# Chris Riederer
# Google, Inc
# 2014-07-25
import test_detect
import numpy as np
import os
import json
import random
import sys
def makeNegativeSnippets(runData, number, snipPrefixTime=100000000, snipPostfixTime=500000000):
return makeSnippets(runData, True, numberNegative=number, snipPrefixTime=snipPrefixTime, snipPostfixTime=snipPostfixTime)
def makePositiveSnippets(runData, snipPrefixTime=100000000, snipPostfixTime=500000000):
return makeSnippets(runData, False, snipPrefixTime=snipPrefixTime, snipPostfixTime=snipPostfixTime)
def makeSnippets(runData, isNegative, numberNegative=None, snipPrefixTime=10000000, snipPostfixTime=100000000):
"""Given a runData file, makes smaller snippets of positive examples for training
runData: the JSON object representation of a recording
snipPrefixTime: the time, in NANOSECONDS, preceding the label time that we're
putting in the snippet
snipPrefixTime: the time, in NANOSECONDS, after the label time that we're
putting in the snippet
"""
data = np.array(runData["magnetometer"])
data = data[data[:, 2:].any(1)]
domain = data[:,0]
if isNegative and len(runData['labels']) != 0:
raise Exception("Length of labels should be 0 when generating negative examples")
elif not isNegative and len(runData['labels']) == 0:
raise Exception("Length of labels cannot be 0 when generating positive examples")
elif isNegative:
# generate start point for snippets, and ensure snippet is entirely in recorded data
possibleStartPoints = domain[domain < domain[-1] - snipPostfixTime - snipPostfixTime]
labels = [[labelTime, 1] for labelTime in random.sample(possibleStartPoints, numberNegative)]
else:
labels = runData['labels']
snippets = []
for index, (labelTime, label) in enumerate(labels):
snippet = runData.copy()
if isNegative:
snippet['labels'] = []
else:
snippet['labels'] = [[labelTime, label]]
snippet['filename'] = "%s-%02d.json" % (runData['filename'].rsplit('.')[0], index)
snippetIndices = (domain >= labelTime-snipPrefixTime) & (domain < labelTime+snipPostfixTime)
snippet['magnetometer'] = list(map(list, data[snippetIndices, :])) # convert back to python list, so JSON can serialize
snippets.append(snippet)
return snippets
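# Sketch of the runData layout the helpers in this file assume (inferred from
# the array slicing above, not from a schema definition):
#
#     runData = {
#         'filename': 'run-01.json',                  # hypothetical name
#         'labels': [[1403638421000000000, 1], ...],  # [timestamp_ns, label] pairs
#         'magnetometer': [[t_ns, c1, x, y, z], ...], # col 0 = time in ns; col 1 is
#     }                                               # not used here; cols 2+ = readings
#
# Rows whose trailing columns are all zero are dropped before any processing.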
def makeSnippet(runData, snipId, startTime, snipLength=600000000):
data = np.array(runData["magnetometer"])
data = data[data[:, 2:].any(1)]
domain = data[:,0]
snippet = runData.copy()
labels = [[labelTime, label] for labelTime, label in runData['labels'] if startTime < labelTime < startTime+snipLength]
snippet['labels'] = labels
# todo: filename
snippet['filename'] = "%s-hn-%02d.json" % (runData['filename'].rsplit('.')[0], snipId)
snippetIndices = (domain >= startTime) & (domain < startTime+snipLength)
snippet['magnetometer'] = list(map(list, data[snippetIndices, :])) # convert back to python list, so JSON can serialize
return snippet
def findHardNegatives(runData, snipLength=600000000):
"""Find portions of a signal that are difficult for our detector to realize are negative"""
# TODO: initially writing this just for negative runData files... should make it work with everything
detector = test_detect.OriginalDetector()
snippet = runData.copy()
data = np.array(runData["magnetometer"])
data = data[data[:, 2:].any(1)]
domain = data[:,0]
min_cost = float('inf')
for startTime in domain[(domain < domain[-1] - snipLength)]:
snippetIndices = (domain >= startTime) & (domain < startTime+snipLength)
snippet['magnetometer'] = list(map(list, data[snippetIndices, :])) # convert back to python list, so JSON can serialize
snippet['labels'] = []
cost = detector.evaluateCost(snippet, True)
if cost < min_cost:
min_cost = cost
worst_snip = snippet.copy()
return worst_snip
def createSnippetsFromRunDataList(runDataList):
runDataList = test_detect.GetRunDataFromArgs(sys.argv[1:])
for runData in runDataList:
snips = createSnippetsFromPlot(runData)
for snip in snips:
newFilename = os.path.join('relabeled', snip['filename'])
with open(newFilename, 'w') as f:
print newFilename
json.dump(snip, f)
def createSnippetsFromPlot(runData, inputLabels=[], snipLength=600000000):
"""This creates a plot from runData. When the user clicks on the plot, a snippet
of length snipLength nanoseconds is created and plotted. The user can repeat
this process as many times as he or she likes. When the user closes the
original plot, the list of the created snippets is returned.
"""
snippets = []
def onclick(event):
startTime = event.xdata
print "Start time of snippet: %16d" % int(startTime)
snipId = len(snippets)
snip = makeSnippet(runData, snipId, startTime, snipLength=snipLength)
snippets.append(snip) # add to snippets
test_detect.PlotData(snip) # plot new snip
test_detect.pl.show()
test_detect.PlotData(runData, inputLabels=inputLabels)
fig = test_detect.pl.gcf()
cid = fig.canvas.mpl_connect('button_press_event', onclick)
test_detect.pl.show()
return snippets
if __name__ == '__main__':
runDataList = test_detect.GetRunDataFromArgs(sys.argv[1:])
createSnippetsFromRunDataList(runDataList)
# print sum([len(runData['labels']) for runData in runDataList])
| apache-2.0 | -2,253,044,168,712,271,600 | 39.195489 | 123 | 0.721848 | false | 3.568758 | true | false | false |
rhildred/rhildred.github.io | tag_generator.py | 1 | 1427 | #!/usr/bin/env python
'''
tag_generator.py
Copyright 2017 Long Qian
Contact: [email protected]
This script creates tags for your Jekyll blog hosted by Github page.
No plugins required.
'''
import glob
import os
import re
post_dir = '_posts/'
tag_dir = 'tag/'
filenames = glob.glob(post_dir + '*')
total_tags = []
for filename in filenames:
f = open(filename, 'r')
crawl = False
for line in f:
if crawl:
current_tags = line.strip().split()
if current_tags[0] == 'tags:':
total_tags.extend(current_tags[1:])
crawl = False
break
if line.strip() == '---':
if not crawl:
crawl = True
else:
crawl = False
break
f.close()
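# The loop above expects Jekyll front matter at the top of each post, with all
# tags space-separated on a single line, e.g. (illustrative post header, not a
# file from this repo):
#
#     ---
#     layout: post
#     title: "Some post"
#     tags: python jekyll github-pages
#     ---
#
# Only the block between the first pair of '---' lines is inspected.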
total_tags = set(total_tags)
old_tags = glob.glob(tag_dir + '*.md')
for tag in old_tags:
os.remove(tag)
for tag in total_tags:
sTag = re.sub("^\.", "", tag)
tag_filename = tag_dir + sTag.lower().replace('.', '-') + '.md'
f = open(tag_filename, 'a')
write_str = '---\nlayout: tagpage\ntitle: \"Tag: ' + tag + '\"\ntag: ' + tag + '\nrobots: noindex\nexclude_from_search: true\ntagline: \'"Creative Active Individuals can only grow up in a society that emphasizes learning instead of teaching." - Chris Alexander\'\n---\n'
f.write(write_str)
f.close()
print("Tags generated, count", total_tags.__len__())
| mit | 1,607,549,321,355,180,300 | 25.924528 | 274 | 0.573931 | false | 3.272936 | false | false | false |
Samfox2/motioneye | motioneye/v4l2ctl.py | 1 | 11312 |
# Copyright (c) 2013 Calin Crisan
# This file is part of motionEye.
#
# motionEye is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import fcntl
import logging
import os.path
import re
import stat
import subprocess
import time
import utils
_resolutions_cache = {}
_ctrls_cache = {}
_ctrl_values_cache = {}
_DEV_V4L_BY_ID = '/dev/v4l/by-id/'
def find_v4l2_ctl():
try:
return subprocess.check_output('which v4l2-ctl', shell=True).strip()
except subprocess.CalledProcessError: # not found
return None
def list_devices():
global _resolutions_cache, _ctrls_cache, _ctrl_values_cache
logging.debug('listing v4l2 devices...')
try:
output = ''
started = time.time()
p = subprocess.Popen('v4l2-ctl --list-devices 2>/dev/null', shell=True, stdout=subprocess.PIPE, bufsize=1)
fd = p.stdout.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
while True:
try:
data = p.stdout.read(1024)
if not data:
break
except IOError:
data = ''
time.sleep(0.01)
output += data
if len(output) > 10240:
logging.warn('v4l2-ctl command returned more than 10k of output')
break
if time.time() - started > 3:
logging.warn('v4l2-ctl command ran for more than 3 seconds')
break
except subprocess.CalledProcessError:
logging.debug('failed to list devices (probably no devices installed)')
return []
try:
# try to kill the v4l2-ctl subprocess
p.kill()
except:
pass # nevermind
name = None
devices = []
for line in output.split('\n'):
if line.startswith('\t'):
device = line.strip()
persistent_device = find_persistent_device(device)
devices.append((device, persistent_device, name))
logging.debug('found device %(name)s: %(device)s, %(persistent_device)s' % {
'name': name, 'device': device, 'persistent_device': persistent_device})
else:
name = line.split('(')[0].strip()
# clear the cache
_resolutions_cache = {}
_ctrls_cache = {}
_ctrl_values_cache = {}
return devices
def list_resolutions(device):
global _resolutions_cache
device = utils.make_str(device)
if device in _resolutions_cache:
return _resolutions_cache[device]
logging.debug('listing resolutions of device %(device)s...' % {'device': device})
resolutions = set()
output = ''
started = time.time()
p = subprocess.Popen('v4l2-ctl -d "%(device)s" --list-formats-ext | grep -vi stepwise | grep -oE "[0-9]+x[0-9]+" || true' % {
'device': device}, shell=True, stdout=subprocess.PIPE, bufsize=1)
fd = p.stdout.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
while True:
try:
data = p.stdout.read(1024)
if not data:
break
except IOError:
data = ''
time.sleep(0.01)
output += data
if len(output) > 10240:
logging.warn('v4l2-ctl command returned more than 10k of output')
break
if time.time() - started > 3:
logging.warn('v4l2-ctl command ran for more than 3 seconds')
break
try:
# try to kill the v4l2-ctl subprocess
p.kill()
except:
pass # nevermind
for pair in output.split('\n'):
pair = pair.strip()
if not pair:
continue
width, height = pair.split('x')
width = int(width)
height = int(height)
if (width, height) in resolutions:
continue # duplicate resolution
if width < 96 or height < 96: # some reasonable minimal values
continue
if width % 16 or height % 16: # ignore non-modulo 16 resolutions
continue
resolutions.add((width, height))
logging.debug('found resolution %(width)sx%(height)s for device %(device)s' % {
'device': device, 'width': width, 'height': height})
if not resolutions:
logging.debug('no resolutions found for device %(device)s, using common values' % {'device': device})
# no resolution returned by v4l2-ctl call, add common default resolutions
resolutions = utils.COMMON_RESOLUTIONS
resolutions = list(sorted(resolutions, key=lambda r: (r[0], r[1])))
_resolutions_cache[device] = resolutions
return resolutions
def device_present(device):
device = utils.make_str(device)
try:
st = os.stat(device)
return stat.S_ISCHR(st.st_mode)
except:
return False
def find_persistent_device(device):
device = utils.make_str(device)
try:
devs_by_id = os.listdir(_DEV_V4L_BY_ID)
except OSError:
return device
for p in devs_by_id:
p = os.path.join(_DEV_V4L_BY_ID, p)
if os.path.realpath(p) == device:
return p
return device
def get_brightness(device):
return _get_ctrl(device, 'brightness')
def set_brightness(device, value):
_set_ctrl(device, 'brightness', value)
def get_contrast(device):
return _get_ctrl(device, 'contrast')
def set_contrast(device, value):
_set_ctrl(device, 'contrast', value)
def get_saturation(device):
return _get_ctrl(device, 'saturation')
def set_saturation(device, value):
_set_ctrl(device, 'saturation', value)
def get_hue(device):
return _get_ctrl(device, 'hue')
def set_hue(device, value):
_set_ctrl(device, 'hue', value)
def _get_ctrl(device, control):
global _ctrl_values_cache
device = utils.make_str(device)
if not device_present(device):
return None
if device in _ctrl_values_cache and control in _ctrl_values_cache[device]:
return _ctrl_values_cache[device][control]
controls = _list_ctrls(device)
properties = controls.get(control)
if properties is None:
logging.warn('control %(control)s not found for device %(device)s' % {
'control': control, 'device': device})
return None
value = int(properties['value'])
# adjust the value range
if 'min' in properties and 'max' in properties:
min_value = int(properties['min'])
max_value = int(properties['max'])
value = int(round((value - min_value) * 100.0 / (max_value - min_value)))
else:
logging.warn('min and max values not found for control %(control)s of device %(device)s' % {
'control': control, 'device': device})
logging.debug('control %(control)s of device %(device)s is %(value)s%%' % {
'control': control, 'device': device, 'value': value})
return value
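# Worked example of the range mapping above (numbers are hypothetical; each
# driver reports its own min/max): if v4l2-ctl lists brightness with min=-64,
# max=64 and value=0, the control is reported here as
#
#     round((0 - (-64)) * 100.0 / (64 - (-64))) = 50   # i.e. 50%
#
# _set_ctrl() below applies the inverse mapping before handing the value back
# to v4l2-ctl.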
def _set_ctrl(device, control, value):
global _ctrl_values_cache
device = utils.make_str(device)
if not device_present(device):
return
controls = _list_ctrls(device)
properties = controls.get(control)
if properties is None:
logging.warn('control %(control)s not found for device %(device)s' % {
'control': control, 'device': device})
return
_ctrl_values_cache.setdefault(device, {})[control] = value
# adjust the value range
if 'min' in properties and 'max' in properties:
min_value = int(properties['min'])
max_value = int(properties['max'])
value = int(round(min_value + value * (max_value - min_value) / 100.0))
else:
logging.warn('min and max values not found for control %(control)s of device %(device)s' % {
'control': control, 'device': device})
logging.debug('setting control %(control)s of device %(device)s to %(value)s' % {
'control': control, 'device': device, 'value': value})
output = ''
started = time.time()
p = subprocess.Popen('v4l2-ctl -d "%(device)s" --set-ctrl %(control)s=%(value)s' % {
'device': device, 'control': control, 'value': value}, shell=True, stdout=subprocess.PIPE, bufsize=1)
fd = p.stdout.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
while True:
try:
data = p.stdout.read(1024)
if not data:
break
except IOError:
data = ''
time.sleep(0.01)
output += data
if len(output) > 10240:
logging.warn('v4l2-ctl command returned more than 10k of output')
break
if time.time() - started > 3:
logging.warn('v4l2-ctl command ran for more than 3 seconds')
break
try:
# try to kill the v4l2-ctl subprocess
p.kill()
except:
pass # nevermind
def _list_ctrls(device):
global _ctrls_cache
device = utils.make_str(device)
if device in _ctrls_cache:
return _ctrls_cache[device]
output = ''
started = time.time()
p = subprocess.Popen('v4l2-ctl -d "%(device)s" --list-ctrls' % {
'device': device}, shell=True, stdout=subprocess.PIPE, bufsize=1)
fd = p.stdout.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
while True:
try:
data = p.stdout.read(1024)
if not data:
break
except IOError:
data = ''
time.sleep(0.01)
output += data
if len(output) > 10240:
logging.warn('v4l2-ctl command returned more than 10k of output')
break
if time.time() - started > 3:
logging.warn('v4l2-ctl command ran for more than 3 seconds')
break
try:
# try to kill the v4l2-ctl subprocess
p.kill()
except:
pass # nevermind
controls = {}
for line in output.split('\n'):
if not line:
continue
match = re.match('^\s*(\w+)\s+\(\w+\)\s+\:\s*(.+)', line)
if not match:
continue
(control, properties) = match.groups()
properties = dict([v.split('=', 1) for v in properties.split(' ') if v.count('=')])
controls[control] = properties
_ctrls_cache[device] = controls
return controls
| gpl-3.0 | -9,183,610,515,678,452,000 | 26.062201 | 129 | 0.573285 | false | 3.825499 | false | false | false |
Atush/py_learning | generator/contact.py | 1 | 1799 | from model.contact import Contact
import random
import string
import os.path
import jsonpickle
import getopt
import sys
try:
opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number of contacts", 'file'])
except getopt.GetoptError as err:
print(err)
sys.exit(2)
n = 5
f = "data/contacts.json"
for o, a in opts:
if o == "-n":
n = int(a)
elif o == "-f":
f = a
def random_string(prefix, maxlen):
symbols = string.ascii_letters + string.digits + " "*10
return prefix + "".join(random.choice(symbols) for i in range(random.randrange(maxlen)))
def random_phone(maxlen):
symbols = string.digits
return "".join(random.choice(symbols) for i in range(random.randrange(maxlen)))
def random_email(maxlen_username, maxlen_domain):
symbols = string.ascii_letters
username = "".join(random.choice(symbols) for i in range(random.randrange(maxlen_username)))
domain = "".join(random.choice(symbols) for i in range(random.randrange(maxlen_domain))) + "." + "".join(random.choice(string.ascii_letters) for i in range(random.randrange(4)))
return username + "@" + domain
testdata = [Contact(firstname="", lastname="", address="", homephone="", mobile="", workphone="", email="", email2="", email3="", phone2="")] + [Contact(firstname = random_string("FN", 10), lastname=random_string("LN", 10), address=random_string("address", 20), homephone=random_phone(10), mobile=random_phone(10), workphone=random_phone(10), email=random_email(15,5), email2=random_email(15,5), email3=random_email(15,5), phone2=random_phone(10)) for i in range(n)]
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file, "w") as out:
jsonpickle.set_encoder_options("json", indent = 2)
out.write(jsonpickle.encode(testdata))
| mit | 1,607,549,321,355,180,300 | 25.924528 | 274 | 0.573931 | false | 3.272936 | false | false | false |
BBN-Q/Auspex | src/auspex/instruments/hall_probe.py | 1 | 1712 | # Copyright 2016 Raytheon BBN Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
import numpy as np
class HallProbe(object):
"""Simple wrapper for converting Hall probe voltage measurements to
actual fields values."""
def __init__(self, calibration_file, supply_voltage_method, readout_voltage_method):
super(HallProbe, self).__init__()
self.name = "Lakeshore Hall Probe"
with open(calibration_file) as cf:
lines = [l for l in cf.readlines() if l[0] != '#']
if len(lines) != 2:
raise Exception("Invalid Hall probe calibration file, must contain two lines.")
try:
self.output_voltage = float(lines[0])
except:
raise TypeError("Could not convert output voltage to floating point value.")
try:
poly_coeffs = np.array(lines[1].split(), dtype=np.float)
self.field_vs_voltage = np.poly1d(poly_coeffs)
except:
raise TypeError("Could not convert calibration coefficients into list of floats")
self.getter = readout_voltage_method
self.setter = supply_voltage_method
self.setter(self.output_voltage)
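# Example calibration file layout (made-up numbers; the real file comes with
# the probe): comment lines starting with '#' are ignored, and exactly two
# data lines are required -- the supply voltage, then the polynomial
# coefficients for numpy.poly1d, highest order first:
#
#     # probe serial 1234 calibration
#     5.0
#     0.0 1250.0 0.3
#
# With these numbers, field = 1250.0 * V + 0.3 for a readout voltage V.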
@property
def field(self):
return self.get_field()
def get_field(self):
return self.field_vs_voltage( self.getter() )
def __repr__(self):
name = "Mystery Instrument" if self.name == "" else self.name
return "{} @ {}".format(name, self.resource_name)
| apache-2.0 | -244,677,329,928,552,860 | 37.909091 | 97 | 0.620327 | false | 4.07619 | false | false | false |
avastjohn/maventy_new | registration/urls.py | 1 | 3567 | """
URLConf for Django user registration and authentication.
If the default behavior of the registration views is acceptable to
you, simply use a line like this in your root URLConf to set up the
default URLs for registration::
(r'^accounts/', include('registration.urls')),
This will also automatically set up the views in
``django.contrib.auth`` at sensible default locations.
But if you'd like to customize the behavior (e.g., by passing extra
arguments to the various views) or split up the URLs, feel free to set
up your own URL patterns for these views instead. If you do, it's a
good idea to use the names ``registration_activate``,
``registration_complete`` and ``registration_register`` for the
various steps of the user-signup process.
"""
import functools
from django.conf.urls.defaults import *
from django.views.generic.simple import direct_to_template
from django.contrib.auth import views as auth_views
from registration.views import activate
from registration.views import register
from registration.forms import RegistrationFormUniqueEmailWithCaptcha
# register2 is register with the form_class given
register2 = functools.partial(register,
form_class = RegistrationFormUniqueEmailWithCaptcha)
urlpatterns = patterns('',
# Activation keys get matched by \w+ instead of the more specific
# [a-fA-F0-9]{40} because a bad activation key should still get to the view;
# that way it can return a sensible "invalid key" message instead of a
# confusing 404.
url(r'^activate/(?P<activation_key>\w+)/$',
activate,
name='registration_activate'),
url(r'^login/$',
auth_views.login,
{'template_name': 'registration/login.html'},
name='auth_login'),
url(r'^logout/$',
auth_views.logout,
name='auth_logout'),
url(r'^password/change/$',
auth_views.password_change,
name='auth_password_change'),
url(r'^password/change/done/$',
auth_views.password_change_done,
name='auth_password_change_done'),
url(r'^password/reset/$',
auth_views.password_reset,
name='auth_password_reset'),
url(r'^password/reset/confirm/(?P<uidb36>.+)/(?P<token>.+)/$',
auth_views.password_reset_confirm,
name='auth_password_reset_confirm'),
url(r'^password/reset/complete/$',
auth_views.password_reset_complete,
name='auth_password_reset_complete'),
url(r'^password/reset/done/$',
auth_views.password_reset_done,
name='auth_password_reset_done'),
url(r'^register/$',
register2,
name='registration_register'),
url(r'^register/complete/$',
direct_to_template,
{'template': 'registration/registration_complete.html'},
name='registration_complete'),
)
| bsd-3-clause | -5,240,740,255,448,144,000 | 45.324675 | 99 | 0.540791 | false | 5.132374 | false | false | false |
chenders/deadonfilm | app/deadonfilm.py | 1 | 3643 | import imdb
import json
import os
import logging
from logging.handlers import RotatingFileHandler
from urllib.parse import urlparse
from flask import (
Flask,
redirect,
make_response,
request,
send_from_directory,
render_template
)
import psycopg2.extras
url = urlparse(os.environ.get('IMDB_DB'))
insecure_redirect = os.environ.get('SECURE_REDIRECT_URL', False)
app = Flask(__name__, root_path='./')
i = imdb.IMDb()
conn = psycopg2.connect(
database=url.path[1:],
user=url.username,
password=url.password,
host=url.hostname,
port=url.port
)
conn.autocommit = True
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
@app.before_first_request
def setup_logging():
logger = RotatingFileHandler('app/logs/deadonfilm.log', maxBytes=1000000, backupCount=2)
logger = logging.getLogger('deadonfilm')
logger.setLevel(logging.DEBUG)
app.logger.addHandler(logger)
app.logger.setLevel(logging.DEBUG)
@app.route('/')
def index():
if insecure_redirect and not request.is_secure:
return redirect(insecure_redirect, code=301)
return render_template('index.html')
@app.route('/search/')
def search():
"""
Find movie by title search (using IMDb API). Query argument ``q``.
"""
app.logger.info('Searching for %s' % request.args.get('q'))
movie = request.args.get('q')
m = i.search_movie(movie)
resp = make_response(json.dumps(
[{
'value': mt['long imdb title'],
'id': mt.getID()
} for mt in m if mt.get('kind') == 'movie']))
resp.headers['Content-Type'] = 'application/json'
resp.headers['Access-Control-Allow-Origin'] = '*'
return resp
@app.route('/died/', methods=['POST'])
def died():
"""
Who died from the movie with the given IMDb id?
"""
movie_id = request.form['id']
movie = i.get_movie(movie_id, info=["full credits"])
if movie is None:
resp = make_response("Movie not found: {}".format(movie_id, 404))
else:
actors = movie.data['cast']
actors_by_id = {}
for actor in actors:
actors_by_id[int(actor.getID())] = actor
cursor.execute("""SELECT
* from name_basics WHERE
person_id IN %s AND
death_year NOTNULL
""", (tuple(actors_by_id.keys()),))
pastos = []
for person in cursor.fetchall():
person_id = person['person_id']
character = str(actors_by_id[person_id].currentRole)
pastos.append({
'person_id': person['person_id'],
'birth': person['birth_year'],
'death': person['death_year'],
'character': character,
'name': person['primary_name']
})
pastos = sorted(pastos, key=lambda pasto: pasto['death'], reverse=True)
resp = make_response(json.dumps(pastos))
resp.headers['Content-Type'] = 'application/json'
resp.headers['Access-Control-Allow-Origin'] = '*'
return resp
if __name__ == '__main__':
@app.route('/static/js/<path:path>')
def send_js(path):
return send_from_directory('./static/js', path)
@app.route('/static/css/<path:path>')
def send_css(path):
return send_from_directory('./static/css', path)
@app.route('/static/images/<path:path>')
def send_img(path):
return send_from_directory('./static/images', path)
@app.route('/dist/<path:path>')
def send_dist(path):
return send_from_directory('./dist', path)
app.run()
| mit | -4,903,017,006,393,662,000 | 27.912698 | 92 | 0.59429 | false | 3.643 | false | false | false |
admk/soap | soap/parser/program.py | 1 | 1972 | import re
import sh
from soap.datatype import type_cast
from soap.expression import is_variable
from soap.program import ProgramFlow, PragmaInputFlow, PragmaOutputFlow
from soap.parser.common import _lift_child, _lift_dontcare, CommonVisitor
from soap.parser.expression import DeclarationVisitor, ExpressionVisitor
from soap.parser.grammar import compiled_grammars
from soap.parser.statement import StatementVisitor
class PragmaVisitor(object):
def _visit_comma_seperated_list(self, node, children):
item, comma_item_list = children
return [item] + [each for _, each in comma_item_list]
def visit_pragma_input_statement(self, node, children):
pragma_lit, input_lit, input_list = children
return PragmaInputFlow(input_list)
def visit_pragma_output_statement(self, node, children):
pragma_lit, output_lit, output_list = children
return PragmaOutputFlow(output_list)
def visit_input_assign_expr(self, node, children):
variable, _, number = children
return variable, number
def visit_input_expr(self, node, children):
child = _lift_child(self, node, children)
if not is_variable(child):
return child
return child, type_cast(child.dtype, top=True)
visit_input_list = visit_output_list = _visit_comma_seperated_list
visit_input = visit_output = visit_pragma = _lift_dontcare
class _ProgramVisitor(
CommonVisitor, DeclarationVisitor, ExpressionVisitor,
StatementVisitor, PragmaVisitor):
grammar = compiled_grammars['statement']
def _preprocess(text):
text = re.sub(r'#\s*pragma', '__pragma', text)
text = sh.cpp('-E', '-P', _in=text).stdout.decode('utf-8')
text = re.sub(r'__pragma', '#pragma', text)
return text
def parse(program, decl=None):
decl = decl or {}
visitor = _ProgramVisitor(decl)
program = _preprocess(program)
flow = visitor.parse(program)
return ProgramFlow(flow)
| mit | 7,840,282,045,129,042,000 | 32.423729 | 73 | 0.698783 | false | 3.734848 | false | false | false |
beeftornado/sentry | src/sentry/templatetags/sentry_avatars.py | 2 | 2789 | from __future__ import absolute_import
from django import template
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
from six.moves.urllib.parse import urlencode
from sentry.models import User, UserAvatar
from sentry.utils.avatar import get_email_avatar, get_gravatar_url, get_letter_avatar
register = template.Library()
# Adapted from http://en.gravatar.com/site/implement/images/django/
# The "mm" default is for the grey, "mystery man" icon. See:
# http://en.gravatar.com/site/implement/images/
@register.simple_tag(takes_context=True)
def gravatar_url(context, email, size, default="mm"):
return get_gravatar_url(email, size, default)
@register.simple_tag(takes_context=True)
def letter_avatar_svg(context, display_name, identifier, size=None):
return mark_safe(get_letter_avatar(display_name, identifier, size=size))
@register.simple_tag(takes_context=True)
def profile_photo_url(context, user_id, size=None):
try:
avatar = UserAvatar.objects.get_from_cache(user=user_id)
except UserAvatar.DoesNotExist:
return
url = reverse("sentry-user-avatar-url", args=[avatar.ident])
if size:
url += "?" + urlencode({"s": size})
return settings.SENTRY_URL_PREFIX + url
# Don't use this in any situations where you're rendering more
# than 1-2 avatars. It will make a request for every user!
@register.simple_tag(takes_context=True)
def email_avatar(context, display_name, identifier, size=None, try_gravatar=True):
return mark_safe(get_email_avatar(display_name, identifier, size, try_gravatar))
@register.inclusion_tag("sentry/partial/avatar.html")
def avatar(user, size=36):
# user can be User or OrganizationMember
if isinstance(user, User):
user_id = user.id
email = user.email
else:
user_id = user.user_id
email = user.email
if user_id:
email = user.user.email
return {
"email": email,
"user_id": user_id,
"size": size,
"avatar_type": user.get_avatar_type(),
"display_name": user.get_display_name(),
"label": user.get_label(),
}
@register.inclusion_tag("sentry/partial/avatar.html")
def avatar_for_email(user, size=36):
# user can be User or OrganizationMember
if isinstance(user, User):
user_id = user.id
email = user.email
else:
user_id = user.user_id
email = user.email
if user_id:
email = user.user.email
return {
"for_email": True,
"email": email,
"user_id": user_id,
"size": size,
"avatar_type": user.get_avatar_type(),
"display_name": user.get_display_name(),
"label": user.get_label(),
}
| bsd-3-clause | 8,308,949,113,032,601,000 | 31.057471 | 85 | 0.66583 | false | 3.499373 | false | false | false |
teamfx/openjfx-9-dev-rt | modules/javafx.web/src/main/native/Tools/Scripts/webkitpy/common/system/filesystem.py | 1 | 11234 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Wrapper object for the file system / source tree."""
import codecs
import errno
import exceptions
import filecmp
import glob
import hashlib
import os
import shutil
import sys
import tempfile
import time
class FileSystem(object):
"""FileSystem interface for webkitpy.
Unless otherwise noted, all paths are allowed to be either absolute
or relative."""
sep = os.sep
pardir = os.pardir
def abspath(self, path):
# FIXME: This gross hack is needed while we transition from Cygwin to native Windows, because we
# have some mixing of file conventions from different tools:
if sys.platform == 'cygwin':
path = os.path.normpath(path)
path_components = path.split(os.sep)
if path_components and len(path_components[0]) == 2 and path_components[0][1] == ':':
path_components[0] = path_components[0][0]
path = os.path.join('/', 'cygdrive', *path_components)
return os.path.abspath(path)
def realpath(self, path):
return os.path.realpath(path)
def path_to_module(self, module_name):
"""A wrapper for all calls to __file__ to allow easy unit testing."""
# FIXME: This is the only use of sys in this file. It's possible this function should move elsewhere.
return sys.modules[module_name].__file__ # __file__ is always an absolute path.
def expanduser(self, path):
return os.path.expanduser(path)
def basename(self, path):
return os.path.basename(path)
def chdir(self, path):
return os.chdir(path)
def copyfile(self, source, destination):
shutil.copyfile(source, destination)
def dirname(self, path):
return os.path.dirname(path)
def exists(self, path):
return os.path.exists(path)
def dirs_under(self, path, dir_filter=None):
"""Return the list of all directories under the given path in topdown order.
Args:
dir_filter: if not None, the filter will be invoked
with the filesystem object and the path of each dir found.
The dir is included in the result if the callback returns True.
"""
def filter_all(fs, dirpath):
return True
dir_filter = dir_filter or filter_all
dirs = []
for (dirpath, dirnames, filenames) in os.walk(path):
if dir_filter(self, dirpath):
dirs.append(dirpath)
return dirs
def files_under(self, path, dirs_to_skip=[], file_filter=None):
"""Return the list of all files under the given path in topdown order.
Args:
dirs_to_skip: a list of directories to skip over during the
traversal (e.g., .svn, resources, etc.)
file_filter: if not None, the filter will be invoked
with the filesystem object and the dirname and basename of
each file found. The file is included in the result if the
callback returns True.
"""
def filter_all(fs, dirpath, basename):
return True
file_filter = file_filter or filter_all
files = []
if self.isfile(path):
if file_filter(self, self.dirname(path), self.basename(path)):
files.append(path)
return files
if self.basename(path) in dirs_to_skip:
return []
for (dirpath, dirnames, filenames) in os.walk(path):
for d in dirs_to_skip:
if d in dirnames:
dirnames.remove(d)
for filename in filenames:
if file_filter(self, dirpath, filename):
files.append(self.join(dirpath, filename))
return files
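# A minimal sketch of a file_filter callback for files_under() (illustrative,
# not an API that exists elsewhere in webkitpy):
#
#     def _is_html_file(fs, dirpath, basename):
#         return basename.endswith('.html')
#
#     html_files = filesystem.files_under('LayoutTests', dirs_to_skip=['.svn'],
#                                         file_filter=_is_html_file)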
def getcwd(self):
return os.getcwd()
def glob(self, path):
return glob.glob(path)
def isabs(self, path):
return os.path.isabs(path)
def isfile(self, path):
return os.path.isfile(path)
def getsize(self, path):
return os.path.getsize(path)
def isdir(self, path):
return os.path.isdir(path)
def join(self, *comps):
return os.path.join(*comps)
def listdir(self, path):
return os.listdir(path)
def mkdtemp(self, **kwargs):
"""Create and return a uniquely named directory.
This is like tempfile.mkdtemp, but if used in a with statement
the directory will self-delete at the end of the block (if the
directory is empty; non-empty directories raise errors). The
directory can be safely deleted inside the block as well, if so
desired.
Note that the object returned is not a string and does not support all of the string
methods. If you need a string, coerce the object to a string and go from there.
"""
class TemporaryDirectory(object):
def __init__(self, **kwargs):
self._kwargs = kwargs
self._directory_path = tempfile.mkdtemp(**self._kwargs)
def __str__(self):
return self._directory_path
def __enter__(self):
return self._directory_path
def __exit__(self, type, value, traceback):
# Only self-delete if necessary.
# FIXME: Should we delete non-empty directories?
if os.path.exists(self._directory_path):
os.rmdir(self._directory_path)
return TemporaryDirectory(**kwargs)
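# Sketch of the intended usage (restates the docstring above, nothing new):
#
#     fs = FileSystem()
#     with fs.mkdtemp(suffix='-webkitpy') as temp_dir:   # temp_dir is a plain string
#         path = fs.join(temp_dir, 'scratch.txt')
#         fs.write_text_file(path, u'hello')
#         fs.remove(path)   # the directory must be empty again before the block ends
#     # the empty directory removes itself when the block exits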
def maybe_make_directory(self, *path):
"""Create the specified directory if it doesn't already exist."""
try:
os.makedirs(self.join(*path))
except OSError, e:
if e.errno != errno.EEXIST:
raise
def move(self, source, destination):
shutil.move(source, destination)
def mtime(self, path):
return os.stat(path).st_mtime
def normpath(self, path):
return os.path.normpath(path)
def open_binary_tempfile(self, suffix):
"""Create, open, and return a binary temp file. Returns a tuple of the file and the name."""
temp_fd, temp_name = tempfile.mkstemp(suffix)
f = os.fdopen(temp_fd, 'wb')
return f, temp_name
def open_binary_file_for_reading(self, path):
return codecs.open(path, 'rb')
def read_binary_file(self, path):
"""Return the contents of the file at the given path as a byte string."""
with file(path, 'rb') as f:
return f.read()
def write_binary_file(self, path, contents):
with file(path, 'wb') as f:
f.write(contents)
def open_text_file_for_reading(self, path, errors='strict'):
# Note: There appears to be an issue with the returned file objects
# not being seekable. See http://stackoverflow.com/questions/1510188/can-seek-and-tell-work-with-utf-8-encoded-documents-in-python .
return codecs.open(path, 'r', 'utf8', errors)
def open_text_file_for_writing(self, path):
return codecs.open(path, 'w', 'utf8')
def open_stdin(self):
return codecs.StreamReaderWriter(sys.stdin,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace')
def read_text_file(self, path):
"""Return the contents of the file at the given path as a Unicode string.
The file is read assuming it is a UTF-8 encoded file with no BOM."""
with codecs.open(path, 'r', 'utf8') as f:
return f.read()
def write_text_file(self, path, contents):
"""Write the contents to the file at the given location.
The file is written encoded as UTF-8 with no BOM."""
with codecs.open(path, 'w', 'utf-8') as f:
f.write(contents.decode('utf-8') if type(contents) == str else contents)
def sha1(self, path):
contents = self.read_binary_file(path)
return hashlib.sha1(contents).hexdigest()
def relpath(self, path, start='.'):
return os.path.relpath(path, start)
class _WindowsError(exceptions.OSError):
"""Fake exception for Linux and Mac."""
pass
def remove(self, path, osremove=os.remove):
"""On Windows, if a process was recently killed and it held on to a
file, the OS will hold on to the file for a short while. This makes
attempts to delete the file fail. To work around that, this method
will retry for a few seconds until Windows is done with the file."""
try:
exceptions.WindowsError
except AttributeError:
exceptions.WindowsError = FileSystem._WindowsError
retry_timeout_sec = 3.0
sleep_interval = 0.1
while True:
try:
osremove(path)
return True
except exceptions.WindowsError, e:
time.sleep(sleep_interval)
retry_timeout_sec -= sleep_interval
if retry_timeout_sec < 0:
raise e
def rmtree(self, path):
"""Delete the directory rooted at path, whether empty or not."""
shutil.rmtree(path, ignore_errors=True)
def copytree(self, source, destination):
shutil.copytree(source, destination)
def split(self, path):
"""Return (dirname, basename + '.' + ext)"""
return os.path.split(path)
def splitext(self, path):
"""Return (dirname + os.sep + basename, '.' + ext)"""
return os.path.splitext(path)
def compare(self, path1, path2):
return filecmp.cmp(path1, path2)
| gpl-2.0 | -5,104,576,357,975,560,000 | 35.23871 | 140 | 0.619904 | false | 4.324095 | false | false | false |
ekadhanda/bin | python/coda-scrip.py | 1 | 9971 | #! /usr/bin/env python
# Written by Vasaant S/O Krishnan Friday, 19 May 2017
# Run without arguments for instructions.
import sys
usrFile = sys.argv[1:]
if len(usrFile) == 0:
print ""
print "# Script to read in file of the CODA format and perform some basic"
print "# statistical computations. An index.txt and chain.txt file must be"
print "# provided and the script will automatically identify them for internal"
print "# use. Options are:"
print ""
print "# print = Outputs mean, std and confidence interval (default 95%)."
print "# var = Specify your required variable for hist, trace."
print "# per = Specify your required confidence interval (requires var=)."
print "# hist = Plot histogram (requires var=)."
print "# bins = Choose bin size (default bins=100)"
print "# trace = Trace plot (requires var=)."
print ""
print " -->$ coda-script.py CODAindex.txt CODAchain.txt per=xx var=xx bins=xx print hist trace"
print ""
exit()
import re
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
#=====================================================================
# Define variables.
#
ints = '\s+?([+-]?\d+)' # Integers for regex
#floats = '\s+?([+-]?\d+(?:\.\d+)?)' # Floats or int
floats = '\s+?([+-]?\d+(?:\.\d+)?|\.\d+)([eE][+-]?\d+)?' # Floats or int or scientific
codaFiles = [] # CODAindex and CODAchain files
indexFileFnd = False # CODAindex file identified?
chainFileFnd = False # CODAchain file identified?
indexCodes = {} # Dictionary containing CODAindex info.
# chainIndx = [] # Indexes/Column 1 of CODAchain.txt file
chainData = [] # Data/Column 2 of CODAchain.txt file
percentile = 95.0 # Default percentile
bins = 100 # Default number of bins for histogram
reqIndxCode = '' # User requested varible for hist, trace
#=====================================================================
#=====================================================================
# Determine which are the CODAindex and CODAchain files and
# automatically assign them to their respective variables.
#
for i in usrFile:
codaSearch = re.search('.txt',i)
if codaSearch:
codaFiles.append(i)
if len(codaFiles) == 2: # Assuming 1 index and 1 chain file
for j in codaFiles:
with open(j,'r') as chkTyp: # Run a quick check on the first line only
firstLine = chkTyp.readline()
codaIndex = re.search('^(\S+)' + ints + ints + '$', firstLine)
codaChain = re.search('^(\d+)' + floats + '$', firstLine)
if codaIndex:
indexFile = j
indexFileFnd = True
if codaChain:
chainFile = j
chainFileFnd = True
else:
print "Insfficient files of CODA*.txt format."
print "Check your input files."
#=====================================================================
#=====================================================================
# Determine percentile
#
for i in usrFile:
userPercentile = re.search('per=([+-]?\d+(?:\.\d+)?)',i)
if userPercentile:
percentile = abs(float(userPercentile.group(1)))
usrFile.append('print')
#=====================================================================
#=====================================================================
# Determine user requested variable from CODAIndex file
#
for i in usrFile:
userReqCodaIndx = re.search('var=(\S+)',i)
if userReqCodaIndx:
reqIndxCode = str(userReqCodaIndx.group(1))
# ... same for number of bins:
for i in usrFile:
userReqBins = re.search('bins=(\d+)',i)
if userReqBins:
bins = int(userReqBins.group(1))
usrFile.append('hist')
#=====================================================================
if indexFileFnd and chainFileFnd:
#=====================================================================
# Harvest index file for the variable list and corresponding
# [start,stop] coords:
#
for line in open(indexFile, 'r'):
reqIndex = re.search('^(\S+)' + ints + ints + '$', line)
if reqIndex:
key = str(reqIndex.group(1))
value = [int(reqIndex.group(2)), int(reqIndex.group(3))]
indexCodes[key] = value
maxElement = max(indexCodes, key = indexCodes.get) # The key with the largest value
chainLen = max(indexCodes[maxElement]) # The largest value (expected amt. of data)
#=====================================================================
#=====================================================================
# I thought that initialising the arrays before filling them
# would be faster. It is not.
#
# chainIndx = np.zeros(chainLen)
# chainData = np.zeros(chainLen)
# with open(chainFile, 'r') as harvestVals:
# for i in range(chainLen):
# currLine = harvestVals.readline()
# reqChain = re.search('^(\d+)' + floats + '$', currLine)
# if reqChain:
# chainIndx[i] = int(reqChain.group(1))
# chainData[i] = float(reqChain.group(2))
#=====================================================================
#=====================================================================
# Harvest chain file
#
for line in open(chainFile, 'r'):
reqChain = re.search('^(\d+)' + floats + '$', line)
if reqChain:
#chainIndx.append( int(reqChain.group(1)))
chainData.append(float(reqChain.group(2)))
#chainIndx = np.array(chainIndx)
chainData = np.array(chainData)
#=====================================================================
#=====================================================================
# Basic check on the harvest by comparing harvested vs. expected
# no. of data.
#
if len(chainData) != chainLen:
print " Warning! "
print " %10d lines expected from %s."%(chainLen,indexFile)
print " %10d lines harvested from %s."%(len(chainData),chainFile)
#=====================================================================
#=====================================================================
# Output some basic statistics to the terminal.
#
if 'print' in usrFile:
print "\n%20s %10s %10s"%("mean","std",str(percentile)+"%")
for i in indexCodes:
strtIndx = indexCodes[i][0] - 1 # Python starts from 0. CODAindex from 1
stopIndx = indexCodes[i][1] # ... but np.array needs this to get to the end
npPerTile = np.percentile(chainData[strtIndx:stopIndx],[0,percentile]) # Numpy sorts internally
minPer = npPerTile[0]
maxPer = npPerTile[1]
print "%8s %10.4f %10.4f %6d, %6.3f"%(i, chainData[strtIndx:stopIndx].mean(),
chainData[strtIndx:stopIndx].std(),
minPer,maxPer
)
print ""
#=====================================================================
#=====================================================================
# Trace plot that gives the variable value as a function of its
# rank (or position in the chain)
#
if 'trace' in usrFile:
if reqIndxCode != '':
for i in indexCodes:
if reqIndxCode == i:
strtIndx = indexCodes[i][0] - 1 # Python starts from 0. CODAindex from 1
stopIndx = indexCodes[i][1] # ... but np.array needs this to get to the end
traceRank = range(stopIndx-strtIndx)
plt.plot(traceRank,chainData[strtIndx:stopIndx])
plt.xlabel('Rank')
plt.ylabel('Variable: '+i)
plt.show()
else:
print "No variable selected by user for trace plot."
#=====================================================================
#=====================================================================
# Histogram
#
if 'hist' in usrFile:
if reqIndxCode != '':
for i in indexCodes:
if reqIndxCode == i:
strtIndx = indexCodes[i][0] - 1 # Python starts from 0. CODAindex from 1
stopIndx = indexCodes[i][1] # ... but np.array needs this to get to the end
[n, bins, patches] = plt.hist(chainData[strtIndx:stopIndx],
bins = bins,
normed = True,
histtype= 'step'
)
y = mlab.normpdf(bins, chainData[strtIndx:stopIndx].mean(),
chainData[strtIndx:stopIndx].std()
)
npPerTile = np.percentile(chainData[strtIndx:stopIndx],[0,percentile])
maxPer = npPerTile[1]
plt.axvline(x=maxPer, color='k', label=str(percentile)+'%',ls=':',lw=0.8)
plt.plot(bins,y,'--')
plt.ylabel('Variable: '+i)
plt.legend(frameon=False)
plt.show()
else:
print "No variable selected by user for histogram."
#=====================================================================
| mit | 5,365,606,739,219,322,000 | 40.032922 | 110 | 0.446094 | false | 4.400265 | false | false | false |
ESOedX/edx-platform | common/djangoapps/third_party_auth/management/commands/remove_social_auth_users.py | 1 | 2126 | """
Management command to remove social auth users. Intended for use in masters
integration sandboxes to allow partners reset users and enrollment data.
"""
from __future__ import absolute_import
import logging
from django.conf import settings
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from six.moves import input
from third_party_auth.models import SAMLProviderConfig
log = logging.getLogger(__name__)
class Command(BaseCommand):
"""
Management command to remove all social auth entries AND the corresponding edX
users for a given IDP.
Usage:
manage.py remove_social_auth_users gtx
"""
confirmation_prompt = "Type 'confirm' to continue with deletion\n"
def add_arguments(self, parser):
parser.add_argument('IDP', help='slug for the idp to remove all users from')
parser.add_argument(
'--force',
action='store_true',
help='Skip manual confirmation step before deleting objects',
)
@transaction.atomic
def handle(self, *args, **options):
slug = options['IDP']
if not settings.FEATURES.get('ENABLE_ENROLLMENT_RESET'):
            raise CommandError('ENABLE_ENROLLMENT_RESET feature not enabled on this environment')
try:
SAMLProviderConfig.objects.current_set().get(slug=slug)
except SAMLProviderConfig.DoesNotExist:
raise CommandError(u'No SAML provider found for slug {}'.format(slug))
users = User.objects.filter(social_auth__provider=slug)
user_count = len(users)
count, models = users.delete()
log.info(
u'\n%s users and their related models will be deleted:\n%s\n',
user_count,
models,
)
if not options['force']:
confirmation = input(self.confirmation_prompt)
if confirmation != 'confirm':
raise CommandError('User confirmation required. No records have been modified')
log.info(u'Deleting %s records...', count)
| agpl-3.0 | -630,919,036,611,812,900 | 32.21875 | 96 | 0.663688 | false | 4.392562 | false | false | false |
akretion/odoo | addons/mrp/models/mrp_routing.py | 7 | 5147 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
class MrpRouting(models.Model):
""" Specifies routings of work centers """
_name = 'mrp.routing'
_description = 'Routings'
name = fields.Char('Routing', required=True)
active = fields.Boolean(
'Active', default=True,
help="If the active field is set to False, it will allow you to hide the routing without removing it.")
code = fields.Char(
'Reference',
copy=False, default=lambda self: _('New'), readonly=True)
note = fields.Text('Description')
operation_ids = fields.One2many(
'mrp.routing.workcenter', 'routing_id', 'Operations',
copy=True, oldname='workcenter_lines')
location_id = fields.Many2one(
'stock.location', 'Raw Materials Location',
help="Keep empty if you produce at the location where you find the raw materials. "
"Set a location if you produce at a fixed location. This can be a partner location "
"if you subcontract the manufacturing operations.")
company_id = fields.Many2one(
'res.company', 'Company',
default=lambda self: self.env['res.company']._company_default_get('mrp.routing'))
@api.model
def create(self, vals):
if 'code' not in vals or vals['code'] == _('New'):
vals['code'] = self.env['ir.sequence'].next_by_code('mrp.routing') or _('New')
return super(MrpRouting, self).create(vals)
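

# Illustrative sketch (not part of the original module and never called by
# Odoo): the operation model below derives 'time_cycle' from the last few
# finished work orders as total duration per produced quantity, scaled by
# the work center capacity. The same arithmetic on plain numbers:
def _example_time_cycle(total_duration, total_qty_produced, capacity=1.0):
    """Average duration per unit produced, scaled by capacity."""
    if not total_qty_produced:
        return None
    return (total_duration / float(total_qty_produced)) * (capacity or 1.0)
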
class MrpRoutingWorkcenter(models.Model):
_name = 'mrp.routing.workcenter'
_description = 'Work Center Usage'
_order = 'sequence, id'
name = fields.Char('Operation', required=True)
workcenter_id = fields.Many2one('mrp.workcenter', 'Work Center', required=True)
sequence = fields.Integer(
'Sequence', default=100,
help="Gives the sequence order when displaying a list of routing Work Centers.")
routing_id = fields.Many2one(
'mrp.routing', 'Parent Routing',
index=True, ondelete='cascade', required=True,
help="The routing contains all the Work Centers used and for how long. This will create work orders afterwards "
"which alters the execution of the manufacturing order.")
note = fields.Text('Description')
company_id = fields.Many2one(
'res.company', 'Company',
readonly=True, related='routing_id.company_id', store=True)
worksheet = fields.Binary('worksheet')
time_mode = fields.Selection([
('auto', 'Compute based on real time'),
('manual', 'Set duration manually')], string='Duration Computation',
default='auto')
time_mode_batch = fields.Integer('Based on', default=10)
time_cycle_manual = fields.Float(
'Manual Duration', default=60,
help="Time in minutes. Is the time used in manual mode, or the first time supposed in real time when there are not any work orders yet.")
time_cycle = fields.Float('Duration', compute="_compute_time_cycle")
workorder_count = fields.Integer("# Work Orders", compute="_compute_workorder_count")
batch = fields.Selection([
('no', 'Once all products are processed'),
('yes', 'Once a minimum number of products is processed')], string='Next Operation',
help="Set 'no' to schedule the next work order after the previous one. Set 'yes' to produce after the quantity set in 'Quantity To Process' has been produced.",
default='no', required=True)
batch_size = fields.Float('Quantity to Process', default=1.0)
workorder_ids = fields.One2many('mrp.workorder', 'operation_id', string="Work Orders")
@api.multi
@api.depends('time_cycle_manual', 'time_mode', 'workorder_ids')
def _compute_time_cycle(self):
manual_ops = self.filtered(lambda operation: operation.time_mode == 'manual')
for operation in manual_ops:
operation.time_cycle = operation.time_cycle_manual
for operation in self - manual_ops:
data = self.env['mrp.workorder'].read_group([
('operation_id', '=', operation.id),
('state', '=', 'done')], ['operation_id', 'duration', 'qty_produced'], ['operation_id'],
limit=operation.time_mode_batch)
count_data = dict((item['operation_id'][0], (item['duration'], item['qty_produced'])) for item in data)
if count_data.get(operation.id) and count_data[operation.id][1]:
operation.time_cycle = (count_data[operation.id][0] / count_data[operation.id][1]) * (operation.workcenter_id.capacity or 1.0)
else:
operation.time_cycle = operation.time_cycle_manual
@api.multi
def _compute_workorder_count(self):
data = self.env['mrp.workorder'].read_group([
('operation_id', 'in', self.ids),
('state', '=', 'done')], ['operation_id'], ['operation_id'])
count_data = dict((item['operation_id'][0], item['operation_id_count']) for item in data)
for operation in self:
operation.workorder_count = count_data.get(operation.id, 0)
| agpl-3.0 | 1,015,476,141,267,269,400 | 49.960396 | 168 | 0.63979 | false | 3.929008 | false | false | false |
Xilinx/hopper | hopper/utils/git/watcher.py | 1 | 5949 | # Copyright (c) 2015 Xilinx Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import urlparse
import shutil
import datetime
from hopper.utils.logger import *
import hopper.utils.git.tasks
import hopper.utils.git.repo
import hopper.source.meta
import threading
class Watcher:
class GroupState:
def __init__(self, layers):
self.layers = layers
self.refstate = {}
def getRefPairs(self):
pairs = []
for i in self.layers:
if i.source and isinstance(i.source, hopper.source.meta.GitSource):
if i.source.canFetch():
# TODO: handle ref/tag/commit properly below
pairs.append((i.source.remote, "refs/heads/" + i.source.ref))
return pairs
def filterPairs(self, remoterefs):
filteredrefs = {}
for p in self.getRefPairs():
if p[0] in remoterefs:
for i in remoterefs[p[0]].iteritems():
if i[0] == p[1]:
if p[0] not in filteredrefs:
filteredrefs[p[0]] = {}
filteredrefs[p[0]][i[0]] = i[1]
return filteredrefs
def update(self, remoterefs, trigger = False):
rrefs = self.filterPairs(remoterefs)
pairs = self.getRefPairs()
changed = False
oldrefstate = self.refstate
newrefstate = {}
for i in pairs:
if i[0] in rrefs:
if i[1] in rrefs[i[0]]:
newcommit = rrefs[i[0]][i[1]]
if i[0] not in newrefstate:
newrefstate[i[0]] = {}
newrefstate[i[0]][i[1]] = newcommit
log("remote: %s, ref: %s, value = %s" % (i[0], i[1], newcommit))
if trigger:
changed = True
if oldrefstate != None:
if i[0] in oldrefstate and i[1] in oldrefstate[i[0]]:
if newrefstate[i[0]][i[1]] != oldrefstate[i[0]][i[1]]:
changed = True
self.refstate = newrefstate
return changed
def cloneRefPin(self, remoterefs):
filtered = self.filterPairs(remoterefs)
# create layers that match the layers object, fill in pinned refs
pinnedlayers = hopper.source.meta.LayerCollection(self.layers.defaultversion)
for i in self.layers:
if isinstance(i.source, hopper.source.meta.GitSource):
					# TODO: fixup picking of ref name
refname = "refs/heads/" + i.source.ref
refpin = None
if i.source.remote in filtered:
refs = filtered[i.source.remote]
if refname in refs:
refpin = refs[refname]
newsource = hopper.source.meta.GitSource(i.source.remote, refpin)
else:
newsource = i.source
pinnedlayers.add(i.getFullName(), newsource)
return pinnedlayers
def __init__(self, environment):
self.environment = environment
		self._stop_event = threading.Event()
self.thread = None
self.interval = 0
self.lock = threading.RLock()
self.groups = []
self.changeevent = threading.Condition()
self.changequeue = []
def addLayers(self, layers):
group = Watcher.GroupState(layers)
self.groups.append(group)
def start(self, interval = 30):
if self.thread and self.thread.isAlive():
return
self.interval = interval
self.thread = threading.Thread(target = self.__worker__)
		self.thread.daemon = True
self.thread.start()
	def stop(self):
		if self.thread and self.thread.isAlive():
			self._stop_event.set()
			self.thread.join()
def alive(self):
if self.thread and self.thread.isAlive():
return True
return False
def trigger(self):
self.__check__(True)
def __check__(self, trigger = False):
with self.lock:
haschanges = False
remotes = []
for i in self.groups:
for p in i.getRefPairs():
if p[0] not in remotes:
remotes.append(p[0])
self.environment.debug("need to update for the following remotes -> %s" % remotes)
refstate = {}
for i in remotes:
self.environment.log("Grabbing refs from remote for %s" % i)
result = hopper.utils.git.tasks.GitTask.run(["ls-remote", i], environment = self.environment)
if result[0] == 0:
refstate[i] = {}
for r in result[1].splitlines():
parts = r.split()
refstate[i][parts[1]] = parts[0]
self.environment.debug("got refs -> %s" % repr(refstate[i]))
else:
self.environment.error("Failed to get remote state for '%s' error message = %s" % (i, result[1]))
return
haschanges = False
for i in self.groups:
if i.update(refstate, trigger):
self.environment.log("Changes have happened since last check, pinning")
changes = i.cloneRefPin(refstate)
self.changequeue.append((i.layers, changes, datetime.datetime.utcnow()))
haschanges = True
if haschanges:
with self.changeevent:
self.changeevent.notifyAll()
def __worker__(self):
		while not self._stop_event.wait(self.interval):
self.__check__()
def wait(self):
if self.alive():
if self.hasnext():
return
with self.changeevent:
self.changeevent.wait()
def hasnext(self):
with self.lock:
if len(self.changequeue) != 0:
return True
return False
def getnext(self):
with self.lock:
if len(self.changequeue) != 0:
return self.changequeue.pop()
return None
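
# --- Illustrative sketch (not part of the original file) --------------------
# A minimal driver for Watcher, assuming an 'environment' object that offers
# log()/debug()/error() (as used by the class above) and a
# hopper.source.meta.LayerCollection for 'layers'. The function is
# hypothetical and is never called anywhere in hopper.
def _example_watch_loop(environment, layers, interval=30):
	watcher = Watcher(environment)
	watcher.addLayers(layers)
	watcher.start(interval=interval)
	while watcher.alive():
		watcher.wait()
		while watcher.hasnext():
			pinnedlayers, pinnedchanges, when = watcher.getnext()
			environment.log("change pinned at %s" % str(when))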
| mit | 497,451,319,322,704,600 | 28.161765 | 102 | 0.675408 | false | 3.245499 | false | false | false |
SLongofono/448_Project4 | testMath.py | 1 | 5536 | import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
import Variance
import math
def testVariance():
print ("1. Testing Variance")
weighting = [2,2,2,2,2,2,2,2,2,2]
test1 = [['artist1', 'artist2', 'artist3'],['genre1', 'genre2', 'genre3'],0,0,0,0,0,0,0,0]
test2 = [['artist1'],['genre1', 'genre2'],1,2,3,4,5,6,7,8]
test3 = [['artist1'],['genre1','genre2'],6,7,8,9,2,3,4,5]
test4 = []
emptylist = -1
diffList1 = []
diffList2 = []
knownVal1 = [0,0,1,2,3,4,5,6,7,8]
knownVal2 = [0,0,5,5,5,5,3,3,3,3]
print "\t A. Variance between a populated list and a list of zeros ..."
for i in range(len(test1)):
diffList1.append(Variance.getVariance(test1,test2)[i] -knownVal1[i])
print "\t B. Variance between 2 populated lists ..."
for i in range(len(test2)):
diffList2.append(Variance.getVariance(test3,test2)[i] - knownVal2[i])
print "\t C. Variance calculated on an empty List ..."
emptylistValue = Variance.getVariance(test3,test4)
if emptylistValue == emptylist:
for i in range (len(diffList1)):
if ((diffList1[i] or diffList2[i]) > .0000001):
return False
return True
def testWeightedDifference():
print "2. Testing Weighted Difference"
weighting = [2,2,2,2,2,2,2,2,2,2]
badWeighting = []
test1 = [['artist1', 'artist2', 'artist3'],['genre1', 'genre2', 'genre3'],0,0,0,0,0,0,0,0]
test2 = [['artist1'],['genre1', 'genre2'],1,2,3,4,5,6,7,8]
test3 = [['artist1'],['genre1', 'genre2'],6,7,8,9,2,3,4,5]
test4 = []
diffList1 = []
diffList2 = []
diffList3 = []
knownVal1 = [0,0,2,4,6,8,10,12,14,16]
knownVal2 = [0,0,10,10,10,10,6,6,6,6]
emptylistValue = -1
print "\t A. Weighted Difference between a populated list and a list of zeros ..."
for i in range(len(test1)):
diffList1.append(Variance.getWeightedDifference(test2, test1, weighting)[i] - knownVal1[i])
print "\t B. Weighted Difference between 2 populated lists ..."
for i in range(len(test1)):
diffList2.append(Variance.getWeightedDifference(test3, test2, weighting)[i] - knownVal2[i])
print "\t C. Testing when Weighting is an empty list ..."
diffList3 = Variance.getWeightedDifference(test3,test2,badWeighting)
print "\t D.Testing when one of the lists is an empty list ..."
emptylist = Variance.getWeightedDifference(test4,test2,weighting)
if emptylist == emptylistValue:
for i in range(len(diffList1)):
if((diffList1[i] or diffList2[i])> .0000001):
return False
return True
def testgetNewWeight():
print "3. Testing getNewWeight"
badstddevs = []
stddevs = [1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0]
knownVal1 = [1, 1, 1, 0.5, 0.333, 0.25, 0.2, 0.167, 0.143, 0.125]
emptylistValue = -1
diffList = []
print "\t A. getNewWeight when stddevs is empty ..."
emptylist = Variance.getNewWeight(badstddevs)
print "\t B. getNewWeight when stddevs is populated ..."
for i in range(len(knownVal1)):
diffList.append(Variance.getNewWeight(stddevs)[i] - knownVal1[i])
if emptylist == emptylistValue:
for i in range(len(diffList)):
if(diffList[i] > .0000001):
return False
return True
def filter2sigmaTest():
print("4. Testing Filter2Sigma")
averages = [[],[],10.0,10.0,10.0,10.0,10.0,10.0,10.0,10.0]
stddevs = [2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0]
knownVal = [1, 1, 1, 0, 0, 0, 0]
testSongs = [
[[],[], 10.0,10.0,10.0,10.0,10.0,10.0,10.0,10.0],
[[],[], 6.0,10.0,10.0,10.0,10.0,10.0,10.0,10.0],
[[],[], 10.0,10.0,10.0,10.0,10.0,10.0,10.0,14.0],
[[],[], 5.0,10.0,10.0,10.0,10.0,10.0,10.0,10.0],
[[],[], 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0],
[[],[], 15.0,10.0,10.0,10.0,10.0,10.0,10.0,10.0],
[[],[], 10.0,10.0,10.0,10.0,10.0,10.0,10.0,15.0],
]
val = Variance.filter2Sigma(testSongs, averages, stddevs)
return val == knownVal
def teststdDev():
print("5. Testing Standard Deviation")
stdDev = []
diffList = []
listWithRowsAsColumns = [[1,2,3,4,5,6,7,8],
[6,1,9,0,5,7,3,4],
[5,5,5,5,5,5,5,5],
[23,100,1,0,8,9,5,6],
[7,5,4,3,2,1,9,6]
]
listofCalculatedStdDevs = [2.449,3.0,0.0,33.481,2.645]
for column in listWithRowsAsColumns:
vals = [x for x in column]
Nval = len(vals)
mean = sum(vals)/Nval
stdDev.append((sum([(x-mean)**2 for x in vals])/(Nval-1))**0.5)
for i in range(len(listofCalculatedStdDevs)):
diffList.append(stdDev[i] - listofCalculatedStdDevs[i])
for i in range(len(diffList)):
if(diffList[i] > .001):
return False
return True
def go():
numTests = 0
numPassed = 0
print "**************************************"
print "********MATH FUNCTION TESTING*********"
print "**************************************"
numTests +=1
if testVariance():
print "\t Variance test passed! \n\n"
numPassed += 1
numTests +=1
if testWeightedDifference():
print "\tWeightedDifference test passed!\n\n"
numPassed +=1
numTests +=1
if testgetNewWeight():
print "\t getNewWeight test passed!\n\n"
numPassed +=1
numTests +=1
if (filter2sigmaTest()):
print "\t f2sigma test passed!\n\n"
numPassed+=1
numTests +=1
if(teststdDev()):
print "\t Standard Deviation Test Passed!"
numPassed +=1
print "Tests: %d\nTests passed: %d\nPercentage: %f\n\n" % (numTests,numPassed, (float(numPassed)/numTests)*100)
return numTests,numPassed
if __name__ == "__main__":
x,y = go()
print "Tests: %d\nTests passed: %d\nPercentage: %f\n\n" % (x,y, (float(y)/x)*100)
| mit | 5,970,112,297,490,376,000 | 28.763441 | 112 | 0.621929 | false | 2.390328 | true | false | false |
ww-Kenya/rainbow6 | motion_detection.py | 1 | 5448 | import argparse
import datetime
import imutils
import time
import cv2
import RPi.GPIO as GPIO
import os
import smtplib
from servo import Servo
RESIZE_WIDTH = 500
RESIZE_HEIGHT = 375
THRESHOLD = 30
MAXPIXELVAL = 255
MORNINGTIME = 7
NIGHTTIME = 19
MIN_RECTANGLE = 2000
MAX_RECTANGLE = 90000
HARDDRIVE_LOCATION = "/media/pi/Seagate\ Expansion\ Drive/videos/"
HOME_LOCATION = "/home/pi/test/rainbow6/"
TITLE = ""
server = smtplib.SMTP('smtp.gmail.com',587)
server.starttls()
#server.login("","")
msg = "intruder"
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the video file")
ap.add_argument("-a", "--min-area", type=int, default = MIN_RECTANGLE, help="minimum area size")
ap.add_argument("-m", "--max-area", type=int, default = MAX_RECTANGLE,help="maximum area size")
args = vars(ap.parse_args())
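
# Illustrative sketch (added for documentation; this helper is never called):
# the main loop below detects motion by differencing each frame against a
# reference frame, thresholding, dilating, and then keeping contours whose
# area lies between the configured minimum and maximum rectangle sizes.
def _example_motion_mask(reference_gray, current_gray):
    # Both inputs are assumed to be grayscale, blurred frames.
    delta = cv2.absdiff(reference_gray, current_gray)
    mask = cv2.threshold(delta, THRESHOLD, MAXPIXELVAL, cv2.THRESH_BINARY)[1]
    return cv2.dilate(mask, None, iterations=2)
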
if time.gmtime().tm_hour <= MORNINGTIME or time.gmtime().tm_hour >= NIGHTTIME:
print("Using Pi Camera")
camera = cv2.VideoCapture(1)
time.sleep(0.25)
else:
print("Using regular camera")
camera = cv2.VideoCapture(0)
time.sleep(0.25)
motor = Servo(12, 16, 18, 1.8)
timecount = time.gmtime().tm_sec
firstFrame = None
moved = False
motionDetected = False
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
out = None
resetTimer = (time.gmtime().tm_sec +30 ) % 60
settling = False
time.sleep(1)
emailed = False
while True:
(grabbed, frame) = camera.read()
text = "Unoccupied"
if not grabbed:
break
frame = imutils.resize(frame, width=RESIZE_WIDTH)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (21, 21), 0)
if firstFrame is None:
firstFrame = gray
continue
if resetTimer is time.gmtime().tm_sec:
firstFrame = None
frame = None
thresh = None
frameDelta = None
resetTimer = (time.gmtime().tm_sec + 30) % 60
print("Reseting")
continue
if settling and settletime is time.gmtime().tm_sec:
settling = False
firstFrame = None
frame = None
thresh = None
frameDelta = None
continue
frameDelta = cv2.absdiff(firstFrame, gray)
thresh = cv2.threshold(frameDelta, THRESHOLD, MAXPIXELVAL, cv2.THRESH_BINARY)[1]
thresh = cv2.dilate(thresh, None, iterations=2)
_, cnts, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
count = 0
for c in cnts:
if cv2.contourArea(c) < args["min_area"]:
continue
if cv2.contourArea(c) > args["max_area"]:
continue
motionTimerMinute = time.gmtime().tm_min
motionDetected = True
if out is None:
TITLE = str(time.gmtime().tm_year) + "-" + str(time.gmtime().tm_mon) + "-" + str(time.gmtime().tm_mday) + "-" + str(time.gmtime().tm_hour) + "-" + str(time.gmtime().tm_min) + '.avi'
out = cv2.VideoWriter(TITLE,fourcc, 20.0,(RESIZE_WIDTH,RESIZE_HEIGHT))
if not emailed:
#server.sendmail("","",msg)
emailed = True
(x, y, w, h) = cv2.boundingRect(c)
if count is 0:
maxx = x
maxw = w
maxh = h
maxy = y
else:
maxarea = maxw*maxh
if maxarea < w*h:
maxx = x
maxw = w
maxh = h
maxy = y
count = count + 1
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
text = "Occupied"
if motionDetected:
out.write(frame)
cv2.putText(frame, "Room Status: {}".format(text), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I %M:%S%p"), (10, frame.shape[0] - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.25, (0, 0, 255), 1)
if count > 0 and not settling:
cv2.rectangle(frame, (maxx, maxy), (maxx + maxw, maxy + maxh), (0, 255, 0), 2)
centerRect = maxx + maxw / 2
if time.gmtime().tm_sec != timecount:
if (centerRect > (RESIZE_WIDTH / 2 + int(RESIZE_WIDTH*0.05))):
motor.moveDegrees(36,0.02)
moved = True
elif (centerRect < (RESIZE_WIDTH / 2 - int(RESIZE_WIDTH*0.05))):
motor.moveDegrees(-36,0.02)
moved = True
timecount = time.gmtime().tm_sec
elif out is not None:
minutes = time.gmtime().tm_min
minuteCheck = (motionTimerMinute + 1) % 60
if minutes is minuteCheck:
motionDetected = False
print("Releasing out stream")
out.release()
time.sleep(1)
print(HOME_LOCATION+TITLE)
print(HARDDRIVE_LOCATION+TITLE)
#os.rename(HOME_LOCATION+TITLE , HARDDRIVE_LOCATION+TITLE)
out = None
emailed = False
#cv2.imshow("First Frame", firstFrame)
cv2.imshow("Security Feed", frame)
cv2.imshow("Thresh", thresh)
cv2.imshow("Frame Delta", frameDelta)
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
if moved:
moved = False
while motor.ismoving:
pass
settletime = (time.gmtime().tm_sec +2) % 60
settling = True
firstFrame = None
frame = None
thresh = None
frameDelta = None
camera.release()
cv2.destroyAllWindows()
if out is not None:
out.release()
time.sleep(1)
server.quit()
| mit | 8,836,440,019,275,679,000 | 27.978723 | 193 | 0.581131 | false | 3.250597 | false | false | false |
markgw/jazzparser | src/jazzparser/formalisms/music_halfspan/songtools.py | 1 | 20939 | """Interactive shell tools for the Halfspan formalism.
These tools concern song recognition and allow utilities for recognising
songs to be called from the shell.
"""
"""
============================== License ========================================
Copyright (C) 2008, 2010-12 University of Edinburgh, Mark Granroth-Wilding
This file is part of The Jazz Parser.
The Jazz Parser is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
The Jazz Parser is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with The Jazz Parser. If not, see <http://www.gnu.org/licenses/>.
============================ End license ======================================
"""
__author__ = "Mark Granroth-Wilding <[email protected]>"
import sys

from jazzparser.shell.tools import Tool
from jazzparser.shell import ShellError
from jazzparser.utils.options import ModuleOption, options_help_text
from jazzparser.utils.strings import str_to_bool
class LoadCorpusTool(Tool):
"""
Tool to load a corpus of tonal space analyses of songs. These may then
be used for song recognition. This must be called before other song
recognition tools will work.
    A corpus may be created by using bin/data/parsegs.py to parse the chord
    corpus and store the analyses in a file.
"""
name = "Load analysis set"
commands = ['loadsongs']
usage = ('loadsongs <name>', "load the named tonal space analysis corpus")
help = """\
Loads a tonal space analysis corpus by name. This corpus may then be used by
other tools which require a song corpus.
These corpora are built using the script bin/data/parsegs.py.
"""
def run(self, args, state):
from jazzparser.data.tonalspace import TonalSpaceAnalysisSet
if len(args) != 1:
raise ShellError, "Please give the name of a tonal space analysis "\
"set. Available sets are: %s" % \
", ".join(TonalSpaceAnalysisSet.list())
try:
# Try loading the named set
songset = TonalSpaceAnalysisSet.load(args[0])
except Exception, err:
raise ShellError, "Error loading tonal space analysis set: %s" % \
err
print "Loaded tonal space analysis set '%s'" % args[0]
# Store this in the state so other tools can use it
state.data['songset'] = songset
class ListSongsTool(Tool):
name = "List songs"
commands = ['songs']
usage = ('songs', "list songs in loaded songset")
help = """\
List all the song names in the loaded tonal space analysis songset.
"""
def run(self, args, state):
# Try getting song data
songset = state.get_data("songset",
help_msg="Use command 'loadsongs' to load a songset")
print "\n".join(["%d. %s" % (num,name) for (num,name) in \
enumerate(songset.songs)])
class PrintAnalysisTool(Tool):
name = "Print analysis"
commands = ['songanal']
usage = ('songanal <songnum>', "display the tonal space analysis for song "\
"number <songnum> in the loaded songset")
help = """\
Prints the tonal space path that is the analysis of a song from a loaded
songset.
"""
def run(self, args, state):
from jazzparser.formalisms.music_halfspan.semantics import semantics_to_coordinates
if len(args) == 0:
raise ShellError, "Give a song number"
# Get the song from the dataset
song = get_song(int(args[0]), state)
print "Analysis of '%s'" % song[0]
print "\nSemantics"
# Display the semantics
print song[1]
print "\nTonal space path"
# Also display the TS coordinates
print semantics_to_coordinates(song[1])
class ResultSongTSEditDistanceTool(Tool):
name = "Compare result"
commands = ['songcomparets', 'songcompts']
usage = ('songcomparets <result-num> <song-num>', "compare a parse result "\
"to a song in the database using the tonal space edit distance metric")
help = """\
Compares a parse result to a specific song in the database using the tonal
space edit distance metric and outputs the alignment distance.
See also:
songcomparedep: to compare a result to a song in terms of dependency
recovery.
"""
tool_options = Tool.tool_options + [
ModuleOption('local', filter=str_to_bool,
usage="local=B, where B is true or false",
default=False,
help_text="Use local alignment to score the similarity "\
"of the tonal space paths instead of global"),
ModuleOption('song', filter=str_to_bool,
usage="tosong=B, where B is true or false",
default=False,
help_text="Compare the numbered song in the corpus to the "\
"second song, instead of comparing the numbered result "\
"to the song"),
ModuleOption('alignment', filter=str_to_bool,
usage="alignment=B, where B is true or false",
default=False,
help_text="Output the full alignment, with the two step "\
"lists above one another"),
]
def run(self, args, state):
from jazzparser.formalisms.music_halfspan.evaluation import \
tonal_space_local_alignment, tonal_space_alignment, \
arrange_alignment
if len(args) < 2:
raise ShellError, "Give a result number and a song number"
resnum = int(args[0])
songnum = int(args[1])
song = get_song(songnum, state)
songsem = song[1]
if self.options['song']:
# Compare a song instead of a result
compsong = get_song(resnum, state)
resultsem = compsong[1]
print "Comparing '%s' to '%s'" % (compsong[0], song[0])
else:
# Normal behaviour: compare a result to a song
if resnum >= len(state.results):
raise ShellError, "No result number %d" % resnum
result = state.results[resnum]
resultsem = result.semantics
print "Comparing result %d to '%s'" % (resnum, song[0])
# Do the comparison
if self.options['local']:
ops, song_steps, result_steps, distance = \
tonal_space_local_alignment(songsem.lf, resultsem.lf)
else:
ops, song_steps, result_steps, distance = \
tonal_space_alignment(songsem.lf, resultsem.lf, distance=True)
print "Steps in '%s':" % song[0]
print song_steps
if self.options['song']:
print "Steps in '%s'" % compsong[0]
else:
print "Steps in result path:"
print result_steps
print "Alignment operations:"
print ops
if self.options['alignment']:
print "Full alignment:"
# Print the alignment in three rows
WRAP_TO = 70
wrapped_rows = []
current_row = []
current_width = 0
# Wrap the rows
for cells in arrange_alignment(song_steps, result_steps, ops):
if len(cells[0]) + current_width > WRAP_TO:
# Start a new row
wrapped_rows.append(current_row)
current_row = []
current_width = 0
current_row.append(cells)
current_width += len(cells[0])
# Add the incomplete last row
wrapped_rows.append(current_row)
for row in wrapped_rows:
lefts, rights, opses = zip(*row)
print " ".join(lefts)
print " ".join(rights)
print " ".join(opses)
print
print "Distance: %s" % distance
class ResultSongDependencyRecoveryTool(Tool):
name = "Compare result"
commands = ['songcomparedep', 'songdep']
usage = ('songcomparedep <result-num> <song-num>', "compare a parse result "\
"to a song in the database using the tonal space edit distance metric")
help = """\
Compares a parse result to a specific song in the database in terms of
dependency recovery and outputs the recall, precision and f-score.
See also:
songcomparets: to compare a result to a song in terms of tonal space path
edit distance.
"""
tool_options = Tool.tool_options + [
ModuleOption('song', filter=str_to_bool,
usage="tosong=B, where B is true or false",
default=False,
help_text="Compare the numbered song in the corpus to the "\
"second song, instead of comparing the numbered result "\
"to the song"),
]
def run(self, args, state):
from jazzparser.formalisms.music_halfspan.semantics.distance import \
MaximalDependencyAlignment
if len(args) < 2:
raise ShellError, "Give a result number and a song number"
resnum = int(args[0])
songnum = int(args[1])
song = get_song(songnum, state)
songsem = song[1]
if self.options['song']:
# Compare a song instead of a result
compsong = get_song(resnum, state)
resultsem = compsong[1]
print "Comparing '%s' to '%s'" % (compsong[0], song[0])
else:
# Normal behaviour: compare a result to a song
if resnum >= len(state.results):
raise ShellError, "No result number %d" % resnum
result = state.results[resnum]
resultsem = result.semantics
print "Comparing result %d to '%s'" % (resnum, song[0])
# Compare the two logical forms on the basis of overlapping dependencies
options = {
'output' : 'recall',
}
recall_metric = MaximalDependencyAlignment(options=options)
options = {
'output' : 'precision',
}
precision_metric = MaximalDependencyAlignment(options=options)
recall = recall_metric.distance(resultsem, songsem)
precision = precision_metric.distance(resultsem, songsem)
# Print out each comparison
print "Recall: %s" % recall
print "Precision: %s" % precision
print "F-score: %s" % (2.0*recall*precision / (recall+precision))
class RecogniseSongTool(Tool):
name = "Recognise song"
commands = ['findsong', 'song']
usage = ('findsong [<result-num>]', "find the closest matching song "\
"in the loaded songset")
help = """\
Compares a parse result (the top probability one by default) to all the songs
in the loaded songset and finds the closest matches by tonal space path
similarity. Outputs a list of the closest matches.
"""
tool_options = Tool.tool_options + [
ModuleOption('average', filter=int,
usage="average=N, where B is an integer",
help_text="Average the distance measure over that given "\
"by the top N results (starting at the result given "\
"in the first argument, if given)"),
ModuleOption('metric',
usage="metric=M, where M is the name of an available metric",
help_text="Select a metric to make the comparison with. "\
"Call with metric=help to get a list of metrics"),
ModuleOption('mopts',
usage="mopts=OPT=VAL:OPT=VAL:...",
help_text="Options to pass to the metric. Use mopts=help "\
"to see a list of options"),
]
def run(self, args, state):
from jazzparser.formalisms.music_halfspan.evaluation import \
tonal_space_local_alignment, tonal_space_distance
from jazzparser.formalisms.music_halfspan import Formalism
metric_name = self.options['metric']
if metric_name == "help":
# Print a list of available metrics
print ", ".join([metric.name for metric in Formalism.semantics_distance_metrics])
return
if len(args) == 0:
resnum = 0
else:
resnum = int(args[0])
if self.options['average'] and self.options['average'] > 1:
# Average the distance over several results
resnums = range(resnum, resnum+self.options['average'])
else:
# Just a single result
resnums = [resnum]
resultsems = []
for resnum in resnums:
# Get the result semantics that we're going to try to match
if resnum >= len(state.results):
raise ShellError, "No result number %d" % resnum
result = state.results[resnum]
resultsems.append(result.semantics)
# Get the loaded songset containing the song corpus
songset = state.get_data("songset",
help_msg="Use command 'loadsongs' to load a songset")
# Load the appropriate metric
if metric_name is None:
# Use the first in the list as default
metric_cls = Formalism.semantics_distance_metrics[0]
else:
for m in Formalism.semantics_distance_metrics:
if m.name == metric_name:
metric_cls = m
break
else:
# No metric found matching this name
print "No metric '%s'" % metric_name
sys.exit(1)
print "Using distance metric: %s\n" % metric_cls.name
# Now process the metric options
moptstr = self.options['mopts']
if moptstr is not None:
if moptstr == "help":
# Output this metric's option help
print options_help_text(metric_cls.OPTIONS,
intro="Available options for metric '%s'" % metric_cls.name)
return
else:
moptstr = ""
mopts = ModuleOption.process_option_string(moptstr)
# Instantiate the metric with these options
metric = metric_cls(options=mopts)
song_distances = {}
# Try matching against each song
for resultsem in resultsems:
for name,song in songset.analyses:
distance = metric.distance(resultsem, song)
song_distances.setdefault(name, []).append(distance)
# Average the scores
distances = []
for name,costs in song_distances.items():
ave_cost = sum(costs)/float(len(costs))
distances.append((ave_cost,name))
# Sort so the closest ones come first
distances.sort(key=lambda x:x[0])
# Output all the songs, ordered by similarity, with their distance
for i,(distance,name) in enumerate(distances):
print "%d> %s (%s)" % (i, name, distance)
class SongSelfSimilarityTool(Tool):
"""
For fooling around with comparing songs to themselves to see what happens.
"""
name = "Self similarity"
commands = ['selfsim']
usage = ('selfsim <song-num>', "")
help = ""
tool_options = Tool.tool_options + [
ModuleOption('local', filter=str_to_bool,
usage="local=B, where B is true or false",
default=False,
help_text="Sort results by local alignment score, not "\
"global"),
]
def run(self, args, state):
from jazzparser.formalisms.music_halfspan.evaluation import \
tonal_space_local_alignment, tonal_space_distance
songnum = int(args[0])
name,song = get_song(songnum, state)
songset = state.get_data("songset")
distances = []
# Try comparing this song to each song in the set
for other_name,other_song in songset.analyses:
# Align locally and globally
ops,steps1,steps2,local_distance = \
tonal_space_local_alignment(other_song.lf, song.lf)
global_distance = \
tonal_space_distance(other_song.lf, song.lf)
distances.append((other_name, local_distance, global_distance))
# Sort the results
if self.options['local']:
distances.sort(key=lambda x:x[1])
else:
distances.sort(key=lambda x:x[2])
# Print out each one
print "Aligned %s with:" % name
for other_name, local_distance, global_distance in distances:
print "%s: local: %s, global: %s" % \
(other_name,local_distance,global_distance)
class SongTreeTool(Tool):
"""
Converts a song's semantics to a tree. Mainly just for debugging.
"""
name = "Song tree"
commands = ['tree']
usage = ('tree <song-num>', "converts the semantics of the song to a tree "\
"representation")
tool_options = Tool.tool_options + [
ModuleOption('res', filter=str_to_bool,
usage="res=B, where B is true or false",
default=False,
help_text="Show a result, instead of a corpus song"),
]
help = """\
Converts the semantics of the numbered song to its tree representation that
will be used for comparison to other logical forms. This is mainly for
debugging and has no use in itself.
"""
def run(self, args, state):
from jazzparser.formalisms.music_halfspan.harmstruct import \
semantics_to_dependency_trees
if self.options['res']:
resnum = int(args[0])
res = state.results[resnum]
song = res.semantics
print "Dependency tree for result %d\n" % resnum
else:
songnum = int(args[0])
name,song = get_song(songnum, state)
print "Dependency tree for '%s'\n" % name
print "Semantics:"
print song
print "\nTrees:"
for t in semantics_to_dependency_trees(song):
print t
class SongDependencyGraphTool(Tool):
"""
    Converts a song's semantics to a dependency graph. Mainly just for debugging.
"""
name = "Song dependency graph"
commands = ['depgraph', 'dep']
usage = ('depgraph <song-num>', "converts the semantics of the song to a "\
"dependency graph representation")
tool_options = Tool.tool_options + [
ModuleOption('res', filter=str_to_bool,
usage="res=B, where B is true or false",
default=False,
help_text="Show a result, instead of a corpus song"),
]
help = """\
Converts the semantics of the numbered song to its dependency graph representation that
will be used for comparison to other logical forms. This is mainly for
debugging and has no use in itself.
"""
def run(self, args, state):
from jazzparser.formalisms.music_halfspan.harmstruct import \
semantics_to_dependency_graph
if self.options['res']:
resnum = int(args[0])
res = state.results[resnum]
song = res.semantics
print "Dependency graph for result %d\n" % resnum
else:
songnum = int(args[0])
name,song = get_song(songnum, state)
print "Dependency graph for '%s'\n" % name
print "Semantics:"
print song
print
graph, times = semantics_to_dependency_graph(song)
print graph
def get_song(num, state):
"""
    Retrieve a song from the loaded songset by number. Utility function used
by tools above.
"""
songset = state.get_data("songset",
help_msg="Use command 'loadsongs' to load a songset")
if num >= len(songset):
raise ShellError, "There is no song %d. Use the 'songs' command to "\
"see a list of songs" % num
else:
return songset.analyses[num]
| gpl-3.0 | -6,989,043,982,802,696,000 | 38.433145 | 93 | 0.570562 | false | 4.240381 | false | false | false |
tobiz/OGN-Flight-Logger_V2 | settings.py | 1 | 9941 |
#-------------------------------------
# OGN-Flight-Logger Settings
#-------------------------------------
# Python APRS/OGN program to log flight times, durations, maximum heights achieved and tracks
#
# This python program creates an SQlite db of flights from a given location and aircraft list
# (the later two parameters are to be be developed into a more generalised format).#
#
# At the moment this is very much 'in development'#
#
# To install OGN Flight Logger the following prerequisites are required
# - python-tz
# - sqlite3
# - libfap
#
# If installing on an arm based system this can be achieved by:
#
# sudo apt-get install python-tz sqlite3
# wget http://www.pakettiradio.net/downloads/libfap/1.5/libfap6_1.5_armhf.deb
# sudo dpkg -i libfap*.deb
#
#-------------------------------------
# Setting values
#
# The values APRS_SERVER_HOST and APRS_SERVER_PORT are FIXED
# All other values should be set for a specific location and USER/PASSCODE
# Failure to change USER/PASSCODE results in an error
#-------------------------------------
#
# APRS_SERVER_HOST = 'rotate.aprs2.net'
# APRS_SERVER_PORT = 14580
APRS_SERVER_HOST = 'aprs.glidernet.org'
APRS_SERVER_PORT = 14580
#
# Please get your own Username and Passcode from http://www.george-smart.co.uk/wiki/APRS_Callpass
# DO NOT USE THE VALUES IN THIS FILE AS IT WILL STOP A PREVIOUS INVOCATION WORKING CORRECTLY
#
APRS_USER = 'PythonEx' # Username
APRS_PASSCODE = 1234 # Passcode. See http://www.george-smart.co.uk/wiki/APRS_Callpass
#
# Check that APRS_USER and APRS_PASSCODE are set
#
assert len(APRS_USER) > 3 and len(str(APRS_PASSCODE)) > 0, 'Please set APRS_USER and APRS_PASSCODE in settings.py.'
#
# User defined configuration values
#
#
# This value for base Directory for relative files, ie:
# - flogger_schema-1.0.4.sql
# - logs
# - tracks
import sys, os
file = sys.argv[0]
pathname = os.path.dirname(file)
#FLOGGER_BS = "/home/pjr/git_neon/OGN-Flight-Logger_V2/"
FLOGGER_BS = pathname + "/"
#FLOGGER_BS = "/home/pi/workspace/OGN-Flight-Logger_V2.1/"
FLOGGER_MODE = "test" # Test or live mode
FLOGGER_DB_SCHEMA = FLOGGER_BS + "flogger_schema-1.0.4.sql" # File holding SQLite3 database schema
#FLOGGER_QNH = 340 # QNH ie ASL in metres for airfield at lat/logitude, if set to 0, elevation is automatically looked up. This is Sutton Bank
FLOGGER_QNH = 0 # QNH ie ASL in metres for airfield at lat/logitude, if set to 0, elevation is automatically looked up. This is Sutton Bank
FLOGGER_LATITUDE, FLOGGER_LONGITUDE = '+54.228833', '-1.209639' # Latitude, longitude of named OGN receiver airfield
#FLOGGER_AIRFIELD_DETAILS = "" # Location details for use by geocoder. If blank, "" use LAT, LONG etc
FLOGGER_AIRFIELD_DETAILS = "Yorkshire Gliding Club UK" # Location details for use by geocoder. If blank, "" use LAT, LONG etc
FLOGGER_MIN_FLIGHT_TIME = "0:4:0" # Minimum time for duration to be considered a flight, hh:mm:ss
FLOGGER_KEEPALIVE_TIME = 900 # Interval in seconds for sending tcp/ip keep alive on socket connection
FLOGGER_DB_NAME = "flogger.sql3.2" # Name of file for flogger SQLite3 database
FLOGGER_FLARMNET_DB_URL = "http://www.flarmnet.org/files/data.fln" # URL of Flarmnet database
#FLOGGER_OGN_DB_URL = "http://ddb.glidernet.org/download" # URL of OGN Flarm database or blank for don't use
FLOGGER_OGN_DB_URL = "http://ddb.glidernet.org/download/?t=1" # URL of OGN Flarm database or blank for don't use
#FLOGGER_OGN_DB_URL = "" # URL of OGN Flarm to registration mapping database
#FLOGGER_AIRFIELD_NAME = "SuttonBnk" # Name of Flarm base station for airfield. NOTE MUST BE PROVIDED
FLOGGER_AIRFIELD_NAME = "SUTTON BANK" # Name of Flarm base station for airfield. NOTE MUST BE PROVIDED AS in flarmdb record
# If blank, "" then all aircraft in db are included in logs & tracks
#FLOGGER_FLEET_CHECK = "Y" # Checks Flarm ID is for aircraft fleet of FLOGGER_AIRFIELD_NAME if "Y"
FLOGGER_FLEET_CHECK = "N" # Checks Flarm ID is for aircraft fleet of FLOGGER_AIRFIELD_NAME if "Y"
FLOGGER_QFE_MIN = 100 # Minimum altitude in metres attained for inclusion as a flight, ie ~300 ft
FLOGGER_LOG_PATH = FLOGGER_BS + "logs" # Path where log files are stored
FLOGGER_TRACKS = "Y" # If Y flight tracks are recorded. Default is N, ie No tracks logged
FLOGGER_TRACKS_FOLDER = FLOGGER_BS + "tracks" # Folder for .gpx files for flight tracks
FLOGGER_V_SMALL = 10.0 # Lowest moving speed to be considered as zero kph
FLOGGER_NAME = "OGN_Flogger" # Name to be displayed on APRS
FLOGGER_VER = "0.2.3" # Flogger version number
FLOGGER_RAD = "50" # APRS radius in km from base station in AIRFIELD_DETAILS
FLOGGER_FLIGHTS_LOG = FLOGGER_BS + "" # Folder for csv file of daily flights record
FLOGGER_DATA_RETENTION = 3 # Number of days to keep .csv files, ie delete, if "0" keep all files
FLOGGER_LOG_TUGS = "Y" # Don't log tug flights if "N"
FLOGGER_TRACKS_IGC = "N" # Dump flight tracks in IGC format if "Y" else no
FLOGGER_LOG_TIME_DELTA = -1 # Number of hours before sunset to start processing flight log
FLOGGER_SMTP_SERVER_URL = '' # URL of smtp server for sending email
FLOGGER_SMTP_SERVER_PORT = 25 # smtp server port number, normally 25
FLOGGER_SMTP_TX = "" # Flight log sender email addrs
FLOGGER_SMTP_RX = "" # Flight log receiver email addrs
FLOGGER_AIRFIELD_LIMIT = 2000 # Distance from airfield centre considered a 'Land Out' in metres
FLOGGER_LANDOUT_MODE = "email" # Send land out msg by "email", "SMS", or "" don't send
FLOGGER_TAKEOFF_EMAIL = "Y" # Send email for each take off if Yes else no
FLOGGER_LANDING_EMAIL = "Y" # Send email for each landing if Yes else no
FLOGGER_LOG_LAUNCH_FAILURES = "N" # Log launch failures, ie below min time & min height
FLOGGER_LOCATION_HORIZON = '-0:34' # Adjustments for angle to horizon for sunset
FLOGGER_V_TAKEOFF_MIN = 10                  # Min ground speed considered as taken off. ogn-live is (55Km/h)
FLOGGER_V_LANDING_MIN = 10 # Min ground speed considered as landed. ogn-live is (40Km/h)
FLOGGER_DT_TUG_LAUNCH = 20 # Delta t(sec) between glider and tug takeoff times to be tug launched
FLOGGER_DUPLICATE_FLIGHT_DELTA_T = "0:1:00" # Delta between two landing & takeoff times of same aircraft to be different flights
FLOGGER_DUPLICATE_FLIGHT_DELTA = 90 # Delta time (secs) for duplicate flights
#
# The following fields are used to determine if data from APRS is a position packet from any 1 of up to 4 OGN receivers base stations.
# The OGN receiver areas can overlap and if more then 1 is supplied it will increase the accuracy of both the data and track results
# The list of OGN receivers can be found at http://wiki.glidernet.org/list-of-receivers. The field values are strings for any
# APRS AIRFIELDS code value. One or more must be specified.
# If a value is not needed use a null string, ie "". Coordinates for the primary OGN receiver station are either supplied
# by FLOGGER_LATITUDE, FLOGGER_LONGITUDE values or if these are not supplied then those returned by a geolocator
# service using FLOGGER_AIRFIELD_DETAILS. The primary OGN receiver base station coordinates together with the value
# of FLOGGER_RAD are used to filter the data received from APRS.
#
#FLOGGER_APRS_BASE_1 = "SuttonBnk"
#FLOGGER_APRS_BASE_2 = "UKPOC"
#FLOGGER_APRS_BASE_3 = "UKRUF"
#FLOGGER_APRS_BASE_4 = "Linton"
FLOGGER_APRS_BASES = ["SuttonBnk", "UKPOC", "UKRUF", "Linton", "Riponhill"]
# Coded 001-099: Gliders,
# 101-199: Tugs,
# 201-299: Motor Gliders,
# 301-399: Other
# Note. No reason for coding these values other than, 'why not!'
FLOGGER_FLEET_LIST = {"G-CHEF":1, "G-CHVR":2, "G-CKFN":3, "G-CKJH":4,
"G-CKLW":5, "G-CJVZ":6, "G-DDKC":7, "G-DDPO":8,
"G-BETM":101, "G-CIOF":102, "G-MOYR":103, "G-BJIV": 104,
"G-OSUT":201,
}
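# Illustrative sketch (not part of the original settings file; never called):
# the fleet codes above group aircraft by their hundreds digit, as described
# in the comment block. A hypothetical helper to decode that convention:
def _example_fleet_category(code):
    group = code // 100
    return {0: "Glider", 1: "Tug", 2: "Motor Glider", 3: "Other"}.get(group, "Unknown")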
#
# Aircraft types in OGN Database, see https://github.com/glidernet/ogn-ddb/blob/master/index.php#L87
#
FLOGGER_AIRCRAFT_CAT = [
    'None',                   # 0 = Blank
    'Gliders/motoGliders',    # 1
    'Planes',                 # 2
    'Ultralights',            # 3
    'Helicopters',            # 4
    'Drones/UAV',             # 5
    'Others',                 # 6
    ]
| gpl-3.0 | 1,513,637,528,252,753,400 | 62.318471 | 192 | 0.574489 | false | 3.557981 | false | false | false |
StephanII/accelerator-toolkit | magnets.py | 1 | 3901 | from base import Device
import math as math
class SectorBendingMagnet(Device):
def __init__(self, nomenclature="", width=0., height=0., length=0., angle=0.):
Device.__init__(self, nomenclature, width, height, length)
self.angle = angle
def __repr__(self):
r = str(self) + "("
r += "width=" + str(self.width) + "m, "
r += "height=" + str(self.height) + "m, "
r += "length=" + str(self.length) + "m, "
r += "angle=" + str(self.angle) + "rad)"
return r
def transport(self, ion):
if self.angle == 0:
ion.x += self.length * ion.dx
ion.y += self.length * ion.dy
else:
radius = self.length / self.angle
cos_angle = math.cos(self.angle)
sin_angle = math.sin(self.angle)
x = cos_angle * ion.x
x += radius * sin_angle * ion.dx
x += radius * (1. - cos_angle) * ion.dp
dx = -(1. / radius) * sin_angle * ion.x
dx += cos_angle * ion.dx + sin_angle * ion.dp
y = ion.y + self.length * ion.dy
dl = -sin_angle * ion.x
dl -= radius * (1. - cos_angle) * ion.dx
dl -= radius * (self.length - radius * sin_angle) * ion.dp
ion.x = x
ion.dx = dx
ion.y = y
ion.dl = dl
self.forward_if_not_lost(ion)
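
# --- Illustrative sketch (not part of the original module) ------------------
# The transport() method above applies the first-order transfer map of a
# sector bend. For reference, the horizontal part of that map can be written
# as a matrix acting on (x, x', dp); the longitudinal dl bookkeeping is left
# out. This helper is documentation only and is never called by the toolkit.
def _example_sector_bend_matrix(length, angle):
    radius = length / angle
    c = math.cos(angle)
    s = math.sin(angle)
    return [[c, radius * s, radius * (1. - c)],
            [-s / radius, c, s],
            [0., 0., 1.]]
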
class RectangleBendingMagnet(Device):
pass
class HorizontalKickerMagnet(Device):
def __init__(self, nomenclature="", width=0., height=0., angle=0.):
Device.__init__(self, nomenclature, width, height)
self.angle = angle
def __repr__(self):
r = str(self) + "("
r += "width=" + str(self.width) + "m, "
r += "height=" + str(self.height) + "m, "
r += "length=" + str(self.length) + "m, "
r += "angle=" + str(self.angle) + "rad)"
return r
def transport(self, ion):
ion.dx += self.angle
self.forward_if_not_lost(ion)
# -----------------------------------------------------------------------------------------
#
#
class QuadrupoleMagnet(Device):
def __init__(self, nomenclature="", width=0., height=0., length=0., strength=0.):
Device.__init__(self, nomenclature, width, height, length)
self.strength = strength
def __repr__(self):
r = str(self) + "("
r += "width=" + str(self.width) + "m, "
r += "height=" + str(self.height) + "m, "
r += "length=" + str(self.length) + "m, "
r += "strength=" + str(self.strength) + "rad)"
return r
def transport(self, ion):
sqrts = math.sqrt(abs(self.strength))
omega = self.length * sqrts
cosomega = math.cos(omega)
coshomega = math.cosh(omega)
sinomega = math.sin(omega)
sinhomega = math.sinh(omega)
if self.strength < 0:
x = cosomega * ion.x + (sinomega / sqrts) * ion.dx
dx = -sinomega * sqrts * ion.x + cosomega * ion.dx
y = coshomega * ion.y + (sinhomega / sqrts) * ion.dy
dy = sinhomega * sqrts * ion.y + coshomega * ion.dy
ion.x = x
ion.dx = dx
ion.y = y
ion.dy = dy
elif self.strength > 0:
x = coshomega * ion.x + (sinhomega / sqrts) * ion.dx
dx = sinhomega * sqrts * ion.x + coshomega * ion.dx
y = cosomega * ion.y + (sinomega / sqrts) * ion.dy
dy = -sinomega * sqrts * ion.y + cosomega * ion.dy
ion.x = x
ion.dx = dx
ion.y = y
ion.dy = dy
else:
ion.x += self.length * ion.dx
ion.y += self.length * ion.dy
self.forward_if_not_lost(ion)
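
# --- Illustrative sketch (not part of the original module) ------------------
# The quadrupole transport above is the usual focusing/defocusing pair of
# 2x2 matrices; with this sign convention a negative strength focuses the
# horizontal plane and defocuses the vertical one. Documentation only and
# never called by the toolkit.
def _example_quadrupole_matrices(length, strength):
    if strength == 0:
        drift = [[1., length], [0., 1.]]
        return drift, [row[:] for row in drift]
    sqrts = math.sqrt(abs(strength))
    omega = length * sqrts
    focusing = [[math.cos(omega), math.sin(omega) / sqrts],
                [-sqrts * math.sin(omega), math.cos(omega)]]
    defocusing = [[math.cosh(omega), math.sinh(omega) / sqrts],
                  [sqrts * math.sinh(omega), math.cosh(omega)]]
    if strength < 0:
        return focusing, defocusing
    return defocusing, focusing
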
# -----------------------------------------------------------------------------------------
#
class SixtupoleMagnet(Device):
pass
| mit | -7,058,308,463,578,992,000 | 28.55303 | 91 | 0.471161 | false | 3.380416 | false | false | false |
anortef/calico | calico/felix/test/test_frules.py | 1 | 11864 | # -*- coding: utf-8 -*-
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
felix.test.test_frules
~~~~~~~~~~~~~~~~~~~~~~~~~
Tests of iptables rules generation function.
"""
import logging
from mock import Mock, patch, call, ANY
from calico.felix import frules
from calico.felix.config import Config
from calico.felix.fiptables import IptablesUpdater
from calico.felix.frules import (
profile_to_chain_name, rules_to_chain_rewrite_lines, UnsupportedICMPType,
_rule_to_iptables_fragment
)
from calico.felix.test.base import BaseTestCase
_log = logging.getLogger(__name__)
DEFAULT_MARK = ('--append chain-foo --match comment '
'--comment "Mark as not matched" --jump MARK --set-mark 1')
RULES_TESTS = [
([{"src_net": "10.0.0.0/8"},], 4,
["--append chain-foo --source 10.0.0.0/8 --jump RETURN",
DEFAULT_MARK]),
([{"protocol": "icmp",
"src_net": "10.0.0.0/8",
"icmp_type": 7,
"icmp_code": 123},], 4,
["--append chain-foo --protocol icmp --source 10.0.0.0/8 "
"--match icmp --icmp-type 7/123 "
"--jump RETURN",
DEFAULT_MARK]),
([{"protocol": "icmp",
"src_net": "10.0.0.0/8",
"icmp_type": 7},], 4,
["--append chain-foo --protocol icmp --source 10.0.0.0/8 "
"--match icmp --icmp-type 7 "
"--jump RETURN",
DEFAULT_MARK]),
([{"protocol": "icmpv6",
"src_net": "1234::beef",
"icmp_type": 7},], 6,
["--append chain-foo --protocol icmpv6 --source 1234::beef "
"--match icmp6 --icmpv6-type 7 "
"--jump RETURN",
DEFAULT_MARK]),
([{"protocol": "tcp",
"src_tag": "tag-foo",
"src_ports": ["0:12", 13]}], 4,
["--append chain-foo --protocol tcp "
"--match set --match-set ipset-foo src "
"--match multiport --source-ports 0:12,13 --jump RETURN",
DEFAULT_MARK]),
([{"protocol": "tcp",
"src_ports": [0, "2:3", 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]}], 4,
["--append chain-foo --protocol tcp "
"--match multiport --source-ports 0,2:3,4,5,6,7,8,9,10,11,12,13,14,15 "
"--jump RETURN",
"--append chain-foo --protocol tcp "
"--match multiport --source-ports 16,17 "
"--jump RETURN",
DEFAULT_MARK]),
]
IP_SET_MAPPING = {
"tag-foo": "ipset-foo",
"tag-bar": "ipset-bar",
}
class TestRules(BaseTestCase):
def test_profile_to_chain_name(self):
self.assertEqual(profile_to_chain_name("inbound", "prof1"),
"felix-p-prof1-i")
self.assertEqual(profile_to_chain_name("outbound", "prof1"),
"felix-p-prof1-o")
def test_split_port_lists(self):
self.assertEqual(
frules._split_port_lists([1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15]),
[['1', '2', '3', '4', '5', '6', '7', '8', '9',
'10', '11', '12', '13', '14', '15']]
)
self.assertEqual(
frules._split_port_lists([1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16]),
[['1', '2', '3', '4', '5', '6', '7', '8', '9',
'10', '11', '12', '13', '14', '15'],
['16']]
)
self.assertEqual(
frules._split_port_lists([1, "2:3", 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17]),
[['1', '2:3', '4', '5', '6', '7', '8', '9',
'10', '11', '12', '13', '14', '15'],
['16', '17']]
)
def test_rules_generation(self):
for rules, ip_version, expected_output in RULES_TESTS:
fragments = rules_to_chain_rewrite_lines(
"chain-foo",
rules,
ip_version,
IP_SET_MAPPING,
on_allow="RETURN",
)
self.assertEqual(fragments, expected_output)
def test_bad_icmp_type(self):
with self.assertRaises(UnsupportedICMPType):
_rule_to_iptables_fragment("foo", {"icmp_type": 255}, 4, {})
def test_bad_protocol_with_ports(self):
with self.assertRaises(AssertionError):
_rule_to_iptables_fragment("foo", {"protocol": "10",
"src_ports": [1]}, 4, {})
def test_build_input_chain(self):
chain, deps = frules._build_input_chain("tap+",
"123.0.0.1",
1234,
546, 547,
False,
"DROP")
self.assertEqual(chain, [
'--append felix-INPUT ! --in-interface tap+ --jump RETURN',
'--append felix-INPUT --match conntrack --ctstate INVALID --jump DROP',
'--append felix-INPUT --match conntrack --ctstate RELATED,ESTABLISHED --jump ACCEPT',
'--append felix-INPUT --protocol tcp --destination 123.0.0.1 --dport 1234 --jump ACCEPT',
'--append felix-INPUT --protocol udp --sport 546 --dport 547 --jump ACCEPT',
'--append felix-INPUT --protocol udp --dport 53 --jump ACCEPT',
'--append felix-INPUT --jump DROP',
])
self.assertEqual(deps, set())
def test_build_input_chain_ipip(self):
chain, deps = frules._build_input_chain("tap+",
"123.0.0.1",
1234,
546, 547,
False,
"DROP",
"felix-hosts")
self.assertEqual(chain, [
'--append felix-INPUT --protocol ipencap --match set ! --match-set felix-hosts src --jump DROP',
'--append felix-INPUT ! --in-interface tap+ --jump RETURN',
'--append felix-INPUT --match conntrack --ctstate INVALID --jump DROP',
'--append felix-INPUT --match conntrack --ctstate RELATED,ESTABLISHED --jump ACCEPT',
'--append felix-INPUT --protocol tcp --destination 123.0.0.1 --dport 1234 --jump ACCEPT',
'--append felix-INPUT --protocol udp --sport 546 --dport 547 --jump ACCEPT',
'--append felix-INPUT --protocol udp --dport 53 --jump ACCEPT',
'--append felix-INPUT --jump DROP',
])
self.assertEqual(deps, set())
def test_build_input_chain_return(self):
chain, deps = frules._build_input_chain("tap+",
None,
None,
546, 547,
True,
"RETURN")
self.assertEqual(chain, [
'--append felix-INPUT ! --in-interface tap+ --jump RETURN',
'--append felix-INPUT --match conntrack --ctstate INVALID --jump DROP',
'--append felix-INPUT --match conntrack --ctstate RELATED,ESTABLISHED --jump ACCEPT',
'--append felix-INPUT --jump ACCEPT --protocol ipv6-icmp --icmpv6-type 130',
'--append felix-INPUT --jump ACCEPT --protocol ipv6-icmp --icmpv6-type 131',
'--append felix-INPUT --jump ACCEPT --protocol ipv6-icmp --icmpv6-type 132',
'--append felix-INPUT --jump ACCEPT --protocol ipv6-icmp --icmpv6-type 133',
'--append felix-INPUT --jump ACCEPT --protocol ipv6-icmp --icmpv6-type 135',
'--append felix-INPUT --jump ACCEPT --protocol ipv6-icmp --icmpv6-type 136',
'--append felix-INPUT --protocol udp --sport 546 --dport 547 --jump ACCEPT',
'--append felix-INPUT --protocol udp --dport 53 --jump ACCEPT',
'--append felix-INPUT --jump felix-FROM-ENDPOINT',
])
self.assertEqual(deps, set(["felix-FROM-ENDPOINT"]))
@patch("calico.felix.futils.check_call", autospec=True)
@patch("calico.felix.frules.devices", autospec=True)
@patch("calico.felix.frules.HOSTS_IPSET_V4", autospec=True)
def test_install_global_rules(self, m_ipset, m_devices, m_check_call):
m_devices.interface_exists.return_value = False
m_devices.interface_up.return_value = False
m_config = Mock(spec=Config)
m_config.IP_IN_IP_ENABLED = True
m_config.METADATA_IP = "123.0.0.1"
m_config.METADATA_PORT = 1234
m_config.DEFAULT_INPUT_CHAIN_ACTION = "RETURN"
m_config.IFACE_PREFIX = "tap"
m_v4_upd = Mock(spec=IptablesUpdater)
m_v6_upd = Mock(spec=IptablesUpdater)
m_v4_nat_upd = Mock(spec=IptablesUpdater)
frules.install_global_rules(m_config, m_v4_upd, m_v6_upd, m_v4_nat_upd)
m_ipset.ensure_exists.assert_called_once_with()
self.assertEqual(
m_check_call.mock_calls,
[
call(["ip", "tunnel", "add", "tunl0", "mode", "ipip"]),
call(["ip", "link", "set", "tunl0", "up"]),
]
)
expected_chains = {
'felix-INPUT': [
'--append felix-INPUT ! --in-interface tap+ --jump RETURN',
'--append felix-INPUT --match conntrack --ctstate INVALID --jump DROP',
'--append felix-INPUT --match conntrack --ctstate RELATED,ESTABLISHED --jump ACCEPT',
'--append felix-INPUT --protocol tcp --destination 123.0.0.1 --dport 1234 --jump ACCEPT',
'--append felix-INPUT --protocol udp --sport 68 --dport 67 --jump ACCEPT',
'--append felix-INPUT --protocol udp --dport 53 --jump ACCEPT',
'--append felix-INPUT --jump felix-FROM-ENDPOINT'
],
'felix-FORWARD': [
'--append felix-FORWARD --in-interface tap+ --match conntrack --ctstate INVALID --jump DROP',
'--append felix-FORWARD --out-interface tap+ --match conntrack --ctstate INVALID --jump DROP',
'--append felix-FORWARD --in-interface tap+ --match conntrack --ctstate RELATED,ESTABLISHED --jump RETURN',
'--append felix-FORWARD --out-interface tap+ --match conntrack --ctstate RELATED,ESTABLISHED --jump RETURN',
'--append felix-FORWARD --jump felix-FROM-ENDPOINT --in-interface tap+',
'--append felix-FORWARD --jump felix-TO-ENDPOINT --out-interface tap+',
'--append felix-FORWARD --jump ACCEPT --in-interface tap+',
'--append felix-FORWARD --jump ACCEPT --out-interface tap+'
]
}
m_v4_upd.rewrite_chains.assert_called_once_with(
expected_chains,
{
'felix-INPUT': set(['felix-FROM-ENDPOINT']),
'felix-FORWARD': set([
'felix-FROM-ENDPOINT',
'felix-TO-ENDPOINT'
])
},
async=False
)
self.assertEqual(
m_v4_upd.ensure_rule_inserted.mock_calls,
[
call("INPUT --jump felix-INPUT", async=False),
call("FORWARD --jump felix-FORWARD", async=False),
]
) | apache-2.0 | 8,944,739,970,673,061,000 | 42.944444 | 124 | 0.517701 | false | 3.757998 | true | false | false |
seanpue/al340 | lessons/textanalysis/Untitled0.py | 1 | 1100 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
import nltk
%matplotlib inline
# <codecell>
import os
from nltk.corpus.reader.plaintext import PlaintextCorpusReader
corpusdir = 'data/texts/' # Directory of corpus.
corpus0 = PlaintextCorpusReader(corpusdir, '.*')
corpus = nltk.Text(corpus0.words())
# <codecell>
corpus.concordance('girls')
# <codecell>
corpus.concordance("'", lines=all)
# <codecell>
len(set(corpus))
# <codecell>
len(corpus)
# <codecell>
corpus.common_contexts(['general'])
# <codecell>
from nltk.corpus import stopwords
stopwords = stopwords.words('english')
# <codecell>
corpus.dispersion_plot(["women","girls","fire"])
# <codecell>
import mpld3
# <codecell>
mpld3.enable_notebook()
# <codecell>
corpus.dispersion_plot(["women","girls","fire"], )
# <codecell>
len(corpus)
# <codecell>
len(set(corpus)) / len(corpus)
# <codecell>
corpus[0:100]
# <codecell>
fdist1 = nltk.FreqDist(corpus)
# <codecell>
fdist1.most_common(50)
# <codecell>
fdist1.plot(50, cumulative=True)
# <codecell>
[w.upper() for w in corpus]
| mit | -1,315,354,380,263,225,000 | 11.454545 | 62 | 0.686131 | false | 2.531178 | false | false | false |
karrtikr/ete | ete3/tools/phylobuild_lib/interface.py | 1 | 16365 | # #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://etetoolkit.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2015).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. BMC Bioinformatics
# 2010, 11:24. doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit may be available in the documentation.
#
# More info at http://etetoolkit.org. Contact: [email protected]
#
#
# #END_LICENSE#############################################################
from __future__ import absolute_import
from __future__ import print_function
import sys
import os
import re
import time
from signal import signal, SIGWINCH, SIGKILL, SIGTERM
from collections import deque
from textwrap import TextWrapper
import six.moves.queue
import threading
from .logger import get_main_log
from .utils import GLOBALS, clear_tempdir, terminate_job_launcher, pjoin, pexist
from .errors import *
import six
from six import StringIO
MAIN_LOG = False
# try:
# import curses
# except ImportError:
# NCURSES = False
# else:
# NCURSES = True
NCURSES = False
# CONVERT shell colors to the same curses palette
SHELL_COLORS = {
"10": '\033[1;37;41m', # white on red
"11": '\033[1;37;43m', # white on orange
"12": '\033[1;37;45m', # white on magenta
"16": '\033[1;37;46m', # white on blue
"13": '\033[1;37;40m', # black on white
"06": '\033[1;34m', # light blue
"05": '\033[1;31m', # light red
"03": '\033[1;32m', # light green
"8": '\033[1;33m', # yellow
"7": '\033[36m', # cyan
"6": '\033[34m', # blue
"3": '\033[32m', # green
"4": '\033[33m', # orange
"5": '\033[31m', # red
"2": "\033[35m", # magenta
"1": "\033[0m", # white
"0": "\033[0m", # end
}
def safe_int(x):
try:
return int(x)
except TypeError:
return x
def shell_colorify_match(match):
return SHELL_COLORS[match.groups()[2]]
class ExcThread(threading.Thread):
def __init__(self, bucket, *args, **kargs):
threading.Thread.__init__(self, *args, **kargs)
self.bucket = bucket
def run(self):
try:
threading.Thread.run(self)
except Exception:
self.bucket.put(sys.exc_info())
raise
class Screen(StringIO):
# tags used to control color of strings and select buffer
TAG = re.compile("@@((\d+),)?(\d+):", re.MULTILINE)
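    # Tag format is "@@<window>,<color>:" or "@@<color>:"; e.g. "@@2,05:text"
    # sends "text" to window 2 using curses color pair 5, and the window index
    # defaults to 1 when omitted (see write_curses below).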
def __init__(self, windows):
StringIO.__init__(self)
self.windows = windows
self.autoscroll = {}
self.pos = {}
self.lines = {}
self.maxsize = {}
self.stdout = None
self.logfile = None
self.wrapper = TextWrapper(width=80, initial_indent="",
subsequent_indent=" ",
replace_whitespace=False)
if NCURSES:
for windex in windows:
h, w = windows[windex][0].getmaxyx()
self.maxsize[windex] = (h, w)
self.pos[windex] = [0, 0]
self.autoscroll[windex] = True
self.lines[windex] = 0
def scroll(self, win, vt, hz=0, refresh=True):
line, col = self.pos[win]
hz_pos = col + hz
if hz_pos < 0:
hz_pos = 0
elif hz_pos >= 1000:
hz_pos = 999
vt_pos = line + vt
if vt_pos < 0:
vt_pos = 0
elif vt_pos >= 1000:
vt_pos = 1000 - 1
if line != vt_pos or col != hz_pos:
self.pos[win] = [vt_pos, hz_pos]
if refresh:
self.refresh()
def scroll_to(self, win, vt, hz=0, refresh=True):
line, col = self.pos[win]
hz_pos = hz
if hz_pos < 0:
hz_pos = 0
elif hz_pos >= 1000:
hz_pos = 999
vt_pos = vt
if vt_pos < 0:
vt_pos = 0
elif vt_pos >= 1000:
vt_pos = 1000 - 1
if line != vt_pos or col != hz_pos:
self.pos[win] = [vt_pos, hz_pos]
if refresh:
self.refresh()
def refresh(self):
for windex, (win, dim) in six.iteritems(self.windows):
h, w, sy, sx = dim
line, col = self.pos[windex]
if h is not None:
win.touchwin()
win.noutrefresh(line, col, sy+1, sx+1, sy+h-2, sx+w-2)
else:
win.noutrefresh()
curses.doupdate()
def write(self, text):
if six.PY3:
text = str(text)
else:
if isinstance(text, six.text_type):
#text = text.encode(self.stdout.encoding)
text = text.encode("UTF-8")
if NCURSES:
self.write_curses(text)
if self.logfile:
text = re.sub(self.TAG, "", text)
self.write_log(text)
else:
if GLOBALS["color_shell"]:
text = re.sub(self.TAG, shell_colorify_match, text)
else:
text = re.sub(self.TAG, "", text)
self.write_normal(text)
if self.logfile:
self.write_log(text)
def write_log(self, text):
self.logfile.write(text)
self.logfile.flush()
def write_normal(self, text):
#_text = '\n'.join(self.wrapper.wrap(text))
#self.stdout.write(_text+"\n")
self.stdout.write(text)
def write_curses(self, text):
formatstr = deque()
for m in re.finditer(self.TAG, text):
x1, x2 = m.span()
cindex = safe_int(m.groups()[2])
windex = safe_int(m.groups()[1])
formatstr.append([x1, x2, cindex, windex])
if not formatstr:
formatstr.append([None, 0, 1, 1])
if formatstr[0][1] == 0:
stop, start, cindex, windex = formatstr.popleft()
if windex is None:
windex = 1
else:
stop, start, cindex, windex = None, 0, 1, 1
while start is not None:
if formatstr:
next_stop, next_start, next_cindex, next_windex = formatstr.popleft()
else:
next_stop, next_start, next_cindex, next_windex = None, None, cindex, windex
face = curses.color_pair(cindex)
win, (h, w, sy, sx) = self.windows[windex]
ln, cn = self.pos[windex]
# Is this too inefficient?
new_lines = text[start:next_stop].count("\n")
self.lines[windex] += new_lines
            if self.lines[windex] > self.maxsize[windex][0]:
                _y, _x = win.getyx()
                for _i in range(self.lines[windex] - self.maxsize[windex][0]):
win.move(0,0)
win.deleteln()
win.move(_y, _x)
# Visual scroll
if self.autoscroll[windex]:
scroll = self.lines[windex] - ln - h
if scroll > 0:
self.scroll(windex, scroll, refresh=False)
try:
win.addstr(text[start:next_stop], face)
except curses.error:
win.addstr("???")
start = next_start
stop = next_stop
cindex = next_cindex
if next_windex is not None:
windex = next_windex
self.refresh()
def resize_screen(self, s, frame):
import sys,fcntl,termios,struct
data = fcntl.ioctl(self.stdout.fileno(), termios.TIOCGWINSZ, '1234')
h, w = struct.unpack('hh', data)
win = self.windows
#main = curses.initscr()
#h, w = main.getmaxyx()
#win[0] = (main, (None, None, 0, 0))
#curses.resizeterm(h, w)
win[0][0].resize(h, w)
win[0][0].clear()
info_win, error_win, debug_win = setup_layout(h, w)
win[1][1] = info_win
win[2][1] = error_win
win[3][1] = debug_win
self.refresh()
def init_curses(main_scr):
if not NCURSES or not main_scr:
# curses disabled, no multi windows
return None
# Colors
curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLACK)
curses.init_pair(2, curses.COLOR_MAGENTA, curses.COLOR_BLACK)
curses.init_pair(3, curses.COLOR_GREEN, curses.COLOR_BLACK)
curses.init_pair(4, curses.COLOR_YELLOW, curses.COLOR_BLACK)
curses.init_pair(5, curses.COLOR_RED, curses.COLOR_BLACK)
curses.init_pair(6, curses.COLOR_RED, curses.COLOR_BLACK)
curses.init_pair(10, curses.COLOR_WHITE, curses.COLOR_RED)
curses.init_pair(11, curses.COLOR_WHITE, curses.COLOR_YELLOW)
curses.init_pair(12, curses.COLOR_WHITE, curses.COLOR_MAGENTA)
WIN = {}
main = main_scr
h, w = main.getmaxyx()
WIN[0] = (main, (None, None, 0, 0))
# Creates layout
info_win, error_win, debug_win = setup_layout(h, w)
WIN[1] = [curses.newpad(5000, 1000), info_win]
WIN[2] = [curses.newpad(5000, 1000), error_win]
WIN[3] = [curses.newpad(5000, 1000), debug_win]
#WIN[1], WIN[11] = newwin(h-1, w/2, 1,1)
#WIN[2], WIN[12] = newwin(h-dbg_h-1, (w/2)-1, 1, (w/2)+2)
#WIN[3], WIN[13] = newwin(dbg_h-1, (w/2)-1, h-dbg_h+1, (w/2)+2)
for windex, (w, dim) in six.iteritems(WIN):
#w = WIN[i]
#w.bkgd(str(windex))
w.bkgd(" ")
w.keypad(1)
w.idlok(True)
w.scrollok(True)
return WIN
def clear_env():
try:
terminate_job_launcher()
except:
pass
base_dir = GLOBALS["basedir"]
lock_file = pjoin(base_dir, "alive")
try:
os.remove(lock_file)
except Exception:
print("could not remove lock file %s" %lock_file, file=sys.stderr)
clear_tempdir()
def app_wrapper(func, args):
global NCURSES
base_dir = GLOBALS.get("scratch_dir", GLOBALS["basedir"])
lock_file = pjoin(base_dir, "alive")
if not args.enable_ui:
NCURSES = False
if not pexist(lock_file) or args.clearall:
open(lock_file, "w").write(time.ctime())
else:
clear_env()
print('\nThe same process seems to be running. Use --clearall or remove the lock file "alive" within the output dir', file=sys.stderr)
sys.exit(-1)
try:
if NCURSES:
curses.wrapper(main, func, args)
else:
main(None, func, args)
except ConfigError as e:
if GLOBALS.get('_background_scheduler', None):
GLOBALS['_background_scheduler'].terminate()
print("\nConfiguration Error:", e, file=sys.stderr)
clear_env()
sys.exit(-1)
except DataError as e:
if GLOBALS.get('_background_scheduler', None):
GLOBALS['_background_scheduler'].terminate()
print("\nData Error:", e, file=sys.stderr)
clear_env()
sys.exit(-1)
except KeyboardInterrupt:
        # Control-C is also grabbed by the back_launcher, so it is not necessary
# to terminate from here
print("\nProgram was interrupted.", file=sys.stderr)
if args.monitor:
print(("VERY IMPORTANT !!!: Note that launched"
" jobs will keep running as you provided the --monitor flag"), file=sys.stderr)
clear_env()
sys.exit(-1)
except:
if GLOBALS.get('_background_scheduler', None):
GLOBALS['_background_scheduler'].terminate()
clear_env()
raise
else:
if GLOBALS.get('_background_scheduler', None):
GLOBALS['_background_scheduler'].terminate()
clear_env()
def main(main_screen, func, args):
""" Init logging and Screen. Then call main function """
global MAIN_LOG
# Do I use ncurses or basic terminal interface?
screen = Screen(init_curses(main_screen))
# prints are handled by my Screen object
screen.stdout = sys.stdout
if args.logfile:
screen.logfile = open(os.path.join(GLOBALS["basedir"], "npr.log"), "w")
sys.stdout = screen
sys.stderr = screen
# Start logger, pointing to the selected screen
if not MAIN_LOG:
MAIN_LOG = True
log = get_main_log(screen, [28,26,24,22,20,10][args.verbosity])
# Call main function as lower thread
if NCURSES:
screen.refresh()
exceptions = six.moves.queue.Queue()
t = ExcThread(bucket=exceptions, target=func, args=[args])
t.daemon = True
t.start()
ln = 0
chars = "\\|/-\\|/-"
cbuff = 1
try:
while 1:
try:
exc = exceptions.get(block=False)
except six.moves.queue.Empty:
pass
else:
exc_type, exc_obj, exc_trace = exc
# deal with the exception
#print exc_trace, exc_type, exc_obj
raise exc_obj
mwin = screen.windows[0][0]
key = mwin.getch()
mwin.addstr(0, 0, "%s (%s) (%s) (%s)" %(key, screen.pos, ["%s %s" %(i,w[1]) for i,w in list(screen.windows.items())], screen.lines) + " "*50)
mwin.refresh()
if key == 113:
# Fixes the problem of prints without newline char
raise KeyboardInterrupt("Q Pressed")
if key == 9:
cbuff += 1
if cbuff>3:
cbuff = 1
elif key == curses.KEY_UP:
screen.scroll(cbuff, -1)
elif key == curses.KEY_DOWN:
screen.scroll(cbuff, 1)
elif key == curses.KEY_LEFT:
screen.scroll(cbuff, 0, -1)
elif key == curses.KEY_RIGHT:
screen.scroll(cbuff, 0, 1)
elif key == curses.KEY_NPAGE:
screen.scroll(cbuff, 10)
elif key == curses.KEY_PPAGE:
screen.scroll(cbuff, -10)
elif key == curses.KEY_END:
screen.scroll_to(cbuff, 999, 0)
elif key == curses.KEY_HOME:
screen.scroll_to(cbuff, 0, 0)
elif key == curses.KEY_RESIZE:
screen.resize_screen(None, None)
else:
pass
except:
# fixes the problem of restoring screen when last print
# did not contain a newline char. WTF!
print("\n")
raise
#while 1:
# if ln >= len(chars):
# ln = 0
# #screen.windows[0].addstr(0,0, chars[ln])
# #screen.windows[0].refresh()
# time.sleep(0.2)
# ln += 1
else:
func(args)
def setup_layout(h, w):
# Creates layout
header = 4
start_x = 0
start_y = header
h -= start_y
w -= start_x
    h1 = h // 2 + h % 2
    h2 = h // 2
if w > 160:
# _______
# | |___|
# |___|___|
        w1 = w // 2 + w % 2
        w2 = w // 2
info_win = [h, w1, start_y, start_x]
error_win = [h1, w2, start_y, w1]
debug_win = [h2, w2, h1, w1]
else:
# ___
# |___|
# |___|
# |___|
        h2a = h2 // 2 + h2 % 2
        h2b = h2 // 2
info_win = [h1, w, start_y, start_x]
error_win = [h2a, w, h1, start_x]
debug_win = [h2b, w, h1+h2a, start_x]
return info_win, error_win, debug_win
| gpl-3.0 | 3,725,017,766,903,037,000 | 29.935728 | 157 | 0.524962 | false | 3.478954 | false | false | false |
phenoxim/nova | nova/api/openstack/placement/handler.py | 1 | 9631 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handlers for placement API.
Individual handlers are associated with URL paths in the
ROUTE_DECLARATIONS dictionary. At the top level each key is a Routes
compliant path. The value of that key is a dictionary mapping
individual HTTP request methods to a Python function representing a
simple WSGI application for satisfying that request.
The ``make_map`` method processes ROUTE_DECLARATIONS to create a
Routes.Mapper, including automatic handlers to respond with a
405 when a request is made against a valid URL with an invalid
method.
"""
import routes
import webob
from oslo_log import log as logging
from nova.api.openstack.placement import exception
from nova.api.openstack.placement.handlers import aggregate
from nova.api.openstack.placement.handlers import allocation
from nova.api.openstack.placement.handlers import allocation_candidate
from nova.api.openstack.placement.handlers import inventory
from nova.api.openstack.placement.handlers import resource_class
from nova.api.openstack.placement.handlers import resource_provider
from nova.api.openstack.placement.handlers import root
from nova.api.openstack.placement.handlers import trait
from nova.api.openstack.placement.handlers import usage
from nova.api.openstack.placement import policy
from nova.api.openstack.placement import util
from nova.i18n import _
LOG = logging.getLogger(__name__)
# URLs and Handlers
# NOTE(cdent): When adding URLs here, do not use regex patterns in
# the path parameters (e.g. {uuid:[0-9a-zA-Z-]+}) as that will lead
# to 404s that are controlled outside of the individual resources
# and thus do not include specific information on the why of the 404.
ROUTE_DECLARATIONS = {
'/': {
'GET': root.home,
},
# NOTE(cdent): This allows '/placement/' and '/placement' to
# both work as the root of the service, which we probably want
# for those situations where the service is mounted under a
# prefix (as it is in devstack). While weird, an empty string is
# a legit key in a dictionary and matches as desired in Routes.
'': {
'GET': root.home,
},
'/resource_classes': {
'GET': resource_class.list_resource_classes,
'POST': resource_class.create_resource_class
},
'/resource_classes/{name}': {
'GET': resource_class.get_resource_class,
'PUT': resource_class.update_resource_class,
'DELETE': resource_class.delete_resource_class,
},
'/resource_providers': {
'GET': resource_provider.list_resource_providers,
'POST': resource_provider.create_resource_provider
},
'/resource_providers/{uuid}': {
'GET': resource_provider.get_resource_provider,
'DELETE': resource_provider.delete_resource_provider,
'PUT': resource_provider.update_resource_provider
},
'/resource_providers/{uuid}/inventories': {
'GET': inventory.get_inventories,
'POST': inventory.create_inventory,
'PUT': inventory.set_inventories,
'DELETE': inventory.delete_inventories
},
'/resource_providers/{uuid}/inventories/{resource_class}': {
'GET': inventory.get_inventory,
'PUT': inventory.update_inventory,
'DELETE': inventory.delete_inventory
},
'/resource_providers/{uuid}/usages': {
'GET': usage.list_usages
},
'/resource_providers/{uuid}/aggregates': {
'GET': aggregate.get_aggregates,
'PUT': aggregate.set_aggregates
},
'/resource_providers/{uuid}/allocations': {
'GET': allocation.list_for_resource_provider,
},
'/allocations': {
'POST': allocation.set_allocations,
},
'/allocations/{consumer_uuid}': {
'GET': allocation.list_for_consumer,
'PUT': allocation.set_allocations_for_consumer,
'DELETE': allocation.delete_allocations,
},
'/allocation_candidates': {
'GET': allocation_candidate.list_allocation_candidates,
},
'/traits': {
'GET': trait.list_traits,
},
'/traits/{name}': {
'GET': trait.get_trait,
'PUT': trait.put_trait,
'DELETE': trait.delete_trait,
},
'/resource_providers/{uuid}/traits': {
'GET': trait.list_traits_for_resource_provider,
'PUT': trait.update_traits_for_resource_provider,
'DELETE': trait.delete_traits_for_resource_provider
},
'/usages': {
'GET': usage.get_total_usages,
},
}
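# Illustrative sketch (hypothetical route and handler names, not part of the real
# API): the declaration -> mapper pattern above, reduced to a single toy route.
def _example_toy_map():
    def list_widgets(environ, start_response):
        # Stand-in WSGI handler for the toy '/widgets' route.
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'widgets']
    toy_declarations = {'/widgets': {'GET': list_widgets}}
    # make_map() wires GET to the handler and every other method to handle_405.
    return make_map(toy_declarations)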
def dispatch(environ, start_response, mapper):
"""Find a matching route for the current request.
If no match is found, raise a 404 response.
If there is a matching route, but no matching handler
for the given method, raise a 405.
"""
result = mapper.match(environ=environ)
if result is None:
raise webob.exc.HTTPNotFound(
json_formatter=util.json_error_formatter)
# We can't reach this code without action being present.
handler = result.pop('action')
environ['wsgiorg.routing_args'] = ((), result)
return handler(environ, start_response)
def handle_405(environ, start_response):
"""Return a 405 response when method is not allowed.
If _methods are in routing_args, send an allow header listing
the methods that are possible on the provided URL.
"""
_methods = util.wsgi_path_item(environ, '_methods')
headers = {}
if _methods:
# Ensure allow header is a python 2 or 3 native string (thus
# not unicode in python 2 but stay a string in python 3)
# In the process done by Routes to save the allowed methods
# to its routing table they become unicode in py2.
headers['allow'] = str(_methods)
# Use Exception class as WSGI Application. We don't want to raise here.
response = webob.exc.HTTPMethodNotAllowed(
_('The method specified is not allowed for this resource.'),
headers=headers, json_formatter=util.json_error_formatter)
return response(environ, start_response)
def make_map(declarations):
"""Process route declarations to create a Route Mapper."""
mapper = routes.Mapper()
for route, targets in declarations.items():
allowed_methods = []
for method in targets:
mapper.connect(route, action=targets[method],
conditions=dict(method=[method]))
allowed_methods.append(method)
allowed_methods = ', '.join(allowed_methods)
mapper.connect(route, action=handle_405, _methods=allowed_methods)
return mapper
class PlacementHandler(object):
"""Serve Placement API.
Dispatch to handlers defined in ROUTE_DECLARATIONS.
"""
def __init__(self, **local_config):
# NOTE(cdent): Local config currently unused.
self._map = make_map(ROUTE_DECLARATIONS)
def __call__(self, environ, start_response):
# All requests but '/' require admin.
if environ['PATH_INFO'] != '/':
context = environ['placement.context']
# TODO(cdent): Using is_admin everywhere (except /) is
# insufficiently flexible for future use case but is
# convenient for initial exploration.
if not policy.placement_authorize(context, 'placement'):
raise webob.exc.HTTPForbidden(
_('admin required'),
json_formatter=util.json_error_formatter)
# Check that an incoming request with a content-length header
# that is an integer > 0 and not empty, also has a content-type
# header that is not empty. If not raise a 400.
clen = environ.get('CONTENT_LENGTH')
try:
if clen and (int(clen) > 0) and not environ.get('CONTENT_TYPE'):
raise webob.exc.HTTPBadRequest(
_('content-type header required when content-length > 0'),
json_formatter=util.json_error_formatter)
except ValueError as exc:
raise webob.exc.HTTPBadRequest(
_('content-length header must be an integer'),
json_formatter=util.json_error_formatter)
try:
return dispatch(environ, start_response, self._map)
# Trap the NotFound exceptions raised by the objects used
# with the API and transform them into webob.exc.HTTPNotFound.
except exception.NotFound as exc:
raise webob.exc.HTTPNotFound(
exc, json_formatter=util.json_error_formatter)
# Remaining uncaught exceptions will rise first to the Microversion
# middleware, where any WebOb generated exceptions will be caught and
# transformed into legit HTTP error responses (with microversion
# headers added), and then to the FaultWrapper middleware which will
# catch anything else and transform them into 500 responses.
# NOTE(cdent): There should be very few uncaught exceptions which are
# not WebOb exceptions at this stage as the handlers are contained by
# the wsgify decorator which will transform those exceptions to
# responses itself.
| apache-2.0 | 7,792,931,749,543,994,000 | 40.15812 | 78 | 0.668986 | false | 4.246473 | false | false | false |
fallen/artiq | artiq/language/scan.py | 1 | 4762 | """
Implementation and management of scan objects.
A scan object (e.g. :class:`artiq.language.scan.LinearScan`) represents a
one-dimensional sweep of a numerical range. Multi-dimensional scans are
constructed by combining several scan objects.
Iterate on a scan object to scan it, e.g. ::
for variable in self.scan:
do_something(variable)
Iterating multiple times on the same scan object is possible, with the scan
restarting at the minimum value each time. Iterating concurrently on the
same scan object (e.g. via nested loops) is also supported, and the
iterators are independent from each other.
Scan objects are supported both on the host and the core device.
"""
from random import Random, shuffle
import inspect
from artiq.language.core import *
from artiq.language.environment import NoDefault, DefaultMissing
__all__ = ["ScanObject",
"NoScan", "LinearScan", "RandomScan", "ExplicitScan",
"Scannable"]
class ScanObject:
pass
class NoScan(ScanObject):
"""A scan object that yields a single value."""
def __init__(self, value):
self.value = value
@portable
def _gen(self):
yield self.value
@portable
def __iter__(self):
return self._gen()
def describe(self):
return {"ty": "NoScan", "value": self.value}
class LinearScan(ScanObject):
"""A scan object that yields a fixed number of increasing evenly
spaced values in a range."""
def __init__(self, min, max, npoints):
self.min = min
self.max = max
self.npoints = npoints
@portable
def _gen(self):
r = self.max - self.min
d = self.npoints - 1
for i in range(self.npoints):
yield r*i/d + self.min
@portable
def __iter__(self):
return self._gen()
def describe(self):
return {"ty": "LinearScan",
"min": self.min, "max": self.max, "npoints": self.npoints}
class RandomScan(ScanObject):
"""A scan object that yields a fixed number of randomly ordered evenly
spaced values in a range."""
    def __init__(self, min, max, npoints, seed=0):
        self.min = min
        self.max = max
        self.npoints = npoints
        self.sequence = list(LinearScan(min, max, npoints))
        shuffle(self.sequence, Random(seed).random)
@portable
def __iter__(self):
return iter(self.sequence)
def describe(self):
return {"ty": "RandomScan",
"min": self.min, "max": self.max, "npoints": self.npoints}
class ExplicitScan(ScanObject):
"""A scan object that yields values from an explicitly defined sequence."""
def __init__(self, sequence):
self.sequence = sequence
@portable
def __iter__(self):
return iter(self.sequence)
def describe(self):
return {"ty": "ExplicitScan", "sequence": self.sequence}
_ty_to_scan = {
"NoScan": NoScan,
"LinearScan": LinearScan,
"RandomScan": RandomScan,
"ExplicitScan": ExplicitScan
}
class Scannable:
"""An argument (as defined in :class:`artiq.language.environment`) that
takes a scan object.
:param global_min: The minimum value taken by the scanned variable, common
to all scan modes. The user interface takes this value to set the
range of its input widgets.
:param global_max: Same as global_min, but for the maximum value.
:param global_step: The step with which the value should be modified by
up/down buttons in a user interface.
:param unit: A string representing the unit of the scanned variable, for user
interface (UI) purposes.
:param ndecimals: The number of decimals a UI should use.
"""
def __init__(self, default=NoDefault, unit="",
global_step=1.0, global_min=None, global_max=None,
ndecimals=2):
if default is not NoDefault:
self.default_value = default
self.unit = unit
self.global_step = global_step
self.global_min = global_min
self.global_max = global_max
self.ndecimals = ndecimals
def default(self):
if not hasattr(self, "default_value"):
raise DefaultMissing
return self.default_value
def process(self, x):
cls = _ty_to_scan[x["ty"]]
args = dict()
for arg in inspect.getargspec(cls).args[1:]:
if arg in x:
args[arg] = x[arg]
return cls(**args)
def describe(self):
d = {"ty": "Scannable"}
if hasattr(self, "default_value"):
d["default"] = self.default_value.describe()
d["unit"] = self.unit
d["global_step"] = self.global_step
d["global_min"] = self.global_min
d["global_max"] = self.global_max
d["ndecimals"] = self.ndecimals
return d
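# Illustrative fragment (hypothetical experiment code, not part of this module):
# a Scannable argument is typically declared and consumed like this:
#
#     self.setattr_argument("freq_scan",
#                           Scannable(default=LinearScan(10.0, 20.0, 11), unit="MHz"))
#     for f in self.freq_scan:
#         self.measure(f)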
| gpl-3.0 | 1,219,659,166,398,376,400 | 28.395062 | 81 | 0.625367 | false | 3.849636 | false | false | false |
restful-open-annotation/oa-adapter | formats/json_format.py | 1 | 2113 | #!/usr/bin/env python
"""JSON content-type support for Open Annotation."""
__author__ = 'Sampo Pyysalo'
__license__ = 'MIT'
import json
# Default values for rendering options
PRETTYPRINT_DEFAULT = True
KEEPCONTEXT_DEFAULT = False
# Short name for this format.
format_name = 'json'
# The MIME types associated with this format.
mimetypes = ['application/json']
def from_jsonld(data, options=None):
"""Render JSON-LD data into JSON string.
This is intended to be used as a mimerender render function
(see http://mimerender.readthedocs.org/en/latest/).
If options['prettyprint'] is True, renders the data so that it is
more easily readable by humans.
If options['keepcontext'] is True, includes the JSON-LD @context
in the JSON data if present.
Args:
data: dict containing JSON-LD data in expanded JSON-LD form
(see http://www.w3.org/TR/json-ld/#expanded-document-form).
options: dict of rendering options, or None for defaults.
Returns:
String representing the rendered data.
"""
if options is None:
options = {}
# @context is not considered part of the JSON format
keepcontext = options.get('keepcontext', KEEPCONTEXT_DEFAULT)
if not keepcontext and '@context' in data:
del data['@context']
prettyprint = options.get('prettyprint', PRETTYPRINT_DEFAULT)
if prettyprint:
return json.dumps(data, indent=2, separators=(',', ': '))+'\n'
else:
return json.dumps(data)
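# Illustrative sketch (hypothetical helper and sample document): how the two
# rendering options above interact.
def _example_render():
    doc = {'@context': 'http://www.w3.org/ns/anno.jsonld', 'id': 'urn:example:anno1'}
    # '@context' is stripped by default; keepcontext=True preserves it.
    compact = from_jsonld(dict(doc), options={'prettyprint': False})
    verbose = from_jsonld(dict(doc), options={'keepcontext': True})
    return compact, verbose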
def to_jsonld(data, options=None):
"""Parse JSON data into JSON-LD.
Args:
data: string in JSON format.
options: dict of parsing options, or None for defaults.
Returns:
dict containing JSON-LD data in expanded JSON-LD form
(see http://www.w3.org/TR/json-ld/#expanded-document-form).
"""
if options is None:
options = {}
encoding = options.get('encoding')
if encoding is None:
jsonld = json.loads(data)
else:
jsonld = json.loads(data, encoding=encoding)
# TODO: add context and expand
return jsonld
| mit | 113,083,809,037,946,270 | 27.173333 | 71 | 0.658779 | false | 3.855839 | false | false | false |
seprich/py-bson-rpc | bsonrpc/concurrent.py | 1 | 2990 | # -*- coding: utf-8 -*-
'''
This module provides a collection of concurrency related
object generators. These generators will create either
native threading based or greenlet based objects depending
on which threading_model is selected.
'''
from bsonrpc.options import ThreadingModel
__license__ = 'http://mozilla.org/MPL/2.0/'
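# Illustrative sketch (hypothetical helper name): the factories below are meant to
# be used together once a threading model has been chosen.
def _example_usage(threading_model=ThreadingModel.THREADS):
    queue = new_queue(threading_model)
    worker = spawn(threading_model, queue.put, 42)
    worker.join()
    return queue.get()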
def _spawn_thread(fn, *args, **kwargs):
from threading import Thread
t = Thread(target=fn, args=args, kwargs=kwargs)
t.start()
return t
def _spawn_greenlet(fn, *args, **kwargs):
from gevent import Greenlet
g = Greenlet(fn, *args, **kwargs)
g.start()
return g
def spawn(threading_model, fn, *args, **kwargs):
if threading_model == ThreadingModel.GEVENT:
return _spawn_greenlet(fn, *args, **kwargs)
if threading_model == ThreadingModel.THREADS:
return _spawn_thread(fn, *args, **kwargs)
def _new_queue(*args, **kwargs):
from six.moves.queue import Queue
return Queue(*args, **kwargs)
def _new_gevent_queue(*args, **kwargs):
from gevent.queue import Queue
return Queue(*args, **kwargs)
def new_queue(threading_model, *args, **kwargs):
if threading_model == ThreadingModel.GEVENT:
return _new_gevent_queue(*args, **kwargs)
if threading_model == ThreadingModel.THREADS:
return _new_queue(*args, **kwargs)
def _new_thread_lock(*args, **kwargs):
from threading import Lock
return Lock(*args, **kwargs)
def _new_gevent_lock(*args, **kwargs):
from gevent.lock import Semaphore
return Semaphore(*args, **kwargs)
def new_lock(threading_model, *args, **kwargs):
if threading_model == ThreadingModel.GEVENT:
return _new_gevent_lock(*args, **kwargs)
if threading_model == ThreadingModel.THREADS:
return _new_thread_lock(*args, **kwargs)
class Promise(object):
def __init__(self, event):
object.__setattr__(self, '_event', event)
object.__setattr__(self, '_value', None)
def __getattr__(self, name):
return getattr(self._event, name)
def __setattr__(self, name, value):
if hasattr(self._event, name):
object.__setattr__(self._event, name, value)
else:
object.__setattr__(self, name, value)
@property
def value(self):
return self._value
def set(self, value):
object.__setattr__(self, '_value', value)
self._event.set()
def wait(self, timeout=None):
if not self._event.wait(timeout):
raise RuntimeError(
u'Promise timeout after %.02f seconds.' % timeout)
return self._value
def _new_thread_event():
from threading import Event
return Event()
def _new_gevent_event():
from gevent.event import Event
return Event()
def new_promise(threading_model):
if threading_model == ThreadingModel.GEVENT:
return Promise(_new_gevent_event())
if threading_model == ThreadingModel.THREADS:
return Promise(_new_thread_event())
| mpl-2.0 | 622,220,974,657,172,900 | 25.696429 | 66 | 0.644147 | false | 3.695921 | false | false | false |
MaxTakahashi/hammr | hammr/utils/publish_utils.py | 1 | 14372 | # Copyright 2007-2015 UShareSoft SAS, All rights reserved
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ussclicore.utils import printer
from uforge.objects.uforge import *
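# Each publish_<target>() helper below validates the mandatory keys of its hammr
# "builder" section and copies them onto the matching UForge publish-image object;
# when a required field is missing it reports the error via printer.out() and
# returns None instead of an image object.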
def publish_vcd(pimage, builder):
# doing field verification
if not "orgName" in builder:
printer.out("orgName in vcd builder not found", printer.ERROR)
return
if not "catalogName" in builder:
printer.out("catalogName in vcd builder not found", printer.ERROR)
return
if not "imageName" in builder:
printer.out("imageName in vcd builder not found", printer.ERROR)
return
pimage.credAccount.organizationName = builder["orgName"]
pimage.credAccount.catalogId = builder["catalogName"]
pimage.credAccount.displayName = builder["imageName"]
return pimage
def publish_vcenter(builder):
pimage = PublishImageVSphere()
# doing field verification
if not "displayName" in builder:
printer.out("displayName in vcenter builder not found", printer.ERROR)
return
if not "esxHost" in builder:
printer.out("esxHost in vcenter builder not found", printer.ERROR)
return
if not "datastore" in builder:
printer.out("datastore in vcenter builder not found", printer.ERROR)
return
if "network" in builder:
pimage.network = builder["network"]
pimage.displayName = builder["displayName"]
pimage.esxHost = builder["esxHost"]
pimage.datastore = builder["datastore"]
return pimage
def publish_cloudstack(pimage, builder):
# doing field verification
if not "imageName" in builder:
printer.out("imageName in cloudstack builder not found", printer.ERROR)
return
if not "zone" in builder:
printer.out("zone in cloudstack builder not found", printer.ERROR)
return
if "publicImage" in builder:
pimage.credAccount.publicImage = True if (builder["publicImage"] == "true") else False
if "featured" in builder:
pimage.credAccount.featuredEnabled = True if (builder["featured"] == "true") else False
pimage.credAccount.displayName = builder["imageName"]
pimage.credAccount.zoneName = builder["zone"]
return pimage
def publish_cloudstack_qcow2(pimage, builder):
return publish_cloudstack(pimage, builder)
def publish_cloudstack_vhd(pimage, builder):
return publish_cloudstack(pimage, builder)
def publish_cloudstack_ova(pimage, builder):
return publish_cloudstack(pimage, builder)
def publish_susecloud(pimage, builder):
# doing field verification
if not "imageName" in builder:
printer.out("imageName in susecloud builder not found", printer.ERROR)
return
if not "tenant" in builder:
printer.out("tenant in susecloud builder not found", printer.ERROR)
return
if "description" in builder:
pimage.credAccount.description = builder["description"]
pimage.credAccount.displayName = builder["imageName"]
pimage.credAccount.tenantName = builder["tenant"]
if "publicImage" in builder:
pimage.credAccount.publicImage = True if (builder["publicImage"] == "true") else False
# if "paraVirtualMode" in builder:
# pimage.credAccount. = True if (builder["paraVirtualMode"]=="true") else False
return pimage
def publish_openstack(builder):
pimage = PublishImageOpenStack()
# doing field verification
if not "displayName" in builder:
printer.out("displayName in openstack builder not found", printer.ERROR)
return
if not "tenantName" in builder:
printer.out("TenantName in openstack builder not found", printer.ERROR)
return
pimage.displayName = builder["displayName"]
pimage.tenantName = builder["tenantName"]
if "publicImage" in builder:
pimage.credAccount.publicImage = True if (builder["publicImage"] == "true") else False
if "keystoneDomain" in builder:
pimage.keystoneDomain = builder["keystoneDomain"]
return
if "keystoneProject" in builder:
pimage.keystoneProject = builder["keystoneProject"]
return
return pimage
def publish_openstackqcow2(builder):
return publish_openstack(builder)
def publish_openstackvhd(pimage, builder):
    return publish_openstack(builder)
def publish_openstackvmdk(pimage, builder):
    return publish_openstack(builder)
def publish_openstackvdi(pimage, builder):
    return publish_openstack(builder)
def publish_aws(builder):
pimage = PublishImageAws()
# doing field verification
if not "bucket" in builder:
printer.out("bucket in AWS builder not found", printer.ERROR)
return
if not "region" in builder:
printer.out("region in AMI builder not found", printer.ERROR)
return
pimage.bucket = builder["bucket"]
pimage.region = builder["region"]
return pimage
def publish_azure(builder):
if "blob" in builder or "container" in builder:
printer.out("Azure Resource Manager publish")
return publish_azure_arm(builder)
else:
printer.out("Azure classic publish")
return publish_azure_classic(builder)
def publish_azure_classic(builder):
pimage = PublishImageAzure()
# doing field verification
if not "storageAccount" in builder:
printer.out("storageAccount in Microsoft Azure not found", printer.ERROR)
return
if not "region" in builder:
printer.out("region in Microsoft Azure not found", printer.ERROR)
return
pimage.storageAccount = builder["storageAccount"]
pimage.region = builder["region"]
return pimage
def publish_azure_arm(builder):
pimage = PublishImageAzureResourceManager()
if not "storageAccount" in builder:
printer.out("storageAccount not found", printer.ERROR)
return
if not "container" in builder:
printer.out("container not found", printer.ERROR)
return
if not "blob" in builder:
printer.out("blob not found", printer.ERROR)
return
if not "displayName" in builder:
printer.out("displayName not found", printer.ERROR)
return
if "resourceGroup" in builder:
pimage.resourceGroup = builder["resourceGroup"]
pimage.storageAccount = builder["storageAccount"]
pimage.container = builder["container"]
pimage.blob = builder["blob"]
pimage.displayName = builder["displayName"]
return pimage
def publish_flexiant(builder):
pimage = PublishImageFlexiant()
# doing field verification
if not "diskOffering" in builder:
printer.out("diskOffering in flexiant builder not found", printer.ERROR)
return
if not "virtualDatacenterName" in builder:
printer.out("virtualDatacenterName in flexiant builder not found", printer.ERROR)
return
if not "machineImageName" in builder:
printer.out("machineImageName in flexiant builder not found", printer.ERROR)
return
pimage.diskOffering = builder["diskOffering"]
pimage.virtualDatacenterName = builder["virtualDatacenterName"]
pimage.machineImageName = builder["machineImageName"]
return pimage
def publish_flexiant_kvm(pimage, builder):
    return publish_flexiant(builder)
def publish_flexiant_ova(pimage, builder):
    return publish_flexiant(builder)
def publish_flexiantraw(builder):
return publish_flexiant(builder)
def publish_abiquo(pimage, builder):
# doing field verification
if not "enterprise" in builder:
printer.out("enterprise in abiquo builder not found", printer.ERROR)
return
if not "datacenter" in builder:
printer.out("datacenter in abiquo builder not found", printer.ERROR)
return
if not "productName" in builder:
printer.out("productName in abiquo builder not found", printer.ERROR)
return
if not "category" in builder:
printer.out("category in abiquo builder not found", printer.ERROR)
return
if not "description" in builder:
printer.out("description in abiquo builder not found", printer.ERROR)
return
pimage.credAccount.datacenterName = builder["datacenter"]
pimage.credAccount.displayName = builder["productName"]
pimage.credAccount.category = builder["category"]
pimage.credAccount.organizationName = builder["enterprise"]
pimage.credAccount.description = builder["description"]
return pimage
def publish_nimbula(pimage, builder):
# doing field verification
if not "imageListName" in builder:
printer.out("imageListName in nimbula builder not found", printer.ERROR)
return
if not "imageVersion" in builder:
printer.out("imageVersion in nimbula builder not found", printer.ERROR)
return
if not "description" in builder:
printer.out("description in nimbula builder not found", printer.ERROR)
return
pimage.credAccount.imageVersion = builder["imageVersion"]
pimage.credAccount.description = builder["description"]
pimage.credAccount.listName = builder["imageListName"]
return pimage
def publish_nimbula_kvm(pimage, builder):
return publish_nimbula(pimage, builder)
def publish_nimbula_esx(pimage, builder):
return publish_nimbula(pimage, builder)
def publish_eucalyptus(pimage, builder):
# doing field verification
if not "imageName" in builder:
printer.out("imageName in Eucalyptus builder not found", printer.ERROR)
return
if not "description" in builder:
printer.out("description in Eucalyptus builder not found", printer.ERROR)
return
if not "bucket" in builder:
printer.out("bucket in Eucalyptus builder not found", printer.ERROR)
return
pimage.credAccount.displayName = builder["imageName"]
pimage.credAccount.bucket = builder["bucket"]
pimage.credAccount.description = builder["description"]
if "ramdisk" in builder and "kernelId" in builder:
pimage.credAccount.ramdiskId = builder["ramdisk"]
pimage.credAccount.kernelId = builder["kernelId"]
return pimage
def publish_eucalyptus_kvm(pimage, builder):
return publish_eucalyptus(pimage, builder)
def publish_eucalyptus_xen(pimage, builder):
return publish_eucalyptus(pimage, builder)
def publish_gce(pimage, builder):
# doing field verification
if not "computeZone" in builder:
printer.out("computeZone in GCE builder not found", printer.ERROR)
return
if not "bucketLocation" in builder:
printer.out("bucketLocation in GCE builder not found", printer.ERROR)
return
if not "bucket" in builder:
printer.out("bucket in GCE builder not found", printer.ERROR)
return
if not "projectId" in builder:
printer.out("projectId in GCE builder not found", printer.ERROR)
return
if not "storageClass" in builder:
printer.out("storageClass in GCE builder not found", printer.ERROR)
return
if not "diskNamePrefix" in builder:
printer.out("diskNamePrefix in AMI builder not found", printer.ERROR)
return
if "description" in builder:
pimage.credAccount.description = builder["description"]
pimage.credAccount.bucket = builder["bucket"]
pimage.credAccount.tenantName = builder["projectId"]
pimage.credAccount.category = builder["storageClass"]
pimage.credAccount.displayName = builder["diskNamePrefix"]
pimage.credAccount.zoneName = builder["computeZone"]
pimage.publishLocation = builder["bucketLocation"]
return pimage
def publish_outscale(pimage, builder):
# doing field verification
if not "zone" in builder:
printer.out("zone in outscale builder not found", printer.ERROR)
return
if not "description" in builder:
pimage.credAccount.description = builder["description"]
pimage.credAccount.zoneName = builder["zone"]
return pimage
def publish_k5vmdk(builder):
pimage = PublishImageK5()
# doing field verification
if not "displayName" in builder:
printer.out("displayName in k5 builder not found", printer.ERROR)
return
if not "domain" in builder:
printer.out("domain in k5 builder not found", printer.ERROR)
return
if not "project" in builder:
printer.out("project in k5 builder not found", printer.ERROR)
return
if not "region" in builder:
printer.out("region in k5 builder not found", printer.ERROR)
return
pimage.displayName = builder["displayName"]
pimage.keystoneDomain = builder["domain"]
pimage.keystoneProject = builder["project"]
pimage.publishLocation = builder["region"]
return pimage
def publish_docker(builder):
pimage = PublishImageDocker()
if not "namespace" in builder:
printer.out("namespace in Docker builder is missing", printer.ERROR)
return
if not "repositoryName" in builder:
printer.out("repositoryName in Docker builder is missing", printer.ERROR)
return
if not "tagName" in builder:
printer.out("tagName in Docker builder is missing", printer.ERROR)
return
pimage.namespace = builder["namespace"]
pimage.repositoryName = builder["repositoryName"]
pimage.tagName = builder["tagName"]
return pimage
def publish_oracleraw(builder):
pimage = PublishImageOracle()
if not "displayName" in builder:
printer.out("displayName in Oracle builder is missing", printer.ERROR)
return
if not "computeEndPoint" in builder:
printer.out("computeEndPoint in Oracle builder is missing", printer.ERROR)
return
pimage.displayName = builder["displayName"]
pimage.computeEndPoint = builder["computeEndPoint"]
return pimage
| apache-2.0 | 8,318,043,477,958,264,000 | 31.515837 | 95 | 0.695519 | false | 4.09809 | false | false | false |
zsommers/bdo_chronicle | bdo_tools/nodes/urls.py | 1 | 1286 | from django.conf.urls import include, url
from django.views.generic import DetailView, ListView, TemplateView
from . import models
kingdoms_patterns = [
url(r'^(?P<pk>[0-9]+)/$', DetailView.as_view(model=models.Kingdom), name='detail'),
url(r'^$', ListView.as_view(model=models.Kingdom), name='list'),
]
territories_patterns = [
url(r'^(?P<pk>[0-9]+)/$', DetailView.as_view(model=models.Territory), name='detail'),
url(r'^$', ListView.as_view(model=models.Territory), name='list'),
]
nodes_patterns = [
url(r'^(?P<pk>[0-9]+)/$', DetailView.as_view(model=models.Node), name='detail'),
url(r'^$', ListView.as_view(model=models.Node), name='list'),
]
properties_patterns = [
url(r'^(?P<pk>[0-9]+)/$', DetailView.as_view(model=models.Property), name='detail'),
url(r'^$', ListView.as_view(model=models.Property), name='list'),
]
app_name = 'nodes'
urlpatterns = [
url(r'^kingdoms/', include(kingdoms_patterns, namespace='kingdoms')),
url(r'^territories/', include(territories_patterns, namespace='territories')),
url(r'^nodes/', include(nodes_patterns, namespace='nodes')),
url(r'^properties/', include(properties_patterns, namespace='properties')),
url(r'^$', TemplateView.as_view(template_name='nodes/main.html'), name='main'),
]
| mit | 9,142,507,298,452,070,000 | 37.969697 | 89 | 0.664075 | false | 3.083933 | false | true | false |
travistang/late_fyt | proportional.py | 1 | 1578 | import numpy
import random
import sum_tree
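# Proportional prioritized experience replay: transitions are stored in a sum-tree
# with priority p**alpha and sampled with probability proportional to that value;
# select() additionally returns per-sample importance weights derived from each
# priority and beta so a learner can de-bias its updates.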
class Experience(object):
def __init__(self, memory_size, batch_size, alpha):
self.tree = sum_tree.SumTree(memory_size)
self.memory_size = memory_size
self.batch_size = batch_size
self.alpha = alpha
def add(self, data, priority):
self.tree.add(data, priority**self.alpha)
def size(self):
return self.tree.filled_size()
def select(self, beta):
if self.tree.filled_size() < self.batch_size:
return None, None, None
out = []
indices = []
weights = []
priorities = []
for _ in range(self.batch_size):
r = random.random()
data, priority, index = self.tree.find(r)
priorities.append(priority)
weights.append((1./self.memory_size/priority)**-beta if priority > 1e-16 else 0)
indices.append(index)
out.append(data)
self.priority_update([index], [0]) # To avoid duplicating
self.priority_update(indices, priorities) # Revert priorities
return out, weights, indices
def priority_update(self, indices, priorities):
for i, p in zip(indices, priorities):
self.tree.val_update(i, p**self.alpha)
def reset_alpha(self, alpha):
self.alpha, old_alpha = alpha, self.alpha
priorities = [self.tree.get_val(i)**-old_alpha for i in range(self.tree.filled_size())]
self.priority_update(range(self.tree.filled_size()), priorities)
| mit | 7,641,482,424,519,402,000 | 30.56 | 95 | 0.584918 | false | 3.80241 | false | false | false |
jorvis/biocode | gff/convert_genbank_to_gff3.py | 1 | 10340 | #!/usr/bin/env python3
"""
This is a script to convert GenBank flat files to GFF3 format with a specific focus on
initially maintaining as much structural annotation as possible, then expanding into
functional annotation support.
This is not guaranteed to convert all features, but warnings will be printed wherever possible
for features which aren't included.
Currently supported:
Structural features: gene, CDS, mRNA, tRNA, rRNA
Annotations: primary identifiers, gene product name
This is written to handle multi-entry GBK files
Caveats:
- Because the GBK flatfile format doesn't explicitly model parent/child features, this script
links them using the expected format convention of shared /locus_tag entries for each feature
of the gene graph (gene, mRNA, CDS)
- It has only been tested with prokaryotic (non-spliced) genes
Author: Joshua Orvis (jorvis AT gmail)
"""
import argparse
import sys
from collections import defaultdict
from Bio import SeqIO
from biocode import annotation, things, utils
def main():
parser = argparse.ArgumentParser( description='Convert GenBank flat files to GFF3 format')
## output file to be written
parser.add_argument('-i', '--input_file', type=str, required=True, help='Path to an input GBK file' )
parser.add_argument('-o', '--output_file', type=str, required=False, help='Path to an output GFF file to be created' )
parser.add_argument('--with_fasta', dest='fasta', action='store_true', help='Include the FASTA section with genomic sequence at end of file. (default)' )
parser.add_argument('--no_fasta', dest='fasta', action='store_false' )
parser.set_defaults(fasta=True)
args = parser.parse_args()
## output will either be a file or STDOUT
ofh = sys.stdout
if args.output_file is not None:
ofh = open(args.output_file, 'wt')
ofh.write("##gff-version 3\n")
assemblies = dict()
current_assembly = None
current_gene = None
current_RNA = None
rna_count_by_gene = defaultdict(int)
exon_count_by_RNA = defaultdict(int)
seqs_pending_writes = False
features_skipped_count = 0
# each gb_record is a SeqRecord object
for gb_record in SeqIO.parse(open(args.input_file, "r"), "genbank"):
mol_id = gb_record.name
if mol_id not in assemblies:
assemblies[mol_id] = things.Assembly(id=mol_id)
if len(str(gb_record.seq)) > 0:
seqs_pending_writes = True
assemblies[mol_id].residues = str(gb_record.seq)
assemblies[mol_id].length = len(str(gb_record.seq))
current_assembly = assemblies[mol_id]
# each feat is a SeqFeature object
for feat in gb_record.features:
#print(feat)
fmin = int(feat.location.start)
fmax = int(feat.location.end)
if feat.location.strand == 1:
strand = '+'
elif feat.location.strand == -1:
strand = '-'
else:
raise Exception("ERROR: unstranded feature encountered: {0}".format(feat))
#print("{0} located at {1}-{2} strand:{3}".format( locus_tag, fmin, fmax, strand ) )
if feat.type == 'source':
continue
if feat.type == 'gene':
# print the previous gene (if there is one)
if current_gene is not None:
gene.print_as(fh=ofh, source='GenBank', format='gff3')
locus_tag = feat.qualifiers['locus_tag'][0]
gene = things.Gene(id=locus_tag, locus_tag=locus_tag)
gene.locate_on( target=current_assembly, fmin=fmin, fmax=fmax, strand=strand )
current_gene = gene
current_RNA = None
elif feat.type == 'mRNA':
locus_tag = feat.qualifiers['locus_tag'][0]
rna_count_by_gene[locus_tag] += 1
feat_id = "{0}.mRNA.{1}".format( locus_tag, rna_count_by_gene[locus_tag] )
mRNA = things.mRNA(id=feat_id, parent=current_gene, locus_tag=locus_tag)
mRNA.locate_on( target=current_assembly, fmin=fmin, fmax=fmax, strand=strand )
gene.add_mRNA(mRNA)
current_RNA = mRNA
if feat_id in exon_count_by_RNA:
raise Exception( "ERROR: two different RNAs found with same ID: {0}".format(feat_id) )
else:
exon_count_by_RNA[feat_id] = 0
elif feat.type == 'tRNA':
locus_tag = feat.qualifiers['locus_tag'][0]
rna_count_by_gene[locus_tag] += 1
feat_id = "{0}.tRNA.{1}".format(locus_tag, rna_count_by_gene[locus_tag])
if 'product' in feat.qualifiers:
anticodon = feat.qualifiers['product'][0]
else:
anticodon = None
tRNA = things.tRNA(id=feat_id, parent=current_gene, anticodon=anticodon)
tRNA.locate_on(target=current_assembly, fmin=fmin, fmax=fmax, strand=strand)
gene.add_tRNA(tRNA)
current_RNA = tRNA
if feat_id in exon_count_by_RNA:
raise Exception( "ERROR: two different RNAs found with same ID: {0}".format(feat_id) )
else:
exon_count_by_RNA[feat_id] = 0
elif feat.type == 'rRNA':
locus_tag = feat.qualifiers['locus_tag'][0]
rna_count_by_gene[locus_tag] += 1
feat_id = "{0}.rRNA.{1}".format(locus_tag, rna_count_by_gene[locus_tag])
if 'product' in feat.qualifiers:
product = feat.qualifiers['product'][0]
else:
product = None
annot = annotation.FunctionalAnnotation(product_name=product)
rRNA = things.rRNA(id=feat_id, parent=current_gene, annotation=annot)
rRNA.locate_on( target=current_assembly, fmin=fmin, fmax=fmax, strand=strand )
gene.add_rRNA(rRNA)
current_RNA = rRNA
if feat_id in exon_count_by_RNA:
raise Exception( "ERROR: two different RNAs found with same ID: {0}".format(feat_id) )
else:
exon_count_by_RNA[feat_id] = 0
elif feat.type == 'CDS':
locus_tag = feat.qualifiers['locus_tag'][0]
# If processing a prokaryotic GBK, we'll encounter CDS before mRNA, so we have to
# manually make one
if current_RNA is None:
feat_id = "{0}.mRNA.{1}".format( locus_tag, rna_count_by_gene[locus_tag] )
mRNA = things.mRNA(id=feat_id, parent=current_gene)
mRNA.locate_on( target=current_assembly, fmin=fmin, fmax=fmax, strand=strand )
gene.add_mRNA(mRNA)
current_RNA = mRNA
if 'product' in feat.qualifiers:
product = feat.qualifiers['product'][0]
else:
product = None
if 'gene' in feat.qualifiers:
gene_symbol = feat.qualifiers['gene'][0]
else:
gene_symbol = None
annot = annotation.FunctionalAnnotation(product_name=product, gene_symbol=gene_symbol)
if 'db_xref' in feat.qualifiers:
for dbxref in feat.qualifiers['db_xref']:
annot.add_dbxref(dbxref)
polypeptide_id = "{0}.polypeptide.{1}".format( locus_tag, rna_count_by_gene[locus_tag] )
polypeptide = things.Polypeptide(id=polypeptide_id, parent=mRNA, annotation=annot)
mRNA.add_polypeptide(polypeptide)
exon_count_by_RNA[current_RNA.id] += 1
cds_id = "{0}.CDS.{1}".format( current_RNA.id, exon_count_by_RNA[current_RNA.id] )
current_CDS_phase = 0
for loc in feat.location.parts:
subfmin = int(loc.start)
subfmax = int(loc.end)
CDS = things.CDS(id=cds_id, parent=current_RNA)
CDS.locate_on( target=current_assembly, fmin=subfmin, fmax=subfmax, strand=strand, phase=current_CDS_phase )
current_RNA.add_CDS(CDS)
# calculate the starting phase for the next CDS feature (in case there is one)
# 0 + 6 = 0 TTGCAT
# 0 + 7 = 2 TTGCATG
# 1 + 6 = 1 TTGCAT
# 2 + 7 = 1 TTGCATG
# general: 3 - ((length - previous phase) % 3)
current_CDS_phase = 3 - (((subfmax - subfmin) - current_CDS_phase) % 3)
if current_CDS_phase == 3:
current_CDS_phase = 0
exon_id = "{0}.exon.{1}".format( current_RNA.id, exon_count_by_RNA[current_RNA.id] )
exon = things.Exon(id=exon_id, parent=current_RNA)
exon.locate_on( target=current_assembly, fmin=subfmin, fmax=subfmax, strand=strand )
current_RNA.add_exon(exon)
exon_count_by_RNA[current_RNA.id] += 1
else:
print("WARNING: The following feature was skipped:\n{0}".format(feat))
features_skipped_count += 1
# don't forget to do the last gene, if there were any
if current_gene is not None:
gene.print_as(fh=ofh, source='GenBank', format='gff3')
if args.fasta is True:
if seqs_pending_writes is True:
ofh.write("##FASTA\n")
for assembly_id in assemblies:
ofh.write(">{0}\n".format(assembly_id))
ofh.write("{0}\n".format(utils.wrapped_fasta(assemblies[assembly_id].residues)))
if features_skipped_count > 0:
print("Warning: {0} unsupported feature types were skipped".format(features_skipped_count))
if __name__ == '__main__':
main()
| mit | -5,754,837,827,644,957,000 | 40.693548 | 158 | 0.547872 | false | 3.726126 | false | false | false |
jespino/coval | coval/international.py | 1 | 3349 | import string
import re
def isbn(isbn, strict=True):
    '''Validation of an ISBN (International Standard Book Number)'''
if not strict:
isbn = isbn.upper()
if isbn[0:4] == 'ISBN':
isbn = isbn[4:]
isbn = isbn.strip().replace("-", "").replace("/", "")
if len(isbn) == 10:
return isbn10(isbn)
elif len(isbn) == 13:
return isbn13(isbn)
else:
return False
# Extracted from Wikipedia's http://en.wikipedia.org/wiki/Isbn page
def isbn10(isbn, strict=True):
    '''Validation of an ISBN (International Standard Book Number) in ISBN-10 format'''
if not strict:
isbn = isbn.upper()
if isbn[0:4] == 'ISBN':
isbn = isbn[4:]
isbn = isbn.strip().replace("-", "").replace("/", "")
    if not re.match('^\d{9}[\dX]$', isbn):
        return False
    # a trailing 'X' check character stands for the value 10
    total = sum([(10 if num == 'X' else int(num))*weight for num, weight in
        zip(isbn, reversed(range(1, 11)))])
    return total % 11 == 0
# Extracted from Wikipedia's http://en.wikipedia.org/wiki/Isbn page
def isbn13(isbn, strict=True):
    '''Validation of an ISBN (International Standard Book Number) in ISBN-13 format'''
if not strict:
isbn = isbn.upper()
if isbn[0:4] == 'ISBN':
isbn = isbn[4:]
isbn = isbn.strip().replace("-", "").replace("/", "")
if not re.match('^\d{13}$', isbn):
return False
total = sum([int(num)*weight for num, weight in zip(isbn, (1,3)*6)])
    ck = (10 - (total % 10)) % 10  # the final modulo maps a raw value of 10 to a check digit of 0
    return ck == int(isbn[-1])
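# Worked example for isbn13() above: for '9780306406157' the first twelve digits
# weighted 1,3,1,3,... sum to 93, so the check digit is (10 - 93 % 10) % 10 = 7,
# which matches the final digit and the function returns True.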
def iban(iban, strict=True):
'''Validation of an IBAN (international bankaccount number)'''
country_code_length = {
'AD': 24, 'AE': 23, 'AL': 28, 'AT': 20, 'BA': 20, 'BE': 16, 'BG': 22,
'CH': 21, 'CY': 28, 'CZ': 24, 'DE': 22, 'DK': 18, 'EE': 20, 'ES': 24,
'FR': 27, 'FI': 18, 'GB': 22, 'GE': 22, 'GI': 23, 'GR': 27, 'HR': 21,
'HU': 28, 'IE': 22, 'IL': 23, 'IS': 26, 'IT': 27, 'KW': 30, 'LB': 28,
'LI': 21, 'LT': 20, 'LU': 20, 'LV': 21, 'MC': 27, 'ME': 22, 'MK': 19,
'MR': 27, 'MT': 31, 'MU': 30, 'NL': 18, 'NO': 15, 'PL': 28, 'PT': 25,
'RO': 24, 'RS': 22, 'SA': 24, 'SE': 24, 'SI': 19, 'SK': 24, 'SM': 27,
'TN': 24, 'TR': 26,
}
letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
if not strict:
iban = iban.strip().replace("-", "").replace("/", "")
iban = iban.upper()
if len(iban)<2 or not iban[0:2] in country_code_length.keys():
return False
if len(iban) != country_code_length[iban[0:2]]:
return False
iban = iban[4:]+iban[0:4]
iban_translated = ''
for char in iban:
if char in letters:
iban_translated += str(letters.index(char)+10)
elif char in '0123456789':
iban_translated += char
else:
return False
return (int(iban_translated) % 97) == 1
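# Example for iban() above: the commonly cited test value
# 'GB82WEST12345698765432' has the 22 characters required for 'GB', and after
# moving the first four characters to the end and mapping letters to numbers the
# resulting integer is congruent to 1 modulo 97, so the function returns True.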
def banknote_euro(banknote, strict=True):
'''Validation of a Euro banknote id'''
euro_country_codes = 'JKLMNPRSTUVWXYZ'
if not strict:
banknote = banknote.strip().replace("-", "").replace("/", "")
if len(banknote) != 12:
return False
if not banknote[0] in euro_country_codes:
return False
# Convert charater to ascii code
banknote = int(str(ord(banknote[0]))+banknote[1:])
return (int(banknote) % 9) == 0
| bsd-3-clause | 4,280,021,965,208,855,600 | 30.299065 | 86 | 0.533294 | false | 3.075298 | false | false | false |
kennethreitz/pipenv | pipenv/vendor/shellingham/posix/__init__.py | 1 | 2843 | import os
import re
from .._core import SHELL_NAMES, ShellDetectionFailure
from . import proc, ps
def _get_process_mapping():
"""Select a way to obtain process information from the system.
* `/proc` is used if supported.
* The system `ps` utility is used as a fallback option.
"""
for impl in (proc, ps):
try:
mapping = impl.get_process_mapping()
except EnvironmentError:
continue
return mapping
raise ShellDetectionFailure('compatible proc fs or ps utility is required')
def _iter_process_args(mapping, pid, max_depth):
"""Iterator to traverse up the tree, yielding each process's argument list.
"""
for _ in range(max_depth):
try:
proc = mapping[pid]
except KeyError: # We've reached the root process. Give up.
break
        if proc.args:  # Presumably the process should always have a name?
yield proc.args
pid = proc.ppid # Go up one level.
def _get_login_shell(proc_cmd):
"""Form shell information from the SHELL environment variable if possible.
"""
login_shell = os.environ.get('SHELL', '')
if login_shell:
proc_cmd = login_shell
else:
proc_cmd = proc_cmd[1:]
return (os.path.basename(proc_cmd).lower(), proc_cmd)
_INTERPRETER_SHELL_NAMES = [
(re.compile(r'^python(\d+(\.\d+)?)?$'), {'xonsh'}),
]
def _get_interpreter_shell(proc_name, proc_args):
"""Get shell invoked via an interpreter.
    Some shells are implemented on top of, and invoked through, an interpreter;
    e.g. xonsh is commonly executed with an executable Python script. This
    detects what script the interpreter is actually running, and checks whether
    that looks like a shell.
    See sarugaku/shellingham#26 for the rationale.
"""
for pattern, shell_names in _INTERPRETER_SHELL_NAMES:
if not pattern.match(proc_name):
continue
for arg in proc_args:
name = os.path.basename(arg).lower()
if os.path.isfile(arg) and name in shell_names:
return (name, arg)
return None
def _get_shell(cmd, *args):
if cmd.startswith('-'): # Login shell! Let's use this.
return _get_login_shell(cmd)
name = os.path.basename(cmd).lower()
if name in SHELL_NAMES: # Command looks like a shell.
return (name, cmd)
shell = _get_interpreter_shell(name, args)
if shell:
return shell
return None
def get_shell(pid=None, max_depth=6):
"""Get the shell that the supplied pid or os.getpid() is running in.
"""
pid = str(pid or os.getpid())
mapping = _get_process_mapping()
for proc_args in _iter_process_args(mapping, pid, max_depth):
shell = _get_shell(*proc_args)
if shell:
return shell
return None
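# Usage sketch (illustrative): when the current process was started from bash,
# get_shell() would typically return a tuple like ('bash', '/bin/bash'); if no
# shell is found within max_depth ancestors it returns None.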
| mit | 1,781,591,116,275,053,000 | 29.569892 | 79 | 0.623285 | false | 3.790667 | false | false | false |
MC911-MV-1s2016/lya-compiler-python | lyacompiler/lya_debug_source.py | 1 | 11212 | lya_source_dcl = """
dcl dcl1 int;
dcl dcl2, dcl3, dcl4, dcl5 char;
dcl dcl6, dcl7 int, dcl8 bool;
dcl dcl9 int = 5;
dcl dcl10, dcl11 int = 6;
dcl dcl12 int, dcl13, dcl14 int = 10;
dcl dcl15 int (2:5);
dcl dcl16 char (0:10);
dcl dcl17 bool(10:11);
dcl dcl18 dcl17 (1:2);
dcl dcl19 int (0:1) (1:2);
"""
lya_source_syn = """
syn syn1 = 1;
syn syn2, syn3, syn4 = 3;
syn syn5 int = 2;
syn syn6, syn7 int = 3;
syn syn8 = 10, syn9 = 12;
syn syn10, syn11 int = 13, syn12 = 20;
"""
lya_source_type = """
type type1 = int;
type type2 = char;
type type3 = bool;
type type4 = type3;
type type7, type8 = int;
type type9, type10, type11 = char;
type type12 = bool, type13 = type9;
type type14 = int, type15, type16 = char, type17, type18, type19 = char;
type type20 = ref int;
type type21 = ref ref type20;
type type22 = chars[20];
type type23 = array [int] char;
type type24 = array[1:2] bool;
type type25 = array[int, bool, char, mode1(1:4), int(3:5), 1:5] bool;
"""
lya_source_composite_mode = """
dcl cms1 chars [10];
dcl cma1 array [int] bool;
dcl cma2 array [bool, int] char;
"""
lya_source_procedure1 = """
power: proc (n int, r int) returns (int);
dcl c int;
type t = bool;
end;
"""
lya_source_procedure2 = """
power: proc (n int, r int) returns (int);
end;
"""
lya_source_procedure3 = """
power: proc (n int, r int);
dcl c int;
type t = bool;
end;
"""
lya_source_procedure4 = """
power: proc () returns (int);
dcl c int;
type t = bool;
end;
"""
lya_source_procedure5 = """
power: proc (n int, r int);
end;
"""
lya_source_procedure6 = """
power: proc () returns (int);
end;
"""
lya_source_procedure7 = """
power: proc ();
dcl c int;
end;
"""
lya_source_procedure8 = """
power: proc ();
end;
"""
lya_source_procedure9 = """
power: proc (n int loc, r, z int) returns (int loc);
dcl c, d int = 1;
type t = bool;
end;
"""
lya_source_if1 = """
label: if 1+2 then
exit label1;
else
exit label2;
fi;
"""
lya_source_if2 = """
if 1+2 then
exit label1;
exit label2;
fi;
"""
lya_source_if3 = """
if 1+2 then
else
exit label2;
exit label3;
fi;
"""
lya_source_if4 = """
if 1+2 then
else
fi;
"""
lya_source_if5 = """
if 1+2 then
exit label1;
elsif 1+2 then
exit label2;
exit label22;
else
exit lable3;
fi;
"""
lya_source_if6 = """
if 1+2 then
exit label1;
elsif 1+2 then
exit label2;
exit label22;
fi;
"""
lya_source_if7 = """
if 1+2 then
if 1+3 then
exit label1;
fi;
elsif 1+2 then
exit label2;
if 2+5 then
else
exit label22;
fi;
else
if 2+5 then
exit a1;
elsif 1+2 then
exit label22;
fi;
fi;
"""
lya_source_action1 = """
label1: ac1 = 10 + 10;
ac2 += 2;
ac3 -= 10;
ac4 *= 55;
ac5 /= 1;
ac5 %= 20;
ac6 &= 2;
"""
lya_source_expression = """
dcl var1 int=3+5-7*7/9%3;
dcl var2 int = 2 in 3;
dcl var3 bool = 5 && 3 || 1 == 2 & 2;
dcl var4 bool = if 2 then 3 else 5 fi;
dcl var2 int = var1 + 3;
"""
lya_source_action2 = """
exit label1;
result 1 + 2;
return;
return 2 + 1;
"""
lya_source_call1 = """
function();
function(1);
function(1, 2);
function(1+2, 2);
function(1,2,3/2);
"""
lya_source_call2 = """
num(1);
pred();
succ(1,2);
upper(1/2);
lower(2/3);
length();
read(100);
print(var2+2);
"""
lya_source_do1 = """
dcl var int = 3;
do od;
do var = 2; od;
do while 1; od;
do while 3; var = 32; od;
"""
lya_source_do2 = """
do for counter in int; od;
do for counter in bool; var3 = 12; od;
do for counter down in char; od;
do for counter in int while 3; var = 32; od;
do for counter = 3 to 8; od;
do for counter = 3 down to 8; od;
do for counter = 3 by 5 to 8; od;
do for counter = 3 by 5 down to 8; od;
"""
lya_source_do3 = """
dcl var int = 3;
do od;
do var = 2; od;
do while var; od;
do while 3; var = 32; od;
"""
test2_source = """dcl m int = 2, n int = 3;
p: proc (x int);
dcl s int;
s = m * x;
print("s = ", s);
end;
p(n);
print(m);"""
test3_source = """dcl m int = 2, n int = 3;
p: proc (x, y int, b bool) returns (int);
dcl s int = x;
if b then
s += y;
result s;
else
result y;
fi;
end;
dcl b bool;
read (b);
print (p(m, n, b));"""
test4_source = """dcl i int, b bool = true;
x:
do while b;
read (i);
if i <= 0 then
exit x;
fi;
print (i*i);
od;
print (0);"""
test5_source = """dcl i, soma int;
soma = 0;
do for i=1 to 10;
soma += i;
od;
print (soma);
"""
test6_source = """dcl i int;
dcl soma int = 0, b bool = true;
do for i=1 to 10 while b;
soma += i;
if soma > 100 then
b = false;
fi;
od;
print (soma);"""
test7_source = """dcl i,j int, r ref int;
p: proc(x int, y ref int) returns (int);
dcl b bool;
read(b);
if b then
y = -> i;
result y->;
else
y = r;
result r->;
fi;
end;
read(i);
r = -> i;
print(p(i,->j));"""
test8_source = """dcl i int, j,k int = 2;
p: proc(x int, y int loc) returns (int loc);
dcl z int = y;
y = x;
result k;
print(z); /* print 2 */
end;
i = p(3,j);
print(i, j); /* print 2,3 */"""
test9_source = """dcl a array[3:10] int;
dcl i,j int;
read(j);
a[3]=2*j;
do
for i = 4 to 10;
a[i] = 5+i;
od;
print(a[j]);"""
test10_source = """dcl x, y int;
p: proc (b bool) returns (int loc);
if b then
result x;
else
result y;
fi;
end;
dcl b bool = false;
p(b) = 20;
p(true) = 10;
print(x, y); // display 10, 20
"""
test11_source = """type vector = array[1:10] int;
dcl v vector, i int;
sum: proc (v vector) returns (int);
dcl s, i int;
i = 1;
s = 0;
do
while i<=10;
s = s + v[i];
i += 1;
od;
return s;
end;
do
for i = 1 to 10;
read(v[i]);
od;
print(sum(v));"""
syn_test_source = """syn sy1 = 20;
syn sy6 = sy1;
syn sy2 char = 'c';
syn sy3 bool = true;
syn sy4 int = 1 + sy1;"""
dcl_op_source = """dcl var1 int=3+5-7*7/9%3; dcl var2 int = 2 in 3;"""
dcl_op_source2 = """dcl var2, varx char;\ndcl var3, var4 int = 10;\ndcl var5 = 10 + 5 * (10 - 20);"""
test_rel_exp_source = """dcl m bool = false, n bool = false;
p: proc (x bool);
dcl s bool;
s = m >= x;
end;
p(n);"""
test_unary_op_source = """dcl m int = 2, n int = 3;
p: proc (x int);
dcl s bool;
s = !true;
end;
p(n);"""
test_elsif_source = """dcl m int = 2, n int = 3, y, s int, b bool = true;
if b then
s += y;
elsif b then
s = y;
else
s = 3;
fi;
print (s);"""
testret_source = """dcl m int = 2, n int = 3;
p: proc (x, y int, b bool) returns (int);
dcl s int = x;
if b then
s += y;
return s;
else
result y;
fi;
end;
dcl b bool = true;
read (b);
print (p(m, n, b));"""
typedef_source = """type my_int = int;
dcl x my_int = 2;
type vector = array[1:10] int;
dcl v vector;
type p_int = ref int;
dcl pi p_int;
print(x);
print(v);
print(pi);
type r_my_int = ref my_int;
dcl uou r_my_int;
print(uou);"""
printtest_source = """
dcl c chars[10] = "BANANA";
print("Oi", "tudo bem?");
print(c);"""
# The only variable exported from this module.
__all__ = ['lya_debug_source']
lya_gcd = """
gcd: proc (x int, y int) returns (int);
dcl g int;
g = y;
do
while x > 0;
g = x;
x = y - (y/x) * x;
y = g;
od;
return g;
end;
dcl a, b int;
print("give-me two integers separated by space:");
read(a);
read(b);
print ("GCD of ", a, b, " is ", gcd(a,b));"""
lya_gen_primes = """dcl n1, n2, i, j int, flag bool;
print("Enter 2 numbers (intervals) separated by space: ");
read(n1);
read(n2);
print("Prime numbers between ", n1, " and ", n2, " are:\n");
do
for i = n1 to n2;
flag = true;
loop: do
for j = 2 to i/2;
if i % j == 0 then
flag = false;
exit loop;
fi;
od;
if flag then
print(i, " ");
fi;
od;
"""
lya_bubble_sort = """dcl v array[0:100] int;
dcl n, c, d, swap int;
print("Enter number of elements: ");
read(n);
print("Enter ", n, " integers\n");
do
for c = 0 to n-1;
read(v[c]);
od;
do
for c = 0 to n-2;
do
for d = 0 to n-c-2;
// For decreasing order use "<"
if v[d] > v[d+1] then
swap = v[d];
v[d] = v[d+1];
v[d+1] = swap;
fi;
od;
od;
print("Sorted list in ascending order:\n");
do
for c = 0 to n-1;
print(v[c], " ");
od;
"""
lya_palindrome = """dcl n,t int, reverse int = 0;
print("Enter a number: ");
read(n);
t = n;
do
while t != 0;
reverse = reverse * 10;
reverse = reverse + t % 10;
t = t / 10;
od;
if n == reverse then
print(n, " is a palindrome number.\n");
else
print(n, " is not a palindrome number.\n");
fi;"""
lya_ref_example = """swapByRef: proc(x ref int, y ref int);
dcl t int = x->;
x-> = y->;
y-> = t;
end;
dcl i int = 10, j int = 20;
// declaring reference to int
dcl r ref int = ->i;
swapByRef( r, ->j );
print(i, j);"""
lya_fibo = """fibo: proc (n int, g int loc);
dcl h int;
if n < 0 then
print(g);
return;
else
h = g; fibo(n-1, h);
g = h; fibo(n-2, g);
fi;
print(n,g);
end;
dcl k int = 0;
fibo(3,k);
//fibo(-1,k);
"""
lya_armstrong = """power: proc (n int, r int) returns (int);
dcl c int, p int = 1;
do
for c = 1 to r;
p = p*n;
od;
return p;
end;
dcl n int, sum int = 0;
dcl temp, remainder int, digits int = 0;
print("Input an integer: ");
read(n);
temp = n;
do
while temp != 0;
digits += 1;
temp = temp / 10;
od;
temp = n;
do
while temp != 0;
remainder = temp % 10;
sum = sum + power(remainder, digits);
temp = temp / 10;
od;
if n == sum then
print(n, " is an Armstrong number.\n");
else
print(n, " is not an Armstrong number.\n");
fi;"""
lya_fat = """
fat: proc (n int) returns (int);
if n==0 then
return 1;
else
return n * fat (n-1);
fi;
end;
dcl x int;
print("give-me a positive integer:");
read(x);
print("fatorial of ", x, " = ", fat(x));"""
lya_int_stack = """syn top int = 10;
type stack = array [1:top+1] int;
push: proc (s stack loc, elem int);
if s[top+1] == top then
print("stack is full");
else
s[top+1] += 1;
s[s[top+1]] = elem;
fi;
end;
pop: proc (s stack loc) returns (int);
if s[top+1] == 0 then
print("empty stack");
result 0;
else
result s[s[top+1]];
s[top+1] -= 1;
fi;
end;
init: proc (s stack loc);
s[top+1] = 0;
end;
dcl q stack, v1, v2 int;
init(q);
read(v1, v2);
push(q,v1);
push(q,v2);
print(pop(q) + pop(q));"""
lya_debug_source = lya_bubble_sort
| bsd-3-clause | -97,219,455,865,958,500 | 16.382946 | 101 | 0.505619 | false | 2.561572 | true | false | false |
filippobrizzi/soma | graph_sched/graphCreator.py | 1 | 5395 | import sys
import pargraph as par
import copy
import schedule as sched
import profiler as pro
import time
import multiprocessing
import itertools
import random
import threading
""" Usage: call with <filename> <pragma_xml_file> <executable_name> <profiling_interations> True/False (for output) """
if __name__ == "__main__":
pragma_xml = sys.argv[1]
executable = sys.argv[2]
count = int(sys.argv[3])
output = sys.argv[4]
execution_time = float(sys.argv[5])
deadline = float(sys.argv[6])
multi = sys.argv[7]
    #runs the executable <count> times and aggregates the information in executable_profile.xml. The single profile outputs are saved as profile+iter.xml
profile_xml = pro.profileCreator(count, executable)
    #returns the nested dot graphs in code style (one for each function)
visual_nested_graphs = par.getNesGraph(pragma_xml, profile_xml)
#returns the graphs to be visualized and the object graphs in flow style (one for each function)
(visual_flow_graphs, flow_graphs) = par.getParalGraph(pragma_xml, profile_xml)
i = 0
for g in visual_nested_graphs:
g.write_pdf('graphs/%s_code.pdf'%flow_graphs[i].type)
g.write_dot('graphs/%s_code.dot'%flow_graphs[i].type)
i += 1
i = 0
for g in visual_flow_graphs:
g.write_pdf('graphs/%s_flow.pdf'%flow_graphs[i].type)
g.write_dot('graphs/%s_flow.dot'%flow_graphs[i].type)
i += 1
#creates the flow type graph --> flow.xml
par.dump_graphs(flow_graphs)
#adding to the original xml the profiling informations --> code.xml
pro.add_profile_xml(profile_xml, pragma_xml)
#creating the total graph with the call-tree
func_graph = par.create_complete_graph(visual_flow_graphs, profile_xml)
#creating the graphs with the function calls
func_graph.write_pdf('graphs/function_graphs.pdf')
func_graph.write_dot('graphs/function_graphs.dot')
#creating the expanded graph where the functions are inserted in the flow graph
exp_flows = copy.deepcopy(flow_graphs)
par.explode_graph(exp_flows)
main_flow = sched.get_main(exp_flows)
#creating a generator for the expanded graph
gen = sched.generate_task(main_flow)
#creating a new generator for the expanded graph
sched.make_white(main_flow)
#getting the number of physical cores of the machine profiled
max_flows = sched.get_core_num(profile_xml)
max_flows = 4
#getting cores of the actual machine, but the problem is multithreading
cores = multiprocessing.cpu_count()
if cores == 1:
cores = 2
#initializing all the lists for the parallel scheduling algorithm
tasks_list = []
task_list = []
flows_list = []
optimal_flow_list = []
p_list = []
queue_list = []
results = []
num_tasks = 0
#getting the number of tasks in the expanded graph and creating a list of task
for task in gen:
task_list.append(task)
num_tasks += 1
if output == 'True':
sched.make_white(main_flow)
par.scanGraph(main_flow)
    #starting the parallel or sequential search of the best solution with a timing constraint
if multi == 'parallel':
for core in range(cores):
tmp = []
optimal_flow_list.append(tmp)
tmp_2 = []
flows_list.append(tmp_2)
random.shuffle(task_list)
tasks_list.append(copy.deepcopy(task_list))
q = sched.Queue()
queue_list.append(q)
p_list.append(multiprocessing.Process(target = sched.get_optimal_flow, args = (flows_list[core], tasks_list[core], 0, optimal_flow_list[core], num_tasks, max_flows, execution_time, queue_list[core], )))
print "starting core: ",core
p_list[core].start()
#getting the results from the processes
for queue in queue_list:
t = queue.q.get()
results.append(t)
#joining all the processes
i = 0
for p in p_list:
p.join()
print "core ", i, " joined"
i += 1
#getting the best result
optimal_flow = results[0]
best = 0
for i in range(len(results)):
print "result:"
for flow in results[i]:
flow.dump()
if sched.get_cost(results[i]) < sched.get_cost(optimal_flow):
best = i
optimal_flow = results[best]
else:
optimal_flow = []
flow_list = []
execution_time += time.clock()
print "searching best schedule"
sched.get_optimal_flow_single(flow_list, task_list, 0, optimal_flow, num_tasks, max_flows, execution_time )
#printing the best result
print "solution:"
for flow in optimal_flow:
flow.dump("\t")
print "\ttime:",flow.time
#substitutes "for tasks" with splitted versions if present in the optimal flows
par.add_new_tasks(optimal_flow, main_flow)
sched.make_white(main_flow)
gen_ = sched.generate_task(main_flow)
t_list = []
for t in gen_:
t_list.append(t)
"""
print t.type," @ ", t.start_line, " has parents:"
for p in t.parent:
print "\t ",p.type," @ ", p.start_line
print "and children:"
for c in t.children:
print "\t ",c.type," @ ", c.start_line
print
"""
    #adds ids to all the tasks to retrieve the flow to which they belong
par.add_flow_id(optimal_flow, t_list)
    #sets arrival times and deadlines using a modified version of the Chetto algorithm
sched.chetto(main_flow, deadline, optimal_flow)
#checks if the schedule is feasible and in case creates the schedule file
if sched.check_schedule(main_flow):
sched.create_schedule(main_flow, len(optimal_flow))
sched.make_white(main_flow)
#sched.print_schedule(main_flow)
else:
print "tasks not schedulable, try with more search time"
#prints extended info of the entire pragma graph
| gpl-3.0 | -2,984,676,818,941,451,300 | 27.544974 | 206 | 0.704912 | false | 3.044582 | false | false | false |
jkyeung/XlsxWriter | xlsxwriter/test/comparison/test_types07.py | 1 | 2093 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2016, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'types07.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = ['xl/calcChain.xml', '[Content_Types].xml', 'xl/_rels/workbook.xml.rels']
self.ignore_elements = {}
def test_write_nan_and_inf(self):
"""Test writing special numbers."""
workbook = Workbook(self.got_filename, {'nan_inf_to_errors': True})
worksheet = workbook.add_worksheet()
worksheet.write(0, 0, float('nan'))
worksheet.write(1, 0, float('inf'))
worksheet.write(2, 0, float('-inf'))
workbook.close()
self.assertExcelEqual()
def test_write_nan_and_inf_write_number(self):
"""Test writing special numbers."""
workbook = Workbook(self.got_filename, {'nan_inf_to_errors': True})
worksheet = workbook.add_worksheet()
worksheet.write_number(0, 0, float('nan'))
worksheet.write_number(1, 0, float('inf'))
worksheet.write_number(2, 0, float('-inf'))
workbook.close()
self.assertExcelEqual()
def test_write_nan_and_inf_write_as_string(self):
"""Test writing special numbers."""
workbook = Workbook(self.got_filename, {'nan_inf_to_errors': True,
'strings_to_numbers': True})
worksheet = workbook.add_worksheet()
worksheet.write(0, 0, 'nan')
worksheet.write(1, 0, 'inf')
worksheet.write(2, 0, '-inf')
workbook.close()
self.assertExcelEqual()
| bsd-2-clause | -7,666,676,698,832,765,000 | 28.478873 | 101 | 0.580029 | false | 3.730838 | true | false | false |
DimaWoogy/convex_hull_bst | charts/run.py | 1 | 4349 | from matplotlib import pyplot as plt
from matplotlib.font_manager import FontProperties
from subprocess import call
import random
import sys
import math
from scipy.spatial import ConvexHull
from shapely import geometry
font = FontProperties()
font.set_family('Times New Roman')
font.set_size(12)
def generate_points_on_circle(size):
for i in range(size):
angle = random.uniform(0, 2 * math.pi)
yield (math.cos(angle), math.sin(angle))
def generate_points_in_circle(size):
for i in range(size):
angle = random.uniform(0, 2 * math.pi)
radius = random.random()
yield (radius * math.cos(angle), radius * math.sin(angle))
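# Note on generate_points_in_circle(): drawing the radius uniformly biases the
# samples toward the centre of the disc; an area-uniform sample would use
# radius = math.sqrt(random.random()) instead.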
def generate_points_with_normal_dist(size):
return [(random.gauss(0, 1), random.gauss(0, 1)) for i in range(size)]
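# triangle_area() below computes the area from the three side lengths via
# Heron's formula: with semi-perimeter p, area = sqrt(p*(p-a)*(p-b)*(p-c)).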
def triangle_area(triangle):
def distance(p1, p2):
return math.hypot(p1[0] - p2[0], p1[1] - p2[1])
a, b, c = triangle
first = distance(a, b)
second = distance(b, c)
third = distance(c, a)
p = 0.5 * (first + second + third)
return math.sqrt(p * (p - first) * (p - second) * (p - third))
def triangles_from_hull(points):
p = [points[i] for i in ConvexHull(points).vertices]
t = [(p[0], p[i], p[i + 1]) for i in range(1, len(p) - 1)]
w = [triangle_area(x) for x in t]
return t, w
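# random_point_in_triangle() below uses the standard sqrt trick: with r1, r2
# uniform in [0, 1), the barycentric weights (1 - sqrt(r1)), sqrt(r1)*(1 - r2)
# and sqrt(r1)*r2 place the point uniformly over the triangle.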
def random_point_in_triangle(triangle):
a, b, c = triangle
r1, r2 = random.random(), random.random()
x = (1 - math.sqrt(r1)) * a[0] + (math.sqrt(r1) * (1 - r2)) * b[0] + \
(math.sqrt(r1) * r2) * c[0]
y = (1 - math.sqrt(r1)) * a[1] + (math.sqrt(r1) * (1 - r2)) * b[1] + \
(math.sqrt(r1) * r2) * c[1]
return x, y
def generate_points_with_percent_on_hull(size, percent):
points_on_hull = list(generate_points_on_circle(round(size * percent)))
triangles, weights = triangles_from_hull(points_on_hull)
in_hull_size = size - len(points_on_hull)
points_in_hull = [random_point_in_triangle(t) for t in random.choices(
triangles, weights=weights, k=in_hull_size)]
res = points_on_hull + points_in_hull
random.shuffle(res)
return res
def generate_input(sizes, genFunc):
with open("in.txt", 'w') as f:
f.write(str(len(sizes)) + '\n')
for size in sizes:
points = list(genFunc(size))
f.write(str(len(points)) + '\n')
for x, y in points:
f.write(str(x) + ' ' + str(y) + '\n')
def read_algo_names():
with open("algoNames.txt", 'r') as f:
return list(f.readlines())
def read_result():
with open("out.txt", 'r') as f:
return list(zip(*[list(map(float, line.split())) for line in f]))
def plot_and_save(x, values, names, labelx, labely, filename):
linestyles = ['k--', 'k-']
g = []
for i in range(len(values)):
g.append(plt.plot(x, values[i], linestyles[i])[0])
plt.yticks(fontname="Times New Roman", fontsize=10)
plt.xticks(fontname="Times New Roman", fontsize=10)
plt.legend(g, names, prop=font)
plt.xlabel(labelx, fontproperties=font)
plt.ylabel(labely, fontproperties=font)
plt.grid()
plt.savefig(filename, bbox_inches='tight')
def new_comparison(executable):
percentage_on_hull = [0.1, 0.5, 1, 1.5, 2, 2.5, 3]
print(percentage_on_hull)
points_num = [10000, 25000, 50000, 75000, 100000, 250000, 500000, 1000000]
print(points_num)
for n in points_num:
generate_input(percentage_on_hull,
lambda percent:
generate_points_with_percent_on_hull(n, percent / 100))
call([executable])
y = read_result()
print([f[1] / f[0] for f in zip(*y)])
plt.figure()
plot_and_save(percentage_on_hull, y, read_algo_names(),
'процент', 'время (сек)',
'comparison_' + str(n) + '.svg')
def classic_comparison(executable):
x = [1000, 2500, 5000, 7500, 10000, 25000, 50000, 75000, 100000]
plt.figure()
generate_input(x, generate_points_with_normal_dist)
call([executable])
plot_and_save(x, read_result(), read_algo_names(),
'количество точек', 'время (сек)', 'comparison_gauss.svg')
if len(sys.argv) == 2:
new_comparison(sys.argv[1])
else:
print("Usage: run.py path_to_executable")
| mit | -5,708,612,539,971,205,000 | 29.146853 | 78 | 0.597773 | false | 2.946685 | false | false | false |
z23han/Wrangling-MongoDB | Lesson_1_Data_Extraction_Fundamentals/parseCSV.py | 1 | 2010 | # Your task is to read the input DATAFILE line by line, and for the first 10 lines (not including the header)
# split each line on "," and then for each line, create a dictionary
# where the key is the header title of the field, and the value is the value of that field in the row.
# The function parse_file should return a list of dictionaries,
# each data line in the file being a single list entry.
# Field names and values should not contain extra whitespace, like spaces or newline characters.
# You can use the Python string method strip() to remove the extra whitespace.
# You have to parse only the first 10 data lines in this exercise,
# so the returned list should have 10 entries!
import os
DATADIR = ""
DATAFILE = "beatles-diskography.csv"
# -*- coding: utf-8 -*-
def parse_file(datafile):
data = []
with open(datafile, "r") as f:
cnt = 0
key_list = []
value_list = []
for line in f:
#print line
if cnt == 0:
key_list = line.strip().split(',')
else:
value_list = line.strip().split(',')
if cnt != 0:
data_dict = {}
for i in xrange(len(key_list)):
data_dict[key_list[i]] = value_list[i]
data.append(data_dict)
cnt += 1
if cnt > 10:
break
return data
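# Note: the naive split(',') above is adequate for this particular file, but it
# would break on quoted fields containing commas; csv.DictReader is the robust
# alternative for general CSV input.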
def test():
    # a simple test of your implementation
datafile = os.path.join(DATADIR, DATAFILE)
d = parse_file(datafile)
firstline = {'Title': 'Please Please Me', 'UK Chart Position': '1', 'Label': 'Parlophone(UK)', 'Released': '22 March 1963', 'US Chart Position': '-', 'RIAA Certification': 'Platinum', 'BPI Certification': 'Gold'}
tenthline = {'Title': '', 'UK Chart Position': '1', 'Label': 'Parlophone(UK)', 'Released': '10 July 1964', 'US Chart Position': '-', 'RIAA Certification': '', 'BPI Certification': 'Gold'}
assert d[0] == firstline
assert d[9] == tenthline
test()
| agpl-3.0 | 410,967,701,874,879,040 | 37.653846 | 216 | 0.602488 | false | 3.771107 | false | false | false |
City-of-Helsinki/smbackend | smbackend/urls.py | 1 | 1679 | from django.contrib import admin
from django.urls import include, re_path
from django.utils.translation import gettext_lazy as _
from munigeo.api import all_views as munigeo_views
from rest_framework import routers
from observations.api import views as observations_views
from observations.views import obtain_auth_token
from services import views
from services.api import all_views as services_views
from services.unit_redirect_viewset import UnitRedirectViewSet
from shortcutter import urls as shortcutter_urls
admin.site.site_header = _("Servicemap administration")
admin.site.index_title = _("Application management")
router = routers.DefaultRouter()
registered_api_views = set()
for view in services_views + munigeo_views + observations_views:
kwargs = {}
if view["name"] in registered_api_views:
continue
else:
registered_api_views.add(view["name"])
if "basename" in view:
kwargs["basename"] = view["basename"]
router.register(view["name"], view["class"], **kwargs)
urlpatterns = [
# Examples:
# url(r'^$', 'smbackend.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
# url(r'^', include(v1_api.urls)),
# url(r'^admin/', include(admin.site.urls)),
re_path(r"^admin/", admin.site.urls),
re_path(r"^open311/", views.post_service_request, name="services"),
re_path(r"^v2/", include(router.urls)),
re_path(r"^v2/api-token-auth/", obtain_auth_token, name="api-auth-token"),
re_path(r"^v2/redirect/unit/", UnitRedirectViewSet.as_view({"get": "list"})),
re_path(r"^v2/suggestion/", views.suggestion, name="suggestion"),
re_path(r"", include(shortcutter_urls)),
]
| agpl-3.0 | 5,467,478,504,102,424,000 | 35.5 | 81 | 0.699226 | false | 3.483402 | false | false | false |
code-for-india/sahana_shelter_worldbank | models/zzz_1st_run.py | 1 | 13208 | # -*- coding: utf-8 -*-
# 1st-run initialisation
# Set settings.base.prepopulate to 0 in Production
# (to save 1x DAL hit every page).
pop_list = settings.get_base_prepopulate()
if pop_list == 0:
pop_list = []
else:
table = db[auth.settings.table_group_name]
# The query used here takes 2/3 the time of .count().
if db(table.id > 0).select(table.id, limitby=(0, 1)).first():
pop_list = []
if not isinstance(pop_list, (list, tuple)):
pop_list = [pop_list]
if len(pop_list) > 0:
# =========================================================================
# Populate default roles and permissions
#
# Allow debug
import sys
print >> sys.stdout, "Please be patient whilst the database is populated"
# Shortcuts
acl = auth.permission
sysroles = auth.S3_SYSTEM_ROLES
create_role = auth.s3_create_role
#update_acls = auth.s3_update_acls
# Do not remove or change order of these 5 definitions (System Roles):
create_role("Administrator",
"System Administrator - can access & make changes to any data",
uid=sysroles.ADMIN,
system=True, protected=True)
create_role("Authenticated",
"Authenticated - all logged-in users",
uid=sysroles.AUTHENTICATED,
protected=True)
create_role("Anonymous",
"Unauthenticated users",
# Allow unauthenticated users to view the list of organisations
# so they can select an organisation when registering
dict(t="org_organisation", uacl=acl.READ, entity="any"),
# Allow unauthenticated users to see the list of sites for an
# org when registering
dict(c="org", f="sites_for_org", uacl=acl.READ, entity="any"),
uid=sysroles.ANONYMOUS,
protected=True)
# Primarily for Security Policy 2
create_role("Editor",
"Editor - can access & make changes to any unprotected data",
uid=sysroles.EDITOR,
system=True, protected=True)
# MapAdmin
map_admin = create_role("MapAdmin",
"MapAdmin - allowed access to edit the MapService Catalogue",
dict(c="gis", uacl=acl.ALL, oacl=acl.ALL),
dict(c="gis", f="location", uacl=acl.ALL, oacl=acl.ALL),
uid=sysroles.MAP_ADMIN,
system=True, protected=True)
# OrgAdmin (policies 6, 7 and 8)
create_role("OrgAdmin",
"OrgAdmin - allowed to manage user roles for entity realms",
uid=sysroles.ORG_ADMIN,
system=True, protected=True)
# Enable shortcuts (needed by default.py)
system_roles = auth.get_system_roles()
ADMIN = system_roles.ADMIN
AUTHENTICATED = system_roles.AUTHENTICATED
ANONYMOUS = system_roles.ANONYMOUS
EDITOR = system_roles.EDITOR
MAP_ADMIN = system_roles.MAP_ADMIN
ORG_ADMIN = system_roles.ORG_ADMIN
# =========================================================================
# Configure Scheduled Tasks
#
has_module = settings.has_module
if has_module("msg"):
# Send Messages from Outbox
# SMS every minute
s3task.schedule_task("msg_process_outbox",
vars={"contact_method":"SMS"},
period=120, # seconds
timeout=120, # seconds
repeats=0 # unlimited
)
# Emails every 5 minutes
s3task.schedule_task("msg_process_outbox",
vars={"contact_method":"EMAIL"},
period=300, # seconds
timeout=300, # seconds
repeats=0 # unlimited
)
# Tweets every minute
#s3task.schedule_task("msg_process_outbox",
# vars={"contact_method":"TWITTER"},
# period=120, # seconds
# timeout=120, # seconds
# repeats=0 # unlimited
# )
# Subscription notifications
s3task.schedule_task("notify_check_subscriptions",
period=300,
timeout=300,
repeats=0)
# Daily maintenance
s3task.schedule_task("maintenance",
vars={"period":"daily"},
period=86400, # seconds, so 1/day
timeout=600, # seconds
repeats=0 # unlimited
)
# =========================================================================
# Import PrePopulate data
#
# Override authorization
auth.override = True
# Load all Models to ensure all DB tables present
s3db.load_all_models()
# Shortcuts
path_join = os.path.join
request_folder = request.folder
if settings.get_auth_opt_in_to_email():
table = db.pr_group
for team in settings.get_auth_opt_in_team_list():
table.insert(name = team, group_type = 5)
# Synchronisation
db.sync_config.insert() # Defaults are fine
# Person Registry
tablename = "pr_person"
# Add extra indexes on search fields
# Should work for our 3 supported databases: sqlite, MySQL & PostgreSQL
field = "first_name"
db.executesql("CREATE INDEX %s__idx on %s(%s);" % (field, tablename, field))
field = "middle_name"
db.executesql("CREATE INDEX %s__idx on %s(%s);" % (field, tablename, field))
field = "last_name"
db.executesql("CREATE INDEX %s__idx on %s(%s);" % (field, tablename, field))
# GIS
# Add extra index on search field
# Should work for our 3 supported databases: sqlite, MySQL & PostgreSQL
tablename = "gis_location"
field = "name"
db.executesql("CREATE INDEX %s__idx on %s(%s);" % (field, tablename, field))
# Messaging Module
if has_module("msg"):
update_super = s3db.update_super
# To read inbound email, set username (email address), password, etc.
# here. Insert multiple records for multiple email sources.
table = db.msg_email_channel
id = table.insert(server = "imap.gmail.com",
protocol = "imap",
use_ssl = True,
port = 993,
username = "example-username",
password = "password",
delete_from_server = False
)
update_super(table, dict(id=id))
# Need entries for the Settings/1/Update URLs to work
table = db.msg_sms_outbound_gateway
id = table.insert(outgoing_sms_handler = "WEB_API")
update_super(table, dict(id=id))
table = db.msg_sms_modem_channel
id = table.insert(modem_baud = 115200)
update_super(table, dict(id=id))
table = db.msg_sms_webapi_channel
id = table.insert(to_variable = "to")
update_super(table, dict(id=id))
table = db.msg_sms_smtp_channel
id = table.insert(address="changeme")
update_super(table, dict(id=id))
table = db.msg_tropo_channel
id = table.insert(token_messaging = "")
update_super(table, dict(id=id))
table = db.msg_twitter_channel
id = table.insert(enabled = False)
update_super(table, dict(id=id))
# Budget Module
if has_module("budget"):
db.budget_parameter.insert() # Defaults are fine
# Climate Module
if has_module("climate"):
s3db.climate_first_run()
# CAP module
if has_module("cap"):
db.cap_alert.insert(template_title="Default", is_template=True)
# Incident Reporting System
if has_module("irs"):
# Categories visible to ends-users by default
table = db.irs_icategory
table.insert(code = "flood")
table.insert(code = "geophysical.landslide")
table.insert(code = "roadway.bridgeClosure")
table.insert(code = "roadway.roadwayClosure")
table.insert(code = "other.buildingCollapsed")
table.insert(code = "other.peopleTrapped")
table.insert(code = "other.powerFailure")
# Supply Module
if has_module("supply"):
db.supply_catalog.insert(name = settings.get_supply_catalog_default())
# Ensure DB population committed when running through shell
db.commit()
# =========================================================================
# PrePopulate import (from CSV)
#
# Create the bulk Importer object
bi = s3base.S3BulkImporter()
s3.import_role = bi.import_role
s3.import_user = bi.import_user
s3.import_image = bi.import_image
s3.import_remote_csv = bi.import_remote_csv
# Relax strict email-matching rule for import updates of person records
email_required = settings.get_pr_import_update_requires_email()
settings.pr.import_update_requires_email = False
# Additional settings for user table imports:
s3db.configure("auth_user",
onaccept = lambda form: auth.s3_approve_user(form.vars))
s3db.add_components("auth_user", auth_membership="user_id")
# Flag that Assets are being imported, not synced
s3.asset_import = True
# Allow population via shell scripts
if not request.env.request_method:
request.env.request_method = "GET"
grandTotalStart = datetime.datetime.now()
for pop_setting in pop_list:
start = datetime.datetime.now()
# Clear Tasklist
bi.tasks = []
# Import data specific to the prepopulate setting
if pop_setting == 1:
# Populate with the default data
path = path_join(request_folder,
"private",
"templates",
"default")
bi.perform_tasks(path)
else:
path = path_join(request_folder,
"private",
"templates",
pop_setting)
if os.path.exists(path):
bi.perform_tasks(path)
else:
print >> sys.stderr, "Unable to install data %s no valid directory found" % pop_setting
grandTotalEnd = datetime.datetime.now()
duration = grandTotalEnd - grandTotalStart
try:
# Python 2.7
duration = '{:.2f}'.format(duration.total_seconds()/60)
print >> sys.stdout, "Pre-populate task completed in %s mins" % duration
except AttributeError:
# older Python
print >> sys.stdout, "Pre-populate task completed in %s" % duration
bi.resultList = []
for errorLine in bi.errorList:
try:
print >> sys.stderr, errorLine
except:
s3_unicode = s3base.s3_unicode
_errorLine = ""
for i in range(0, len(errorLine)):
try:
                    _errorLine += s3_unicode(errorLine[i])
except:
pass
print >> sys.stderr, _errorLine
# Restore setting for strict email-matching
settings.pr.import_update_requires_email = email_required
# Restore Auth
auth.override = False
# Update Location Tree (disabled during prepop)
start = datetime.datetime.now()
gis.update_location_tree()
end = datetime.datetime.now()
print >> sys.stdout, "Location Tree update completed in %s" % (end - start)
# Countries are only editable by MapAdmin
db(db.gis_location.level == "L0").update(owned_by_group=map_admin)
if has_module("stats"):
# Populate stats_demographic_aggregate (disabled during prepop)
# - needs to be done after locations
start = datetime.datetime.now()
s3db.stats_demographic_rebuild_all_aggregates()
end = datetime.datetime.now()
print >> sys.stdout, "Demographic data aggregation completed in %s" % (end - start)
if has_module("vulnerability"):
# Populate vulnerability_aggregate (disabled during prepop)
# - needs to be done after locations
start = datetime.datetime.now()
s3db.vulnerability_rebuild_all_aggregates()
end = datetime.datetime.now()
print >> sys.stdout, "Vulnerability data aggregation completed in %s" % (end - start)
grandTotalEnd = datetime.datetime.now()
duration = grandTotalEnd - grandTotalStart
try:
# Python 2.7
duration = '{:.2f}'.format(duration.total_seconds()/60)
print >> sys.stdout, "Pre-populate completed in %s mins" % duration
except AttributeError:
# older Python
print >> sys.stdout, "Pre-populate completed in %s" % duration
# Restore view
response.view = "default/index.html"
# END =========================================================================
| mit | -2,316,289,051,526,580,700 | 36.310734 | 103 | 0.552998 | false | 4.240128 | false | false | false |
rackerlabs/deuce | deuce/tests/test_validation.py | 1 | 21839 | import hashlib
from unittest import TestCase
import uuid
from falcon import request
from stoplight import validate
from stoplight.exceptions import ValidationFailed
from deuce.transport import validation as v
from deuce.transport.wsgi import errors
class MockRequest(object):
pass
class InvalidSeparatorError(Exception):
"""Invalid Separator Error is raised whenever
    an invalid separator is set for joining query strings
    in a URL"""
def __init__(self, msg):
Exception.__init__(self, msg)
class TestRulesBase(TestCase):
@staticmethod
def build_request(params=None, separator='&'):
"""Build a request object to use for testing
:param params: list of tuples containing the name and value pairs
for parameters to add to the QUERY_STRING
"""
mock_env = {
'wsgi.errors': 'mock',
'wsgi.input': 'mock',
'REQUEST_METHOD': 'PUT',
'PATH_INFO': '/',
'SERVER_NAME': 'mock',
'SERVER_PORT': '8888',
'QUERY_STRING': None
}
if params is not None:
for param in params:
name = param[0]
value = param[1]
param_set = '{0}='.format(name)
if value is not None and len(value):
param_set = '{0}={1}'.format(name, value)
if mock_env['QUERY_STRING'] is None:
mock_env['QUERY_STRING'] = param_set
else:
if separator in ('&', ';'):
mock_env['QUERY_STRING'] = '{1}{0}{2}'.format(
separator, mock_env['QUERY_STRING'], param_set)
else:
raise InvalidSeparatorError('separator in query string'
'must be & or ;')
if mock_env['QUERY_STRING'] is None:
del mock_env['QUERY_STRING']
return request.Request(mock_env)
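    # Illustrative call: build_request(params=[('marker', 'abc'), ('limit', '10')])
    # returns a falcon Request whose QUERY_STRING is 'marker=abc&limit=10';
    # passing separator=';' joins the pairs with ';' instead.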
def cases_with_none_okay(self):
positive_cases = self.__class__.positive_cases[:]
positive_cases.append(None)
negative_cases = self.__class__.negative_cases[:]
while negative_cases.count(None):
negative_cases.remove(None)
while negative_cases.count(''):
negative_cases.remove('')
return (positive_cases, negative_cases)
class TestRequests(TestRulesBase):
def test_request(self):
positive_case = [TestRulesBase.build_request()]
negative_case = [MockRequest()]
for case in positive_case:
v.is_request(case)
for case in negative_case:
with self.assertRaises(ValidationFailed):
v.is_request(none_ok=True)(case)
class TestVaultRules(TestRulesBase):
positive_cases = [
'a',
'0',
'__vault_id____',
'-_-_-_-_-_-_-_-',
'snake_case_is_ok',
'So-are-hyphonated-names',
'a' * v.VAULT_ID_MAX_LEN
]
negative_cases = [
'', # empty case should raise
'.', '!', '@', '#', '$', '%',
'^', '&', '*', '[', ']', '/',
'@#$@#$@#^@%$@#@#@#$@!!!@$@$@',
'\\', 'a' * (v.VAULT_ID_MAX_LEN + 1),
None
]
@validate(vault_id=v.VaultGetRule)
def utilize_get_vault_id(self, vault_id):
return True
@validate(vault_id=v.VaultPutRule)
def utilize_put_vault_id(self, vault_id):
return True
@validate(req=v.RequestRule(v.VaultMarkerRule))
def utilize_request(self, req, raiseme=False):
if raiseme:
raise RuntimeError('QUERY_STRING: {0}'.format(req.query_string))
else:
return True
def test_vault_id(self):
for name in self.__class__.positive_cases:
v.val_vault_id(name)
for name in self.__class__.negative_cases:
with self.assertRaises(ValidationFailed):
v.val_vault_id()(name)
def test_vault_get(self):
for p_case in self.__class__.positive_cases:
self.assertTrue(self.utilize_get_vault_id(p_case))
for case in self.__class__.negative_cases:
with self.assertRaises(errors.HTTPNotFound):
self.utilize_get_vault_id(case)
def test_vault_put(self):
for p_case in self.__class__.positive_cases:
self.assertTrue(self.utilize_put_vault_id(p_case))
for case in self.__class__.negative_cases:
with self.assertRaises(errors.HTTPBadRequestAPI):
self.utilize_put_vault_id(case)
def test_vault_id_marker(self):
positive_cases, negative_cases = self.cases_with_none_okay()
for vault_id in positive_cases:
vault_id_req = TestRulesBase.build_request(params=[('marker',
vault_id)])
self.assertTrue(self.utilize_request(vault_id_req))
# We currently skip the negative test for the VaultMarkerRule
# due to the nature of the negative cases for the Vault Name.
# Leaving the code in below should we figure out a good way to
# capture the data for the URL encoding.
#
# Note: It is not a failure of build_request()'s QUERY_STRING building
# but a miss-match between it, urllib.parse.urlencode(), and Falcon.
# Use of urllib.parse.urlencode() has other issues here as well.
#
# for vault_id in negative_cases:
# vault_id_req = TestRulesBase.build_request(params=[('marker',
# vault_id)])
# with self.assertRaises(errors.HTTPNotFound):
# self.utilize_request(vault_id_req, raiseme=True)
class TestMetadataBlockRules(TestRulesBase):
positive_cases = [
'da39a3ee5e6b4b0d3255bfef95601890afd80709',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'ffffffffffffffffffffffffffffffffffffffff',
'a' * 40,
]
negative_cases = [
'',
'.',
'a', '0', 'f', 'F', 'z', '#', '$', '?',
'a39a3ee5e6b4b0d3255bfef95601890afd80709', # one char short
'da39a3ee5e6b4b0d3255bfef95601890afd80709a', # one char long
'DA39A3EE5E6B4B0D3255BFEF95601890AFD80709',
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF',
'AaaAaaAaaaaAaAaaaAaaaaaaaAAAAaaaaAaaaaaa' * 2,
'AaaAaaAaaaaAaAaaaAaaaaaaaAAAAaaaaAaaaaaa' * 3,
'AaaAaaAaaaaAaAaaaAaaaaaaaAAAAaaaaAaaaaaa' * 4,
None
]
@validate(metadata_block_id=v.BlockGetRule)
def utilize_get_metadata_block_get(self, metadata_block_id):
return True
@validate(metadata_block_id=v.BlockPutRule)
def utilize_put_metadata_block_id(self, metadata_block_id):
return True
@validate(metadata_block_id=v.BlockPostRule)
def utilize_post_metadata_block_id(self, metadata_block_id):
return True
@validate(metadata_block_id=v.BlockGetRuleNoneOk)
def utilize_get_metadata_block_get_none_okay(self, metadata_block_id):
return True
@validate(metadata_block_id=v.BlockPutRuleNoneOk)
def utilize_put_metadata_block_id_none_okay(self, metadata_block_id):
return True
@validate(metadata_block_id=v.BlockPostRuleNoneOk)
def utilize_post_metadata_block_id_none_okay(self, metadata_block_id):
return True
@validate(req=v.RequestRule(v.BlockMarkerRule))
def utilize_request(self, req, raiseme=False):
if raiseme:
raise RuntimeError('QUERY_STRING: {0}'.format(req.query_string))
else:
return True
def test_block_id(self):
for blockid in self.__class__.positive_cases:
v.val_block_id(blockid)
for blockid in self.__class__.negative_cases:
with self.assertRaises(v.ValidationFailed):
v.val_block_id()(blockid)
def test_get_block_id(self):
for blockid in self.__class__.positive_cases:
self.utilize_get_metadata_block_get(blockid)
for blockid in self.__class__.negative_cases:
with self.assertRaises(errors.HTTPNotFound):
self.utilize_get_metadata_block_get(blockid)
def test_put_block_id(self):
for blockid in self.__class__.positive_cases:
self.utilize_put_metadata_block_id(blockid)
for blockid in self.__class__.negative_cases:
with self.assertRaises(errors.HTTPBadRequestAPI):
self.utilize_put_metadata_block_id(blockid)
def test_get_block_id_none_okay(self):
positive_cases, negative_cases = self.cases_with_none_okay()
for blockid in positive_cases:
self.utilize_get_metadata_block_get_none_okay(blockid)
for blockid in negative_cases:
with self.assertRaises(errors.HTTPNotFound):
self.utilize_get_metadata_block_get_none_okay(blockid)
def test_put_block_id_none_okay(self):
positive_cases, negative_cases = self.cases_with_none_okay()
for blockid in positive_cases:
self.utilize_put_metadata_block_id_none_okay(blockid)
for blockid in negative_cases:
with self.assertRaises(errors.HTTPBadRequestAPI):
self.utilize_put_metadata_block_id_none_okay(blockid)
def test_post_block_id(self):
for blockid in self.__class__.positive_cases:
self.utilize_post_metadata_block_id(blockid)
for blockid in self.__class__.negative_cases:
with self.assertRaises(errors.HTTPBadRequestAPI):
self.utilize_post_metadata_block_id(blockid)
def test_post_block_id_none_okay(self):
positive_cases, negative_cases = self.cases_with_none_okay()
for blockid in positive_cases:
self.utilize_post_metadata_block_id_none_okay(blockid)
for blockid in negative_cases:
with self.assertRaises(errors.HTTPBadRequestAPI):
self.utilize_post_metadata_block_id_none_okay(blockid)
def test_block_id_marker(self):
positive_cases, negative_cases = self.cases_with_none_okay()
for block_id in positive_cases:
block_id_req = TestRulesBase.build_request(params=[('marker',
block_id)])
self.assertTrue(self.utilize_request(block_id_req))
for block_id in negative_cases:
block_id_req = TestRulesBase.build_request(params=[('marker',
block_id)])
with self.assertRaises(errors.HTTPNotFound):
self.utilize_request(block_id_req, raiseme=True)
class TestStorageBlockRules(TestRulesBase):
positive_cases = [hashlib.sha1(bytes(i)).hexdigest() + '_' +
str(uuid.uuid4()) for i in range(0, 1000)]
negative_cases = [
'',
'fecfd28bbc9345891a66d7c1b8ff46e60192d'
'2840c3de7c4-5fe9-4b2e-b19a-9cf81364997b', # note no '_' between sha1
# and uuid
'e7bf692b-ec7b-40ad-b0d1-45ce6798fb6z', # note trailing z
str(uuid.uuid4()).upper(), # Force case sensitivity
None
]
@validate(storage_block_id=v.StorageBlockGetRule)
def utilize_get_storage_block_get(self, storage_block_id):
return True
@validate(storage_block_id=v.StorageBlockPutRule)
def utilize_put_storage_block_id(self, storage_block_id):
return True
@validate(storage_block_id=v.StorageBlockRuleGetNoneOk)
def utilize_get_storage_block_get_none_okay(self, storage_block_id):
return True
@validate(storage_block_id=v.StorageBlockRulePutNoneOk)
def utilize_put_storage_block_id_none_okay(self, storage_block_id):
return True
@validate(req=v.RequestRule(v.StorageBlockMarkerRule))
def utilize_request(self, req, raiseme=False):
if raiseme:
raise RuntimeError('QUERY_STRING: {0}'.format(req.query_string))
else:
return True
def test_storage_storage_block_id(self):
for storage_id in self.__class__.positive_cases:
v.val_storage_block_id(storage_id)
for storage_id in self.__class__.negative_cases:
with self.assertRaises(ValidationFailed):
v.val_storage_block_id()(storage_id)
def test_get_storage_block_id(self):
for storage_id in self.__class__.positive_cases:
self.utilize_get_storage_block_get(storage_id)
for storage_id in self.__class__.negative_cases:
with self.assertRaises(errors.HTTPNotFound):
self.utilize_get_storage_block_get(storage_id)
def test_put_storage_block_id(self):
for storage_id in self.__class__.positive_cases:
self.utilize_put_storage_block_id(storage_id)
for storage_id in self.__class__.negative_cases:
with self.assertRaises(errors.HTTPBadRequestAPI):
self.utilize_put_storage_block_id(storage_id)
def test_get_storage_block_id_none_okay(self):
positive_cases, negative_cases = self.cases_with_none_okay()
for storage_id in positive_cases:
self.utilize_get_storage_block_get_none_okay(storage_id)
for storage_id in negative_cases:
with self.assertRaises(errors.HTTPNotFound):
self.utilize_get_storage_block_get_none_okay(storage_id)
def test_put_storage_block_id_none_okay(self):
positive_cases, negative_cases = self.cases_with_none_okay()
for storage_id in positive_cases:
self.utilize_put_storage_block_id_none_okay(storage_id)
for storage_id in negative_cases:
with self.assertRaises(errors.HTTPBadRequestAPI):
self.utilize_put_storage_block_id_none_okay(storage_id)
def test_storage_block_id_marker(self):
positive_cases, negative_cases = self.cases_with_none_okay()
for storage_id in positive_cases:
storage_id_req = TestRulesBase.build_request(params=[('marker',
storage_id)])
self.assertTrue(self.utilize_request(storage_id_req))
for storage_id in negative_cases:
storage_id_req = TestRulesBase.build_request(params=[('marker',
storage_id)])
with self.assertRaises(errors.HTTPNotFound):
self.utilize_request(storage_id_req, raiseme=True)
class TestFileRules(TestRulesBase):
    # Let's try to append some UUIDs and check for failures
positive_cases = [str(uuid.uuid4()) for _ in range(0, 1000)]
negative_cases = [
'',
'e7bf692b-ec7b-40ad-b0d1-45ce6798fb6z', # note trailing z
str(uuid.uuid4()).upper(), # Force case sensitivity
None
]
@validate(file_id=v.FileGetRule)
def utilize_file_id_get(self, file_id):
return True
@validate(file_id=v.FilePutRule)
def utilize_file_id_put(self, file_id):
return True
@validate(file_id=v.FilePostRule)
def utilize_file_id_post(self, file_id):
return True
@validate(file_id=v.FileGetRuleNoneOk)
def utilize_file_id_get_none_okay(self, file_id):
return True
@validate(file_id=v.FilePutRuleNoneOk)
def utilize_file_id_put_none_okay(self, file_id):
return True
@validate(file_id=v.FilePostRuleNoneOk)
def utilize_file_id_post_none_okay(self, file_id):
return True
@validate(req=v.RequestRule(v.FileMarkerRule))
def utilize_request(self, req, raiseme=False):
if raiseme:
raise RuntimeError('QUERY_STRING: {0}'.format(req.query_string))
else:
return True
def test_file_id(self):
for fileid in self.__class__.positive_cases:
v.val_file_id(fileid)
for fileid in self.__class__.negative_cases:
with self.assertRaises(ValidationFailed):
v.val_file_id()(fileid)
def test_get_file_id(self):
for file_id in self.__class__.positive_cases:
self.utilize_file_id_get(file_id)
for file_id in self.__class__.negative_cases:
with self.assertRaises(errors.HTTPNotFound):
self.utilize_file_id_get(file_id)
def test_put_file_id(self):
for file_id in self.__class__.positive_cases:
self.utilize_file_id_put(file_id)
for file_id in self.__class__.negative_cases:
with self.assertRaises(errors.HTTPBadRequestAPI):
self.utilize_file_id_put(file_id)
def test_post_file_id(self):
for file_id in self.__class__.positive_cases:
self.utilize_file_id_post(file_id)
for file_id in self.__class__.negative_cases:
with self.assertRaises(errors.HTTPBadRequestAPI):
self.utilize_file_id_post(file_id)
def test_get_file_id_none_okay(self):
positive_cases, negative_cases = self.cases_with_none_okay()
for file_id in positive_cases:
self.utilize_file_id_get_none_okay(file_id)
for file_id in negative_cases:
with self.assertRaises(errors.HTTPNotFound):
self.utilize_file_id_get_none_okay(file_id)
def test_put_file_id_none_okay(self):
positive_cases, negative_cases = self.cases_with_none_okay()
for file_id in positive_cases:
self.utilize_file_id_put_none_okay(file_id)
for file_id in negative_cases:
with self.assertRaises(errors.HTTPBadRequestAPI):
self.utilize_file_id_put_none_okay(file_id)
def test_post_file_id_none_okay(self):
positive_cases, negative_cases = self.cases_with_none_okay()
for file_id in positive_cases:
self.utilize_file_id_post_none_okay(file_id)
for file_id in negative_cases:
with self.assertRaises(errors.HTTPBadRequestAPI):
self.utilize_file_id_post_none_okay(file_id)
def test_file_id_marker(self):
positive_cases, negative_cases = self.cases_with_none_okay()
for file_id in positive_cases:
file_id_req = TestRulesBase.build_request(params=[('marker',
file_id)])
self.assertTrue(self.utilize_request(file_id_req))
for file_id in negative_cases:
file_id_req = TestRulesBase.build_request(params=[('marker',
file_id)])
with self.assertRaises(errors.HTTPNotFound):
self.utilize_request(file_id_req, raiseme=True)
class TestOffsetRules(TestRulesBase):
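    # Offsets are non-negative integer strings; signed, fractional or
    # non-numeric values must fail validation.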
positive_cases = [
'0', '1', '2', '3', '55', '100',
'101010', '99999999999999999999999999999'
]
negative_cases = [
'-1', '-23', 'O', 'zero', 'one', '-999', '1.0', '1.3',
'0.0000000000001',
None
]
@validate(req=v.RequestRule(v.OffsetMarkerRule))
def utilize_request(self, req, raiseme=False):
if raiseme:
raise RuntimeError('QUERY_STRING: {0}'.format(req.query_string))
else:
return True
def test_offset(self):
for offset in self.__class__.positive_cases:
v.val_offset()(offset)
for offset in self.__class__.negative_cases:
with self.assertRaises(ValidationFailed):
v.val_offset()(offset)
def test_offset_marker(self):
positive_cases, negative_cases = self.cases_with_none_okay()
for offset in positive_cases:
offset_req = TestRulesBase.build_request(params=[('marker',
offset)])
self.assertTrue(self.utilize_request(offset_req))
for offset in negative_cases:
offset_req = TestRulesBase.build_request(params=[('marker',
offset)])
with self.assertRaises(errors.HTTPNotFound):
self.utilize_request(offset_req, raiseme=True)
class TestLimitRules(TestRulesBase):
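    # Limits are non-negative integer strings; empty or None values are
    # accepted only where a rule explicitly allows them.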
positive_cases = [
'0', '100', '100000000', '100'
]
negative_cases = [
'-1', 'blah', None
]
@validate(req=v.RequestRule(v.LimitRule))
def utilize_request(self, req, raiseme=False):
if raiseme:
raise RuntimeError('QUERY_STRING: {0}'.format(req.query_string))
else:
return True
def test_limit(self):
for limit in self.__class__.positive_cases:
v.val_limit()(limit)
for limit in self.__class__.negative_cases:
with self.assertRaises(ValidationFailed):
v.val_limit()(limit)
v.val_limit(empty_ok=True)('')
v.val_limit(none_ok=True)(None)
with self.assertRaises(ValidationFailed):
v.val_limit()('')
with self.assertRaises(ValidationFailed):
v.val_limit()(None)
def test_limit_marker(self):
positive_cases, negative_cases = self.cases_with_none_okay()
for limit in positive_cases:
limit_req = TestRulesBase.build_request(params=[('limit',
limit)])
self.assertTrue(self.utilize_request(limit_req))
for limit in negative_cases:
limit_req = TestRulesBase.build_request(params=[('limit',
limit)])
with self.assertRaises(errors.HTTPNotFound):
self.utilize_request(limit_req, raiseme=True)
| apache-2.0 | 3,394,681,553,709,124,600 | 32.444104 | 79 | 0.589587 | false | 3.774456 | true | false | false |
googleapis/python-aiplatform | google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc_asyncio.py | 1 | 13902 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.aiplatform_v1beta1.types import migration_service
from google.longrunning import operations_pb2 # type: ignore
from .base import MigrationServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import MigrationServiceGrpcTransport
class MigrationServiceGrpcAsyncIOTransport(MigrationServiceTransport):
"""gRPC AsyncIO backend transport for MigrationService.
A service that migrates resources from automl.googleapis.com,
datalabeling.googleapis.com and ml.googleapis.com to Vertex AI.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
self_signed_jwt_kwargs = cls._get_self_signed_jwt_kwargs(host, scopes)
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
**self_signed_jwt_kwargs,
**kwargs,
)
def __init__(
self,
*,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def search_migratable_resources(
self,
) -> Callable[
[migration_service.SearchMigratableResourcesRequest],
Awaitable[migration_service.SearchMigratableResourcesResponse],
]:
r"""Return a callable for the search migratable resources method over gRPC.
Searches all of the resources in
automl.googleapis.com, datalabeling.googleapis.com and
ml.googleapis.com that can be migrated to Vertex AI's
given location.
Returns:
Callable[[~.SearchMigratableResourcesRequest],
Awaitable[~.SearchMigratableResourcesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "search_migratable_resources" not in self._stubs:
self._stubs["search_migratable_resources"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.MigrationService/SearchMigratableResources",
request_serializer=migration_service.SearchMigratableResourcesRequest.serialize,
response_deserializer=migration_service.SearchMigratableResourcesResponse.deserialize,
)
return self._stubs["search_migratable_resources"]
@property
def batch_migrate_resources(
self,
) -> Callable[
[migration_service.BatchMigrateResourcesRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the batch migrate resources method over gRPC.
Batch migrates resources from ml.googleapis.com,
automl.googleapis.com, and datalabeling.googleapis.com
to Vertex AI.
Returns:
Callable[[~.BatchMigrateResourcesRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "batch_migrate_resources" not in self._stubs:
self._stubs["batch_migrate_resources"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.MigrationService/BatchMigrateResources",
request_serializer=migration_service.BatchMigrateResourcesRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["batch_migrate_resources"]
__all__ = ("MigrationServiceGrpcAsyncIOTransport",)
| apache-2.0 | -2,154,680,416,297,731,300 | 43.700965 | 102 | 0.630557 | false | 4.663536 | false | false | false |
liiight/notifiers | tests/providers/test_mailgun.py | 1 | 2725 | import pytest
import datetime
import time
from email import utils
from notifiers.exceptions import BadArguments
from notifiers.core import FAILURE_STATUS
provider = "mailgun"
class TestMailgun:
def test_mailgun_metadata(self, provider):
assert provider.metadata == {
"base_url": "https://api.mailgun.net/v3/{domain}/messages",
"name": "mailgun",
"site_url": "https://documentation.mailgun.com/",
}
@pytest.mark.parametrize(
"data, message",
[
({}, "to"),
({"to": "foo"}, "domain"),
({"to": "foo", "domain": "bla"}, "api_key"),
({"to": "foo", "domain": "bla", "api_key": "bla"}, "from"),
(
{"to": "foo", "domain": "bla", "api_key": "bla", "from": "bbb"},
"message",
),
],
)
def test_mailgun_missing_required(self, data, message, provider):
data["env_prefix"] = "test"
with pytest.raises(BadArguments, match=f"'{message}' is a required property"):
provider.notify(**data)
@pytest.mark.online
def test_mailgun_sanity(self, provider, test_message):
provider.notify(message=test_message, raise_on_errors=True)
@pytest.mark.online
def test_mailgun_all_options(self, provider, tmpdir, test_message):
dir_ = tmpdir.mkdir("sub")
file_1 = dir_.join("hello.txt")
file_1.write("content")
file_2 = dir_.join("world.txt")
file_2.write("content")
now = datetime.datetime.now() + datetime.timedelta(minutes=3)
rfc_2822 = utils.formatdate(time.mktime(now.timetuple()))
data = {
"message": test_message,
"html": f"<b>{now}</b>",
"subject": f"{now}",
"attachment": [file_1.strpath, file_2.strpath],
"inline": [file_1.strpath, file_2.strpath],
"tag": ["foo", "bar"],
"dkim": True,
"deliverytime": rfc_2822,
"testmode": False,
"tracking": True,
"tracking_clicks": "htmlonly",
"tracking_opens": True,
"require_tls": False,
"skip_verification": True,
"headers": {"foo": "bar"},
"data": {"foo": {"bar": "bla"}},
}
provider.notify(**data, raise_on_errors=True)
def test_mailgun_error_response(self, provider):
data = {
"api_key": "FOO",
"message": "bla",
"to": "[email protected]",
"domain": "foo",
"from": "[email protected]",
}
rsp = provider.notify(**data)
assert rsp.status == FAILURE_STATUS
assert "Forbidden" in rsp.errors
| mit | -4,442,736,360,810,114,600 | 32.231707 | 86 | 0.519266 | false | 3.707483 | true | false | false |
davidnmurray/iris | lib/iris/io/__init__.py | 1 | 15278 | # (C) British Crown Copyright 2010 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Provides an interface to manage URI scheme support in iris.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
import glob
import os.path
import types
import re
import collections
import iris.fileformats
import iris.fileformats.dot
import iris.cube
import iris.exceptions
# Saving routines, indexed by file extension.
class _SaversDict(dict):
"""A dictionary that can only have string keys with no overlap."""
def __setitem__(self, key, value):
if not isinstance(key, six.string_types):
raise ValueError("key is not a string")
if key in self:
raise ValueError("A saver already exists for", key)
for k in self.keys():
if k.endswith(key) or key.endswith(k):
raise ValueError("key %s conflicts with existing key %s" % (key, k))
dict.__setitem__(self, key, value)
_savers = _SaversDict()
def run_callback(callback, cube, field, filename):
"""
Runs the callback mechanism given the appropriate arguments.
Args:
* callback:
A function to add metadata from the originating field and/or URI which
obeys the following rules:
1. Function signature must be: ``(cube, field, filename)``.
2. Modifies the given cube inplace, unless a new cube is
returned by the function.
3. If the cube is to be rejected the callback must raise
an :class:`iris.exceptions.IgnoreCubeException`.
.. note::
        It is possible that this function returns None for certain callbacks;
        the caller of this function should handle this case.
"""
if callback is None:
return cube
# Call the callback function on the cube, generally the function will
# operate on the cube in place, but it is also possible that the function
# will return a completely new cube instance.
try:
result = callback(cube, field, filename)
except iris.exceptions.IgnoreCubeException:
result = None
else:
if result is None:
result = cube
elif not isinstance(result, iris.cube.Cube):
raise TypeError("Callback function returned an "
"unhandled data type.")
return result
def decode_uri(uri, default='file'):
r'''
Decodes a single URI into scheme and scheme-specific parts.
In addition to well-formed URIs, it also supports bare file paths.
Both Windows and UNIX style paths are accepted.
.. testsetup::
from iris.io import *
Examples:
>>> from iris.io import decode_uri
>>> print(decode_uri('http://www.thing.com:8080/resource?id=a:b'))
('http', '//www.thing.com:8080/resource?id=a:b')
>>> print(decode_uri('file:///data/local/dataZoo/...'))
('file', '///data/local/dataZoo/...')
>>> print(decode_uri('/data/local/dataZoo/...'))
('file', '/data/local/dataZoo/...')
>>> print(decode_uri('file:///C:\data\local\dataZoo\...'))
('file', '///C:\\data\\local\\dataZoo\\...')
>>> print(decode_uri('C:\data\local\dataZoo\...'))
('file', 'C:\\data\\local\\dataZoo\\...')
>>> print(decode_uri('dataZoo/...'))
('file', 'dataZoo/...')
'''
# make sure scheme has at least 2 letters to avoid windows drives
# put - last in the brackets so it refers to the character, not a range
# reference on valid schemes: http://tools.ietf.org/html/std66#section-3.1
match = re.match(r"^([a-zA-Z][a-zA-Z0-9+.-]+):(.+)", uri)
if match:
scheme = match.group(1)
part = match.group(2)
else:
# Catch bare UNIX and Windows paths
scheme = default
part = uri
return scheme, part
def expand_filespecs(file_specs):
"""
Find all matching file paths from a list of file-specs.
Args:
* file_specs (iterable of string):
File paths which may contain '~' elements or wildcards.
Returns:
A list of matching file paths. If any of the file-specs matches no
existing files, an exception is raised.
"""
# Remove any hostname component - currently unused
filenames = [os.path.expanduser(fn[2:] if fn.startswith('//') else fn)
for fn in file_specs]
# Try to expand all filenames as globs
glob_expanded = {fn : sorted(glob.glob(fn)) for fn in filenames}
# If any of the specs expanded to an empty list then raise an error
value_lists = glob_expanded.values()
if not all(value_lists):
raise IOError("One or more of the files specified did not exist %s." %
["%s expanded to %s" % (pattern, expanded if expanded else "empty")
for pattern, expanded in six.iteritems(glob_expanded)])
return sum(value_lists, [])
def load_files(filenames, callback, constraints=None):
"""
Takes a list of filenames which may also be globs, and optionally a
constraint set and a callback function, and returns a
generator of Cubes from the given files.
.. note::
Typically, this function should not be called directly; instead, the
intended interface for loading is :func:`iris.load`.
"""
all_file_paths = expand_filespecs(filenames)
# Create default dict mapping iris format handler to its associated filenames
handler_map = collections.defaultdict(list)
for fn in all_file_paths:
with open(fn, 'rb') as fh:
handling_format_spec = iris.fileformats.FORMAT_AGENT.get_spec(os.path.basename(fn), fh)
handler_map[handling_format_spec].append(fn)
    # Call each iris format handler with the appropriate filenames
for handling_format_spec in sorted(handler_map):
fnames = handler_map[handling_format_spec]
if handling_format_spec.constraint_aware_handler:
for cube in handling_format_spec.handler(fnames, callback,
constraints):
yield cube
else:
for cube in handling_format_spec.handler(fnames, callback):
yield cube
def load_http(urls, callback):
"""
Takes a list of urls and a callback function, and returns a generator
of Cubes from the given URLs.
.. note::
Typically, this function should not be called directly; instead, the
intended interface for loading is :func:`iris.load`.
"""
# Create default dict mapping iris format handler to its associated filenames
handler_map = collections.defaultdict(list)
for url in urls:
handling_format_spec = iris.fileformats.FORMAT_AGENT.get_spec(url, None)
handler_map[handling_format_spec].append(url)
# Call each iris format handler with the appropriate filenames
for handling_format_spec in sorted(handler_map):
fnames = handler_map[handling_format_spec]
for cube in handling_format_spec.handler(fnames, callback):
yield cube
def _dot_save(cube, target):
# A simple wrapper for `iris.fileformats.dot.save` which allows the
# saver to be registered without triggering the import of
# `iris.fileformats.dot`.
import iris.fileformats.dot
return iris.fileformats.dot.save(cube, target)
def _dot_save_png(cube, target, **kwargs):
# A simple wrapper for `iris.fileformats.dot.save_png` which allows the
# saver to be registered without triggering the import of
# `iris.fileformats.dot`.
import iris.fileformats.dot
return iris.fileformats.dot.save_png(cube, target, **kwargs)
def _grib_save(cube, target, append=False, **kwargs):
# A simple wrapper for `iris.fileformats.grib.save_grib2` which
# allows the saver to be registered without having `gribapi`
# installed.
try:
import gribapi
except ImportError:
raise RuntimeError('Unable to save GRIB file - the ECMWF '
'`gribapi` package is not installed.')
return iris.fileformats.grib.save_grib2(cube, target, append, **kwargs)
def _check_init_savers():
# TODO: Raise a ticket to resolve the cyclic import error that requires
# us to initialise this on first use. Probably merge io and fileformats.
if "pp" not in _savers:
_savers.update({"pp": iris.fileformats.pp.save,
"nc": iris.fileformats.netcdf.save,
"dot": _dot_save,
"dotpng": _dot_save_png,
"grib2": _grib_save})
def add_saver(file_extension, new_saver):
"""
Add a custom saver to the Iris session.
Args:
* file_extension - A string such as "pp" or "my_format".
* new_saver - A function of the form ``my_saver(cube, target)``.
See also :func:`iris.io.save`
"""
# Make sure it's a func with 2+ args
if not hasattr(new_saver, "__call__") or new_saver.__code__.co_argcount < 2:
raise ValueError("Saver routines must be callable with 2+ arguments.")
# Try to add this saver. Invalid keys will be rejected.
_savers[file_extension] = new_saver
def find_saver(filespec):
"""
Find the saver function appropriate to the given filename or extension.
Args:
* filespec - A string such as "my_file.pp" or "PP".
Returns:
A save function or None.
Save functions can be passed to :func:`iris.io.save`.
"""
_check_init_savers()
matches = [ext for ext in _savers if filespec.lower().endswith('.' + ext) or
filespec.lower() == ext]
# Multiple matches could occur if one of the savers included a '.':
# e.g. _savers = {'.dot.png': dot_png_saver, '.png': png_saver}
if len(matches) > 1:
fmt = "Multiple savers found for %r: %s"
matches = ', '.join(map(repr, matches))
raise ValueError(fmt % (filespec, matches))
return _savers[matches[0]] if matches else None
def save(source, target, saver=None, **kwargs):
"""
Save one or more Cubes to file (or other writable).
Iris currently supports three file formats for saving, which it can
recognise by filename extension:
* netCDF - the Unidata network Common Data Format:
* see :func:`iris.fileformats.netcdf.save`
* GRIB2 - the WMO GRIdded Binary data format;
* see :func:`iris.fileformats.grib.save_grib2`
* PP - the Met Office UM Post Processing Format.
* see :func:`iris.fileformats.pp.save`
A custom saver can be provided to the function to write to a different
file format.
Args:
* source - A :class:`iris.cube.Cube`, :class:`iris.cube.CubeList` or
sequence of cubes.
* target - A filename (or writable, depending on file format).
When given a filename or file, Iris can determine the
file format.
Kwargs:
* saver - Optional. Specifies the save function to use.
If omitted, Iris will attempt to determine the format.
This keyword can be used to implement a custom save
format. Function form must be:
``my_saver(cube, target)`` plus any custom keywords. It
is assumed that a saver will accept an ``append`` keyword
              if its file format can handle multiple cubes. See also
:func:`iris.io.add_saver`.
All other keywords are passed through to the saver function; see the
relevant saver documentation for more information on keyword arguments.
Examples::
# Save a cube to PP
iris.save(my_cube, "myfile.pp")
# Save a cube list to a PP file, appending to the contents of the file
# if it already exists
iris.save(my_cube_list, "myfile.pp", append=True)
# Save a cube to netCDF, defaults to NETCDF4 file format
iris.save(my_cube, "myfile.nc")
# Save a cube list to netCDF, using the NETCDF4_CLASSIC storage option
iris.save(my_cube_list, "myfile.nc", netcdf_format="NETCDF3_CLASSIC")
.. warning::
Saving a cube whose data has been loaded lazily
(if `cube.has_lazy_data()` returns `True`) to the same file it expects
to load data from will cause both the data in-memory and the data on
disk to be lost.
.. code-block:: python
cube = iris.load_cube('somefile.nc')
# The next line causes data loss in 'somefile.nc' and the cube.
iris.save(cube, 'somefile.nc')
In general, overwriting a file which is the source for any lazily loaded
data can result in corruption. Users should proceed with caution when
attempting to overwrite an existing file.
"""
# Determine format from filename
if isinstance(target, six.string_types) and saver is None:
saver = find_saver(target)
elif hasattr(target, 'name') and saver is None:
saver = find_saver(target.name)
elif isinstance(saver, six.string_types):
saver = find_saver(saver)
if saver is None:
raise ValueError("Cannot save; no saver")
# Single cube?
if isinstance(source, iris.cube.Cube):
saver(source, target, **kwargs)
# CubeList or sequence of cubes?
elif (isinstance(source, iris.cube.CubeList) or
(isinstance(source, (list, tuple)) and
all([isinstance(i, iris.cube.Cube) for i in source]))):
# Only allow cubelist saving for those fileformats that are capable.
if not 'iris.fileformats.netcdf' in saver.__module__:
# Make sure the saver accepts an append keyword
if not "append" in saver.__code__.co_varnames:
raise ValueError("Cannot append cubes using saver function "
"'%s' in '%s'" %
(saver.__code__.co_name,
saver.__code__.co_filename))
# Force append=True for the tail cubes. Don't modify the incoming
# kwargs.
kwargs = kwargs.copy()
for i, cube in enumerate(source):
if i != 0:
kwargs['append'] = True
saver(cube, target, **kwargs)
# Netcdf saver.
else:
saver(source, target, **kwargs)
else:
raise ValueError("Cannot save; non Cube found in source")
| gpl-3.0 | -5,592,605,519,203,370,000 | 34.948235 | 99 | 0.624231 | false | 4.0894 | false | false | false |
darkfeline/mir.dlsite | setup.py | 1 | 1535 | # Copyright (C) 2016 Allen Li
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from setuptools import setup
def find_version(path):
with open(path) as f:
text = f.read()
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
text, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
setup(
name='mir.dlsite',
version=find_version('mir/dlsite/__init__.py'),
description='API for DLsite',
long_description='',
keywords='',
url='https://github.com/darkfeline/mir.dlsite',
author='Allen Li',
author_email='[email protected]',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Programming Language :: Python :: 3.6',
],
packages=['mir.dlsite'],
install_requires=[
'beautifulsoup4~=4.6',
'dataclasses==0.5',
'lxml~=4.0',
],
)
| apache-2.0 | 3,668,088,456,833,777,700 | 29.098039 | 74 | 0.636482 | false | 3.743902 | false | false | false |
UCSD-PL/kraken | reflex/coq/bench-quark/test/quark/output/output.py | 1 | 6774 | #!/usr/bin/python
import sys
import os
import tempfile
import gobject
import gtk
import socket
import shm
import threading
import time
import struct
import cairo
import array
import cPickle as pickle
import message
import config
#gtk.gdk.threads_init()
def olog(str):
olog_nonl(str + "\n")
def olog_nonl(str):
sys.stderr.write("O: " + str)
sys.stderr.flush()
class UI:
shm_obj = None
sem_obj = None
cr = None
pixbuf = None
rectangle = None
def redraw(self) :
print "redraw-----"
if self.sem_obj != None:
self.thread_lock.acquire()
try :
try :
self.sem_obj.P()
try :
shm_obj = self.shm_obj
size = struct.unpack_from("i", shm_obj.read(4,4*0))[0]
x = struct.unpack_from("i", shm_obj.read(4,4*1))[0]
y = struct.unpack_from("i", shm_obj.read(4,4*2))[0]
width = struct.unpack_from("i", shm_obj.read(4,4*3))[0]
height = struct.unpack_from("i", shm_obj.read(4,4*4))[0]
pixbufloader = gtk.gdk.PixbufLoader()
pixbufloader.write(shm_obj.read(size,4*5))
pixbufloader.close()
pixbuf = pixbufloader.get_pixbuf()
# shm_obj = self.shm_obj
# size = struct.unpack_from("i", shm_obj.read(4,4*0))[0]
# x = struct.unpack_from("i", shm_obj.read(4,4*1))[0]
# y = struct.unpack_from("i", shm_obj.read(4,4*2))[0]
# width = struct.unpack_from("i", shm_obj.read(4,4*3))[0]
# height = struct.unpack_from("i", shm_obj.read(4,4*4))[0]
# pixels = pickle.loads(shm_obj.read(size,4*5))
# pixbuf = gtk.gdk.pixbuf_new_from_array(pixels, gtk.gdk.COLORSPACE_RGB,8)
finally :
self.sem_obj.V()
pass
#print pixbuf.get_width()
#print pixbuf.get_height()
#print x
#print y
pixbuf.copy_area(0, 0, pixbuf.get_width(), pixbuf.get_height(), self.pixbuf, x, y)
self.rectangle = (x,y,width,height)
self.win.queue_draw_area(x,y, pixbuf.get_width(), pixbuf.get_height())
except TypeError:
olog("unexpected error:" + str(sys.exc_info()[0]))
pass
except :
olog("unexpected general error:" + str(sys.exc_info()[0]))
pass
finally:
self.thread_lock.release()
pass
def window_destroyed(self, widget, data=None):
#olog("window is destroyed")
gtk.main_quit()
def expose(self, widget, event):
# Load Cairo drawing context.
self.thread_lock.acquire()
try :
if self.pixbuf <> None :
area = event.area
#olog("x,y,width,height = %d %d %d %d" % (area.x, area.y, area.width, area.height))
self.pixbuf.render_to_drawable(self.win.window, gtk.gdk.GC(self.win.window), area.x, area.y, area.x, area.y, area.width, area.height)
# if self.rectangle <> None:
# cr = widget.window.cairo_create()
# cr.set_line_width(1)
# cr.set_source_rgb(255, 0, 0)
# cr.rectangle(self.rectangle[0], self.rectangle[1], self.rectangle[2], self.rectangle[3])
# cr.stroke()
finally:
self.thread_lock.release()
def handle_input(self, source, condition):
#olog("handle_input:")
m = self.message_handler.recv()
if m[0] == message.RenderCompleted :
# load a new shared memory
#olog("display msg is received")
shmid = int(m[1])
if self.shm_obj <> None:
if self.shm_obj.shmid == shmid :
self.redraw()
else:
self.thread_lock.acquire()
try :
self.shm_obj.detach()
self.shm_obj = shm.memory(shmid)
self.sem_obj = shm.semaphore(shm.getsemid(shmid))
self.shm_obj.attach()
finally:
self.thread_lock.release()
else :
self.thread_lock.acquire()
try :
self.shm_obj = shm.memory(shmid)
self.sem_obj = shm.semaphore(shm.getsemid(shmid))
self.shm_obj.attach()
finally:
self.thread_lock.release()
else :
sys.stderr.write('invalid event type\n')
sys.exit(1)
gobject.io_add_watch(self.soc.fileno(), gobject.IO_IN, self.handle_input)
return False
def handle_hup(self, source, condition):
gtk.main_quit()
return False
def main(self):
self.thread_lock = threading.Lock()
self.shm_obj = None
self.sem_obj = None
self.message_handler = message.MessageHandler()
self.soc = self.message_handler.KCHAN
# socket.fromfd(int(sys.argv[1]), msg.FAMILY, msg.TYPE)
gobject.io_add_watch(self.soc.fileno(), gobject.IO_IN, self.handle_input)
gobject.io_add_watch(self.soc.fileno(), gobject.IO_HUP, self.handle_hup)
window = gtk.Window() #gtk.WINDOW_TOPLEVEL)
window.set_decorated(False)
window.set_app_paintable(True)
screen = window.get_screen()
rgba = screen.get_rgba_colormap()
window.set_colormap(rgba)
window.set_title("Quark Web Browser Output")
vsize = config.ydimension
window.set_default_size(1100, vsize - 200)
#window.set_keep_above(True)
window.set_decorated(False)
window.connect("destroy", self.window_destroyed)
window.connect('expose-event', self.expose)
#window.move(100,300)
window.move(63,143)
self.win = window
window.show_all()
(x,y,width,height,depth) = self.win.window.get_geometry()
#width = 4000
#height = 4000
#self.pixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8, width, height)
self.pixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8, width, height)
gtk.main()
def curr_tab(self):
return self.tabs[self.curr]
UI().main()
| gpl-2.0 | 2,311,913,955,336,338,400 | 35.419355 | 149 | 0.50059 | false | 3.659643 | false | false | false |
Flexget/Flexget | flexget/event.py | 1 | 3312 | """
Provides a small event framework
"""
from typing import Callable, List, Dict, Any
from loguru import logger
logger = logger.bind(name='event')
class Event:
"""Represents one registered event."""
def __init__(self, name: str, func: Callable, priority: int = 128) -> None:
self.name = name
self.func = func
self.priority = priority
def __call__(self, *args, **kwargs):
return self.func(*args, **kwargs)
def __eq__(self, other):
return self.priority == other.priority
def __lt__(self, other):
return self.priority < other.priority
def __gt__(self, other):
return self.priority > other.priority
def __str__(self):
return f'<Event(name={self.name},func={self.func.__name__},priority={self.priority})>'
__repr__ = __str__
def __hash__(self):
return hash((self.name, self.func, self.priority))
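# Registry of event handlers, keyed by event name.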
_events: Dict[str, List[Event]] = {}
def event(name: str, priority: int = 128) -> Callable[[Callable], Callable]:
"""Register event to function with a decorator"""
def decorator(func: Callable) -> Callable:
add_event_handler(name, func, priority)
return func
return decorator
def get_events(name: str) -> List[Event]:
"""
:param String name: event name
:return: List of :class:`Event` for *name* ordered by priority
"""
if name not in _events:
raise KeyError('No such event %s' % name)
_events[name].sort(reverse=True)
return _events[name]
def add_event_handler(name: str, func: Callable, priority: int = 128) -> Event:
"""
:param string name: Event name
:param function func: Function that acts as event handler
:param priority: Priority for this hook
:return: Event created
:rtype: Event
:raises Exception: If *func* is already registered in an event
"""
events = _events.setdefault(name, [])
for event in events:
if event.func == func:
raise ValueError(
'%s has already been registered as event listener under name %s'
% (func.__name__, name)
)
logger.trace('registered function {} to event {}', func.__name__, name)
event = Event(name, func, priority)
events.append(event)
return event
def remove_event_handlers(name: str) -> None:
"""Removes all handlers for given event `name`."""
_events.pop(name, None)
def remove_event_handler(name: str, func: Callable) -> None:
"""Remove `func` from the handlers for event `name`."""
for e in list(_events.get(name, [])):
if e.func is func:
_events[name].remove(e)
def fire_event(name: str, *args, **kwargs) -> Any:
"""
Trigger an event with *name*. If event is not hooked by anything nothing happens. If a function that hooks an event
returns a value, it will replace the first argument when calling next function.
:param name: Name of event to be called
:param args: List of arguments passed to handler function
:param kwargs: Key Value arguments passed to handler function
"""
if name in _events:
for event in get_events(name):
result = event(*args, **kwargs)
if result is not None:
args = (result,) + args[1:]
return args and args[0]
| mit | 3,774,398,002,613,047,300 | 28.571429 | 119 | 0.617452 | false | 3.952267 | false | false | false |
initcrash/transdb | transdb/fields.py | 1 | 4827 | from django.db import models
from django.conf import settings
from django.utils.translation import get_language
from django.utils.translation import ugettext as _
from django.utils.encoding import force_unicode, smart_str, smart_unicode
from django.forms.fields import Field
from django.forms import ValidationError
from widgets import TransCharWidget, TransTextWidget
def get_default_language_name():
'''
Get language from default language specified by LANGUAGE_CODE in settings
Used in error messages
'''
lang_name = ''
for lang in settings.LANGUAGES:
if lang[0] == settings.LANGUAGE_CODE:
lang_name = lang[1]
break
return force_unicode(lang_name)
class TransDbValue(unicode):
'''
This class implements a unicode string, but with a hidden attribute raw_data.
When used as a string it returns the translation of the current language
raw_data attribute stores a dictionary with all translations
    Also implements a method "get_in_language(language)" that returns the translation in any available language
'''
raw_data = {}
def get_in_language(self, language):
if self.raw_data and self.raw_data.has_key(language):
return self.raw_data[language]
else:
return u''
def set_in_language(self, language, value):
self.raw_data[language] = value
class TransFormField(Field):
'''
    forms field, used when ModelForm (or deprecated form_for_model/form_for_instance) is called
Also implements form validation in admin
'''
def clean(self, value):
if isinstance(value, dict) and self.required:
filled_value = [ v for v in value.values() if bool(v) ]
if not filled_value:
raise ValidationError, _("This field is required.")
return super(TransFormField, self).clean(value)
class TransField(models.Field):
'''
Model field to be subclassed
    Used for storing a string in many languages in the database (with python's dictionary format)
    pickle module could be used, but wouldn't allow search on fields?
'''
def get_internal_type(self):
return 'TextField'
def to_python(self, value):
if isinstance(value, TransDbValue):
return value
if isinstance(value, dict): # formfield method makes this function be called with value as a dict
python_value = value
else:
try:
python_value = eval(value)
for k,v in python_value.items():
python_value[k] = smart_unicode(v)
except Exception:
python_value = None
if isinstance(python_value, dict):
if python_value.has_key(get_language()) and python_value[get_language()]:
result = TransDbValue(python_value[get_language()])
elif python_value.has_key(settings.LANGUAGE_CODE) and python_value[settings.LANGUAGE_CODE]:
result = TransDbValue(python_value[settings.LANGUAGE_CODE])
else:
val = "bal"
for item in python_value.items():
try:
val = item[1]
except:
pass
if val: break
result = TransDbValue(python_value.items()[0][1])
result.raw_data = python_value
else:
result = TransDbValue(value)
result.raw_data = {settings.LANGUAGE_CODE: value}
return result
def get_db_prep_save(self, value):
if not isinstance(value, TransDbValue):
return value
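        # Serialise every translation into a python dict literal such as
        # {'en': '''Hello''', 'fr': '''Bonjour'''}; to_python() evals it back.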
value = [u"'%s': '''%s'''" % (k, v) for k, v in value.raw_data.items()]
value = u'{%s}' % u','.join(value)
return smart_str(value)
def formfield(self, **kwargs):
defaults = {'form_class': TransFormField}
defaults.update(kwargs)
return super(TransField, self).formfield(**defaults)
def flatten_data(self, follow, obj=None):
'''
for serializing objects
'''
raw_data = self._get_val_from_obj(obj).raw_data.copy()
for k,v in raw_data.items():
raw_data[k] = smart_str(v)
return {self.attname: raw_data}
class TransCharField(TransField):
'''
TransField used with CharField widget
'''
__metaclass__ = models.SubfieldBase
def formfield(self, **kwargs):
kwargs['widget'] = TransCharWidget
return super(TransCharField, self).formfield(**kwargs)
class TransTextField(TransField):
'''
    TransField used with TextField widget
'''
__metaclass__ = models.SubfieldBase
def formfield(self, **kwargs):
kwargs['widget'] = TransTextWidget
return super(TransTextField, self).formfield(**kwargs)
| gpl-3.0 | 5,646,233,992,082,309,000 | 34.755556 | 111 | 0.618189 | false | 4.275465 | false | false | false |
opennode/nodeconductor-openstack | src/waldur_openstack/openstack_tenant/migrations/0008_backup_schedule.py | 1 | 2253 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import waldur_core.logging.loggers
import django.utils.timezone
import waldur_core.core.fields
import waldur_core.core.validators
class Migration(migrations.Migration):
dependencies = [
('openstack_tenant', '0007_backup_backuprestoration'),
]
operations = [
migrations.CreateModel(
name='BackupSchedule',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('description', models.CharField(max_length=500, verbose_name='description', blank=True)),
('name', models.CharField(max_length=150, verbose_name='name', validators=[waldur_core.core.validators.validate_name])),
('uuid', waldur_core.core.fields.UUIDField()),
('error_message', models.TextField(blank=True)),
('schedule', waldur_core.core.fields.CronScheduleField(max_length=15, validators=[waldur_core.core.validators.validate_cron_schedule, waldur_core.core.validators.MinCronValueValidator(1)])),
('next_trigger_at', models.DateTimeField(null=True)),
('timezone', models.CharField(default=django.utils.timezone.get_current_timezone_name, max_length=50)),
('is_active', models.BooleanField(default=False)),
('retention_time', models.PositiveIntegerField(help_text=b'Retention time in days, if 0 - backup will be kept forever')),
('maximal_number_of_backups', models.PositiveSmallIntegerField()),
('instance', models.ForeignKey(related_name='backup_schedules', to='openstack_tenant.Instance')),
],
options={
'abstract': False,
},
bases=(models.Model, waldur_core.logging.loggers.LoggableMixin),
),
migrations.AddField(
model_name='backup',
name='backup_schedule',
field=models.ForeignKey(related_name='backups', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='openstack_tenant.BackupSchedule', null=True),
),
]
| mit | 3,486,332,113,417,544,700 | 49.066667 | 206 | 0.640923 | false | 4.156827 | false | false | false |
jocelynmass/nrf51 | sdk/nRF51_SDK_9.0.0_2e23562/examples/dfu/experimental/master_control_panel_patch/init_packet.py | 1 | 6654 | # Copyright (c) 2015, Nordic Semiconductor
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of Nordic Semiconductor ASA nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from enum import Enum
import struct
INIT_PACKET_USES_CRC16 = 0
INIT_PACKET_USES_HASH = 1
INIT_PACKET_EXT_USES_ECDS = 2
class PacketField(Enum):
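    # The numeric value of each field determines the order in which it is
    # packed into the init packet (see Packet.generate_packet).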
PACKET_VERSION = 1
COMPRESSION_TYPE = 2
DEVICE_TYPE = 3
DEVICE_REVISION = 4
APP_VERSION = 5
REQUIRED_SOFTDEVICES_ARRAY = 6
OPT_DATA = 7
NORDIC_PROPRIETARY_OPT_DATA_EXT_PACKET_ID = 8
NORDIC_PROPRIETARY_OPT_DATA_FIRMWARE_LENGTH = 9
NORDIC_PROPRIETARY_OPT_DATA_FIRMWARE_HASH = 10
NORDIC_PROPRIETARY_OPT_DATA_FIRMWARE_CRC16 = 11
NORDIC_PROPRIETARY_OPT_DATA_INIT_PACKET_ECDS = 12
class Packet(object):
"""
Class that implements the INIT packet format.
http://developer.nordicsemi.com/nRF51_SDK/doc/7.1.0/s110/html/a00065.html
"""
UNSIGNED_SHORT = "H"
UNSIGNED_INT = "I"
UNSIGNED_CHAR = "B"
CHAR_ARRAY = "s"
def __init__(self, init_packet_fields):
"""
:param init_packet_fields: Dictionary with packet fields
"""
self.init_packet_fields = init_packet_fields
def generate_packet(self):
"""
Generates a binary packet from provided init_packet_fields provided in constructor.
This version includes the extended data
:return str: Returns a string representing the init_packet (in binary)
"""
# Create struct format string based on keys that are
# present in self.init_packet_fields
format_string = self.__generate_struct_format_string()
args = []
for key in sorted(self.init_packet_fields.keys(), key=lambda x: x.value):
# Add length to fields that required that
if key in [PacketField.REQUIRED_SOFTDEVICES_ARRAY,
PacketField.OPT_DATA]:
args.append(len(self.init_packet_fields[key]))
args.extend(self.init_packet_fields[key])
elif key in [PacketField.NORDIC_PROPRIETARY_OPT_DATA_EXT_PACKET_ID]:
args.append(self.init_packet_fields[key]) # Extended packet id format
elif key in [PacketField.NORDIC_PROPRIETARY_OPT_DATA_FIRMWARE_LENGTH]:
args.append(self.init_packet_fields[key]) # Length of firmware image
elif key in [PacketField.NORDIC_PROPRIETARY_OPT_DATA_FIRMWARE_CRC16]:
args.append(self.init_packet_fields[key]) # CRC-16
elif key in [PacketField.NORDIC_PROPRIETARY_OPT_DATA_FIRMWARE_HASH]:
args.append(self.init_packet_fields[key]) # SHA-256 hash of firmware image
elif key in [PacketField.NORDIC_PROPRIETARY_OPT_DATA_INIT_PACKET_ECDS]:
                args.append(self.init_packet_fields[key]) # ECDS of base init packet using Curve P-256 and SHA-256
else:
args.append(self.init_packet_fields[key])
return struct.pack(format_string, *args)
def __generate_struct_format_string(self):
format_string = "<" # Use little endian format with standard sizes for python,
# see https://docs.python.org/2/library/struct.html
for key in sorted(self.init_packet_fields.keys(), key=lambda x: x.value):
if key in [PacketField.PACKET_VERSION,
PacketField.COMPRESSION_TYPE,
PacketField.DEVICE_TYPE,
PacketField.DEVICE_REVISION,
]:
format_string += Packet.UNSIGNED_SHORT
elif key in [PacketField.APP_VERSION]:
format_string += Packet.UNSIGNED_INT
elif key in [PacketField.REQUIRED_SOFTDEVICES_ARRAY]:
array_elements = self.init_packet_fields[key]
format_string += Packet.UNSIGNED_SHORT # Add length field to format packet
for _ in range(len(array_elements)):
format_string += Packet.UNSIGNED_SHORT
elif key in [PacketField.OPT_DATA]:
format_string += Packet.UNSIGNED_SHORT # Add length field to optional data
format_string += "{0}{1}".format(len(self.init_packet_fields[key]), Packet.CHAR_ARRAY)
elif key in [PacketField.NORDIC_PROPRIETARY_OPT_DATA_EXT_PACKET_ID]:
format_string += Packet.UNSIGNED_INT # Add the extended packet id field
elif key == PacketField.NORDIC_PROPRIETARY_OPT_DATA_FIRMWARE_LENGTH:
format_string += Packet.UNSIGNED_INT # Add the firmware length field
elif key == PacketField.NORDIC_PROPRIETARY_OPT_DATA_FIRMWARE_HASH:
format_string += "32{0}".format(Packet.CHAR_ARRAY) # SHA-256 requires 32 bytes
elif key == PacketField.NORDIC_PROPRIETARY_OPT_DATA_FIRMWARE_CRC16:
format_string += Packet.UNSIGNED_SHORT
elif key == PacketField.NORDIC_PROPRIETARY_OPT_DATA_INIT_PACKET_ECDS:
format_string += "64{0}".format(Packet.CHAR_ARRAY) # ECDS based on P-256 using SHA-256 requires 64 bytes
return format_string | gpl-2.0 | 6,692,166,544,894,411,000 | 45.884892 | 121 | 0.651488 | false | 3.930301 | false | false | false |
HPI-SWA-Lab/RSqueak | rsqueakvm/plugins/immutability_plugin.py | 1 | 3782 | """
RSqueak/VM plugin which provides support for immutable objects.
Immutable objects can be created as a copy of existing objects
or from a list of arguments. The package `ImmutableObjects`, located in
`/repository`, needs to be loaded in the image.
"""
from rsqueakvm.error import PrimitiveFailedError, UnwrappingError
from rsqueakvm.model.variable import W_BytesObject, W_WordsObject
from rsqueakvm.plugins.immutability import patch_w_object
from rsqueakvm.plugins.immutability.bytes import W_Immutable_BytesObject
from rsqueakvm.plugins.immutability.pointers import (
select_immutable_pointers_class)
from rsqueakvm.plugins.immutability.words import W_Immutable_WordsObject
from rsqueakvm.plugins.plugin import Plugin
from rsqueakvm.storage_classes import BYTES, POINTERS, WORDS
class ImmutabilityPlugin(Plugin):
def setup(self):
patch_w_object()
plugin = ImmutabilityPlugin()
@plugin.expose_primitive(unwrap_spec=[object])
def primitiveIsImmutable(interp, s_frame, w_recv):
"""
Tests if `w_recv` is an immutable object.
:param interp: The interpreter proxy.
:param s_frame: The stack frame.
:param w_recv: The receiver object.
    :returns: `w_true` if `w_recv` is an immutable object, otherwise `w_false`.
"""
if w_recv.is_immutable():
return interp.space.w_true
return interp.space.w_false
@plugin.expose_primitive(unwrap_spec=[object, object])
def primitiveImmutableFrom(interp, s_frame, w_cls, w_obj):
"""
Creates an immutable copy of a given Smalltalk object.
:param interp: The interpreter proxy.
:param s_frame: The stack frame.
    :param w_cls: The immutable object's target class.
:param w_obj: The Smalltalk object to produce an immutable copy from.
:returns: An immutable copy of `w_obj` with class `w_cls`.
:raises: PrimitiveFailedError
"""
space = interp.space
instance_kind = w_cls.as_class_get_shadow(space).get_instance_kind()
if instance_kind == POINTERS:
pointers = w_obj.fetch_all(space)
cls = select_immutable_pointers_class(pointers)
return cls(space, w_cls, pointers)
elif instance_kind == BYTES and isinstance(w_obj, W_BytesObject):
return W_Immutable_BytesObject(space, w_cls, w_obj.bytes)
elif instance_kind == WORDS and isinstance(w_obj, W_WordsObject):
return W_Immutable_WordsObject(space, w_cls, w_obj.words)
raise PrimitiveFailedError
@plugin.expose_primitive(unwrap_spec=None)
def primitiveImmutableFromArgs(interp, s_frame, argcount):
"""
Returns an immutable instance of the receiver (which is a class) with
all fields initialized with the arguments given.
:param interp: The interpreter proxy.
:param s_frame: The stack frame.
:param argcount: The number of arguments.
:returns: An immutable object.
:raises: PrimitiveFailedError
"""
if argcount == 0:
raise PrimitiveFailedError
w_args = s_frame.pop_and_return_n(argcount)[:]
w_cls = s_frame.pop()
space = interp.space
instance_kind = w_cls.as_class_get_shadow(space).get_instance_kind()
if instance_kind == POINTERS:
cls = select_immutable_pointers_class(w_args)
return cls(space, w_cls, w_args)
elif instance_kind == BYTES:
try:
bytes = [chr(interp.space.unwrap_uint(b)) for b in w_args]
except (ValueError, TypeError, UnwrappingError):
raise PrimitiveFailedError
return W_Immutable_BytesObject(space, w_cls, bytes)
elif instance_kind == WORDS:
try:
words = [interp.space.unwrap_uint(b) for b in w_args]
except UnwrappingError:
raise PrimitiveFailedError
return W_Immutable_WordsObject(space, w_cls, words)
raise PrimitiveFailedError
| bsd-3-clause | 4,435,581,936,282,197,000 | 35.019048 | 76 | 0.705447 | false | 3.629559 | false | false | false |
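# --- Editor's illustration (hedged sketch, not part of the original file) ---
# The primitives above "freeze" an object by copying its fields into an
# immutable counterpart chosen by storage kind (pointers/bytes/words).  A very
# rough pure-Python analogue of that freeze-by-copying idea:
class FrozenFields(object):
    __slots__ = ('_fields',)

    def __init__(self, fields):
        object.__setattr__(self, '_fields', tuple(fields))

    def __setattr__(self, name, value):
        raise TypeError('immutable object')

    def fields(self):
        return self._fields

frozen = FrozenFields([1, 2, 3])
assert frozen.fields() == (1, 2, 3)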
Cowa/Django-FileManager | FileManager/settings.py | 1 | 5489 | # Django settings for FileManagerHTML project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'fileManagerDB', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Paris'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-EN'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
MEDIA_ROOT = '/home/brice/Programmation/Django-FileManager/media'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = 'media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '(!f!+ygp*i+75v9nvqxrgnk@tt36t+v3%ppdlshos95ct4z74f'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'FileManager.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'FileManager.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'manager',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| gpl-2.0 | -9,046,051,876,094,814,000 | 33.740506 | 127 | 0.692476 | false | 3.74165 | false | false | false |
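# --- Editor's note (hedged sketch, not part of the original file) ---
# MEDIA_ROOT above is an absolute, machine-specific path.  A more portable
# pattern derives paths from the settings module's own location, e.g.:
import os

_BASE_DIR = os.path.dirname(os.path.abspath(__file__))
_MEDIA_ROOT_PORTABLE = os.path.join(_BASE_DIR, 'media')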
Frodox/buildbot | master/buildbot/db/builders.py | 1 | 5881 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
import sqlalchemy as sa
from twisted.internet import defer
from buildbot.db import base
class BuildersConnectorComponent(base.DBConnectorComponent):
def findBuilderId(self, name, autoCreate=True):
tbl = self.db.model.builders
name_hash = self.hashColumns(name)
return self.findSomethingId(
tbl=tbl,
whereclause=(tbl.c.name_hash == name_hash),
insert_values=dict(
name=name,
name_hash=name_hash,
), autoCreate=autoCreate)
@defer.inlineCallbacks
def updateBuilderInfo(self, builderid, description, tags):
# convert to tag IDs first, as necessary
def toTagid(tag):
if isinstance(tag, type(1)):
return defer.succeed(tag)
ssConnector = self.master.db.tags
return ssConnector.findTagId(tag)
tagsids = [r[1] for r in (yield defer.DeferredList(
[toTagid(tag) for tag in tags],
fireOnOneErrback=True,
consumeErrors=True))]
def thd(conn):
builders_tbl = self.db.model.builders
builders_tags_tbl = self.db.model.builders_tags
transaction = conn.begin()
q = builders_tbl.update(
whereclause=(builders_tbl.c.id == builderid))
conn.execute(q, description=description).close()
# remove previous builders_tags
conn.execute(builders_tags_tbl.delete(
whereclause=((builders_tags_tbl.c.builderid == builderid)))).close()
# add tag ids
if tagsids:
conn.execute(builders_tags_tbl.insert(),
[dict(builderid=builderid, tagid=tagid)
for tagid in tagsids]).close()
transaction.commit()
defer.returnValue((yield self.db.pool.do(thd)))
def getBuilder(self, builderid):
d = self.getBuilders(_builderid=builderid)
@d.addCallback
def first(bldrs):
if bldrs:
return bldrs[0]
return None
return d
def addBuilderMaster(self, builderid=None, masterid=None):
def thd(conn, no_recurse=False):
try:
tbl = self.db.model.builder_masters
q = tbl.insert()
conn.execute(q, builderid=builderid, masterid=masterid)
except (sa.exc.IntegrityError, sa.exc.ProgrammingError):
pass
return self.db.pool.do(thd)
def removeBuilderMaster(self, builderid=None, masterid=None):
def thd(conn, no_recurse=False):
tbl = self.db.model.builder_masters
conn.execute(tbl.delete(
whereclause=((tbl.c.builderid == builderid) &
(tbl.c.masterid == masterid))))
return self.db.pool.do(thd)
def getBuilders(self, masterid=None, _builderid=None):
def thd(conn):
bldr_tbl = self.db.model.builders
bm_tbl = self.db.model.builder_masters
j = bldr_tbl.outerjoin(bm_tbl)
# if we want to filter by masterid, we must join to builder_masters
# again, so we can still get the full set of masters for each
# builder
if masterid is not None:
limiting_bm_tbl = bm_tbl.alias('limiting_bm')
j = j.join(limiting_bm_tbl,
onclause=(bldr_tbl.c.id == limiting_bm_tbl.c.builderid))
q = sa.select(
[bldr_tbl.c.id, bldr_tbl.c.name,
bldr_tbl.c.description, bm_tbl.c.masterid],
from_obj=[j],
order_by=[bldr_tbl.c.id, bm_tbl.c.masterid])
if masterid is not None:
# filter the masterid from the limiting table
q = q.where(limiting_bm_tbl.c.masterid == masterid)
if _builderid is not None:
q = q.where(bldr_tbl.c.id == _builderid)
# now group those by builderid, aggregating by masterid
rv = []
last = None
for row in conn.execute(q).fetchall():
# pylint: disable=unsubscriptable-object
if not last or row['id'] != last['id']:
last = self._thd_row2dict(conn, row)
rv.append(last)
if row['masterid']:
last['masterids'].append(row['masterid'])
return rv
return self.db.pool.do(thd)
def _thd_row2dict(self, conn, row):
# get tags
builders_tags = self.db.model.builders_tags
tags = self.db.model.tags
from_clause = tags
from_clause = from_clause.join(builders_tags)
q = sa.select([tags.c.name],
(builders_tags.c.builderid == row.id)).select_from(from_clause)
tags = [r.name for r in
conn.execute(q).fetchall()]
return dict(id=row.id, name=row.name, masterids=[],
description=row.description,
tags=tags)
| gpl-2.0 | -5,907,836,894,642,851,000 | 37.437908 | 85 | 0.577113 | false | 3.879288 | false | false | false |
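# --- Editor's illustration (hedged sketch, not part of the original file) ---
# getBuilders() above walks rows ordered by (builder id, master id) and folds
# consecutive rows for one builder into a single dict that aggregates the
# master ids.  The same grouping pattern on plain dicts:
def group_rows(rows):
    grouped, last = [], None
    for row in rows:  # rows assumed sorted by 'id'
        if last is None or row['id'] != last['id']:
            last = {'id': row['id'], 'masterids': []}
            grouped.append(last)
        if row['masterid'] is not None:
            last['masterids'].append(row['masterid'])
    return grouped

assert group_rows([{'id': 1, 'masterid': 7},
                   {'id': 1, 'masterid': 8},
                   {'id': 2, 'masterid': None}]) == [
    {'id': 1, 'masterids': [7, 8]},
    {'id': 2, 'masterids': []}]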
ssorj/pencil | setup.py | 1 | 1079 | #!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from distutils.core import setup
setup(name="pencil",
version="1",
url="http://www.ssorj.net/projects/pencil.html",
author="Justin Ross",
author_email="[email protected]",
py_modules=["pencil"],
package_dir={"": "python"})
| apache-2.0 | -6,843,731,593,604,329,000 | 36.206897 | 62 | 0.728452 | false | 3.895307 | false | false | false |
apanda/modeling | tests/examples/ConvertedAclFwTest.py | 1 | 1751 | import components
def ConvertedAclFwTest ():
"""ACL firewall test"""
ctx = components.Context (['a', 'b', 'c', 'd', 'fw'],\
['ip_a', 'ip_b', 'ip_c', 'ip_d', 'ip_f'])
net = components.Network (ctx)
a = components.EndHost(ctx.a, net, ctx)
b = components.EndHost(ctx.b, net, ctx)
c = components.EndHost(ctx.c, net, ctx)
d = components.EndHost(ctx.d, net, ctx)
fw = components.ConvertedAclFw(ctx.fw, net, ctx)
net.setAddressMappings([(a, ctx.ip_a), \
(b, ctx.ip_b), \
(c, ctx.ip_c), \
(d, ctx.ip_d), \
(fw, ctx.ip_f)])
addresses = [ctx.ip_a, ctx.ip_b, ctx.ip_c, ctx.ip_d, ctx.ip_f]
net.RoutingTable(a, [(x, fw) for x in addresses])
net.RoutingTable(b, [(x, fw) for x in addresses])
net.RoutingTable(c, [(x, fw) for x in addresses])
net.RoutingTable(d, [(x, fw) for x in addresses])
#net.SetGateway(a, fw)
#net.SetGateway(b, fw)
#net.SetGateway(c, fw)
#net.SetGateway(d, fw)
net.RoutingTable(fw, [(ctx.ip_a, a), \
(ctx.ip_b, b), \
(ctx.ip_c, c), \
(ctx.ip_d, d)])
fw.AddAcls([(ctx.ip_a, ctx.ip_b), (ctx.ip_c, ctx.ip_d)])
net.Attach(a, b, c, d, fw)
endhosts = [a, b, c, d]
class AclFwReturn (object):
def __init__ (self, net, ctx, a, b, c, d, fw):
self.net = net
self.ctx = ctx
self.a = a
self.b = b
self.c = c
self.d = d
self.fw = fw
self.check = components.PropertyChecker (ctx, net)
return AclFwReturn(net, ctx, a, b, c, d, fw)
| bsd-3-clause | -7,122,530,099,745,691,000 | 38.795455 | 71 | 0.475157 | false | 3.003431 | false | false | false |
bufke/chat-experiment | chatroom/models.py | 1 | 1963 | from django.db import models
from swampdragon.models import SelfPublishModel
from allauth.account.signals import user_signed_up
from .dragon_serializers import MessageSerializer, ProfileSerializer
class Profile(SelfPublishModel, models.Model):
serializer_class = ProfileSerializer
user = models.OneToOneField('auth.User', primary_key=True)
display_name = models.CharField(max_length=100)
is_online = models.BooleanField(default=False)
status = models.CharField(max_length=255, blank=True)
def __str__(self):
return self.display_name
@staticmethod
def create_profile(request, user, **kwargs):
return Profile.objects.create(
user=user,
display_name='{}.{}'.format(
user.first_name, user.last_name).strip('.'),
)
user_signed_up.connect(Profile.create_profile)
class Organization(models.Model):
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
users = models.ManyToManyField(Profile)
def __str__(self):
return self.name
class Room(models.Model):
name = models.CharField(max_length=75)
organization = models.ManyToManyField(Organization, blank=True)
users = models.ManyToManyField(
Profile,
help_text="Users in this room. May include non organization users.")
is_active = models.BooleanField(default=True)
add_by_default = models.BooleanField(
default=True,
help_text="Organization users will automatically join this room.")
def __str__(self):
return self.name
class Message(SelfPublishModel, models.Model):
serializer_class = MessageSerializer
user = models.ForeignKey('auth.User')
text = models.TextField()
posted = models.DateTimeField(auto_now_add=True)
room = models.ForeignKey(Room)
class Meta:
ordering = ['-posted']
def __str__(self):
return '{}: {}'.format(self.user, self.text)
| agpl-3.0 | -5,963,895,665,154,620,000 | 30.15873 | 76 | 0.683647 | false | 4.072614 | false | false | false |
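# --- Editor's illustration (hedged sketch, not part of the original file) ---
# Profile.create_profile() above derives display_name as "first.last" and
# relies on strip('.') to drop the separator when either part is empty:
assert '{}.{}'.format('Ada', 'Lovelace').strip('.') == 'Ada.Lovelace'
assert '{}.{}'.format('Ada', '').strip('.') == 'Ada'
assert '{}.{}'.format('', '').strip('.') == ''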
ianb/sheets | sheets/env.py | 1 | 9837 | import os
import ast
import traceback
import time
import sys
import types
import builtins
import collections
import astor
import weakref
from .jsonify import jsonify, jsonify_print, jsonify_print_expr
from .datalayer import Analysis, Execution, FileEdit
from .router import send
from . import stdlib
def now():
return int(time.time() * 1000)
class Environment:
extra_globals = {}
active = weakref.WeakSet()
def __init__(self, path):
self.path = path
self.globals = {
"print": jsonify_print,
"print_expr": jsonify_print_expr,
"jsonify": jsonify,
"jsonify_print": jsonify_print,
"listdir": stdlib.listdir,
"__builtins__": __builtins__,
"FILES": stdlib.FilesDict(self.path),
}
for name in stdlib.builtin_names:
self.globals[name] = getattr(stdlib, name)
self._cached_analysis = {}
self.active.add(self)
predefined_names = set(["parsed"])
def init_commands(self):
"""Returns a list of commands that represent the existing state of the
filesystem"""
for path in os.listdir(self.path):
if path.endswith(".json"):
continue
if not os.path.isfile(os.path.join(self.path, path)):
continue
try:
with open(os.path.join(self.path, path), "r") as fp:
content = fp.read()
yield FileEdit(filename=path, content=content, external_edit=True)
except UnicodeDecodeError:
pass
def fixup_globals(self):
for name, value in self.extra_globals.items():
self.globals.setdefault(name, value)
def execute(self, filename, content, subexpressions=False):
print("Executing", filename, subexpressions)
self.fixup_globals()
stdout = Stdout()
compiled = None
try:
parsed = ast.parse(content, filename, mode='exec')
RewriteExprToPrint(subexpressions).walk(parsed)
var_inspect = VariableInspector()
var_inspect.walk(parsed)
print("varsed used:", sorted(var_inspect.used), "set:", sorted(var_inspect.set), "imported:", var_inspect.imports)
compiled = compile(parsed, filename, 'exec')
except:
stdout.write(traceback.format_exc())
def displayhook(value):
stdout.write_repr(value)
orig_displayhook = sys.displayhook
sys.displayhook = displayhook
orig_stdout = sys.stdout
orig_stderr = sys.stderr
sys.stdout = stdout
sys.stderr = stdout
self.globals["parsed"] = parsed
self.globals["ast"] = ast
globals_before = self.globals.copy()
start = time.time()
try:
try:
if compiled:
exec(compiled, self.globals)
except:
traceback.print_exc()
finally:
end = time.time()
            sys.displayhook = orig_displayhook
sys.stdout = orig_stdout
sys.stderr = orig_stderr
local_scope = dict(
(name, value)
for name, value in self.globals.items()
if name not in globals_before or globals_before[name] is not value)
defines = dict(
(key, {
"json": jsonify(local_scope[key]),
"type": str(type(local_scope[key])),
})
for key in local_scope
if not isinstance(local_scope[key], types.ModuleType))
command = Execution(
filename=filename,
content=content,
emitted=stdout.emitted,
defines=defines,
start_time=int(start * 1000),
end_time=int(end * 1000),
exec_time=int((end - start) * 1000),
with_subexpressions=subexpressions,
)
send(command)
def analyze(self, filename, content):
print("Analyzing", filename)
properties = {}
try:
parsed = ast.parse(content, filename, mode='exec')
var_inspect = VariableInspector()
var_inspect.walk(parsed)
except:
return
properties["parse_error"] = jsonify(traceback.format_exc())
else:
properties = var_inspect.json
if properties != self._cached_analysis.get(filename):
self._cached_analysis[filename] = properties
send(Analysis(filename=filename, content=content, properties=properties))
class VariableInspector(astor.TreeWalk):
builtin_names = dir(builtins)
def init_variables(self):
self.used = set()
self.set = set()
self.imports = set()
self.in_target = False
@property
def json(self):
used = set(self.used)
for key in self.builtin_names:
used.discard(key)
for key in self.set:
used.discard(key)
for key in Environment.predefined_names:
used.discard(key)
return {
"variables_used": list(used),
"variables_set": list(self.set),
"imports": list(self.imports)
}
def pre_arg(self):
self.set.add(self.cur_node.arg)
def pre_Name(self):
if self.in_target:
# Actually this is a set
self.set.add(self.cur_node.id)
else:
self.used.add(self.cur_node.id)
def pre_For(self):
self.process_assignment(self.cur_node.target)
def pre_Assign(self):
self.process_assignment(self.cur_node.targets)
def pre_withitem(self):
self.process_assignment(self.cur_node.optional_vars)
def pre_ExceptHandler(self):
if self.cur_node.name:
self.set.add(self.cur_node.name)
def pre_alias(self):
# Used in imports
name = self.cur_node.asname or self.cur_node.name
name = name.split(".")[0]
self.set.add(name)
self.imports.add(name)
def pre_FunctionDef(self):
self.set.add(self.cur_node.name)
def pre_ListComp(self):
self.process_assignment(self.cur_node.elt)
def process_assignment(self, item):
if isinstance(item, list):
for x in item:
self.process_assignment(x)
return
old_in_target = self.in_target
self.in_target = True
try:
self.walk(item)
finally:
self.in_target = old_in_target
class RewriteExprToPrint(astor.TreeWalk):
expr_node_types = """
UnaryOp
BinOp
BoolOp
Compare
Call
IfExp
Attribute
Subscript
ListComp SetComp GeneratorExp DictComp
""".split()
# Skipped:
# UAdd USub Not Invert
# Add Sub Mult Div FloorDiv Mod Pow LShift RShift BitOr BitXor BitAnd MatMult
# And Or
# Eq NotEq Lt Gt GtE Is IsNot In NotIn
# Index Slice ExtSlice
def __init__(self, subexpressions=False):
self.subexpressions = subexpressions
self.id_counter = 0
astor.TreeWalk.__init__(self)
if self.subexpressions:
for method in self.expr_node_types:
self.pre_handlers[method] = self.save_node_name
self.post_handlers[method] = self.fixup_subexpressions
del self.post_handlers['Module']
def post_Name(self):
if not self.subexpressions:
return
if isinstance(self.cur_node.ctx, ast.Load):
self.replace(self.rewrite_expr(self.cur_node))
def post_Module(self):
node = self.cur_node
node.body = [
self.rewrite_expr(n) if isinstance(n, ast.Expr) else n
for n in node.body]
def save_node_name(self):
self.cur_node.astor_repr = astor.to_source(self.cur_node)
def fixup_subexpressions(self):
new_node = self.rewrite_expr(self.cur_node, self.cur_node.astor_repr)
self.replace(new_node)
def rewrite_expr(self, node, expr_string=None):
if expr_string is None:
expr_string = astor.to_source(node)
node_string = ast.Str(s=expr_string)
self.id_counter += 1
if isinstance(node, ast.Expr):
new_node = ast.Expr(
ast.Call(
func=ast.Name(id='print_expr', ctx=ast.Load()),
args=[node_string, node.value, ast.Num(n=self.id_counter)],
keywords=[],
starargs=None,
)
)
new_node.is_print_expr = True
else:
new_node = ast.Call(
func=ast.Name(id='print_expr', ctx=ast.Load()),
args=[node_string, node, ast.Num(n=self.id_counter)],
keywords=[],
starargs=None,
)
new_node.is_print_expr = True
ast.fix_missing_locations(new_node)
return new_node
class Stdout:
total_exprs_limit = 100
expr_limit = 10
def __init__(self):
self.emitted = []
self.total_exprs_printed = 0
self.exprs_printed = collections.Counter()
def write(self, content):
self.emitted.append({
"type": "print",
"time": now(),
"parts": [{"type": "str", "str": content}],
})
def writejson(self, json):
assert json.get("type"), "JSON objects must have a type"
json.setdefault("time", now())
self.emitted.append(json)
def write_repr(self, o):
self.emitted.append(jsonify(o))
def flush(self):
pass
def add_global(name, value):
Environment.extra_globals[name] = value
Environment.predefined_names.add(name)
for env in Environment.active:
env.globals.setdefault(name, value)
| mit | 8,401,066,148,762,453,000 | 29.549689 | 126 | 0.564196 | false | 3.985818 | false | false | false |
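# --- Editor's illustration (hedged sketch, not part of the original file) ---
# RewriteExprToPrint above rewrites expression statements into print_expr()
# calls so their values can be captured and shown.  A minimal stdlib-only
# version of the same trick (no astor, no subexpression handling):
import ast

class _WrapExprs(ast.NodeTransformer):
    def visit_Expr(self, node):
        call = ast.Call(func=ast.Name(id='report', ctx=ast.Load()),
                        args=[node.value], keywords=[])
        return ast.copy_location(ast.Expr(value=call), node)

def run_and_report(source):
    seen = []
    tree = ast.fix_missing_locations(_WrapExprs().visit(ast.parse(source)))
    exec(compile(tree, '<demo>', 'exec'), {'report': seen.append})
    return seen

assert run_and_report("1 + 2\nx = 5\nx * 2") == [3, 10]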
OmkarPathak/Python-Programs | OOP/P11_Property decorators.py | 1 | 1069 | #This shows the usage of property decorators
#Python @property is one of the built-in decorators. The main purpose of any decorator is to change your class methods or attributes in such a way so that the users neeed not make any additional changes in their code.
#Without property decorators
class BankAccount:
def __init__(self,name,balance):
self.name=name
self.balance=balance
self.total= self.name+ " has "+self.balance+ " dollars in the account"
user1=BankAccount("Elon Musk","10000")
user1.name="Tim cook"
print(user1.name)
print(user1.total)
# Output: Tim cook
# Elon Musk has 10000 dollars in the account
#With property decorators
class BankAccount:
def __init__(self,name,balance):
self.name=name
self.balance=balance
@property
def total(self):
return self.name+ " has "+self.balance+ " dollars in the account"
user1=BankAccount("Elon Musk","10000")
user1.name="Tim cook"
print(user1.name)
print(user1.total)
#Output: Tim cook
# Tim cook has 10000 dollars in the account
| gpl-3.0 | 6,660,911,220,316,598,000 | 27.131579 | 217 | 0.705332 | false | 3.426282 | false | false | false |
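# --- Editor's illustration (hedged sketch, not part of the original file) ---
# A property can also define a setter, so assignments go through validation
# instead of silently bypassing the computed value as in the first example:
class Wallet:
    def __init__(self, balance):
        self._balance = balance

    @property
    def balance(self):
        return self._balance

    @balance.setter
    def balance(self, value):
        if value < 0:
            raise ValueError("balance cannot be negative")
        self._balance = value

w = Wallet(100)
w.balance = 250
assert w.balance == 250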
sheagcraig/python-jss | jss/queryset.py | 1 | 5924 | #!/usr/bin/env python
# Copyright (C) 2014-2017 Shea G Craig
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""queryset.py
Class that adds some extra functionality to a basic list. Used as the
result of all queries in python-jss.
"""
from __future__ import absolute_import
from collections import defaultdict
try:
import cPickle # Python 2.X
except ImportError:
import _pickle as cPickle # Python 3+
import datetime
import os
from .jssobject import DATE_FMT, Identity
STR_FMT = "{0:>{1}} | {2:>{3}} | {4:>{5}}"
class QuerySet(list):
"""A list style collection of JSSObjects.
Listing operations retrieve minimal or overview information for most
object types. For example, we may want to see all the Computers on
the JSS but that does not mean we want to do a full object GET for
each one.
QuerySets hold instances of a single type of JSSObject, and use the
python list API, while adding some extra helper-methods on top.
"""
def __init__(self, objects):
"""Construct a list of JSSObjects.
Args:
objects (sequence of JSSObjects):
Sequences must be of a single class.
"""
if objects and not len({i.__class__ for i in objects}) == 1:
raise ValueError
super(QuerySet, self).__init__(objects)
self.sort()
self.contained_class = objects[0].__class__ if objects else None
def __str__(self):
"""Make data human readable."""
# Make ID, Name first, no matter what.
sort_keys = ["id", "name"]
if self:
sort_keys.extend([
key for key in self[0]._basic_identity.keys() if
key not in sort_keys])
# Build a dict of max lengths per column for table output.
lengths = defaultdict(int)
for item in self:
for key in sort_keys:
val = item._basic_identity[key] or ""
length = max(len(key), len(val))
if length > lengths[key]:
lengths[key] = length
# Build a format string for row output.
format_strings = []
for key in sort_keys:
length = lengths[key]
format_strings.append("{{data[{}]:>{}}}".format(key, length))
cached = 'cached'
cached_format = '| {{cached:>{}}} |'.format(len(cached))
fmt = "| " + " | ".join(format_strings) + cached_format
# Begin building output with header lines.
# Contained class can be None
contained_name = self.contained_class.__name__ if self.contained_class is not None else "Empty"
results = ["{} QuerySet".format(contained_name)]
headers = {key: key for key in lengths}
header_line = fmt.format(data=headers, cached="cached")
bar = len(header_line) * '-'
results.extend([bar, header_line, bar])
str_cached = (
lambda i: str(i.cached) if isinstance(i.cached, bool) else 'True')
table = [
fmt.format(data=item._basic_identity, cached=str_cached(item)) for
item in self]
results.extend(table)
results.append(bar)
return "\n".join(results)
def __repr__(self):
"""Make data human readable."""
return "QuerySet({})".format(super(QuerySet, self).__repr__())
def sort(self, **kwargs):
"""Sort list elements by ID.
"""
super(QuerySet, self).sort(key=lambda k: int(k.id))
def sort_by_name(self):
"""Sort list elements by name."""
super(QuerySet, self).sort(key=lambda k: k.name.upper())
def retrieve_all(self):
"""Tell each contained object to retrieve its data from the JSS
This can take a long time given a large number of objects,
and depending on the size of each object.
Returns:
self (QuerySet) to allow method chaining.
"""
for obj in self:
if not obj.cached:
obj.retrieve()
return self
def save_all(self):
"""Tell each contained object to save its data to the JSS
This can take a long time given a large number of objects,
and depending on the size of each object.
Returns:
self (QuerySet) to allow method chaining.
"""
for obj in self:
obj.save()
return self
def invalidate(self):
"""Clear the cache datetime for all contents.
This causes objects to retrieve their data again when accessed.
"""
for i in self: i.cached = False
def names(self):
"""Return a generator of contents names"""
return (item.name for item in self)
def ids(self):
"""Return a generator of contents ids"""
return (item.id for item in self)
@classmethod
def from_response(cls, obj_class, response, jss=None, **kwargs):
"""Build a QuerySet from a listing Response."""
response_objects = (
i for i in response if i is not None and i.tag != "size")
dicts = (
{child.tag: child.text for child in item} for item in
response_objects)
identities = (Identity(d) for d in dicts)
objects = [obj_class(jss, data=i, **kwargs) for i in identities]
return cls(objects)
| gpl-3.0 | -5,587,489,524,983,974,000 | 31.549451 | 103 | 0.603646 | false | 4.154278 | false | false | false |
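# --- Editor's illustration (hedged sketch, not part of the original file) ---
# QuerySet.__str__ above sizes each table column to the longest of the header
# and every value in that column.  The width computation in isolation:
from collections import defaultdict

def column_widths(rows, keys):
    widths = defaultdict(int)
    for row in rows:
        for key in keys:
            widths[key] = max(widths[key], len(key), len(row.get(key) or ""))
    return dict(widths)

assert column_widths([{"id": "42", "name": "Alpha"},
                      {"id": "7", "name": "Beta"}],
                     ["id", "name"]) == {"id": 2, "name": 5}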
openstack/python-troveclient | troveclient/client.py | 1 | 19150 | # Copyright 2011 OpenStack Foundation
# Copyright 2013 Rackspace Hosting
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
OpenStack Client interface. Handles the REST calls and responses.
"""
import logging
from keystoneauth1 import adapter
from oslo_utils import importutils
import requests
from urllib import parse as urlparse
from troveclient.apiclient import client
from troveclient import exceptions
from troveclient import service_catalog
try:
import eventlet as sleep_lib
except ImportError:
import time as sleep_lib
try:
import json
except ImportError:
import simplejson as json
# Python 2.5 compat fix
if not hasattr(urlparse, 'parse_qsl'):
import cgi
urlparse.parse_qsl = cgi.parse_qsl
osprofiler_web = importutils.try_import("osprofiler.web")
class TroveClientMixin(object):
def get_database_api_version_from_endpoint(self):
magic_tuple = urlparse.urlsplit(self.management_url)
scheme, netloc, path, query, frag = magic_tuple
v = path.split("/")[1]
valid_versions = ['v1.0']
if v not in valid_versions:
msg = "Invalid client version '%s'. must be one of: %s" % (
(v, ', '.join(valid_versions)))
raise exceptions.UnsupportedVersion(msg)
return v[1:]
class HTTPClient(TroveClientMixin):
USER_AGENT = 'python-troveclient'
def __init__(self, user, password, projectid, auth_url, insecure=False,
timeout=None, tenant_id=None, proxy_tenant_id=None,
proxy_token=None, region_name=None,
endpoint_type='publicURL', service_type=None,
service_name=None, database_service_name=None, retries=None,
http_log_debug=False, cacert=None, bypass_url=None,
auth_system='keystone', auth_plugin=None):
if auth_system and auth_system != 'keystone' and not auth_plugin:
raise exceptions.AuthSystemNotFound(auth_system)
if not auth_url and auth_system and auth_system != 'keystone':
auth_url = auth_plugin.get_auth_url()
if not auth_url:
raise exceptions.EndpointNotFound()
self.user = user
self.password = password
self.projectid = projectid
self.tenant_id = tenant_id
self.auth_url = auth_url.rstrip('/') if auth_url else auth_url
self.version = 'v1'
self.region_name = region_name
self.endpoint_type = endpoint_type
self.service_type = service_type
self.service_name = service_name
self.database_service_name = database_service_name
self.retries = int(retries or 0)
self.http_log_debug = http_log_debug
self.management_url = None
self.auth_token = None
self.proxy_token = proxy_token
self.proxy_tenant_id = proxy_tenant_id
self.timeout = timeout
self.bypass_url = bypass_url
self.auth_system = auth_system
self.auth_plugin = auth_plugin
if insecure:
self.verify_cert = False
else:
if cacert:
self.verify_cert = cacert
else:
self.verify_cert = True
self.auth_system = auth_system
self.auth_plugin = auth_plugin
self.LOG = logging.getLogger(__name__)
if self.http_log_debug and not self.LOG.handlers:
ch = logging.StreamHandler()
self.LOG.setLevel(logging.DEBUG)
self.LOG.addHandler(ch)
if hasattr(requests, 'logging'):
requests.logging.getLogger(requests.__name__).addHandler(ch)
def http_log_req(self, args, kwargs):
if not self.http_log_debug:
return
string_parts = ['curl -i']
for element in args:
if element in ('GET', 'POST', 'DELETE', 'PUT'):
string_parts.append(' -X %s' % element)
else:
string_parts.append(' %s' % element)
for element in kwargs['headers']:
header = ' -H "%s: %s"' % (element, kwargs['headers'][element])
string_parts.append(header)
if 'data' in kwargs:
string_parts.append(" -d '%s'" % (kwargs['data']))
self.LOG.debug("\nREQ: %s\n", "".join(string_parts))
def http_log_resp(self, resp):
if not self.http_log_debug:
return
self.LOG.debug(
"RESP: [%s] %s\nRESP BODY: %s\n",
resp.status_code,
resp.headers,
resp.text)
def request(self, url, method, **kwargs):
kwargs.setdefault('headers', kwargs.get('headers', {}))
kwargs['headers']['User-Agent'] = self.USER_AGENT
kwargs['headers']['Accept'] = 'application/json'
if osprofiler_web:
kwargs['headers'].update(osprofiler_web.get_trace_id_headers())
if 'body' in kwargs:
kwargs['headers']['Content-Type'] = 'application/json'
kwargs['data'] = json.dumps(kwargs['body'])
del kwargs['body']
if self.timeout:
kwargs.setdefault('timeout', self.timeout)
self.http_log_req((url, method,), kwargs)
resp = requests.request(
method,
url,
verify=self.verify_cert,
**kwargs)
self.http_log_resp(resp)
if resp.text:
try:
body = json.loads(resp.text)
            except ValueError:
                body = None
else:
body = None
if resp.status_code >= 400:
raise exceptions.from_response(resp, body, url)
return resp, body
def _cs_request(self, url, method, **kwargs):
auth_attempts = 0
attempts = 0
backoff = 1
while True:
attempts += 1
if not self.management_url or not self.auth_token:
self.authenticate()
kwargs.setdefault('headers', {})['X-Auth-Token'] = self.auth_token
if self.projectid:
kwargs['headers']['X-Auth-Project-Id'] = self.projectid
try:
resp, body = self.request(self.management_url + url, method,
**kwargs)
return resp, body
except exceptions.BadRequest:
if attempts > self.retries:
raise
except exceptions.Unauthorized:
if auth_attempts > 0:
raise
self.LOG.debug("Unauthorized, reauthenticating.")
self.management_url = self.auth_token = None
# First reauth. Discount this attempt.
attempts -= 1
auth_attempts += 1
continue
except exceptions.ClientException as e:
if attempts > self.retries:
raise
if 500 <= e.code <= 599:
pass
else:
raise
except requests.exceptions.ConnectionError as e:
# Catch a connection refused from requests.request
self.LOG.debug("Connection refused: %s", e)
msg = 'Unable to establish connection: %s' % e
raise exceptions.ConnectionRefused(msg)
self.LOG.debug(
"Failed attempt(%s of %s), retrying in %s seconds",
attempts, self.retries, backoff)
sleep_lib.sleep(backoff)
backoff *= 2
def get(self, url, **kwargs):
return self._cs_request(url, 'GET', **kwargs)
def patch(self, url, **kwargs):
return self._cs_request(url, 'PATCH', **kwargs)
def post(self, url, **kwargs):
return self._cs_request(url, 'POST', **kwargs)
def put(self, url, **kwargs):
return self._cs_request(url, 'PUT', **kwargs)
def delete(self, url, **kwargs):
return self._cs_request(url, 'DELETE', **kwargs)
def _extract_service_catalog(self, url, resp, body, extract_token=True):
"""See what the auth service told us and process the response.
We may get redirected to another site, fail or actually get
back a service catalog with a token and our endpoints.
"""
if resp.status_code == 200: # content must always present
try:
self.auth_url = url
self.service_catalog = \
service_catalog.ServiceCatalog(body)
if extract_token:
self.auth_token = self.service_catalog.get_token()
management_url = self.service_catalog.url_for(
attr='region',
filter_value=self.region_name,
endpoint_type=self.endpoint_type,
service_type=self.service_type,
service_name=self.service_name,
database_service_name=self.database_service_name)
self.management_url = management_url.rstrip('/')
return None
except exceptions.AmbiguousEndpoints:
print("Found more than one valid endpoint. Use a more "
"restrictive filter")
raise
except KeyError:
raise exceptions.AuthorizationFailure()
except exceptions.EndpointNotFound:
print("Could not find any suitable endpoint. Correct region?")
raise
elif resp.status_code == 305:
            return resp.headers['location']
else:
raise exceptions.from_response(resp, body, url)
def _fetch_endpoints_from_auth(self, url):
"""We have a token, but don't know the final endpoint for
the region. We have to go back to the auth service and
ask again. This request requires an admin-level token
to work. The proxy token supplied could be from a low-level enduser.
We can't get this from the keystone service endpoint, we have to use
the admin endpoint.
This will overwrite our admin token with the user token.
"""
# GET ...:5001/v2.0/tokens/#####/endpoints
url = '/'.join([url, 'tokens', '%s?belongsTo=%s'
% (self.proxy_token, self.proxy_tenant_id)])
self.LOG.debug("Using Endpoint URL: %s", url)
resp, body = self.request(url, "GET",
headers={'X-Auth-Token': self.auth_token})
return self._extract_service_catalog(url, resp, body,
extract_token=False)
def authenticate(self):
magic_tuple = urlparse.urlsplit(self.auth_url)
scheme, netloc, path, query, frag = magic_tuple
port = magic_tuple.port
if port is None:
port = 80
path_parts = path.split('/')
for part in path_parts:
if len(part) > 0 and part[0] == 'v':
self.version = part
break
# TODO(sandy): Assume admin endpoint is 35357 for now.
# Ideally this is going to have to be provided by the service catalog.
new_netloc = netloc.replace(':%d' % port, ':%d' % (35357,))
admin_url = urlparse.urlunsplit((scheme, new_netloc,
path, query, frag))
auth_url = self.auth_url
if self.version == "v2.0":
while auth_url:
if not self.auth_system or self.auth_system == 'keystone':
auth_url = self._v2_auth(auth_url)
else:
auth_url = self._plugin_auth(auth_url)
# Are we acting on behalf of another user via an
# existing token? If so, our actual endpoints may
# be different than that of the admin token.
if self.proxy_token:
self._fetch_endpoints_from_auth(admin_url)
# Since keystone no longer returns the user token
# with the endpoints any more, we need to replace
# our service account token with the user token.
self.auth_token = self.proxy_token
else:
try:
while auth_url:
auth_url = self._v1_auth(auth_url)
# In some configurations trove makes redirection to
# v2.0 keystone endpoint. Also, new location does not contain
# real endpoint, only hostname and port.
except exceptions.AuthorizationFailure:
if auth_url.find('v2.0') < 0:
auth_url = auth_url + '/v2.0'
self._v2_auth(auth_url)
# Allows for setting an endpoint not defined in the catalog
if self.bypass_url is not None and self.bypass_url != '':
self.management_url = self.bypass_url
def _plugin_auth(self, auth_url):
return self.auth_plugin.authenticate(self, auth_url)
def _v1_auth(self, url):
if self.proxy_token:
raise exceptions.NoTokenLookupException()
headers = {'X-Auth-User': self.user,
'X-Auth-Key': self.password}
if self.projectid:
headers['X-Auth-Project-Id'] = self.projectid
resp, body = self.request(url, 'GET', headers=headers)
if resp.status_code in (200, 204): # in some cases we get No Content
try:
mgmt_header = 'x-server-management-url'
self.management_url = resp.headers[mgmt_header].rstrip('/')
self.auth_token = resp.headers['x-auth-token']
self.auth_url = url
except (KeyError, TypeError):
raise exceptions.AuthorizationFailure()
elif resp.status_code == 305:
return resp.headers['location']
else:
raise exceptions.from_response(resp, body, url)
def _v2_auth(self, url):
"""Authenticate against a v2.0 auth service."""
body = {"auth": {
"passwordCredentials": {"username": self.user,
"password": self.password}}}
if self.projectid:
body['auth']['tenantName'] = self.projectid
elif self.tenant_id:
body['auth']['tenantId'] = self.tenant_id
self._authenticate(url, body)
def _authenticate(self, url, body):
"""Authenticate and extract the service catalog."""
token_url = url + "/tokens"
# Make sure we follow redirects when trying to reach Keystone
resp, body = self.request(
token_url,
"POST",
body=body,
allow_redirects=True)
return self._extract_service_catalog(url, resp, body)
class SessionClient(adapter.LegacyJsonAdapter, TroveClientMixin):
def __init__(self, session, auth, **kwargs):
self.database_service_name = kwargs.pop('database_service_name', None)
super(SessionClient, self).__init__(session=session,
auth=auth,
**kwargs)
# FIXME(jamielennox): this is going to cause an authentication request
# on client init. This is different to how the other clients work.
endpoint = self.get_endpoint()
if not endpoint:
raise exceptions.EndpointNotFound()
self.management_url = endpoint.rstrip('/')
def request(self, url, method, **kwargs):
raise_exc = kwargs.pop('raise_exc', True)
resp, body = super(SessionClient, self).request(url,
method,
raise_exc=False,
**kwargs)
if raise_exc and resp.status_code >= 400:
raise exceptions.from_response(resp, body, url)
return resp, body
def _construct_http_client(username=None, password=None, project_id=None,
auth_url=None, insecure=False, timeout=None,
proxy_tenant_id=None, proxy_token=None,
region_name=None, endpoint_type='publicURL',
service_type='database',
service_name=None, database_service_name=None,
retries=None,
http_log_debug=False,
auth_system='keystone', auth_plugin=None,
cacert=None, bypass_url=None, tenant_id=None,
session=None,
**kwargs):
if session:
try:
kwargs.setdefault('interface', endpoint_type)
except KeyError:
pass
return SessionClient(session=session,
service_type=service_type,
service_name=service_name,
region_name=region_name,
database_service_name=database_service_name,
connect_retries=retries,
**kwargs)
else:
return HTTPClient(username,
password,
projectid=project_id,
auth_url=auth_url,
insecure=insecure,
timeout=timeout,
tenant_id=tenant_id,
proxy_token=proxy_token,
proxy_tenant_id=proxy_tenant_id,
region_name=region_name,
endpoint_type=endpoint_type,
service_type=service_type,
service_name=service_name,
database_service_name=database_service_name,
retries=retries,
http_log_debug=http_log_debug,
cacert=cacert,
bypass_url=bypass_url,
auth_system=auth_system,
auth_plugin=auth_plugin,
)
def get_version_map():
return {
'1.0': 'troveclient.v1.client.Client',
}
def Client(version, *args, **kwargs):
version_map = get_version_map()
client_class = client.BaseClient.get_class('database',
version, version_map)
return client_class(*args, **kwargs)
| apache-2.0 | 2,275,167,824,754,101,500 | 36.920792 | 78 | 0.542924 | false | 4.449349 | false | false | false |
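# --- Editor's illustration (hedged sketch, not part of the original file) ---
# HTTPClient._cs_request() above retries failed requests with a doubling
# back-off (1s, 2s, 4s, ...).  The bare retry skeleton, with an injectable
# sleep so the demo runs instantly:
def retry_with_backoff(call, retries, sleep=lambda seconds: None):
    attempts, backoff = 0, 1
    while True:
        attempts += 1
        try:
            return call()
        except Exception:
            if attempts > retries:
                raise
        sleep(backoff)
        backoff *= 2

_counter = {'calls': 0}

def _flaky():
    _counter['calls'] += 1
    if _counter['calls'] < 3:
        raise RuntimeError('try again')
    return 'ok'

assert retry_with_backoff(_flaky, retries=5) == 'ok'
assert _counter['calls'] == 3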
cybergreen-net/etl2 | tests/etl2/etlharness.py | 1 | 2500 | import ETL
import os
import tempfile
import gzip
from io import BytesIO, StringIO
class EtlHarness:
def __init__(self, feed, out_prefix):
root_dir = tempfile.mkdtemp()
self.feed_name = feed
self.out_prefix = out_prefix
self.source_root = os.path.join(root_dir, "raw")
self.source_dir = os.path.join(self.source_root, self.out_prefix)
self.dest_root = os.path.join(root_dir, "clean")
self.agg_root = os.path.join(root_dir, "agg")
self.dest_dir = os.path.join(self.dest_root, self.out_prefix)
os.makedirs(self.source_dir)
os.makedirs(self.dest_dir)
print(self.source_dir, self.dest_dir)
# doesn't effect shell env
os.environ["CYBERGREEN_SOURCE_ROOT"] = self.source_root
os.environ["CYBERGREEN_DEST_ROOT"] = self.dest_root
os.environ["CYBERGREEN_AGG_ROOT"] = self.agg_root
os.environ["CYBERGREEN_MAXMIND_DB_PATH"]= "tests/fixtures/maxminddb/"
os.environ["CYBERGREEN_PYASN_DB_PATH"]= "tests/fixtures/pyasndb/"
os.environ["CYBERGREEN_AGGREGATION_PATH"] = ""
os.environ["CYBERGREEN_BUILD_ENV"]="test"
os.environ["DD_API_KEY"] = ""
os.environ["RDS_PASSWORD"] = ""
os.environ["REDSHIFT_PASSWORD"] = ""
os.environ["AWS_ACCESS_KEY_ID"] = ""
os.environ["AWS_SECRET_ACCESS_KEY"] = ""
os.environ["CYBERGREEN_CERTBR_HOST"] = ""
os.environ["CYBERGREEN_CERTBR_PATH"] = ""
os.environ["CYBERGREEN_OPENX_HOST"] = ""
os.environ["CYBERGREEN_OPENX_PATH"] = ""
def _write_source_file(self, file_name, data):
file_path = os.path.join(self.source_dir, file_name)
with gzip.open(file_path, "w") as f:
f.write(data.encode('ascii'))
def _read_dest_file(self, file_name):
file_path = os.path.join(self.dest_dir, file_name)
with open(file_path, "r") as f:
return f.readlines()
def _get_etl_output(self, data):
#self._write_source_file("parsed.20000101.out.gz", data)
in_handle = BytesIO(data.encode())
out_handle = BytesIO()
etl = ETL.etl_process(event_date="2000-W01", feed=self.feed_name, config_path="configs/config.json",
in_handle=in_handle, out_handle=out_handle)
# Rewind the file
out_handle.seek(0)
byte_lines = out_handle.readlines()
str_lines = [line.decode("utf-8") for line in byte_lines]
return str_lines, etl
| gpl-3.0 | -13,470,691,697,173,366 | 38.68254 | 108 | 0.6052 | false | 3.201024 | false | false | false |
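# --- Editor's illustration (hedged sketch, not part of the original file) ---
# The harness above gzips fixture data on disk and feeds the ETL through
# in-memory BytesIO handles.  The gzip round-trip in isolation:
import gzip
import os
import tempfile

_path = os.path.join(tempfile.mkdtemp(), "parsed.20000101.out.gz")
with gzip.open(_path, "w") as f:
    f.write("1.2.3.4|openresolver\n".encode("ascii"))
with gzip.open(_path, "r") as f:
    assert f.read().decode("ascii") == "1.2.3.4|openresolver\n"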
LuisAlejandro/condiment | condiment/common/fabric/docker.py | 1 | 13369 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Luis Alejandro Martínez Faneyth
#
# This file is part of Condiment.
#
# Condiment is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Condiment is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
This module contains directives to manage Docker containers.
This module define funtions to accomplish the following tasks:
- Creating a Debian (stable) minimal base (Docker) image.
- Creating a Condiment environment (Docker) image.
- Execute commands on a Docker image to create commands.
- Destroy all images and/or containers.
- Other management commands (reset, updat, login, etc).
.. versionadded:: 0.2
"""
import sys
import time
import json
import paramiko
from contextlib import nested
from fabric.api import env, local, hide, run, shell_env, cd
from condiment.common.logger import get_logger
log = get_logger()
def docker_generate_debian_base_image():
"""
Generate a Debian base (Docker) image.
This function generates a minimal Debian (stable) chroot using debootstrap,
then configures apt, cleans and truncates the filesystem, and finally
imports it to docker.
.. versionadded:: 0.2
"""
docker_stop_container()
with hide('warnings', 'stderr', 'running'):
log.info('Generating a fresh Debian image for Docker ...')
local(('sudo bash %(debian_base_image_script)s '
'luisalejandro/debian-%(arch)s '
'wheezy %(arch)s') % env, capture=False)
docker_stop_container()
def docker_generate_condiment_base_image():
"""
Generate a Condiment environment (Docker) image.
This function generates a minimal Debian (stable) chroot using debootstrap.
.. versionadded:: 0.2
"""
docker_stop_container()
with hide('warnings', 'stderr', 'running'):
log.info('Creating a new Condiment base image ...')
local(('sudo bash -c '
'"%(docker)s run -it --name %(condiment_runtime_container)s '
'%(mounts)s %(dvars)s %(debian_base_image)s '
'bash %(condiment_base_image_script)s"') % env, capture=False)
log.info('Creating the runtime container ...')
local(('sudo bash -c '
'"%(docker)s commit %(condiment_runtime_container)s '
'%(condiment_base_image)s"') % env, capture=True)
docker_stop_container()
def docker_kill_all_containers():
"""
Destroy all containers listed with ``docker ps -aq``.
.. versionadded:: 0.2
"""
with hide('warnings', 'stderr', 'running'):
log.info('Listing available containers ...')
containers = local(('sudo bash -c "%(docker)s ps -aq"') % env,
capture=True).split('\n')
for container in containers:
if container:
log.info('Checking if container "%s" exists ...' % container)
inspect = json.loads(local(('sudo bash -c '
'"%s inspect %s"') % (env.docker,
container),
capture=True))
if inspect:
log.info('Destroying container "%s" ...' % container)
local(('sudo bash -c '
'"%s stop --time 1 %s"') % (env.docker, container),
capture=True)
local(('sudo bash -c '
'"%s rm -fv %s"') % (env.docker, container),
capture=True)
def docker_kill_condiment_images():
"""
Destroy all Docker images made for Condiment.
.. versionadded:: 0.2
"""
with hide('warnings', 'stderr', 'running'):
log.info('Listing available images ...')
images = [env.condiment_base_image, env.condiment_runtime_image,
env.debian_base_image]
for image in images:
if image:
log.info('Checking if image "%s" exists ...' % image)
inspect = json.loads(local(('sudo bash -c '
'"%s inspect %s"') % (env.docker,
image),
capture=True))
if inspect:
log.info('Destroying image "%s" ...' % image)
local(('sudo bash -c '
'"%s rmi -f %s"') % (env.docker, image),
capture=True)
def docker_kill_all_images():
"""
Destroy all Docker images.
.. versionadded:: 0.2
"""
with hide('warnings', 'stderr', 'running'):
log.info('Listing available images ...')
images = local(('sudo bash -c "%(docker)s images -aq"') % env,
capture=True).split('\n')
for image in images:
if image:
log.info('Checking if image "%s" exists ...' % image)
inspect = json.loads(local(('sudo bash -c '
'"%s inspect %s"') % (env.docker,
image),
capture=True))
if inspect:
log.info('Destroying image "%s" ...' % image)
local(('sudo bash -c '
'"%s rmi -f %s"') % (env.docker, image),
capture=True)
def docker_pull_debian_base_image():
"""
Pull the Debian base image from the Docker index.
.. versionadded:: 0.2
"""
docker_stop_container()
with hide('warnings', 'stderr', 'running'):
log.info('Downloading the Debian base image ...')
local(('sudo bash -c '
'"%(docker)s pull %(debian_base_image)s"') % env, capture=False)
docker_stop_container()
def docker_pull_condiment_base_image():
"""
Pull the Condiment environment image from the Docker index.
.. versionadded:: 0.2
"""
docker_stop_container()
with hide('warnings', 'stderr', 'running'):
log.info('Downloading the Condiment base image ...')
local(('sudo bash -c '
'"%(docker)s pull %(condiment_base_image)s"') % env, capture=False)
log.info('Creating the runtime container ...')
local(('sudo bash -c '
'"%(docker)s run -it --name %(condiment_runtime_container)s '
'%(condiment_base_image)s true"') % env, capture=False)
docker_stop_container()
def docker_check_image():
"""
Check if the runtime image exists, build environment if not.
.. versionadded:: 0.2
"""
with hide('warnings', 'stderr', 'running'):
log.info('Checking if we have a runtime image ...')
state = json.loads(local(('sudo bash -c '
'"%(docker)s inspect '
'%(condiment_runtime_image)s"') % env,
capture=True))
if not state:
from condiment.common.fabric.django import django_syncdb
docker_pull_debian_base_image()
docker_pull_condiment_base_image()
django_syncdb()
def docker_check_container():
"""
Check if the runtime container is up, start if not.
.. versionadded:: 0.2
"""
with hide('warnings', 'stderr', 'running'):
log.info('Checking if the runtime container is up ...')
state = json.loads(local(('sudo bash -c '
'"%(docker)s inspect '
'%(condiment_runtime_container)s"') % env,
capture=True))
if state:
if not state[0]['State']['Running']:
docker_stop_container()
docker_start_container()
else:
docker_start_container()
docker_check_ssh_to_container()
def docker_check_ssh_to_container():
"""
Test if SSH is up inside the runtime container.
.. versionadded:: 0.2
"""
log.info('Testing communication with container ...')
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
tries = 0
while True:
tries += 1
try:
time.sleep(2)
ssh.connect(hostname=env.host_string, port=env.port,
username=env.user, password=env.password)
except Exception, e:
log.info('SSH is not ready yet: %s' % e)
else:
break
finally:
ssh.close()
if tries == 10:
log.error('Failed to connect to the container.')
sys.exit(1)
log.info('Communication with the container succeded!')
def docker_start_container():
"""
Start the runtime container.
.. versionadded:: 0.2
"""
docker_check_image()
with hide('warnings', 'stderr', 'running'):
log.info('Starting the runtime container ...')
local(('sudo bash -c '
'"%(docker)s run -d '
'-p 127.0.0.1:22222:22 '
'-p 127.0.0.1:8000:8000 '
'--name %(condiment_runtime_container)s '
'%(mounts)s %(dvars)s %(condiment_runtime_image)s '
'bash %(condiment_start_container_script)s"') % env, capture=True)
def docker_stop_container():
"""
Stop & commit the runtime container. Removes intermediate container.
.. versionadded:: 0.2
"""
with hide('warnings', 'stderr', 'running'):
log.info('Checking if the runtime container is up ...')
runtime_id = json.loads(local(('sudo bash -c '
'"%(docker)s inspect '
'%(condiment_runtime_image)s"') % env,
capture=True))
inspect = json.loads(local(('sudo bash -c '
'"%(docker)s inspect '
'%(condiment_runtime_container)s"') % env,
capture=True))
if inspect:
log.info('Stopping the runtime container ...')
local(('sudo bash -c '
'"%(docker)s stop --time 1 '
'%(condiment_runtime_container)s"') % env,
capture=True)
local(('sudo bash -c '
'"%(docker)s commit %(condiment_runtime_container)s '
'%(condiment_runtime_image)s"') % env, capture=True)
local(('sudo bash -c '
'"%(docker)s rm -fv %(condiment_runtime_container)s"') % env,
capture=True)
if runtime_id:
# This way all the dictionary keys are lower case
lower_runtime_id = dict([(k.lower(), v) for k, v in runtime_id[0].items()])
local(('sudo bash -c '
'"%s rmi -f %s"') % (env.docker, lower_runtime_id['id']),
capture=True)
def docker_login_container():
"""
Login into the runtime container.
.. versionadded:: 0.2
"""
docker_check_container()
with nested(hide('warnings', 'stderr', 'running'),
shell_env(**env.fvars), cd(env.basedir)):
log.info('Opening a shell inside the runtime container ...')
log.info('(When you are done, press CTRL+D to get out).')
run('bash')
def docker_update_container():
"""
Update the runtime container with latest changes to dependencies.
This function executes the script that generates the Condiment environment
image inside the runtime container so that it picks up the changes
made to the environment dependencies.
.. versionadded:: 0.2
"""
docker_check_image()
docker_stop_container()
with hide('warnings', 'stderr', 'running'):
log.info('Updating the Condiment base image ...')
local(('sudo bash -c '
'"%(docker)s run -it --name %(condiment_runtime_container)s '
'%(mounts)s %(dvars)s %(condiment_runtime_image)s '
'bash %(condiment_base_image_script)s"') % env, capture=False)
docker_stop_container()
def docker_reset_container():
"""
Restore the Condiment environment image to its original state.
.. versionadded:: 0.2
"""
from condiment.common.fabric.django import django_syncdb
docker_check_image()
docker_stop_container()
with hide('warnings', 'stderr', 'running'):
log.info('Restoring the Condiment base image ...')
local(('sudo bash -c '
'"%(docker)s run -it --name %(condiment_runtime_container)s '
'%(condiment_base_image)s true"') % env, capture=False)
docker_stop_container()
django_syncdb()
| gpl-3.0 | 6,817,259,882,447,641,000 | 28.973094 | 87 | 0.537777 | false | 4.273657 | false | false | false |
lino-framework/xl | lino_xl/lib/lists/fixtures/demo.py | 1 | 1215 | # -*- coding: UTF-8 -*-
# Copyright 2014-2019 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
from lino.api import dd, rt, _
from lino.utils.mldbc import babeld
def objects():
ListType = rt.models.lists.ListType
List = rt.models.lists.List
mailing = babeld(ListType, _("Mailing list"))
yield mailing
discuss = babeld(ListType, _("Discussion group"))
yield discuss
flags = ListType(**dd.str2kw('designation', _("Flags")))
yield flags
yield List(list_type=mailing, **dd.str2kw('designation', _("Announcements")))
yield List(list_type=mailing, **dd.str2kw('designation', _("Weekly newsletter")))
yield List(list_type=discuss, **dd.str2kw('designation', _("General discussion")))
yield List(list_type=discuss, **dd.str2kw('designation', _("Beginners forum")))
yield List(list_type=discuss, **dd.str2kw('designation', _("Developers forum")))
yield List(list_type=flags,
**dd.str2kw('designation', _("PyCon 2014")))
yield List(list_type=flags,
**dd.str2kw('designation', _("Free Software Day 2014")))
yield List(list_type=flags, **dd.str2kw('designation', _("Schools")))
| bsd-2-clause | 5,112,860,678,266,054,000 | 34.735294 | 86 | 0.655967 | false | 3.422535 | false | false | false |
fro391/Investing | Sentiments/RSS_URL.py | 1 | 1126 | from bs4 import BeautifulSoup
import gethtml
import re
import urlparse
#gets titles
def getURLs (rss):
Titles = []
soup = BeautifulSoup(gethtml.getHtmlText(rss))
for item in soup.findAll('item'):
#link tag cut off after stripping for item... only </link> is there
for i in item.findAll('title'):
try:
Titles.append(i.contents[0])
except Exception as ex:
                template = "An exception of type {0} occurred. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print message
return Titles
#gets links
def getURLs2 (rss):
htmltext = gethtml.getHtmlText(rss)
regex = '<link>(.+?)</link>'
pattern = re.compile(regex)
links = re.findall(pattern,htmltext)
    #returns valid links: a bare ParseResult is always truthy, so check that the
    #parsed link actually has a network location instead of bool() on the tuple
    goodlinks = [link for link in links if urlparse.urlparse(link).netloc]
return goodlinks
#gets dates
def getURLs3 (rss):
htmltext = gethtml.getHtmlText(rss)
regex = '<pubDate>(.+?)</pubDate>'
pattern = re.compile(regex)
date = re.findall(pattern,htmltext)
return date
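#illustrative usage (added example, not part of the original script): combine the
#three helpers into (title, link, date) rows for one feed; zip() only lines the
#fields up correctly if the feed has exactly one title/link/pubDate per item,
#which is an assumption here
def getFeedRows(rss):
    return zip(getURLs(rss), getURLs2(rss), getURLs3(rss))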
| gpl-2.0 | 7,792,483,381,561,359,000 | 31.171429 | 80 | 0.627886 | false | 3.655844 | false | false | false |
bert/geda-gaf | xorn/src/python/geda/xmlread.py | 1 | 30020 | # Copyright (C) 2013-2017 Roland Lutz
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
## \namespace xorn.geda.xmlread
## Reading gEDA schematic/symbol files in XML format.
import cStringIO, xml.parsers.expat
from gettext import gettext as _
import xorn.base64
import xorn.fixednum
import xorn.hybridnum
import xorn.proxy
import xorn.storage
import xorn.geda.ref
from xorn.geda.xmlformat import *
NSSEP = '!'
class VoidHandler:
def start_element(self, name, attributes):
return self
def end_element(self, name):
pass
def character_data(self, data):
pass
class NullHandler:
def __init__(self, log):
self.log = log
def start_element(self, name, attributes):
self.log.error(_("unexpected element \"%s\"") % name)
return VoidHandler()
def end_element(self, name):
pass
def character_data(self, data):
s = data.strip()
if s:
self.log.error(_("unexpected character data \"%s\"") % s)
class OverbarHandler(NullHandler):
def __init__(self, log, text):
self.log = log
self.text = text
def start_element(self, name, attributes):
if name != 'br':
return NullHandler.start_element(self, name, attributes)
self.text.append('\n')
return NullHandler(self.log)
def character_data(self, data):
self.text.append(data.replace('\\', '\\\\'))
def end_element(self, name):
self.text.append('\\_')
class TextHandler(NullHandler):
def __init__(self, log, rev, attached_to, data, attribute_name):
self.log = log
self.rev = rev
self.attached_to = attached_to
self.data = data
self.text = []
if attribute_name is not None:
self.text.append('%s=' % attribute_name)
def start_element(self, name, attributes):
if name == 'br':
self.text.append('\n')
return NullHandler(self.log)
if name == 'overbar':
self.text.append('\\_')
return OverbarHandler(self.log, self.text)
return NullHandler.start_element(self, name, attributes)
def character_data(self, data):
self.text.append(data.replace('\\', '\\\\'))
def end_element(self, name):
self.data.text = ''.join(self.text).encode('utf-8')
ob = self.rev.add_object(self.data)
if self.attached_to is not None:
self.rev.relocate_object(ob, self.attached_to, None)
class PathHandler(NullHandler):
def __init__(self, log, rev, data):
self.log = log
self.rev = rev
self.data = data
self.fragments = []
def start_element(self, name, attributes):
if name != 'br':
return NullHandler.start_element(self, name, attributes)
self.fragments.append('\n')
return NullHandler(self.log)
def character_data(self, data):
try:
self.fragments.append(data.encode())
except UnicodeEncodeError:
self.log.error(_("non-ASCII character in path data"))
def end_element(self, name):
self.data.pathdata = ''.join(self.fragments)
self.rev.add_object(self.data)
def parse_angle(x):
angle = int(x)
if angle != 0 and angle != 90 and angle != 180 and angle != 270:
raise ValueError
return angle
class ContentHandler(NullHandler):
def __init__(self, c, rev, attached_to):
self.log = c.log
self.c = c
self.rev = rev
self.attached_to = attached_to
def start_element(self, name, attributes):
if name == 'text' or name == 'attribute':
is_attribute = name == 'attribute'
data = xorn.storage.Text(
x = self.c.parse_attribute(
attributes, 'x', None,
self.c.parse, 'X coordinate'),
y = self.c.parse_attribute(
attributes, 'y', None,
self.c.parse, 'Y coordinate'),
color = self.c.parse_attribute(
attributes, 'color',
5 if is_attribute else 9,
ENUM_COLOR.index, 'color'),
text_size = self.c.parse_attribute(
attributes, 'size', None,
int, 'text size'),
visibility = self.c.parse_attribute(
attributes, 'visible', None if is_attribute else 1,
ENUM_BOOLEAN.index, 'text visibility'),
show_name_value = self.c.parse_attribute(
attributes, 'show', None if is_attribute else 0,
ENUM_SHOW_NAME_VALUE.index, 'show name/value value'),
angle = self.c.parse_attribute(
attributes, 'angle', 0,
parse_angle, 'angle'),
alignment = self.c.parse_attribute(
attributes, 'alignment', 0,
ENUM_ALIGNMENT.index, 'alignment'))
if is_attribute:
try:
name = attributes.pop('name')
except KeyError:
self.c.log.error(_("attribute name not specified"))
name = None
else:
name = None
return TextHandler(
self.c.log, self.rev, self.attached_to, data, name)
if self.attached_to:
self.c.log.error(_("non-text element can't be attached"))
return VoidHandler()
if name == 'arc':
self.rev.add_object(
xorn.storage.Arc(
x = self.c.parse_attribute(
attributes, 'x', None,
self.c.parse, 'X coordinate'),
y = self.c.parse_attribute(
attributes, 'y', None,
self.c.parse, 'Y coordinate'),
radius = self.c.parse_attribute(
attributes, 'radius', None,
self.c.parse, 'radius'),
startangle = self.c.parse_attribute(
attributes, 'startangle', None,
int, 'start angle'),
sweepangle = self.c.parse_attribute(
attributes, 'sweepangle', None,
int, 'sweep angle'),
color = self.c.parse_attribute(
attributes, 'color', 3,
ENUM_COLOR.index, 'color'),
line = self.c.parse_line(attributes)))
return NullHandler(self.c.log)
if name == 'box':
self.rev.add_object(
xorn.storage.Box(
x = self.c.parse_attribute(
attributes, 'x', None,
self.c.parse, 'X coordinate'),
y = self.c.parse_attribute(
attributes, 'y', None,
self.c.parse, 'Y coordinate'),
width = self.c.parse_attribute(
attributes, 'width', None,
self.c.parse, 'width'),
height = self.c.parse_attribute(
attributes, 'height', None,
self.c.parse, 'height'),
color = self.c.parse_attribute(
attributes, 'color', 3,
ENUM_COLOR.index, 'color'),
line = self.c.parse_line(attributes),
fill = self.c.parse_fill(attributes)))
return NullHandler(self.c.log)
if name == 'circle':
self.rev.add_object(
xorn.storage.Circle(
x = self.c.parse_attribute(
attributes, 'x', None,
self.c.parse, 'X coordinate'),
y = self.c.parse_attribute(
attributes, 'y', None,
self.c.parse, 'Y coordinate'),
radius = self.c.parse_attribute(
attributes, 'radius', None,
self.c.parse, 'radius'),
color = self.c.parse_attribute(
attributes, 'color', 3,
ENUM_COLOR.index, 'color'),
line = self.c.parse_line(attributes),
fill = self.c.parse_fill(attributes)))
return NullHandler(self.c.log)
if name == 'component':
ob = self.rev.add_object(
xorn.storage.Component(
x = self.c.parse_attribute(
attributes, 'x', None,
self.c.parse, 'X coordinate'),
y = self.c.parse_attribute(
attributes, 'y', None,
self.c.parse, 'Y coordinate'),
selectable = self.c.parse_attribute(
attributes, 'selectable', True,
ENUM_BOOLEAN.index, 'selectability'),
angle = self.c.parse_attribute(
attributes, 'angle', 0,
parse_angle, 'angle'),
mirror = self.c.parse_attribute(
attributes, 'mirror', False,
ENUM_BOOLEAN.index, 'mirror flag')))
try:
symbol_id = attributes.pop('symbol')
except KeyError:
self.c.log.error(_("symbol not specified"))
else:
if not symbol_id:
self.c.log.error(_("symbol id can't be empty"))
else:
self.c.symbol_refs.append(
(self.rev, ob, symbol_id, self.c.log.lineno))
return ContentHandler(self.c, self.rev, ob)
if name == 'line':
x0 = self.c.parse_attribute(attributes, 'x0', None,
self.c.parse, 'first X coordinate')
y0 = self.c.parse_attribute(attributes, 'y0', None,
self.c.parse, 'first Y coordinate')
x1 = self.c.parse_attribute(attributes, 'x1', None,
self.c.parse, 'second X coordinate')
y1 = self.c.parse_attribute(attributes, 'y1', None,
self.c.parse, 'second Y coordinate')
self.rev.add_object(
xorn.storage.Line(
x = x0, y = y0, width = x1 - x0, height = y1 - y0,
color = self.c.parse_attribute(
attributes, 'color', 3,
ENUM_COLOR.index, 'color'),
line = self.c.parse_line(attributes)))
return NullHandler(self.c.log)
if name == 'net' or name == 'pin':
is_pin = name == 'pin'
is_bus = self.c.parse_attribute(attributes, 'type', False,
ENUM_NETTYPE.index, 'net/pin type')
if is_pin:
default_color = 1
is_inverted = self.c.parse_attribute(
attributes, 'inverted', False,
ENUM_BOOLEAN.index, 'invertedness')
else:
if is_bus:
default_color = 10
else:
default_color = 4
is_inverted = False
x0 = self.c.parse_attribute(attributes, 'x0', None,
self.c.parse, 'first X coordinate')
y0 = self.c.parse_attribute(attributes, 'y0', None,
self.c.parse, 'first Y coordinate')
x1 = self.c.parse_attribute(attributes, 'x1', None,
self.c.parse, 'second X coordinate')
y1 = self.c.parse_attribute(attributes, 'y1', None,
self.c.parse, 'second Y coordinate')
ob = self.rev.add_object(
xorn.storage.Net(
x = x0, y = y0, width = x1 - x0, height = y1 - y0,
color = self.c.parse_attribute(
attributes, 'color', default_color,
ENUM_COLOR.index, 'color'),
is_bus = is_bus,
is_pin = is_pin,
is_inverted = is_inverted))
return ContentHandler(self.c, self.rev, ob)
if name == 'path':
return PathHandler(self.c.log, self.rev, xorn.storage.Path(
color = self.c.parse_attribute(attributes, 'color', 3,
ENUM_COLOR.index, 'color'),
line = self.c.parse_line(attributes),
fill = self.c.parse_fill(attributes)))
if name == 'picture':
ob = self.rev.add_object(
xorn.storage.Picture(
x = self.c.parse_attribute(
attributes, 'x', None,
self.c.parse, 'X coordinate'),
y = self.c.parse_attribute(
attributes, 'y', None,
self.c.parse, 'Y coordinate'),
width = self.c.parse_attribute(
attributes, 'width', None,
self.c.parse, 'width'),
height = self.c.parse_attribute(
attributes, 'height', None,
self.c.parse, 'height'),
angle = self.c.parse_attribute(
attributes, 'angle', 0,
parse_angle, 'angle'),
mirror = self.c.parse_attribute(
attributes, 'mirrored', False,
ENUM_BOOLEAN.index, 'mirror flag'),
pixmap = None))
try:
pixmap_id = attributes.pop('pixmap')
except KeyError:
self.c.log.error(_("pixmap not specified"))
else:
if not pixmap_id:
self.c.log.error(_("pixmap id can't be empty"))
else:
self.c.pixmap_refs.append(
(self.rev, ob, pixmap_id, self.c.log.lineno))
return NullHandler(self.c.log)
self.c.log.error(_("unexpected element \"%s\"") % name)
return VoidHandler()
class PixmapHandler(NullHandler):
def __init__(self, log, pixmap, just_verify):
self.log = log
self.pixmap = pixmap
self.just_verify = just_verify
self.f = cStringIO.StringIO()
def character_data(self, data):
self.f.write(data)
def end_element(self, name):
self.f.seek(0)
try:
data = xorn.base64.decode(self.f)
except xorn.base64.DecodingError:
self.log.error(_("base64 decoding error"))
return
if not self.just_verify:
self.pixmap.data = data
elif data != self.pixmap.data:
self.log.warn(_("contents of pixmap file \"%s\" don't match "
"embedded data") % self.pixmap.filename)
class LoadContext:
def __init__(self, log, load_symbol, load_pixmap):
self.log = log
self.ids = set()
self.symbols = {}
self.pixmaps = {}
self.symbol_refs = []
self.pixmap_refs = []
self.load_symbol = load_symbol
self.load_pixmap = load_pixmap
self.use_hybridnum = False
def parse(self, x):
if self.use_hybridnum:
return xorn.hybridnum.parse(x, 2)
else:
return float(xorn.fixednum.parse(x, 2))
def parse_attribute(self, d, key, default, processor, msg_fragment):
try:
x = d.pop(key)
except KeyError:
if default is not None:
return default
self.log.error(_("%s not specified") % msg_fragment)
else:
try:
return processor(x)
except (KeyError, ValueError):
self.log.error(_("invalid %s \"%s\"") % (msg_fragment, x))
# guess a well-formed return value from processor function
return 0. if processor == self.parse else 0
def parse_line(self, attributes):
line = xorn.storage.LineAttr()
line.width = self.parse_attribute(
attributes, 'linewidth', 0, self.parse, 'line width')
line.cap_style = self.parse_attribute(
attributes, 'capstyle', 0, ENUM_CAPSTYLE.index, 'cap style')
line.dash_style = self.parse_attribute(
attributes, 'dashstyle', 0, ENUM_DASHSTYLE.index, 'dash style')
if line.dash_style != 0 and line.dash_style != 1:
line.dash_length = self.parse_attribute(
attributes, 'dashlength', None, self.parse, 'dash length')
else:
line.dash_length = -1
if line.dash_style != 0:
line.dash_space = self.parse_attribute(
attributes, 'dashspace', None, self.parse, 'dash space')
else:
line.dash_space = -1
return line
def parse_fill(self, attributes):
fill = xorn.storage.FillAttr()
fill.type = self.parse_attribute(
attributes, 'filltype', 0, ENUM_FILLTYPE.index, 'fill type')
if fill.type == 2 or fill.type == 3:
fill.width = self.parse_attribute(
attributes, 'fillwidth', None, self.parse, 'fill width')
fill.angle0 = self.parse_attribute(
attributes, 'angle0', None, int, 'first fill angle')
fill.pitch0 = self.parse_attribute(
attributes, 'pitch0', None, self.parse, 'first fill pitch')
else:
fill.width = -1
fill.angle0 = -1
fill.pitch0 = -1
if fill.type == 2:
fill.angle1 = self.parse_attribute(
attributes, 'angle1', None, int, 'second fill angle')
fill.pitch1 = self.parse_attribute(
attributes, 'pitch1', None, self.parse, 'second fill pitch')
else:
fill.angle1 = -1
fill.pitch1 = -1
return fill
class RootElementHandler(NullHandler):
def __init__(self, c):
self.log = c.log
self.c = c
self.rev = xorn.storage.Revision()
self.had_content = False
def start_element(self, name, attributes):
if name == 'content':
if self.had_content:
self.c.log.error(_("duplicate content tag"))
return VoidHandler()
self.had_content = True
return ContentHandler(self.c, self.rev, None)
if name == 'symbol':
try:
mode = attributes.pop('mode')
except KeyError:
self.c.log.error(_("symbol mode not specified"))
return VoidHandler()
if mode == 'omitted':
read_symbol = False
is_embedded = False
elif mode == 'referenced':
read_symbol = True
is_embedded = False
elif mode == 'embedded':
read_symbol = True
is_embedded = True
else:
self.c.log.error(_("invalid symbol mode \"%s\"") % mode)
return VoidHandler()
try:
name = attributes.pop('name')
except KeyError:
if not is_embedded:
self.c.log.error(_("symbol name not specified"))
return VoidHandler()
name = None
if is_embedded:
symbol = xorn.geda.ref.Symbol(name, None, True)
else:
symbol = self.c.load_symbol(name, read_symbol)
if symbol is None:
symbol = xorn.geda.ref.Symbol(name, None, False)
is_embedded = True
assert not symbol.embedded
try:
symbol_id = attributes.pop('id')
except KeyError:
self.c.log.error(_("symbol id not specified"))
return VoidHandler()
if not symbol_id:
self.c.log.error(_("symbol id can't be empty"))
return VoidHandler()
if symbol_id in self.c.ids:
self.c.log.error(_("duplicate id \"%s\"") % symbol_id)
return VoidHandler()
self.c.ids.add(symbol_id)
self.c.symbols[symbol_id] = symbol
if not read_symbol:
return NullHandler(self.c.log)
reh = RootElementHandler(self.c)
if is_embedded:
symbol.prim_objs = reh.rev
return reh
if name == 'pixmap':
try:
mode = attributes.pop('mode')
except KeyError:
self.c.log.error(_("pixmap mode not specified"))
return VoidHandler()
if mode == 'omitted':
read_pixmap = False
is_embedded = False
elif mode == 'referenced':
read_pixmap = True
is_embedded = False
elif mode == 'embedded':
read_pixmap = True
is_embedded = True
else:
self.c.log.error(_("invalid pixmap mode \"%s\"") % mode)
return VoidHandler()
try:
name = attributes.pop('name')
except KeyError:
if not is_embedded:
self.c.log.error(_("pixmap name not specified"))
return VoidHandler()
name = None
if is_embedded:
pixmap = xorn.geda.ref.Pixmap(name, None, True)
else:
pixmap = self.c.load_pixmap(name, read_pixmap)
if pixmap is None:
pixmap = xorn.geda.ref.Pixmap(name, None, False)
is_embedded = True
assert not pixmap.embedded
try:
pixmap_id = attributes.pop('id')
except KeyError:
self.c.log.error(_("pixmap id not specified"))
return VoidHandler()
if not pixmap_id:
self.c.log.error(_("pixmap id can't be empty"))
return VoidHandler()
if pixmap_id in self.c.ids:
self.c.log.error(_("duplicate id \"%s\"") % pixmap_id)
return VoidHandler()
self.c.ids.add(pixmap_id)
self.c.pixmaps[pixmap_id] = pixmap
if read_pixmap:
return PixmapHandler(self.c.log, pixmap, not is_embedded)
else:
return NullHandler(self.c.log)
self.c.log.error(_("unexpected element \"%s\"") % name)
return VoidHandler()
def end_element(self, name):
if not self.had_content:
self.c.log.error(_("content missing"))
def read_file(f, name, log, load_symbol, load_pixmap):
context = LoadContext(log, load_symbol, load_pixmap)
reh = RootElementHandler(context)
def start_root_element(name, attributes):
if name != 'symbol' and name != 'schematic':
log.error(_("invalid root element \"%s\"") % name)
return VoidHandler()
for feature in attributes.pop('file-format-features', '').split(' '):
if not feature:
continue
if feature == 'experimental':
pass
elif feature == 'hybridnum':
if context.use_hybridnum:
log.error(_("duplicate file format feature"))
context.use_hybridnum = True
else:
log.error(_("unsupported file format feature \"%s\"")
% feature)
return reh
read_xml_file(f, log, NAMESPACE, start_root_element)
for rev, ob, symbol_id, lineno in context.symbol_refs:
if symbol_id not in context.symbols:
log.lineno = lineno
log.error(_("undefined symbol \"%s\"") % symbol_id)
continue
data = rev.get_object_data(ob)
data.symbol = context.symbols[symbol_id]
rev.set_object_data(ob, data)
for rev, ob, pixmap_id, lineno in context.pixmap_refs:
if pixmap_id not in context.pixmaps:
log.lineno = lineno
log.error(_("undefined pixmap \"%s\"") % pixmap_id)
continue
data = rev.get_object_data(ob)
data.pixmap = context.pixmaps[pixmap_id]
rev.set_object_data(ob, data)
return xorn.proxy.RevisionProxy(reh.rev)
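# Illustrative example (added, not part of the original module): a minimal way to
# call read_file().  The log object only needs error()/warn() methods and a
# writable `lineno` attribute, and both loader callbacks may simply return None,
# in which case referenced symbols/pixmaps stay unresolved (see RootElementHandler).
def _example_read(path):
    class _ListLog(object):
        lineno = 0
        def __init__(self):
            self.messages = []
        def error(self, message):
            self.messages.append((self.lineno + 1, message))
        warn = error
    log = _ListLog()
    with open(path, 'rb') as f:
        rev = read_file(f, path, log,
                        lambda name, read_symbol: None,   # don't resolve symbols
                        lambda name, read_pixmap: None)   # don't resolve pixmaps
    return rev, log.messages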
def read_xml_file(f, log, namespace, start_root_element):
stack = []
def strip_namespace(name, ignore_errors):
try:
pos = name.index(NSSEP)
except ValueError:
if not ignore_errors:
log.error(_("element name \"%s\" without namespace") % name)
return None
if name[:pos] != namespace and not ignore_errors:
log.error(_("invalid namespace \"%s\"") % name[:pos])
return None
return name[pos + 1:]
def StartElementHandler(name, attributes):
log.lineno = p.CurrentLineNumber - 1
name = strip_namespace(name, False)
if name is None:
new_handler = VoidHandler()
elif stack:
new_handler = stack[-1].start_element(name, attributes)
else:
new_handler = start_root_element(name, attributes)
stack.append(new_handler)
if attributes and not isinstance(new_handler, VoidHandler):
log.error(_("unexpected attribute(s) %s") % _(", ").join(
_("\"%s\"") % attr for attr in sorted(attributes)))
def EndElementHandler(name):
log.lineno = p.CurrentLineNumber - 1
name = strip_namespace(name, True)
stack.pop().end_element(name)
def CharacterDataHandler(data):
log.lineno = p.CurrentLineNumber - 1
stack[-1].character_data(data)
def StartDoctypeDeclHandler(doctype_name, system_id, public_id,
has_internal_subset):
log.lineno = p.CurrentLineNumber - 1
log.error(_("unexpected XML document type declaration"))
def ElementDeclHandler(name, model):
log.lineno = p.CurrentLineNumber - 1
log.error(_("unexpected XML element type declaration"))
def AttlistDeclHandler(elname, attname, type, default, required):
log.lineno = p.CurrentLineNumber - 1
log.error(_("unexpected XML element type attribute declaration"))
def ProcessingInstructionHandler(target, data):
log.lineno = p.CurrentLineNumber - 1
log.error(_("unexpected XML processing instruction"))
def UnparsedEntityDeclHandler(entity_name, base, system_id, public_id,
notationName):
log.lineno = p.CurrentLineNumber - 1
log.error(_("unexpected XML unparsed entity declaration"))
def EntityDeclHandler(entity_name, is_parameter_entity, value, base,
system_id, public_id, notation_name):
log.lineno = p.CurrentLineNumber - 1
log.error(_("unexpected XML entity declaration"))
def NotationDeclHandler(notation_name, base, system_id, public_id):
log.lineno = p.CurrentLineNumber - 1
log.error(_("unexpected XML notation declaration"))
def StartCdataSectionHandler():
log.lineno = p.CurrentLineNumber - 1
log.error(_("unexpected XML CDATA section"))
def DefaultHandler(data):
log.lineno = p.CurrentLineNumber - 1
log.error(_("unexpected characters in XML document"))
def NotStandaloneHandler():
log.lineno = p.CurrentLineNumber - 1
log.error(_("XML document hasn't been declared as standalone"))
def ExternalEntityRefHandler(context, base, systemId, publicId):
log.lineno = p.CurrentLineNumber - 1
log.error(_("unexpected reference to external XML entity"))
p = xml.parsers.expat.ParserCreate(namespace_separator = '!')
p.XmlDeclHandler = None
p.StartDoctypeDeclHandler = StartDoctypeDeclHandler
p.EndDoctypeDeclHandler = None
p.ElementDeclHandler = ElementDeclHandler
p.AttlistDeclHandler = AttlistDeclHandler
p.StartElementHandler = StartElementHandler
p.EndElementHandler = EndElementHandler
p.ProcessingInstructionHandler = ProcessingInstructionHandler
p.CharacterDataHandler = CharacterDataHandler
p.UnparsedEntityDeclHandler = UnparsedEntityDeclHandler
p.EntityDeclHandler = EntityDeclHandler
p.NotationDeclHandler = NotationDeclHandler
p.StartNamespaceDeclHandler = None
p.EndNamespaceDeclHandler = None
p.CommentHandler = None
p.StartCdataSectionHandler = StartCdataSectionHandler
p.EndCdataSectionHandler = None
p.DefaultHandler = DefaultHandler
p.DefaultHandlerExpand = None
p.NotStandaloneHandler = NotStandaloneHandler
p.ExternalEntityRefHandler = ExternalEntityRefHandler
try:
p.ParseFile(f)
except xml.parsers.expat.ExpatError as e:
log.lineno = e.lineno - 1
log.error(_("%s") % e)
| gpl-2.0 | -6,952,313,439,646,739,000 | 37.78553 | 79 | 0.521286 | false | 4.323157 | false | false | false |
JoaoCarabetta/bradata | bradata/tse/pipeline.py | 1 | 6105 | import bradata.utils
import bradata.connection
import os
import io
from zipfile import ZipFile
import pandas as pd
import glob
import yaml
import shutil
import luigi
import luigi.contrib.postgres
def _find_header(data_type, year, path):
with open(path, 'r') as f:
        data = yaml.safe_load(f)
a = data[data_type]['columns']
final = min(list(a.keys()))
for k in a.keys():
if int(year) >= k:
final = k
return str(a[final])
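# Illustrative example (added; the exact layout of header_relation.yaml is an
# assumption here): for a given data_type, _find_header() returns the 'columns'
# entry with the newest year key that is not greater than the requested year.
def _example_find_header():
    import tempfile
    import textwrap
    with tempfile.NamedTemporaryFile('w', suffix='.yaml', delete=False) as f:
        f.write(textwrap.dedent("""\
            candidatos:
              columns:
                2010: header_2010
                2014: header_2014
        """))
    return _find_header('candidatos', 2012, f.name)  # -> 'header_2010'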
class Get_Headers(luigi.Task):
def output(self):
return luigi.LocalTarget(os.path.join(bradata.__download_dir__, 'tse', 'config', 'headers.csv'))
def run(self):
conn = bradata.connection.Connection()
result = conn.perform_request('https://raw.githubusercontent.com/labFGV/bradata/master/bradata/tse/headersTSE.csv')
if result['status'] == 'ok':
result = result['content']
else:
            print('File was not downloaded')
with self.output().open('w') as o_file:
o_file.write(result)
class Get_Header_Relation(luigi.Task):
def output(self):
return luigi.LocalTarget(os.path.join(bradata.__download_dir__, 'tse', 'config', 'header_relation.yaml'))
def run(self):
conn = bradata.connection.Connection()
result = conn.perform_request(
'https://raw.githubusercontent.com/labFGV/bradata/master/bradata/tse/header_relation.yaml')
if result['status'] == 'ok':
result = result['content']
else:
            raise Warning('Header Relation was not downloaded')
with self.output().open('w') as o_file:
o_file.write(result)
class Download_Unzip(luigi.Task):
"""
Download and unzip
"""
year = luigi.Parameter()
data_type = luigi.Parameter()
def output(self):
"""
:return: the target output for this task.
:rtype: object (:py:class:`luigi.target.Target`)
"""
return luigi.LocalTarget(os.path.join(bradata.__download_dir__, 'tse', 'temp', '{}_{}'.format(self.data_type, self.year)))
def requires(self):
"""
* :py:class:`~.Streams`
:return: list of object (:py:class:`luigi.task.Task`)
"""
return Get_Header_Relation()
def run(self):
conn = bradata.connection.Connection()
with self.input().open('r') as input_file:
base_url = self.select_url(self.data_type)
url = base_url + bradata.utils._treat_inputs(self.year) + '.zip'
result = conn.perform_request(url, binary=True)
if result['status'] == 'ok':
result = result['content']
else:
                raise Exception('File was not downloaded')
zipfile = ZipFile(io.BytesIO(result))
zipfile.extractall(self.output().path)
def select_url(self, data_type):
with open(self.input().path, 'r') as f:
            data = yaml.safe_load(f)
return data[data_type]['url']
class Aggregat(luigi.Task):
"""
    Aggregate all state csv files into a single file with a header
"""
year = luigi.Parameter()
data_type = luigi.Parameter()
def requires(self):
"""
"""
return {'download': Download_Unzip(data_type=self.data_type, year=self.year),
'headers': Get_Headers(),
'header_relation': Get_Header_Relation()}
def output(self):
"""
"""
return luigi.LocalTarget(os.path.join(bradata.__download_dir__, 'tse', '{}_{}.csv'.format(self.data_type, self.year)))
def run(self):
headers = pd.read_csv(self.input()['headers'].path)
        files = glob.glob(self.input()['download'].path + "/*.txt")
header = _find_header(self.data_type, self.year, self.input()['header_relation'].path)
df_list = []
for filename in sorted(files):
df_list.append(
pd.read_csv(filename, sep=';', names=headers[header].dropna().tolist(), encoding='latin1'))
full_df = pd.concat(df_list)
full_df.to_csv(self.output().path, index=False, encoding='utf-8')
print('Completed! Access your file at',
os.path.join(bradata.__download_dir__, 'tse', '{}_{}.csv'.format(self.data_type, self.year)))
class ToSQl(luigi.Task):
data_type = luigi.Parameter()
year = luigi.Parameter()
def requires(self):
return Aggregat(data_type=self.data_type, year=self.year)
def run(self):
with open('bradata/tse/config_server.yaml', 'r') as f:
            server = yaml.safe_load(f)
host = server['host']
database = server['database']
user = server['user']
password = server['password']
schema = 'tse'
table = '{}_{}'.format(self.data_type, self.year)
from sqlalchemy import create_engine
url = 'postgresql://{}:{}@{}/{}'
url = url.format(user, password, host, database)
engine = create_engine(url)
headers = pd.read_csv(self.input().path)
print('Inserting data do DB. It can take a while...')
headers.to_sql(table, engine, schema=schema, if_exists='replace')
print('The data is on your DB! Check schema {}, table {}'.format(schema, table))
with self.output().open('w') as f:
f.write('')
def output(self):
return luigi.LocalTarget(os.path.join(bradata.__download_dir__, 'tse', 'temp',
'{}_{}'.format(self.data_type, self.year), 'dumb.txt'))
class Fetch(luigi.WrapperTask):
data_types = luigi.Parameter()
years = luigi.Parameter()
def requires(self):
data_types = self.string_to_list(self.data_types)
years = self.string_to_list(self.years)
yield [ToSQl(data_type=t, year=y) for t in data_types for y in years]
def string_to_list(self, string):
string = string.replace("'",'').replace('[', '').replace(']','').replace(' ', '')
return [s for s in string.split(',')]
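# Illustrative invocation (assumption, not taken from project docs): with the
# luigi.run() entry point below, the pipeline can be driven from the command
# line, e.g.
#
#   python pipeline.py Fetch --data-types "['candidatos']" --years "['2010', '2014']" --local-scheduler
#
# where valid data type names are the top-level keys of header_relation.yaml;
# string_to_list() above turns those quoted list strings back into Python lists.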
if __name__ == "__main__":
luigi.run() | mit | -2,733,309,150,001,114,000 | 27.666667 | 130 | 0.578051 | false | 3.691052 | false | false | false |
vathpela/anaconda | pyanaconda/ui/gui/helpers.py | 1 | 13727 | # Abstract base classes for GUI classes
#
# Copyright (C) 2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# This file contains abstract base classes that are specific to GUI
# functionality. See also pyanaconda.ui.helpers.
from pyanaconda.anaconda_loggers import get_module_logger
log = get_module_logger(__name__)
from abc import ABCMeta, abstractmethod
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
from pyanaconda.flags import flags
from pyanaconda.ui.helpers import InputCheck, InputCheckHandler
from pyanaconda.ui.gui.utils import timed_action
from pyanaconda.core.i18n import _
from pyanaconda.errors import NonInteractiveError
from pyanaconda.core import constants
def autoinstall_stopped(reason):
""" Reaction on stop of automatic kickstart installation
Log why the installation stopped and raise the NonInteractiveError in
non interactive mode.
:param reason: Why the automatic kickstart installation stopped.
"""
log.info("kickstart installation stopped for info: %s", reason)
if not flags.ksprompt:
raise NonInteractiveError("Non interactive installation failed: %s" % reason)
class GUIInputCheck(InputCheck):
""" Add timer awareness to an InputCheck.
Add a delay before running the validation function so that the
function is not run for every keystroke. Run any pending actions
before returning a status.
"""
def __init__(self, parent, input_obj, run_check, data=None):
super().__init__(parent, input_obj, run_check, data)
# Add the timer here instead of decorating a method so that a new
# TimedAction is created for every instance
self.update_check_status = timed_action(busy_cursor=False)(self.update_check_status)
@property
def check_status(self):
if self.update_check_status.timer_active:
# The timer is hooked up to update_check_status, which takes no arguments.
# Since the timed_action wrapper was made around the bound method of a
# GUIInputCheck instance and not the function of a GUIInputCheck class,
# self is already applied and update_check_status is just a regular TimedAction
# object, not a curried function around the object.
self.update_check_status.run_now()
return super().check_status
# Inherit abstract methods from InputCheckHandler
# pylint: disable=abstract-method
class GUIInputCheckHandler(InputCheckHandler, metaclass=ABCMeta):
"""Provide InputCheckHandler functionality for Gtk input screens.
This class assumes that all input objects are of type GtkEditable and
attaches InputCheck.update_check_status to the changed signal.
"""
def _update_check_status(self, editable, inputcheck):
inputcheck.update_check_status()
def get_input(self, input_obj):
return input_obj.get_text()
def add_check(self, input_obj, run_check, data=None):
# Use a GUIInputCheck to run the validation in a GLib timer
checkRef = GUIInputCheck(self, input_obj, run_check, data)
# Start a new timer on each keystroke
input_obj.connect_after("changed", self._update_check_status, checkRef)
# Add the InputCheck to the parent class's list of checks
self._check_list.append(checkRef)
return checkRef
def can_go_back_focus_if_not(self):
"""Check whether the input validation checks allow the spoke to be exited.
Return True if yes, otherwise focus the problematic input field and return False.
"""
failed_check = next(self.failed_checks, None)
if failed_check:
failed_check.input_obj.grab_focus()
return False
else:
return True
class GUIDialogInputCheckHandler(GUIInputCheckHandler, metaclass=ABCMeta):
"""Provide InputCheckHandler functionality for Gtk dialogs.
If an OK button is provided in the constructor, this class will
handle setting the sensitivity of the button to match the input
check result. A method on_ok_clicked is provided to determine whether
the dialog can be exited, similar to on_back_clicked for spokes.
    It's not possible (or at least not easy) to prevent a GtkDialog from
returning a response, so the caller of gtk_dialog_run needs to check
whether the input is valid and decide based on that whether to destroy
the dialog or call gtk_dialog_run again.
"""
def __init__(self, ok_button=None):
super().__init__()
self._ok_button = ok_button
def _update_check_status(self, editable, inputcheck):
# If an OK button was provided, set it to sensitive on any change in
# input. This way if a user changes invalid input to valid, they can
# immediately leave the dialog. This also means that there will be a
        # period in which the user is not prevented from leaving with empty input,
# and this condition needs to be checked.
if self._ok_button:
self._ok_button.set_sensitive(True)
return super()._update_check_status(editable, inputcheck)
def set_status(self, inputcheck):
if inputcheck.check_status in (InputCheck.CHECK_OK, InputCheck.CHECK_SILENT):
inputcheck.input_obj.set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY, None)
inputcheck.input_obj.set_icon_tooltip_text(Gtk.EntryIconPosition.SECONDARY, "")
else:
inputcheck.input_obj.set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY,
"dialog-error")
inputcheck.input_obj.set_icon_tooltip_text(Gtk.EntryIconPosition.SECONDARY,
inputcheck.check_status)
# Update the ok button sensitivity based on the check status.
# If the result is CHECK_OK, set_sensitive(True) still needs to be
# called, even though the changed handler above also makes the button
# sensitive. A direct call to update_check_status may have bypassed the
# changed signal.
if self._ok_button:
self._ok_button.set_sensitive(inputcheck.check_status == InputCheck.CHECK_OK)
def on_ok_clicked(self):
"""Return whether the input validation checks allow the dialog to be exited.
Unlike GUISpokeInputCheckHandler.on_back_clicked, it is not expected that
subclasses will implement this method.
"""
failed_check = next(self.failed_checks, None)
if failed_check:
failed_check.input_obj.grab_focus()
return False
else:
return True
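# Illustrative helper (added example, not part of anaconda): the gtk_dialog_run
# loop described in the GUIDialogInputCheckHandler docstring above.  `dialog` is
# assumed to be the Gtk.Dialog owned by `handler`, and OK is assumed to map to
# Gtk.ResponseType.OK; the caller still decides when to destroy the dialog.
def _run_dialog_until_valid(dialog, handler):
    """Re-run a dialog until its input checks pass or the user cancels."""
    while True:
        rc = dialog.run()
        if rc != Gtk.ResponseType.OK or handler.on_ok_clicked():
            return rc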
class GUISpokeInputCheckHandler(GUIInputCheckHandler, metaclass=ABCMeta):
"""Provide InputCheckHandler functionality for graphical spokes.
This class implements set_status to set a message in the warning area of
the spoke window and provides an implementation of on_back_clicked to
prevent the user from exiting a spoke with bad input.
"""
def __init__(self):
super().__init__()
self._checker = None
self._prev_status = None
self._password_kickstarted = False
# return to hub logic
self._can_go_back = False
self._needs_waiver = False
self._waive_clicks = 0
# important UI object instances
self._password_entry = None
self._password_confirmation_entry = None
self._password_bar = None
self._password_label = None
@property
def checker(self):
return self._checker
# Implemented by NormalSpoke
@abstractmethod
def clear_info(self):
pass
# Implemented by GUIObject
@abstractmethod
def set_warning(self, msg):
pass
# Implemented by NormalSpoke
@abstractmethod
def show_warning_message(self, message):
pass
def set_status(self, inputcheck):
"""Update the warning with the input validation error from the first
error message.
"""
failed_check = next(self.failed_checks_with_message, None)
if not failed_check:
self.clear_info()
self._prev_status = None
elif failed_check.check_status != self._prev_status:
self._prev_status = failed_check.check_status
self.clear_info()
self.set_warning(failed_check.check_status)
def remove_placeholder_texts(self):
"""Remove password and confirmation placeholder texts."""
self.password_entry.set_placeholder_text("")
self.password_confirmation_entry.set_placeholder_text("")
@property
def password_bar(self):
"""Password strength bar."""
return self._password_bar
@property
def password_label(self):
"""Short password status label."""
return self._password_label
def set_password_score(self, score):
self.password_bar.set_value(score)
def set_password_status(self, status_message):
self.password_label.set_text(status_message)
@property
def password_entry(self):
"""The password entry widget."""
return self._password_entry
@property
def password(self):
"""Input to be checked.
Content of the input field, etc.
:returns: input to be checked
:rtype: str
"""
return self.password_entry.get_text()
@property
def password_confirmation_entry(self):
"""The password confirmation entry widget."""
return self._password_confirmation_entry
@property
def password_confirmation(self):
"""Content of the input confirmation field.
Note that not all spokes might have a password confirmation field.
:returns: content of the password confirmation field
:rtype: str
"""
return self.password_confirmation_entry.get_text()
@property
def password_kickstarted(self):
"""Reports if the input was initialized from kickstart.
:returns: if the input was initialized from kickstart
:rtype: bool
"""
return self._password_kickstarted
@password_kickstarted.setter
def password_kickstarted(self, value):
self._password_kickstarted = value
@property
def can_go_back(self):
return self._can_go_back
@can_go_back.setter
def can_go_back(self, value):
self._can_go_back = value
@property
def needs_waiver(self):
return self._needs_waiver
@needs_waiver.setter
def needs_waiver(self, value):
self._needs_waiver = value
@property
def waive_clicks(self):
"""Number of waive clicks the user has done to override an input check.
:returns: number of waive clicks
:rtype: int
"""
return self._waive_clicks
@waive_clicks.setter
def waive_clicks(self, clicks):
"""Set number of waive clicks.
:param int clicks: number of waive clicks
"""
self._waive_clicks = clicks
def on_password_changed(self, editable, data=None):
"""Tell checker that the content of the password field changed."""
self.checker.password.content = self.password
def on_password_confirmation_changed(self, editable, data=None):
"""Tell checker that the content of the password confirmation field changed."""
self.checker.password_confirmation.content = self.password_confirmation
def try_to_go_back(self):
"""Check whether the input validation checks allow the spoke to be exited.
Unlike NormalSpoke.on_back_clicked, this function returns a boolean value.
Classes implementing this class should run GUISpokeInputCheckHandler.try_to_go_back,
and if it succeeded, run NormalSpoke.on_back_clicked.
"""
# check if we can go back
if self.can_go_back:
if self.needs_waiver:
# We can proceed but need waiver.
# - this means we can start accumulating thw waive clicks
self.waive_clicks += 1
# we need to have enough waive clicks to go back
if self.waive_clicks == 1:
self.show_warning_message(_(constants.PASSWORD_FINAL_CONFIRM))
elif self.waive_clicks >= 2:
# clear the waive clicks & any messages
self.waive_clicks = 0
self.clear_info()
return True
# we can go back unconditionally
else:
# clear the waive clicks & any messages
self.waive_clicks = 0
self.clear_info()
return True
# we can't get back
return False
| gpl-2.0 | 1,719,857,370,931,019,000 | 35.703209 | 95 | 0.660523 | false | 4.312598 | false | false | false |
OlexandrI/pyside | paste/util/looper.py | 1 | 4017 | """
Helper for looping over sequences, particularly in templates.
Often in a loop in a template it's handy to know what's next up,
previously up, if this is the first or last item in the sequence, etc.
These can be awkward to manage in a normal Python loop, but using the
looper you can get a better sense of the context. Use like::
>>> for loop, item in looper(['a', 'b', 'c']):
    ...     print(loop.number, item)
    ...     if not loop.last:
    ...         print('---')
1 a
---
2 b
---
3 c
"""
import collections
__all__ = ['looper']
class looper(object):
"""
Helper for looping (particularly in templates)
Use this like::
for loop, item in looper(seq):
if loop.first:
...
"""
def __init__(self, seq):
self.seq = seq
def __iter__(self):
return looper_iter(self.seq)
def __repr__(self):
return '<%s for %r>' % (
self.__class__.__name__, self.seq)
class looper_iter(object):
def __init__(self, seq):
self.seq = list(seq)
self.pos = 0
def __iter__(self):
return self
def __next__(self):
if self.pos >= len(self.seq):
raise StopIteration
result = loop_pos(self.seq, self.pos), self.seq[self.pos]
self.pos += 1
return result
class loop_pos(object):
def __init__(self, seq, pos):
self.seq = seq
self.pos = pos
def __repr__(self):
return '<loop pos=%r at %r>' % (
            self.seq[self.pos], self.pos)
def index(self):
return self.pos
index = property(index)
def number(self):
return self.pos + 1
number = property(number)
def item(self):
return self.seq[self.pos]
item = property(item)
    def next(self):
try:
return self.seq[self.pos+1]
except IndexError:
return None
next = property(next)
def previous(self):
if self.pos == 0:
return None
return self.seq[self.pos-1]
previous = property(previous)
def odd(self):
return not self.pos % 2
odd = property(odd)
def even(self):
return self.pos % 2
even = property(even)
def first(self):
return self.pos == 0
first = property(first)
def last(self):
return self.pos == len(self.seq)-1
last = property(last)
def length(self):
return len(self.seq)
length = property(length)
def first_group(self, getter=None):
"""
Returns true if this item is the start of a new group,
where groups mean that some attribute has changed. The getter
can be None (the item itself changes), an attribute name like
``'.attr'``, a function, or a dict key or list index.
"""
if self.first:
return True
return self._compare_group(self.item, self.previous, getter)
def last_group(self, getter=None):
"""
Returns true if this item is the end of a new group,
where groups mean that some attribute has changed. The getter
can be None (the item itself changes), an attribute name like
``'.attr'``, a function, or a dict key or list index.
"""
if self.last:
return True
        return self._compare_group(self.item, self.next, getter)
def _compare_group(self, item, other, getter):
if getter is None:
return item != other
elif (isinstance(getter, str)
and getter.startswith('.')):
getter = getter[1:]
if getter.endswith('()'):
getter = getter[:-2]
return getattr(item, getter)() != getattr(other, getter)()
else:
return getattr(item, getter) != getattr(other, getter)
        elif callable(getter):
return getter(item) != getter(other)
else:
return item[getter] != other[getter]
| lgpl-3.0 | -7,224,671,793,880,922,000 | 25.254902 | 74 | 0.550411 | false | 4.008982 | false | false | false |
rialto-px/openprocurement.tender.twostage | openprocurement/tender/twostage/views/bid_document.py | 1 | 13476 | # -*- coding: utf-8 -*-
from openprocurement.api.models import get_now
from openprocurement.api.utils import (
get_file,
save_tender,
upload_file,
apply_patch,
update_file_content_type,
opresource,
json_view,
context_unpack,
)
from openprocurement.api.validation import (
validate_file_update,
validate_file_upload,
validate_patch_document_data,
)
from openprocurement.tender.twostage.utils import (
bid_financial_documents_resource,
)
from openprocurement.tender.openua.views.bid_document import TenderUaBidDocumentResource
@opresource(name='Tender Two Stage Bid Documents',
collection_path='/tenders/{tender_id}/bids/{bid_id}/documents',
path='/tenders/{tender_id}/bids/{bid_id}/documents/{document_id}',
procurementMethodType='aboveThresholdTS',
description="Tender Two Stage bidder documents")
class TenderTSBidDocumentResource(TenderUaBidDocumentResource):
container = "documents"
view_forbidden_states = ['active.tendering']
view_forbidden_bid_states = ['invalid', 'deleted']
def _doc_access_restricted(self, doc):
is_bid_owner = self.request.authenticated_role == 'bid_owner'
is_tender_owner = self.request.authenticated_role == 'tender_owner'
return doc.confidentiality != 'public' and not (is_bid_owner or is_tender_owner)
@json_view(permission='view_tender')
def collection_get(self):
"""Tender Bid Documents List"""
if self.request.validated['tender_status'] in self.view_forbidden_states and self.request.authenticated_role != 'bid_owner':
self.request.errors.add('body', 'data', 'Can\'t view bid documents in current ({}) tender status'.format(self.request.validated['tender_status']))
self.request.errors.status = 403
return
if self.context.status in self.view_forbidden_bid_states and self.request.authenticated_role != 'bid_owner':
self.request.errors.add('body', 'data', 'Can\'t view bid documents in current ({}) bid status'.format(self.context.status))
self.request.errors.status = 403
return
if self.request.params.get('all', ''):
collection_data = [i.serialize("restricted_view") if self._doc_access_restricted(i) else i.serialize("view")
for i in getattr(self.context, self.container)]
else:
collection_data = sorted(dict([(i.id, i.serialize("restricted_view") if self._doc_access_restricted(i) else i.serialize("view"))
for i in getattr(self.context, self.container)]).values(), key=lambda i: i['dateModified'])
return {'data': collection_data}
@json_view(validators=(validate_file_upload,), permission='edit_bid')
def collection_post(self):
"""Tender Bid Document Upload
"""
if self.request.validated['tender_status'] not in ['active.tendering', 'active.qualification', 'active.awarded']:
self.request.errors.add('body', 'data', 'Can\'t add document in current ({}) tender status'.format(self.request.validated['tender_status']))
self.request.errors.status = 403
return
tender = self.request.validated['tender']
if self.request.validated['tender_status'] == 'active.tendering' and (tender.tenderPeriod.startDate and get_now() < tender.tenderPeriod.startDate or get_now() > tender.tenderPeriod.endDate):
self.request.errors.add('body', 'data', 'Document can be added only during the tendering period: from ({}) to ({}).'.format(tender.tenderPeriod.startDate and tender.tenderPeriod.startDate.isoformat(), tender.tenderPeriod.endDate.isoformat()))
self.request.errors.status = 403
return
if self.request.validated['tender_status'] in ['active.qualification', 'active.awarded'] and \
not [i for i in self.request.validated['tender'].awards if i.status in ['pending', 'active'] and i.bid_id == self.request.validated['bid_id']]:
self.request.errors.add('body', 'data', 'Can\'t add document because award of bid is not in pending or active state')
self.request.errors.status = 403
return
if self.context.status in ['invalid', 'unsuccessful', 'deleted']:
self.request.errors.add('body', 'data', 'Can\'t add document to \'{}\' bid'.format(self.context.status))
self.request.errors.status = 403
return
document = upload_file(self.request)
getattr(self.context, self.container).append(document)
if self.request.validated['tender_status'] == 'active.tendering':
self.request.validated['tender'].modified = False
if save_tender(self.request):
self.LOGGER.info('Created tender bid document {}'.format(document.id),
extra=context_unpack(self.request, {'MESSAGE_ID': 'tender_bid_document_create'}, {'document_id': document.id}))
self.request.response.status = 201
document_route = self.request.matched_route.name.replace("collection_", "")
self.request.response.headers['Location'] = self.request.current_route_url(_route_name=document_route, document_id=document.id, _query={})
return {'data': document.serialize("view")}
@json_view(permission='view_tender')
def get(self):
"""Tender Bid Document Read"""
is_bid_owner = self.request.authenticated_role == 'bid_owner'
if self.request.validated['tender_status'] in self.view_forbidden_states and not is_bid_owner:
self.request.errors.add('body', 'data', 'Can\'t view bid document in current ({}) tender status'.format(self.request.validated['tender_status']))
self.request.errors.status = 403
return
if self.request.validated['bid'].status in self.view_forbidden_bid_states and self.request.authenticated_role != 'bid_owner':
self.request.errors.add('body', 'data', 'Can\'t view bid documents in current ({}) bid status'.format(self.request.validated['bid'].status))
self.request.errors.status = 403
return
document = self.request.validated['document']
if self.request.params.get('download'):
if self._doc_access_restricted(document):
self.request.errors.add('body', 'data', 'Document download forbidden.')
self.request.errors.status = 403
return
else:
return get_file(self.request)
document_data = document.serialize('restricted_view' if self._doc_access_restricted(document) else 'view')
document_data['previousVersions'] = [i.serialize('restricted_view') if self._doc_access_restricted(i) else i.serialize('view')
for i in self.request.validated['documents'] if i.url != document.url]
return {'data': document_data}
@json_view(content_type="application/json", validators=(validate_patch_document_data,), permission='edit_bid')
def patch(self):
"""Tender Bid Document Update"""
if self.request.validated['tender_status'] not in ['active.tendering', 'active.qualification', 'active.awarded']:
self.request.errors.add('body', 'data', 'Can\'t update document in current ({}) tender status'.format(self.request.validated['tender_status']))
self.request.errors.status = 403
return
tender = self.request.validated['tender']
if self.request.validated['tender_status'] == 'active.tendering' and (tender.tenderPeriod.startDate and get_now() < tender.tenderPeriod.startDate or get_now() > tender.tenderPeriod.endDate):
self.request.errors.add('body', 'data', 'Document can be updated only during the tendering period: from ({}) to ({}).'.format(tender.tenderPeriod.startDate and tender.tenderPeriod.startDate.isoformat(), tender.tenderPeriod.endDate.isoformat()))
self.request.errors.status = 403
return
if self.request.validated['tender_status'] in ['active.qualification', 'active.awarded'] and \
not [i for i in self.request.validated['tender'].awards if i.status in ['pending', 'active'] and i.bid_id == self.request.validated['bid_id']]:
self.request.errors.add('body', 'data', 'Can\'t update document because award of bid is not in pending or active state')
self.request.errors.status = 403
return
if self.request.validated['tender_status'] != 'active.tendering' and 'confidentiality' in self.request.validated['data']:
if self.context.confidentiality != self.request.validated['data']['confidentiality']:
self.request.errors.add('body', 'data', 'Can\'t update document confidentiality in current ({}) tender status'.format(self.request.validated['tender_status']))
self.request.errors.status = 403
return
bid = getattr(self.context, "__parent__")
if bid and bid.status in ['invalid', 'unsuccessful', 'deleted']:
self.request.errors.add('body', 'data', 'Can\'t update document data for \'{}\' bid'.format(bid.status))
self.request.errors.status = 403
return
if self.request.validated['tender_status'] == 'active.tendering':
self.request.validated['tender'].modified = False
if apply_patch(self.request, src=self.request.context.serialize()):
update_file_content_type(self.request)
self.LOGGER.info('Updated tender bid document {}'.format(self.request.context.id),
extra=context_unpack(self.request, {'MESSAGE_ID': 'tender_bid_document_patch'}))
return {'data': self.request.context.serialize("view")}
@json_view(validators=(validate_file_update,), permission='edit_bid')
def put(self):
"""Tender Bid Document Update"""
if self.request.validated['tender_status'] not in ['active.tendering', 'active.qualification', 'active.awarded']:
self.request.errors.add('body', 'data', 'Can\'t update document in current ({}) tender status'.format(self.request.validated['tender_status']))
self.request.errors.status = 403
return
tender = self.request.validated['tender']
if self.request.validated['tender_status'] == 'active.tendering' and (tender.tenderPeriod.startDate and get_now() < tender.tenderPeriod.startDate or get_now() > tender.tenderPeriod.endDate):
self.request.errors.add('body', 'data', 'Document can be updated only during the tendering period: from ({}) to ({}).'.format(tender.tenderPeriod.startDate and tender.tenderPeriod.startDate.isoformat(), tender.tenderPeriod.endDate.isoformat()))
self.request.errors.status = 403
return
if self.request.validated['tender_status'] in ['active.qualification', 'active.awarded'] and \
not [i for i in self.request.validated['tender'].awards if i.status in ['pending', 'active'] and i.bid_id == self.request.validated['bid_id']]:
self.request.errors.add('body', 'data', 'Can\'t update document because award of bid is not in pending or active state')
self.request.errors.status = 403
return
if self.request.validated['tender_status'] != 'active.tendering' and 'confidentiality' in self.request.validated.get('data', {}):
if self.context.confidentiality != self.request.validated['data']['confidentiality']:
self.request.errors.add('body', 'data', 'Can\'t update document confidentiality in current ({}) tender status'.format(self.request.validated['tender_status']))
self.request.errors.status = 403
return
bid = getattr(self.context, "__parent__")
if bid and bid.status in ['invalid', 'unsuccessful', 'deleted']:
self.request.errors.add('body', 'data', 'Can\'t update document in \'{}\' bid'.format(bid.status))
self.request.errors.status = 403
return
document = upload_file(self.request)
getattr(self.request.validated['bid'], self.container).append(document)
if self.request.validated['tender_status'] == 'active.tendering':
self.request.validated['tender'].modified = False
if save_tender(self.request):
self.LOGGER.info('Updated tender bid document {}'.format(self.request.context.id),
extra=context_unpack(self.request, {'MESSAGE_ID': 'tender_bid_document_put'}))
return {'data': document.serialize("view")}
@bid_financial_documents_resource(
name='Tender Two Stage Bid Financial Documents',
collection_path='/tenders/{tender_id}/bids/{bid_id}/financial_documents',
path='/tenders/{tender_id}/bids/{bid_id}/financial_documents/{document_id}',
procurementMethodType='aboveThresholdTS',
description="Tender Two Stage bidder financial documents")
class TenderTSBidFinancialDocumentResource(TenderTSBidDocumentResource):
""" Tender Two Stage Bid Financial Documents """
container = "financialDocuments"
view_forbidden_states = ['active.tendering', 'active.pre-qualification',
'active.pre-qualification.stand-still', 'active.auction']
view_forbidden_bid_states = ['invalid', 'deleted', 'invalid.pre-qualification', 'unsuccessful']
| apache-2.0 | 8,687,690,805,947,830,000 | 64.736585 | 256 | 0.65279 | false | 3.872414 | false | false | false |
lamogui/ogre_blender_importer | OgreMeshFileFormat.py | 1 | 2168 | from enum import IntEnum;
class OgreMeshChunkID(IntEnum):
"""
Definition of the OGRE .mesh file format
.mesh files are binary files (for read efficiency at runtime) and are arranged into chunks
of data, very like 3D Studio's format.
A chunk always consists of:
unsigned short CHUNK_ID : one of the following chunk ids identifying the chunk
unsigned long LENGTH : length of the chunk in bytes, including this header
void* DATA : the data, which may contain other sub-chunks (various data types)
A .mesh file can contain both the definition of the Mesh itself, and optionally the definitions
of the materials is uses (although these can be omitted, if so the Mesh assumes that at runtime the
Materials referred to by name in the Mesh are loaded/created from another source)
A .mesh file only contains a single mesh, which can itself have multiple submeshes.
"""
M_HEADER = 0x1000;
M_MESH = 0x3000;
M_SUBMESH = 0x4000;
M_SUBMESH_OPERATION = 0x4010;
M_SUBMESH_BONE_ASSIGNMENT = 0x4100;
M_SUBMESH_TEXTURE_ALIAS = 0x4200;
M_GEOMETRY = 0x5000;
M_GEOMETRY_VERTEX_DECLARATION = 0x5100;
M_GEOMETRY_VERTEX_ELEMENT = 0x5110;
M_GEOMETRY_VERTEX_BUFFER = 0x5200;
M_GEOMETRY_VERTEX_BUFFER_DATA = 0x5210;
M_MESH_SKELETON_LINK = 0x6000;
M_MESH_BONE_ASSIGNMENT = 0x7000;
M_MESH_LOD_LEVEL = 0x8000;
M_MESH_LOD_USAGE = 0x8100;
M_MESH_LOD_MANUAL = 0x8110;
M_MESH_LOD_GENERATED = 0x8120;
M_MESH_BOUNDS = 0x9000;
M_SUBMESH_NAME_TABLE = 0xA000;
M_SUBMESH_NAME_TABLE_ELEMENT = 0xA100;
M_EDGE_LISTS = 0xB000;
M_EDGE_LIST_LOD = 0xB100;
M_EDGE_GROUP = 0xB110;
M_POSES = 0xC000;
M_POSE = 0xC100;
M_POSE_VERTEX = 0xC111;
M_ANIMATIONS = 0xD000;
M_ANIMATION = 0xD100;
M_ANIMATION_BASEINFO = 0xD105;
M_ANIMATION_TRACK = 0xD110;
M_ANIMATION_MORPH_KEYFRAME = 0xD111;
M_ANIMATION_POSE_KEYFRAME = 0xD112;
M_ANIMATION_POSE_REF = 0xD113;
M_TABLE_EXTREMES = 0xE000;
M_GEOMETRY_NORMALS = 0x5100;
M_GEOMETRY_COLOURS = 0x5200;
M_GEOMETRY_TEXCOORDS = 0x5300;
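
# --- Illustrative reader sketch (not part of the original Ogre sources) ---------------
# The class docstring above describes every chunk as a 16-bit CHUNK_ID followed by a
# 32-bit LENGTH in bytes (including this 6-byte header) and then the payload. A minimal
# header reader could look like the function below; little-endian byte order is an
# assumption here, and a chunk id that is not listed in the enum raises ValueError.
import struct

def read_chunk_header(stream):
    """Read one (OgreMeshChunkID, length) pair from a binary .mesh stream, or None at EOF."""
    header = stream.read(6)
    if len(header) < 6:
        return None
    chunk_id, length = struct.unpack('<HI', header)
    return OgreMeshChunkID(chunk_id), length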
| mit | 5,566,725,582,043,546,000 | 37.035088 | 106 | 0.674815 | false | 2.986226 | false | false | false |
gbd-consult/CartoCSS-Export | CartoCSSExport/ce/error.py | 1 | 1066 | """Error codes."""
#: No layers in this project (loading error?)
EMPTY_PROJECT = 'EMPTY_PROJECT'
#: No converter for this Qgis class.
CLASS_NOT_IMPLEMENTED = 'CLASS_NOT_IMPLEMENTED'
#: No converter for this Qgis property
PROP_NOT_IMPLEMENTED = 'PROP_NOT_IMPLEMENTED'
#: No converter for this Qgis data provider
DATA_PROVIDER_NOT_IMPLEMENTED = 'DATA_PROVIDER_NOT_IMPLEMENTED'
#: No converter for this Qgis measurement unit
UNIT_NOT_IMPLEMENTED = 'UNIT_NOT_IMPLEMENTED'
#: No converter for this Qgis value
VALUE_NOT_IMPLEMENTED = 'VALUE_NOT_IMPLEMENTED'
#: Expression is not supported in CartoCSS
EXPRESSION_NOT_SUPPORTED = 'EXPRESSION_NOT_SUPPORTED'
#: Empty expression
EMPTY_EXPRESSION = 'EMPTY_EXPRESSION'
#: Invalid number
INVALID_NUMBER = 'INVALID_NUMBER'
#: Invalid color specification
INVALID_COLOR = 'INVALID_COLOR'
#: Invalid field specification, only identifier fields are supported
INVALID_FIELD = 'INVALID_FIELD'
#: Unknown CSS property
INVALID_CSS_PROP = 'INVALID_CSS_PROP'
#: expression too complex
COMPLEX_EXPRESSION = 'COMPLEX_EXPRESSION'
| gpl-2.0 | 356,750,921,144,582,340 | 25.65 | 68 | 0.763602 | false | 3.384127 | false | false | false |
wolfbeacon/wolfbeacon-core-api | api/migrations/0008_auto_20171230_0919.py | 1 | 2732 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-12-30 09:19
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0007_mentor_organizer_volunteer'),
]
operations = [
migrations.AlterField(
model_name='event',
name='location',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='event',
name='tagline',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='hackathon',
name='location',
field=models.CharField(max_length=250),
),
migrations.AlterField(
model_name='hackathon',
name='name',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='hackathon',
name='shipping_address',
field=models.CharField(max_length=150),
),
migrations.AlterField(
model_name='hackathon',
name='university_name',
field=models.CharField(max_length=100, null=True),
),
migrations.AlterField(
model_name='user',
name='about_me',
field=models.CharField(max_length=1000, null=True),
),
migrations.AlterField(
model_name='user',
name='city',
field=models.CharField(max_length=75),
),
migrations.AlterField(
model_name='user',
name='country',
field=models.CharField(max_length=75),
),
migrations.AlterField(
model_name='user',
name='first_name',
field=models.CharField(max_length=50),
),
migrations.AlterField(
model_name='user',
name='last_name',
field=models.CharField(max_length=50),
),
migrations.AlterField(
model_name='user',
name='major_of_study',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='user',
name='school_last_attended',
field=models.CharField(max_length=100, null=True),
),
migrations.AlterField(
model_name='user',
name='special_accommodations',
field=models.CharField(max_length=250, null=True),
),
migrations.AlterField(
model_name='user',
name='street_address',
field=models.CharField(max_length=100, null=True),
),
]
| gpl-3.0 | -4,158,574,478,643,937,000 | 29.355556 | 63 | 0.528917 | false | 4.435065 | false | false | false |
mic4ael/indico | indico/modules/events/abstracts/lists.py | 1 | 12697 | # This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from collections import OrderedDict
from operator import attrgetter
from flask import flash, request, session
from sqlalchemy.orm import joinedload, subqueryload
from indico.core.db import db
from indico.modules.events.abstracts.models.abstracts import Abstract, AbstractState
from indico.modules.events.abstracts.models.fields import AbstractFieldValue
from indico.modules.events.abstracts.models.reviews import AbstractReview
from indico.modules.events.contributions.models.fields import ContributionField
from indico.modules.events.tracks.models.tracks import Track
from indico.modules.events.util import ListGeneratorBase
from indico.util.i18n import _
from indico.web.flask.templating import get_template_module
class AbstractListGeneratorBase(ListGeneratorBase):
"""Listing and filtering actions in an abstract list."""
show_contribution_fields = True
def __init__(self, event):
super(AbstractListGeneratorBase, self).__init__(event)
self.default_list_config = {
'items': (),
'filters': {'fields': {}, 'items': {}, 'extra': {}}
}
track_empty = {None: _('No track')}
type_empty = {None: _('No type')}
track_choices = OrderedDict((unicode(t.id), t.title) for t in sorted(self.event.tracks,
key=attrgetter('title')))
type_choices = OrderedDict((unicode(t.id), t.name) for t in sorted(self.event.contribution_types,
key=attrgetter('name')))
self.static_items = OrderedDict([
('state', {'title': _('State'), 'filter_choices': {state.value: state.title for state in AbstractState}}),
('submitter', {'title': _('Submitter')}),
('authors', {'title': _('Primary authors')}),
('accepted_track', {'title': _('Accepted track'),
'filter_choices': OrderedDict(track_empty.items() + track_choices.items())}),
('submitted_for_tracks', {'title': _('Submitted for tracks'),
'filter_choices': OrderedDict(track_empty.items() + track_choices.items())}),
('reviewed_for_tracks', {'title': _('Reviewed for tracks'),
'filter_choices': OrderedDict(track_empty.items() + track_choices.items())}),
('accepted_contrib_type', {'title': _('Accepted type'),
'filter_choices': OrderedDict(type_empty.items() + type_choices.items())}),
('submitted_contrib_type', {'title': _('Submitted type'),
'filter_choices': OrderedDict(type_empty.items() + type_choices.items())}),
('score', {'title': _('Score')}),
('submitted_dt', {'title': _('Submission date')}),
('modified_dt', {'title': _('Modification date')})
])
self.extra_filters = {}
self.list_config = self._get_config()
def _get_static_columns(self, ids):
"""
Retrieve information needed for the header of the static columns.
:return: a list of {'id': ..., 'caption': ...} dicts
"""
return [{'id': id_, 'caption': self.static_items[id_]['title']} for id_ in self.static_items if id_ in ids]
def get_all_contribution_fields(self):
"""Return the list of contribution fields for the event"""
return self.event.contribution_fields if self.show_contribution_fields else []
def _get_sorted_contribution_fields(self, item_ids):
"""Return the contribution fields ordered by their position in the abstract form."""
if not item_ids or not self.show_contribution_fields:
return []
return (ContributionField.query
.with_parent(self.event)
.filter(ContributionField.id.in_(item_ids))
.order_by(ContributionField.position)
.all())
def _get_filters_from_request(self):
filters = super(AbstractListGeneratorBase, self)._get_filters_from_request()
for field in self.event.contribution_fields:
if field.field_type == 'single_choice':
options = request.form.getlist('field_{}'.format(field.id))
if options:
filters['fields'][unicode(field.id)] = options
return filters
def _build_query(self):
return (Abstract.query
.with_parent(self.event)
.options(joinedload('submitter'),
joinedload('accepted_track'),
joinedload('accepted_contrib_type'),
joinedload('submitted_contrib_type'),
joinedload('contribution').load_only('id', 'event_id'),
subqueryload('field_values'),
subqueryload('submitted_for_tracks'),
subqueryload('reviewed_for_tracks'),
subqueryload('person_links'),
subqueryload('reviews').joinedload('ratings'))
.order_by(Abstract.friendly_id))
def _filter_list_entries(self, query, filters):
criteria = []
field_filters = filters.get('fields')
item_filters = filters.get('items')
extra_filters = filters.get('extra')
if not (field_filters or item_filters or extra_filters):
return query
if field_filters:
for contribution_type_id, field_values in field_filters.iteritems():
criteria.append(Abstract.field_values.any(db.and_(
AbstractFieldValue.contribution_field_id == contribution_type_id,
AbstractFieldValue.data.op('#>>')('{}').in_(field_values)
)))
if item_filters:
static_filters = {
'accepted_track': Abstract.accepted_track_id,
'accepted_contrib_type': Abstract.accepted_contrib_type_id,
'submitted_contrib_type': Abstract.submitted_contrib_type_id,
'submitted_for_tracks': Abstract.submitted_for_tracks,
'reviewed_for_tracks': Abstract.reviewed_for_tracks
}
for key, column in static_filters.iteritems():
ids = set(item_filters.get(key, ()))
if not ids:
continue
column_criteria = []
if '_for_tracks' in key:
if None in ids:
column_criteria.append(~column.any())
ids.discard(None)
if ids:
column_criteria.append(column.any(Track.id.in_(ids)))
else:
if None in ids:
column_criteria.append(column.is_(None))
ids.discard(None)
if ids:
column_criteria.append(column.in_(ids))
criteria.append(db.or_(*column_criteria))
if 'state' in item_filters:
states = [AbstractState(int(state)) for state in item_filters['state']]
criteria.append(Abstract.state.in_(states))
if extra_filters:
if extra_filters.get('multiple_tracks'):
submitted_for_count = (db.select([db.func.count()])
.as_scalar()
.where(Abstract.submitted_for_tracks.prop.primaryjoin))
criteria.append(submitted_for_count > 1)
if extra_filters.get('comments'):
criteria.append(Abstract.submission_comment != '')
return query.filter(db.and_(*criteria))
def get_list_kwargs(self):
list_config = self._get_config()
abstracts_query = self._build_query()
total_entries = abstracts_query.count()
abstracts = self._filter_list_entries(abstracts_query, list_config['filters']).all()
dynamic_item_ids, static_item_ids = self._split_item_ids(list_config['items'], 'dynamic')
static_columns = self._get_static_columns(static_item_ids)
dynamic_columns = self._get_sorted_contribution_fields(dynamic_item_ids)
return {
'abstracts': abstracts,
'total_abstracts': total_entries,
'static_columns': static_columns,
'dynamic_columns': dynamic_columns,
'filtering_enabled': total_entries != len(abstracts)
}
def get_list_export_config(self):
list_config = self._get_config()
static_item_ids, dynamic_item_ids = self._split_item_ids(list_config['items'], 'static')
return {
'static_item_ids': static_item_ids,
'dynamic_items': self._get_sorted_contribution_fields(dynamic_item_ids)
}
def render_list(self, abstract=None):
list_kwargs = self.get_list_kwargs()
tpl = get_template_module('events/abstracts/management/_abstract_list.html')
filtering_enabled = list_kwargs.pop('filtering_enabled')
tpl_lists = get_template_module('events/management/_lists.html')
filter_statistics = tpl_lists.render_displayed_entries_fragment(len(list_kwargs['abstracts']),
list_kwargs['total_abstracts'])
return {
'html': tpl.render_abstract_list(**list_kwargs),
'filtering_enabled': filtering_enabled,
'filter_statistics': filter_statistics,
'hide_abstract': abstract not in list_kwargs['abstracts'] if abstract else None
}
def flash_info_message(self, abstract):
flash(_("The abstract '{}' is not displayed in the list due to the enabled filters")
.format(abstract.title), 'info')
class AbstractListGeneratorManagement(AbstractListGeneratorBase):
"""Listing and filtering actions in the abstract list in the management view"""
list_link_type = 'abstract_management'
endpoint = '.manage_abstract_list'
def __init__(self, event):
super(AbstractListGeneratorManagement, self).__init__(event)
self.default_list_config['items'] = ('submitted_contrib_type', 'accepted_contrib_type', 'state')
if event.tracks:
self.default_list_config['items'] += ('submitted_for_tracks', 'reviewed_for_tracks', 'accepted_track')
self.extra_filters = OrderedDict([
('multiple_tracks', {'title': _('Proposed for multiple tracks'), 'type': 'bool'}),
('comments', {'title': _('Must have comments'), 'type': 'bool'})
])
class AbstractListGeneratorDisplay(AbstractListGeneratorBase):
"""Listing and filtering actions in the abstract list in the display view"""
list_link_type = 'abstract_display'
endpoint = '.display_reviewable_track_abstracts'
show_contribution_fields = False
def __init__(self, event, track):
super(AbstractListGeneratorDisplay, self).__init__(event)
self.track = track
self.default_list_config['items'] = ('accepted_contrib_type', 'state')
items = {'submitted_contrib_type', 'submitter', 'accepted_contrib_type', 'state'}
if self.track.can_convene(session.user):
items.add('score')
self.static_items = OrderedDict((key, value)
for key, value in self.static_items.iteritems()
if key in items)
def _build_query(self):
return (super(AbstractListGeneratorDisplay, self)._build_query()
.filter(Abstract.state != AbstractState.invited,
Abstract.reviewed_for_tracks.contains(self.track)))
def get_user_reviewed_abstracts_for_track(self, user, track):
return (Abstract.query
.join(Abstract.reviews)
.filter(AbstractReview.user == user,
Abstract.state != AbstractState.invited,
Abstract.reviewed_for_tracks.contains(track),
~Abstract.is_deleted)
.all())
def get_list_kwargs(self):
kwargs = super(AbstractListGeneratorDisplay, self).get_list_kwargs()
kwargs['reviewed_abstracts'] = self.get_user_reviewed_abstracts_for_track(session.user, self.track)
return kwargs
| mit | -2,068,415,674,711,308,500 | 47.277567 | 118 | 0.579349 | false | 4.394946 | true | false | false |
symbolicdata/code | src/sdeval/classes/templates/comp/GB_Z_lp/Maple/template.py | 1 | 1407 | """
This is the template for the computation problem of computing a Groebner basis of an ideal
generated by a finite set of polynomials with integer coefficients (commutative). It creates
code for the computer algebra system Maple.
.. moduleauthor:: Albert Heinle <[email protected]>
"""
#--------------------------------------------------
#---------------The template-----------------------
#--------------------------------------------------
def generateCode(vars, basis):
"""
The main function generating the Maple code for the computation of
the Groebner basis given the input variables.
:param vars: A list of variables used in the IntPS-System
:type vars: list
    :param basis: The polynomials forming a basis of the IntPS-System. This input is not checked for
                  polynomials that use variables outside the given list of variables.
:type basis: list
"""
result = "\
with(Groebner):\n\
Ideal := {%s}:\n\
ordering := plex(%s):\n\
B := Basis(Ideal, ordering):\n\
printf(\"=====Solution Begin=====\");\n\
printf(\"%%a\\n\",B);\n\
printf(\"=====Solution End=====\");\n\
quit;\
" % (",".join(basis),
",".join(vars))
return result
#--------------------------------------------------
#----------------Help Functions--------------------
#--------------------------------------------------
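
if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original SDEval template): the variable
    # names and polynomials below are hypothetical sample inputs for an IntPS system.
    sample_vars = ["x", "y"]
    sample_basis = ["x^2+y^2-1", "x*y-1"]
    print(generateCode(sample_vars, sample_basis))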
| gpl-3.0 | 8,111,292,419,854,983,000 | 35.076923 | 116 | 0.518834 | false | 4.410658 | false | false | false |
schinmayee/metric-learning | losses.py | 1 | 1953 | import torch
import torch.nn as nn
import torch.nn.functional as F
def SimpleHingeLoss(dista, distb, distc, target, margin, hard_triplet=False):
if hard_triplet:
dist_neg = torch.cat([distb, distc], dim=1)
dist_neg = torch.min(dist_neg, dim=1)[0]
else:
dist_neg = distb
return nn.MarginRankingLoss(margin = margin)(dista, dist_neg, target)
def SimpleSquareHingeLoss(dista, distb, distc, target, margin, hard_triplet=False):
if hard_triplet:
dist_neg = torch.cat([distb, distc], dim=1)
dist_neg = torch.min(dist_neg, dim=1)[0]
else:
dist_neg = distb
return nn.MarginRankingLoss(margin = margin)(torch.pow(dista, 2), torch.pow(dist_neg, 2), target)
def RatioLoss(dista, distb, distc, target, margin, hard_triplet=False):
if hard_triplet:
dist_neg = torch.cat([distb, distc], dim=1)
dist_neg = torch.min(dist_neg, dim=1)[0]
else:
dist_neg = distb
ep = torch.exp(dista)
en = torch.exp(dist_neg)
t1 = ep/(ep+en)
t2 = en/(ep+en)
loss = torch.mean(torch.pow(t1, 2) + 1 - torch.pow(t2, 2))
return loss
def EmbHingeLoss(emba, embb, embc, margin, target):
triplet_loss = nn.functional.triplet_margin_loss(
emba, embb, embc, margin=margin)
return triplet_loss
def EmbSquareHingeLoss(emba, embb, embc, margin, target):
dist_pos = F.pairwise_distance(emba, embb, 2)
dist_neg = F.pairwise_distance(emba, embc, 2)
triplet_loss = nn.MarginRankingLoss(margin = margin)(torch.pow(dist_pos, 2), torch.pow(dist_neg, 2), target)
return triplet_loss
def EmbSoftHingeLoss(emba, embb, embc, margin, target):
dist_pos = F.pairwise_distance(emba, embb, 2)
dist_neg1 = F.pairwise_distance(emba, embc, 2)
dist_neg2 = F.pairwise_distance(embb, embc, 2)
dist_neg_s = (torch.exp(margin - dist_neg1) + torch.exp(margin - dist_neg2))
loss = torch.mean(torch.log(dist_neg_s) + dist_pos)
return loss
| mit | -4,633,895,436,246,591,000 | 37.294118 | 112 | 0.65745 | false | 2.793991 | false | false | false |
AntonSax/plantcv | utils/util-avg_background_img.py | 1 | 1578 | #!/usr/bin/env python
import argparse
import numpy as np
import sys, os
from os import listdir
import plantcv as pcv
import datetime
### Parse command-line arguments
def options():
    parser = argparse.ArgumentParser(description="Average all images in a directory into a single image.")
parser.add_argument("-d", "--directory", help="path to directory of images to average.")
parser.add_argument("-o", "--outdir", help="Output directory.", required=False)
args = parser.parse_args()
return args
### Functions
def average_all_img(directory,outdir):
allfiles=os.listdir(directory)
path=str(directory)
allpaths=[]
for files in allfiles:
        p=os.path.join(path,str(files))
allpaths.append(p)
img, path, filename = pcv.readimage(allpaths[0])
n=len(allpaths)
if len(np.shape(img))==3:
ix,iy,iz=np.shape(img)
arr=np.zeros((ix,iy,iz),np.float)
    else:
        # grayscale image: only two dimensions, so build a 2-D accumulator
        ix,iy=np.shape(img)
        arr=np.zeros((ix,iy),np.float)
# Build up average pixel intensities, casting each image as an array of floats
for i,paths in enumerate(allpaths):
img,path,filename=pcv.readimage(allpaths[i])
imarr=np.array(img,dtype=np.float)
arr=arr+imarr/n
#Round values in array and cast as 8-bit integer
arr=np.array(np.round(arr),dtype=np.uint8)
    pcv.print_image(arr, os.path.join(str(outdir), "average_"+str(allfiles[0])))
### Main pipeline
def main():
# Get options
args = options()
average_all_img(args.directory, args.outdir)
if __name__ == '__main__':
main() | mit | 1,587,402,601,414,127,600 | 22.924242 | 111 | 0.652725 | false | 3.379015 | false | false | false |
tatyankaZSGX/addressbook | fixture/application.py | 1 | 1228 | __author__ = 'ZSGX'
from selenium import webdriver
from fixture.Session import SessionHelper
from fixture.group import GroupHelper
from fixture.contact import ContactHelper
class Application:
def __init__(self, browser, homeurl):
if browser == "firefox":
self.wd = webdriver.Firefox()
elif browser == "chrome":
self.wd = webdriver.Chrome()
elif browser == "ie":
self.wd = webdriver.Ie()
else:
raise ValueError("Browser %s is not recognized" % browser)
self.session = SessionHelper(self)
self.group = GroupHelper(self)
self.contact = ContactHelper(self)
self.homeurl = homeurl
def is_valid(self):
try:
self.wd.current_url
return True
except:
return False
def open_home_page(self):
wd = self.wd
wd.get(self.homeurl)
def destroy(self):
self.wd.quit()
def edit_field(self, field_name, text):
wd = self.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text) | apache-2.0 | -6,081,401,815,482,364,000 | 27.581395 | 70 | 0.588762 | false | 3.987013 | false | false | false |
akrherz/iem | htdocs/plotting/auto/scripts100/p151.py | 1 | 13587 | """Period deltas"""
import datetime
from collections import OrderedDict
from geopandas import read_postgis
import numpy as np
from pyiem.plot import MapPlot, centered_bins, get_cmap
from pyiem.util import get_autoplot_context, get_dbconn
from pyiem.exceptions import NoDataFound
PDICT = {
"state": "State Level Maps (select state)",
"cornbelt": "Corn Belt",
"midwest": "Midwest Map",
}
PDICT2 = {
"both": "Show both contour and values",
"values": "Show just the values",
"contour": "Show just the contour",
}
PDICT3 = OrderedDict(
[
("total_precip", "Annual Precipitation"),
("gdd", "Growing Degree Days (base=50/86)"),
("sdd", "Stress Degree Days (High > 86)"),
("avg_temp", "Average Temperature"),
("avg_high", "Average High Temperature"),
("avg_low", "Average Low Temperature"),
("days_high_above", "Days with High Temp At or Above [Threshold]"),
("days_high_below", "Days with High Temp Below [Threshold]"),
("days_low_above", "Days with Low Temp At or Above [Threshold]"),
("days_low_below", "Days with Low Temp Below [Threshold]"),
]
)
PDICT4 = {
"english": "English",
"metric": "Metric",
}
UNITS = {
"total_precip": "inch",
"gdd": "F",
"sdd": "F",
"avg_temp": "F",
"avg_high": "F",
"avg_low": "F",
"days_high_above": "days",
"days_high_below": "days",
"days_low_above": "days",
"days_low_below": "days",
}
MUNITS = {
"total_precip": "mm",
"gdd": "C",
"sdd": "C",
"avg_temp": "C",
"avg_high": "C",
"avg_low": "C",
"days_high_above": "days",
"days_high_below": "days",
"days_low_above": "days",
"days_low_below": "days",
}
PRECISION = {"total_precip": 2}
MDICT = OrderedDict(
[
("all", "Annual"),
("spring", "Spring (MAM)"),
("fall", "Fall (SON)"),
("winter", "Winter (DJF)"),
("summer", "Summer (JJA)"),
("gs", "1 May to 30 Sep"),
("jan", "January"),
("feb", "February"),
("mar", "March"),
("apr", "April"),
("may", "May"),
("jun", "June"),
("jul", "July"),
("aug", "August"),
("sep", "September"),
("oct", "October"),
("nov", "November"),
("dec", "December"),
]
)
OPT1 = {"diff": "Plot Difference", "p1": "Just Plot Period One Values"}
def get_description():
""" Return a dict describing how to call this plotter """
desc = dict()
desc["data"] = True
desc[
"description"
    ] = """This map produces an analysis of yearly averages. You
    can either plot the difference between two periods of years or simply
    plot the averages over the first period. This app is meant to address the question
about changes in climate or just to produce a simple plot of yearly
averages over some period of years."""
desc["arguments"] = [
dict(
type="select",
name="month",
default="all",
options=MDICT,
label="Show Monthly or Annual Averages",
),
dict(
type="select",
name="sector",
default="state",
options=PDICT,
label="Select Map Region",
),
dict(
type="state",
name="state",
default="IA",
label="Select State to Plot (when appropriate)",
),
dict(
type="select",
name="opt",
options=PDICT2,
default="both",
label="Map Plot/Contour View Option",
),
dict(
type="select",
name="var",
options=PDICT3,
default="total_precip",
label="Which Variable to Plot",
),
dict(
type="select",
name="r",
options=PDICT4,
default="english",
label="Which Unit System to Use (GDD/SDD always english)",
),
dict(
type="float",
name="threshold",
default=-99,
label="Enter threshold (where appropriate)",
),
dict(
type="select",
options=OPT1,
default="diff",
name="opt1",
label="Period plotting option",
),
dict(
type="year",
name="p1syear",
default=1951,
label="Start Year (inclusive) of Period One:",
),
dict(
type="year",
name="p1eyear",
default=1980,
label="End Year (inclusive) of Period One:",
),
dict(
type="year",
name="p2syear",
default=1981,
label="Start Year (inclusive) of Period Two:",
),
dict(
type="year",
name="p2eyear",
default=2010,
label="End Year (inclusive) of Period Two:",
),
dict(
type="cmap", name="cmap", default="seismic_r", label="Color Ramp:"
),
]
return desc
def get_data(ctx):
"""Get the data, please."""
pgconn = get_dbconn("coop")
state = ctx["state"]
sector = ctx["sector"]
threshold = ctx["threshold"]
month = ctx["month"]
p1syear = ctx["p1syear"]
p1eyear = ctx["p1eyear"]
p1years = p1eyear - p1syear + 1
p2syear = ctx["p2syear"]
p2eyear = ctx["p2eyear"]
p2years = p2eyear - p2syear + 1
if month == "all":
months = range(1, 13)
elif month == "fall":
months = [9, 10, 11]
elif month == "winter":
months = [12, 1, 2]
elif month == "spring":
months = [3, 4, 5]
elif month == "summer":
months = [6, 7, 8]
elif month == "gs":
months = [5, 6, 7, 8, 9]
else:
ts = datetime.datetime.strptime("2000-" + month + "-01", "%Y-%b-%d")
        # a single month still works with the "month in %s" tuple trick below
months = [ts.month]
table = "alldata"
if sector == "state":
# optimization
table = f"alldata_{state}"
hcol = "high"
lcol = "low"
pcol = "precip"
if ctx["r"] == "metric":
hcol = "f2c(high)"
lcol = "f2c(low)"
pcol = "precip * 25.4"
df = read_postgis(
f"""
WITH period1 as (
SELECT station, year, sum({pcol}) as total_precip,
avg(({hcol}+{lcol}) / 2.) as avg_temp, avg({hcol}) as avg_high,
avg({lcol}) as avg_low,
sum(gddxx(50, 86, high, low)) as sum_gdd,
sum(case when high > 86 then high - 86 else 0 end) as sum_sdd,
sum(case when {hcol} >= %s then 1 else 0 end) as days_high_above,
sum(case when {hcol} < %s then 1 else 0 end) as days_high_below,
sum(case when {lcol} >= %s then 1 else 0 end) as days_low_above,
sum(case when {lcol} < %s then 1 else 0 end) as days_low_below
from {table} WHERE year >= %s and year <= %s
and month in %s GROUP by station, year),
period2 as (
SELECT station, year, sum({pcol}) as total_precip,
avg(({hcol}+{lcol}) / 2.) as avg_temp, avg({hcol}) as avg_high,
avg({lcol}) as avg_low,
sum(gddxx(50, 86, high, low)) as sum_gdd,
sum(case when high > 86 then high - 86 else 0 end) as sum_sdd,
sum(case when {hcol} >= %s then 1 else 0 end) as days_high_above,
sum(case when {hcol} < %s then 1 else 0 end) as days_high_below,
sum(case when {lcol} >= %s then 1 else 0 end) as days_low_above,
sum(case when {lcol} < %s then 1 else 0 end) as days_low_below
from {table} WHERE year >= %s and year <= %s
and month in %s GROUP by station, year),
p1agg as (
SELECT station, avg(total_precip) as precip,
avg(avg_temp) as avg_temp, avg(avg_high) as avg_high,
avg(avg_low) as avg_low, avg(sum_sdd) as sdd,
avg(sum_gdd) as gdd,
avg(days_high_above) as avg_days_high_above,
avg(days_high_below) as avg_days_high_below,
avg(days_low_above) as avg_days_low_above,
avg(days_low_below) as avg_days_low_below,
count(*) as count
from period1 GROUP by station),
p2agg as (
SELECT station, avg(total_precip) as precip,
avg(avg_temp) as avg_temp, avg(avg_high) as avg_high,
avg(avg_low) as avg_low, avg(sum_sdd) as sdd,
avg(sum_gdd) as gdd,
avg(days_high_above) as avg_days_high_above,
avg(days_high_below) as avg_days_high_below,
avg(days_low_above) as avg_days_low_above,
avg(days_low_below) as avg_days_low_below,
count(*) as count
from period2 GROUP by station),
agg as (
SELECT p2.station,
p2.precip as p2_total_precip,
p1.precip as p1_total_precip,
p2.gdd as p2_gdd, p1.gdd as p1_gdd,
p2.sdd as p2_sdd, p1.sdd as p1_sdd,
p2.avg_temp as p2_avg_temp, p1.avg_temp as p1_avg_temp,
p1.avg_high as p1_avg_high, p2.avg_high as p2_avg_high,
p1.avg_low as p1_avg_low, p2.avg_low as p2_avg_low,
p1.avg_days_high_above as p1_days_high_above,
p2.avg_days_high_above as p2_days_high_above,
p1.avg_days_high_below as p1_days_high_below,
p2.avg_days_high_below as p2_days_high_below,
p1.avg_days_low_above as p1_days_low_above,
p2.avg_days_low_above as p2_days_low_above,
p1.avg_days_low_below as p1_days_low_below,
p2.avg_days_low_below as p2_days_low_below
from p1agg p1 JOIN p2agg p2 on
(p1.station = p2.station)
WHERE p1.count >= %s and p2.count >= %s)
SELECT ST_X(geom) as lon, ST_Y(geom) as lat, t.geom,
d.* from agg d JOIN stations t ON (d.station = t.id)
WHERE t.network ~* 'CLIMATE'
and substr(station, 3, 1) != 'C' and substr(station, 3, 4) != '0000'
""",
pgconn,
params=[
threshold,
threshold,
threshold,
threshold,
p1syear,
p1eyear,
tuple(months),
threshold,
threshold,
threshold,
threshold,
p2syear,
p2eyear,
tuple(months),
p1years,
p2years,
],
index_col="station",
geom_col="geom",
)
if df.empty:
raise NoDataFound("No Data Found.")
df["total_precip"] = df["p2_total_precip"] - df["p1_total_precip"]
df["avg_temp"] = df["p2_avg_temp"] - df["p1_avg_temp"]
df["avg_high"] = df["p2_avg_high"] - df["p1_avg_high"]
df["avg_low"] = df["p2_avg_low"] - df["p1_avg_low"]
df["gdd"] = df["p2_gdd"] - df["p1_gdd"]
df["sdd"] = df["p2_sdd"] - df["p1_sdd"]
df["days_high_above"] = df["p2_days_high_above"] - df["p1_days_high_above"]
df["days_high_below"] = df["p2_days_high_below"] - df["p1_days_high_below"]
df["days_low_above"] = df["p2_days_low_above"] - df["p1_days_low_above"]
df["days_low_below"] = df["p2_days_low_below"] - df["p1_days_low_below"]
return df
def geojson(fdict):
"""Handle GeoJSON output."""
ctx = get_autoplot_context(fdict, get_description())
return (get_data(ctx).drop(["lat", "lon"], axis=1)), ctx["var"]
def plotter(fdict):
""" Go """
ctx = get_autoplot_context(fdict, get_description())
df = get_data(ctx)
state = ctx["state"]
varname = ctx["var"]
sector = ctx["sector"]
threshold = ctx["threshold"]
opt = ctx["opt"]
month = ctx["month"]
p1syear = ctx["p1syear"]
p1eyear = ctx["p1eyear"]
p2syear = ctx["p2syear"]
p2eyear = ctx["p2eyear"]
opt1 = ctx["opt1"]
column = varname
title = "%s %s" % (MDICT[month], PDICT3[varname])
title = title.replace("[Threshold]", "%.1f" % (threshold,))
if opt1 == "p1":
column = "p1_%s" % (varname,)
title = "%.0f-%.0f %s" % (p1syear, p1eyear, title)
else:
title = ("%.0f-%.0f minus %.0f-%.0f %s Difference (%s)") % (
p2syear,
p2eyear,
p1syear,
p1eyear,
title,
UNITS[varname] if ctx["r"] == "english" else MUNITS[varname],
)
# Reindex so that most extreme values are first
df = df.reindex(df[column].abs().sort_values(ascending=False).index)
# drop 5% most extreme events, too much?
df2 = df.iloc[int(len(df.index) * 0.05) :]
mp = MapPlot(
sector=sector,
state=state,
axisbg="white",
title=title,
subtitle=("based on IEM Archives"),
titlefontsize=12,
)
if opt1 == "diff":
# Create 9 levels centered on zero
abval = df2[column].abs().max()
levels = centered_bins(abval)
else:
levels = [
round(v, PRECISION.get(varname, 1))
for v in np.percentile(df2[column].values, range(0, 101, 10))
]
if opt in ["both", "contour"]:
mp.contourf(
df2["lon"].values,
df2["lat"].values,
df2[column].values,
levels,
cmap=get_cmap(ctx["cmap"]),
units=UNITS[varname] if ctx["r"] == "english" else MUNITS[varname],
)
if sector == "state":
mp.drawcounties()
if opt in ["both", "values"]:
mp.plot_values(
df2["lon"].values,
df2["lat"].values,
df2[column].values,
fmt="%%.%if" % (PRECISION.get(varname, 1),),
labelbuffer=5,
)
return mp.fig, df.drop("geom", axis=1).round(2)
if __name__ == "__main__":
plotter(dict(over="annual"))
| mit | -7,669,432,662,049,230,000 | 31.120567 | 79 | 0.524693 | false | 3.23423 | false | false | false |
zprpa-ca/last.fm | 03.artist-correlation.py | 1 | 8514 | #!/usr/bin/env python
''' Calculate the correlation between the artists. Intermediate datasets are
saved in the HDF5 file and the final dataset is saved in the database as
well. The artist correlation matrix is saved only for the single
selected artist, used in the final step for the similarity comparison.
#--------------------------------------------------------------------------#
Copyright (C) 2014, Zlatko Prpa <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
#--------------------------------------------------------------------------#
'''
#-- standard libs
import os, sys, sqlite3, time, locale, itertools as it
#-- add-on libs
import numpy, h5py
#-- custom libs
import utils
#==============================================================================#
#--------------------------------- SETUP --------------------------------------#
#==============================================================================#
log = utils.ZpLog( 'logs/' + os.path.basename(__file__) + '.log')
elog = utils.ZpErrLog('logs/' + os.path.basename(__file__) + '.ERROR-traceback.log')
log.write(''.ljust(150,'*'), skip_line=1, add_line=1)
#-- setup number formatting
locale.setlocale( locale.LC_ALL, "" )
fmt = locale.format
#-- open the HDF5 file for the storage of the intermediate datasets
h5f = h5py.File('data/artist-correlation-datasets.h5','w')
vlen_dtype = h5py.special_dtype(vlen=str)
#==============================================================================#
#------------------------- Load and process data ------------------------------#
#==============================================================================#
#--------------------------------------#
#-- load data and apply basic filter #
#--------------------------------------#
''' Load the records from the artist/tag table.
    There is no reason to apply any filter to this basic dataset, as opposed
to the tag correlation procedure. We do not need to generalize any
specific artist, as we had to do with tag data.
Otherwise, the whole processing logic is very much the same.
'''
log.write('Load data.')
dbs = sqlite3.connect('data/lastfm.sql3', detect_types=sqlite3.PARSE_DECLTYPES)
cur = dbs.cursor()
cur.execute("SELECT t.artist_name, t.tag, t.count FROM top_artist_tags t")
recs = numpy.array([r for r in cur],dtype=[('art','O'),('tag','O'),('count','i4')])
cur.close()
dbs.close()
log.write('Loaded %s records.'%fmt('%12d',recs.shape[0],True).strip())
#--------------------------------------#
#-- prepare data for correlation calc #
#--------------------------------------#
log.write('Prepare data for the correlation calc.')
#-- Get unique list of artists and tags.
unique_art = numpy.unique( recs['art'] )
unique_tags = numpy.unique( recs['tag'] )
''' Create 2d array to hold the vector for each artist. The vector size is 2x
the length of the list of the unique tags. First part will have the
value 0/1, depending if the given artist is associated with the given tag.
The second part will have the tag ranking (count) value, at the same
position for the given tag.
Assuming the following tuples in the basic dataset [recs]:
(art1,tag1,90), (art1,tag2,80), (art1,tag3,60),
(art2,tag1,80), (art2,tag3,90),
(art3,tag2,90), (art3,tag3,80),
(art4,tag1,50), (art4,tag2,70), (art4,tag3,70)
The "unique_art" list is: [art1,art2,art3,art4]
The "unique_tags" list is: [tag1,tag2,tag3]
offset = 3
Single artist vector is [0,0,0,0,0,0], with logical mask as
[tag1,tag2,tag3,rank1,rank2,rank3].
        Based on the data described above, the complete matrix "art_mx"
will have 4 vectors with following values:
[[1,1,1,90,80,60],
[1,0,1,80, 0,90],
[0,1,1, 0,90,80],
[1,1,1,50,70,70]]
The sample data (tags for 1000 artists) is very small and this executes
fast, otherwise this loop would be a strong candidate for parallel
execution.
'''
offset = unique_tags.shape[0]
art_mx = numpy.zeros((unique_art.shape[0],offset*2),'i4')
for i in xrange(unique_art.shape[0]):
#-- find indicies for all records in the basic dataset for given artist
idx = numpy.where( recs['art']==unique_art[i] )[0]
#-- get all tags and counts for the given artist
tags = recs['tag'].take(idx)
counts = recs['count'].take(idx)
    #-- find the index positions in the unique tag list, for all of the artist's tags
idx = unique_tags.searchsorted(tags)
#-- fill in the first part of the artist vector with 1, for each tag found
numpy.put( art_mx[i], idx, 1 )
#-- fill in the tag count (rank) in the second part of the artist vector
numpy.put( art_mx[i], idx+offset, counts )
ds = h5f.create_dataset('unique_art', unique_art.shape, dtype=vlen_dtype)
ds[...] = unique_art
ds = h5f.create_dataset('unique_tags', unique_tags.shape, dtype=vlen_dtype)
ds[...] = unique_tags
ds = h5f.create_dataset('art_mx', art_mx.shape, dtype=art_mx.dtype)
ds[...] = art_mx
h5f.flush()
log.write('Saved following datasets:')
log.write('unique_art: shape->%s\tdtype->%s'%(unique_art.shape, unique_art.dtype))
log.write('unique_tags: shape->%s\tdtype->%s'%(unique_tags.shape,unique_tags.dtype))
log.write('art_mx: shape->%s\tdtype->%s'%(art_mx.shape, art_mx.dtype), add_line=1)
#--------------------------------------#
#-- calculate artist correlation #
#--------------------------------------#
log.write('Calculate artist correlation.')
''' Calculate correlation for each distinct pair of artist vectors.
Again, in case of high data volume, this could be executed in parallel
using the pool of worker processes.
For the present dataset, the approx size of the artist correlation matrix
is around 500K recs.
'''
#-- first iterator to get the matrix size
itr = ((i,j) for i in xrange(unique_art.shape[0]) for j in xrange(i+1,unique_art.shape[0]))
size = sum(1 for _ in itr)
corr = numpy.empty( size, dtype=[('art1','O'),('art2','O'),('c','f8')] )
#-- full iterator
itr = it.izip( ((i,j) for i in xrange(unique_art.shape[0]) for j in xrange(i+1,unique_art.shape[0])),
(k for k in xrange(size)) )
t = time.time()
for (x,y),z in itr:
c = numpy.corrcoef( art_mx[x], art_mx[y] )[0,1]
corr[z] = (unique_art[x], unique_art[y], c)
    #-- update progress every 10K recs
if z%10000==0:
log.write_timing1( z, size, t, time.time(), out_type='TTY')
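# A parallel variant (illustrative sketch only, not used in this run) could hand the
# index pairs to a multiprocessing pool; fork-based workers would inherit 'art_mx'
# and 'unique_art' from the parent process:
#
#   from multiprocessing import Pool
#   def corr_pair(xy):
#       x, y = xy
#       return (unique_art[x], unique_art[y],
#               numpy.corrcoef(art_mx[x], art_mx[y])[0, 1])
#   pairs = [(x, y) for x in xrange(unique_art.shape[0])
#                   for y in xrange(x + 1, unique_art.shape[0])]
#   pool = Pool()
#   corr[:] = pool.map(corr_pair, pairs, chunksize=10000)
#   pool.close(); pool.join()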
''' Because the full dataset is somewhat big, save only the sample used later
in the "similar artist" comparison.
Comment out if you want to re-run and get all records.
'''
log.write('Full artist correlation matrix: [corr] shape->%s\tdtype->%s'%(corr.shape,corr.dtype))
sample_artist = 'Cyndi Lauper'
i = numpy.where( (corr['art1']==sample_artist)|(corr['art2']==sample_artist) )[0]
corr = corr.take(i)
log.write('Sample artist correlation matrix: [corr] shape->%s\tdtype->%s'%(corr.shape,corr.dtype))
ds = h5f.create_dataset('corr', corr.shape, dtype=[('art1',vlen_dtype),('art2',vlen_dtype),('c','f8')])
ds[...] = corr
h5f.close()
log.write('Saved sample artist correlation matrix: [corr] shape->%s\tdtype->%s'%(corr.shape,corr.dtype),add_line=1)
#-- save the records in the database as well
dbs = sqlite3.connect('data/lastfm.sql3', detect_types=sqlite3.PARSE_DECLTYPES)
cur = dbs.cursor()
cur.execute("DELETE FROM artist_correlation")
cur.executemany("INSERT INTO artist_correlation VALUES (?,?,?)",(r for r in corr))
log.write('Loaded %s records in the database.'%fmt('%6d',cur.rowcount,True))
dbs.commit()
cur.close()
dbs.close()
log.write(''.ljust(150,'*'), add_line=1)
log.close()
#==============================================================================#
#------------------------------------------------------------------------------#
#==============================================================================#
| gpl-3.0 | -9,014,966,826,731,524,000 | 42.218274 | 115 | 0.592436 | false | 3.581826 | false | false | false |
ROCmSoftwarePlatform/rocFFT | library/src/device/kernel-generator.py | 1 | 39884 | #!/usr/bin/env python3
"""rocFFT kernel generator.
Currently this acts as a shim between CMake and the C++ kernel generator.
It accepts two sub-commands:
1. list - lists files that will be generated
2. generate - pass arguments down to the old generator
Note that 'small' kernels don't decompose their lengths.
"""
import argparse
import collections
import functools
import itertools
import os
import subprocess
import sys
from pathlib import Path
from types import SimpleNamespace as NS
from functools import reduce
from operator import mul
from copy import deepcopy
from generator import (ArgumentList, BaseNode, Call, CommentBlock, ExternC, Function, Include,
LineBreak, Map, Pragma, StatementList, Variable, name_args, format_and_write)
import stockham
supported_large = [50, 64, 81, 100, 128, 200, 256, 336]
old_gen_supported_large = [50, 64, 81, 100, 128, 200, 256]
#
# CMake helpers
#
def scjoin(xs):
"""Join 'xs' with semi-colons."""
return ';'.join(str(x) for x in xs)
def scprint(xs):
"""Print 'xs', joined by semi-colons, on a single line. CMake friendly."""
print(scjoin(xs), end='', flush=True)
def cjoin(xs):
"""Join 'xs' with commas."""
return ','.join(str(x) for x in xs)
#
# Helpers
#
def product(*args):
"""Cartesian product of input iteratables, as a list."""
return list(itertools.product(*args))
def merge(*ds):
"""Merge dictionaries; last one wins."""
r = collections.OrderedDict()
for d in ds:
r.update(d)
return r
def pmerge(d, fs):
"""Merge d with dicts of {(length, precision, scheme, transpose): f}."""
r = collections.OrderedDict()
r.update(d)
for f in fs:
r[f.meta.length, f.meta.precision, f.meta.scheme, f.meta.transpose] = f
return r
def flatten(lst):
"""Flatten a list of lists to a list."""
return sum(lst, [])
# this function should eventually go away
def pick(all, new_kernels, subtract_from_all=True):
"""From all old kernels, pick out those supported by new kernel, and remove from old list."""
old = collections.OrderedDict(all)
new = []
for nk in new_kernels:
assert hasattr(nk, 'length')
for target_length in all:
if nk.length == target_length:
new.append(nk) # pick out, put to new
if subtract_from_all:
del old[target_length] # remove from old
break
# old-list to old-gen, new-list to new-gen
return old, new
def merge_length(kernel_list, ks):
"""Merge kernel lists without duplicated meta.length; ignore later ones."""
merged_list = list(kernel_list)
lengths = [ item.length for item in kernel_list ]
for k in ks:
if k.length not in lengths:
merged_list.append(k)
return merged_list
#
# Supported kernel sizes
#
# this function should eventually go away
def supported_small_sizes(precision, pow2=True, pow3=True, pow5=True, commonRadix=True):
"""Return list of 1D small kernels."""
upper_bound = {
'sp': 4096,
'dp': 4096, # of course this isn't 2048... not sure why (double len 1594323 will fail)
}
powers = {
5: [5**k for k in range(6 if pow5 else 1)],
3: [3**k for k in range(8 if pow3 else 1)],
2: [2**k for k in range(13 if pow2 else 1)],
}
lengths = [p2 * p3 * p5 for p2, p3, p5 in product(powers[2], powers[3], powers[5])]
# common radix 7, 11, and 13
if commonRadix:
lengths += [7, 14, 21, 28, 42, 49, 56, 84, 112, 168, 224, 336, 343]
lengths += [11, 22, 44, 88, 121, 176]
lengths += [13, 17, 26, 52, 104, 169, 208, 272, 528, 1040]
def filter_bound(length):
return length <= upper_bound[precision]
filtered = sorted([x for x in set(lengths) if filter_bound(x)])
return product(filtered, ['CS_KERNEL_STOCKHAM'])
def supported_large_sizes(precision):
"""Return list of 1D large block kernels."""
return product(supported_large, ['CS_KERNEL_STOCKHAM_BLOCK_CC',
'CS_KERNEL_STOCKHAM_BLOCK_RC'])
# this function should eventually go away
def supported_2d_sizes(precision):
"""Return list of 2D kernels."""
# for now, mimic order of old generator so diffing is easier
powers = {
5: [5**k for k in range(3, 1, -1)],
3: [3**k for k in range(5, 1, -1)],
2: [2**k for k in range(8, 1, -1)],
}
lengths = []
for b1, b2 in [(2, 2), (3, 3), (5, 5), (2, 3), (3, 2), (3, 5), (5, 3), (2, 5), (5, 2)]:
lengths.extend(product(powers[b1], powers[b2]))
max_lds_size_bytes = 64 * 1024
bytes_per_element = {'sp': 8, 'dp': 16}[precision]
def filter_lds(length):
return length[0] * length[1] * bytes_per_element * 1.5 <= max_lds_size_bytes
# explicit list of fused 2D kernels that the old generator doesn't
# like; usually because the thread counts are wonky.
avoid = {
'sp': [(16, 243), (16, 256), (27, 125), (27, 128), (64, 64), (64, 81)],
'dp': [(16, 243), (16, 256), (25, 125), (27, 125), (32, 125), (25, 128), (27, 128), (32, 128), (64, 64), (64, 81)]
}[precision]
def filter_threads(length):
rlength = (length[1], length[0])
return length not in avoid and rlength not in avoid
filtered = [x for x in lengths if filter_lds(x) and filter_threads(x)]
return product(filtered, ['CS_KERNEL_2D_SINGLE'])
# this function should eventually go away
def get_dependent_1D_sizes(list_2D):
dep_1D = set()
for problem in list_2D:
dep_1D.update( [problem[0][0], problem[0][1]] )
return product(dep_1D, ['CS_KERNEL_STOCKHAM'])
#
# Prototype generators
#
@name_args(['function'])
class FFTKernel(BaseNode):
def __str__(self):
f = 'FFTKernel('
if self.function.meta.runtime_compile:
f += 'nullptr'
else:
f += str(self.function.address())
use_3steps_large_twd = getattr(self.function.meta, 'use_3steps_large_twd', None)
if use_3steps_large_twd is not None:
f += ', ' + str(use_3steps_large_twd[self.function.meta.precision])
else:
f += ', false'
factors = getattr(self.function.meta, 'factors', None)
if factors is not None:
f += ', {' + cjoin(factors) + '}'
transforms_per_block = getattr(self.function.meta, 'transforms_per_block', None)
if transforms_per_block is not None:
f += ', ' + str(transforms_per_block)
threads_per_block = getattr(self.function.meta, 'threads_per_block', None)
if threads_per_block is not None:
f += ', ' + str(threads_per_block)
f += ')'
return f
def generate_cpu_function_pool(functions):
"""Generate function to populate the kernel function pool."""
function_map = Map('function_map')
precisions = { 'sp': 'rocfft_precision_single',
'dp': 'rocfft_precision_double' }
populate = StatementList()
for f in functions:
length, precision, scheme, transpose = f.meta.length, f.meta.precision, f.meta.scheme, f.meta.transpose
if isinstance(length, (int, str)):
length = [length, 0]
key = Call(name='std::make_tuple',
arguments=ArgumentList('std::array<size_t, 2>({' + cjoin(length) + '})',
precisions[precision],
scheme,
transpose or 'NONE')).inline()
populate += function_map.assert_emplace(key, FFTKernel(f))
return StatementList(
Include('<iostream>'),
Include('"../include/function_pool.h"'),
StatementList(*[f.prototype() for f in functions]),
Function(name='function_pool::function_pool',
value=False,
arguments=ArgumentList(),
body=populate))
# this function should eventually go away
def generate_small_1d_prototypes(precision, transforms):
"""Generate prototypes for 1D small kernels that will be generated by the old generator."""
data = Variable('data_p', 'const void *')
back = Variable('back_p', 'void *')
functions = []
def add(name, scheme, transpose=None):
functions.append(Function(name=name,
arguments=ArgumentList(data, back),
meta=NS(
length=length,
precision=precision,
scheme=scheme,
transpose=transpose,
runtime_compile=False)))
for length, scheme in transforms.items():
add(f'rocfft_internal_dfn_{precision}_ci_ci_stoc_{length}', scheme)
return functions
# this function should eventually go away
def generate_large_1d_prototypes(precision, transforms):
"""Generate prototypes for 1D large block kernels that will be generated from the old generator."""
data = Variable('data_p', 'const void *')
back = Variable('back_p', 'void *')
functions = []
def add(name, scheme, transpose=None):
use3Steps = {'sp': 'true', 'dp': 'true'}
if length == 81:
use3Steps['dp'] = 'false'
elif length == 200:
use3Steps['sp'] = use3Steps['dp'] = 'false'
functions.append(Function(name=name,
arguments=ArgumentList(data, back),
meta=NS(
length=length,
precision=precision,
scheme=scheme,
use_3steps_large_twd=use3Steps,
transpose=transpose,
runtime_compile=False)))
for length, scheme in transforms.items():
if 0:
add(f'rocfft_internal_dfn_{precision}_ci_ci_sbcc_{length}', 'CS_KERNEL_STOCKHAM_BLOCK_CC')
elif scheme == 'CS_KERNEL_STOCKHAM_BLOCK_RC':
# for old-sbcc compatibility: always include the sbcc function (but will be overwritten if new gen has it)
add(f'rocfft_internal_dfn_{precision}_ci_ci_sbcc_{length}', 'CS_KERNEL_STOCKHAM_BLOCK_CC')
add(f'rocfft_internal_dfn_{precision}_op_ci_ci_sbrc_{length}', 'CS_KERNEL_STOCKHAM_BLOCK_RC')
add(f'rocfft_internal_dfn_{precision}_op_ci_ci_sbrc3d_fft_trans_xy_z_tile_aligned_{length}', 'CS_KERNEL_STOCKHAM_TRANSPOSE_XY_Z', 'TILE_ALIGNED')
add(f'rocfft_internal_dfn_{precision}_op_ci_ci_sbrc3d_fft_trans_z_xy_tile_aligned_{length}', 'CS_KERNEL_STOCKHAM_TRANSPOSE_Z_XY', 'TILE_ALIGNED')
add(f'rocfft_internal_dfn_{precision}_op_ci_ci_sbrc3d_fft_erc_trans_z_xy_tile_aligned_{length}', 'CS_KERNEL_STOCKHAM_R_TO_CMPLX_TRANSPOSE_Z_XY', 'TILE_ALIGNED')
if length in [128, 256]:
add(f'rocfft_internal_dfn_{precision}_op_ci_ci_sbrc3d_fft_trans_xy_z_diagonal_{length}', 'CS_KERNEL_STOCKHAM_TRANSPOSE_XY_Z', 'DIAGONAL')
add(f'rocfft_internal_dfn_{precision}_op_ci_ci_sbrc3d_fft_trans_z_xy_diagonal_{length}', 'CS_KERNEL_STOCKHAM_TRANSPOSE_Z_XY', 'DIAGONAL')
add(f'rocfft_internal_dfn_{precision}_op_ci_ci_sbrc3d_fft_erc_trans_z_xy_diagonal_{length}', 'CS_KERNEL_STOCKHAM_R_TO_CMPLX_TRANSPOSE_Z_XY', 'DIAGONAL')
return functions
# this function should eventually go away
def generate_2d_prototypes(precision, transforms):
"""Generate prototypes for 2D kernels that will be generated by the old generator."""
data = Variable('data_p', 'const void *')
back = Variable('back_p', 'void *')
functions = []
def add(name, scheme, transpose=None):
functions.append(Function(name=name,
arguments=ArgumentList(data, back),
meta=NS(
length=length,
precision=precision,
scheme=scheme,
transpose=transpose,
runtime_compile=False)))
for length, scheme in transforms.items():
add(f'rocfft_internal_dfn_{precision}_ci_ci_2D_{length[0]}_{length[1]}', 'CS_KERNEL_2D_SINGLE', 'NONE')
return functions
# this function should eventually go away
def list_old_generated_kernels(patterns=None,
precisions=None,
num_small_kernel_groups=150):
"""Return a list (for CMake) of files created by the (old) generator."""
if patterns is None:
patterns = ['all']
if precisions is None:
precisions = ['all']
#
# all this 'generated_kernels' should go away when the old generator goes away
#
generated_kernels = {
'kernels_launch_basic': [
'function_pool.cpp',
],
'kernels_launch_small_sp':
[f'kernel_launch_single_{i}.cpp' for i in range(num_small_kernel_groups)]
+ [f'kernel_launch_single_{i}.cpp.h' for i in range(num_small_kernel_groups)],
'kernels_launch_small_dp':
[f'kernel_launch_double_{i}.cpp' for i in range(num_small_kernel_groups)]
+ [f'kernel_launch_double_{i}.cpp.h' for i in range(num_small_kernel_groups)],
'kernels_launch_large_sp': [
'kernel_launch_single_large.cpp',
],
'kernels_launch_large_dp': [
'kernel_launch_double_large.cpp',
],
'kernels_launch_2D_sp': [
'kernel_launch_single_2D_pow2.cpp',
'kernel_launch_single_2D_pow3.cpp',
'kernel_launch_single_2D_pow5.cpp',
'kernel_launch_single_2D_mix_pow2_3.cpp',
'kernel_launch_single_2D_mix_pow3_2.cpp',
'kernel_launch_single_2D_mix_pow3_5.cpp',
'kernel_launch_single_2D_mix_pow5_3.cpp',
'kernel_launch_single_2D_mix_pow2_5.cpp',
'kernel_launch_single_2D_mix_pow5_2.cpp',
],
'kernels_launch_2D_dp': [
'kernel_launch_double_2D_pow2.cpp',
'kernel_launch_double_2D_pow3.cpp',
'kernel_launch_double_2D_pow5.cpp',
'kernel_launch_double_2D_mix_pow2_3.cpp',
'kernel_launch_double_2D_mix_pow3_2.cpp',
'kernel_launch_double_2D_mix_pow3_5.cpp',
'kernel_launch_double_2D_mix_pow5_3.cpp',
'kernel_launch_double_2D_mix_pow2_5.cpp',
'kernel_launch_double_2D_mix_pow5_2.cpp',
],
}
generated_kernels['kernels_launch_small_all'] = generated_kernels['kernels_launch_small_sp'] + generated_kernels['kernels_launch_small_dp']
generated_kernels['kernels_launch_large_all'] = generated_kernels['kernels_launch_large_sp'] + generated_kernels['kernels_launch_large_dp']
generated_kernels['kernels_launch_2D_all'] = generated_kernels['kernels_launch_2D_sp'] + generated_kernels['kernels_launch_2D_dp']
generated_kernels['kernels_launch_all_sp'] = generated_kernels['kernels_launch_small_sp'] + generated_kernels['kernels_launch_large_sp'] + generated_kernels['kernels_launch_2D_sp']
generated_kernels['kernels_launch_all_dp'] = generated_kernels['kernels_launch_small_dp'] + generated_kernels['kernels_launch_large_dp'] + generated_kernels['kernels_launch_2D_dp']
generated_kernels['kernels_launch_all_all'] = generated_kernels['kernels_launch_all_sp'] + generated_kernels['kernels_launch_all_dp']
gen = generated_kernels['kernels_launch_basic']
for patt in patterns:
for prec in precisions:
gen += generated_kernels[f'kernels_launch_{patt}_{prec}']
return list(set(gen))
def list_generated_kernels(kernels):
"""Return list of kernel filenames."""
return [kernel_file_name(x) for x in kernels if not x.runtime_compile]
#
# Main!
#
@name_args(['name', 'ip_fwd', 'ip_inv', 'op_fwd', 'op_inv', 'precision'])
class POWX_SMALL_GENERATOR(BaseNode):
def __str__(self):
return f'POWX_SMALL_GENERATOR({cjoin(self.args)});'
def function(self, meta, precision):
data = Variable('data_p', 'const void *')
back = Variable('back_p', 'void *')
meta = NS(precision=precision, **meta.__dict__)
return Function(name=self.name,
arguments=ArgumentList(data, back),
meta=meta)
@name_args(['name', 'ip_fwd', 'ip_inv', 'op_fwd', 'op_inv', 'precision'])
class POWX_LARGE_SBCC_GENERATOR(POWX_SMALL_GENERATOR):
def __str__(self):
return f'POWX_LARGE_SBCC_GENERATOR({cjoin(self.args)});'
@name_args(['name', 'op_fwd', 'op_inv', 'precision', 'sbrc_type', 'transpose_type'])
class POWX_LARGE_SBRC_GENERATOR(POWX_SMALL_GENERATOR):
def __str__(self):
return f'POWX_LARGE_SBRC_GENERATOR({cjoin(self.args)});'
def kernel_file_name(ns):
"""Given kernel info namespace, return reasonable file name."""
assert hasattr(ns, 'length')
length = ns.length
if isinstance(length, (tuple, list)):
length = 'x'.join(str(x) for x in length)
postfix = ''
if ns.scheme == 'CS_KERNEL_STOCKHAM_BLOCK_CC':
postfix = '_sbcc'
elif ns.scheme == 'CS_KERNEL_STOCKHAM_BLOCK_RC':
postfix = '_sbrc'
return f'rocfft_len{length}{postfix}.cpp'
def list_new_kernels():
"""Return list of kernels to generate with the new generator."""
    # remaining lengths less than 1024: 121 192 224 250 320 336 375
# 384 405 432 450 480 500 512 576 600 625 640 675 750 768 800 810
# 864 900 972 1000
# dictionary of (flavour, threads_per_block) -> list of kernels to generate
    # note the length property is necessary for the later pick and merge_length
small_kernels = {
('uwide', 256): [
# NS(length=2, factors=[2]),
# NS(length=3, factors=[3]),
# NS(length=5, factors=[5]),
# NS(length=6, factors=[6]),
# NS(length=7, factors=[7]),
# NS(length=8, factors=[8]),
NS(length=9, factors=[3,3], runtime_compile=True),
# NS(length=10, factors=[10]),
NS(length=12, factors=[6,2]),
NS(length=14, factors=[7,2]),
NS(length=15, factors=[5,3]),
NS(length=17, factors=[17]),
# NS(length=18, factors=[6,3]),
NS(length=20, factors=[10,2]),
NS(length=21, factors=[7,3]),
NS(length=24, factors=[8,3]),
NS(length=25, factors=[5,5]),
# NS(length=27, factors=[3,3,3]),
NS(length=28, factors=[7,4]),
NS(length=30, factors=[10,3]),
NS(length=36, factors=[6,6]),
NS(length=42, factors=[7,6]),
NS(length=45, factors=[5,3,3]),
# NS(length=49, factors=[7,7]),
NS(length=50, factors=[10,5]),
NS(length=54, factors=[6,3,3]),
NS(length=56, factors=[8,7]),
# NS(length=64, factors=[16,4]),
# NS(length=72, factors=[8,3,3]),
NS(length=75, factors=[5,5,3]),
NS(length=80, factors=[16,5]),
# NS(length=81, factors=[3,3,3,3]),
# NS(length=96, factors=[16,6]),
# NS(length=100, factors=[10,10]),
NS(length=108, factors=[6,6,3]),
NS(length=112, factors=[16,7]),
NS(length=125, factors=[5,5,5]),
# NS(length=128, factors=[16,8]),
# NS(length=135, factors=[5,3,3,3]),
# NS(length=150, factors=[10,5,3]),
NS(length=160, factors=[16,10]),
# NS(length=162, factors=[6,3,3,3]),
NS(length=168, factors=[8,7,3]),
NS(length=180, factors=[10,6,3]),
# NS(length=216, factors=[8,3,3,3]),
NS(length=225, factors=[5,5,3,3]),
NS(length=240, factors=[16,5,3]),
# NS(length=243, factors=[3,3,3,3,3]),
# NS(length=256, factors=[16,16]),
# NS(length=270, factors=[10,3,3,3]),
# NS(length=288, factors=[16,6,3]),
NS(length=324, factors=[6,6,3,3]),
NS(length=343, factors=[7,7,7]),
NS(length=360, factors=[10,6,6]),
NS(length=400, factors=[16,5,5]),
# NS(length=486, factors=[6,3,3,3,3]),
# NS(length=540, factors=[10,6,3,3]),
NS(length=648, factors=[8,3,3,3,3]),
NS(length=720, factors=[16,5,3,3]),
# NS(length=729, factors=[3,3,3,3,3,3]),
NS(length=960, factors=[16,10,6]),
NS(length=1040, factors=[13,16,5]),
],
('uwide', 128): [
NS(length=272, factors=[16,17]),
],
('wide', 64): [
# NS(length=11, factors=[11]),
NS(length=22, factors=[2,11]),
NS(length=44, factors=[4,11]),
NS(length=60, factors=[6,10]),
NS(length=84, factors=[2,6,7]),
NS(length=90, factors=[3,3,10]),
NS(length=120, factors=[2,6,10]),
# NS(length=200, factors=[2,10,10]),
NS(length=300, factors=[3,10,10]),
NS(length=528, factors=[4,4,3,11]),
],
('uwide', 64): [
NS(length=32, factors=[16,2]),
NS(length=40, factors=[10,4]),
NS(length=48, factors=[3,4,4]),
NS(length=88, factors=[11,8]),
NS(length=176, factors=[16,11]),
NS(length=336, factors=[7,8,6]),
],
# ('tall', X): [
# NS(length=4),
# NS(length=13),
# NS(length=16),
# NS(length=26),
# NS(length=52),
# NS(length=104),
# NS(length=169),
# NS(length=192),
# NS(length=208),
# NS(length=320),
# NS(length=512),
# NS(length=625),
# NS(length=864),
# NS(length=1000),
# ]
}
expanded = []
for params, kernels in small_kernels.items():
flavour, threads_per_block = params
expanded.extend(NS(**kernel.__dict__,
flavour=flavour,
threads_per_block=threads_per_block,
scheme='CS_KERNEL_STOCKHAM') for kernel in kernels)
return expanded
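# Illustrative result of the expansion above: the length-12 entry becomes
# NS(length=12, factors=[6, 2], flavour='uwide', threads_per_block=256,
#    scheme='CS_KERNEL_STOCKHAM').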
def list_new_2d_kernels():
"""Return list of fused 2D kernels to generate with new generator."""
# can probably merge this with above when old gen is gone
fused_kernels = {
(('tall', 'tall'), 128): [
NS(length=[32, 32], factors=[[8,4],[8,4]], threads_per_transform=4),
],
}
expanded = []
for params, kernels in fused_kernels.items():
flavours, threads_per_block = params
expanded.extend(NS(**kernel.__dict__,
flavour=flavours,
threads_per_block=threads_per_block,
scheme='CS_KERNEL_2D_SINGLE') for kernel in kernels)
return expanded
def list_new_large_kernels():
"""Return list of large kernels to generate with the new generator."""
kernels = [
NS(length=50, factors=[10, 5], use_3steps_large_twd={'sp': 'true', 'dp': 'true'}, threads_per_block=256),
NS(length=64, factors=[8, 8], use_3steps_large_twd={'sp': 'true', 'dp': 'false'}),
NS(length=81, factors=[3, 3, 3, 3], use_3steps_large_twd={'sp': 'true', 'dp': 'true'}),
# NS(length=100, factors=[5, 5, 4], use_3steps_large_twd={'sp': 'true', 'dp': 'false'}),
NS(length=128, factors=[8, 4, 4], use_3steps_large_twd={'sp': 'true', 'dp': 'false'}),
NS(length=200, factors=[8, 5, 5], use_3steps_large_twd={'sp': 'false', 'dp': 'false'}),
NS(length=256, factors=[4, 4, 4, 4], use_3steps_large_twd={'sp': 'true', 'dp': 'false'}),
NS(length=336, factors=[6, 7, 8], use_3steps_large_twd={'sp': 'false', 'dp': 'false'})
]
    # For SBCC kernels, increase the desired threads_per_block so that the
    # number of columns per thread block also increases; currently targeting
    # 16 columns.
block_width = 16
for k in kernels:
k.scheme = 'CS_KERNEL_STOCKHAM_BLOCK_CC'
if not hasattr(k, 'threads_per_block'):
k.threads_per_block = block_width * reduce(mul, k.factors, 1) // min(k.factors)
if not hasattr(k, 'length'):
k.length = functools.reduce(lambda a, b: a * b, k.factors)
# kernels += [
# NS(length=64, factors=[4, 4, 4], scheme='CS_KERNEL_STOCKHAM_BLOCK_RC', threads_per_block=128),
# NS(length=128, factors=[8, 4, 4], scheme='CS_KERNEL_STOCKHAM_BLOCK_RC', threads_per_block=128),
# ]
return kernels
def default_runtime_compile(kernels):
'''Returns a copy of input kernel list with a default value for runtime_compile.'''
return [k if hasattr(k, 'runtime_compile') else NS(**k.__dict__, runtime_compile=False) for k in kernels]
def generate_kernel(kernel, precisions):
"""Generate a single kernel file for 'kernel'.
The kernel file contains all kernel variations corresponding to
the kernel meta data in 'kernel'.
A list of CPU functions is returned.
"""
fname = Path(__file__).resolve()
typename_dict = {
'sp': 'float2',
'dp': 'double2',
}
src = StatementList(
CommentBlock(
'Stockham kernels generated by:',
'',
' ' + ' '.join(sys.argv),
'',
'Generator is: ' + str(fname),
            '',
            'Kernel is: ' + str(kernel)),
LineBreak(),
Include('<hip/hip_runtime.h>'),
Include('"kernel_launch.h"'),
Include('"kernels/common.h"'),
Include('"kernels/butterfly_constant.h"'),
Include('"rocfft_butterfly_template.h"'),
Include('"real2complex.h"'),
LineBreak())
kdevice, kglobal = stockham.stockham(**kernel.__dict__)
# forward runtime compile flag into kglobal.meta so we can know
# whether to put a prototype into the function pool
kglobal.meta = NS(**kglobal.meta.__dict__, runtime_compile=kernel.runtime_compile)
length = kglobal.meta.length
forward, inverse = kglobal.name, kglobal.name.replace('forward', 'inverse')
if not kernel.runtime_compile:
src += stockham.make_variants(kdevice, kglobal)
cpu_functions = []
for p in precisions:
if kglobal.meta.scheme == 'CS_KERNEL_STOCKHAM':
prototype = POWX_SMALL_GENERATOR(f'rocfft_internal_dfn_{p}_ci_ci_stoc_{length}',
'ip_' + forward, 'ip_' + inverse,
'op_' + forward, 'op_' + inverse, typename_dict[p])
src += prototype
cpu_functions.append(prototype.function(kglobal.meta, p))
elif kglobal.meta.scheme == 'CS_KERNEL_STOCKHAM_BLOCK_CC':
prototype = POWX_LARGE_SBCC_GENERATOR(f'rocfft_internal_dfn_{p}_ci_ci_sbcc_{length}',
'ip_' + forward, 'ip_' + inverse,
'op_' + forward, 'op_' + inverse, typename_dict[p])
src += prototype
cpu_functions.append(prototype.function(kglobal.meta, p))
elif kglobal.meta.scheme == 'CS_KERNEL_2D_SINGLE':
prototype = POWX_SMALL_GENERATOR(f'rocfft_internal_dfn_{p}_ci_ci_2D_{length[0]}_{length[1]}',
'ip_' + forward, 'ip_' + inverse,
'op_' + forward, 'op_' + inverse, typename_dict[p])
src += prototype
cpu_functions.append(prototype.function(kglobal.meta, p))
elif kglobal.meta.scheme == 'CS_KERNEL_STOCKHAM_BLOCK_RC':
# SBRC_2D
sbrc_type, transpose_type, meta = 'SBRC_2D', 'TILE_ALIGNED', deepcopy(kglobal.meta)
prototype = POWX_LARGE_SBRC_GENERATOR(f'rocfft_internal_dfn_{p}_op_ci_ci_sbrc_{length}',
'op_' + forward, 'op_' + inverse, typename_dict[p],
sbrc_type, transpose_type)
src += prototype
cpu_functions.append(prototype.function(meta, p))
# SBRC_3D_FFT_TRANS_XY_Z
sbrc_type, transpose_type, meta = 'SBRC_3D_FFT_TRANS_XY_Z', 'TILE_ALIGNED', deepcopy(kglobal.meta)
prototype = POWX_LARGE_SBRC_GENERATOR(f'rocfft_internal_dfn_{p}_op_ci_ci_sbrc3d_fft_trans_xy_z_tile_aligned_{length}',
'op_' + forward, 'op_' + inverse, typename_dict[p],
sbrc_type, transpose_type)
src += prototype
meta.scheme, meta.transpose = 'CS_KERNEL_STOCKHAM_TRANSPOSE_XY_Z', 'TILE_ALIGNED'
cpu_functions.append(prototype.function(meta, p))
sbrc_type, transpose_type, meta = 'SBRC_3D_FFT_TRANS_XY_Z', 'DIAGONAL', deepcopy(kglobal.meta)
prototype = POWX_LARGE_SBRC_GENERATOR(f'rocfft_internal_dfn_{p}_op_ci_ci_sbrc3d_fft_trans_xy_z_diagonal_{length}',
'op_' + forward, 'op_' + inverse, typename_dict[p],
sbrc_type, transpose_type)
src += prototype
meta.scheme, meta.transpose = 'CS_KERNEL_STOCKHAM_TRANSPOSE_XY_Z', 'DIAGONAL'
cpu_functions.append(prototype.function(meta, p))
# SBRC_3D_FFT_TRANS_Z_XY
sbrc_type, transpose_type, meta = 'SBRC_3D_FFT_TRANS_Z_XY', 'TILE_ALIGNED', deepcopy(kglobal.meta)
prototype = POWX_LARGE_SBRC_GENERATOR(f'rocfft_internal_dfn_{p}_op_ci_ci_sbrc3d_fft_trans_z_xy_tile_aligned_{length}',
'op_' + forward, 'op_' + inverse, typename_dict[p],
sbrc_type, transpose_type)
src += prototype
meta.scheme, meta.transpose = 'CS_KERNEL_STOCKHAM_TRANSPOSE_Z_XY', 'TILE_ALIGNED'
cpu_functions.append(prototype.function(meta, p))
# SBRC_3D_FFT_TRANS_Z_XY
sbrc_type, transpose_type, meta = 'SBRC_3D_FFT_ERC_TRANS_Z_XY', 'TILE_ALIGNED', deepcopy(kglobal.meta)
prototype = POWX_LARGE_SBRC_GENERATOR(f'rocfft_internal_dfn_{p}_op_ci_ci_sbrc3d_fft_erc_trans_z_xy_tile_aligned_{length}',
'op_' + forward, 'op_' + inverse, typename_dict[p],
sbrc_type, transpose_type)
src += prototype
meta.scheme, meta.transpose = 'CS_KERNEL_STOCKHAM_R_TO_CMPLX_TRANSPOSE_Z_XY', 'TILE_ALIGNED'
cpu_functions.append(prototype.function(meta, p))
else:
raise NotImplementedError(f'Unable to generate host functions for scheme {kglobal.meta.scheme}.')
if not kernel.runtime_compile:
format_and_write(kernel_file_name(kernel), src)
return cpu_functions
def generate_new_kernels(kernels, precisions):
"""Generate and write kernels from the kernel list.
Entries in the kernel list are simple namespaces. These are
passed as keyword arguments to the Stockham generator.
A list of CPU functions is returned.
"""
return flatten([generate_kernel(k, precisions) for k in kernels])
def cli():
"""Command line interface..."""
parser = argparse.ArgumentParser(prog='kernel-generator')
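    # Illustrative invocations (top-level options must precede the sub-command):
    #   kernel-generator --pattern small --precision single list
    #   kernel-generator --pattern all --precision all generate <old-generator-binary>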
subparsers = parser.add_subparsers(dest='command')
    parser.add_argument('--groups', type=int, help='Number of small kernel groups.', default=150)
parser.add_argument('--pattern', type=str, help='Kernel pattern to generate.', default='all')
parser.add_argument('--precision', type=str, help='Precision to generate.', default='all')
parser.add_argument('--manual-small', type=str, help='Small kernel sizes to generate.')
parser.add_argument('--manual-large', type=str, help='Large kernel sizes to generate.')
list_parser = subparsers.add_parser('list', help='List kernel files that will be generated.')
generate_parser = subparsers.add_parser('generate', help='Generate kernels.')
generate_parser.add_argument('generator', type=str, help='Kernel generator executable.')
args = parser.parse_args()
#
# which kernels to build? set the flags for generate before modifying patterns
#
patterns = args.pattern.split(',')
large = 'all' in patterns or 'large' in patterns
small = 'all' in patterns or 'small' in patterns
dim2 = 'all' in patterns or '2D' in patterns
pow2 = small or 'pow2' in patterns
pow3 = small or 'pow3' in patterns
pow5 = small or 'pow5' in patterns
pow7 = small or 'pow7' in patterns
if patterns == ['none']:
patterns = []
if args.manual_small:
patterns += ['small']
if args.manual_large:
patterns += ['large']
# TODO- if dim2, pattern += small as well
replacements = {
'pow2': 'small',
'pow3': 'small',
'pow5': 'small',
'pow7': 'small',
}
patterns = [replacements.get(key, key) for key in patterns if key != 'none']
if 'all' in patterns:
patterns += ['small']
patterns += ['large']
patterns += ['2D']
patterns = set(patterns)
#
    # which precisions to build?
#
precisions = args.precision.split(',')
replacements = {
'single': 'sp',
'double': 'dp',
}
precisions = [replacements.get(key, key) for key in precisions if key != 'none']
if 'all' in precisions:
precisions = ['sp', 'dp']
precisions = set(precisions)
#
# list all the exact sizes of kernels to build
#
manual_small = None
if args.manual_small:
manual_small = product(map(int, args.manual_small.split(',')),
['CS_KERNEL_STOCKHAM'])
manual_large = None
if args.manual_large:
manual_large = product(map(int, args.manual_large.split(',')),
['CS_KERNEL_STOCKHAM_BLOCK_CC', 'CS_KERNEL_STOCKHAM_BLOCK_RC'])
# all kernels to be generated from arguments
expand_sizes = {
'small': { 'sp': [], 'dp': [] },
'large': { 'sp': [], 'dp': [] },
}
if small or pow2 or pow3 or pow5 or pow7:
for p in precisions:
expand_sizes['small'][p] = merge(expand_sizes['small'][p], supported_small_sizes(p, pow2, pow3, pow5, pow7))
if manual_small:
for p in precisions:
expand_sizes['small'][p] = merge(expand_sizes['small'][p], manual_small)
if large:
for p in precisions:
expand_sizes['large'][p] = merge(expand_sizes['large'][p], supported_large_sizes(p))
if manual_large:
for p in precisions:
expand_sizes['large'][p] = merge(expand_sizes['large'][p], manual_large)
# TODO- let dim2 ("CS_KERNEL_2D_SINGLE"-typed) use new-gen 1D kernels, and get the dependent kernels.
# For now, 2D_SINGLE kernels still use old-gen small kernels
#
# which kernels by new-gen and which by old-gen? categorize input kernels
#
supported_new_small_kernels = list_new_kernels()
supported_new_large_kernels = list_new_large_kernels() # currently 'large' really is sbcc kernels only
new_small_kernels = new_large_kernels = []
# Don't subtract_from_all for large, since so far sbrc and transpose still rely on old-gen.
for p in precisions:
expand_sizes['small'][p], new_smalls = pick(expand_sizes['small'][p], supported_new_small_kernels)
expand_sizes['large'][p], new_larges = pick(expand_sizes['large'][p], supported_new_large_kernels, subtract_from_all=False)
# remove unsupported length in old_gen
for length in list(expand_sizes['large'][p]):
if length not in old_gen_supported_large:
del expand_sizes['large'][p][length]
new_small_kernels = merge_length(new_small_kernels, new_smalls)
new_large_kernels = merge_length(new_large_kernels, new_larges)
new_kernels = new_small_kernels + new_large_kernels + list_new_2d_kernels()
# set runtime_compile on new kernels that haven't already set a
# value
new_kernels = default_runtime_compile(new_kernels)
    # update the patterns after removing new-gen kernels from the old generator,
    # to avoid referencing .cpp files that will not be generated
if 'small' in patterns and len(expand_sizes['small']['sp']) == 0 and len(expand_sizes['small']['dp']) == 0:
patterns.remove('small')
if 'large' in patterns and len(expand_sizes['large']['sp']) == 0 and len(expand_sizes['large']['dp']) == 0:
patterns.remove('large')
#
# return the necessary include files to cmake
#
if args.command == 'list':
scprint(set(list_old_generated_kernels(patterns=patterns,
precisions=precisions,
num_small_kernel_groups=args.groups)
+ list_generated_kernels(new_kernels)))
return
if args.command == 'generate':
# collection of Functions to generate prototypes for
psmall, plarge, p2d = {}, {}, {}
# already excludes small and large-1D from new-generators
for p in precisions:
psmall = pmerge(psmall, generate_small_1d_prototypes(p, expand_sizes['small'][p]))
plarge = pmerge(plarge, generate_large_1d_prototypes(p, expand_sizes['large'][p]))
if dim2:
for p in precisions:
transform_2D = merge([], supported_2d_sizes(p))
p2d = pmerge(p2d, generate_2d_prototypes(p, transform_2D))
# hijack a few new kernels...
pnew = pmerge({}, generate_new_kernels(new_kernels, precisions))
cpu_functions = list(merge(psmall, plarge, p2d, pnew).values())
format_and_write('function_pool.cpp', generate_cpu_function_pool(cpu_functions))
old_small_lengths = {f.meta.length for f in psmall.values()}
old_large_lengths = {f.meta.length for f in plarge.values()} # sbcc=new-gen, sbrc/transpose=old-gen
new_large_lengths = {k.length for k in new_large_kernels} # sbcc by new-gen
if old_small_lengths:
subprocess.run([args.generator, '-g', str(args.groups), '-p', args.precision, '-t', 'none', '--manual-small', cjoin(sorted(old_small_lengths))], check=True)
if old_large_lengths:
if new_large_lengths:
subprocess.run([args.generator, '-g', str(args.groups), '-p', args.precision, '-t', 'none', '--manual-large', cjoin(sorted(old_large_lengths)), '--no-sbcc', cjoin(sorted(new_large_lengths))], check=True)
else:
subprocess.run([args.generator, '-g', str(args.groups), '-p', args.precision, '-t', 'none', '--manual-large', cjoin(sorted(old_large_lengths))], check=True)
if dim2:
# XXX: currently new2d does both precisions...
new2d = {tuple(x.length) for x in list_new_2d_kernels()}
if 'sp' in precisions:
old2d = {f.meta.length for f in p2d.values() if f.meta.precision == 'sp'}
subprocess.run([args.generator, '-g', str(args.groups), '-p', 'single', '-t', '2D', '--manual-2d', cjoin('x'.join(map(str, lengths)) for lengths in old2d - new2d)], check=True)
if 'dp' in precisions:
old2d = {f.meta.length for f in p2d.values() if f.meta.precision == 'dp'}
subprocess.run([args.generator, '-g', str(args.groups), '-p', 'double', '-t', '2D', '--manual-2d', cjoin('x'.join(map(str, lengths)) for lengths in old2d - new2d)], check=True)
if __name__ == '__main__':
cli()
| mit | -6,375,603,604,371,238,000 | 40.159959 | 219 | 0.570605 | false | 3.503821 | false | false | false |
erggo/Harpy | harpia/bpGUI/sum.py | 1 | 5658 | # -*- coding: utf-8 -*-
# [HARPIA PROJECT]
#
#
# S2i - Intelligent Industrial Systems
# DAS - Automation and Systems Department
# UFSC - Federal University of Santa Catarina
# Copyright: 2006 - 2007 Luis Carlos Dill Junges ([email protected]), Clovis Peruchi Scotti ([email protected]),
# Guilherme Augusto Rutzen ([email protected]), Mathias Erdtmann ([email protected]) and S2i (www.s2i.das.ufsc.br)
# 2007 - 2009 Clovis Peruchi Scotti ([email protected]), S2i (www.s2i.das.ufsc.br)
#
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# For further information, check the COPYING file distributed with this software.
#
#----------------------------------------------------------------------
from harpia.GladeWindow import GladeWindow
from harpia.amara import binderytools as bt
import gtk
from harpia.s2icommonproperties import S2iCommonProperties
#i18n
import os
import gettext
APP='harpia'
DIR=os.environ['HARPIA_DATA_DIR']+'po'
_ = gettext.gettext
gettext.bindtextdomain(APP, DIR)
gettext.textdomain(APP)
#----------------------------------------------------------------------
class Properties( GladeWindow, S2iCommonProperties ):
#----------------------------------------------------------------------
def __init__( self, PropertiesXML, S2iBlockProperties):
self.m_sDataDir = os.environ['HARPIA_DATA_DIR']
filename = self.m_sDataDir+'glade/sum.glade'
self.m_oPropertiesXML = PropertiesXML
self.m_oS2iBlockProperties = S2iBlockProperties
widget_list = [
'Properties',
'SUMBackgroundColor',
'SUMBorderColor',
'SUMHelpView'
]
handlers = [
'on_sum_cancel_clicked',
'on_sum_confirm_clicked',
'on_SUMBackColorButton_clicked',
'on_SUMBorderColorButton_clicked'
]
top_window = 'Properties'
GladeWindow.__init__(self, filename, top_window, widget_list, handlers)
self.widgets['Properties'].set_icon_from_file(self.m_sDataDir+"images/harpia_ave.png")
#load properties values
#there is no properties
#load border color
self.m_oBorderColor = self.m_oS2iBlockProperties.GetBorderColor()
t_nBorderRed = self.m_oBorderColor[0] * 257
t_nBorderGreen = self.m_oBorderColor[1] * 257
t_nBorderBlue = self.m_oBorderColor[2] * 257
t_oBorderColor = gtk.gdk.Color(red=t_nBorderRed,green=t_nBorderGreen,blue=t_nBorderBlue)
self.widgets['SUMBorderColor'].modify_bg(gtk.STATE_NORMAL,t_oBorderColor)
#load block color
self.m_oBackColor = self.m_oS2iBlockProperties.GetBackColor()
t_nBackRed = self.m_oBackColor[0] * 257
t_nBackGreen = self.m_oBackColor[1] * 257
t_nBackBlue = self.m_oBackColor[2] * 257
t_oBackColor = gtk.gdk.Color(red=t_nBackRed,green=t_nBackGreen,blue=t_nBackBlue)
self.widgets['SUMBackgroundColor'].modify_bg(gtk.STATE_NORMAL,t_oBackColor)
#load help text
t_oS2iHelp = bt.bind_file(self.m_sDataDir+"help/sum"+ _("_en.help"))
t_oTextBuffer = gtk.TextBuffer()
t_oTextBuffer.set_text( unicode( str( t_oS2iHelp.help.content) ) )
self.widgets['SUMHelpView'].set_buffer( t_oTextBuffer )
#----------------------------------------------------------------------
def __del__(self):
pass
#----------------------------------------------------------------------
def on_sum_cancel_clicked( self, *args ):
self.widgets['Properties'].destroy()
#----------------------------------------------------------------------
def on_sum_confirm_clicked( self, *args ):
self.m_oS2iBlockProperties.SetBorderColor( self.m_oBorderColor )
self.m_oS2iBlockProperties.SetBackColor( self.m_oBackColor )
self.widgets['Properties'].destroy()
#----------------------------------------------------------------------
def on_SUMBackColorButton_clicked(self,*args):
t_oColor = self.RunColorSelection()
if t_oColor <> None:
self.widgets['SUMBackgroundColor'].modify_bg(gtk.STATE_NORMAL,t_oColor)
self.m_oBackColor[0] = t_oColor.red / 257
self.m_oBackColor[1] = t_oColor.green / 257
self.m_oBackColor[2] = t_oColor.blue / 257
#----------------------------------------------------------------------
def on_SUMBorderColorButton_clicked(self,*args):
t_oColor = self.RunColorSelection()
if t_oColor <> None:
self.widgets['SUMBorderColor'].modify_bg(gtk.STATE_NORMAL,t_oColor)
self.m_oBorderColor[0] = t_oColor.red / 257
self.m_oBorderColor[1] = t_oColor.green / 257
self.m_oBorderColor[2] = t_oColor.blue / 257
#----------------------------------------------------------------------
#SumProperties = Properties()
#SumProperties.show( center=0 )
| gpl-3.0 | -8,235,328,954,891,495,000 | 32.678571 | 139 | 0.567515 | false | 3.671642 | false | false | false |
wdbm/abstraction | fix_database.py | 1 | 7300 | #!/usr/bin/env python
"""
################################################################################
# #
# fix_database #
# #
################################################################################
# #
# LICENCE INFORMATION #
# #
# This program inspects an existing database of conversational exchanges, #
# changes data stored in the database to appropriate types and then saves the #
# changed data to a new database. The original database is not modified. #
# #
# copyright (C) 2016 William Breaden Madden #
# #
# This software is released under the terms of the GNU General Public License #
# version 3 (GPLv3). #
# #
# This program is free software: you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# For a copy of the GNU General Public License, see #
# <http://www.gnu.org/licenses/>. #
# #
################################################################################
Usage:
program [options]
Options:
-h, --help display help message
--version display version and exit
-v, --verbose verbose logging
-s, --silent silent
-u, --username=USERNAME username
--inputdatabase=FILE database [default: database.db]
--outputdatabase=FILE database [default: database_1.db]
--table=NAME table [default: exchanges]
--tablemetadata=NAME metadata table [default: metadata]
"""
name = "fix_database"
version = "2016-06-17T1559Z"
logo = None
import ast
import datetime
import docopt
import inspect
import logging
import os
import subprocess
import sys
import time
import abstraction
import dataset
import propyte
import pyprel
import shijian
import technicolor
def main(options):
global program
program = propyte.Program(
options = options,
name = name,
version = version,
logo = logo
)
global log
from propyte import log
filename_database = options["--inputdatabase"]
filename_database_out = options["--outputdatabase"]
name_table = options["--table"]
name_table_metadata = options["--tablemetadata"]
log.info("\naccess database {filename}".format(
filename = filename_database
))
database = dataset.connect(
"sqlite:///{filename_database}".format(
filename_database = filename_database
)
)
log.info("access table \"{name_table}\"".format(
name_table = name_table
))
table = database[name_table]
log.info("number of rows in table \"{name_table}\": {number_of_rows}".format(
name_table = name_table,
number_of_rows = str(len(table))
))
# Fix database with data version 2015-01-06T172242Z.
# Build a list of unique exchanges.
exchanges = []
for entry in table:
utterance = entry["utterance"]
response = entry["response"]
utterance_time_UNIX = entry["utteranceTimeUNIX"]
response_time_UNIX = entry["responseTimeUNIX"]
utterance_reference = entry["utteranceReference"]
response_reference = entry["responseReference"]
exchange_reference = entry["exchangeReference"]
if type(utterance_reference) is tuple:
log.debug("\nchange utterance reference")
log.debug("from:\n{utterance_reference}".format(
utterance_reference = utterance_reference
))
utterance_reference = utterance_reference[0]
log.debug("to:\n{utterance_reference}".format(
utterance_reference = utterance_reference
))
if type(response_reference) is tuple:
log.debug("\nchange response reference")
log.debug("from:\n{response_reference}".format(
response_reference = response_reference
))
response_reference = response_reference[0]
log.debug("to:\n{response_reference}".format(
response_reference = response_reference
))
if exchange_reference[0] == "(":
log.debug("\nchange exchange reference")
log.debug("from:\n{exchange_reference}".format(
exchange_reference = exchange_reference
))
exchange_reference = ast.literal_eval(exchange_reference)
exchange_reference = unicode(str(exchange_reference[0]), "utf-8")
log.debug("to:\n{exchange_reference}".format(
exchange_reference = exchange_reference
))
# Create a new exchange object using the fixed entries and append it to
# the list of modified exchanges.
exchange = abstraction.Exchange(
utterance = utterance,
response = response,
utterance_time_UNIX = utterance_time_UNIX,
response_time_UNIX = response_time_UNIX,
utterance_reference = utterance_reference,
response_reference = response_reference,
exchange_reference = exchange_reference
)
exchange.printout()
exchanges.append(exchange)
# Save the exchanges to the new database.
log.info("save exchanges to database")
abstraction.save_exchanges_to_database(
exchanges = exchanges,
filename = filename_database_out
)
# Save metadata to the new database.
abstraction.save_database_metadata(filename = filename_database_out)
program.terminate()
if __name__ == "__main__":
options = docopt.docopt(__doc__)
if options["--version"]:
print(version)
exit()
main(options)
| gpl-3.0 | 9,110,137,506,986,018,000 | 39.782123 | 81 | 0.499178 | false | 5.080028 | false | false | false |
Azure/azure-sdk-for-python | sdk/rdbms/azure-mgmt-rdbms/azure/mgmt/rdbms/mysql_flexibleservers/operations/_check_virtual_network_subnet_usage_operations.py | 1 | 5283 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class CheckVirtualNetworkSubnetUsageOperations(object):
"""CheckVirtualNetworkSubnetUsageOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.rdbms.mysql_flexibleservers.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def execute(
self,
location_name, # type: str
parameters, # type: "_models.VirtualNetworkSubnetUsageParameter"
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualNetworkSubnetUsageResult"
"""Get virtual network subnet usage for a given vNet resource id.
:param location_name: The name of the location.
:type location_name: str
:param parameters: The required parameters for creating or updating a server.
:type parameters: ~azure.mgmt.rdbms.mysql_flexibleservers.models.VirtualNetworkSubnetUsageParameter
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualNetworkSubnetUsageResult, or the result of cls(response)
:rtype: ~azure.mgmt.rdbms.mysql_flexibleservers.models.VirtualNetworkSubnetUsageResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkSubnetUsageResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.execute.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'locationName': self._serialize.url("location_name", location_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VirtualNetworkSubnetUsageParameter')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualNetworkSubnetUsageResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
execute.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DBForMySql/locations/{locationName}/checkVirtualNetworkSubnetUsage'} # type: ignore
| mit | -119,604,675,933,735,000 | 47.916667 | 168 | 0.680863 | false | 4.477119 | true | false | false |
nlm/netgen | netgen/templateutils.py | 1 | 3361 | from colors import color
from functools import partial
from jinja2.exceptions import TemplateRuntimeError
import math
from math import log, ceil
from six.moves import xrange
class TemplateUtils(object):
def __init__(self, ipversion):
if ipversion not in (4, 6):
            raise ValueError('ipversion must be 4 or 6')
        self._ipversion = ipversion
        self.function_ip46 = partial(self.ipver, ipversion)
self.function_minpref = partial(self.minpref, ipversion)
@property
def ipversion(self):
'''
ipversion() -> int
Returns the ip version for which this class is instantiated
'''
return self._ipversion
@staticmethod
def ipver(ipversion, valuev4, valuev6):
if ipversion == 4:
return valuev4
elif ipversion == 6:
return valuev6
else:
raise ValueError('invalid value for ipversion: {0}'
.format(ipversion))
@staticmethod
def filter_dotreverse(value, sep=None):
'''
        filter_dotreverse('1.2.3.4.5') -> '5.4.3.2.1'
Reverses a dotted string
'''
if sep is None:
sep = '.'
return sep.join(reversed(str(value).split(sep)))
@staticmethod
def filter_colored(text, fg, bg=None, style=None):
try:
return color(text, fg=fg, bg=bg, style=style)
except Exception as exc:
raise TemplateRuntimeError(exc)
@staticmethod
def minpref(ipversion, host_count):
if ipversion == 4:
return 32 - int(ceil(log(host_count, 2)))
elif ipversion == 6:
return 128 - int(ceil(log(host_count, 2)))
else:
raise ValueError('invalid value for ipversion: {0}'
.format(ipversion))
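    # Illustrative example: minpref(4, 200) == 24, because 200 hosts need
    # ceil(log2(200)) == 8 host bits of a 32-bit IPv4 address.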
@staticmethod
def function_orange(*args, **kwargs):
offset = int(kwargs.get('offset', 0))
return [i + offset for i in range(*args)]
@staticmethod
def function_xorange(*args, **kwargs):
offset = int(kwargs.get('offset', 0))
return (i + offset for i in xrange(*args))
@classmethod
def function_range1(cls, *args, **kwargs):
return cls.function_orange(*args, offset=1, **kwargs)
@classmethod
def function_xrange1(cls, *args, **kwargs):
return cls.function_xorange(*args, offset=1, **kwargs)
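    # Illustrative example: range1(3) (exposed to templates below) yields
    # 1, 2, 3, i.e. a 1-based counterpart of the built-in range().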
@staticmethod
def function_raise(message):
raise TemplateRuntimeError(message)
@staticmethod
def function_assert(expr, message):
if not expr:
raise TemplateRuntimeError(message)
# Setup the environment
def add_custom_filters(self, env):
for name in ('colored', 'dotreverse'):
env.filters[name] = getattr(self, 'filter_{0}'.format(name))
def add_custom_functions(self, env):
for name in ('assert', 'ip46', 'minpref', 'raise'):
env.globals[name] = getattr(self, 'function_{0}'.format(name))
env.globals['range'] = self.function_xorange
env.globals['range1'] = self.function_xrange1
math.int = int
math.float = float
math.round = round
math.min = min
math.max = max
env.globals['math'] = math
def setup_environment(self, env):
self.add_custom_functions(env)
self.add_custom_filters(env)
return env
| gpl-2.0 | 542,898,760,346,410,400 | 29.554545 | 74 | 0.591491 | false | 4.015532 | false | false | false |
leonro/magpy-git | magpy/acquisition/palmacqprotocol.py | 1 | 11090 | import sys, time, os, socket
import struct, binascii, re, csv
from datetime import datetime, timedelta
from twisted.protocols.basic import LineReceiver
from twisted.internet import reactor
from twisted.python import usage, log
from twisted.internet.serialport import SerialPort
from twisted.web.server import Site
from twisted.web.static import File
try: # version > 0.8.0
from autobahn.wamp1.protocol import exportRpc
except:
from autobahn.wamp import exportRpc
iddict = {'f': '10', 'x': '11', 'y': '12', 'z': '13', 'df': '14', 't': '30', 'rh': '33', 'p': '35', 'w': '38'}
"""
0: clientname -- str (atlas)
1: timestamp (PC) -- str (2013-01-23 12:10:32.712475)
2: date (PC) -- str (2013-01-23)
3: outtime (PC) -- str (12:10:32.712475)
4: timestamp (sensor) -- str (2013-01-23 12:10:32.712475)
5: GPS coordinates -- str (??.??N ??.??E)
9: Sensor Description -- str (to be found in the adict)
10: f -- float (48633.04) [nT]
11: x -- float (20401.3) [nT]
12: y -- float (-30.0) [nT]
13: z -- float (43229.7) [nT]
14: df -- float (0.06) [nT]
30: T (ambient) -- float (7.2) [C]
31: T (sensor) -- float (10.0) [C]
32: T (electronics) -- float (12.5) [C]
33: rh (relative humidity) -- float (99.0) [%]
34: T (dewpoint) -- float (6.0) [C]
38: W (weight) -- float (24.0042) [g]
40: Error code (POS1) -- float (80) [-]
60: VDD (support voltage) -- float (5.02) [V]
61: VAD (measured voltage) -- float (2.03) [V]
62: VIS (measured voltage) -- float (0.00043) [V]
"""
def timeToArray(timestring):
# Converts time string of format 2013-12-12 23:12:23.122324
# to an array similiar to a datetime object
try:
splittedfull = timestring.split(' ')
splittedday = splittedfull[0].split('-')
splittedsec = splittedfull[1].split('.')
splittedtime = splittedsec[0].split(':')
datearray = splittedday + splittedtime
datearray.append(splittedsec[1])
datearray = map(int,datearray)
return datearray
except:
log.msg('Error while extracting time array')
return []
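# Illustrative example: timeToArray("2013-12-12 23:12:23.122324") returns
# [2013, 12, 12, 23, 12, 23, 122324].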
def dataToFile(outputdir, sensorid, filedate, bindata, header):
# File Operations
try:
hostname = socket.gethostname()
path = os.path.join(outputdir,hostname,sensorid)
# outputdir defined in main options class
if not os.path.exists(path):
os.makedirs(path)
savefile = os.path.join(path, sensorid+'_'+filedate+".bin")
if not os.path.isfile(savefile):
with open(savefile, "wb") as myfile:
myfile.write(header + "\n")
myfile.write(bindata + "\n")
else:
with open(savefile, "a") as myfile:
myfile.write(bindata + "\n")
except:
log.err("PalmAcq - Protocol: Error while saving file")
## PalmAcq protocol
## --------------------
class PalmAcqProtocol(LineReceiver):
"""
Protocol to read Arduino data (usually from ttyACM0)
Tested so far only for Arduino Uno on a Linux machine
The protocol works only if the serial output follows the MagPy convention:
Up to 99 Sensors are supported identified by unique sensor names and ID's.
ARDUINO OUTPUT:
- serial output on ttyACM0 needs to follow the MagPy definition:
Three data sequences are supported:
1.) The meta information
The meta information line contains all information for a specific sensor.
If more than one sensor is connected, then several meta information
lines should be sent (e.g. M1:..., M2:..., M99:...)
Meta lines should be resent once in a while (e.g. every 10-100 data points)
Example:
M1: SensorName: MySensor, SensorID: 12345, SensorRevision: 0001
2.) The header line
The header line contains information on the provided data for each sensor.
The typical format includes the MagPy key, the actual Variable and the unit.
Key and Variable are separeted by an underscore, unit is provided in brackets.
Like the Meta information the header should be sent out once in a while
Example:
H1: f_F [nT], t1_Temp [deg C], var1_Quality [None], var2_Pressure [mbar]
3.) The data line:
The data line containes all data from a specific sensor
Example:
D1: 46543.7898, 6.9, 10, 978.000
- recording starts after meta and header information have been received
MARTAS requirements:
- add the following line to the sensor.txt
ARDUINO ACM0 9600
- on the MARTAS machine an additional information file will be created
containing the sensor information for connected ARDUINO boards:
arduinolist.csv:
"HMC5883_12345_0001","['x', 'y', 'z']"
This file is used by the MARCOS machine to identify connected sensors and their keys
"""
delimiter = "\r"
## need a reference to our WS-MCU gateway factory to dispatch PubSub events
##
def __init__(self, wsMcuFactory, sensor, outputdir):
self.wsMcuFactory = wsMcuFactory
self.sensorid = sensor
self.hostname = socket.gethostname()
self.outputdir = outputdir
self.sensor = ''
self.sensordict = {}
self.ConversionConstant = 40/4/float(int("0x800000",16))
eventstring = "evt0,evt1,evt3,evt11,evt12,evt13,evt32,evt60,evt99"
self.eventlist = eventstring.split(',')
def connectionMade(self):
log.msg('%s connected.' % self.sensorid)
def extractPalmAcqData(self, line):
"""
Method to convert hexadecimals to doubles
Returns a data array
"""
# INTERPRETING INCOMING DATA AND CONVERTING HEXDECIMALS TO DOUBLE
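        # Each 6-character chunk is a 24-bit two's-complement sample word: the
        # XOR with 0x800000 followed by subtracting 0x800000 below recovers
        # the signed count, which is then scaled by ConversionConstant to
        # volts. The optional 4-character chunks are 16-bit words decoded the
        # same way but with per-channel scale factors (see the idx checks).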
if line.startswith('*'):
try:
data = []
chunks = []
line = line.strip('*')
chunks.append(line[:6])
chunks.append(line[6:12])
chunks.append(line[12:18])
trigger = line[18]
ar = line.split(':')
if len(ar) == 2:
extended = ar[1]
chunks.append(extended[:4])
chunks.append(extended[4:8])
chunks.append(extended[8:12])
chunks.append(extended[12:16])
chunks.append(extended[16:20])
for idx, chunk in enumerate(chunks):
if len(chunk) == 6:
val = hex(int('0x'+chunk,16) ^ int('0x800000',16))
val = hex(int(val,16) - int('0x800000',16))
# Conversion constanst should be obtained from palmacq-init
val = float(int(val,16)) * self.ConversionConstant
elif len(chunk) == 4:
val = hex(int('0x'+chunk,16) ^ int('0x8000',16))
val = hex(int(val,16) - int('0x8000',16))
if idx == 3:
val = float(int(val,16)) * 0.000575 + 1.0
elif idx == 4:
val = float(int(val,16)) / 128.0
elif idx > 4:
val = float(int(val,16)) / 8000.0
data.append(val)
# SOME TEST OUTPUT
#if len(data)> 4:
# print datetime.utcnow(), data
#print data, trigger
return data, trigger
except:
#print "PALMACQ: an error occurred while interpreting the hexadecimal code"
return [], 'N'
else:
return [], 'N'
def processPalmAcqData(self, data):
"""Convert raw ADC counts into SI units as per datasheets"""
printdata = False
currenttime = datetime.utcnow()
outdate = datetime.strftime(currenttime, "%Y-%m-%d")
filename = outdate
outtime = datetime.strftime(currenttime, "%H:%M:%S")
# IMPORTANT : GET TIMESTAMP FROM DATA !!!!!!
timestamp = datetime.strftime(currenttime, "%Y-%m-%d %H:%M:%S.%f")
datearray = timeToArray(timestamp)
packcode = '6hL'
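        # '6hL' packs the timestamp (year, month, day, hour, minute, second
        # as shorts, microseconds as an unsigned long); the value fields
        # appended below extend this to '6hLfffllfff'.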
# Would probably be good to preserve the hexadecimal format
# Seems to be extremely effective regarding accuracy and storage
x = data[0]
y = data[1]
z = data[2]
v = 0.0
t = 0.0
p = 0.0
q = 0.0
r = 0.0
if len(data) > 4:
v = data[3]
t = data[4]
p = data[5]
q = data[6]
r = data[7]
datearray.append(x)
datearray.append(y)
datearray.append(z)
datearray.append(int(float(v)*10000))
datearray.append(int(float(t)*10000))
datearray.append(p)
datearray.append(q)
datearray.append(r)
packcode = packcode + 'fffllfff'
multiplier = [1,1,1,10000,10000,1,1,1]
try:
data_bin = struct.pack(packcode,*datearray)
except:
log.msg('Error while packing binary data')
pass
header = "# MagPyBin %s %s %s %s %s %s %d" % (self.sensorid, "[x,y,z,v,t,p,q,r]", "[x,y,z,v,t,p,q,r]", "[V,V,V,V,C,V,V,V]", str(multiplier).replace(" ",""), packcode, struct.calcsize(packcode))
if printdata:
#print header
print timestamp
# File Operations
try:
dataToFile(self.outputdir, self.sensorid, filename, data_bin, header)
except:
log.msg('Saving failed')
pass
evt0 = {'id': 0, 'value': self.hostname}
evt1 = {'id': 1, 'value': timestamp}
evt3 = {'id': 3, 'value': outtime}
evt11 = {'id': 11, 'value': x}
evt12 = {'id': 12, 'value': y}
evt13 = {'id': 13, 'value': z}
evt32 = {'id': 32, 'value': t}
evt60 = {'id': 60, 'value': v}
evt99 = {'id': 99, 'value': 'eol'}
return evt0,evt1,evt3,evt11,evt12,evt13,evt32,evt60,evt99
def lineReceived(self, line):
data=[]
if line:
data, trigger = self.extractPalmAcqData(line)
if len(data) > 1:
evt0,evt1,evt3,evt11,evt12,evt13,evt32,evt60,evt99 = self.processPalmAcqData(data)
dispatch_url = "http://example.com/"+self.hostname+"/pal#"+self.sensorid+"-value"
# eventlist defined in init
for event in self.eventlist:
self.wsMcuFactory.dispatch(dispatch_url, eval(event))
| gpl-3.0 | 7,457,627,093,789,535,000 | 38.049296 | 201 | 0.533724 | false | 3.747888 | false | false | false |
deplicate/deplicate | duplicate/utils/fs/osx.py | 1 | 1187 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from ..init import compilecards
from .common import fsdecode
from .posix import has_hidden_attribute as _has_hidden_attribute
from .posix import has_archive_attribute, is_archived
WILDCARDS = (
'*.DS_Store', '.AppleDouble', '.LSOverride', 'Icon', '._*',
'.DocumentRevisions-V100', '.fseventsd', '.Spotlight-V100',
'.TemporaryItems', '.Trashes', '.VolumeIcon.icns',
'.com.apple.timemachine.donotpresent', '.AppleDB', '.AppleDesktop',
'Network Trash Folder', 'Temporary Items', '.apdisk')
_wildcards_match = compilecards(WILDCARDS).match
def has_hidden_attribute(filename):
try:
import Foundation
ufilename = fsdecode(filename)
url = Foundation.NSURL.fileURLWithPath_(ufilename)
res = url.getResourceValue_forKey_error_(
None, Foundation.NSURLIsHiddenKey, None
)
flag = res[1]
except ImportError:
flag = _has_hidden_attribute(filename)
return flag
def is_hidden(filename):
return filename.startswith('.') or has_hidden_attribute(filename)
def is_system(filename):
return bool(_wildcards_match(filename))
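# Illustrative behaviour (assuming compilecards follows fnmatch-style
# wildcard semantics): is_hidden('.profile') is True, and is_system() is
# True for names matching the wildcards above, such as '.DS_Store'.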
| mit | 7,296,047,325,512,074,000 | 26.604651 | 71 | 0.676495 | false | 3.618902 | false | false | false |
abilng/Mtech-proj-scripts | Others/testDTW.py | 1 | 5762 | import numpy as np
from collections import Counter
from numpy import array, zeros, argmin, inf
from numpy.linalg import norm
import sys,argparse
DATA_PATH='/others/abilng/Database/MSR2-abil/test/data_out/'
GroundTruthFile="/others/abilng/Database/MSR2-abil/Videos/groundtruth.txt";
PrintProgress=True
def dtw(x, y, dist=lambda x, y: norm(x - y, ord=1)):
""" Computes the DTW of two sequences.
:param array x: N1*M array
:param array y: N2*M array
:param func dist: distance used as cost measure (default L1 norm)
Returns the minimum distance, the accumulated cost matrix and the wrap path.
"""
x = array(x)
if len(x.shape) == 1:
x = x.reshape(-1, 1)
y = array(y)
if len(y.shape) == 1:
y = y.reshape(-1, 1)
r, c = len(x), len(y)
D = zeros((r + 1, c + 1))
D[0, 1:] = inf
D[1:, 0] = inf
for i in range(r):
for j in range(c):
D[i+1, j+1] = dist(x[i], y[j])
for i in range(r):
for j in range(c):
D[i+1, j+1] += min(D[i, j], D[i, j+1], D[i+1, j])
D = D[1:, 1:]
dist = D[-1, -1] / sum(D.shape)
return dist, D
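# Minimal usage sketch (illustrative): the first return value is the
# path-normalised DTW distance; smaller values mean more similar sequences.
#
#   d, _ = dtw(np.array([1.0, 2.0, 3.0]), np.array([1.0, 3.0]))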
def getMSR2GroundTruth(GroundTruthFile):
labels = {}
with open(GroundTruthFile) as f:
data = f.read();
for line in data.splitlines():
if line[0]=='#':
#comment
continue;
seg={};
words=line.split()
#video_name, left, width, top, height, start, time duration, action(1-clapping-2-waving-3-boxing)
seg['action']=int(words[7])
seg['start']=int(words[5])
seg['length']=int(words[6])
video=(words[0].strip('".avi'));
try:
labels[video]
except KeyError:
labels[video]=list();
finally:
labels[video].append(seg);
return labels;
def getRes(groundTruth, qFile, classes=[], nFiles=54):
targetScore={}
nonTargetScore={}
Tp={}
Fp={}
q={}
for cls in classes:
targetScore[cls]=list()
nonTargetScore[cls]=list()
Tp[cls]=Fp[cls]=0
q[cls]=None
##############################
#READ Q File
f = open(DATA_PATH+'/'+str(qFile)+'.avi.txt','r');
f.readline();
dat=np.loadtxt(f);
f.close()
for label in groundTruth[str(qFile)]:
if label['action'] not in classes:
continue
start=label['start']
end=label['start']+label['length']
q[label['action']]=dat[start:end]
############
##For each File
for name in xrange(1,nFiles+1):
filename=str(name)
#if filename==str(qFile):
# continue
#init var
#read data
f = open(DATA_PATH+'/'+filename+'.avi.txt','r');
f.readline();
dat=np.loadtxt(f);
f.close()
#print filename,Query
if PrintProgress:
sys.stderr.write('[Query '+str(qFile)+' ]Testing on File:'+filename+'\r')
#for each label
for label in groundTruth[filename]:
orgLabel=label['action']
if orgLabel not in classes:
continue
start=label['start']
end=label['start']+label['length']
distance ={}
for cls in classes:
#dtw scores
if q[cls] is None:
continue
distance[cls], _ = dtw(dat[start:end], q[cls])
if cls==orgLabel:
targetScore[orgLabel].append(distance[cls])
else:
nonTargetScore[orgLabel].append(distance[cls])
preLabel=min(distance, key=distance.get);
if preLabel==orgLabel:
Tp[preLabel]+=1
else:
Fp[preLabel]+=1
if PrintProgress:
sys.stderr.write('[Query '+str(qFile)+' ]Testing on File: [DONE]\n')
return targetScore,nonTargetScore,Tp,Fp
def precision(Tp,Fp,Total):
retrieved =Counter(Tp)+Counter(Fp)
prec=dict()
for (key,val) in retrieved.iteritems():
prec[key]=float(Tp[key])/retrieved[key]
	prec['Avg'] = float(sum(i for i in Tp.itervalues()))/sum(i for i in retrieved.itervalues())
return prec
def recall(Tp,Fp,Total):
rec=dict()
for (key,val) in Total.iteritems():
rec[key]=float(Tp[key])/Total[key]
	rec['Avg'] = float(sum(i for i in Tp.itervalues()))/sum(i for i in Total.itervalues())
return rec
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='GMM Testing')
parser.add_argument('-v','--verbose', action='store_true')
parser.add_argument('targetFile')
parser.add_argument('nonTargetFile')
args = parser.parse_args()
PrintProgress = args.verbose
targetFile = args.targetFile
nonTargetFile = args.nonTargetFile
groundTruth = getMSR2GroundTruth(GroundTruthFile);
q=[2,11,44,50,32,8,45,33,20,25]
frameLen=15
nClass =3
nFiles=54
classes = range(1,nClass+1)
AvgTp = Counter({1:0,2:0,3:0})
AvgFp = Counter({1:0,2:0,3:0})
targetFptr=file(targetFile,'w');
nonTargetFptr=file(nonTargetFile,'w');
print "|| Query |",
for cls in classes:
print "Tp(%02d) | Fp(%02d) |"%(cls,cls),
print "Tp(Avg) | Fp(Avg) ||"
print "||=======",
for cls in classes:
print "======== ========",
print "===================||"
for qFile in q:
(targetScore,nonTargetScore,Tp,Fp)=getRes(groundTruth,qFile,classes,nFiles)
AvgTp +=Counter(Tp)
AvgFp +=Counter(Fp)
print "|| %2d |"%(qFile),
for cls in classes:
print " %02d | %02d |"%(Tp[cls],Fp[cls]),
print "%.04f | %.04f ||"%(
sum(i for i in Tp.itervalues())/float(len(classes)),
sum(i for i in Fp.itervalues())/float(len(classes)))
for scores in targetScore.itervalues():
for score in scores:
targetFptr.write("%.5f"%score+"\n")
for scores in nonTargetScore.itervalues():
for score in scores:
nonTargetFptr.write("%.5f"%score+"\n")
targetFptr.close()
nonTargetFptr.close()
n=float(len(q))
for (key,val) in AvgTp.iteritems():
AvgTp[key] = AvgTp[key]/n
for (key,val) in AvgFp.iteritems():
AvgFp[key] = AvgFp[key]/n
print "|| Avg |",
for cls in classes:
print " %02d | %02d |"%(AvgTp[cls],AvgFp[cls]),
print "%.04f | %.04f ||"%(
sum(i for i in AvgTp.itervalues())/float(nClass),
sum(i for i in AvgFp.itervalues())/float(nClass))
| apache-2.0 | 1,400,461,167,598,897,200 | 22.140562 | 100 | 0.618709 | false | 2.670065 | false | false | false |
napalm-automation/napalm-yang | napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/subTLVs/subTLVs_/prefix_sid/sid/__init__.py | 1 | 12694 | # -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import state
class sid(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/extended-ipv4-reachability/prefixes/prefix/subTLVs/subTLVs/prefix-sid/sid. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Prefix Segment-ID list. IGP-Prefix Segment is an IGP segment attached
to an IGP prefix. An IGP-Prefix Segment is global (unless explicitly
advertised otherwise) within the SR/IGP domain.
"""
__slots__ = ("_path_helper", "_extmethods", "__state")
_yang_name = "sid"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"extended-ipv4-reachability",
"prefixes",
"prefix",
"subTLVs",
"subTLVs",
"prefix-sid",
"sid",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/subTLVs/subTLVs/prefix_sid/sid/state (container)
YANG Description: State parameters for Prefix-SID.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/subTLVs/subTLVs/prefix_sid/sid/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters for Prefix-SID.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
state = __builtin__.property(_get_state)
_pyangbind_elements = OrderedDict([("state", state)])
from . import state
class sid(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/extended-ipv4-reachability/prefixes/prefix/subTLVs/subTLVs/prefix-sid/sid. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Prefix Segment-ID list. IGP-Prefix Segment is an IGP segment attached
to an IGP prefix. An IGP-Prefix Segment is global (unless explicitly
advertised otherwise) within the SR/IGP domain.
"""
__slots__ = ("_path_helper", "_extmethods", "__state")
_yang_name = "sid"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"extended-ipv4-reachability",
"prefixes",
"prefix",
"subTLVs",
"subTLVs",
"prefix-sid",
"sid",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/subTLVs/subTLVs/prefix_sid/sid/state (container)
YANG Description: State parameters for Prefix-SID.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/subTLVs/subTLVs/prefix_sid/sid/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters for Prefix-SID.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
state = __builtin__.property(_get_state)
_pyangbind_elements = OrderedDict([("state", state)])
| apache-2.0 | 1,686,331,334,871,978,000 | 37.12012 | 375 | 0.576178 | false | 4.29577 | true | false | false |
jakebas/homepage | routes.py | 1 | 1408 | from flask import Flask, render_template, request
from forms import ContactForm
from flask.ext.mail import Message, Mail
mail = Mail()
app = Flask(__name__)
app.secret_key = 'REPLACE_WITH_SECRET_KEY'  # removed from public version
app.config['MAIL_SERVER'] = "smtp.gmail.com"
app.config['MAIL_PORT'] = 465
app.config['MAIL_USE_SSL'] = True
app.config['MAIL_USERNAME'] = 'REPLACE_WITH_USERNAME'  # removed from public version
app.config['MAIL_PASSWORD'] = 'REPLACE_WITH_PASSWORD'  # removed from public version
mail.init_app(app)
@app.route('/')
def home():
return render_template('home.html')
@app.route('/about')
def about():
return render_template('about.html')
@app.route('/contact', methods=['GET', 'POST'])
def contact():
form = ContactForm()
if request.method == 'POST':
if not form.validate():
return render_template('contact.html', form=form)
else:
msg = Message(form.subject.data, sender="[email protected]", recipients=['[email protected]'])
msg.body = """
From: %s <%s>
%s
""" % (form.name.data, form.email.data, form.message.data)
mail.send(msg)
return render_template('contact.html', success=True)
elif request.method == 'GET':
return render_template('contact.html', form=form)
@app.route('/teaching')
def teaching():
return render_template('teaching.html')
@app.route('/compsci')
def compsci():
return render_template('compsci.html')
if __name__ == '__main__':
app.run(debug=True)
| mit | -5,081,545,841,456,852,000 | 24.6 | 104 | 0.667614 | false | 3.274419 | false | false | false |
patpatpatpatpat/stormpath-django | django_stormpath/admin.py | 1 | 1326 | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import StormpathUser
from .forms import StormpathUserCreationForm, StormpathUserChangeForm
class StormpathUserAdmin(UserAdmin):
# Set the add/modify forms
add_form = StormpathUserCreationForm
form = StormpathUserChangeForm
# The fields to be used in displaying the User model.
# These override the definitions on the base UserAdmin
# that reference specific fields on auth.User.
list_display = ('username', 'email', 'is_staff', 'given_name', 'surname')
list_filter = ('is_staff', 'is_superuser', 'is_active', 'groups')
search_fields = ('email', 'given_name', 'surname')
ordering = ('email',)
filter_horizontal = ('groups', 'user_permissions',)
fieldsets = (
(None, {'fields': ('username', 'email', 'password')}),
('Personal info', {'fields': ('given_name', 'surname')}),
('Permissions', {'fields': ('is_active', 'is_staff', 'is_superuser', 'groups',)}),
('Important dates', {'fields': ('last_login',)}),
)
add_fieldsets = (
(None, {'classes': ('wide',),
'fields': ('given_name', 'surname', 'email', 'password1', 'password2')}),
)
# Register StormpathUserAdmin with the admin site
admin.site.register(StormpathUser, StormpathUserAdmin)
| apache-2.0 | 1,799,797,049,859,641,000 | 40.4375 | 90 | 0.6546 | false | 3.724719 | false | false | false |
jturner314/map_ssh_attempts | map_ssh_attempts/geoip.py | 1 | 4272 | # Copyright (C) 2014 Jim Turner
# This file is part of map_ssh_attempts.
# map_ssh_attempts is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option) any
# later version.
# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
from pygeoip import GeoIPError
import gzip
import collections
import os
import os.path
import pygeoip
import urllib.request
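# (longitude, latitude) pair returned by GeoIPMultiversion.coord_by_addr() below;
# note the longitude-first field order.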
Coordinate = collections.namedtuple('Coordinate', ('longitude', 'latitude'))
class GeoIPMultiversion(object):
versions = [4, 6]
db_names = {4: 'GeoLiteCity.dat',
6: 'GeoLiteCityv6.dat'}
db_sources = {4: 'http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz',
6: 'http://geolite.maxmind.com/download/geoip/database/GeoLiteCityv6-beta/GeoLiteCityv6.dat.gz'}
def __init__(self, cache_dir='~/.cache/map_ssh_attempts'):
"""Create an object to lookup GeoIP information regardless of IP version.
:param cache_dir: directory in which to place the GeoIP databases
"""
self.cache_dir = os.path.expanduser(cache_dir)
self.dbs = {}
def update_cache(self):
"""Update GeoIP database cache."""
if not os.path.isdir(self.cache_dir):
if os.path.lexists(self.cache_dir):
raise NotADirectoryError('Download location exists but is not a directory.')
else:
os.makedirs(self.cache_dir)
for version in GeoIPMultiversion.versions:
name = GeoIPMultiversion.db_names[version]
url = GeoIPMultiversion.db_sources[version]
with open(os.path.join(self.cache_dir, name), 'wb') as f:
print("Updating {}... ".format(name), end='')
db_gz = urllib.request.urlopen(url).read()
db = gzip.decompress(db_gz)
f.write(db)
print("100%")
def check_cache(self):
"""Check if GeoIP database files exist in cache."""
for version in GeoIPMultiversion.versions:
name = GeoIPMultiversion.db_names[version]
if not os.path.isfile(os.path.join(self.cache_dir, name)):
return False
else:
return True
def load_dbs(self):
"""Load GeoIP objects from database files."""
if not self.check_cache():
self.update_cache()
self.dbs = {}
for version in GeoIPMultiversion.versions:
name = GeoIPMultiversion.db_names[version]
print("Loading {}... ".format(name), end='')
self.dbs[version] = pygeoip.GeoIP(os.path.join(self.cache_dir, name))
print("100%")
def check_loaded(self):
"""Check if GeoIP databases have been loaded."""
for version in GeoIPMultiversion.versions:
            if version not in self.dbs:
return False
else:
return True
def coord_by_addr(self, addr):
"""Given an IPv4Address or IPv6Address, return a location Coordinate.
:param addr: IPv4Address or IPv6Address object with address of host
:return: Coordinate object
"""
if not self.check_loaded():
self.load_dbs()
record = self.dbs[addr.version].record_by_addr(str(addr))
if record:
return Coordinate(record['longitude'], record['latitude'])
else:
raise GeoIPError("Unable to determine coordinates.")
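    # Any other '*_by_addr' lookup (e.g. record_by_addr, country_code_by_addr) is
    # forwarded to the pygeoip database matching the address's IP version.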
def __getattr__(self, name):
if name.endswith('_by_addr'):
def f(addr):
if not self.check_loaded():
self.load_dbs()
return getattr(self.dbs[addr.version], name)(str(addr))
return f
else:
raise AttributeError("'GeoIPMultiversion' has no attribute '{}'".format(name))
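# Illustrative usage (sketch, not part of the original module):
#     from ipaddress import ip_address
#     geo = GeoIPMultiversion()
#     coord = geo.coord_by_addr(ip_address('8.8.8.8'))
#     print(coord.longitude, coord.latitude)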
| gpl-2.0 | 1,047,337,268,620,959,500 | 37.142857 | 114 | 0.61868 | false | 4.131528 | false | false | false |
sbustreamspot/sbustreamspot-cdm | test_kafka_vm.py | 1 | 2510 | #!/usr/bin/env python
import argparse
from constants import *
import json
import pdb
from pykafka import KafkaClient
from pykafka.exceptions import OffsetOutOfRangeError, RequestTimedOut
from pykafka.partitioners import HashingPartitioner
import logging
import sys
from tc.schema.serialization import Utils
from tc.schema.serialization.kafka import KafkaAvroGenericSerializer, KafkaAvroGenericDeserializer
logger = logging.getLogger(__name__)  # fallback logger for the consumer error handlers below
parser = argparse.ArgumentParser()
parser.add_argument('--kafka-group', help='Kafka consumer group', required=True)
parser.add_argument('--only-produce', help='Only produce messages',
required=False, action='store_true')
args = vars(parser.parse_args())
kafka_client = KafkaClient(KAFKA_URL)
kafka_topic = kafka_client.topics[args['kafka_group']]
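# Synchronous producer: with sync=True each produce() call blocks until the
# broker acknowledges delivery.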
producer = kafka_topic.get_producer(
partitioner=HashingPartitioner(),
sync=True, linger_ms=1, ack_timeout_ms=30000, max_retries=0)
schema = Utils.load_schema(SCHEMA_FILE)
input_file = open('avro/infoleak_small_units.CDM13.avro', 'rb')
serializer = KafkaAvroGenericSerializer(schema)
deserializer = KafkaAvroGenericDeserializer(schema, input_file=input_file)
records = deserializer.deserialize_from_file()
i = 0
produced = []
for edge in records:
#kafka_key = str(i).encode() # this is hashed to select a partition
kafka_key = '0'
produced.append(edge)
message = serializer.serialize(args['kafka_group'], edge)
producer.produce(message, kafka_key)
i += 1
print 'Pushed', i, 'messages'
producer.stop()
input_file.close()
if args['only_produce']:
sys.exit(0)
consumer = kafka_topic.get_balanced_consumer(
consumer_group=args['kafka_group'], auto_commit_enable=True,
auto_commit_interval_ms=1000, reset_offset_on_start=False,
consumer_timeout_ms=100, fetch_wait_max_ms=0, managed=True)
j = 0
consumed = []
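# Poll the balanced consumer until we have read back as many messages as were
# produced; timeouts and offset errors are logged and the loop retries.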
while True:
if j >= i:
break
try:
for kafka_message in consumer:
if kafka_message.value is not None:
message = deserializer.deserialize(args['kafka_group'],
kafka_message.value)
consumed.append(message)
j += 1
except RequestTimedOut:
logger.warn('Kafka consumer request timed out')
except OffsetOutOfRangeError:
logger.warn('Kafka consumer offset out of range')
print 'Consumed', i, 'messages'
consumer.stop()
for i in range(len(produced)):
assert consumed[i] == produced[i]
| apache-2.0 | 5,254,498,871,606,089,000 | 30.375 | 98 | 0.686853 | false | 3.707533 | false | false | false |
aclowes/yawn | yawn/task/serializers.py | 1 | 2485 | from rest_framework import serializers
from yawn.task.models import Task, Execution
from yawn.worker.serializers import MessageSerializer, WorkerSerializer
from yawn.workflow.models import Workflow
class SimpleWorkflowSerializer(serializers.ModelSerializer):
name = serializers.CharField(source='name.name', read_only=True)
class Meta:
model = Workflow
fields = ('id', 'name', 'version')
class TaskSerializer(serializers.ModelSerializer):
name = serializers.CharField(source='template.name', read_only=True)
workflow = SimpleWorkflowSerializer(source='template.workflow', read_only=True)
class Meta:
model = Task
exclude = ('run', 'template')
class ExecutionDetailSerializer(serializers.ModelSerializer):
worker = WorkerSerializer(read_only=True)
class Meta:
model = Execution
exclude = ('task',)
class ExecutionListSerializer(serializers.ModelSerializer):
worker = WorkerSerializer(read_only=True)
task = TaskSerializer(read_only=True)
minutes_running = serializers.SerializerMethodField()
class Meta:
model = Execution
fields = ('id', 'task', 'worker', 'status', 'start_timestamp', 'minutes_running')
def get_minutes_running(self, obj):
if obj.stop_timestamp:
runtime = (obj.stop_timestamp - obj.start_timestamp).total_seconds()
return '{:.0f}m {:.2f}s'.format(runtime // 60, runtime % 60)
class TaskDetailSerializer(TaskSerializer):
executions = serializers.SerializerMethodField()
messages = MessageSerializer(many=True, source='message_set', read_only=True)
max_retries = serializers.IntegerField(source='template.max_retries')
timeout = serializers.IntegerField(source='template.timeout')
command = serializers.CharField(source='template.command')
# actions
terminate = serializers.IntegerField(write_only=True)
enqueue = serializers.BooleanField(write_only=True)
def get_executions(self, instance):
executions = instance.execution_set.order_by('id')
return ExecutionDetailSerializer(executions, many=True).data
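    # 'terminate' and 'enqueue' are write-only action fields: a PATCH such as
    # {"terminate": <execution id>} kills a running execution and
    # {"enqueue": true} re-queues the task (handled in update() below).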
def update(self, instance, validated_data):
if validated_data.get('terminate'):
instance.execution_set.filter(
id=validated_data['terminate'], status=Execution.RUNNING
).update(status=Execution.KILLED)
if validated_data.get('enqueue'):
instance.enqueue()
return instance
| mit | -2,446,936,753,634,268,000 | 34 | 89 | 0.700604 | false | 4.306759 | false | false | false |
ifduyue/sentry | tests/sentry/integrations/vsts/testutils.py | 1 | 33812 | from __future__ import absolute_import
import responses
from six.moves.urllib.parse import urlparse, urlencode, parse_qs
from sentry.integrations.vsts import VstsIntegrationProvider
from sentry.testutils import IntegrationTestCase
class VstsIntegrationTestCase(IntegrationTestCase):
provider = VstsIntegrationProvider
def setUp(self):
super(VstsIntegrationTestCase, self).setUp()
self.access_token = '9d646e20-7a62-4bcc-abc0-cb2d4d075e36'
self.refresh_token = '32004633-a3c0-4616-9aa0-a40632adac77'
self.vsts_account_id = 'c8a585ae-b61f-4ba6-833c-9e8d5d1674d8'
self.vsts_account_name = 'MyVSTSAccount'
self.vsts_account_uri = 'https://MyVSTSAccount.vssps.visualstudio.com:443/'
self.vsts_user_id = 'd6245f20-2af8-44f4-9451-8107cb2767db'
self.vsts_user_name = 'Foo Bar'
self.vsts_user_email = '[email protected]'
self.repo_id = '47166099-3e16-4868-9137-22ac6b05b06e'
self.repo_name = 'cool-service'
self.project_a = {
'id': 'eb6e4656-77fc-42a1-9181-4c6d8e9da5d1',
'name': 'ProjectA',
}
self.project_b = {
'id': '6ce954b1-ce1f-45d1-b94d-e6bf2464ba2c',
'name': 'ProjectB',
}
responses.start()
self._stub_vsts()
def tearDown(self):
responses.stop()
def _stub_vsts(self):
responses.reset()
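        # Stub every VSTS endpoint touched during installation: the OAuth token
        # exchange, account/profile/connection lookups, project and repository
        # listings, the subscription hook, and work item states.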
responses.add(
responses.POST,
'https://app.vssps.visualstudio.com/oauth2/token',
json={
'access_token': self.access_token,
'token_type': 'grant',
'expires_in': 300, # seconds (5 min)
'refresh_token': self.refresh_token,
},
)
responses.add(
responses.GET,
'https://app.vssps.visualstudio.com/_apis/accounts',
json=[{
'AccountId': self.vsts_account_id,
'AccountUri': self.vsts_account_uri,
'AccountName': self.vsts_account_name,
'Properties': {},
}],
)
responses.add(
responses.GET,
'https://app.vssps.visualstudio.com/_apis/profile/profiles/me?api-version=1.0',
json={
'id': self.vsts_user_id,
'displayName': self.vsts_user_name,
'emailAddress': self.vsts_user_email,
},
)
responses.add(
responses.GET,
'https://app.vssps.visualstudio.com/_apis/connectionData/',
json={
'authenticatedUser': {
'subjectDescriptor': self.vsts_account_id,
},
},
)
responses.add(
responses.GET,
'https://{}.visualstudio.com/DefaultCollection/_apis/projects'.format(
self.vsts_account_name.lower(),
),
json={
'value': [
self.project_a,
self.project_b,
],
},
)
responses.add(
responses.POST,
'https://{}.visualstudio.com/_apis/hooks/subscriptions'.format(
self.vsts_account_name.lower(),
),
json=CREATE_SUBSCRIPTION,
)
responses.add(
responses.GET,
'https://{}.visualstudio.com/_apis/git/repositories'.format(
self.vsts_account_name.lower(),
),
json={
'value': [{
'id': self.repo_id,
'name': self.repo_name,
'project': {
'name': self.project_a['name'],
},
}],
},
)
responses.add(
responses.GET,
'https://{}.visualstudio.com/{}/_apis/wit/workitemtypes/{}/states'.format(
self.vsts_account_name.lower(),
self.project_a['name'],
'Bug',
),
json={
'value': [{'name': 'resolve_status'},
{'name': 'resolve_when'},
{'name': 'regression_status'},
{'name': 'sync_comments'},
{'name': 'sync_forward_assignment'},
{'name': 'sync_reverse_assignment'}],
}
)
def make_init_request(self, path=None, body=None):
return self.client.get(
path or self.init_path,
body or {},
)
def make_oauth_redirect_request(self, state):
return self.client.get('{}?{}'.format(
self.setup_path,
urlencode({
'code': 'oauth-code',
'state': state,
}),
))
def assert_vsts_oauth_redirect(self, redirect):
assert redirect.scheme == 'https'
assert redirect.netloc == 'app.vssps.visualstudio.com'
assert redirect.path == '/oauth2/authorize'
def assert_account_selection(self, response, account_id=None):
account_id = account_id or self.vsts_account_id
assert response.status_code == 200
assert '<option value="{}"'.format(account_id) in response.content
def assert_installation(self):
# Initial request to the installation URL for VSTS
resp = self.make_init_request()
redirect = urlparse(resp['Location'])
assert resp.status_code == 302
self.assert_vsts_oauth_redirect(redirect)
query = parse_qs(redirect.query)
# OAuth redirect back to Sentry (identity_pipeline_view)
resp = self.make_oauth_redirect_request(query['state'][0])
self.assert_account_selection(resp)
# User choosing which VSTS Account to use (AccountConfigView)
# Final step.
return self.client.post(
self.setup_path,
{
'account': self.vsts_account_id,
'provider': 'vsts',
},
)
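# Canned VSTS REST API payloads used as fixtures by the integration tests below.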
COMPARE_COMMITS_EXAMPLE = b"""
{
"count": 1,
"value": [
{
"commitId": "6c36052c58bde5e57040ebe6bdb9f6a52c906fff",
"author": {
"name": "max bittker",
"email": "[email protected]",
"date": "2018-04-24T00:03:18Z"
},
"committer": {
"name": "max bittker",
"email": "[email protected]",
"date": "2018-04-24T00:03:18Z"
},
"comment": "Updated README.md",
"changeCounts": {"Add": 0, "Edit": 1, "Delete": 0},
"url":
"https://mbittker.visualstudio.com/_apis/git/repositories/b1e25999-c080-4ea1-8c61-597c4ec41f06/commits/6c36052c58bde5e57040ebe6bdb9f6a52c906fff",
"remoteUrl":
"https://mbittker.visualstudio.com/_git/MyFirstProject/commit/6c36052c58bde5e57040ebe6bdb9f6a52c906fff"
}
]
}
"""
FILE_CHANGES_EXAMPLE = b"""
{
"changeCounts": {"Edit": 1},
"changes": [
{
"item": {
"objectId": "b48e843656a0a12926a0bcedefe8ef3710fe2867",
"originalObjectId": "270b590a4edf3f19aa7acc7b57379729e34fc681",
"gitObjectType": "blob",
"commitId": "6c36052c58bde5e57040ebe6bdb9f6a52c906fff",
"path": "/README.md",
"url":
"https://mbittker.visualstudio.com/DefaultCollection/_apis/git/repositories/b1e25999-c080-4ea1-8c61-597c4ec41f06/items/README.md?versionType=Commit&version=6c36052c58bde5e57040ebe6bdb9f6a52c906fff"
},
"changeType": "edit"
}
]
}
"""
WORK_ITEM_RESPONSE = """{
"id": 309,
"rev": 1,
"fields": {
"System.AreaPath": "Fabrikam-Fiber-Git",
"System.TeamProject": "Fabrikam-Fiber-Git",
"System.IterationPath": "Fabrikam-Fiber-Git",
"System.WorkItemType": "Product Backlog Item",
"System.State": "New",
"System.Reason": "New backlog item",
"System.CreatedDate": "2015-01-07T18:13:01.807Z",
"System.CreatedBy": "Jamal Hartnett <[email protected]>",
"System.ChangedDate": "2015-01-07T18:13:01.807Z",
"System.ChangedBy": "Jamal Hartnett <[email protected]>",
"System.Title": "Hello",
"Microsoft.VSTS.Scheduling.Effort": 8,
"WEF_6CB513B6E70E43499D9FC94E5BBFB784_Kanban.Column": "New",
"System.Description": "Fix this."
},
"_links": {
"self": {
"href": "https://fabrikam-fiber-inc.visualstudio.com/DefaultCollection/_apis/wit/workItems/309"
},
"workItemUpdates": {
"href": "https://fabrikam-fiber-inc.visualstudio.com/DefaultCollection/_apis/wit/workItems/309/updates"
},
"workItemRevisions": {
"href": "https://fabrikam-fiber-inc.visualstudio.com/DefaultCollection/_apis/wit/workItems/309/revisions"
},
"workItemHistory": {
"href": "https://fabrikam-fiber-inc.visualstudio.com/DefaultCollection/_apis/wit/workItems/309/history"
},
"html": {
"href": "https://fabrikam-fiber-inc.visualstudio.com/web/wi.aspx?pcguid=d81542e4-cdfa-4333-b082-1ae2d6c3ad16&id=309"
},
"workItemType": {
"href": "https://fabrikam-fiber-inc.visualstudio.com/DefaultCollection/6ce954b1-ce1f-45d1-b94d-e6bf2464ba2c/_apis/wit/workItemTypes/Product%20Backlog%20Item"
},
"fields": {
"href": "https://fabrikam-fiber-inc.visualstudio.com/DefaultCollection/_apis/wit/fields"
}
},
"url": "https://fabrikam-fiber-inc.visualstudio.com/DefaultCollection/_apis/wit/workItems/309"
}"""
GET_USERS_RESPONSE = b"""{
"count": 4,
"value": [
{
"subjectKind": "user",
"cuid": "ec09a4d8-d914-4f28-9e39-23d52b683f90",
"domain": "Build",
"principalName": "51ac8d19-6694-459f-a65e-bec30e9e2e33",
"mailAddress": "",
"origin": "vsts",
"originId": "ec09a4d8-d914-4f28-9e39-23d52b683f90",
"displayName": "Project Collection Build Service (Ftottentest2)",
"_links": {
"self": {
"href": "https://fabrikam.vssps.visualstudio.com/_apis/graph/users/TWljcm9zb2Z0LlRlYW1Gb3VuZGF0aW9uLlNlcnZpY2VJZGVudGl0eTtmMzViOTAxNS1jZGU4LTQ4MzQtYTFkNS0wOWU4ZjM1OWNiODU6QnVpbGQ6NTFhYzhkMTktNjY5NC00NTlmLWE2NWUtYmVjMzBlOWUyZTMz"
},
"memberships": {
"href": "https://fabrikam.vssps.visualstudio.com/_apis/graph/memberships/TWljcm9zb2Z0LlRlYW1Gb3VuZGF0aW9uLlNlcnZpY2VJZGVudGl0eTtmMzViOTAxNS1jZGU4LTQ4MzQtYTFkNS0wOWU4ZjM1OWNiODU6QnVpbGQ6NTFhYzhkMTktNjY5NC00NTlmLWE2NWUtYmVjMzBlOWUyZTMz"
}
},
"url": "https://fabrikam.vssps.visualstudio.com/_apis/graph/users/TWljcm9zb2Z0LlRlYW1Gb3VuZGF0aW9uLlNlcnZpY2VJZGVudGl0eTtmMzViOTAxNS1jZGU4LTQ4MzQtYTFkNS0wOWU4ZjM1OWNiODU6QnVpbGQ6NTFhYzhkMTktNjY5NC00NTlmLWE2NWUtYmVjMzBlOWUyZTMz",
"descriptor": "TWljcm9zb2Z0LlRlYW1Gb3VuZGF0aW9uLlNlcnZpY2VJZGVudGl0eTtmMzViOTAxNS1jZGU4LTQ4MzQtYTFkNS0wOWU4ZjM1OWNiODU6QnVpbGQ6NTFhYzhkMTktNjY5NC00NTlmLWE2NWUtYmVjMzBlOWUyZTMz"
},
{
"subjectKind": "user",
"metaType": "member",
"cuid": "00ca946b-2fe9-4f2a-ae2f-40d5c48001bc",
"domain": "LOCAL AUTHORITY",
"principalName": "TeamFoundationService (TEAM FOUNDATION)",
"mailAddress": "",
"origin": "vsts",
"originId": "00ca946b-2fe9-4f2a-ae2f-40d5c48001bc",
"displayName": "TeamFoundationService (TEAM FOUNDATION)",
"_links": {
"self": {
"href": "https://fabrikam.vssps.visualstudio.com/_apis/graph/users/TWljcm9zb2Z0LklkZW50aXR5TW9kZWwuQ2xhaW1zLkNsYWltc0lkZW50aXR5Ozc3ODlmMDlkLWUwNTMtNGYyZS1iZGVlLTBjOGY4NDc2YTRiYw"
},
"memberships": {
"href": "https://fabrikam.vssps.visualstudio.com/_apis/graph/memberships/TWljcm9zb2Z0LklkZW50aXR5TW9kZWwuQ2xhaW1zLkNsYWltc0lkZW50aXR5Ozc3ODlmMDlkLWUwNTMtNGYyZS1iZGVlLTBjOGY4NDc2YTRiYw"
}
},
"url": "https://fabrikam.vssps.visualstudio.com/_apis/graph/users/TWljcm9zb2Z0LklkZW50aXR5TW9kZWwuQ2xhaW1zLkNsYWltc0lkZW50aXR5Ozc3ODlmMDlkLWUwNTMtNGYyZS1iZGVlLTBjOGY4NDc2YTRiYw",
"descriptor": "TWljcm9zb2Z0LklkZW50aXR5TW9kZWwuQ2xhaW1zLkNsYWltc0lkZW50aXR5Ozc3ODlmMDlkLWUwNTMtNGYyZS1iZGVlLTBjOGY4NDc2YTRiYw"
},
{
"subjectKind": "user",
"metaType": "member",
"cuid": "ddd94918-1fc8-459b-994a-cca86c4fbe95",
"domain": "TEAM FOUNDATION",
"principalName": "Anonymous",
"mailAddress": "",
"origin": "vsts",
"originId": "ddd94918-1fc8-459b-994a-cca86c4fbe95",
"displayName": "Anonymous",
"_links": {
"self": {
"href": "https://fabrikam.vssps.visualstudio.com/_apis/graph/users/TWljcm9zb2Z0LlRlYW1Gb3VuZGF0aW9uLlVuYXV0aGVudGljYXRlZElkZW50aXR5O1MtMS0wLTA"
},
"memberships": {
"href": "https://fabrikam.vssps.visualstudio.com/_apis/graph/memberships/TWljcm9zb2Z0LlRlYW1Gb3VuZGF0aW9uLlVuYXV0aGVudGljYXRlZElkZW50aXR5O1MtMS0wLTA"
}
},
"url": "https://fabrikam.vssps.visualstudio.com/_apis/graph/users/TWljcm9zb2Z0LlRlYW1Gb3VuZGF0aW9uLlVuYXV0aGVudGljYXRlZElkZW50aXR5O1MtMS0wLTA",
"descriptor": "TWljcm9zb2Z0LlRlYW1Gb3VuZGF0aW9uLlVuYXV0aGVudGljYXRlZElkZW50aXR5O1MtMS0wLTA"
},
{
"subjectKind": "user",
"metaType": "member",
"cuid": "65903f92-53dc-61b3-bb0e-e69cfa1cb719",
"domain": "45aa3d2d-7442-473d-b4d3-3c670da9dd96",
"principalName": "[email protected]",
"mailAddress": "[email protected]",
"origin": "aad",
"originId": "4be8f294-000d-4431-8506-57420b88e204",
"displayName": "Francis Totten",
"_links": {
"self": {
"href": "https://fabrikam.vssps.visualstudio.com/_apis/graph/users/TWljcm9zb2Z0LklkZW50aXR5TW9kZWwuQ2xhaW1zLkNsYWltc0lkZW50aXR5OzQ1YWEzZDJkLTc0NDItNDczZC1iNGQzLTNjNjcwZGE5ZGQ5NlxmdG90dGVuQHZzY3NpLnVz"
},
"memberships": {
"href": "https://fabrikam.vssps.visualstudio.com/_apis/graph/memberships/TWljcm9zb2Z0LklkZW50aXR5TW9kZWwuQ2xhaW1zLkNsYWltc0lkZW50aXR5OzQ1YWEzZDJkLTc0NDItNDczZC1iNGQzLTNjNjcwZGE5ZGQ5NlxmdG90dGVuQHZzY3NpLnVz"
}
},
"url": "https://fabrikam.vssps.visualstudio.com/_apis/graph/users/TWljcm9zb2Z0LklkZW50aXR5TW9kZWwuQ2xhaW1zLkNsYWltc0lkZW50aXR5OzQ1YWEzZDJkLTc0NDItNDczZC1iNGQzLTNjNjcwZGE5ZGQ5NlxmdG90dGVuQHZzY3NpLnVz",
"descriptor": "TWljcm9zb2Z0LklkZW50aXR5TW9kZWwuQ2xhaW1zLkNsYWltc0lkZW50aXR5OzQ1YWEzZDJkLTc0NDItNDczZC1iNGQzLTNjNjcwZGE5ZGQ5NlxmdG90dGVuQHZzY3NpLnVz"
}
]
}
"""
CREATE_SUBSCRIPTION = {
'id': 'fd672255-8b6b-4769-9260-beea83d752ce',
'url': 'https://fabrikam.visualstudio.com/_apis/hooks/subscriptions/fd672255-8b6b-4769-9260-beea83d752ce',
'publisherId': 'tfs',
'eventType': 'workitem.update',
'resourceVersion': '1.0-preview.1',
'eventDescription': 'WorkItem Updated',
'consumerId': 'webHooks',
'consumerActionId': 'httpRequest',
'actionDescription': 'To host myservice',
'createdBy': {
'id': '00ca946b-2fe9-4f2a-ae2f-40d5c48001bc'
},
'createdDate': '2014-10-27T15:37:24.873Z',
'modifiedBy': {
'id': '00ca946b-2fe9-4f2a-ae2f-40d5c48001bc'
},
'modifiedDate': '2014-10-27T15:37:26.23Z',
'publisherInputs': {
'buildStatus': 'Failed',
'definitionName': 'MyWebSite CI',
'hostId': 'd81542e4-cdfa-4333-b082-1ae2d6c3ad16',
'projectId': '6ce954b1-ce1f-45d1-b94d-e6bf2464ba2c',
'tfsSubscriptionId': '3e8b33e7-426d-4c92-9bf9-58e163dd7dd5'
},
'consumerInputs': {
'url': 'https://myservice/newreceiver'
}
}
WORK_ITEM_UPDATED = {
u'resourceContainers': {
u'project': {u'id': u'c0bf429a-c03c-4a99-9336-d45be74db5a6', u'baseUrl': u'https://laurynsentry.visualstudio.com/'},
u'account': {u'id': u'90e9a854-eb98-4c56-ae1a-035a0f331dd6', u'baseUrl': u'https://laurynsentry.visualstudio.com/'},
u'collection': {u'id': u'80ded3e8-3cd3-43b1-9f96-52032624aa3a', u'baseUrl': u'https://laurynsentry.visualstudio.com/'}
},
u'resource': {
u'revisedBy': {
u'displayName': u'lauryn', u'name': u'lauryn <[email protected]>', u'url': u'https://app.vssps.visualstudio.com/A90e9a854-eb98-4c56-ae1a-035a0f331dd6/_apis/Identities/21354f98-ab06-67d9-b974-5a54d992082e', u'imageUrl': u'https://laurynsentry.visualstudio.com/_api/_common/identityImage?id=21354f98-ab06-67d9-b974-5a54d992082e', u'descriptor': u'msa.MjEzNTRmOTgtYWIwNi03N2Q5LWI5NzQtNWE1NGQ5OTIwODJl', u'_links': {u'avatar': {u'href': u'https://laurynsentry.visualstudio.com/_apis/GraphProfile/MemberAvatars/msa.MjEzNTRmOTgtYWIwNi03N2Q5LWI5NzQtNWE1NGQ5OTIwODJl'}},
u'uniqueName': u'[email protected]', u'id': u'21354f98-ab06-67d9-b974-5a54d992082e'
},
u'revisedDate': u'9999-01-01T00:00:00Z',
u'url': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/31/updates/2',
u'fields': {
u'System.AuthorizedDate': {u'newValue': u'2018-07-05T20:52:14.777Z', u'oldValue': u'2018-07-05T20:51:58.927Z'},
u'System.AssignedTo': {u'newValue': u'lauryn <[email protected]>', u'oldValue': u'lauryn2 <[email protected]>'},
u'System.Watermark': {u'newValue': 78, u'oldValue': 77},
u'System.Rev': {u'newValue': 2, u'oldValue': 1},
u'System.RevisedDate': {u'newValue': u'9999-01-01T00:00:00Z', u'oldValue': u'2018-07-05T20:52:14.777Z'},
u'System.ChangedDate': {u'newValue': u'2018-07-05T20:52:14.777Z', u'oldValue': u'2018-07-05T20:51:58.927Z'}
},
u'workItemId': 31,
u'rev': 2,
u'_links': {
u'self': {u'href': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/31/updates/2'},
u'workItemUpdates': {u'href': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/31/updates'},
u'html': {u'href': u'https://laurynsentry.visualstudio.com/web/wi.aspx?pcguid=80ded3e8-3cd3-43b1-9f96-52032624aa3a&id=31'},
u'parent': {u'href': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/31'}
},
u'id': 2,
u'revision': {
u'url': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/31/revisions/2',
u'fields': {
u'System.AreaPath': u'MyFirstProject',
u'System.WorkItemType': u'Bug',
u'System.Reason': u'New',
u'System.Title': u"NameError: global name 'BitbucketRepositoryProvider' is not defined",
u'Microsoft.VSTS.Common.Priority': 2,
u'System.CreatedBy': u'lauryn <[email protected]>',
u'System.AssignedTo': u'lauryn <[email protected]>',
u'System.CreatedDate': u'2018-07-05T20:51:58.927Z',
u'System.TeamProject': u'MyFirstProject',
u'Microsoft.VSTS.Common.Severity': u'3 - Medium',
u'Microsoft.VSTS.Common.ValueArea': u'Business',
u'System.State': u'New',
u'System.Description': u'<p><a href="https://lauryn.ngrok.io/sentry/internal/issues/55/">https://lauryn.ngrok.io/sentry/internal/issues/55/</a></p>\n<pre><code>NameError: global name \'BitbucketRepositoryProvider\' is not defined\n(1 additional frame(s) were not displayed)\n...\n File "sentry/runner/__init__.py", line 125, in configure\n configure(ctx, py, yaml, skip_service_validation)\n File "sentry/runner/settings.py", line 152, in configure\n skip_service_validation=skip_service_validation\n File "sentry/runner/initializer.py", line 315, in initialize_app\n register_plugins(settings)\n File "sentry/runner/initializer.py", line 60, in register_plugins\n integration.setup()\n File "sentry/integrations/bitbucket/integration.py", line 78, in setup\n BitbucketRepositoryProvider,\n\nNameError: global name \'BitbucketRepositoryProvider\' is not defined\n</code></pre>\n',
u'System.ChangedBy': u'lauryn <[email protected]>',
u'System.ChangedDate': u'2018-07-05T20:52:14.777Z',
u'Microsoft.VSTS.Common.StateChangeDate': u'2018-07-05T20:51:58.927Z',
u'System.IterationPath': u'MyFirstProject'},
u'rev': 2,
u'id': 31,
u'_links': {u'self': {u'href': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/31/revisions/2'}, u'workItemRevisions': {u'href': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/31/revisions'}, u'parent': {u'href': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/31'}}
}
},
u'eventType': u'workitem.updated',
u'detailedMessage': None,
u'createdDate': u'2018-07-05T20:52:16.3051288Z',
u'id': u'18f51331-2640-4bce-9ebd-c59c855956a2',
u'resourceVersion': u'1.0',
u'notificationId': 1,
u'subscriptionId': u'7bf628eb-b3a7-4fb2-ab4d-8b60f2e8cb9b',
u'publisherId': u'tfs',
u'message': None
}
WORK_ITEM_UNASSIGNED = {
u'resourceContainers': {
u'project': {
u'id': u'c0bf429a-c03c-4a99-9336-d45be74db5a6',
u'baseUrl': u'https://laurynsentry.visualstudio.com/'
},
u'account': {
u'id': u'90e9a854-eb98-4c56-ae1a-035a0f331dd6',
u'baseUrl': u'https://laurynsentry.visualstudio.com/'
},
u'collection': {
u'id': u'80ded3e8-3cd3-43b1-9f96-52032624aa3a',
u'baseUrl': u'https://laurynsentry.visualstudio.com/'
}
},
u'resource': {
u'revisedBy': {
u'displayName': u'lauryn',
u'name': u'lauryn <[email protected]>',
u'url': u'https://app.vssps.visualstudio.com/A90e9a854-eb98-4c56-ae1a-035a0f331dd6/_apis/Identities/21354f98-ab06-67d9-b974-5a54d992082e',
u'imageUrl': u'https://laurynsentry.visualstudio.com/_api/_common/identityImage?id=21354f98-ab06-67d9-b974-5a54d992082e',
u'descriptor': u'msa.MjEzNTRmOTgtYWIwNi03N2Q5LWI5NzQtNWE1NGQ5OTIwODJl',
u'_links': {
u'avatar': {
u'href': u'https://laurynsentry.visualstudio.com/_apis/GraphProfile/MemberAvatars/msa.MjEzNTRmOTgtYWIwNi03N2Q5LWI5NzQtNWE1NGQ5OTIwODJl'
}
},
u'uniqueName': u'[email protected]',
u'id': u'21354f98-ab06-67d9-b974-5a54d992082e'
},
u'revisedDate': u'9999-01-01T00:00:00 Z',
u'url': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/33/updates/3',
u'fields': {
u'System.AuthorizedDate': {
u'newValue': u'2018-07-05T23:23:09.493 Z',
u'oldValue': u'2018-07-05T23:21:38.243 Z'
},
u'System.AssignedTo': {
u'oldValue': u'lauryn <[email protected]>'
},
u'System.Watermark': {
u'newValue': 83,
u'oldValue': 82
},
u'System.Rev': {
u'newValue': 3,
u'oldValue': 2
},
u'System.RevisedDate': {
u'newValue': u'9999-01-01T00:00:00 Z',
u'oldValue': u'2018-07-05T23:23:09.493 Z'
},
u'System.ChangedDate': {
u'newValue': u'2018-07-05T23:23:09.493 Z',
u'oldValue': u'2018-07-05T23:21:38.243 Z'
}
},
u'workItemId': 33,
u'rev': 3,
u'_links': {
u'self': {
u'href': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/33/updates/3'
},
u'workItemUpdates': {
u'href': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/33/updates'
},
u'html': {
u'href': u'https://laurynsentry.visualstudio.com/web/wi.aspx?pcguid=80ded3e8-3cd3-43b1-9f96-52032624aa3a&id=33'
},
u'parent': {
u'href': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/33'
}
},
u'id': 3,
u'revision': {
u'url': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/33/revisions/3',
u'fields': {
u'System.AreaPath': u'MyFirstProject',
u'System.WorkItemType': u'Bug',
u'System.Reason': u'New',
u'System.Title': u'NotImplementedError:Visual Studio Team Services requires an organization_id',
u'Microsoft.VSTS.Common.Priority': 2,
u'System.CreatedBy': u'lauryn <[email protected]>',
u'Microsoft.VSTS.Common.StateChangeDate': u'2018-07-05T23:21:25.847 Z',
u'System.CreatedDate': u'2018-07-05T23:21:25.847 Z',
u'System.TeamProject': u'MyFirstProject',
u'Microsoft.VSTS.Common.ValueArea': u'Business',
u'System.State': u'New',
u'System.Description': u'<p><a href="https: //lauryn.ngrok.io/sentry/internal/issues/196/">https: //lauryn.ngrok.io/sentry/internal/issues/196/</a></p>\n<pre><code>NotImplementedError:Visual Studio Team Services requires an organization_id\n(57 additional frame(s) were not displayed)\n...\n File "sentry/tasks/base.py"',
u'System.ChangedBy': u'lauryn <[email protected]>',
u'System.ChangedDate': u'2018-07-05T23:23:09.493 Z',
u'Microsoft.VSTS.Common.Severity': u'3 - Medium',
u'System.IterationPath': u'MyFirstProject'
},
u'rev': 3,
u'id': 33,
u'_links': {
u'self': {
u'href': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/33/revisions/3'
},
u'workItemRevisions': {
u'href': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/33/revisions'
},
u'parent': {
u'href': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/33'
}
}
}
},
u'eventType': u'workitem.updated',
u'detailedMessage': None,
u'createdDate': u'2018-07-05T23:23:11.1935112 Z',
u'id': u'cc349c85-6595-4939-9b69-f89480be6a26',
u'resourceVersion': u'1.0',
u'notificationId': 2,
u'subscriptionId': u'7405a600-6a25-48e6-81b6-1dde044783ad',
u'publisherId': u'tfs',
u'message': None
}
WORK_ITEM_UPDATED_STATUS = {
u'resourceContainers': {
u'project': {
u'id': u'c0bf429a-c03c-4a99-9336-d45be74db5a6',
u'baseUrl': u'https://laurynsentry.visualstudio.com/'
},
u'account': {
u'id': u'90e9a854-eb98-4c56-ae1a-035a0f331dd6',
u'baseUrl': u'https://laurynsentry.visualstudio.com/'
},
u'collection': {
u'id': u'80ded3e8-3cd3-43b1-9f96-52032624aa3a',
u'baseUrl': u'https://laurynsentry.visualstudio.com/'
}
},
u'resource': {
u'revisedBy': {
u'displayName': u'lauryn',
u'name': u'lauryn <[email protected]>',
u'url': u'https://app.vssps.visualstudio.com/A90e9a854-eb98-4c56-ae1a-035a0f331dd6/_apis/Identities/21354f98-ab06-67d9-b974-5a54d992082e',
u'imageUrl': u'https://laurynsentry.visualstudio.com/_api/_common/identityImage?id=21354f98-ab06-67d9-b974-5a54d992082e',
u'descriptor': u'msa.MjEzNTRmOTgtYWIwNi03N2Q5LWI5NzQtNWE1NGQ5OTIwODJl',
u'_links': {
u'avatar': {
u'href': u'https://laurynsentry.visualstudio.com/_apis/GraphProfile/MemberAvatars/msa.MjEzNTRmOTgtYWIwNi03N2Q5LWI5NzQtNWE1NGQ5OTIwODJl'
}
},
u'uniqueName': u'[email protected]',
u'id': u'21354f98-ab06-67d9-b974-5a54d992082e'
},
u'revisedDate': u'9999-01-01T00:00:00 Z',
u'url': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/33/updates/3',
u'fields': {
u'System.AuthorizedDate': {
u'newValue': u'2018-07-05T23:23:09.493 Z',
u'oldValue': u'2018-07-05T23:21:38.243 Z'
},
u'System.State': {
u'oldValue': u'New',
u'newValue': u'Resolved'
},
u'System.Watermark': {
u'newValue': 83,
u'oldValue': 82
},
u'System.Rev': {
u'newValue': 3,
u'oldValue': 2
},
u'System.RevisedDate': {
u'newValue': u'9999-01-01T00:00:00 Z',
u'oldValue': u'2018-07-05T23:23:09.493 Z'
},
u'System.ChangedDate': {
u'newValue': u'2018-07-05T23:23:09.493 Z',
u'oldValue': u'2018-07-05T23:21:38.243 Z'
}
},
u'workItemId': 33,
u'rev': 3,
u'_links': {
u'self': {
u'href': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/33/updates/3'
},
u'workItemUpdates': {
u'href': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/33/updates'
},
u'html': {
u'href': u'https://laurynsentry.visualstudio.com/web/wi.aspx?pcguid=80ded3e8-3cd3-43b1-9f96-52032624aa3a&id=33'
},
u'parent': {
u'href': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/33'
}
},
u'id': 3,
u'revision': {
u'url': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/33/revisions/3',
u'fields': {
u'System.AreaPath': u'MyFirstProject',
u'System.WorkItemType': u'Bug',
u'System.Reason': u'New',
u'System.Title': u'NotImplementedError:Visual Studio Team Services requires an organization_id',
u'Microsoft.VSTS.Common.Priority': 2,
u'System.CreatedBy': u'lauryn <[email protected]>',
u'Microsoft.VSTS.Common.StateChangeDate': u'2018-07-05T23:21:25.847 Z',
u'System.CreatedDate': u'2018-07-05T23:21:25.847 Z',
u'System.TeamProject': u'MyFirstProject',
u'Microsoft.VSTS.Common.ValueArea': u'Business',
u'System.State': u'New',
u'System.Description': u'<p><a href="https: //lauryn.ngrok.io/sentry/internal/issues/196/">https: //lauryn.ngrok.io/sentry/internal/issues/196/</a></p>\n<pre><code>NotImplementedError:Visual Studio Team Services requires an organization_id\n(57 additional frame(s) were not displayed)\n...\n File "sentry/tasks/base.py"',
u'System.ChangedBy': u'lauryn <[email protected]>',
u'System.ChangedDate': u'2018-07-05T23:23:09.493 Z',
u'Microsoft.VSTS.Common.Severity': u'3 - Medium',
u'System.IterationPath': u'MyFirstProject'
},
u'rev': 3,
u'id': 33,
u'_links': {
u'self': {
u'href': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/33/revisions/3'
},
u'workItemRevisions': {
u'href': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/33/revisions'
},
u'parent': {
u'href': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/33'
}
}
}
},
u'eventType': u'workitem.updated',
u'detailedMessage': None,
u'createdDate': u'2018-07-05T23:23:11.1935112 Z',
u'id': u'cc349c85-6595-4939-9b69-f89480be6a26',
u'resourceVersion': u'1.0',
u'notificationId': 2,
u'subscriptionId': u'7405a600-6a25-48e6-81b6-1dde044783ad',
u'publisherId': u'tfs',
u'message': None
}
WORK_ITEM_STATES = {
'count': 5,
'value': [
{
'name': 'New',
'color': 'b2b2b2',
'category': 'Proposed'
},
{
'name': 'Active',
'color': '007acc',
'category': 'InProgress'
},
{
'name': 'CustomState',
'color': '5688E0',
'category': 'InProgress'
},
{
'name': 'Resolved',
'color': 'ff9d00',
'category': 'Resolved'
},
{
'name': 'Closed',
'color': '339933',
'category': 'Completed'
}
]
}
GET_PROJECTS_RESPONSE = """{
"count": 1,
"value": [{
"id": "ac7c05bb-7f8e-4880-85a6-e08f37fd4a10",
"name": "Fabrikam-Fiber-Git",
"url": "https://jess-dev.visualstudio.com/_apis/projects/ac7c05bb-7f8e-4880-85a6-e08f37fd4a10",
"state": "wellFormed",
"revision": 16,
"visibility": "private"
}]
}"""
| bsd-3-clause | -7,866,642,662,394,803,000 | 43.784106 | 966 | 0.593073 | false | 2.786091 | false | false | false |
mganeva/mantid | Framework/PythonInterface/plugins/algorithms/Abins.py | 1 | 40996 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
try:
import pathos.multiprocessing as mp
PATHOS_FOUND = True
except ImportError:
PATHOS_FOUND = False
import numpy as np
import six
import os
from mantid.api import AlgorithmFactory, FileAction, FileProperty, PythonAlgorithm, Progress, WorkspaceProperty, mtd
from mantid.api import WorkspaceFactory, AnalysisDataService
# noinspection PyProtectedMember
from mantid.api._api import WorkspaceGroup
from mantid.simpleapi import CloneWorkspace, GroupWorkspaces, SaveAscii, Load, Scale
from mantid.kernel import logger, StringListValidator, Direction, StringArrayProperty, Atom
import AbinsModules
# noinspection PyPep8Naming,PyMethodMayBeStatic
class Abins(PythonAlgorithm):
_ab_initio_program = None
_vibrational_or_phonon_data_file = None
_experimental_file = None
_temperature = None
_bin_width = None
_scale = None
_sample_form = None
_instrument_name = None
_atoms = None
_sum_contributions = None
_scale_by_cross_section = None
_calc_partial = None
_out_ws_name = None
_num_quantum_order_events = None
_extracted_ab_initio_data = None
def category(self):
return "Simulation"
# ----------------------------------------------------------------------------------------
def summary(self):
return "Calculates inelastic neutron scattering."
# ----------------------------------------------------------------------------------------
def PyInit(self):
# Declare all properties
self.declareProperty(name="AbInitioProgram",
direction=Direction.Input,
defaultValue="CASTEP",
validator=StringListValidator(["CASTEP", "CRYSTAL", "DMOL3", "GAUSSIAN"]),
doc="An ab initio program which was used for vibrational or phonon calculation.")
self.declareProperty(FileProperty("VibrationalOrPhononFile", "",
action=FileAction.Load,
direction=Direction.Input,
extensions=["phonon", "out", "outmol", "log", "LOG"]),
doc="File with the data from a vibrational or phonon calculation.")
self.declareProperty(FileProperty("ExperimentalFile", "",
action=FileAction.OptionalLoad,
direction=Direction.Input,
extensions=["raw", "dat"]),
doc="File with the experimental inelastic spectrum to compare.")
self.declareProperty(name="TemperatureInKelvin",
direction=Direction.Input,
defaultValue=10.0,
doc="Temperature in K for which dynamical structure factor S should be calculated.")
self.declareProperty(name="BinWidthInWavenumber", defaultValue=1.0, doc="Width of bins used during rebining.")
self.declareProperty(name="Scale", defaultValue=1.0,
doc='Scale the intensity by the given factor. Default is no scaling.')
self.declareProperty(name="SampleForm",
direction=Direction.Input,
defaultValue="Powder",
validator=StringListValidator(AbinsModules.AbinsConstants.ALL_SAMPLE_FORMS),
# doc="Form of the sample: SingleCrystal or Powder.")
doc="Form of the sample: Powder.")
self.declareProperty(name="Instrument",
direction=Direction.Input,
defaultValue="TOSCA",
# validator=StringListValidator(AbinsModules.AbinsConstants.ALL_INSTRUMENTS)
validator=StringListValidator(["TOSCA"]),
doc="Name of an instrument for which analysis should be performed.")
self.declareProperty(StringArrayProperty("Atoms", Direction.Input),
doc="List of atoms to use to calculate partial S."
"If left blank, workspaces with S for all types of atoms will be calculated.")
self.declareProperty(name="SumContributions", defaultValue=False,
doc="Sum the partial dynamical structure factors into a single workspace.")
self.declareProperty(name="ScaleByCrossSection", defaultValue='Incoherent',
validator=StringListValidator(['Total', 'Incoherent', 'Coherent']),
doc="Scale the partial dynamical structure factors by the scattering cross section.")
self.declareProperty(name="QuantumOrderEventsNumber", defaultValue='1',
validator=StringListValidator(['1', '2', '3', '4']),
doc="Number of quantum order effects included in the calculation "
"(1 -> FUNDAMENTALS, 2-> first overtone + FUNDAMENTALS + "
"2nd order combinations, 3-> FUNDAMENTALS + first overtone + second overtone + 2nd "
"order combinations + 3rd order combinations etc...)")
self.declareProperty(WorkspaceProperty("OutputWorkspace", '', Direction.Output),
doc="Name to give the output workspace.")
def validateInputs(self):
"""
Performs input validation. Use to ensure the user has defined a consistent set of parameters.
"""
input_file_validators = {"CASTEP": self._validate_castep_input_file,
"CRYSTAL": self._validate_crystal_input_file,
"DMOL3": self._validate_dmol3_input_file,
"GAUSSIAN": self._validate_gaussian_input_file}
issues = dict()
temperature = self.getProperty("TemperatureInKelvin").value
if temperature < 0:
issues["TemperatureInKelvin"] = "Temperature must be positive."
scale = self.getProperty("Scale").value
if scale < 0:
issues["Scale"] = "Scale must be positive."
ab_initio_program = self.getProperty("AbInitioProgram").value
vibrational_or_phonon_data_filename = self.getProperty("VibrationalOrPhononFile").value
output = input_file_validators[ab_initio_program](filename_full_path=vibrational_or_phonon_data_filename)
bin_width = self.getProperty("BinWidthInWavenumber").value
if not (isinstance(bin_width, float) and 1.0 <= bin_width <= 10.0):
issues["BinWidthInWavenumber"] = ["Invalid bin width. Valid range is [1.0, 10.0] cm^-1"]
if output["Invalid"]:
issues["VibrationalOrPhononFile"] = output["Comment"]
workspace_name = self.getPropertyValue("OutputWorkspace")
# list of special keywords which cannot be used in the name of workspace
forbidden_keywords = ["total"]
if workspace_name in mtd:
issues["OutputWorkspace"] = "Workspace with name " + workspace_name + " already in use; please give " \
"a different name for workspace."
elif workspace_name == "":
issues["OutputWorkspace"] = "Please specify name of workspace."
for word in forbidden_keywords:
if word in workspace_name:
issues["OutputWorkspace"] = "Keyword: " + word + " cannot be used in the name of workspace."
break
self._check_advanced_parameter()
return issues
def PyExec(self):
# 0) Create reporter to report progress
steps = 9
begin = 0
end = 1.0
prog_reporter = Progress(self, begin, end, steps)
# 1) get input parameters from a user
self._get_properties()
prog_reporter.report("Input data from the user has been collected.")
# 2) read ab initio data
ab_initio_loaders = {"CASTEP": AbinsModules.LoadCASTEP, "CRYSTAL": AbinsModules.LoadCRYSTAL,
"DMOL3": AbinsModules.LoadDMOL3, "GAUSSIAN": AbinsModules.LoadGAUSSIAN}
rdr = ab_initio_loaders[self._ab_initio_program](input_ab_initio_filename=self._vibrational_or_phonon_data_file)
ab_initio_data = rdr.get_formatted_data()
prog_reporter.report("Vibrational/phonon data has been read.")
# 3) calculate S
s_calculator = AbinsModules.CalculateS.init(filename=self._vibrational_or_phonon_data_file,
temperature=self._temperature,
sample_form=self._sample_form, abins_data=ab_initio_data,
instrument=self._instrument,
quantum_order_num=self._num_quantum_order_events,
bin_width=self._bin_width)
s_data = s_calculator.get_formatted_data()
prog_reporter.report("Dynamical structure factors have been determined.")
# 4) get atoms for which S should be plotted
self._extracted_ab_initio_data = ab_initio_data.get_atoms_data().extract()
num_atoms = len(self._extracted_ab_initio_data)
all_atms_smbls = list(set([self._extracted_ab_initio_data["atom_%s" % atom]["symbol"]
for atom in range(num_atoms)]))
all_atms_smbls.sort()
if len(self._atoms) == 0: # case: all atoms
atoms_symbol = all_atms_smbls
else: # case selected atoms
if len(self._atoms) != len(set(self._atoms)): # only different types
raise ValueError("Not all user defined atoms are unique.")
for atom_symbol in self._atoms:
if atom_symbol not in all_atms_smbls:
raise ValueError("User defined atom not present in the system.")
atoms_symbol = self._atoms
prog_reporter.report("Atoms, for which dynamical structure factors should be plotted, have been determined.")
        # At the moment only atom types can be selected, e.g. for benzene there are three options: 1) C, H; 2) C; 3) H
        # 5) create workspaces for atoms of interest
workspaces = []
if self._sample_form == "Powder":
workspaces.extend(self._create_partial_s_per_type_workspaces(atoms_symbols=atoms_symbol, s_data=s_data))
prog_reporter.report("Workspaces with partial dynamical structure factors have been constructed.")
# 6) Create a workspace with sum of all atoms if required
if self._sum_contributions:
total_atom_workspaces = []
for ws in workspaces:
if "total" in ws:
total_atom_workspaces.append(ws)
total_workspace = self._create_total_workspace(partial_workspaces=total_atom_workspaces)
workspaces.insert(0, total_workspace)
prog_reporter.report("Workspace with total S has been constructed.")
# 7) add experimental data if available to the collection of workspaces
if self._experimental_file != "":
workspaces.insert(0, self._create_experimental_data_workspace().name())
prog_reporter.report("Workspace with the experimental data has been constructed.")
GroupWorkspaces(InputWorkspaces=workspaces, OutputWorkspace=self._out_ws_name)
# 8) save workspaces to ascii_file
num_workspaces = mtd[self._out_ws_name].getNumberOfEntries()
for wrk_num in range(num_workspaces):
wrk = mtd[self._out_ws_name].getItem(wrk_num)
SaveAscii(InputWorkspace=Scale(wrk, 1.0/self._bin_width, "Multiply"),
Filename=wrk.name() + ".dat", Separator="Space", WriteSpectrumID=False)
prog_reporter.report("All workspaces have been saved to ASCII files.")
# 9) set OutputWorkspace
self.setProperty('OutputWorkspace', self._out_ws_name)
prog_reporter.report("Group workspace with all required dynamical structure factors has been constructed.")
def _create_workspaces(self, atoms_symbols=None, s_data=None):
"""
        Creates both partial and total workspaces for each requested atom type.
:param atoms_symbols: list of atom types for which S should be created
:param s_data: dynamical factor data of type SData
:returns: workspaces for list of atoms types, S for the particular type of atom
"""
s_data_extracted = s_data.extract()
shape = [self._num_quantum_order_events]
shape.extend(list(s_data_extracted["atom_0"]["s"]["order_1"].shape))
s_atom_data = np.zeros(shape=tuple(shape), dtype=AbinsModules.AbinsConstants.FLOAT_TYPE)
shape.pop(0)
num_atoms = len([key for key in s_data_extracted.keys() if "atom" in key])
temp_s_atom_data = np.copy(s_atom_data)
result = []
masses = {}
for i in range(num_atoms):
symbol = self._extracted_ab_initio_data["atom_%s" % i]["symbol"]
mass = self._extracted_ab_initio_data["atom_%s" % i]["mass"]
if symbol not in masses:
masses[symbol] = set()
masses[symbol].add(mass)
one_m = AbinsModules.AbinsConstants.ONLY_ONE_MASS
eps = AbinsModules.AbinsConstants.MASS_EPS
# convert set to list to fix order
for s in masses:
masses[s] = sorted(list(set(masses[s])))
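        # A symbol needs isotope-labelled workspaces when more than one mass was
        # found for it, or when its mass differs from the element's standard
        # atomic mass (isotopic substitution).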
for symbol in atoms_symbols:
sub = len(masses[symbol]) > one_m or abs(Atom(symbol=symbol).mass - masses[symbol][0]) > eps
for m in masses[symbol]:
result.extend(self._atom_type_s(num_atoms=num_atoms, mass=m, s_data_extracted=s_data_extracted,
element_symbol=symbol, temp_s_atom_data=temp_s_atom_data,
s_atom_data=s_atom_data, substitution=sub))
return result
def _atom_type_s(self, num_atoms=None, mass=None, s_data_extracted=None, element_symbol=None, temp_s_atom_data=None,
s_atom_data=None, substitution=None):
"""
Helper function for calculating S for the given type of atom
:param num_atoms: number of atoms in the system
:param s_data_extracted: data with all S
:param element_symbol: label for the type of atom
:param temp_s_atom_data: helper array to store S
:param s_atom_data: stores all S for the given type of atom
:param substitution: True if isotope substitution and False otherwise
"""
atom_workspaces = []
s_atom_data.fill(0.0)
element = Atom(symbol=element_symbol)
for atom in range(num_atoms):
eps = AbinsModules.AbinsConstants.MASS_EPS
if (self._extracted_ab_initio_data["atom_%s" % atom]["symbol"] == element_symbol and
abs(self._extracted_ab_initio_data["atom_%s" % atom]["mass"] - mass) < eps):
temp_s_atom_data.fill(0.0)
for order in range(AbinsModules.AbinsConstants.FUNDAMENTALS,
self._num_quantum_order_events + AbinsModules.AbinsConstants.S_LAST_INDEX):
order_indx = order - AbinsModules.AbinsConstants.PYTHON_INDEX_SHIFT
temp_s_order = s_data_extracted["atom_%s" % atom]["s"]["order_%s" % order]
temp_s_atom_data[order_indx] = temp_s_order
s_atom_data += temp_s_atom_data # sum S over the atoms of the same type
total_s_atom_data = np.sum(s_atom_data, axis=0)
nucleons_number = int(round(mass))
if substitution:
atom_workspaces.append(self._create_workspace(atom_name=str(nucleons_number) + element_symbol,
s_points=np.copy(total_s_atom_data),
optional_name="_total", protons_number=element.z_number,
nucleons_number=nucleons_number))
atom_workspaces.append(self._create_workspace(atom_name=str(nucleons_number) + element_symbol,
s_points=np.copy(s_atom_data),
protons_number=element.z_number,
nucleons_number=nucleons_number))
else:
atom_workspaces.append(self._create_workspace(atom_name=element_symbol,
s_points=np.copy(total_s_atom_data),
optional_name="_total", protons_number=element.z_number))
atom_workspaces.append(self._create_workspace(atom_name=element_symbol,
s_points=np.copy(s_atom_data),
protons_number=element.z_number))
return atom_workspaces
def _create_partial_s_per_type_workspaces(self, atoms_symbols=None, s_data=None):
"""
Creates workspaces for all types of atoms. Each workspace stores quantum order events for S for the given
type of atom. It also stores total workspace for the given type of atom.
:param atoms_symbols: list of atom types for which quantum order events of S should be calculated
:param s_data: dynamical factor data of type SData
:returns: workspaces for list of atoms types, each workspace contains quantum order events of
S for the particular atom type
"""
return self._create_workspaces(atoms_symbols=atoms_symbols, s_data=s_data)
def _fill_s_workspace(self, s_points=None, workspace=None, protons_number=None, nucleons_number=None):
"""
Puts S into workspace(s).
:param s_points: dynamical factor for the given atom
:param workspace: workspace to be filled with S
        :param protons_number: number of protons in the given type of atom
:param nucleons_number: number of nucleons in the given type of atom
"""
if self._instrument.get_name() in AbinsModules.AbinsConstants.ONE_DIMENSIONAL_INSTRUMENTS:
# only FUNDAMENTALS
if s_points.shape[0] == AbinsModules.AbinsConstants.FUNDAMENTALS:
self._fill_s_1d_workspace(s_points=s_points[0], workspace=workspace, protons_number=protons_number,
nucleons_number=nucleons_number)
# total workspaces
elif len(s_points.shape) == AbinsModules.AbinsConstants.ONE_DIMENSIONAL_SPECTRUM:
self._fill_s_1d_workspace(s_points=s_points, workspace=workspace, protons_number=protons_number,
nucleons_number=nucleons_number)
# quantum order events (fundamentals or overtones + combinations for the given order)
else:
dim = s_points.shape[0]
partial_wrk_names = []
for n in range(dim):
seed = "quantum_event_%s" % (n + 1)
wrk_name = workspace + "_" + seed
partial_wrk_names.append(wrk_name)
self._fill_s_1d_workspace(s_points=s_points[n], workspace=wrk_name, protons_number=protons_number,
nucleons_number=nucleons_number)
GroupWorkspaces(InputWorkspaces=partial_wrk_names, OutputWorkspace=workspace)
def _fill_s_1d_workspace(self, s_points=None, workspace=None, protons_number=None, nucleons_number=None):
"""
Puts 1D S into workspace.
        :param protons_number: number of protons in the given type of atom
:param nucleons_number: number of nucleons in the given type of atom
:param s_points: dynamical factor for the given atom
:param workspace: workspace to be filled with S
"""
if protons_number is not None:
s_points = s_points * self._scale * self._get_cross_section(protons_number=protons_number,
nucleons_number=nucleons_number)
dim = 1
length = s_points.size
wrk = WorkspaceFactory.create("Workspace2D", NVectors=dim, XLength=length + 1, YLength=length)
for i in range(dim):
wrk.getSpectrum(i).setDetectorID(i + 1)
wrk.setX(0, self._bins)
wrk.setY(0, s_points)
AnalysisDataService.addOrReplace(workspace, wrk)
# Set correct units on workspace
self._set_workspace_units(wrk=workspace)
def _get_cross_section(self, protons_number=None, nucleons_number=None):
"""
Calculates cross section for the given element.
        :param protons_number: number of protons in the given type of atom
:param nucleons_number: number of nucleons in the given type of atom
:returns: cross section for that element
"""
if nucleons_number is not None:
try:
atom = Atom(a_number=nucleons_number, z_number=protons_number)
# isotopes are not implemented for all elements so use different constructor in that cases
except RuntimeError:
atom = Atom(z_number=protons_number)
else:
atom = Atom(z_number=protons_number)
cross_section = None
if self._scale_by_cross_section == 'Incoherent':
cross_section = atom.neutron()["inc_scatt_xs"]
elif self._scale_by_cross_section == 'Coherent':
cross_section = atom.neutron()["coh_scatt_xs"]
elif self._scale_by_cross_section == 'Total':
cross_section = atom.neutron()["tot_scatt_xs"]
return cross_section
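# Illustrative example (approximate values, not part of the algorithm): for
# hydrogen the lookup above reduces to
#
#     atom = Atom(z_number=1)
#     atom.neutron()["inc_scatt_xs"]   # ~80.3 barn (incoherent)
#     atom.neutron()["coh_scatt_xs"]   # ~1.8 barn  (coherent)
#     atom.neutron()["tot_scatt_xs"]   # ~82.0 barn (total)
#
# which is why 'Incoherent' scaling is dominated by hydrogen in organic samples.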
def _create_total_workspace(self, partial_workspaces=None):
"""
Sets workspace with total S.
:param partial_workspaces: list of workspaces which should be summed up to obtain total workspace
:returns: workspace with total S from partial_workspaces
"""
total_workspace = self._out_ws_name + "_total"
if isinstance(mtd[partial_workspaces[0]], WorkspaceGroup):
local_partial_workspaces = mtd[partial_workspaces[0]].names()
else:
local_partial_workspaces = partial_workspaces
if len(local_partial_workspaces) > 1:
# get frequencies
ws = mtd[local_partial_workspaces[0]]
# initialize S
s_atoms = np.zeros_like(ws.dataY(0))
# collect all S
for partial_ws in local_partial_workspaces:
if self._instrument.get_name() in AbinsModules.AbinsConstants.ONE_DIMENSIONAL_INSTRUMENTS:
s_atoms += mtd[partial_ws].dataY(0)
# create workspace with S
self._fill_s_workspace(s_atoms, total_workspace)
# Otherwise just repackage the workspace we have as the total
else:
CloneWorkspace(InputWorkspace=local_partial_workspaces[0], OutputWorkspace=total_workspace)
return total_workspace
def _create_workspace(self, atom_name=None, s_points=None, optional_name="", protons_number=None,
nucleons_number=None):
"""
Creates a workspace for the given frequencies and s_points with S data. After the workspace is created it is
rebinned, scaled by the cross-section factor and optionally multiplied by the user-defined scaling factor.
:param atom_name: symbol of atom for which workspace should be created
:param s_points: S(Q, omega)
:param optional_name: optional part of workspace name
:returns: workspace for the given frequency and S data
:param protons_number: number of protons in the given type of atom
:param nucleons_number: number of nucleons in the given type of atom
"""
ws_name = self._out_ws_name + "_" + atom_name + optional_name
self._fill_s_workspace(s_points=s_points, workspace=ws_name, protons_number=protons_number,
nucleons_number=nucleons_number)
return ws_name
def _create_experimental_data_workspace(self):
"""
Loads experimental data into workspaces.
:returns: workspace with experimental data
"""
experimental_wrk = Load(self._experimental_file)
self._set_workspace_units(wrk=experimental_wrk.name())
return experimental_wrk
def _set_workspace_units(self, wrk=None):
"""
Sets x and y units for a workspace.
:param wrk: name of the workspace whose units should be set
"""
mtd[wrk].getAxis(0).setUnit("DeltaE_inWavenumber")
mtd[wrk].setYUnitLabel("S /Arbitrary Units")
mtd[wrk].setYUnit("Arbitrary Units")
def _check_advanced_parameter(self):
"""
Checks if parameters from AbinsParameters.py are valid. If any parameter is invalid, a RuntimeError is raised
with a meaningful message.
"""
message = " in AbinsParameters.py. "
self._check_general_resolution(message)
self._check_tosca_parameters(message)
self._check_folder_names(message)
self._check_rebining(message)
self._check_threshold(message)
self._check_chunk_size(message)
self._check_threads(message)
def _check_general_resolution(self, message_end=None):
"""
Checks general parameters used in constructing resolution functions.
:param message_end: closing part of the error message.
"""
# check fwhm
fwhm = AbinsModules.AbinsParameters.fwhm
if not (isinstance(fwhm, float) and 0.0 < fwhm < 10.0):
raise RuntimeError("Invalid value of fwhm" + message_end)
# check delta_width
delta_width = AbinsModules.AbinsParameters.delta_width
if not (isinstance(delta_width, float) and 0.0 < delta_width < 1.0):
raise RuntimeError("Invalid value of delta_width" + message_end)
def _check_tosca_parameters(self, message_end=None):
"""
Checks TOSCA parameters.
:param message_end: closing part of the error message.
"""
# TOSCA final energy in cm^-1
final_energy = AbinsModules.AbinsParameters.tosca_final_neutron_energy
if not (isinstance(final_energy, float) and final_energy > 0.0):
raise RuntimeError("Invalid value of final_neutron_energy for TOSCA" + message_end)
angle = AbinsModules.AbinsParameters.tosca_cos_scattering_angle
if not isinstance(angle, float):
raise RuntimeError("Invalid value of cosines scattering angle for TOSCA" + message_end)
resolution_const_a = AbinsModules.AbinsParameters.tosca_a
if not isinstance(resolution_const_a, float):
raise RuntimeError("Invalid value of constant A for TOSCA (used by the resolution TOSCA function)" +
message_end)
resolution_const_b = AbinsModules.AbinsParameters.tosca_b
if not isinstance(resolution_const_b, float):
raise RuntimeError("Invalid value of constant B for TOSCA (used by the resolution TOSCA function)" +
message_end)
resolution_const_c = AbinsModules.AbinsParameters.tosca_c
if not isinstance(resolution_const_c, float):
raise RuntimeError("Invalid value of constant C for TOSCA (used by the resolution TOSCA function)" +
message_end)
def _check_folder_names(self, message_end=None):
"""
Checks folder names.
:param message_end: closing part of the error message.
"""
folder_names = []
ab_initio_group = AbinsModules.AbinsParameters.ab_initio_group
if not isinstance(ab_initio_group, str) or ab_initio_group == "":
raise RuntimeError("Invalid name for folder in which the ab initio data should be stored.")
folder_names.append(ab_initio_group)
powder_data_group = AbinsModules.AbinsParameters.powder_data_group
if not isinstance(powder_data_group, str) or powder_data_group == "":
raise RuntimeError("Invalid value of powder_data_group" + message_end)
elif powder_data_group in folder_names:
raise RuntimeError("Name for powder_data_group already used by as name of another folder.")
folder_names.append(powder_data_group)
crystal_data_group = AbinsModules.AbinsParameters.crystal_data_group
if not isinstance(crystal_data_group, str) or crystal_data_group == "":
raise RuntimeError("Invalid value of crystal_data_group" + message_end)
elif crystal_data_group in folder_names:
raise RuntimeError("Name for crystal_data_group already used as a name of another folder.")
s_data_group = AbinsModules.AbinsParameters.s_data_group
if not isinstance(s_data_group, str) or s_data_group == "":
raise RuntimeError("Invalid value of s_data_group" + message_end)
elif s_data_group in folder_names:
raise RuntimeError("Name for s_data_group already used as a name of another folder.")
def _check_rebining(self, message_end=None):
"""
Checks rebinning parameters.
:param message_end: closing part of the error message.
"""
pkt_per_peak = AbinsModules.AbinsParameters.pkt_per_peak
if not (isinstance(pkt_per_peak, six.integer_types) and 1 <= pkt_per_peak <= 1000):
raise RuntimeError("Invalid value of pkt_per_peak" + message_end)
min_wavenumber = AbinsModules.AbinsParameters.min_wavenumber
if not (isinstance(min_wavenumber, float) and min_wavenumber >= 0.0):
raise RuntimeError("Invalid value of min_wavenumber" + message_end)
max_wavenumber = AbinsModules.AbinsParameters.max_wavenumber
if not (isinstance(max_wavenumber, float) and max_wavenumber > 0.0):
raise RuntimeError("Invalid number of max_wavenumber" + message_end)
if min_wavenumber > max_wavenumber:
raise RuntimeError("Invalid energy window for rebinning.")
def _check_threshold(self, message_end=None):
"""
Checks threshold for frequencies.
:param message_end: closing part of the error message.
"""
freq_threshold = AbinsModules.AbinsParameters.frequencies_threshold
if not (isinstance(freq_threshold, float) and freq_threshold >= 0.0):
raise RuntimeError("Invalid value of frequencies_threshold" + message_end)
# check s threshold
s_absolute_threshold = AbinsModules.AbinsParameters.s_absolute_threshold
if not (isinstance(s_absolute_threshold, float) and s_absolute_threshold > 0.0):
raise RuntimeError("Invalid value of s_absolute_threshold" + message_end)
s_relative_threshold = AbinsModules.AbinsParameters.s_relative_threshold
if not (isinstance(s_relative_threshold, float) and s_relative_threshold > 0.0):
raise RuntimeError("Invalid value of s_relative_threshold" + message_end)
def _check_chunk_size(self, message_end=None):
"""
Checks the optimal chunk size.
:param message_end: closing part of the error message.
"""
optimal_size = AbinsModules.AbinsParameters.optimal_size
if not (isinstance(optimal_size, six.integer_types) and optimal_size > 0):
raise RuntimeError("Invalid value of optimal_size" + message_end)
def _check_threads(self, message_end=None):
"""
Checks the number of threads.
:param message_end: closing part of the error message.
"""
if PATHOS_FOUND:
threads = AbinsModules.AbinsParameters.threads
if not (isinstance(threads, six.integer_types) and 1 <= threads <= mp.cpu_count()):
raise RuntimeError("Invalid number of threads for parallelisation over atoms" + message_end)
def _validate_ab_initio_file_extension(self, filename_full_path=None, expected_file_extension=None):
"""
Checks consistency between the name of the ab initio program and the file extension.
:param filename_full_path: full path of the file to check
:param expected_file_extension: expected file extension
:returns: dictionary with an error message (empty if the extension matches)
"""
ab_initio_program = self.getProperty("AbInitioProgram").value
msg_err = "Invalid %s file. " % filename_full_path
msg_rename = "Please rename your file and try again."
# check extension of a file
found_filename_ext = os.path.splitext(filename_full_path)[1]
if found_filename_ext.lower() != expected_file_extension:
return dict(Invalid=True,
Comment=msg_err + "Output from ab initio program " + ab_initio_program + " is expected." +
" The expected extension of file is ." + expected_file_extension +
". Found: " + found_filename_ext + ". " + msg_rename)
else:
return dict(Invalid=False, Comment="")
def _validate_dmol3_input_file(self, filename_full_path=None):
"""
Method to validate input file for DMOL3 ab initio program.
:param filename_full_path: full path of a file to check.
:returns: dictionary with an error message (empty if the file is valid)
"""
logger.information("Validate DMOL3 file with vibrational data.")
return self._validate_ab_initio_file_extension(filename_full_path=filename_full_path,
expected_file_extension=".outmol")
def _validate_gaussian_input_file(self, filename_full_path=None):
"""
Method to validate input file for GAUSSIAN ab initio program.
:param filename_full_path: full path of a file to check.
:returns: dictionary with an error message (empty if the file is valid)
"""
logger.information("Validate GAUSSIAN file with vibration data.")
return self._validate_ab_initio_file_extension(filename_full_path=filename_full_path,
expected_file_extension=".log")
def _validate_crystal_input_file(self, filename_full_path=None):
"""
Method to validate input file for CRYSTAL ab initio program.
:param filename_full_path: full path of a file to check.
:returns: True if file is valid otherwise false.
"""
logger.information("Validate CRYSTAL file with vibrational or phonon data.")
return self._validate_ab_initio_file_extension(filename_full_path=filename_full_path,
expected_file_extension=".out")
def _validate_castep_input_file(self, filename_full_path=None):
"""
Check if ab initio input vibrational or phonon file has been produced by CASTEP. Currently the crucial
keywords in the first few lines are checked (to be modified if a better validation is found...)
:param filename_full_path: full path of a file to check
:returns: dictionary with two entries: "Invalid" and "Comment". "Invalid" is either True or False; "Comment" is
an empty string if the file is valid, otherwise it stores a description of the problem.
"""
logger.information("Validate CASTEP file with vibrational or phonon data.")
msg_err = "Invalid %s file. " % filename_full_path
output = self._validate_ab_initio_file_extension(filename_full_path=filename_full_path,
expected_file_extension=".phonon")
if output["Invalid"]:
return output
# Check the structure of the header part of the file.
# Fortran convention is followed here: letter case does not matter
with open(filename_full_path) as castep_file:
line = self._get_one_line(castep_file)
if not self._compare_one_line(line, "beginheader"): # first line is BEGIN header
return dict(Invalid=True, Comment=msg_err + "The first line should be 'BEGIN header'.")
line = self._get_one_line(castep_file)
if not self._compare_one_line(one_line=line, pattern="numberofions"):
return dict(Invalid=True, Comment=msg_err + "The second line should include 'Number of ions'.")
line = self._get_one_line(castep_file)
if not self._compare_one_line(one_line=line, pattern="numberofbranches"):
return dict(Invalid=True, Comment=msg_err + "The third line should include 'Number of branches'.")
line = self._get_one_line(castep_file)
if not self._compare_one_line(one_line=line, pattern="numberofwavevectors"):
return dict(Invalid=True, Comment=msg_err + "The fourth line should include 'Number of wavevectors'.")
line = self._get_one_line(castep_file)
if not self._compare_one_line(one_line=line,
pattern="frequenciesin"):
return dict(Invalid=True, Comment=msg_err + "The fifth line should be 'Frequencies in'.")
return output
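# For reference, the beginning of a CASTEP .phonon file that passes the checks
# above looks roughly like this (values are illustrative):
#
#      BEGIN header
#      Number of ions         2
#      Number of branches     6
#      Number of wavevectors  1
#      Frequencies in         cm-1, IR intensities in (D/A)**2/amu
#      ...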
def _get_one_line(self, file_obj=None):
"""
:param file_obj: file object from which reading is done
:returns: string containing one non-empty line
"""
line = file_obj.readline().replace(" ", "").lower()
while line and not line.strip():
line = file_obj.readline().replace(" ", "").lower()
return line
def _compare_one_line(self, one_line, pattern):
"""
Compares a line, given as a string, with a pattern.
:param one_line: line in the form of a string to be compared
:param pattern: string which should be present in the line after removing white spaces and setting all
letters to lower case
:returns: True if pattern is present in the line, otherwise False
"""
return one_line and pattern in one_line.replace(" ", "")
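# Example of the normalisation performed by the two helpers above:
#
#     " BEGIN header \n".replace(" ", "").lower()   # -> "beginheader\n"
#
# so _compare_one_line(line, "beginheader") matches the CASTEP header line
# regardless of spacing and letter case.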
def _get_properties(self):
"""
Loads all properties into the object's attributes.
"""
self._ab_initio_program = self.getProperty("AbInitioProgram").value
self._vibrational_or_phonon_data_file = self.getProperty("VibrationalOrPhononFile").value
self._experimental_file = self.getProperty("ExperimentalFile").value
self._temperature = self.getProperty("TemperatureInKelvin").value
self._bin_width = self.getProperty("BinWidthInWavenumber").value
self._scale = self.getProperty("Scale").value
self._sample_form = self.getProperty("SampleForm").value
instrument_name = self.getProperty("Instrument").value
if instrument_name in AbinsModules.AbinsConstants.ALL_INSTRUMENTS:
self._instrument_name = instrument_name
instrument_producer = AbinsModules.InstrumentProducer()
self._instrument = instrument_producer.produce_instrument(name=self._instrument_name)
else:
raise ValueError("Unknown instrument %s" % instrument_name)
self._atoms = self.getProperty("Atoms").value
self._sum_contributions = self.getProperty("SumContributions").value
# conversion from str to int
self._num_quantum_order_events = int(self.getProperty("QuantumOrderEventsNumber").value)
self._scale_by_cross_section = self.getPropertyValue('ScaleByCrossSection')
self._out_ws_name = self.getPropertyValue('OutputWorkspace')
self._calc_partial = (len(self._atoms) > 0)
# The user-defined interval is exclusive with respect to
# AbinsModules.AbinsParameters.min_wavenumber
# AbinsModules.AbinsParameters.max_wavenumber
# with bin width given by self._bin_width
step = self._bin_width
start = AbinsModules.AbinsParameters.min_wavenumber + step / 2.0
stop = AbinsModules.AbinsParameters.max_wavenumber + step / 2.0
self._bins = np.arange(start=start, stop=stop, step=step, dtype=AbinsModules.AbinsConstants.FLOAT_TYPE)
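# Worked example of the binning above (assuming, for illustration only,
# min_wavenumber = 0.0, max_wavenumber = 4000.0 and a bin width of 1.0 cm^-1):
#
#     step = 1.0
#     start = 0.0 + 0.5                    # 0.5
#     stop = 4000.0 + 0.5                  # 4000.5
#     np.arange(0.5, 4000.5, 1.0)          # [0.5, 1.5, ..., 3999.5], 4000 points
#
# i.e. the grid is shifted by half a bin width relative to the raw limits.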
AlgorithmFactory.subscribe(Abins)
| gpl-3.0 | 8,994,032,376,073,273,000 | 47.804762 | 120 | 0.609791 | false | 4.180706 | false | false | false |
Oli76/rwslib | rwslib/builder_constants.py | 1 | 6274 | # -*- coding: utf-8 -*-
__author__ = 'isparks'
import enum
class DataType(enum.Enum):
"""ODM Data Types"""
Text = 'text'
Integer = 'integer'
Float = 'float'
Date = 'date'
DateTime = 'datetime'
Time = 'time'
String = 'string' # Used only by codelists
class QueryStatusType(enum.Enum):
"""MdsolQuery action type"""
Open = "Open"
Cancelled = "Cancelled"
Answered = "Answered"
Forwarded = "Forwarded"
Closed = "Closed"
class StepType(enum.Enum):
"""Edit/Derivation step types"""
CustomFunction = "CustomFunction"
IsEmpty = "IsEmpty"
IsNotEmpty = "IsNotEmpty"
Contains = "Contains"
StartsWith = "StartsWith"
IsLessThan = "IsLessThan"
IsLessThanOrEqualTo = "IsLessThanOrEqualTo"
IsGreaterThan = "IsGreaterThan"
IsGreaterThanOrEqualTo = "IsGreaterThanOrEqualTo"
IsEqualTo = "IsEqualTo"
IsNonConformant = "IsNonConformant"
IsNotEqualTo = "IsNotEqualTo"
InLocalLabRange = "InLocalLabRange"
LengthIsLessThan = "LengthIsLessThan"
LengthIsLessThanOrEqualTo = "LengthIsLessThanOrEqualTo"
LengthIsGreaterThan = "LengthIsGreaterThan"
LengthIsGreaterThanOrEqualTo = "LengthIsGreaterThanOrEqualTo"
LengthIsEqualTo = "LengthIsEqualTo"
Or = "Or"
And = "And"
Not = "Not"
Now = "Now"
IsPresent = "IsPresent"
IsActive = "IsActive"
Add = "Add"
Subtract = "Subtract"
Multiply = "Multiply"
Divide = "Divide"
AddDay = "AddDay"
AddMonth = "AddMonth"
AddYear = "AddYear"
AddSec = "AddSec"
AddMin = "AddMin"
AddHour = "AddHour"
DaySpan = "DaySpan"
TimeSpan = "TimeSpan"
Age = "Age"
StringAdd = "StringAdd"
Space = "Space"
ALL_STEPS = [StepType.CustomFunction,
StepType.IsEmpty,
StepType.IsNotEmpty,
StepType.Contains,
StepType.StartsWith,
StepType.IsLessThan,
StepType.IsLessThanOrEqualTo,
StepType.IsGreaterThan,
StepType.IsGreaterThanOrEqualTo,
StepType.IsEqualTo,
StepType.IsNonConformant,
StepType.IsNotEqualTo,
StepType.InLocalLabRange,
StepType.LengthIsLessThan,
StepType.LengthIsLessThanOrEqualTo,
StepType.LengthIsGreaterThan,
StepType.LengthIsGreaterThanOrEqualTo,
StepType.LengthIsEqualTo,
StepType.Or,
StepType.And,
StepType.Not,
StepType.Now,
StepType.IsPresent,
StepType.IsActive,
StepType.Add,
StepType.Subtract,
StepType.Multiply,
StepType.Divide,
StepType.AddDay,
StepType.AddMonth,
StepType.AddYear,
StepType.AddSec,
StepType.AddMin,
StepType.AddHour,
StepType.DaySpan,
StepType.TimeSpan,
StepType.Age,
StepType.StringAdd]
# Note: Missing 2015 additions to edit check step functions.
VALID_DERIVATION_STEPS = [
StepType.Age,
StepType.Subtract,
StepType.Multiply,
StepType.Divide,
StepType.AddDay,
StepType.AddMonth,
StepType.AddYear,
StepType.AddSec,
StepType.AddMin,
StepType.AddHour,
StepType.DaySpan,
StepType.TimeSpan,
StepType.Now,
StepType.StringAdd,
StepType.CustomFunction,
StepType.Space,
StepType.Add
]
class ActionType(enum.Enum):
OpenQuery = "OpenQuery"
RequireReview = "RequireReview"
RequireVerification = "RequireVerification"
AddComment = "AddComment"
AddDeviation = "AddDeviation"
CustomFunction = "CustomFunction"
PlaceSticky = "PlaceSticky"
AddForm = "AddForm"
AddMatrix = "AddMatrix"
MrgMatrix = "MrgMatrix"
OldMrgMatrix = "OldMrgMatrix"
SetNonconformant = "SetNonconformant"
SendMessage = "SendMessage"
SetDataPoint = "SetDataPoint"
SetTimeZero = "SetTimeZero"
SetTimeForward = "SetTimeForward"
SetSubjectStatus = "SetSubjectStatus"
SetSubjectName = "SetSubjectName"
UpdateFormName = "UpdateFormName"
UpdateFolderName = "UpdateFolderName"
SetRecordDate = "SetRecordDate"
SetDataPageDate = "SetDataPageDate"
SetInstanceDate = "SetInstanceDate"
SetSubjectDate = "SetSubjectDate"
SetDataPointVisible = "SetDataPointVisible"
SetSecondarySubjectName = "SetSecondarySubjectName"
SetFormRequiresSignature = "SetFormRequiresSignature"
SetFolderRequiresSignature = "SetFolderRequiresSignature"
SetSubjectRequiresSignature = "SetSubjectRequiresSignature"
SetDynamicSearchList = "SetDynamicSearchList"
ALL_ACTIONS = [
ActionType.OpenQuery,
ActionType.RequireReview,
ActionType.RequireVerification,
ActionType.AddComment,
ActionType.AddDeviation,
ActionType.CustomFunction,
ActionType.PlaceSticky,
ActionType.AddForm,
ActionType.AddMatrix,
ActionType.MrgMatrix,
ActionType.OldMrgMatrix,
ActionType.SetNonconformant,
ActionType.SendMessage,
ActionType.SetDataPoint,
ActionType.SetTimeZero,
ActionType.SetTimeForward,
ActionType.SetSubjectStatus,
ActionType.SetSubjectName,
ActionType.UpdateFormName,
ActionType.UpdateFolderName,
ActionType.SetRecordDate,
ActionType.SetDataPageDate,
ActionType.SetInstanceDate,
ActionType.SetSubjectDate,
ActionType.SetDataPointVisible,
ActionType.SetSecondarySubjectName,
ActionType.SetFormRequiresSignature,
ActionType.SetFolderRequiresSignature,
ActionType.SetSubjectRequiresSignature,
ActionType.SetDynamicSearchList
]
class RangeCheckComparatorType(enum.Enum):
LessThanEqualTo = 'LE'
GreaterThanEqualTo = 'GE'
class RangeCheckType(enum.Enum):
Soft = 'Soft'
Hard = 'Hard'
class ControlType(enum.Enum):
CheckBox = 'CheckBox'
Text = 'Text'
DateTime = 'DateTime'
DropDownList = 'DropDownList'
SearchList = 'SearchList'
RadioButton = 'RadioButton'
RadioButtonVertical = 'RadioButton (Vertical)'
FileUpload = 'File Upload'
LongText = 'LongText'
SignaturePage = 'Signature page'
SignatureFolder = 'Signature folder'
SignatureSubject = 'Signature subject'
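# Illustrative usage sketch (not part of rwslib itself): the enums above are
# typically consulted when building or validating edit checks and derivations.
#
#     step = StepType.IsLessThan
#     step in ALL_STEPS                  # True
#     step in VALID_DERIVATION_STEPS     # False - comparison steps are check-only
#     DataType.Integer.value             # "integer", the string used in the ODM output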
| mit | -2,006,527,805,749,792,300 | 27.261261 | 65 | 0.669908 | false | 3.712426 | false | false | false |
litong01/python-monasca | kiloeyes/v2/elasticsearch/versions.py | 1 | 1877 | # Copyright 2013 IBM Corp
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import falcon
from kiloeyes.common import resource_api
from kiloeyes.openstack.common import log
try:
import ujson as json
except ImportError:
import json
LOG = log.getLogger(__name__)
UPDATED = str(datetime.datetime(2014, 1, 1, 0, 0, 0))
class VersionDispatcher(object):
def __init__(self, global_conf):
LOG.debug('initializing V2API!')
super(VersionDispatcher, self).__init__()
@resource_api.Restify('/', method='get')
def get_versions(self, req, res):
res.body = json.dumps([{
"id": "v2.0",
"links": [{"rel": "self",
"href": req.uri}],
"status": "CURRENT",
"updated": UPDATED}])
res.status = getattr(falcon, 'HTTP_200')
@resource_api.Restify('/{version_id}', method='get')
def get_version_by_id(self, req, res, version_id):
if version_id in ['v2.0', '2.0', '2']:
res.body = json.dumps({
"id": "v2.0",
"links": [{"rel": "self",
"href": req.uri}],
"status": "CURRENT",
"updated": UPDATED})
res.status = getattr(falcon, 'HTTP_200')
else:
res.body = ''
res.status = getattr(falcon, 'HTTP_501')
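# Illustrative example (assumed host/port, not part of the module): a GET on
# /v2.0 dispatched to get_version_by_id above returns HTTP 200 with a body like
#
#     {"id": "v2.0",
#      "links": [{"rel": "self", "href": "http://localhost:9090/v2.0"}],
#      "status": "CURRENT",
#      "updated": "2014-01-01 00:00:00"}
#
# while any other version_id falls through to the empty-body HTTP 501 branch.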
| apache-2.0 | -6,706,174,373,474,287,000 | 31.362069 | 75 | 0.595631 | false | 3.709486 | false | false | false |
goodtune/vitriolic | touchtechnology/news/forms.py | 1 | 1627 | from django import forms
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from modelforms.forms import ModelForm
from touchtechnology.common.forms.mixins import (
BootstrapFormControlMixin, SuperUserSlugMixin,
)
from touchtechnology.news.models import Article, Category, Translation
class ArticleForm(SuperUserSlugMixin, ModelForm):
def __init__(self, *args, **kwargs):
super(ArticleForm, self).__init__(*args, **kwargs)
if not self.fields["categories"].queryset.count():
self.fields.pop("categories", None)
self.fields["image"].required = getattr(
settings, "TOUCHTECHNOLOGY_NEWS_IMAGE_REQUIRED", True
)
class Meta:
model = Article
fields = (
"headline",
"image",
"abstract",
"copy",
"published",
"slug",
"slug_locked",
"byline",
"keywords",
"categories",
"is_active",
)
class CategoryForm(SuperUserSlugMixin, ModelForm):
class Meta:
model = Category
fields = (
"title",
"short_title",
"slug",
"slug_locked",
"is_active",
"hidden_from_navigation",
)
class TranslationForm(BootstrapFormControlMixin, ModelForm):
class Meta:
model = Translation
fields = (
"locale",
"headline",
"abstract",
"copy",
)
locale = forms.ChoiceField(choices=settings.LANGUAGES, label=_("Language"))
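# Illustrative sketch (assumed view code, not part of this module):
#
#     form = ArticleForm(request.POST or None, request.FILES or None)
#     if form.is_valid():
#         article = form.save()
#
# Whether the "image" field is required is controlled by the
# TOUCHTECHNOLOGY_NEWS_IMAGE_REQUIRED setting, as read in ArticleForm.__init__ above.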
| bsd-3-clause | 2,439,485,887,015,281,000 | 26.116667 | 79 | 0.564229 | false | 4.519444 | false | false | false |